author     Mauro Carvalho Chehab <mchehab@redhat.com>   2012-07-29 20:11:05 -0400
committer  Mauro Carvalho Chehab <mchehab@redhat.com>   2012-07-29 20:11:05 -0400
commit     c2078e4c9120e7b38b1a02cd9fc6dd4f792110bf (patch)
tree       a30b29c0bf8cf2288a32ceaeb75013cb0b5d5865 /drivers/edac
parent     73bcc49959e4e40911dd0dd634bf1b353827df66 (diff)
parent     f58d0dee07fe6328f775669eb6aa3a123efad6c2 (diff)
Merge branch 'devel'

* devel: (33 commits)
  edac i5000, i5400: fix pointer math in i5000_get_mc_regs()
  edac: allow specifying the error count with fake_inject
  edac: add support for Calxeda highbank L2 cache ecc
  edac: add support for Calxeda highbank memory controller
  edac: create top-level debugfs directory
  sb_edac: properly handle error count
  i7core_edac: properly handle error count
  edac: edac_mc_handle_error(): add an error_count parameter
  edac: remove arch-specific parameter for the error handler
  amd64_edac: Don't pass driver name as an error parameter
  edac_mc: check for allocation failure in edac_mc_alloc()
  edac: Increase version to 3.0.0
  edac_mc: Cleanup per-dimm_info debug messages
  edac: Convert debugfX to edac_dbg(X,
  edac: Use more normal debugging macro style
  edac: Don't add __func__ or __FILE__ for debugf[0-9] msgs
  Edac: Add ABI Documentation for the new device nodes
  edac: move documentation ABI to ABI/testing/sysfs-devices-edac
  i7core_edac: change the mem allocation scheme to make Documentation/kobject.txt happy
  edac: change the mem allocation scheme to make Documentation/kobject.txt happy
  ...
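
The change that drives most of the per-driver churn in this diff is the new edac_mc_handle_error() call shape: callers now pass an explicit error count as the third argument and plain message/"other detail" strings at the end, instead of the per-driver EDAC_MOD_STR name and the old trailing arch-specific pointer. A minimal sketch of the updated call, inferred from the converted call sites in the amd64_edac.c and amd76x_edac.c hunks below (the wrapper example_report_ce is illustrative only, not part of the patch):

#include "edac_core.h"	/* declares edac_mc_handle_error() and struct mem_ctl_info */

/* Sketch only: argument order taken from the converted call sites below. */
static void example_report_ce(struct mem_ctl_info *mci,
			      unsigned long page, unsigned long offset,
			      unsigned long syndrome, int csrow, int channel)
{
	/*
	 * Third argument: the new error_count (one event here).
	 * csrow/channel/-1: top/mid/low location layers, -1 when unused.
	 * Final two strings: message and "other detail"; both may be "".
	 */
	edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1,
			     page, offset, syndrome,
			     csrow, channel, -1,
			     "", "");
}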
Diffstat (limited to 'drivers/edac')
-rw-r--r--  drivers/edac/Kconfig              |   24
-rw-r--r--  drivers/edac/Makefile             |    3
-rw-r--r--  drivers/edac/amd64_edac.c         |  376
-rw-r--r--  drivers/edac/amd64_edac.h         |   29
-rw-r--r--  drivers/edac/amd64_edac_dbg.c     |   89
-rw-r--r--  drivers/edac/amd64_edac_inj.c     |  134
-rw-r--r--  drivers/edac/amd76x_edac.c        |   34
-rw-r--r--  drivers/edac/cell_edac.c          |   28
-rw-r--r--  drivers/edac/cpc925_edac.c        |   96
-rw-r--r--  drivers/edac/e752x_edac.c         |   92
-rw-r--r--  drivers/edac/e7xxx_edac.c         |   89
-rw-r--r--  drivers/edac/edac_core.h          |   39
-rw-r--r--  drivers/edac/edac_device.c        |   47
-rw-r--r--  drivers/edac/edac_device_sysfs.c  |   71
-rw-r--r--  drivers/edac/edac_mc.c            |  395
-rw-r--r--  drivers/edac/edac_mc_sysfs.c      | 1355
-rw-r--r--  drivers/edac/edac_module.c        |   20
-rw-r--r--  drivers/edac/edac_module.h        |   26
-rw-r--r--  drivers/edac/edac_pci.c           |   26
-rw-r--r--  drivers/edac/edac_pci_sysfs.c     |   49
-rw-r--r--  drivers/edac/highbank_l2_edac.c   |  149
-rw-r--r--  drivers/edac/highbank_mc_edac.c   |  264
-rw-r--r--  drivers/edac/i3000_edac.c         |   47
-rw-r--r--  drivers/edac/i3200_edac.c         |   48
-rw-r--r--  drivers/edac/i5000_edac.c         |  207
-rw-r--r--  drivers/edac/i5100_edac.c         |   14
-rw-r--r--  drivers/edac/i5400_edac.c         |  201
-rw-r--r--  drivers/edac/i7300_edac.c         |  173
-rw-r--r--  drivers/edac/i7core_edac.c        |  520
-rw-r--r--  drivers/edac/i82443bxgx_edac.c    |   51
-rw-r--r--  drivers/edac/i82860_edac.c        |   45
-rw-r--r--  drivers/edac/i82875p_edac.c       |   53
-rw-r--r--  drivers/edac/i82975x_edac.c       |   55
-rw-r--r--  drivers/edac/mpc85xx_edac.c       |  131
-rw-r--r--  drivers/edac/mv64x60_edac.c       |   40
-rw-r--r--  drivers/edac/pasemi_edac.c        |   22
-rw-r--r--  drivers/edac/ppc4xx_edac.c        |   16
-rw-r--r--  drivers/edac/r82600_edac.c        |   48
-rw-r--r--  drivers/edac/sb_edac.c            |  257
-rw-r--r--  drivers/edac/tile_edac.c          |   12
-rw-r--r--  drivers/edac/x38_edac.c           |   48
41 files changed, 3047 insertions, 2376 deletions
diff --git a/drivers/edac/Kconfig b/drivers/edac/Kconfig
index fdffa1beca17..409b92b8d346 100644
--- a/drivers/edac/Kconfig
+++ b/drivers/edac/Kconfig
@@ -7,7 +7,7 @@
 menuconfig EDAC
 	bool "EDAC (Error Detection And Correction) reporting"
 	depends on HAS_IOMEM
-	depends on X86 || PPC || TILE
+	depends on X86 || PPC || TILE || ARM
 	help
 	  EDAC is designed to report errors in the core system.
 	  These are low-level errors that are reported in the CPU or
@@ -31,6 +31,14 @@ if EDAC
 
 comment "Reporting subsystems"
 
+config EDAC_LEGACY_SYSFS
+	bool "EDAC legacy sysfs"
+	default y
+	help
+	  Enable the compatibility sysfs nodes.
+	  Use 'Y' if your edac utilities aren't ported to work with the newer
+	  structures.
+
 config EDAC_DEBUG
 	bool "Debugging"
 	help
@@ -294,4 +302,18 @@ config EDAC_TILE
 	  Support for error detection and correction on the
 	  Tilera memory controller.
 
+config EDAC_HIGHBANK_MC
+	tristate "Highbank Memory Controller"
+	depends on EDAC_MM_EDAC && ARCH_HIGHBANK
+	help
+	  Support for error detection and correction on the
+	  Calxeda Highbank memory controller.
+
+config EDAC_HIGHBANK_L2
+	tristate "Highbank L2 Cache"
+	depends on EDAC_MM_EDAC && ARCH_HIGHBANK
+	help
+	  Support for error detection and correction on the
+	  Calxeda Highbank memory controller.
+
 endif # EDAC
diff --git a/drivers/edac/Makefile b/drivers/edac/Makefile
index 196a63dd37c5..7e5129a733f8 100644
--- a/drivers/edac/Makefile
+++ b/drivers/edac/Makefile
@@ -55,3 +55,6 @@ obj-$(CONFIG_EDAC_AMD8111) += amd8111_edac.o
 obj-$(CONFIG_EDAC_AMD8131) += amd8131_edac.o
 
 obj-$(CONFIG_EDAC_TILE) += tile_edac.o
+
+obj-$(CONFIG_EDAC_HIGHBANK_MC) += highbank_mc_edac.o
+obj-$(CONFIG_EDAC_HIGHBANK_L2) += highbank_l2_edac.o
diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c
index 7be9b7288e90..5a297a26211d 100644
--- a/drivers/edac/amd64_edac.c
+++ b/drivers/edac/amd64_edac.c
@@ -321,8 +321,8 @@ found:
321 return edac_mc_find((int)node_id); 321 return edac_mc_find((int)node_id);
322 322
323err_no_match: 323err_no_match:
324 debugf2("sys_addr 0x%lx doesn't match any node\n", 324 edac_dbg(2, "sys_addr 0x%lx doesn't match any node\n",
325 (unsigned long)sys_addr); 325 (unsigned long)sys_addr);
326 326
327 return NULL; 327 return NULL;
328} 328}
@@ -393,15 +393,15 @@ static int input_addr_to_csrow(struct mem_ctl_info *mci, u64 input_addr)
393 mask = ~mask; 393 mask = ~mask;
394 394
395 if ((input_addr & mask) == (base & mask)) { 395 if ((input_addr & mask) == (base & mask)) {
396 debugf2("InputAddr 0x%lx matches csrow %d (node %d)\n", 396 edac_dbg(2, "InputAddr 0x%lx matches csrow %d (node %d)\n",
397 (unsigned long)input_addr, csrow, 397 (unsigned long)input_addr, csrow,
398 pvt->mc_node_id); 398 pvt->mc_node_id);
399 399
400 return csrow; 400 return csrow;
401 } 401 }
402 } 402 }
403 debugf2("no matching csrow for InputAddr 0x%lx (MC node %d)\n", 403 edac_dbg(2, "no matching csrow for InputAddr 0x%lx (MC node %d)\n",
404 (unsigned long)input_addr, pvt->mc_node_id); 404 (unsigned long)input_addr, pvt->mc_node_id);
405 405
406 return -1; 406 return -1;
407} 407}
@@ -430,20 +430,20 @@ int amd64_get_dram_hole_info(struct mem_ctl_info *mci, u64 *hole_base,
430 430
431 /* only revE and later have the DRAM Hole Address Register */ 431 /* only revE and later have the DRAM Hole Address Register */
432 if (boot_cpu_data.x86 == 0xf && pvt->ext_model < K8_REV_E) { 432 if (boot_cpu_data.x86 == 0xf && pvt->ext_model < K8_REV_E) {
433 debugf1(" revision %d for node %d does not support DHAR\n", 433 edac_dbg(1, " revision %d for node %d does not support DHAR\n",
434 pvt->ext_model, pvt->mc_node_id); 434 pvt->ext_model, pvt->mc_node_id);
435 return 1; 435 return 1;
436 } 436 }
437 437
438 /* valid for Fam10h and above */ 438 /* valid for Fam10h and above */
439 if (boot_cpu_data.x86 >= 0x10 && !dhar_mem_hoist_valid(pvt)) { 439 if (boot_cpu_data.x86 >= 0x10 && !dhar_mem_hoist_valid(pvt)) {
440 debugf1(" Dram Memory Hoisting is DISABLED on this system\n"); 440 edac_dbg(1, " Dram Memory Hoisting is DISABLED on this system\n");
441 return 1; 441 return 1;
442 } 442 }
443 443
444 if (!dhar_valid(pvt)) { 444 if (!dhar_valid(pvt)) {
445 debugf1(" Dram Memory Hoisting is DISABLED on this node %d\n", 445 edac_dbg(1, " Dram Memory Hoisting is DISABLED on this node %d\n",
446 pvt->mc_node_id); 446 pvt->mc_node_id);
447 return 1; 447 return 1;
448 } 448 }
449 449
@@ -475,9 +475,9 @@ int amd64_get_dram_hole_info(struct mem_ctl_info *mci, u64 *hole_base,
475 else 475 else
476 *hole_offset = k8_dhar_offset(pvt); 476 *hole_offset = k8_dhar_offset(pvt);
477 477
478 debugf1(" DHAR info for node %d base 0x%lx offset 0x%lx size 0x%lx\n", 478 edac_dbg(1, " DHAR info for node %d base 0x%lx offset 0x%lx size 0x%lx\n",
479 pvt->mc_node_id, (unsigned long)*hole_base, 479 pvt->mc_node_id, (unsigned long)*hole_base,
480 (unsigned long)*hole_offset, (unsigned long)*hole_size); 480 (unsigned long)*hole_offset, (unsigned long)*hole_size);
481 481
482 return 0; 482 return 0;
483} 483}
@@ -528,10 +528,9 @@ static u64 sys_addr_to_dram_addr(struct mem_ctl_info *mci, u64 sys_addr)
528 /* use DHAR to translate SysAddr to DramAddr */ 528 /* use DHAR to translate SysAddr to DramAddr */
529 dram_addr = sys_addr - hole_offset; 529 dram_addr = sys_addr - hole_offset;
530 530
531 debugf2("using DHAR to translate SysAddr 0x%lx to " 531 edac_dbg(2, "using DHAR to translate SysAddr 0x%lx to DramAddr 0x%lx\n",
532 "DramAddr 0x%lx\n", 532 (unsigned long)sys_addr,
533 (unsigned long)sys_addr, 533 (unsigned long)dram_addr);
534 (unsigned long)dram_addr);
535 534
536 return dram_addr; 535 return dram_addr;
537 } 536 }
@@ -548,9 +547,8 @@ static u64 sys_addr_to_dram_addr(struct mem_ctl_info *mci, u64 sys_addr)
548 */ 547 */
549 dram_addr = (sys_addr & GENMASK(0, 39)) - dram_base; 548 dram_addr = (sys_addr & GENMASK(0, 39)) - dram_base;
550 549
551 debugf2("using DRAM Base register to translate SysAddr 0x%lx to " 550 edac_dbg(2, "using DRAM Base register to translate SysAddr 0x%lx to DramAddr 0x%lx\n",
552 "DramAddr 0x%lx\n", (unsigned long)sys_addr, 551 (unsigned long)sys_addr, (unsigned long)dram_addr);
553 (unsigned long)dram_addr);
554 return dram_addr; 552 return dram_addr;
555} 553}
556 554
@@ -586,9 +584,9 @@ static u64 dram_addr_to_input_addr(struct mem_ctl_info *mci, u64 dram_addr)
586 input_addr = ((dram_addr >> intlv_shift) & GENMASK(12, 35)) + 584 input_addr = ((dram_addr >> intlv_shift) & GENMASK(12, 35)) +
587 (dram_addr & 0xfff); 585 (dram_addr & 0xfff);
588 586
589 debugf2(" Intlv Shift=%d DramAddr=0x%lx maps to InputAddr=0x%lx\n", 587 edac_dbg(2, " Intlv Shift=%d DramAddr=0x%lx maps to InputAddr=0x%lx\n",
590 intlv_shift, (unsigned long)dram_addr, 588 intlv_shift, (unsigned long)dram_addr,
591 (unsigned long)input_addr); 589 (unsigned long)input_addr);
592 590
593 return input_addr; 591 return input_addr;
594} 592}
@@ -604,8 +602,8 @@ static u64 sys_addr_to_input_addr(struct mem_ctl_info *mci, u64 sys_addr)
604 input_addr = 602 input_addr =
605 dram_addr_to_input_addr(mci, sys_addr_to_dram_addr(mci, sys_addr)); 603 dram_addr_to_input_addr(mci, sys_addr_to_dram_addr(mci, sys_addr));
606 604
607 debugf2("SysAdddr 0x%lx translates to InputAddr 0x%lx\n", 605 edac_dbg(2, "SysAdddr 0x%lx translates to InputAddr 0x%lx\n",
608 (unsigned long)sys_addr, (unsigned long)input_addr); 606 (unsigned long)sys_addr, (unsigned long)input_addr);
609 607
610 return input_addr; 608 return input_addr;
611} 609}
@@ -637,8 +635,8 @@ static u64 input_addr_to_dram_addr(struct mem_ctl_info *mci, u64 input_addr)
637 635
638 intlv_shift = num_node_interleave_bits(dram_intlv_en(pvt, 0)); 636 intlv_shift = num_node_interleave_bits(dram_intlv_en(pvt, 0));
639 if (intlv_shift == 0) { 637 if (intlv_shift == 0) {
640 debugf1(" InputAddr 0x%lx translates to DramAddr of " 638 edac_dbg(1, " InputAddr 0x%lx translates to DramAddr of same value\n",
641 "same value\n", (unsigned long)input_addr); 639 (unsigned long)input_addr);
642 640
643 return input_addr; 641 return input_addr;
644 } 642 }
@@ -649,9 +647,9 @@ static u64 input_addr_to_dram_addr(struct mem_ctl_info *mci, u64 input_addr)
649 intlv_sel = dram_intlv_sel(pvt, node_id) & ((1 << intlv_shift) - 1); 647 intlv_sel = dram_intlv_sel(pvt, node_id) & ((1 << intlv_shift) - 1);
650 dram_addr = bits + (intlv_sel << 12); 648 dram_addr = bits + (intlv_sel << 12);
651 649
652 debugf1("InputAddr 0x%lx translates to DramAddr 0x%lx " 650 edac_dbg(1, "InputAddr 0x%lx translates to DramAddr 0x%lx (%d node interleave bits)\n",
653 "(%d node interleave bits)\n", (unsigned long)input_addr, 651 (unsigned long)input_addr,
654 (unsigned long)dram_addr, intlv_shift); 652 (unsigned long)dram_addr, intlv_shift);
655 653
656 return dram_addr; 654 return dram_addr;
657} 655}
@@ -673,9 +671,9 @@ static u64 dram_addr_to_sys_addr(struct mem_ctl_info *mci, u64 dram_addr)
673 (dram_addr < (hole_base + hole_size))) { 671 (dram_addr < (hole_base + hole_size))) {
674 sys_addr = dram_addr + hole_offset; 672 sys_addr = dram_addr + hole_offset;
675 673
676 debugf1("using DHAR to translate DramAddr 0x%lx to " 674 edac_dbg(1, "using DHAR to translate DramAddr 0x%lx to SysAddr 0x%lx\n",
677 "SysAddr 0x%lx\n", (unsigned long)dram_addr, 675 (unsigned long)dram_addr,
678 (unsigned long)sys_addr); 676 (unsigned long)sys_addr);
679 677
680 return sys_addr; 678 return sys_addr;
681 } 679 }
@@ -697,9 +695,9 @@ static u64 dram_addr_to_sys_addr(struct mem_ctl_info *mci, u64 dram_addr)
697 */ 695 */
698 sys_addr |= ~((sys_addr & (1ull << 39)) - 1); 696 sys_addr |= ~((sys_addr & (1ull << 39)) - 1);
699 697
700 debugf1(" Node %d, DramAddr 0x%lx to SysAddr 0x%lx\n", 698 edac_dbg(1, " Node %d, DramAddr 0x%lx to SysAddr 0x%lx\n",
701 pvt->mc_node_id, (unsigned long)dram_addr, 699 pvt->mc_node_id, (unsigned long)dram_addr,
702 (unsigned long)sys_addr); 700 (unsigned long)sys_addr);
703 701
704 return sys_addr; 702 return sys_addr;
705} 703}
@@ -768,49 +766,48 @@ static void amd64_debug_display_dimm_sizes(struct amd64_pvt *, u8);
768 766
769static void amd64_dump_dramcfg_low(u32 dclr, int chan) 767static void amd64_dump_dramcfg_low(u32 dclr, int chan)
770{ 768{
771 debugf1("F2x%d90 (DRAM Cfg Low): 0x%08x\n", chan, dclr); 769 edac_dbg(1, "F2x%d90 (DRAM Cfg Low): 0x%08x\n", chan, dclr);
772 770
773 debugf1(" DIMM type: %sbuffered; all DIMMs support ECC: %s\n", 771 edac_dbg(1, " DIMM type: %sbuffered; all DIMMs support ECC: %s\n",
774 (dclr & BIT(16)) ? "un" : "", 772 (dclr & BIT(16)) ? "un" : "",
775 (dclr & BIT(19)) ? "yes" : "no"); 773 (dclr & BIT(19)) ? "yes" : "no");
776 774
777 debugf1(" PAR/ERR parity: %s\n", 775 edac_dbg(1, " PAR/ERR parity: %s\n",
778 (dclr & BIT(8)) ? "enabled" : "disabled"); 776 (dclr & BIT(8)) ? "enabled" : "disabled");
779 777
780 if (boot_cpu_data.x86 == 0x10) 778 if (boot_cpu_data.x86 == 0x10)
781 debugf1(" DCT 128bit mode width: %s\n", 779 edac_dbg(1, " DCT 128bit mode width: %s\n",
782 (dclr & BIT(11)) ? "128b" : "64b"); 780 (dclr & BIT(11)) ? "128b" : "64b");
783 781
784 debugf1(" x4 logical DIMMs present: L0: %s L1: %s L2: %s L3: %s\n", 782 edac_dbg(1, " x4 logical DIMMs present: L0: %s L1: %s L2: %s L3: %s\n",
785 (dclr & BIT(12)) ? "yes" : "no", 783 (dclr & BIT(12)) ? "yes" : "no",
786 (dclr & BIT(13)) ? "yes" : "no", 784 (dclr & BIT(13)) ? "yes" : "no",
787 (dclr & BIT(14)) ? "yes" : "no", 785 (dclr & BIT(14)) ? "yes" : "no",
788 (dclr & BIT(15)) ? "yes" : "no"); 786 (dclr & BIT(15)) ? "yes" : "no");
789} 787}
790 788
791/* Display and decode various NB registers for debug purposes. */ 789/* Display and decode various NB registers for debug purposes. */
792static void dump_misc_regs(struct amd64_pvt *pvt) 790static void dump_misc_regs(struct amd64_pvt *pvt)
793{ 791{
794 debugf1("F3xE8 (NB Cap): 0x%08x\n", pvt->nbcap); 792 edac_dbg(1, "F3xE8 (NB Cap): 0x%08x\n", pvt->nbcap);
795 793
796 debugf1(" NB two channel DRAM capable: %s\n", 794 edac_dbg(1, " NB two channel DRAM capable: %s\n",
797 (pvt->nbcap & NBCAP_DCT_DUAL) ? "yes" : "no"); 795 (pvt->nbcap & NBCAP_DCT_DUAL) ? "yes" : "no");
798 796
799 debugf1(" ECC capable: %s, ChipKill ECC capable: %s\n", 797 edac_dbg(1, " ECC capable: %s, ChipKill ECC capable: %s\n",
800 (pvt->nbcap & NBCAP_SECDED) ? "yes" : "no", 798 (pvt->nbcap & NBCAP_SECDED) ? "yes" : "no",
801 (pvt->nbcap & NBCAP_CHIPKILL) ? "yes" : "no"); 799 (pvt->nbcap & NBCAP_CHIPKILL) ? "yes" : "no");
802 800
803 amd64_dump_dramcfg_low(pvt->dclr0, 0); 801 amd64_dump_dramcfg_low(pvt->dclr0, 0);
804 802
805 debugf1("F3xB0 (Online Spare): 0x%08x\n", pvt->online_spare); 803 edac_dbg(1, "F3xB0 (Online Spare): 0x%08x\n", pvt->online_spare);
806 804
807 debugf1("F1xF0 (DRAM Hole Address): 0x%08x, base: 0x%08x, " 805 edac_dbg(1, "F1xF0 (DRAM Hole Address): 0x%08x, base: 0x%08x, offset: 0x%08x\n",
808 "offset: 0x%08x\n", 806 pvt->dhar, dhar_base(pvt),
809 pvt->dhar, dhar_base(pvt), 807 (boot_cpu_data.x86 == 0xf) ? k8_dhar_offset(pvt)
810 (boot_cpu_data.x86 == 0xf) ? k8_dhar_offset(pvt) 808 : f10_dhar_offset(pvt));
811 : f10_dhar_offset(pvt));
812 809
813 debugf1(" DramHoleValid: %s\n", dhar_valid(pvt) ? "yes" : "no"); 810 edac_dbg(1, " DramHoleValid: %s\n", dhar_valid(pvt) ? "yes" : "no");
814 811
815 amd64_debug_display_dimm_sizes(pvt, 0); 812 amd64_debug_display_dimm_sizes(pvt, 0);
816 813
@@ -857,15 +854,15 @@ static void read_dct_base_mask(struct amd64_pvt *pvt)
857 u32 *base1 = &pvt->csels[1].csbases[cs]; 854 u32 *base1 = &pvt->csels[1].csbases[cs];
858 855
859 if (!amd64_read_dct_pci_cfg(pvt, reg0, base0)) 856 if (!amd64_read_dct_pci_cfg(pvt, reg0, base0))
860 debugf0(" DCSB0[%d]=0x%08x reg: F2x%x\n", 857 edac_dbg(0, " DCSB0[%d]=0x%08x reg: F2x%x\n",
861 cs, *base0, reg0); 858 cs, *base0, reg0);
862 859
863 if (boot_cpu_data.x86 == 0xf || dct_ganging_enabled(pvt)) 860 if (boot_cpu_data.x86 == 0xf || dct_ganging_enabled(pvt))
864 continue; 861 continue;
865 862
866 if (!amd64_read_dct_pci_cfg(pvt, reg1, base1)) 863 if (!amd64_read_dct_pci_cfg(pvt, reg1, base1))
867 debugf0(" DCSB1[%d]=0x%08x reg: F2x%x\n", 864 edac_dbg(0, " DCSB1[%d]=0x%08x reg: F2x%x\n",
868 cs, *base1, reg1); 865 cs, *base1, reg1);
869 } 866 }
870 867
871 for_each_chip_select_mask(cs, 0, pvt) { 868 for_each_chip_select_mask(cs, 0, pvt) {
@@ -875,15 +872,15 @@ static void read_dct_base_mask(struct amd64_pvt *pvt)
875 u32 *mask1 = &pvt->csels[1].csmasks[cs]; 872 u32 *mask1 = &pvt->csels[1].csmasks[cs];
876 873
877 if (!amd64_read_dct_pci_cfg(pvt, reg0, mask0)) 874 if (!amd64_read_dct_pci_cfg(pvt, reg0, mask0))
878 debugf0(" DCSM0[%d]=0x%08x reg: F2x%x\n", 875 edac_dbg(0, " DCSM0[%d]=0x%08x reg: F2x%x\n",
879 cs, *mask0, reg0); 876 cs, *mask0, reg0);
880 877
881 if (boot_cpu_data.x86 == 0xf || dct_ganging_enabled(pvt)) 878 if (boot_cpu_data.x86 == 0xf || dct_ganging_enabled(pvt))
882 continue; 879 continue;
883 880
884 if (!amd64_read_dct_pci_cfg(pvt, reg1, mask1)) 881 if (!amd64_read_dct_pci_cfg(pvt, reg1, mask1))
885 debugf0(" DCSM1[%d]=0x%08x reg: F2x%x\n", 882 edac_dbg(0, " DCSM1[%d]=0x%08x reg: F2x%x\n",
886 cs, *mask1, reg1); 883 cs, *mask1, reg1);
887 } 884 }
888} 885}
889 886
@@ -1049,24 +1046,22 @@ static void k8_map_sysaddr_to_csrow(struct mem_ctl_info *mci, u64 sys_addr,
1049 if (!src_mci) { 1046 if (!src_mci) {
1050 amd64_mc_err(mci, "failed to map error addr 0x%lx to a node\n", 1047 amd64_mc_err(mci, "failed to map error addr 0x%lx to a node\n",
1051 (unsigned long)sys_addr); 1048 (unsigned long)sys_addr);
1052 edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1049 edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1,
1053 page, offset, syndrome, 1050 page, offset, syndrome,
1054 -1, -1, -1, 1051 -1, -1, -1,
1055 EDAC_MOD_STR,
1056 "failed to map error addr to a node", 1052 "failed to map error addr to a node",
1057 NULL); 1053 "");
1058 return; 1054 return;
1059 } 1055 }
1060 1056
1061 /* Now map the sys_addr to a CSROW */ 1057 /* Now map the sys_addr to a CSROW */
1062 csrow = sys_addr_to_csrow(src_mci, sys_addr); 1058 csrow = sys_addr_to_csrow(src_mci, sys_addr);
1063 if (csrow < 0) { 1059 if (csrow < 0) {
1064 edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1060 edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1,
1065 page, offset, syndrome, 1061 page, offset, syndrome,
1066 -1, -1, -1, 1062 -1, -1, -1,
1067 EDAC_MOD_STR,
1068 "failed to map error addr to a csrow", 1063 "failed to map error addr to a csrow",
1069 NULL); 1064 "");
1070 return; 1065 return;
1071 } 1066 }
1072 1067
@@ -1082,12 +1077,11 @@ static void k8_map_sysaddr_to_csrow(struct mem_ctl_info *mci, u64 sys_addr,
1082 amd64_mc_warn(src_mci, "unknown syndrome 0x%04x - " 1077 amd64_mc_warn(src_mci, "unknown syndrome 0x%04x - "
1083 "possible error reporting race\n", 1078 "possible error reporting race\n",
1084 syndrome); 1079 syndrome);
1085 edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1080 edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1,
1086 page, offset, syndrome, 1081 page, offset, syndrome,
1087 csrow, -1, -1, 1082 csrow, -1, -1,
1088 EDAC_MOD_STR,
1089 "unknown syndrome - possible error reporting race", 1083 "unknown syndrome - possible error reporting race",
1090 NULL); 1084 "");
1091 return; 1085 return;
1092 } 1086 }
1093 } else { 1087 } else {
@@ -1102,10 +1096,10 @@ static void k8_map_sysaddr_to_csrow(struct mem_ctl_info *mci, u64 sys_addr,
1102 channel = ((sys_addr & BIT(3)) != 0); 1096 channel = ((sys_addr & BIT(3)) != 0);
1103 } 1097 }
1104 1098
1105 edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, src_mci, 1099 edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, src_mci, 1,
1106 page, offset, syndrome, 1100 page, offset, syndrome,
1107 csrow, channel, -1, 1101 csrow, channel, -1,
1108 EDAC_MOD_STR, "", NULL); 1102 "", "");
1109} 1103}
1110 1104
1111static int ddr2_cs_size(unsigned i, bool dct_width) 1105static int ddr2_cs_size(unsigned i, bool dct_width)
@@ -1193,7 +1187,7 @@ static int f1x_early_channel_count(struct amd64_pvt *pvt)
1193 * Need to check DCT0[0] and DCT1[0] to see if only one of them has 1187 * Need to check DCT0[0] and DCT1[0] to see if only one of them has
1194 * their CSEnable bit on. If so, then SINGLE DIMM case. 1188 * their CSEnable bit on. If so, then SINGLE DIMM case.
1195 */ 1189 */
1196 debugf0("Data width is not 128 bits - need more decoding\n"); 1190 edac_dbg(0, "Data width is not 128 bits - need more decoding\n");
1197 1191
1198 /* 1192 /*
1199 * Check DRAM Bank Address Mapping values for each DIMM to see if there 1193 * Check DRAM Bank Address Mapping values for each DIMM to see if there
@@ -1272,25 +1266,24 @@ static void read_dram_ctl_register(struct amd64_pvt *pvt)
1272 return; 1266 return;
1273 1267
1274 if (!amd64_read_dct_pci_cfg(pvt, DCT_SEL_LO, &pvt->dct_sel_lo)) { 1268 if (!amd64_read_dct_pci_cfg(pvt, DCT_SEL_LO, &pvt->dct_sel_lo)) {
1275 debugf0("F2x110 (DCTSelLow): 0x%08x, High range addrs at: 0x%x\n", 1269 edac_dbg(0, "F2x110 (DCTSelLow): 0x%08x, High range addrs at: 0x%x\n",
1276 pvt->dct_sel_lo, dct_sel_baseaddr(pvt)); 1270 pvt->dct_sel_lo, dct_sel_baseaddr(pvt));
1277 1271
1278 debugf0(" DCTs operate in %s mode.\n", 1272 edac_dbg(0, " DCTs operate in %s mode\n",
1279 (dct_ganging_enabled(pvt) ? "ganged" : "unganged")); 1273 (dct_ganging_enabled(pvt) ? "ganged" : "unganged"));
1280 1274
1281 if (!dct_ganging_enabled(pvt)) 1275 if (!dct_ganging_enabled(pvt))
1282 debugf0(" Address range split per DCT: %s\n", 1276 edac_dbg(0, " Address range split per DCT: %s\n",
1283 (dct_high_range_enabled(pvt) ? "yes" : "no")); 1277 (dct_high_range_enabled(pvt) ? "yes" : "no"));
1284 1278
1285 debugf0(" data interleave for ECC: %s, " 1279 edac_dbg(0, " data interleave for ECC: %s, DRAM cleared since last warm reset: %s\n",
1286 "DRAM cleared since last warm reset: %s\n", 1280 (dct_data_intlv_enabled(pvt) ? "enabled" : "disabled"),
1287 (dct_data_intlv_enabled(pvt) ? "enabled" : "disabled"), 1281 (dct_memory_cleared(pvt) ? "yes" : "no"));
1288 (dct_memory_cleared(pvt) ? "yes" : "no"));
1289 1282
1290 debugf0(" channel interleave: %s, " 1283 edac_dbg(0, " channel interleave: %s, "
1291 "interleave bits selector: 0x%x\n", 1284 "interleave bits selector: 0x%x\n",
1292 (dct_interleave_enabled(pvt) ? "enabled" : "disabled"), 1285 (dct_interleave_enabled(pvt) ? "enabled" : "disabled"),
1293 dct_sel_interleave_addr(pvt)); 1286 dct_sel_interleave_addr(pvt));
1294 } 1287 }
1295 1288
1296 amd64_read_dct_pci_cfg(pvt, DCT_SEL_HI, &pvt->dct_sel_hi); 1289 amd64_read_dct_pci_cfg(pvt, DCT_SEL_HI, &pvt->dct_sel_hi);
@@ -1428,7 +1421,7 @@ static int f1x_lookup_addr_in_dct(u64 in_addr, u32 nid, u8 dct)
1428 1421
1429 pvt = mci->pvt_info; 1422 pvt = mci->pvt_info;
1430 1423
1431 debugf1("input addr: 0x%llx, DCT: %d\n", in_addr, dct); 1424 edac_dbg(1, "input addr: 0x%llx, DCT: %d\n", in_addr, dct);
1432 1425
1433 for_each_chip_select(csrow, dct, pvt) { 1426 for_each_chip_select(csrow, dct, pvt) {
1434 if (!csrow_enabled(csrow, dct, pvt)) 1427 if (!csrow_enabled(csrow, dct, pvt))
@@ -1436,19 +1429,18 @@ static int f1x_lookup_addr_in_dct(u64 in_addr, u32 nid, u8 dct)
1436 1429
1437 get_cs_base_and_mask(pvt, csrow, dct, &cs_base, &cs_mask); 1430 get_cs_base_and_mask(pvt, csrow, dct, &cs_base, &cs_mask);
1438 1431
1439 debugf1(" CSROW=%d CSBase=0x%llx CSMask=0x%llx\n", 1432 edac_dbg(1, " CSROW=%d CSBase=0x%llx CSMask=0x%llx\n",
1440 csrow, cs_base, cs_mask); 1433 csrow, cs_base, cs_mask);
1441 1434
1442 cs_mask = ~cs_mask; 1435 cs_mask = ~cs_mask;
1443 1436
1444 debugf1(" (InputAddr & ~CSMask)=0x%llx " 1437 edac_dbg(1, " (InputAddr & ~CSMask)=0x%llx (CSBase & ~CSMask)=0x%llx\n",
1445 "(CSBase & ~CSMask)=0x%llx\n", 1438 (in_addr & cs_mask), (cs_base & cs_mask));
1446 (in_addr & cs_mask), (cs_base & cs_mask));
1447 1439
1448 if ((in_addr & cs_mask) == (cs_base & cs_mask)) { 1440 if ((in_addr & cs_mask) == (cs_base & cs_mask)) {
1449 cs_found = f10_process_possible_spare(pvt, dct, csrow); 1441 cs_found = f10_process_possible_spare(pvt, dct, csrow);
1450 1442
1451 debugf1(" MATCH csrow=%d\n", cs_found); 1443 edac_dbg(1, " MATCH csrow=%d\n", cs_found);
1452 break; 1444 break;
1453 } 1445 }
1454 } 1446 }
@@ -1505,8 +1497,8 @@ static int f1x_match_to_this_node(struct amd64_pvt *pvt, unsigned range,
1505 u8 intlv_en = dram_intlv_en(pvt, range); 1497 u8 intlv_en = dram_intlv_en(pvt, range);
1506 u32 intlv_sel = dram_intlv_sel(pvt, range); 1498 u32 intlv_sel = dram_intlv_sel(pvt, range);
1507 1499
1508 debugf1("(range %d) SystemAddr= 0x%llx Limit=0x%llx\n", 1500 edac_dbg(1, "(range %d) SystemAddr= 0x%llx Limit=0x%llx\n",
1509 range, sys_addr, get_dram_limit(pvt, range)); 1501 range, sys_addr, get_dram_limit(pvt, range));
1510 1502
1511 if (dhar_valid(pvt) && 1503 if (dhar_valid(pvt) &&
1512 dhar_base(pvt) <= sys_addr && 1504 dhar_base(pvt) <= sys_addr &&
@@ -1562,7 +1554,7 @@ static int f1x_match_to_this_node(struct amd64_pvt *pvt, unsigned range,
1562 (chan_addr & 0xfff); 1554 (chan_addr & 0xfff);
1563 } 1555 }
1564 1556
1565 debugf1(" Normalized DCT addr: 0x%llx\n", chan_addr); 1557 edac_dbg(1, " Normalized DCT addr: 0x%llx\n", chan_addr);
1566 1558
1567 cs_found = f1x_lookup_addr_in_dct(chan_addr, node_id, channel); 1559 cs_found = f1x_lookup_addr_in_dct(chan_addr, node_id, channel);
1568 1560
@@ -1616,12 +1608,11 @@ static void f1x_map_sysaddr_to_csrow(struct mem_ctl_info *mci, u64 sys_addr,
1616 csrow = f1x_translate_sysaddr_to_cs(pvt, sys_addr, &nid, &chan); 1608 csrow = f1x_translate_sysaddr_to_cs(pvt, sys_addr, &nid, &chan);
1617 1609
1618 if (csrow < 0) { 1610 if (csrow < 0) {
1619 edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1611 edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1,
1620 page, offset, syndrome, 1612 page, offset, syndrome,
1621 -1, -1, -1, 1613 -1, -1, -1,
1622 EDAC_MOD_STR,
1623 "failed to map error addr to a csrow", 1614 "failed to map error addr to a csrow",
1624 NULL); 1615 "");
1625 return; 1616 return;
1626 } 1617 }
1627 1618
@@ -1633,10 +1624,10 @@ static void f1x_map_sysaddr_to_csrow(struct mem_ctl_info *mci, u64 sys_addr,
1633 if (dct_ganging_enabled(pvt)) 1624 if (dct_ganging_enabled(pvt))
1634 chan = get_channel_from_ecc_syndrome(mci, syndrome); 1625 chan = get_channel_from_ecc_syndrome(mci, syndrome);
1635 1626
1636 edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1627 edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1,
1637 page, offset, syndrome, 1628 page, offset, syndrome,
1638 csrow, chan, -1, 1629 csrow, chan, -1,
1639 EDAC_MOD_STR, "", NULL); 1630 "", "");
1640} 1631}
1641 1632
1642/* 1633/*
@@ -1664,7 +1655,8 @@ static void amd64_debug_display_dimm_sizes(struct amd64_pvt *pvt, u8 ctrl)
1664 dcsb = (ctrl && !dct_ganging_enabled(pvt)) ? pvt->csels[1].csbases 1655 dcsb = (ctrl && !dct_ganging_enabled(pvt)) ? pvt->csels[1].csbases
1665 : pvt->csels[0].csbases; 1656 : pvt->csels[0].csbases;
1666 1657
1667 debugf1("F2x%d80 (DRAM Bank Address Mapping): 0x%08x\n", ctrl, dbam); 1658 edac_dbg(1, "F2x%d80 (DRAM Bank Address Mapping): 0x%08x\n",
1659 ctrl, dbam);
1668 1660
1669 edac_printk(KERN_DEBUG, EDAC_MC, "DCT%d chip selects:\n", ctrl); 1661 edac_printk(KERN_DEBUG, EDAC_MC, "DCT%d chip selects:\n", ctrl);
1670 1662
@@ -1840,7 +1832,7 @@ static int decode_syndrome(u16 syndrome, u16 *vectors, unsigned num_vecs,
1840 } 1832 }
1841 } 1833 }
1842 1834
1843 debugf0("syndrome(%x) not found\n", syndrome); 1835 edac_dbg(0, "syndrome(%x) not found\n", syndrome);
1844 return -1; 1836 return -1;
1845} 1837}
1846 1838
@@ -1917,12 +1909,11 @@ static void amd64_handle_ce(struct mem_ctl_info *mci, struct mce *m)
1917 /* Ensure that the Error Address is VALID */ 1909 /* Ensure that the Error Address is VALID */
1918 if (!(m->status & MCI_STATUS_ADDRV)) { 1910 if (!(m->status & MCI_STATUS_ADDRV)) {
1919 amd64_mc_err(mci, "HW has no ERROR_ADDRESS available\n"); 1911 amd64_mc_err(mci, "HW has no ERROR_ADDRESS available\n");
1920 edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1912 edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1,
1921 0, 0, 0, 1913 0, 0, 0,
1922 -1, -1, -1, 1914 -1, -1, -1,
1923 EDAC_MOD_STR,
1924 "HW has no ERROR_ADDRESS available", 1915 "HW has no ERROR_ADDRESS available",
1925 NULL); 1916 "");
1926 return; 1917 return;
1927 } 1918 }
1928 1919
@@ -1946,12 +1937,11 @@ static void amd64_handle_ue(struct mem_ctl_info *mci, struct mce *m)
1946 1937
1947 if (!(m->status & MCI_STATUS_ADDRV)) { 1938 if (!(m->status & MCI_STATUS_ADDRV)) {
1948 amd64_mc_err(mci, "HW has no ERROR_ADDRESS available\n"); 1939 amd64_mc_err(mci, "HW has no ERROR_ADDRESS available\n");
1949 edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1940 edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1,
1950 0, 0, 0, 1941 0, 0, 0,
1951 -1, -1, -1, 1942 -1, -1, -1,
1952 EDAC_MOD_STR,
1953 "HW has no ERROR_ADDRESS available", 1943 "HW has no ERROR_ADDRESS available",
1954 NULL); 1944 "");
1955 return; 1945 return;
1956 } 1946 }
1957 1947
@@ -1966,11 +1956,11 @@ static void amd64_handle_ue(struct mem_ctl_info *mci, struct mce *m)
1966 if (!src_mci) { 1956 if (!src_mci) {
1967 amd64_mc_err(mci, "ERROR ADDRESS (0x%lx) NOT mapped to a MC\n", 1957 amd64_mc_err(mci, "ERROR ADDRESS (0x%lx) NOT mapped to a MC\n",
1968 (unsigned long)sys_addr); 1958 (unsigned long)sys_addr);
1969 edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1959 edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1,
1970 page, offset, 0, 1960 page, offset, 0,
1971 -1, -1, -1, 1961 -1, -1, -1,
1972 EDAC_MOD_STR, 1962 "ERROR ADDRESS NOT mapped to a MC",
1973 "ERROR ADDRESS NOT mapped to a MC", NULL); 1963 "");
1974 return; 1964 return;
1975 } 1965 }
1976 1966
@@ -1980,17 +1970,16 @@ static void amd64_handle_ue(struct mem_ctl_info *mci, struct mce *m)
1980 if (csrow < 0) { 1970 if (csrow < 0) {
1981 amd64_mc_err(mci, "ERROR_ADDRESS (0x%lx) NOT mapped to CS\n", 1971 amd64_mc_err(mci, "ERROR_ADDRESS (0x%lx) NOT mapped to CS\n",
1982 (unsigned long)sys_addr); 1972 (unsigned long)sys_addr);
1983 edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1973 edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1,
1984 page, offset, 0, 1974 page, offset, 0,
1985 -1, -1, -1, 1975 -1, -1, -1,
1986 EDAC_MOD_STR,
1987 "ERROR ADDRESS NOT mapped to CS", 1976 "ERROR ADDRESS NOT mapped to CS",
1988 NULL); 1977 "");
1989 } else { 1978 } else {
1990 edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1979 edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1,
1991 page, offset, 0, 1980 page, offset, 0,
1992 csrow, -1, -1, 1981 csrow, -1, -1,
1993 EDAC_MOD_STR, "", NULL); 1982 "", "");
1994 } 1983 }
1995} 1984}
1996 1985
@@ -2047,9 +2036,9 @@ static int reserve_mc_sibling_devs(struct amd64_pvt *pvt, u16 f1_id, u16 f3_id)
2047 2036
2048 return -ENODEV; 2037 return -ENODEV;
2049 } 2038 }
2050 debugf1("F1: %s\n", pci_name(pvt->F1)); 2039 edac_dbg(1, "F1: %s\n", pci_name(pvt->F1));
2051 debugf1("F2: %s\n", pci_name(pvt->F2)); 2040 edac_dbg(1, "F2: %s\n", pci_name(pvt->F2));
2052 debugf1("F3: %s\n", pci_name(pvt->F3)); 2041 edac_dbg(1, "F3: %s\n", pci_name(pvt->F3));
2053 2042
2054 return 0; 2043 return 0;
2055} 2044}
@@ -2076,15 +2065,15 @@ static void read_mc_regs(struct amd64_pvt *pvt)
2076 * those are Read-As-Zero 2065 * those are Read-As-Zero
2077 */ 2066 */
2078 rdmsrl(MSR_K8_TOP_MEM1, pvt->top_mem); 2067 rdmsrl(MSR_K8_TOP_MEM1, pvt->top_mem);
2079 debugf0(" TOP_MEM: 0x%016llx\n", pvt->top_mem); 2068 edac_dbg(0, " TOP_MEM: 0x%016llx\n", pvt->top_mem);
2080 2069
2081 /* check first whether TOP_MEM2 is enabled */ 2070 /* check first whether TOP_MEM2 is enabled */
2082 rdmsrl(MSR_K8_SYSCFG, msr_val); 2071 rdmsrl(MSR_K8_SYSCFG, msr_val);
2083 if (msr_val & (1U << 21)) { 2072 if (msr_val & (1U << 21)) {
2084 rdmsrl(MSR_K8_TOP_MEM2, pvt->top_mem2); 2073 rdmsrl(MSR_K8_TOP_MEM2, pvt->top_mem2);
2085 debugf0(" TOP_MEM2: 0x%016llx\n", pvt->top_mem2); 2074 edac_dbg(0, " TOP_MEM2: 0x%016llx\n", pvt->top_mem2);
2086 } else 2075 } else
2087 debugf0(" TOP_MEM2 disabled.\n"); 2076 edac_dbg(0, " TOP_MEM2 disabled\n");
2088 2077
2089 amd64_read_pci_cfg(pvt->F3, NBCAP, &pvt->nbcap); 2078 amd64_read_pci_cfg(pvt->F3, NBCAP, &pvt->nbcap);
2090 2079
@@ -2100,17 +2089,17 @@ static void read_mc_regs(struct amd64_pvt *pvt)
2100 if (!rw) 2089 if (!rw)
2101 continue; 2090 continue;
2102 2091
2103 debugf1(" DRAM range[%d], base: 0x%016llx; limit: 0x%016llx\n", 2092 edac_dbg(1, " DRAM range[%d], base: 0x%016llx; limit: 0x%016llx\n",
2104 range, 2093 range,
2105 get_dram_base(pvt, range), 2094 get_dram_base(pvt, range),
2106 get_dram_limit(pvt, range)); 2095 get_dram_limit(pvt, range));
2107 2096
2108 debugf1(" IntlvEn=%s; Range access: %s%s IntlvSel=%d DstNode=%d\n", 2097 edac_dbg(1, " IntlvEn=%s; Range access: %s%s IntlvSel=%d DstNode=%d\n",
2109 dram_intlv_en(pvt, range) ? "Enabled" : "Disabled", 2098 dram_intlv_en(pvt, range) ? "Enabled" : "Disabled",
2110 (rw & 0x1) ? "R" : "-", 2099 (rw & 0x1) ? "R" : "-",
2111 (rw & 0x2) ? "W" : "-", 2100 (rw & 0x2) ? "W" : "-",
2112 dram_intlv_sel(pvt, range), 2101 dram_intlv_sel(pvt, range),
2113 dram_dst_node(pvt, range)); 2102 dram_dst_node(pvt, range));
2114 } 2103 }
2115 2104
2116 read_dct_base_mask(pvt); 2105 read_dct_base_mask(pvt);
@@ -2191,9 +2180,9 @@ static u32 amd64_csrow_nr_pages(struct amd64_pvt *pvt, u8 dct, int csrow_nr)
2191 2180
2192 nr_pages = pvt->ops->dbam_to_cs(pvt, dct, cs_mode) << (20 - PAGE_SHIFT); 2181 nr_pages = pvt->ops->dbam_to_cs(pvt, dct, cs_mode) << (20 - PAGE_SHIFT);
2193 2182
2194 debugf0(" (csrow=%d) DBAM map index= %d\n", csrow_nr, cs_mode); 2183 edac_dbg(0, " (csrow=%d) DBAM map index= %d\n", csrow_nr, cs_mode);
2195 debugf0(" nr_pages/channel= %u channel-count = %d\n", 2184 edac_dbg(0, " nr_pages/channel= %u channel-count = %d\n",
2196 nr_pages, pvt->channel_count); 2185 nr_pages, pvt->channel_count);
2197 2186
2198 return nr_pages; 2187 return nr_pages;
2199} 2188}
@@ -2205,6 +2194,7 @@ static u32 amd64_csrow_nr_pages(struct amd64_pvt *pvt, u8 dct, int csrow_nr)
2205static int init_csrows(struct mem_ctl_info *mci) 2194static int init_csrows(struct mem_ctl_info *mci)
2206{ 2195{
2207 struct csrow_info *csrow; 2196 struct csrow_info *csrow;
2197 struct dimm_info *dimm;
2208 struct amd64_pvt *pvt = mci->pvt_info; 2198 struct amd64_pvt *pvt = mci->pvt_info;
2209 u64 base, mask; 2199 u64 base, mask;
2210 u32 val; 2200 u32 val;
@@ -2217,22 +2207,19 @@ static int init_csrows(struct mem_ctl_info *mci)
2217 2207
2218 pvt->nbcfg = val; 2208 pvt->nbcfg = val;
2219 2209
2220 debugf0("node %d, NBCFG=0x%08x[ChipKillEccCap: %d|DramEccEn: %d]\n", 2210 edac_dbg(0, "node %d, NBCFG=0x%08x[ChipKillEccCap: %d|DramEccEn: %d]\n",
2221 pvt->mc_node_id, val, 2211 pvt->mc_node_id, val,
2222 !!(val & NBCFG_CHIPKILL), !!(val & NBCFG_ECC_ENABLE)); 2212 !!(val & NBCFG_CHIPKILL), !!(val & NBCFG_ECC_ENABLE));
2223 2213
2224 for_each_chip_select(i, 0, pvt) { 2214 for_each_chip_select(i, 0, pvt) {
2225 csrow = &mci->csrows[i]; 2215 csrow = mci->csrows[i];
2226 2216
2227 if (!csrow_enabled(i, 0, pvt) && !csrow_enabled(i, 1, pvt)) { 2217 if (!csrow_enabled(i, 0, pvt) && !csrow_enabled(i, 1, pvt)) {
2228 debugf1("----CSROW %d EMPTY for node %d\n", i, 2218 edac_dbg(1, "----CSROW %d VALID for MC node %d\n",
2229 pvt->mc_node_id); 2219 i, pvt->mc_node_id);
2230 continue; 2220 continue;
2231 } 2221 }
2232 2222
2233 debugf1("----CSROW %d VALID for MC node %d\n",
2234 i, pvt->mc_node_id);
2235
2236 empty = 0; 2223 empty = 0;
2237 if (csrow_enabled(i, 0, pvt)) 2224 if (csrow_enabled(i, 0, pvt))
2238 nr_pages = amd64_csrow_nr_pages(pvt, 0, i); 2225 nr_pages = amd64_csrow_nr_pages(pvt, 0, i);
@@ -2244,8 +2231,9 @@ static int init_csrows(struct mem_ctl_info *mci)
2244 2231
2245 mtype = amd64_determine_memory_type(pvt, i); 2232 mtype = amd64_determine_memory_type(pvt, i);
2246 2233
2247 debugf1(" for MC node %d csrow %d:\n", pvt->mc_node_id, i); 2234 edac_dbg(1, " for MC node %d csrow %d:\n", pvt->mc_node_id, i);
2248 debugf1(" nr_pages: %u\n", nr_pages * pvt->channel_count); 2235 edac_dbg(1, " nr_pages: %u\n",
2236 nr_pages * pvt->channel_count);
2249 2237
2250 /* 2238 /*
2251 * determine whether CHIPKILL or JUST ECC or NO ECC is operating 2239 * determine whether CHIPKILL or JUST ECC or NO ECC is operating
@@ -2257,9 +2245,10 @@ static int init_csrows(struct mem_ctl_info *mci)
2257 edac_mode = EDAC_NONE; 2245 edac_mode = EDAC_NONE;
2258 2246
2259 for (j = 0; j < pvt->channel_count; j++) { 2247 for (j = 0; j < pvt->channel_count; j++) {
2260 csrow->channels[j].dimm->mtype = mtype; 2248 dimm = csrow->channels[j]->dimm;
2261 csrow->channels[j].dimm->edac_mode = edac_mode; 2249 dimm->mtype = mtype;
2262 csrow->channels[j].dimm->nr_pages = nr_pages; 2250 dimm->edac_mode = edac_mode;
2251 dimm->nr_pages = nr_pages;
2263 } 2252 }
2264 } 2253 }
2265 2254
@@ -2296,9 +2285,9 @@ static bool amd64_nb_mce_bank_enabled_on_node(unsigned nid)
2296 struct msr *reg = per_cpu_ptr(msrs, cpu); 2285 struct msr *reg = per_cpu_ptr(msrs, cpu);
2297 nbe = reg->l & MSR_MCGCTL_NBE; 2286 nbe = reg->l & MSR_MCGCTL_NBE;
2298 2287
2299 debugf0("core: %u, MCG_CTL: 0x%llx, NB MSR is %s\n", 2288 edac_dbg(0, "core: %u, MCG_CTL: 0x%llx, NB MSR is %s\n",
2300 cpu, reg->q, 2289 cpu, reg->q,
2301 (nbe ? "enabled" : "disabled")); 2290 (nbe ? "enabled" : "disabled"));
2302 2291
2303 if (!nbe) 2292 if (!nbe)
2304 goto out; 2293 goto out;
@@ -2369,8 +2358,8 @@ static bool enable_ecc_error_reporting(struct ecc_settings *s, u8 nid,
2369 2358
2370 amd64_read_pci_cfg(F3, NBCFG, &value); 2359 amd64_read_pci_cfg(F3, NBCFG, &value);
2371 2360
2372 debugf0("1: node %d, NBCFG=0x%08x[DramEccEn: %d]\n", 2361 edac_dbg(0, "1: node %d, NBCFG=0x%08x[DramEccEn: %d]\n",
2373 nid, value, !!(value & NBCFG_ECC_ENABLE)); 2362 nid, value, !!(value & NBCFG_ECC_ENABLE));
2374 2363
2375 if (!(value & NBCFG_ECC_ENABLE)) { 2364 if (!(value & NBCFG_ECC_ENABLE)) {
2376 amd64_warn("DRAM ECC disabled on this node, enabling...\n"); 2365 amd64_warn("DRAM ECC disabled on this node, enabling...\n");
@@ -2394,8 +2383,8 @@ static bool enable_ecc_error_reporting(struct ecc_settings *s, u8 nid,
2394 s->flags.nb_ecc_prev = 1; 2383 s->flags.nb_ecc_prev = 1;
2395 } 2384 }
2396 2385
2397 debugf0("2: node %d, NBCFG=0x%08x[DramEccEn: %d]\n", 2386 edac_dbg(0, "2: node %d, NBCFG=0x%08x[DramEccEn: %d]\n",
2398 nid, value, !!(value & NBCFG_ECC_ENABLE)); 2387 nid, value, !!(value & NBCFG_ECC_ENABLE));
2399 2388
2400 return ret; 2389 return ret;
2401} 2390}
@@ -2463,26 +2452,29 @@ static bool ecc_enabled(struct pci_dev *F3, u8 nid)
2463 return true; 2452 return true;
2464} 2453}
2465 2454
2466struct mcidev_sysfs_attribute sysfs_attrs[ARRAY_SIZE(amd64_dbg_attrs) + 2455static int set_mc_sysfs_attrs(struct mem_ctl_info *mci)
2467 ARRAY_SIZE(amd64_inj_attrs) +
2468 1];
2469
2470struct mcidev_sysfs_attribute terminator = { .attr = { .name = NULL } };
2471
2472static void set_mc_sysfs_attrs(struct mem_ctl_info *mci)
2473{ 2456{
2474 unsigned int i = 0, j = 0; 2457 int rc;
2475 2458
2476 for (; i < ARRAY_SIZE(amd64_dbg_attrs); i++) 2459 rc = amd64_create_sysfs_dbg_files(mci);
2477 sysfs_attrs[i] = amd64_dbg_attrs[i]; 2460 if (rc < 0)
2461 return rc;
2478 2462
2479 if (boot_cpu_data.x86 >= 0x10) 2463 if (boot_cpu_data.x86 >= 0x10) {
2480 for (j = 0; j < ARRAY_SIZE(amd64_inj_attrs); j++, i++) 2464 rc = amd64_create_sysfs_inject_files(mci);
2481 sysfs_attrs[i] = amd64_inj_attrs[j]; 2465 if (rc < 0)
2466 return rc;
2467 }
2468
2469 return 0;
2470}
2482 2471
2483 sysfs_attrs[i] = terminator; 2472static void del_mc_sysfs_attrs(struct mem_ctl_info *mci)
2473{
2474 amd64_remove_sysfs_dbg_files(mci);
2484 2475
2485 mci->mc_driver_sysfs_attributes = sysfs_attrs; 2476 if (boot_cpu_data.x86 >= 0x10)
2477 amd64_remove_sysfs_inject_files(mci);
2486} 2478}
2487 2479
2488static void setup_mci_misc_attrs(struct mem_ctl_info *mci, 2480static void setup_mci_misc_attrs(struct mem_ctl_info *mci,
@@ -2601,20 +2593,22 @@ static int amd64_init_one_instance(struct pci_dev *F2)
2601 goto err_siblings; 2593 goto err_siblings;
2602 2594
2603 mci->pvt_info = pvt; 2595 mci->pvt_info = pvt;
2604 mci->dev = &pvt->F2->dev; 2596 mci->pdev = &pvt->F2->dev;
2605 2597
2606 setup_mci_misc_attrs(mci, fam_type); 2598 setup_mci_misc_attrs(mci, fam_type);
2607 2599
2608 if (init_csrows(mci)) 2600 if (init_csrows(mci))
2609 mci->edac_cap = EDAC_FLAG_NONE; 2601 mci->edac_cap = EDAC_FLAG_NONE;
2610 2602
2611 set_mc_sysfs_attrs(mci);
2612
2613 ret = -ENODEV; 2603 ret = -ENODEV;
2614 if (edac_mc_add_mc(mci)) { 2604 if (edac_mc_add_mc(mci)) {
2615 debugf1("failed edac_mc_add_mc()\n"); 2605 edac_dbg(1, "failed edac_mc_add_mc()\n");
2616 goto err_add_mc; 2606 goto err_add_mc;
2617 } 2607 }
2608 if (set_mc_sysfs_attrs(mci)) {
2609 edac_dbg(1, "failed edac_mc_add_mc()\n");
2610 goto err_add_sysfs;
2611 }
2618 2612
2619 /* register stuff with EDAC MCE */ 2613 /* register stuff with EDAC MCE */
2620 if (report_gart_errors) 2614 if (report_gart_errors)
@@ -2628,6 +2622,8 @@ static int amd64_init_one_instance(struct pci_dev *F2)
2628 2622
2629 return 0; 2623 return 0;
2630 2624
2625err_add_sysfs:
2626 edac_mc_del_mc(mci->pdev);
2631err_add_mc: 2627err_add_mc:
2632 edac_mc_free(mci); 2628 edac_mc_free(mci);
2633 2629
@@ -2651,7 +2647,7 @@ static int __devinit amd64_probe_one_instance(struct pci_dev *pdev,
2651 2647
2652 ret = pci_enable_device(pdev); 2648 ret = pci_enable_device(pdev);
2653 if (ret < 0) { 2649 if (ret < 0) {
2654 debugf0("ret=%d\n", ret); 2650 edac_dbg(0, "ret=%d\n", ret);
2655 return -EIO; 2651 return -EIO;
2656 } 2652 }
2657 2653
@@ -2698,6 +2694,8 @@ static void __devexit amd64_remove_one_instance(struct pci_dev *pdev)
2698 struct pci_dev *F3 = node_to_amd_nb(nid)->misc; 2694 struct pci_dev *F3 = node_to_amd_nb(nid)->misc;
2699 struct ecc_settings *s = ecc_stngs[nid]; 2695 struct ecc_settings *s = ecc_stngs[nid];
2700 2696
2697 mci = find_mci_by_dev(&pdev->dev);
2698 del_mc_sysfs_attrs(mci);
2701 /* Remove from EDAC CORE tracking list */ 2699 /* Remove from EDAC CORE tracking list */
2702 mci = edac_mc_del_mc(&pdev->dev); 2700 mci = edac_mc_del_mc(&pdev->dev);
2703 if (!mci) 2701 if (!mci)
diff --git a/drivers/edac/amd64_edac.h b/drivers/edac/amd64_edac.h
index 9a666cb985b2..8d4804732bac 100644
--- a/drivers/edac/amd64_edac.h
+++ b/drivers/edac/amd64_edac.h
@@ -413,20 +413,33 @@ struct ecc_settings {
413}; 413};
414 414
415#ifdef CONFIG_EDAC_DEBUG 415#ifdef CONFIG_EDAC_DEBUG
416#define NUM_DBG_ATTRS 5 416int amd64_create_sysfs_dbg_files(struct mem_ctl_info *mci);
417void amd64_remove_sysfs_dbg_files(struct mem_ctl_info *mci);
418
417#else 419#else
418#define NUM_DBG_ATTRS 0 420static inline int amd64_create_sysfs_dbg_files(struct mem_ctl_info *mci)
421{
422 return 0;
423}
424static void inline amd64_remove_sysfs_dbg_files(struct mem_ctl_info *mci)
425{
426}
419#endif 427#endif
420 428
421#ifdef CONFIG_EDAC_AMD64_ERROR_INJECTION 429#ifdef CONFIG_EDAC_AMD64_ERROR_INJECTION
422#define NUM_INJ_ATTRS 5 430int amd64_create_sysfs_inject_files(struct mem_ctl_info *mci);
431void amd64_remove_sysfs_inject_files(struct mem_ctl_info *mci);
432
423#else 433#else
424#define NUM_INJ_ATTRS 0 434static inline int amd64_create_sysfs_inject_files(struct mem_ctl_info *mci)
435{
436 return 0;
437}
438static inline void amd64_remove_sysfs_inject_files(struct mem_ctl_info *mci)
439{
440}
425#endif 441#endif
426 442
427extern struct mcidev_sysfs_attribute amd64_dbg_attrs[NUM_DBG_ATTRS],
428 amd64_inj_attrs[NUM_INJ_ATTRS];
429
430/* 443/*
431 * Each of the PCI Device IDs types have their own set of hardware accessor 444 * Each of the PCI Device IDs types have their own set of hardware accessor
432 * functions and per device encoding/decoding logic. 445 * functions and per device encoding/decoding logic.
@@ -460,3 +473,5 @@ int __amd64_write_pci_cfg_dword(struct pci_dev *pdev, int offset,
460 473
461int amd64_get_dram_hole_info(struct mem_ctl_info *mci, u64 *hole_base, 474int amd64_get_dram_hole_info(struct mem_ctl_info *mci, u64 *hole_base,
462 u64 *hole_offset, u64 *hole_size); 475 u64 *hole_offset, u64 *hole_size);
476
477#define to_mci(k) container_of(k, struct mem_ctl_info, dev)
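
The new to_mci() helper above is what lets the sysfs callbacks in the two files that follow be written as ordinary struct device_attribute show/store methods hung off mci->dev instead of the old mcidev_sysfs_attribute tables. A minimal sketch of the pattern, reading pvt->dhar as the existing dhar attribute does; the attribute name example_reg is illustrative, not one of the nodes created by this patch:

static ssize_t amd64_example_show(struct device *dev,
				  struct device_attribute *mattr, char *buf)
{
	struct mem_ctl_info *mci = to_mci(dev);	/* dev is &mci->dev */
	struct amd64_pvt *pvt = mci->pvt_info;

	return sprintf(buf, "0x%016llx\n", (u64)pvt->dhar);
}
static DEVICE_ATTR(example_reg, S_IRUGO, amd64_example_show, NULL);

/*
 * Such attributes are registered with device_create_file(&mci->dev, ...) and
 * torn down with device_remove_file(), as amd64_create_sysfs_dbg_files() and
 * amd64_remove_sysfs_dbg_files() do in the hunks below.
 */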
diff --git a/drivers/edac/amd64_edac_dbg.c b/drivers/edac/amd64_edac_dbg.c
index e3562288f4ce..2c1bbf740605 100644
--- a/drivers/edac/amd64_edac_dbg.c
+++ b/drivers/edac/amd64_edac_dbg.c
@@ -1,8 +1,11 @@
1#include "amd64_edac.h" 1#include "amd64_edac.h"
2 2
3#define EDAC_DCT_ATTR_SHOW(reg) \ 3#define EDAC_DCT_ATTR_SHOW(reg) \
4static ssize_t amd64_##reg##_show(struct mem_ctl_info *mci, char *data) \ 4static ssize_t amd64_##reg##_show(struct device *dev, \
5 struct device_attribute *mattr, \
6 char *data) \
5{ \ 7{ \
8 struct mem_ctl_info *mci = to_mci(dev); \
6 struct amd64_pvt *pvt = mci->pvt_info; \ 9 struct amd64_pvt *pvt = mci->pvt_info; \
7 return sprintf(data, "0x%016llx\n", (u64)pvt->reg); \ 10 return sprintf(data, "0x%016llx\n", (u64)pvt->reg); \
8} 11}
@@ -12,8 +15,12 @@ EDAC_DCT_ATTR_SHOW(dbam0);
12EDAC_DCT_ATTR_SHOW(top_mem); 15EDAC_DCT_ATTR_SHOW(top_mem);
13EDAC_DCT_ATTR_SHOW(top_mem2); 16EDAC_DCT_ATTR_SHOW(top_mem2);
14 17
15static ssize_t amd64_hole_show(struct mem_ctl_info *mci, char *data) 18static ssize_t amd64_hole_show(struct device *dev,
19 struct device_attribute *mattr,
20 char *data)
16{ 21{
22 struct mem_ctl_info *mci = to_mci(dev);
23
17 u64 hole_base = 0; 24 u64 hole_base = 0;
18 u64 hole_offset = 0; 25 u64 hole_offset = 0;
19 u64 hole_size = 0; 26 u64 hole_size = 0;
@@ -27,46 +34,40 @@ static ssize_t amd64_hole_show(struct mem_ctl_info *mci, char *data)
27/* 34/*
28 * update NUM_DBG_ATTRS in case you add new members 35 * update NUM_DBG_ATTRS in case you add new members
29 */ 36 */
30struct mcidev_sysfs_attribute amd64_dbg_attrs[] = { 37static DEVICE_ATTR(dhar, S_IRUGO, amd64_dhar_show, NULL);
38static DEVICE_ATTR(dbam, S_IRUGO, amd64_dbam0_show, NULL);
39static DEVICE_ATTR(topmem, S_IRUGO, amd64_top_mem_show, NULL);
40static DEVICE_ATTR(topmem2, S_IRUGO, amd64_top_mem2_show, NULL);
41static DEVICE_ATTR(dram_hole, S_IRUGO, amd64_hole_show, NULL);
42
43int amd64_create_sysfs_dbg_files(struct mem_ctl_info *mci)
44{
45 int rc;
46
47 rc = device_create_file(&mci->dev, &dev_attr_dhar);
48 if (rc < 0)
49 return rc;
50 rc = device_create_file(&mci->dev, &dev_attr_dbam);
51 if (rc < 0)
52 return rc;
53 rc = device_create_file(&mci->dev, &dev_attr_topmem);
54 if (rc < 0)
55 return rc;
56 rc = device_create_file(&mci->dev, &dev_attr_topmem2);
57 if (rc < 0)
58 return rc;
59 rc = device_create_file(&mci->dev, &dev_attr_dram_hole);
60 if (rc < 0)
61 return rc;
31 62
32 { 63 return 0;
33 .attr = { 64}
34 .name = "dhar", 65
35 .mode = (S_IRUGO) 66void amd64_remove_sysfs_dbg_files(struct mem_ctl_info *mci)
36 }, 67{
37 .show = amd64_dhar_show, 68 device_remove_file(&mci->dev, &dev_attr_dhar);
38 .store = NULL, 69 device_remove_file(&mci->dev, &dev_attr_dbam);
39 }, 70 device_remove_file(&mci->dev, &dev_attr_topmem);
40 { 71 device_remove_file(&mci->dev, &dev_attr_topmem2);
41 .attr = { 72 device_remove_file(&mci->dev, &dev_attr_dram_hole);
42 .name = "dbam", 73}
43 .mode = (S_IRUGO)
44 },
45 .show = amd64_dbam0_show,
46 .store = NULL,
47 },
48 {
49 .attr = {
50 .name = "topmem",
51 .mode = (S_IRUGO)
52 },
53 .show = amd64_top_mem_show,
54 .store = NULL,
55 },
56 {
57 .attr = {
58 .name = "topmem2",
59 .mode = (S_IRUGO)
60 },
61 .show = amd64_top_mem2_show,
62 .store = NULL,
63 },
64 {
65 .attr = {
66 .name = "dram_hole",
67 .mode = (S_IRUGO)
68 },
69 .show = amd64_hole_show,
70 .store = NULL,
71 },
72};
diff --git a/drivers/edac/amd64_edac_inj.c b/drivers/edac/amd64_edac_inj.c
index 303f10e03dda..53d972e00dfb 100644
--- a/drivers/edac/amd64_edac_inj.c
+++ b/drivers/edac/amd64_edac_inj.c
@@ -1,7 +1,10 @@
1#include "amd64_edac.h" 1#include "amd64_edac.h"
2 2
3static ssize_t amd64_inject_section_show(struct mem_ctl_info *mci, char *buf) 3static ssize_t amd64_inject_section_show(struct device *dev,
4 struct device_attribute *mattr,
5 char *buf)
4{ 6{
7 struct mem_ctl_info *mci = to_mci(dev);
5 struct amd64_pvt *pvt = mci->pvt_info; 8 struct amd64_pvt *pvt = mci->pvt_info;
6 return sprintf(buf, "0x%x\n", pvt->injection.section); 9 return sprintf(buf, "0x%x\n", pvt->injection.section);
7} 10}
@@ -12,9 +15,11 @@ static ssize_t amd64_inject_section_show(struct mem_ctl_info *mci, char *buf)
12 * 15 *
13 * range: 0..3 16 * range: 0..3
14 */ 17 */
15static ssize_t amd64_inject_section_store(struct mem_ctl_info *mci, 18static ssize_t amd64_inject_section_store(struct device *dev,
19 struct device_attribute *mattr,
16 const char *data, size_t count) 20 const char *data, size_t count)
17{ 21{
22 struct mem_ctl_info *mci = to_mci(dev);
18 struct amd64_pvt *pvt = mci->pvt_info; 23 struct amd64_pvt *pvt = mci->pvt_info;
19 unsigned long value; 24 unsigned long value;
20 int ret = 0; 25 int ret = 0;
@@ -33,8 +38,11 @@ static ssize_t amd64_inject_section_store(struct mem_ctl_info *mci,
33 return ret; 38 return ret;
34} 39}
35 40
36static ssize_t amd64_inject_word_show(struct mem_ctl_info *mci, char *buf) 41static ssize_t amd64_inject_word_show(struct device *dev,
42 struct device_attribute *mattr,
43 char *buf)
37{ 44{
45 struct mem_ctl_info *mci = to_mci(dev);
38 struct amd64_pvt *pvt = mci->pvt_info; 46 struct amd64_pvt *pvt = mci->pvt_info;
39 return sprintf(buf, "0x%x\n", pvt->injection.word); 47 return sprintf(buf, "0x%x\n", pvt->injection.word);
40} 48}
@@ -45,9 +53,11 @@ static ssize_t amd64_inject_word_show(struct mem_ctl_info *mci, char *buf)
45 * 53 *
46 * range: 0..8 54 * range: 0..8
47 */ 55 */
48static ssize_t amd64_inject_word_store(struct mem_ctl_info *mci, 56static ssize_t amd64_inject_word_store(struct device *dev,
49 const char *data, size_t count) 57 struct device_attribute *mattr,
58 const char *data, size_t count)
50{ 59{
60 struct mem_ctl_info *mci = to_mci(dev);
51 struct amd64_pvt *pvt = mci->pvt_info; 61 struct amd64_pvt *pvt = mci->pvt_info;
52 unsigned long value; 62 unsigned long value;
53 int ret = 0; 63 int ret = 0;
@@ -66,8 +76,11 @@ static ssize_t amd64_inject_word_store(struct mem_ctl_info *mci,
66 return ret; 76 return ret;
67} 77}
68 78
69static ssize_t amd64_inject_ecc_vector_show(struct mem_ctl_info *mci, char *buf) 79static ssize_t amd64_inject_ecc_vector_show(struct device *dev,
80 struct device_attribute *mattr,
81 char *buf)
70{ 82{
83 struct mem_ctl_info *mci = to_mci(dev);
71 struct amd64_pvt *pvt = mci->pvt_info; 84 struct amd64_pvt *pvt = mci->pvt_info;
72 return sprintf(buf, "0x%x\n", pvt->injection.bit_map); 85 return sprintf(buf, "0x%x\n", pvt->injection.bit_map);
73} 86}
@@ -77,9 +90,11 @@ static ssize_t amd64_inject_ecc_vector_show(struct mem_ctl_info *mci, char *buf)
77 * corresponding bit within the error injection word above. When used during a 90 * corresponding bit within the error injection word above. When used during a
78 * DRAM ECC read, it holds the contents of the of the DRAM ECC bits. 91 * DRAM ECC read, it holds the contents of the of the DRAM ECC bits.
79 */ 92 */
80static ssize_t amd64_inject_ecc_vector_store(struct mem_ctl_info *mci, 93static ssize_t amd64_inject_ecc_vector_store(struct device *dev,
81 const char *data, size_t count) 94 struct device_attribute *mattr,
95 const char *data, size_t count)
82{ 96{
97 struct mem_ctl_info *mci = to_mci(dev);
83 struct amd64_pvt *pvt = mci->pvt_info; 98 struct amd64_pvt *pvt = mci->pvt_info;
84 unsigned long value; 99 unsigned long value;
85 int ret = 0; 100 int ret = 0;
@@ -103,9 +118,11 @@ static ssize_t amd64_inject_ecc_vector_store(struct mem_ctl_info *mci,
103 * Do a DRAM ECC read. Assemble staged values in the pvt area, format into 118 * Do a DRAM ECC read. Assemble staged values in the pvt area, format into
104 * fields needed by the injection registers and read the NB Array Data Port. 119 * fields needed by the injection registers and read the NB Array Data Port.
105 */ 120 */
106static ssize_t amd64_inject_read_store(struct mem_ctl_info *mci, 121static ssize_t amd64_inject_read_store(struct device *dev,
107 const char *data, size_t count) 122 struct device_attribute *mattr,
123 const char *data, size_t count)
108{ 124{
125 struct mem_ctl_info *mci = to_mci(dev);
109 struct amd64_pvt *pvt = mci->pvt_info; 126 struct amd64_pvt *pvt = mci->pvt_info;
110 unsigned long value; 127 unsigned long value;
111 u32 section, word_bits; 128 u32 section, word_bits;
@@ -125,7 +142,8 @@ static ssize_t amd64_inject_read_store(struct mem_ctl_info *mci,
125 /* Issue 'word' and 'bit' along with the READ request */ 142 /* Issue 'word' and 'bit' along with the READ request */
126 amd64_write_pci_cfg(pvt->F3, F10_NB_ARRAY_DATA, word_bits); 143 amd64_write_pci_cfg(pvt->F3, F10_NB_ARRAY_DATA, word_bits);
127 144
128 debugf0("section=0x%x word_bits=0x%x\n", section, word_bits); 145 edac_dbg(0, "section=0x%x word_bits=0x%x\n",
146 section, word_bits);
129 147
130 return count; 148 return count;
131 } 149 }
@@ -136,9 +154,11 @@ static ssize_t amd64_inject_read_store(struct mem_ctl_info *mci,
136 * Do a DRAM ECC write. Assemble staged values in the pvt area and format into 154 * Do a DRAM ECC write. Assemble staged values in the pvt area and format into
137 * fields needed by the injection registers. 155 * fields needed by the injection registers.
138 */ 156 */
139static ssize_t amd64_inject_write_store(struct mem_ctl_info *mci, 157static ssize_t amd64_inject_write_store(struct device *dev,
158 struct device_attribute *mattr,
140 const char *data, size_t count) 159 const char *data, size_t count)
141{ 160{
161 struct mem_ctl_info *mci = to_mci(dev);
142 struct amd64_pvt *pvt = mci->pvt_info; 162 struct amd64_pvt *pvt = mci->pvt_info;
143 unsigned long value; 163 unsigned long value;
144 u32 section, word_bits; 164 u32 section, word_bits;
@@ -158,7 +178,8 @@ static ssize_t amd64_inject_write_store(struct mem_ctl_info *mci,
158 /* Issue 'word' and 'bit' along with the READ request */ 178 /* Issue 'word' and 'bit' along with the READ request */
159 amd64_write_pci_cfg(pvt->F3, F10_NB_ARRAY_DATA, word_bits); 179 amd64_write_pci_cfg(pvt->F3, F10_NB_ARRAY_DATA, word_bits);
160 180
161 debugf0("section=0x%x word_bits=0x%x\n", section, word_bits); 181 edac_dbg(0, "section=0x%x word_bits=0x%x\n",
182 section, word_bits);
162 183
163 return count; 184 return count;
164 } 185 }
@@ -168,46 +189,47 @@ static ssize_t amd64_inject_write_store(struct mem_ctl_info *mci,
168/* 189/*
169 * update NUM_INJ_ATTRS in case you add new members 190 * update NUM_INJ_ATTRS in case you add new members
170 */ 191 */
171struct mcidev_sysfs_attribute amd64_inj_attrs[] = { 192
172 193static DEVICE_ATTR(inject_section, S_IRUGO | S_IWUSR,
173 { 194 amd64_inject_section_show, amd64_inject_section_store);
174 .attr = { 195static DEVICE_ATTR(inject_word, S_IRUGO | S_IWUSR,
175 .name = "inject_section", 196 amd64_inject_word_show, amd64_inject_word_store);
176 .mode = (S_IRUGO | S_IWUSR) 197static DEVICE_ATTR(inject_ecc_vector, S_IRUGO | S_IWUSR,
177 }, 198 amd64_inject_ecc_vector_show, amd64_inject_ecc_vector_store);
178 .show = amd64_inject_section_show, 199static DEVICE_ATTR(inject_write, S_IRUGO | S_IWUSR,
179 .store = amd64_inject_section_store, 200 NULL, amd64_inject_write_store);
180 }, 201static DEVICE_ATTR(inject_read, S_IRUGO | S_IWUSR,
181 { 202 NULL, amd64_inject_read_store);
182 .attr = { 203
183 .name = "inject_word", 204
184 .mode = (S_IRUGO | S_IWUSR) 205int amd64_create_sysfs_inject_files(struct mem_ctl_info *mci)
185 }, 206{
186 .show = amd64_inject_word_show, 207 int rc;
187 .store = amd64_inject_word_store, 208
188 }, 209 rc = device_create_file(&mci->dev, &dev_attr_inject_section);
189 { 210 if (rc < 0)
190 .attr = { 211 return rc;
191 .name = "inject_ecc_vector", 212 rc = device_create_file(&mci->dev, &dev_attr_inject_word);
192 .mode = (S_IRUGO | S_IWUSR) 213 if (rc < 0)
193 }, 214 return rc;
194 .show = amd64_inject_ecc_vector_show, 215 rc = device_create_file(&mci->dev, &dev_attr_inject_ecc_vector);
195 .store = amd64_inject_ecc_vector_store, 216 if (rc < 0)
196 }, 217 return rc;
197 { 218 rc = device_create_file(&mci->dev, &dev_attr_inject_write);
198 .attr = { 219 if (rc < 0)
199 .name = "inject_write", 220 return rc;
200 .mode = (S_IRUGO | S_IWUSR) 221 rc = device_create_file(&mci->dev, &dev_attr_inject_read);
201 }, 222 if (rc < 0)
202 .show = NULL, 223 return rc;
203 .store = amd64_inject_write_store, 224
204 }, 225 return 0;
205 { 226}
206 .attr = { 227
207 .name = "inject_read", 228void amd64_remove_sysfs_inject_files(struct mem_ctl_info *mci)
208 .mode = (S_IRUGO | S_IWUSR) 229{
209 }, 230 device_remove_file(&mci->dev, &dev_attr_inject_section);
210 .show = NULL, 231 device_remove_file(&mci->dev, &dev_attr_inject_word);
211 .store = amd64_inject_read_store, 232 device_remove_file(&mci->dev, &dev_attr_inject_ecc_vector);
212 }, 233 device_remove_file(&mci->dev, &dev_attr_inject_write);
213}; 234 device_remove_file(&mci->dev, &dev_attr_inject_read);
235}
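
The amd64_edac_inj.c hunk above replaces the driver-private struct mcidev_sysfs_attribute table with plain DEVICE_ATTR() declarations plus explicit device_create_file()/device_remove_file() calls, and the store callbacks now take the sysfs struct device * and map it back to the mci with to_mci(). A minimal sketch of the resulting pattern follows; "inject_foo" and its store helper are illustrative stand-ins, not names from the patch, and the includes assume the in-tree drivers/edac headers as changed by this series.

#include <linux/device.h>
#include <linux/stat.h>
#include "edac_core.h"          /* struct mem_ctl_info, to_mci(), edac_dbg() */

static ssize_t inject_foo_store(struct device *dev,
                                struct device_attribute *mattr,
                                const char *data, size_t count)
{
        struct mem_ctl_info *mci = to_mci(dev);  /* sysfs device -> mci */

        edac_dbg(0, "mci = %p, input: %.*s\n", mci, (int)count, data);
        /* a real handler would parse 'data' and stage it in mci->pvt_info */
        return count;
}

/* S_IRUGO | S_IWUSR with a NULL show method mirrors inject_read/inject_write */
static DEVICE_ATTR(inject_foo, S_IRUGO | S_IWUSR, NULL, inject_foo_store);

int amd64_create_sysfs_inject_files(struct mem_ctl_info *mci)
{
        /* attributes hang off the controller's own sysfs device, &mci->dev */
        return device_create_file(&mci->dev, &dev_attr_inject_foo);
}

The matching removal path simply calls device_remove_file(&mci->dev, ...) once per attribute, as the hunk shows.
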
diff --git a/drivers/edac/amd76x_edac.c b/drivers/edac/amd76x_edac.c
index 9774d443fa57..29eeb68a200c 100644
--- a/drivers/edac/amd76x_edac.c
+++ b/drivers/edac/amd76x_edac.c
@@ -105,7 +105,7 @@ static void amd76x_get_error_info(struct mem_ctl_info *mci,
105{ 105{
106 struct pci_dev *pdev; 106 struct pci_dev *pdev;
107 107
108 pdev = to_pci_dev(mci->dev); 108 pdev = to_pci_dev(mci->pdev);
109 pci_read_config_dword(pdev, AMD76X_ECC_MODE_STATUS, 109 pci_read_config_dword(pdev, AMD76X_ECC_MODE_STATUS,
110 &info->ecc_mode_status); 110 &info->ecc_mode_status);
111 111
@@ -145,10 +145,10 @@ static int amd76x_process_error_info(struct mem_ctl_info *mci,
145 145
146 if (handle_errors) { 146 if (handle_errors) {
147 row = (info->ecc_mode_status >> 4) & 0xf; 147 row = (info->ecc_mode_status >> 4) & 0xf;
148 edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 148 edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1,
149 mci->csrows[row].first_page, 0, 0, 149 mci->csrows[row]->first_page, 0, 0,
150 row, 0, -1, 150 row, 0, -1,
151 mci->ctl_name, "", NULL); 151 mci->ctl_name, "");
152 } 152 }
153 } 153 }
154 154
@@ -160,10 +160,10 @@ static int amd76x_process_error_info(struct mem_ctl_info *mci,
160 160
161 if (handle_errors) { 161 if (handle_errors) {
162 row = info->ecc_mode_status & 0xf; 162 row = info->ecc_mode_status & 0xf;
163 edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 163 edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1,
164 mci->csrows[row].first_page, 0, 0, 164 mci->csrows[row]->first_page, 0, 0,
165 row, 0, -1, 165 row, 0, -1,
166 mci->ctl_name, "", NULL); 166 mci->ctl_name, "");
167 } 167 }
168 } 168 }
169 169
@@ -180,7 +180,7 @@ static int amd76x_process_error_info(struct mem_ctl_info *mci,
180static void amd76x_check(struct mem_ctl_info *mci) 180static void amd76x_check(struct mem_ctl_info *mci)
181{ 181{
182 struct amd76x_error_info info; 182 struct amd76x_error_info info;
183 debugf3("%s()\n", __func__); 183 edac_dbg(3, "\n");
184 amd76x_get_error_info(mci, &info); 184 amd76x_get_error_info(mci, &info);
185 amd76x_process_error_info(mci, &info, 1); 185 amd76x_process_error_info(mci, &info, 1);
186} 186}
@@ -194,8 +194,8 @@ static void amd76x_init_csrows(struct mem_ctl_info *mci, struct pci_dev *pdev,
194 int index; 194 int index;
195 195
196 for (index = 0; index < mci->nr_csrows; index++) { 196 for (index = 0; index < mci->nr_csrows; index++) {
197 csrow = &mci->csrows[index]; 197 csrow = mci->csrows[index];
198 dimm = csrow->channels[0].dimm; 198 dimm = csrow->channels[0]->dimm;
199 199
200 /* find the DRAM Chip Select Base address and mask */ 200 /* find the DRAM Chip Select Base address and mask */
201 pci_read_config_dword(pdev, 201 pci_read_config_dword(pdev,
@@ -241,7 +241,7 @@ static int amd76x_probe1(struct pci_dev *pdev, int dev_idx)
241 u32 ems_mode; 241 u32 ems_mode;
242 struct amd76x_error_info discard; 242 struct amd76x_error_info discard;
243 243
244 debugf0("%s()\n", __func__); 244 edac_dbg(0, "\n");
245 pci_read_config_dword(pdev, AMD76X_ECC_MODE_STATUS, &ems); 245 pci_read_config_dword(pdev, AMD76X_ECC_MODE_STATUS, &ems);
246 ems_mode = (ems >> 10) & 0x3; 246 ems_mode = (ems >> 10) & 0x3;
247 247
@@ -256,8 +256,8 @@ static int amd76x_probe1(struct pci_dev *pdev, int dev_idx)
256 if (mci == NULL) 256 if (mci == NULL)
257 return -ENOMEM; 257 return -ENOMEM;
258 258
259 debugf0("%s(): mci = %p\n", __func__, mci); 259 edac_dbg(0, "mci = %p\n", mci);
260 mci->dev = &pdev->dev; 260 mci->pdev = &pdev->dev;
261 mci->mtype_cap = MEM_FLAG_RDDR; 261 mci->mtype_cap = MEM_FLAG_RDDR;
262 mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_EC | EDAC_FLAG_SECDED; 262 mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_EC | EDAC_FLAG_SECDED;
263 mci->edac_cap = ems_mode ? 263 mci->edac_cap = ems_mode ?
@@ -276,7 +276,7 @@ static int amd76x_probe1(struct pci_dev *pdev, int dev_idx)
276 * type of memory controller. The ID is therefore hardcoded to 0. 276 * type of memory controller. The ID is therefore hardcoded to 0.
277 */ 277 */
278 if (edac_mc_add_mc(mci)) { 278 if (edac_mc_add_mc(mci)) {
279 debugf3("%s(): failed edac_mc_add_mc()\n", __func__); 279 edac_dbg(3, "failed edac_mc_add_mc()\n");
280 goto fail; 280 goto fail;
281 } 281 }
282 282
@@ -292,7 +292,7 @@ static int amd76x_probe1(struct pci_dev *pdev, int dev_idx)
292 } 292 }
293 293
294 /* get this far and it's successful */ 294 /* get this far and it's successful */
295 debugf3("%s(): success\n", __func__); 295 edac_dbg(3, "success\n");
296 return 0; 296 return 0;
297 297
298fail: 298fail:
@@ -304,7 +304,7 @@ fail:
304static int __devinit amd76x_init_one(struct pci_dev *pdev, 304static int __devinit amd76x_init_one(struct pci_dev *pdev,
305 const struct pci_device_id *ent) 305 const struct pci_device_id *ent)
306{ 306{
307 debugf0("%s()\n", __func__); 307 edac_dbg(0, "\n");
308 308
309 /* don't need to call pci_enable_device() */ 309 /* don't need to call pci_enable_device() */
310 return amd76x_probe1(pdev, ent->driver_data); 310 return amd76x_probe1(pdev, ent->driver_data);
@@ -322,7 +322,7 @@ static void __devexit amd76x_remove_one(struct pci_dev *pdev)
322{ 322{
323 struct mem_ctl_info *mci; 323 struct mem_ctl_info *mci;
324 324
325 debugf0("%s()\n", __func__); 325 edac_dbg(0, "\n");
326 326
327 if (amd76x_pci) 327 if (amd76x_pci)
328 edac_pci_release_generic_ctl(amd76x_pci); 328 edac_pci_release_generic_ctl(amd76x_pci);
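
The amd76x hunks show two core API changes from this series side by side: edac_mc_handle_error() now takes an explicit error count right after the mci argument and loses the trailing mcelog pointer, and mci->csrows[] has become an array of pointers, so row data is reached with '->' rather than '.'. A minimal sketch of a corrected-error report against the new prototype; the helper name and the zeroed offset/syndrome values are illustrative only.

#include "edac_core.h"  /* edac_mc_handle_error() as changed by this series */

/* 'row' is whatever the driver decoded from its error status registers */
static void report_one_ce(struct mem_ctl_info *mci, int row)
{
        /* the count (1 here) follows mci, the old trailing mcelog argument
         * is gone, and csrows[] now holds pointers, hence the '->'
         */
        edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1,
                             mci->csrows[row]->first_page, 0, 0,
                             row, 0, -1, mci->ctl_name, "");
}

The count parameter lets a driver whose hardware accumulates errors report them in a single call rather than looping over edac_mc_handle_error().
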
diff --git a/drivers/edac/cell_edac.c b/drivers/edac/cell_edac.c
index 69ee6aab5c71..a1bbd8edd257 100644
--- a/drivers/edac/cell_edac.c
+++ b/drivers/edac/cell_edac.c
@@ -33,10 +33,10 @@ struct cell_edac_priv
33static void cell_edac_count_ce(struct mem_ctl_info *mci, int chan, u64 ar) 33static void cell_edac_count_ce(struct mem_ctl_info *mci, int chan, u64 ar)
34{ 34{
35 struct cell_edac_priv *priv = mci->pvt_info; 35 struct cell_edac_priv *priv = mci->pvt_info;
36 struct csrow_info *csrow = &mci->csrows[0]; 36 struct csrow_info *csrow = mci->csrows[0];
37 unsigned long address, pfn, offset, syndrome; 37 unsigned long address, pfn, offset, syndrome;
38 38
39 dev_dbg(mci->dev, "ECC CE err on node %d, channel %d, ar = 0x%016llx\n", 39 dev_dbg(mci->pdev, "ECC CE err on node %d, channel %d, ar = 0x%016llx\n",
40 priv->node, chan, ar); 40 priv->node, chan, ar);
41 41
42 /* Address decoding is likely a bit bogus, to dbl check */ 42 /* Address decoding is likely a bit bogus, to dbl check */
@@ -48,18 +48,18 @@ static void cell_edac_count_ce(struct mem_ctl_info *mci, int chan, u64 ar)
48 syndrome = (ar & 0x000000001fe00000ul) >> 21; 48 syndrome = (ar & 0x000000001fe00000ul) >> 21;
49 49
50 /* TODO: Decoding of the error address */ 50 /* TODO: Decoding of the error address */
51 edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 51 edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1,
52 csrow->first_page + pfn, offset, syndrome, 52 csrow->first_page + pfn, offset, syndrome,
53 0, chan, -1, "", "", NULL); 53 0, chan, -1, "", "");
54} 54}
55 55
56static void cell_edac_count_ue(struct mem_ctl_info *mci, int chan, u64 ar) 56static void cell_edac_count_ue(struct mem_ctl_info *mci, int chan, u64 ar)
57{ 57{
58 struct cell_edac_priv *priv = mci->pvt_info; 58 struct cell_edac_priv *priv = mci->pvt_info;
59 struct csrow_info *csrow = &mci->csrows[0]; 59 struct csrow_info *csrow = mci->csrows[0];
60 unsigned long address, pfn, offset; 60 unsigned long address, pfn, offset;
61 61
62 dev_dbg(mci->dev, "ECC UE err on node %d, channel %d, ar = 0x%016llx\n", 62 dev_dbg(mci->pdev, "ECC UE err on node %d, channel %d, ar = 0x%016llx\n",
63 priv->node, chan, ar); 63 priv->node, chan, ar);
64 64
65 /* Address decoding is likely a bit bogus, to dbl check */ 65 /* Address decoding is likely a bit bogus, to dbl check */
@@ -70,9 +70,9 @@ static void cell_edac_count_ue(struct mem_ctl_info *mci, int chan, u64 ar)
70 offset = address & ~PAGE_MASK; 70 offset = address & ~PAGE_MASK;
71 71
72 /* TODO: Decoding of the error address */ 72 /* TODO: Decoding of the error address */
73 edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 73 edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1,
74 csrow->first_page + pfn, offset, 0, 74 csrow->first_page + pfn, offset, 0,
75 0, chan, -1, "", "", NULL); 75 0, chan, -1, "", "");
76} 76}
77 77
78static void cell_edac_check(struct mem_ctl_info *mci) 78static void cell_edac_check(struct mem_ctl_info *mci)
@@ -83,7 +83,7 @@ static void cell_edac_check(struct mem_ctl_info *mci)
83 fir = in_be64(&priv->regs->mic_fir); 83 fir = in_be64(&priv->regs->mic_fir);
84#ifdef DEBUG 84#ifdef DEBUG
85 if (fir != priv->prev_fir) { 85 if (fir != priv->prev_fir) {
86 dev_dbg(mci->dev, "fir change : 0x%016lx\n", fir); 86 dev_dbg(mci->pdev, "fir change : 0x%016lx\n", fir);
87 priv->prev_fir = fir; 87 priv->prev_fir = fir;
88 } 88 }
89#endif 89#endif
@@ -119,14 +119,14 @@ static void cell_edac_check(struct mem_ctl_info *mci)
119 mb(); /* sync up */ 119 mb(); /* sync up */
120#ifdef DEBUG 120#ifdef DEBUG
121 fir = in_be64(&priv->regs->mic_fir); 121 fir = in_be64(&priv->regs->mic_fir);
122 dev_dbg(mci->dev, "fir clear : 0x%016lx\n", fir); 122 dev_dbg(mci->pdev, "fir clear : 0x%016lx\n", fir);
123#endif 123#endif
124 } 124 }
125} 125}
126 126
127static void __devinit cell_edac_init_csrows(struct mem_ctl_info *mci) 127static void __devinit cell_edac_init_csrows(struct mem_ctl_info *mci)
128{ 128{
129 struct csrow_info *csrow = &mci->csrows[0]; 129 struct csrow_info *csrow = mci->csrows[0];
130 struct dimm_info *dimm; 130 struct dimm_info *dimm;
131 struct cell_edac_priv *priv = mci->pvt_info; 131 struct cell_edac_priv *priv = mci->pvt_info;
132 struct device_node *np; 132 struct device_node *np;
@@ -150,12 +150,12 @@ static void __devinit cell_edac_init_csrows(struct mem_ctl_info *mci)
150 csrow->last_page = csrow->first_page + nr_pages - 1; 150 csrow->last_page = csrow->first_page + nr_pages - 1;
151 151
152 for (j = 0; j < csrow->nr_channels; j++) { 152 for (j = 0; j < csrow->nr_channels; j++) {
153 dimm = csrow->channels[j].dimm; 153 dimm = csrow->channels[j]->dimm;
154 dimm->mtype = MEM_XDR; 154 dimm->mtype = MEM_XDR;
155 dimm->edac_mode = EDAC_SECDED; 155 dimm->edac_mode = EDAC_SECDED;
156 dimm->nr_pages = nr_pages / csrow->nr_channels; 156 dimm->nr_pages = nr_pages / csrow->nr_channels;
157 } 157 }
158 dev_dbg(mci->dev, 158 dev_dbg(mci->pdev,
159 "Initialized on node %d, chanmask=0x%x," 159 "Initialized on node %d, chanmask=0x%x,"
160 " first_page=0x%lx, nr_pages=0x%x\n", 160 " first_page=0x%lx, nr_pages=0x%x\n",
161 priv->node, priv->chanmask, 161 priv->node, priv->chanmask,
@@ -212,7 +212,7 @@ static int __devinit cell_edac_probe(struct platform_device *pdev)
212 priv->regs = regs; 212 priv->regs = regs;
213 priv->node = pdev->id; 213 priv->node = pdev->id;
214 priv->chanmask = chanmask; 214 priv->chanmask = chanmask;
215 mci->dev = &pdev->dev; 215 mci->pdev = &pdev->dev;
216 mci->mtype_cap = MEM_FLAG_XDR; 216 mci->mtype_cap = MEM_FLAG_XDR;
217 mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_EC | EDAC_FLAG_SECDED; 217 mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_EC | EDAC_FLAG_SECDED;
218 mci->edac_cap = EDAC_FLAG_EC | EDAC_FLAG_SECDED; 218 mci->edac_cap = EDAC_FLAG_EC | EDAC_FLAG_SECDED;
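
In the cell_edac.c hunk, as in the other drivers in this diff, the old mci->dev assignment is renamed: mci->pdev now points at the parent platform or PCI device the driver binds to (what to_pci_dev(mci->pdev) casts back above), while mci->dev is kept for the memory controller's own sysfs device, the one the amd64 injection attributes are registered against with device_create_file(&mci->dev, ...). A tiny sketch of the new assignment, with a hypothetical helper name since the real drivers do this inline in their probe routines:

#include <linux/platform_device.h>
#include "edac_core.h"

static int example_bind(struct mem_ctl_info *mci, struct platform_device *pdev)
{
        mci->pdev = &pdev->dev;         /* was: mci->dev = &pdev->dev */
        dev_dbg(mci->pdev, "binding EDAC MC to %s\n", dev_name(&pdev->dev));
        return edac_mc_add_mc(mci);
}
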
diff --git a/drivers/edac/cpc925_edac.c b/drivers/edac/cpc925_edac.c
index e22030a9de66..c2ef13495873 100644
--- a/drivers/edac/cpc925_edac.c
+++ b/drivers/edac/cpc925_edac.c
@@ -316,13 +316,12 @@ static void get_total_mem(struct cpc925_mc_pdata *pdata)
316 reg += aw; 316 reg += aw;
317 size = of_read_number(reg, sw); 317 size = of_read_number(reg, sw);
318 reg += sw; 318 reg += sw;
319 debugf1("%s: start 0x%lx, size 0x%lx\n", __func__, 319 edac_dbg(1, "start 0x%lx, size 0x%lx\n", start, size);
320 start, size);
321 pdata->total_mem += size; 320 pdata->total_mem += size;
322 } while (reg < reg_end); 321 } while (reg < reg_end);
323 322
324 of_node_put(np); 323 of_node_put(np);
325 debugf0("%s: total_mem 0x%lx\n", __func__, pdata->total_mem); 324 edac_dbg(0, "total_mem 0x%lx\n", pdata->total_mem);
326} 325}
327 326
328static void cpc925_init_csrows(struct mem_ctl_info *mci) 327static void cpc925_init_csrows(struct mem_ctl_info *mci)
@@ -330,8 +329,9 @@ static void cpc925_init_csrows(struct mem_ctl_info *mci)
330 struct cpc925_mc_pdata *pdata = mci->pvt_info; 329 struct cpc925_mc_pdata *pdata = mci->pvt_info;
331 struct csrow_info *csrow; 330 struct csrow_info *csrow;
332 struct dimm_info *dimm; 331 struct dimm_info *dimm;
332 enum dev_type dtype;
333 int index, j; 333 int index, j;
334 u32 mbmr, mbbar, bba; 334 u32 mbmr, mbbar, bba, grain;
335 unsigned long row_size, nr_pages, last_nr_pages = 0; 335 unsigned long row_size, nr_pages, last_nr_pages = 0;
336 336
337 get_total_mem(pdata); 337 get_total_mem(pdata);
@@ -347,7 +347,7 @@ static void cpc925_init_csrows(struct mem_ctl_info *mci)
347 if (bba == 0) 347 if (bba == 0)
348 continue; /* not populated */ 348 continue; /* not populated */
349 349
350 csrow = &mci->csrows[index]; 350 csrow = mci->csrows[index];
351 351
352 row_size = bba * (1UL << 28); /* 256M */ 352 row_size = bba * (1UL << 28); /* 256M */
353 csrow->first_page = last_nr_pages; 353 csrow->first_page = last_nr_pages;
@@ -355,37 +355,36 @@ static void cpc925_init_csrows(struct mem_ctl_info *mci)
355 csrow->last_page = csrow->first_page + nr_pages - 1; 355 csrow->last_page = csrow->first_page + nr_pages - 1;
356 last_nr_pages = csrow->last_page + 1; 356 last_nr_pages = csrow->last_page + 1;
357 357
358 switch (csrow->nr_channels) {
359 case 1: /* Single channel */
360 grain = 32; /* four-beat burst of 32 bytes */
361 break;
362 case 2: /* Dual channel */
363 default:
364 grain = 64; /* four-beat burst of 64 bytes */
365 break;
366 }
367 switch ((mbmr & MBMR_MODE_MASK) >> MBMR_MODE_SHIFT) {
368 case 6: /* 0110, no way to differentiate X8 VS X16 */
369 case 5: /* 0101 */
370 case 8: /* 1000 */
371 dtype = DEV_X16;
372 break;
373 case 7: /* 0111 */
374 case 9: /* 1001 */
375 dtype = DEV_X8;
376 break;
377 default:
378 dtype = DEV_UNKNOWN;
379 break;
380 }
358 for (j = 0; j < csrow->nr_channels; j++) { 381 for (j = 0; j < csrow->nr_channels; j++) {
359 dimm = csrow->channels[j].dimm; 382 dimm = csrow->channels[j]->dimm;
360
361 dimm->nr_pages = nr_pages / csrow->nr_channels; 383 dimm->nr_pages = nr_pages / csrow->nr_channels;
362 dimm->mtype = MEM_RDDR; 384 dimm->mtype = MEM_RDDR;
363 dimm->edac_mode = EDAC_SECDED; 385 dimm->edac_mode = EDAC_SECDED;
364 386 dimm->grain = grain;
365 switch (csrow->nr_channels) { 387 dimm->dtype = dtype;
366 case 1: /* Single channel */
367 dimm->grain = 32; /* four-beat burst of 32 bytes */
368 break;
369 case 2: /* Dual channel */
370 default:
371 dimm->grain = 64; /* four-beat burst of 64 bytes */
372 break;
373 }
374
375 switch ((mbmr & MBMR_MODE_MASK) >> MBMR_MODE_SHIFT) {
376 case 6: /* 0110, no way to differentiate X8 VS X16 */
377 case 5: /* 0101 */
378 case 8: /* 1000 */
379 dimm->dtype = DEV_X16;
380 break;
381 case 7: /* 0111 */
382 case 9: /* 1001 */
383 dimm->dtype = DEV_X8;
384 break;
385 default:
386 dimm->dtype = DEV_UNKNOWN;
387 break;
388 }
389 } 388 }
390 } 389 }
391} 390}
@@ -463,7 +462,7 @@ static void cpc925_mc_get_pfn(struct mem_ctl_info *mci, u32 mear,
463 *csrow = rank; 462 *csrow = rank;
464 463
465#ifdef CONFIG_EDAC_DEBUG 464#ifdef CONFIG_EDAC_DEBUG
466 if (mci->csrows[rank].first_page == 0) { 465 if (mci->csrows[rank]->first_page == 0) {
467 cpc925_mc_printk(mci, KERN_ERR, "ECC occurs in a " 466 cpc925_mc_printk(mci, KERN_ERR, "ECC occurs in a "
468 "non-populated csrow, broken hardware?\n"); 467 "non-populated csrow, broken hardware?\n");
469 return; 468 return;
@@ -471,7 +470,7 @@ static void cpc925_mc_get_pfn(struct mem_ctl_info *mci, u32 mear,
471#endif 470#endif
472 471
473 /* Revert csrow number */ 472 /* Revert csrow number */
474 pa = mci->csrows[rank].first_page << PAGE_SHIFT; 473 pa = mci->csrows[rank]->first_page << PAGE_SHIFT;
475 474
476 /* Revert column address */ 475 /* Revert column address */
477 col += bcnt; 476 col += bcnt;
@@ -512,7 +511,7 @@ static void cpc925_mc_get_pfn(struct mem_ctl_info *mci, u32 mear,
512 *offset = pa & (PAGE_SIZE - 1); 511 *offset = pa & (PAGE_SIZE - 1);
513 *pfn = pa >> PAGE_SHIFT; 512 *pfn = pa >> PAGE_SHIFT;
514 513
515 debugf0("%s: ECC physical address 0x%lx\n", __func__, pa); 514 edac_dbg(0, "ECC physical address 0x%lx\n", pa);
516} 515}
517 516
518static int cpc925_mc_find_channel(struct mem_ctl_info *mci, u16 syndrome) 517static int cpc925_mc_find_channel(struct mem_ctl_info *mci, u16 syndrome)
@@ -555,18 +554,18 @@ static void cpc925_mc_check(struct mem_ctl_info *mci)
555 if (apiexcp & CECC_EXCP_DETECTED) { 554 if (apiexcp & CECC_EXCP_DETECTED) {
556 cpc925_mc_printk(mci, KERN_INFO, "DRAM CECC Fault\n"); 555 cpc925_mc_printk(mci, KERN_INFO, "DRAM CECC Fault\n");
557 channel = cpc925_mc_find_channel(mci, syndrome); 556 channel = cpc925_mc_find_channel(mci, syndrome);
558 edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 557 edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1,
559 pfn, offset, syndrome, 558 pfn, offset, syndrome,
560 csrow, channel, -1, 559 csrow, channel, -1,
561 mci->ctl_name, "", NULL); 560 mci->ctl_name, "");
562 } 561 }
563 562
564 if (apiexcp & UECC_EXCP_DETECTED) { 563 if (apiexcp & UECC_EXCP_DETECTED) {
565 cpc925_mc_printk(mci, KERN_INFO, "DRAM UECC Fault\n"); 564 cpc925_mc_printk(mci, KERN_INFO, "DRAM UECC Fault\n");
566 edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 565 edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1,
567 pfn, offset, 0, 566 pfn, offset, 0,
568 csrow, -1, -1, 567 csrow, -1, -1,
569 mci->ctl_name, "", NULL); 568 mci->ctl_name, "");
570 } 569 }
571 570
572 cpc925_mc_printk(mci, KERN_INFO, "Dump registers:\n"); 571 cpc925_mc_printk(mci, KERN_INFO, "Dump registers:\n");
@@ -852,8 +851,8 @@ static void cpc925_add_edac_devices(void __iomem *vbase)
852 goto err2; 851 goto err2;
853 } 852 }
854 853
855 debugf0("%s: Successfully added edac device for %s\n", 854 edac_dbg(0, "Successfully added edac device for %s\n",
856 __func__, dev_info->ctl_name); 855 dev_info->ctl_name);
857 856
858 continue; 857 continue;
859 858
@@ -884,8 +883,8 @@ static void cpc925_del_edac_devices(void)
884 if (dev_info->exit) 883 if (dev_info->exit)
885 dev_info->exit(dev_info); 884 dev_info->exit(dev_info);
886 885
887 debugf0("%s: Successfully deleted edac device for %s\n", 886 edac_dbg(0, "Successfully deleted edac device for %s\n",
888 __func__, dev_info->ctl_name); 887 dev_info->ctl_name);
889 } 888 }
890} 889}
891 890
@@ -900,7 +899,7 @@ static int cpc925_get_sdram_scrub_rate(struct mem_ctl_info *mci)
900 mscr = __raw_readl(pdata->vbase + REG_MSCR_OFFSET); 899 mscr = __raw_readl(pdata->vbase + REG_MSCR_OFFSET);
901 si = (mscr & MSCR_SI_MASK) >> MSCR_SI_SHIFT; 900 si = (mscr & MSCR_SI_MASK) >> MSCR_SI_SHIFT;
902 901
903 debugf0("%s, Mem Scrub Ctrl Register 0x%x\n", __func__, mscr); 902 edac_dbg(0, "Mem Scrub Ctrl Register 0x%x\n", mscr);
904 903
905 if (((mscr & MSCR_SCRUB_MOD_MASK) != MSCR_BACKGR_SCRUB) || 904 if (((mscr & MSCR_SCRUB_MOD_MASK) != MSCR_BACKGR_SCRUB) ||
906 (si == 0)) { 905 (si == 0)) {
@@ -928,8 +927,7 @@ static int cpc925_mc_get_channels(void __iomem *vbase)
928 ((mbcr & MBCR_64BITBUS_MASK) == 0)) 927 ((mbcr & MBCR_64BITBUS_MASK) == 0))
929 dual = 1; 928 dual = 1;
930 929
931 debugf0("%s: %s channel\n", __func__, 930 edac_dbg(0, "%s channel\n", (dual > 0) ? "Dual" : "Single");
932 (dual > 0) ? "Dual" : "Single");
933 931
934 return dual; 932 return dual;
935} 933}
@@ -944,7 +942,7 @@ static int __devinit cpc925_probe(struct platform_device *pdev)
944 struct resource *r; 942 struct resource *r;
945 int res = 0, nr_channels; 943 int res = 0, nr_channels;
946 944
947 debugf0("%s: %s platform device found!\n", __func__, pdev->name); 945 edac_dbg(0, "%s platform device found!\n", pdev->name);
948 946
949 if (!devres_open_group(&pdev->dev, cpc925_probe, GFP_KERNEL)) { 947 if (!devres_open_group(&pdev->dev, cpc925_probe, GFP_KERNEL)) {
950 res = -ENOMEM; 948 res = -ENOMEM;
@@ -995,7 +993,7 @@ static int __devinit cpc925_probe(struct platform_device *pdev)
995 pdata->edac_idx = edac_mc_idx++; 993 pdata->edac_idx = edac_mc_idx++;
996 pdata->name = pdev->name; 994 pdata->name = pdev->name;
997 995
998 mci->dev = &pdev->dev; 996 mci->pdev = &pdev->dev;
999 platform_set_drvdata(pdev, mci); 997 platform_set_drvdata(pdev, mci);
1000 mci->dev_name = dev_name(&pdev->dev); 998 mci->dev_name = dev_name(&pdev->dev);
1001 mci->mtype_cap = MEM_FLAG_RDDR | MEM_FLAG_DDR; 999 mci->mtype_cap = MEM_FLAG_RDDR | MEM_FLAG_DDR;
@@ -1026,7 +1024,7 @@ static int __devinit cpc925_probe(struct platform_device *pdev)
1026 cpc925_add_edac_devices(vbase); 1024 cpc925_add_edac_devices(vbase);
1027 1025
1028 /* get this far and it's successful */ 1026 /* get this far and it's successful */
1029 debugf0("%s: success\n", __func__); 1027 edac_dbg(0, "success\n");
1030 1028
1031 res = 0; 1029 res = 0;
1032 goto out; 1030 goto out;
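
The cpc925_init_csrows() hunk moves the grain and device-width decode out of the per-channel loop: both depend only on the csrow (its channel count and the MBMR mode field), so they are now computed once and then copied into each dimm. The helpers below restate that decode in isolation; they are illustrative only (the patch keeps the logic inline), and 'mode' stands for the already-extracted (mbmr & MBMR_MODE_MASK) >> MBMR_MODE_SHIFT value.

#include <linux/edac.h>         /* enum dev_type, DEV_X8/DEV_X16/DEV_UNKNOWN */

static u32 cpc925_csrow_grain(int nr_channels)
{
        /* four-beat burst: 32 bytes on a single channel, 64 bytes on dual */
        return nr_channels == 1 ? 32 : 64;
}

static enum dev_type cpc925_csrow_dtype(u32 mode)
{
        switch (mode) {
        case 5:                 /* 0101 */
        case 6:                 /* 0110, no way to tell x8 from x16 */
        case 8:                 /* 1000 */
                return DEV_X16;
        case 7:                 /* 0111 */
        case 9:                 /* 1001 */
                return DEV_X8;
        default:
                return DEV_UNKNOWN;
        }
}
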
diff --git a/drivers/edac/e752x_edac.c b/drivers/edac/e752x_edac.c
index 3186512c9739..a5ed6b795fd4 100644
--- a/drivers/edac/e752x_edac.c
+++ b/drivers/edac/e752x_edac.c
@@ -309,7 +309,7 @@ static unsigned long ctl_page_to_phys(struct mem_ctl_info *mci,
309 u32 remap; 309 u32 remap;
310 struct e752x_pvt *pvt = (struct e752x_pvt *)mci->pvt_info; 310 struct e752x_pvt *pvt = (struct e752x_pvt *)mci->pvt_info;
311 311
312 debugf3("%s()\n", __func__); 312 edac_dbg(3, "\n");
313 313
314 if (page < pvt->tolm) 314 if (page < pvt->tolm)
315 return page; 315 return page;
@@ -335,7 +335,7 @@ static void do_process_ce(struct mem_ctl_info *mci, u16 error_one,
335 int i; 335 int i;
336 struct e752x_pvt *pvt = (struct e752x_pvt *)mci->pvt_info; 336 struct e752x_pvt *pvt = (struct e752x_pvt *)mci->pvt_info;
337 337
338 debugf3("%s()\n", __func__); 338 edac_dbg(3, "\n");
339 339
340 /* convert the addr to 4k page */ 340 /* convert the addr to 4k page */
341 page = sec1_add >> (PAGE_SHIFT - 4); 341 page = sec1_add >> (PAGE_SHIFT - 4);
@@ -371,10 +371,10 @@ static void do_process_ce(struct mem_ctl_info *mci, u16 error_one,
371 channel = !(error_one & 1); 371 channel = !(error_one & 1);
372 372
373 /* e752x mc reads 34:6 of the DRAM linear address */ 373 /* e752x mc reads 34:6 of the DRAM linear address */
374 edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 374 edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1,
375 page, offset_in_page(sec1_add << 4), sec1_syndrome, 375 page, offset_in_page(sec1_add << 4), sec1_syndrome,
376 row, channel, -1, 376 row, channel, -1,
377 "e752x CE", "", NULL); 377 "e752x CE", "");
378} 378}
379 379
380static inline void process_ce(struct mem_ctl_info *mci, u16 error_one, 380static inline void process_ce(struct mem_ctl_info *mci, u16 error_one,
@@ -394,7 +394,7 @@ static void do_process_ue(struct mem_ctl_info *mci, u16 error_one,
394 int row; 394 int row;
395 struct e752x_pvt *pvt = (struct e752x_pvt *)mci->pvt_info; 395 struct e752x_pvt *pvt = (struct e752x_pvt *)mci->pvt_info;
396 396
397 debugf3("%s()\n", __func__); 397 edac_dbg(3, "\n");
398 398
399 if (error_one & 0x0202) { 399 if (error_one & 0x0202) {
400 error_2b = ded_add; 400 error_2b = ded_add;
@@ -408,11 +408,11 @@ static void do_process_ue(struct mem_ctl_info *mci, u16 error_one,
408 edac_mc_find_csrow_by_page(mci, block_page); 408 edac_mc_find_csrow_by_page(mci, block_page);
409 409
410 /* e752x mc reads 34:6 of the DRAM linear address */ 410 /* e752x mc reads 34:6 of the DRAM linear address */
411 edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 411 edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1,
412 block_page, 412 block_page,
413 offset_in_page(error_2b << 4), 0, 413 offset_in_page(error_2b << 4), 0,
414 row, -1, -1, 414 row, -1, -1,
415 "e752x UE from Read", "", NULL); 415 "e752x UE from Read", "");
416 416
417 } 417 }
418 if (error_one & 0x0404) { 418 if (error_one & 0x0404) {
@@ -427,11 +427,11 @@ static void do_process_ue(struct mem_ctl_info *mci, u16 error_one,
427 edac_mc_find_csrow_by_page(mci, block_page); 427 edac_mc_find_csrow_by_page(mci, block_page);
428 428
429 /* e752x mc reads 34:6 of the DRAM linear address */ 429 /* e752x mc reads 34:6 of the DRAM linear address */
430 edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 430 edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1,
431 block_page, 431 block_page,
432 offset_in_page(error_2b << 4), 0, 432 offset_in_page(error_2b << 4), 0,
433 row, -1, -1, 433 row, -1, -1,
434 "e752x UE from Scruber", "", NULL); 434 "e752x UE from Scruber", "");
435 } 435 }
436} 436}
437 437
@@ -453,10 +453,10 @@ static inline void process_ue_no_info_wr(struct mem_ctl_info *mci,
453 if (!handle_error) 453 if (!handle_error)
454 return; 454 return;
455 455
456 debugf3("%s()\n", __func__); 456 edac_dbg(3, "\n");
457 edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 0, 0, 0, 457 edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1, 0, 0, 0,
458 -1, -1, -1, 458 -1, -1, -1,
459 "e752x UE log memory write", "", NULL); 459 "e752x UE log memory write", "");
460} 460}
461 461
462static void do_process_ded_retry(struct mem_ctl_info *mci, u16 error, 462static void do_process_ded_retry(struct mem_ctl_info *mci, u16 error,
@@ -982,7 +982,7 @@ static void e752x_check(struct mem_ctl_info *mci)
982{ 982{
983 struct e752x_error_info info; 983 struct e752x_error_info info;
984 984
985 debugf3("%s()\n", __func__); 985 edac_dbg(3, "\n");
986 e752x_get_error_info(mci, &info); 986 e752x_get_error_info(mci, &info);
987 e752x_process_error_info(mci, &info, 1); 987 e752x_process_error_info(mci, &info, 1);
988} 988}
@@ -1069,6 +1069,7 @@ static void e752x_init_csrows(struct mem_ctl_info *mci, struct pci_dev *pdev,
1069 u16 ddrcsr) 1069 u16 ddrcsr)
1070{ 1070{
1071 struct csrow_info *csrow; 1071 struct csrow_info *csrow;
1072 enum edac_type edac_mode;
1072 unsigned long last_cumul_size; 1073 unsigned long last_cumul_size;
1073 int index, mem_dev, drc_chan; 1074 int index, mem_dev, drc_chan;
1074 int drc_drbg; /* DRB granularity 0=64mb, 1=128mb */ 1075 int drc_drbg; /* DRB granularity 0=64mb, 1=128mb */
@@ -1095,14 +1096,13 @@ static void e752x_init_csrows(struct mem_ctl_info *mci, struct pci_dev *pdev,
1095 for (last_cumul_size = index = 0; index < mci->nr_csrows; index++) { 1096 for (last_cumul_size = index = 0; index < mci->nr_csrows; index++) {
1096 /* mem_dev 0=x8, 1=x4 */ 1097 /* mem_dev 0=x8, 1=x4 */
1097 mem_dev = (dra >> (index * 4 + 2)) & 0x3; 1098 mem_dev = (dra >> (index * 4 + 2)) & 0x3;
1098 csrow = &mci->csrows[remap_csrow_index(mci, index)]; 1099 csrow = mci->csrows[remap_csrow_index(mci, index)];
1099 1100
1100 mem_dev = (mem_dev == 2); 1101 mem_dev = (mem_dev == 2);
1101 pci_read_config_byte(pdev, E752X_DRB + index, &value); 1102 pci_read_config_byte(pdev, E752X_DRB + index, &value);
1102 /* convert a 128 or 64 MiB DRB to a page size. */ 1103 /* convert a 128 or 64 MiB DRB to a page size. */
1103 cumul_size = value << (25 + drc_drbg - PAGE_SHIFT); 1104 cumul_size = value << (25 + drc_drbg - PAGE_SHIFT);
1104 debugf3("%s(): (%d) cumul_size 0x%x\n", __func__, index, 1105 edac_dbg(3, "(%d) cumul_size 0x%x\n", index, cumul_size);
1105 cumul_size);
1106 if (cumul_size == last_cumul_size) 1106 if (cumul_size == last_cumul_size)
1107 continue; /* not populated */ 1107 continue; /* not populated */
1108 1108
@@ -1111,29 +1111,29 @@ static void e752x_init_csrows(struct mem_ctl_info *mci, struct pci_dev *pdev,
1111 nr_pages = cumul_size - last_cumul_size; 1111 nr_pages = cumul_size - last_cumul_size;
1112 last_cumul_size = cumul_size; 1112 last_cumul_size = cumul_size;
1113 1113
1114 /*
1115 * if single channel or x8 devices then SECDED
1116 * if dual channel and x4 then S4ECD4ED
1117 */
1118 if (drc_ddim) {
1119 if (drc_chan && mem_dev) {
1120 edac_mode = EDAC_S4ECD4ED;
1121 mci->edac_cap |= EDAC_FLAG_S4ECD4ED;
1122 } else {
1123 edac_mode = EDAC_SECDED;
1124 mci->edac_cap |= EDAC_FLAG_SECDED;
1125 }
1126 } else
1127 edac_mode = EDAC_NONE;
1114 for (i = 0; i < csrow->nr_channels; i++) { 1128 for (i = 0; i < csrow->nr_channels; i++) {
1115 struct dimm_info *dimm = csrow->channels[i].dimm; 1129 struct dimm_info *dimm = csrow->channels[i]->dimm;
1116 1130
1117 debugf3("Initializing rank at (%i,%i)\n", index, i); 1131 edac_dbg(3, "Initializing rank at (%i,%i)\n", index, i);
1118 dimm->nr_pages = nr_pages / csrow->nr_channels; 1132 dimm->nr_pages = nr_pages / csrow->nr_channels;
1119 dimm->grain = 1 << 12; /* 4KiB - resolution of CELOG */ 1133 dimm->grain = 1 << 12; /* 4KiB - resolution of CELOG */
1120 dimm->mtype = MEM_RDDR; /* only one type supported */ 1134 dimm->mtype = MEM_RDDR; /* only one type supported */
1121 dimm->dtype = mem_dev ? DEV_X4 : DEV_X8; 1135 dimm->dtype = mem_dev ? DEV_X4 : DEV_X8;
1122 1136 dimm->edac_mode = edac_mode;
1123 /*
1124 * if single channel or x8 devices then SECDED
1125 * if dual channel and x4 then S4ECD4ED
1126 */
1127 if (drc_ddim) {
1128 if (drc_chan && mem_dev) {
1129 dimm->edac_mode = EDAC_S4ECD4ED;
1130 mci->edac_cap |= EDAC_FLAG_S4ECD4ED;
1131 } else {
1132 dimm->edac_mode = EDAC_SECDED;
1133 mci->edac_cap |= EDAC_FLAG_SECDED;
1134 }
1135 } else
1136 dimm->edac_mode = EDAC_NONE;
1137 } 1137 }
1138 } 1138 }
1139} 1139}
@@ -1269,8 +1269,8 @@ static int e752x_probe1(struct pci_dev *pdev, int dev_idx)
1269 int drc_chan; /* Number of channels 0=1chan,1=2chan */ 1269 int drc_chan; /* Number of channels 0=1chan,1=2chan */
1270 struct e752x_error_info discard; 1270 struct e752x_error_info discard;
1271 1271
1272 debugf0("%s(): mci\n", __func__); 1272 edac_dbg(0, "mci\n");
1273 debugf0("Starting Probe1\n"); 1273 edac_dbg(0, "Starting Probe1\n");
1274 1274
1275 /* check to see if device 0 function 1 is enabled; if it isn't, we 1275 /* check to see if device 0 function 1 is enabled; if it isn't, we
1276 * assume the BIOS has reserved it for a reason and is expecting 1276 * assume the BIOS has reserved it for a reason and is expecting
@@ -1300,7 +1300,7 @@ static int e752x_probe1(struct pci_dev *pdev, int dev_idx)
1300 if (mci == NULL) 1300 if (mci == NULL)
1301 return -ENOMEM; 1301 return -ENOMEM;
1302 1302
1303 debugf3("%s(): init mci\n", __func__); 1303 edac_dbg(3, "init mci\n");
1304 mci->mtype_cap = MEM_FLAG_RDDR; 1304 mci->mtype_cap = MEM_FLAG_RDDR;
1305 /* 3100 IMCH supports SECDEC only */ 1305 /* 3100 IMCH supports SECDEC only */
1306 mci->edac_ctl_cap = (dev_idx == I3100) ? EDAC_FLAG_SECDED : 1306 mci->edac_ctl_cap = (dev_idx == I3100) ? EDAC_FLAG_SECDED :
@@ -1308,9 +1308,9 @@ static int e752x_probe1(struct pci_dev *pdev, int dev_idx)
1308 /* FIXME - what if different memory types are in different csrows? */ 1308 /* FIXME - what if different memory types are in different csrows? */
1309 mci->mod_name = EDAC_MOD_STR; 1309 mci->mod_name = EDAC_MOD_STR;
1310 mci->mod_ver = E752X_REVISION; 1310 mci->mod_ver = E752X_REVISION;
1311 mci->dev = &pdev->dev; 1311 mci->pdev = &pdev->dev;
1312 1312
1313 debugf3("%s(): init pvt\n", __func__); 1313 edac_dbg(3, "init pvt\n");
1314 pvt = (struct e752x_pvt *)mci->pvt_info; 1314 pvt = (struct e752x_pvt *)mci->pvt_info;
1315 pvt->dev_info = &e752x_devs[dev_idx]; 1315 pvt->dev_info = &e752x_devs[dev_idx];
1316 pvt->mc_symmetric = ((ddrcsr & 0x10) != 0); 1316 pvt->mc_symmetric = ((ddrcsr & 0x10) != 0);
@@ -1320,7 +1320,7 @@ static int e752x_probe1(struct pci_dev *pdev, int dev_idx)
1320 return -ENODEV; 1320 return -ENODEV;
1321 } 1321 }
1322 1322
1323 debugf3("%s(): more mci init\n", __func__); 1323 edac_dbg(3, "more mci init\n");
1324 mci->ctl_name = pvt->dev_info->ctl_name; 1324 mci->ctl_name = pvt->dev_info->ctl_name;
1325 mci->dev_name = pci_name(pdev); 1325 mci->dev_name = pci_name(pdev);
1326 mci->edac_check = e752x_check; 1326 mci->edac_check = e752x_check;
@@ -1342,7 +1342,7 @@ static int e752x_probe1(struct pci_dev *pdev, int dev_idx)
1342 mci->edac_cap = EDAC_FLAG_SECDED; /* the only mode supported */ 1342 mci->edac_cap = EDAC_FLAG_SECDED; /* the only mode supported */
1343 else 1343 else
1344 mci->edac_cap |= EDAC_FLAG_NONE; 1344 mci->edac_cap |= EDAC_FLAG_NONE;
1345 debugf3("%s(): tolm, remapbase, remaplimit\n", __func__); 1345 edac_dbg(3, "tolm, remapbase, remaplimit\n");
1346 1346
1347 /* load the top of low memory, remap base, and remap limit vars */ 1347 /* load the top of low memory, remap base, and remap limit vars */
1348 pci_read_config_word(pdev, E752X_TOLM, &pci_data); 1348 pci_read_config_word(pdev, E752X_TOLM, &pci_data);
@@ -1359,7 +1359,7 @@ static int e752x_probe1(struct pci_dev *pdev, int dev_idx)
1359 * type of memory controller. The ID is therefore hardcoded to 0. 1359 * type of memory controller. The ID is therefore hardcoded to 0.
1360 */ 1360 */
1361 if (edac_mc_add_mc(mci)) { 1361 if (edac_mc_add_mc(mci)) {
1362 debugf3("%s(): failed edac_mc_add_mc()\n", __func__); 1362 edac_dbg(3, "failed edac_mc_add_mc()\n");
1363 goto fail; 1363 goto fail;
1364 } 1364 }
1365 1365
@@ -1377,7 +1377,7 @@ static int e752x_probe1(struct pci_dev *pdev, int dev_idx)
1377 } 1377 }
1378 1378
1379 /* get this far and it's successful */ 1379 /* get this far and it's successful */
1380 debugf3("%s(): success\n", __func__); 1380 edac_dbg(3, "success\n");
1381 return 0; 1381 return 0;
1382 1382
1383fail: 1383fail:
@@ -1393,7 +1393,7 @@ fail:
1393static int __devinit e752x_init_one(struct pci_dev *pdev, 1393static int __devinit e752x_init_one(struct pci_dev *pdev,
1394 const struct pci_device_id *ent) 1394 const struct pci_device_id *ent)
1395{ 1395{
1396 debugf0("%s()\n", __func__); 1396 edac_dbg(0, "\n");
1397 1397
1398 /* wake up and enable device */ 1398 /* wake up and enable device */
1399 if (pci_enable_device(pdev) < 0) 1399 if (pci_enable_device(pdev) < 0)
@@ -1407,7 +1407,7 @@ static void __devexit e752x_remove_one(struct pci_dev *pdev)
1407 struct mem_ctl_info *mci; 1407 struct mem_ctl_info *mci;
1408 struct e752x_pvt *pvt; 1408 struct e752x_pvt *pvt;
1409 1409
1410 debugf0("%s()\n", __func__); 1410 edac_dbg(0, "\n");
1411 1411
1412 if (e752x_pci) 1412 if (e752x_pci)
1413 edac_pci_release_generic_ctl(e752x_pci); 1413 edac_pci_release_generic_ctl(e752x_pci);
@@ -1453,7 +1453,7 @@ static int __init e752x_init(void)
1453{ 1453{
1454 int pci_rc; 1454 int pci_rc;
1455 1455
1456 debugf3("%s()\n", __func__); 1456 edac_dbg(3, "\n");
1457 1457
1458 /* Ensure that the OPSTATE is set correctly for POLL or NMI */ 1458 /* Ensure that the OPSTATE is set correctly for POLL or NMI */
1459 opstate_init(); 1459 opstate_init();
@@ -1464,7 +1464,7 @@ static int __init e752x_init(void)
1464 1464
1465static void __exit e752x_exit(void) 1465static void __exit e752x_exit(void)
1466{ 1466{
1467 debugf3("%s()\n", __func__); 1467 edac_dbg(3, "\n");
1468 pci_unregister_driver(&e752x_driver); 1468 pci_unregister_driver(&e752x_driver);
1469} 1469}
1470 1470
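
The e752x_init_csrows() change (and the matching e7xxx one below) follows the same hoisting pattern: the EDAC mode for a csrow depends only on drc_ddim, drc_chan and mem_dev, so it is now selected once per csrow and the per-channel loop shrinks to plain dimm field assignments. A condensed restatement of that selection as a helper, which is not part of the patch itself:

#include <linux/edac.h>

static enum edac_type e752x_row_edac_mode(struct mem_ctl_info *mci,
                                          int drc_ddim, int drc_chan,
                                          int mem_dev)
{
        if (!drc_ddim)
                return EDAC_NONE;

        if (drc_chan && mem_dev) {              /* dual channel and x4 devices */
                mci->edac_cap |= EDAC_FLAG_S4ECD4ED;
                return EDAC_S4ECD4ED;
        }

        mci->edac_cap |= EDAC_FLAG_SECDED;      /* single channel or x8 devices */
        return EDAC_SECDED;
}
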
diff --git a/drivers/edac/e7xxx_edac.c b/drivers/edac/e7xxx_edac.c
index 9a9c1a546797..9ff57f361a43 100644
--- a/drivers/edac/e7xxx_edac.c
+++ b/drivers/edac/e7xxx_edac.c
@@ -166,7 +166,7 @@ static const struct e7xxx_dev_info e7xxx_devs[] = {
166/* FIXME - is this valid for both SECDED and S4ECD4ED? */ 166/* FIXME - is this valid for both SECDED and S4ECD4ED? */
167static inline int e7xxx_find_channel(u16 syndrome) 167static inline int e7xxx_find_channel(u16 syndrome)
168{ 168{
169 debugf3("%s()\n", __func__); 169 edac_dbg(3, "\n");
170 170
171 if ((syndrome & 0xff00) == 0) 171 if ((syndrome & 0xff00) == 0)
172 return 0; 172 return 0;
@@ -186,7 +186,7 @@ static unsigned long ctl_page_to_phys(struct mem_ctl_info *mci,
186 u32 remap; 186 u32 remap;
187 struct e7xxx_pvt *pvt = (struct e7xxx_pvt *)mci->pvt_info; 187 struct e7xxx_pvt *pvt = (struct e7xxx_pvt *)mci->pvt_info;
188 188
189 debugf3("%s()\n", __func__); 189 edac_dbg(3, "\n");
190 190
191 if ((page < pvt->tolm) || 191 if ((page < pvt->tolm) ||
192 ((page >= 0x100000) && (page < pvt->remapbase))) 192 ((page >= 0x100000) && (page < pvt->remapbase)))
@@ -208,7 +208,7 @@ static void process_ce(struct mem_ctl_info *mci, struct e7xxx_error_info *info)
208 int row; 208 int row;
209 int channel; 209 int channel;
210 210
211 debugf3("%s()\n", __func__); 211 edac_dbg(3, "\n");
212 /* read the error address */ 212 /* read the error address */
213 error_1b = info->dram_celog_add; 213 error_1b = info->dram_celog_add;
214 /* FIXME - should use PAGE_SHIFT */ 214 /* FIXME - should use PAGE_SHIFT */
@@ -219,15 +219,15 @@ static void process_ce(struct mem_ctl_info *mci, struct e7xxx_error_info *info)
219 row = edac_mc_find_csrow_by_page(mci, page); 219 row = edac_mc_find_csrow_by_page(mci, page);
220 /* convert syndrome to channel */ 220 /* convert syndrome to channel */
221 channel = e7xxx_find_channel(syndrome); 221 channel = e7xxx_find_channel(syndrome);
222 edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, page, 0, syndrome, 222 edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1, page, 0, syndrome,
223 row, channel, -1, "e7xxx CE", "", NULL); 223 row, channel, -1, "e7xxx CE", "");
224} 224}
225 225
226static void process_ce_no_info(struct mem_ctl_info *mci) 226static void process_ce_no_info(struct mem_ctl_info *mci)
227{ 227{
228 debugf3("%s()\n", __func__); 228 edac_dbg(3, "\n");
229 edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 0, 0, 0, -1, -1, -1, 229 edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1, 0, 0, 0, -1, -1, -1,
230 "e7xxx CE log register overflow", "", NULL); 230 "e7xxx CE log register overflow", "");
231} 231}
232 232
233static void process_ue(struct mem_ctl_info *mci, struct e7xxx_error_info *info) 233static void process_ue(struct mem_ctl_info *mci, struct e7xxx_error_info *info)
@@ -235,23 +235,23 @@ static void process_ue(struct mem_ctl_info *mci, struct e7xxx_error_info *info)
235 u32 error_2b, block_page; 235 u32 error_2b, block_page;
236 int row; 236 int row;
237 237
238 debugf3("%s()\n", __func__); 238 edac_dbg(3, "\n");
239 /* read the error address */ 239 /* read the error address */
240 error_2b = info->dram_uelog_add; 240 error_2b = info->dram_uelog_add;
241 /* FIXME - should use PAGE_SHIFT */ 241 /* FIXME - should use PAGE_SHIFT */
242 block_page = error_2b >> 6; /* convert to 4k address */ 242 block_page = error_2b >> 6; /* convert to 4k address */
243 row = edac_mc_find_csrow_by_page(mci, block_page); 243 row = edac_mc_find_csrow_by_page(mci, block_page);
244 244
245 edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, block_page, 0, 0, 245 edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1, block_page, 0, 0,
246 row, -1, -1, "e7xxx UE", "", NULL); 246 row, -1, -1, "e7xxx UE", "");
247} 247}
248 248
249static void process_ue_no_info(struct mem_ctl_info *mci) 249static void process_ue_no_info(struct mem_ctl_info *mci)
250{ 250{
251 debugf3("%s()\n", __func__); 251 edac_dbg(3, "\n");
252 252
253 edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 0, 0, 0, -1, -1, -1, 253 edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1, 0, 0, 0, -1, -1, -1,
254 "e7xxx UE log register overflow", "", NULL); 254 "e7xxx UE log register overflow", "");
255} 255}
256 256
257static void e7xxx_get_error_info(struct mem_ctl_info *mci, 257static void e7xxx_get_error_info(struct mem_ctl_info *mci,
@@ -334,7 +334,7 @@ static void e7xxx_check(struct mem_ctl_info *mci)
334{ 334{
335 struct e7xxx_error_info info; 335 struct e7xxx_error_info info;
336 336
337 debugf3("%s()\n", __func__); 337 edac_dbg(3, "\n");
338 e7xxx_get_error_info(mci, &info); 338 e7xxx_get_error_info(mci, &info);
339 e7xxx_process_error_info(mci, &info, 1); 339 e7xxx_process_error_info(mci, &info, 1);
340} 340}
@@ -362,6 +362,7 @@ static void e7xxx_init_csrows(struct mem_ctl_info *mci, struct pci_dev *pdev,
362 int drc_chan, drc_drbg, drc_ddim, mem_dev; 362 int drc_chan, drc_drbg, drc_ddim, mem_dev;
363 struct csrow_info *csrow; 363 struct csrow_info *csrow;
364 struct dimm_info *dimm; 364 struct dimm_info *dimm;
365 enum edac_type edac_mode;
365 366
366 pci_read_config_dword(pdev, E7XXX_DRA, &dra); 367 pci_read_config_dword(pdev, E7XXX_DRA, &dra);
367 drc_chan = dual_channel_active(drc, dev_idx); 368 drc_chan = dual_channel_active(drc, dev_idx);
@@ -377,13 +378,12 @@ static void e7xxx_init_csrows(struct mem_ctl_info *mci, struct pci_dev *pdev,
377 for (index = 0; index < mci->nr_csrows; index++) { 378 for (index = 0; index < mci->nr_csrows; index++) {
378 /* mem_dev 0=x8, 1=x4 */ 379 /* mem_dev 0=x8, 1=x4 */
379 mem_dev = (dra >> (index * 4 + 3)) & 0x1; 380 mem_dev = (dra >> (index * 4 + 3)) & 0x1;
380 csrow = &mci->csrows[index]; 381 csrow = mci->csrows[index];
381 382
382 pci_read_config_byte(pdev, E7XXX_DRB + index, &value); 383 pci_read_config_byte(pdev, E7XXX_DRB + index, &value);
383 /* convert a 64 or 32 MiB DRB to a page size. */ 384 /* convert a 64 or 32 MiB DRB to a page size. */
384 cumul_size = value << (25 + drc_drbg - PAGE_SHIFT); 385 cumul_size = value << (25 + drc_drbg - PAGE_SHIFT);
385 debugf3("%s(): (%d) cumul_size 0x%x\n", __func__, index, 386 edac_dbg(3, "(%d) cumul_size 0x%x\n", index, cumul_size);
386 cumul_size);
387 if (cumul_size == last_cumul_size) 387 if (cumul_size == last_cumul_size)
388 continue; /* not populated */ 388 continue; /* not populated */
389 389
@@ -392,28 +392,29 @@ static void e7xxx_init_csrows(struct mem_ctl_info *mci, struct pci_dev *pdev,
392 nr_pages = cumul_size - last_cumul_size; 392 nr_pages = cumul_size - last_cumul_size;
393 last_cumul_size = cumul_size; 393 last_cumul_size = cumul_size;
394 394
395 /*
396 * if single channel or x8 devices then SECDED
397 * if dual channel and x4 then S4ECD4ED
398 */
399 if (drc_ddim) {
400 if (drc_chan && mem_dev) {
401 edac_mode = EDAC_S4ECD4ED;
402 mci->edac_cap |= EDAC_FLAG_S4ECD4ED;
403 } else {
404 edac_mode = EDAC_SECDED;
405 mci->edac_cap |= EDAC_FLAG_SECDED;
406 }
407 } else
408 edac_mode = EDAC_NONE;
409
395 for (j = 0; j < drc_chan + 1; j++) { 410 for (j = 0; j < drc_chan + 1; j++) {
396 dimm = csrow->channels[j].dimm; 411 dimm = csrow->channels[j]->dimm;
397 412
398 dimm->nr_pages = nr_pages / (drc_chan + 1); 413 dimm->nr_pages = nr_pages / (drc_chan + 1);
399 dimm->grain = 1 << 12; /* 4KiB - resolution of CELOG */ 414 dimm->grain = 1 << 12; /* 4KiB - resolution of CELOG */
400 dimm->mtype = MEM_RDDR; /* only one type supported */ 415 dimm->mtype = MEM_RDDR; /* only one type supported */
401 dimm->dtype = mem_dev ? DEV_X4 : DEV_X8; 416 dimm->dtype = mem_dev ? DEV_X4 : DEV_X8;
402 417 dimm->edac_mode = edac_mode;
403 /*
404 * if single channel or x8 devices then SECDED
405 * if dual channel and x4 then S4ECD4ED
406 */
407 if (drc_ddim) {
408 if (drc_chan && mem_dev) {
409 dimm->edac_mode = EDAC_S4ECD4ED;
410 mci->edac_cap |= EDAC_FLAG_S4ECD4ED;
411 } else {
412 dimm->edac_mode = EDAC_SECDED;
413 mci->edac_cap |= EDAC_FLAG_SECDED;
414 }
415 } else
416 dimm->edac_mode = EDAC_NONE;
417 } 418 }
418 } 419 }
419} 420}
@@ -428,7 +429,7 @@ static int e7xxx_probe1(struct pci_dev *pdev, int dev_idx)
428 int drc_chan; 429 int drc_chan;
429 struct e7xxx_error_info discard; 430 struct e7xxx_error_info discard;
430 431
431 debugf0("%s(): mci\n", __func__); 432 edac_dbg(0, "mci\n");
432 433
433 pci_read_config_dword(pdev, E7XXX_DRC, &drc); 434 pci_read_config_dword(pdev, E7XXX_DRC, &drc);
434 435
@@ -451,15 +452,15 @@ static int e7xxx_probe1(struct pci_dev *pdev, int dev_idx)
451 if (mci == NULL) 452 if (mci == NULL)
452 return -ENOMEM; 453 return -ENOMEM;
453 454
454 debugf3("%s(): init mci\n", __func__); 455 edac_dbg(3, "init mci\n");
455 mci->mtype_cap = MEM_FLAG_RDDR; 456 mci->mtype_cap = MEM_FLAG_RDDR;
456 mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED | 457 mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED |
457 EDAC_FLAG_S4ECD4ED; 458 EDAC_FLAG_S4ECD4ED;
458 /* FIXME - what if different memory types are in different csrows? */ 459 /* FIXME - what if different memory types are in different csrows? */
459 mci->mod_name = EDAC_MOD_STR; 460 mci->mod_name = EDAC_MOD_STR;
460 mci->mod_ver = E7XXX_REVISION; 461 mci->mod_ver = E7XXX_REVISION;
461 mci->dev = &pdev->dev; 462 mci->pdev = &pdev->dev;
462 debugf3("%s(): init pvt\n", __func__); 463 edac_dbg(3, "init pvt\n");
463 pvt = (struct e7xxx_pvt *)mci->pvt_info; 464 pvt = (struct e7xxx_pvt *)mci->pvt_info;
464 pvt->dev_info = &e7xxx_devs[dev_idx]; 465 pvt->dev_info = &e7xxx_devs[dev_idx];
465 pvt->bridge_ck = pci_get_device(PCI_VENDOR_ID_INTEL, 466 pvt->bridge_ck = pci_get_device(PCI_VENDOR_ID_INTEL,
@@ -472,14 +473,14 @@ static int e7xxx_probe1(struct pci_dev *pdev, int dev_idx)
472 goto fail0; 473 goto fail0;
473 } 474 }
474 475
475 debugf3("%s(): more mci init\n", __func__); 476 edac_dbg(3, "more mci init\n");
476 mci->ctl_name = pvt->dev_info->ctl_name; 477 mci->ctl_name = pvt->dev_info->ctl_name;
477 mci->dev_name = pci_name(pdev); 478 mci->dev_name = pci_name(pdev);
478 mci->edac_check = e7xxx_check; 479 mci->edac_check = e7xxx_check;
479 mci->ctl_page_to_phys = ctl_page_to_phys; 480 mci->ctl_page_to_phys = ctl_page_to_phys;
480 e7xxx_init_csrows(mci, pdev, dev_idx, drc); 481 e7xxx_init_csrows(mci, pdev, dev_idx, drc);
481 mci->edac_cap |= EDAC_FLAG_NONE; 482 mci->edac_cap |= EDAC_FLAG_NONE;
482 debugf3("%s(): tolm, remapbase, remaplimit\n", __func__); 483 edac_dbg(3, "tolm, remapbase, remaplimit\n");
483 /* load the top of low memory, remap base, and remap limit vars */ 484 /* load the top of low memory, remap base, and remap limit vars */
484 pci_read_config_word(pdev, E7XXX_TOLM, &pci_data); 485 pci_read_config_word(pdev, E7XXX_TOLM, &pci_data);
485 pvt->tolm = ((u32) pci_data) << 4; 486 pvt->tolm = ((u32) pci_data) << 4;
@@ -498,7 +499,7 @@ static int e7xxx_probe1(struct pci_dev *pdev, int dev_idx)
498 * type of memory controller. The ID is therefore hardcoded to 0. 499 * type of memory controller. The ID is therefore hardcoded to 0.
499 */ 500 */
500 if (edac_mc_add_mc(mci)) { 501 if (edac_mc_add_mc(mci)) {
501 debugf3("%s(): failed edac_mc_add_mc()\n", __func__); 502 edac_dbg(3, "failed edac_mc_add_mc()\n");
502 goto fail1; 503 goto fail1;
503 } 504 }
504 505
@@ -514,7 +515,7 @@ static int e7xxx_probe1(struct pci_dev *pdev, int dev_idx)
514 } 515 }
515 516
516 /* get this far and it's successful */ 517 /* get this far and it's successful */
517 debugf3("%s(): success\n", __func__); 518 edac_dbg(3, "success\n");
518 return 0; 519 return 0;
519 520
520fail1: 521fail1:
@@ -530,7 +531,7 @@ fail0:
530static int __devinit e7xxx_init_one(struct pci_dev *pdev, 531static int __devinit e7xxx_init_one(struct pci_dev *pdev,
531 const struct pci_device_id *ent) 532 const struct pci_device_id *ent)
532{ 533{
533 debugf0("%s()\n", __func__); 534 edac_dbg(0, "\n");
534 535
535 /* wake up and enable device */ 536 /* wake up and enable device */
536 return pci_enable_device(pdev) ? 537 return pci_enable_device(pdev) ?
@@ -542,7 +543,7 @@ static void __devexit e7xxx_remove_one(struct pci_dev *pdev)
542 struct mem_ctl_info *mci; 543 struct mem_ctl_info *mci;
543 struct e7xxx_pvt *pvt; 544 struct e7xxx_pvt *pvt;
544 545
545 debugf0("%s()\n", __func__); 546 edac_dbg(0, "\n");
546 547
547 if (e7xxx_pci) 548 if (e7xxx_pci)
548 edac_pci_release_generic_ctl(e7xxx_pci); 549 edac_pci_release_generic_ctl(e7xxx_pci);
diff --git a/drivers/edac/edac_core.h b/drivers/edac/edac_core.h
index 117490d4f835..23bb99fa44f1 100644
--- a/drivers/edac/edac_core.h
+++ b/drivers/edac/edac_core.h
@@ -71,26 +71,21 @@ extern const char *edac_mem_types[];
71#ifdef CONFIG_EDAC_DEBUG 71#ifdef CONFIG_EDAC_DEBUG
72extern int edac_debug_level; 72extern int edac_debug_level;
73 73
74#define edac_debug_printk(level, fmt, arg...) \ 74#define edac_dbg(level, fmt, ...) \
75 do { \ 75do { \
76 if (level <= edac_debug_level) \ 76 if (level <= edac_debug_level) \
77 edac_printk(KERN_DEBUG, EDAC_DEBUG, \ 77 edac_printk(KERN_DEBUG, EDAC_DEBUG, \
78 "%s: " fmt, __func__, ##arg); \ 78 "%s: " fmt, __func__, ##__VA_ARGS__); \
79 } while (0) 79} while (0)
80
81#define debugf0( ... ) edac_debug_printk(0, __VA_ARGS__ )
82#define debugf1( ... ) edac_debug_printk(1, __VA_ARGS__ )
83#define debugf2( ... ) edac_debug_printk(2, __VA_ARGS__ )
84#define debugf3( ... ) edac_debug_printk(3, __VA_ARGS__ )
85#define debugf4( ... ) edac_debug_printk(4, __VA_ARGS__ )
86 80
87#else /* !CONFIG_EDAC_DEBUG */ 81#else /* !CONFIG_EDAC_DEBUG */
88 82
89#define debugf0( ... ) 83#define edac_dbg(level, fmt, ...) \
90#define debugf1( ... ) 84do { \
91#define debugf2( ... ) 85 if (0) \
92#define debugf3( ... ) 86 edac_printk(KERN_DEBUG, EDAC_DEBUG, \
93#define debugf4( ... ) 87 "%s: " fmt, __func__, ##__VA_ARGS__); \
88} while (0)
94 89
95#endif /* !CONFIG_EDAC_DEBUG */ 90#endif /* !CONFIG_EDAC_DEBUG */
96 91
@@ -460,15 +455,15 @@ extern int edac_mc_find_csrow_by_page(struct mem_ctl_info *mci,
460 unsigned long page); 455 unsigned long page);
461void edac_mc_handle_error(const enum hw_event_mc_err_type type, 456void edac_mc_handle_error(const enum hw_event_mc_err_type type,
462 struct mem_ctl_info *mci, 457 struct mem_ctl_info *mci,
458 const u16 error_count,
463 const unsigned long page_frame_number, 459 const unsigned long page_frame_number,
464 const unsigned long offset_in_page, 460 const unsigned long offset_in_page,
465 const unsigned long syndrome, 461 const unsigned long syndrome,
466 const int layer0, 462 const int top_layer,
467 const int layer1, 463 const int mid_layer,
468 const int layer2, 464 const int low_layer,
469 const char *msg, 465 const char *msg,
470 const char *other_detail, 466 const char *other_detail);
471 const void *mcelog);
472 467
473/* 468/*
474 * edac_device APIs 469 * edac_device APIs
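
The edac_core.h hunk above collapses the five debugf0()..debugf4() wrappers into a single edac_dbg(level, fmt, ...) that prepends "%s: ", __func__ itself; the same hunk gives edac_mc_handle_error() its new prototype (an error_count argument, the layer parameters renamed to top/mid/low_layer, and the mcelog pointer dropped), which is the shape all the call sites earlier in this diff were converted to. The before/after call shapes, taken from the conversions above:

        /* before: level baked into the macro name, __func__ passed by hand */
        debugf0("%s(): mci = %p\n", __func__, mci);

        /* after: level is the first argument; edac_dbg() adds the function
         * name itself, so only the payload is passed
         */
        edac_dbg(0, "mci = %p\n", mci);

The !CONFIG_EDAC_DEBUG stub keeps the arguments behind an if (0), so format strings stay type-checked and variables used only in debug messages do not turn into unused-variable warnings when debugging is compiled out.
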
diff --git a/drivers/edac/edac_device.c b/drivers/edac/edac_device.c
index ee3f1f810c1e..211021dfec73 100644
--- a/drivers/edac/edac_device.c
+++ b/drivers/edac/edac_device.c
@@ -40,12 +40,13 @@ static LIST_HEAD(edac_device_list);
40#ifdef CONFIG_EDAC_DEBUG 40#ifdef CONFIG_EDAC_DEBUG
41static void edac_device_dump_device(struct edac_device_ctl_info *edac_dev) 41static void edac_device_dump_device(struct edac_device_ctl_info *edac_dev)
42{ 42{
43 debugf3("\tedac_dev = %p dev_idx=%d \n", edac_dev, edac_dev->dev_idx); 43 edac_dbg(3, "\tedac_dev = %p dev_idx=%d\n",
44 debugf4("\tedac_dev->edac_check = %p\n", edac_dev->edac_check); 44 edac_dev, edac_dev->dev_idx);
45 debugf3("\tdev = %p\n", edac_dev->dev); 45 edac_dbg(4, "\tedac_dev->edac_check = %p\n", edac_dev->edac_check);
46 debugf3("\tmod_name:ctl_name = %s:%s\n", 46 edac_dbg(3, "\tdev = %p\n", edac_dev->dev);
47 edac_dev->mod_name, edac_dev->ctl_name); 47 edac_dbg(3, "\tmod_name:ctl_name = %s:%s\n",
48 debugf3("\tpvt_info = %p\n\n", edac_dev->pvt_info); 48 edac_dev->mod_name, edac_dev->ctl_name);
49 edac_dbg(3, "\tpvt_info = %p\n\n", edac_dev->pvt_info);
49} 50}
50#endif /* CONFIG_EDAC_DEBUG */ 51#endif /* CONFIG_EDAC_DEBUG */
51 52
@@ -82,8 +83,7 @@ struct edac_device_ctl_info *edac_device_alloc_ctl_info(
82 void *pvt, *p; 83 void *pvt, *p;
83 int err; 84 int err;
84 85
85 debugf4("%s() instances=%d blocks=%d\n", 86 edac_dbg(4, "instances=%d blocks=%d\n", nr_instances, nr_blocks);
86 __func__, nr_instances, nr_blocks);
87 87
88 /* Calculate the size of memory we need to allocate AND 88 /* Calculate the size of memory we need to allocate AND
89 * determine the offsets of the various item arrays 89 * determine the offsets of the various item arrays
@@ -156,8 +156,8 @@ struct edac_device_ctl_info *edac_device_alloc_ctl_info(
156 /* Name of this edac device */ 156 /* Name of this edac device */
157 snprintf(dev_ctl->name,sizeof(dev_ctl->name),"%s",edac_device_name); 157 snprintf(dev_ctl->name,sizeof(dev_ctl->name),"%s",edac_device_name);
158 158
159 debugf4("%s() edac_dev=%p next after end=%p\n", 159 edac_dbg(4, "edac_dev=%p next after end=%p\n",
160 __func__, dev_ctl, pvt + sz_private ); 160 dev_ctl, pvt + sz_private);
161 161
162 /* Initialize every Instance */ 162 /* Initialize every Instance */
163 for (instance = 0; instance < nr_instances; instance++) { 163 for (instance = 0; instance < nr_instances; instance++) {
@@ -178,10 +178,8 @@ struct edac_device_ctl_info *edac_device_alloc_ctl_info(
178 snprintf(blk->name, sizeof(blk->name), 178 snprintf(blk->name, sizeof(blk->name),
179 "%s%d", edac_block_name, block+offset_value); 179 "%s%d", edac_block_name, block+offset_value);
180 180
181 debugf4("%s() instance=%d inst_p=%p block=#%d " 181 edac_dbg(4, "instance=%d inst_p=%p block=#%d block_p=%p name='%s'\n",
182 "block_p=%p name='%s'\n", 182 instance, inst, block, blk, blk->name);
183 __func__, instance, inst, block,
184 blk, blk->name);
185 183
186 /* if there are NO attributes OR no attribute pointer 184 /* if there are NO attributes OR no attribute pointer
187 * then continue on to next block iteration 185 * then continue on to next block iteration
@@ -194,8 +192,8 @@ struct edac_device_ctl_info *edac_device_alloc_ctl_info(
194 attrib_p = &dev_attrib[block*nr_instances*nr_attrib]; 192 attrib_p = &dev_attrib[block*nr_instances*nr_attrib];
195 blk->block_attributes = attrib_p; 193 blk->block_attributes = attrib_p;
196 194
197 debugf4("%s() THIS BLOCK_ATTRIB=%p\n", 195 edac_dbg(4, "THIS BLOCK_ATTRIB=%p\n",
198 __func__, blk->block_attributes); 196 blk->block_attributes);
199 197
200 /* Initialize every user specified attribute in this 198 /* Initialize every user specified attribute in this
201 * block with the data the caller passed in 199 * block with the data the caller passed in
@@ -214,11 +212,10 @@ struct edac_device_ctl_info *edac_device_alloc_ctl_info(
214 212
215 attrib->block = blk; /* up link */ 213 attrib->block = blk; /* up link */
216 214
217 debugf4("%s() alloc-attrib=%p attrib_name='%s' " 215 edac_dbg(4, "alloc-attrib=%p attrib_name='%s' attrib-spec=%p spec-name=%s\n",
218 "attrib-spec=%p spec-name=%s\n", 216 attrib, attrib->attr.name,
219 __func__, attrib, attrib->attr.name, 217 &attrib_spec[attr],
220 &attrib_spec[attr], 218 attrib_spec[attr].attr.name
221 attrib_spec[attr].attr.name
222 ); 219 );
223 } 220 }
224 } 221 }
@@ -273,7 +270,7 @@ static struct edac_device_ctl_info *find_edac_device_by_dev(struct device *dev)
273 struct edac_device_ctl_info *edac_dev; 270 struct edac_device_ctl_info *edac_dev;
274 struct list_head *item; 271 struct list_head *item;
275 272
276 debugf0("%s()\n", __func__); 273 edac_dbg(0, "\n");
277 274
278 list_for_each(item, &edac_device_list) { 275 list_for_each(item, &edac_device_list) {
279 edac_dev = list_entry(item, struct edac_device_ctl_info, link); 276 edac_dev = list_entry(item, struct edac_device_ctl_info, link);
@@ -408,7 +405,7 @@ static void edac_device_workq_function(struct work_struct *work_req)
408void edac_device_workq_setup(struct edac_device_ctl_info *edac_dev, 405void edac_device_workq_setup(struct edac_device_ctl_info *edac_dev,
409 unsigned msec) 406 unsigned msec)
410{ 407{
411 debugf0("%s()\n", __func__); 408 edac_dbg(0, "\n");
412 409
413 /* take the arg 'msec' and set it into the control structure 410 /* take the arg 'msec' and set it into the control structure
414 * to used in the time period calculation 411 * to used in the time period calculation
@@ -496,7 +493,7 @@ EXPORT_SYMBOL_GPL(edac_device_alloc_index);
496 */ 493 */
497int edac_device_add_device(struct edac_device_ctl_info *edac_dev) 494int edac_device_add_device(struct edac_device_ctl_info *edac_dev)
498{ 495{
499 debugf0("%s()\n", __func__); 496 edac_dbg(0, "\n");
500 497
501#ifdef CONFIG_EDAC_DEBUG 498#ifdef CONFIG_EDAC_DEBUG
502 if (edac_debug_level >= 3) 499 if (edac_debug_level >= 3)
@@ -570,7 +567,7 @@ struct edac_device_ctl_info *edac_device_del_device(struct device *dev)
570{ 567{
571 struct edac_device_ctl_info *edac_dev; 568 struct edac_device_ctl_info *edac_dev;
572 569
573 debugf0("%s()\n", __func__); 570 edac_dbg(0, "\n");
574 571
575 mutex_lock(&device_ctls_mutex); 572 mutex_lock(&device_ctls_mutex);
576 573
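
The edac_device.c hunks above all apply the same mechanical conversion: the old debugfN() calls encoded the level in the macro name and had to pass __func__ and split long format strings by hand, while the new edac_dbg() takes the level as its first argument and prepends the function name itself. A minimal before/after sketch, using calls that appear in the hunks above:

	/* old style: __func__ passed explicitly, format split across lines */
	debugf0("%s()\n", __func__);
	debugf4("%s() instance=%d inst_p=%p\n", __func__, instance, inst);

	/* new style: level first, function name added by the macro */
	edac_dbg(0, "\n");
	edac_dbg(4, "instance=%d inst_p=%p\n", instance, inst);
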
diff --git a/drivers/edac/edac_device_sysfs.c b/drivers/edac/edac_device_sysfs.c
index b4ea185ccebf..fb68a06ad683 100644
--- a/drivers/edac/edac_device_sysfs.c
+++ b/drivers/edac/edac_device_sysfs.c
@@ -202,7 +202,7 @@ static void edac_device_ctrl_master_release(struct kobject *kobj)
202{ 202{
203 struct edac_device_ctl_info *edac_dev = to_edacdev(kobj); 203 struct edac_device_ctl_info *edac_dev = to_edacdev(kobj);
204 204
205 debugf4("%s() control index=%d\n", __func__, edac_dev->dev_idx); 205 edac_dbg(4, "control index=%d\n", edac_dev->dev_idx);
206 206
207 /* decrement the EDAC CORE module ref count */ 207 /* decrement the EDAC CORE module ref count */
208 module_put(edac_dev->owner); 208 module_put(edac_dev->owner);
@@ -233,12 +233,12 @@ int edac_device_register_sysfs_main_kobj(struct edac_device_ctl_info *edac_dev)
233 struct bus_type *edac_subsys; 233 struct bus_type *edac_subsys;
234 int err; 234 int err;
235 235
236 debugf1("%s()\n", __func__); 236 edac_dbg(1, "\n");
237 237
238 /* get the /sys/devices/system/edac reference */ 238 /* get the /sys/devices/system/edac reference */
239 edac_subsys = edac_get_sysfs_subsys(); 239 edac_subsys = edac_get_sysfs_subsys();
240 if (edac_subsys == NULL) { 240 if (edac_subsys == NULL) {
241 debugf1("%s() no edac_subsys error\n", __func__); 241 edac_dbg(1, "no edac_subsys error\n");
242 err = -ENODEV; 242 err = -ENODEV;
243 goto err_out; 243 goto err_out;
244 } 244 }
@@ -264,8 +264,8 @@ int edac_device_register_sysfs_main_kobj(struct edac_device_ctl_info *edac_dev)
264 &edac_subsys->dev_root->kobj, 264 &edac_subsys->dev_root->kobj,
265 "%s", edac_dev->name); 265 "%s", edac_dev->name);
266 if (err) { 266 if (err) {
267 debugf1("%s()Failed to register '.../edac/%s'\n", 267 edac_dbg(1, "Failed to register '.../edac/%s'\n",
268 __func__, edac_dev->name); 268 edac_dev->name);
269 goto err_kobj_reg; 269 goto err_kobj_reg;
270 } 270 }
271 kobject_uevent(&edac_dev->kobj, KOBJ_ADD); 271 kobject_uevent(&edac_dev->kobj, KOBJ_ADD);
@@ -274,8 +274,7 @@ int edac_device_register_sysfs_main_kobj(struct edac_device_ctl_info *edac_dev)
274 * edac_device_unregister_sysfs_main_kobj() must be used 274 * edac_device_unregister_sysfs_main_kobj() must be used
275 */ 275 */
276 276
277 debugf4("%s() Registered '.../edac/%s' kobject\n", 277 edac_dbg(4, "Registered '.../edac/%s' kobject\n", edac_dev->name);
278 __func__, edac_dev->name);
279 278
280 return 0; 279 return 0;
281 280
@@ -296,9 +295,8 @@ err_out:
296 */ 295 */
297void edac_device_unregister_sysfs_main_kobj(struct edac_device_ctl_info *dev) 296void edac_device_unregister_sysfs_main_kobj(struct edac_device_ctl_info *dev)
298{ 297{
299 debugf0("%s()\n", __func__); 298 edac_dbg(0, "\n");
300 debugf4("%s() name of kobject is: %s\n", 299 edac_dbg(4, "name of kobject is: %s\n", kobject_name(&dev->kobj));
301 __func__, kobject_name(&dev->kobj));
302 300
303 /* 301 /*
304 * Unregister the edac device's kobject and 302 * Unregister the edac device's kobject and
@@ -336,7 +334,7 @@ static void edac_device_ctrl_instance_release(struct kobject *kobj)
336{ 334{
337 struct edac_device_instance *instance; 335 struct edac_device_instance *instance;
338 336
339 debugf1("%s()\n", __func__); 337 edac_dbg(1, "\n");
340 338
341 /* map from this kobj to the main control struct 339 /* map from this kobj to the main control struct
342 * and then dec the main kobj count 340 * and then dec the main kobj count
@@ -442,7 +440,7 @@ static void edac_device_ctrl_block_release(struct kobject *kobj)
442{ 440{
443 struct edac_device_block *block; 441 struct edac_device_block *block;
444 442
445 debugf1("%s()\n", __func__); 443 edac_dbg(1, "\n");
446 444
447 /* get the container of the kobj */ 445 /* get the container of the kobj */
448 block = to_block(kobj); 446 block = to_block(kobj);
@@ -524,10 +522,10 @@ static int edac_device_create_block(struct edac_device_ctl_info *edac_dev,
524 struct edac_dev_sysfs_block_attribute *sysfs_attrib; 522 struct edac_dev_sysfs_block_attribute *sysfs_attrib;
525 struct kobject *main_kobj; 523 struct kobject *main_kobj;
526 524
527 debugf4("%s() Instance '%s' inst_p=%p block '%s' block_p=%p\n", 525 edac_dbg(4, "Instance '%s' inst_p=%p block '%s' block_p=%p\n",
528 __func__, instance->name, instance, block->name, block); 526 instance->name, instance, block->name, block);
529 debugf4("%s() block kobj=%p block kobj->parent=%p\n", 527 edac_dbg(4, "block kobj=%p block kobj->parent=%p\n",
530 __func__, &block->kobj, &block->kobj.parent); 528 &block->kobj, &block->kobj.parent);
531 529
532 /* init this block's kobject */ 530 /* init this block's kobject */
533 memset(&block->kobj, 0, sizeof(struct kobject)); 531 memset(&block->kobj, 0, sizeof(struct kobject));
@@ -546,8 +544,7 @@ static int edac_device_create_block(struct edac_device_ctl_info *edac_dev,
546 &instance->kobj, 544 &instance->kobj,
547 "%s", block->name); 545 "%s", block->name);
548 if (err) { 546 if (err) {
549 debugf1("%s() Failed to register instance '%s'\n", 547 edac_dbg(1, "Failed to register instance '%s'\n", block->name);
550 __func__, block->name);
551 kobject_put(main_kobj); 548 kobject_put(main_kobj);
552 err = -ENODEV; 549 err = -ENODEV;
553 goto err_out; 550 goto err_out;
@@ -560,11 +557,9 @@ static int edac_device_create_block(struct edac_device_ctl_info *edac_dev,
560 if (sysfs_attrib && block->nr_attribs) { 557 if (sysfs_attrib && block->nr_attribs) {
561 for (i = 0; i < block->nr_attribs; i++, sysfs_attrib++) { 558 for (i = 0; i < block->nr_attribs; i++, sysfs_attrib++) {
562 559
563 debugf4("%s() creating block attrib='%s' " 560 edac_dbg(4, "creating block attrib='%s' attrib->%p to kobj=%p\n",
564 "attrib->%p to kobj=%p\n", 561 sysfs_attrib->attr.name,
565 __func__, 562 sysfs_attrib, &block->kobj);
566 sysfs_attrib->attr.name,
567 sysfs_attrib, &block->kobj);
568 563
569 /* Create each block_attribute file */ 564 /* Create each block_attribute file */
570 err = sysfs_create_file(&block->kobj, 565 err = sysfs_create_file(&block->kobj,
@@ -647,14 +642,14 @@ static int edac_device_create_instance(struct edac_device_ctl_info *edac_dev,
647 err = kobject_init_and_add(&instance->kobj, &ktype_instance_ctrl, 642 err = kobject_init_and_add(&instance->kobj, &ktype_instance_ctrl,
648 &edac_dev->kobj, "%s", instance->name); 643 &edac_dev->kobj, "%s", instance->name);
649 if (err != 0) { 644 if (err != 0) {
650 debugf2("%s() Failed to register instance '%s'\n", 645 edac_dbg(2, "Failed to register instance '%s'\n",
651 __func__, instance->name); 646 instance->name);
652 kobject_put(main_kobj); 647 kobject_put(main_kobj);
653 goto err_out; 648 goto err_out;
654 } 649 }
655 650
656 debugf4("%s() now register '%d' blocks for instance %d\n", 651 edac_dbg(4, "now register '%d' blocks for instance %d\n",
657 __func__, instance->nr_blocks, idx); 652 instance->nr_blocks, idx);
658 653
659 /* register all blocks of this instance */ 654 /* register all blocks of this instance */
660 for (i = 0; i < instance->nr_blocks; i++) { 655 for (i = 0; i < instance->nr_blocks; i++) {
@@ -670,8 +665,8 @@ static int edac_device_create_instance(struct edac_device_ctl_info *edac_dev,
670 } 665 }
671 kobject_uevent(&instance->kobj, KOBJ_ADD); 666 kobject_uevent(&instance->kobj, KOBJ_ADD);
672 667
673 debugf4("%s() Registered instance %d '%s' kobject\n", 668 edac_dbg(4, "Registered instance %d '%s' kobject\n",
674 __func__, idx, instance->name); 669 idx, instance->name);
675 670
676 return 0; 671 return 0;
677 672
@@ -715,7 +710,7 @@ static int edac_device_create_instances(struct edac_device_ctl_info *edac_dev)
715 int i, j; 710 int i, j;
716 int err; 711 int err;
717 712
718 debugf0("%s()\n", __func__); 713 edac_dbg(0, "\n");
719 714
720 /* iterate over creation of the instances */ 715 /* iterate over creation of the instances */
721 for (i = 0; i < edac_dev->nr_instances; i++) { 716 for (i = 0; i < edac_dev->nr_instances; i++) {
@@ -817,12 +812,12 @@ int edac_device_create_sysfs(struct edac_device_ctl_info *edac_dev)
817 int err; 812 int err;
818 struct kobject *edac_kobj = &edac_dev->kobj; 813 struct kobject *edac_kobj = &edac_dev->kobj;
819 814
820 debugf0("%s() idx=%d\n", __func__, edac_dev->dev_idx); 815 edac_dbg(0, "idx=%d\n", edac_dev->dev_idx);
821 816
822 /* go create any main attributes the caller wants */ 817 /* go create any main attributes the caller wants */
823 err = edac_device_add_main_sysfs_attributes(edac_dev); 818 err = edac_device_add_main_sysfs_attributes(edac_dev);
824 if (err) { 819 if (err) {
825 debugf0("%s() failed to add sysfs attribs\n", __func__); 820 edac_dbg(0, "failed to add sysfs attribs\n");
826 goto err_out; 821 goto err_out;
827 } 822 }
828 823
@@ -832,8 +827,7 @@ int edac_device_create_sysfs(struct edac_device_ctl_info *edac_dev)
832 err = sysfs_create_link(edac_kobj, 827 err = sysfs_create_link(edac_kobj,
833 &edac_dev->dev->kobj, EDAC_DEVICE_SYMLINK); 828 &edac_dev->dev->kobj, EDAC_DEVICE_SYMLINK);
834 if (err) { 829 if (err) {
835 debugf0("%s() sysfs_create_link() returned err= %d\n", 830 edac_dbg(0, "sysfs_create_link() returned err= %d\n", err);
836 __func__, err);
837 goto err_remove_main_attribs; 831 goto err_remove_main_attribs;
838 } 832 }
839 833
@@ -843,14 +837,13 @@ int edac_device_create_sysfs(struct edac_device_ctl_info *edac_dev)
843 */ 837 */
844 err = edac_device_create_instances(edac_dev); 838 err = edac_device_create_instances(edac_dev);
845 if (err) { 839 if (err) {
846 debugf0("%s() edac_device_create_instances() " 840 edac_dbg(0, "edac_device_create_instances() returned err= %d\n",
847 "returned err= %d\n", __func__, err); 841 err);
848 goto err_remove_link; 842 goto err_remove_link;
849 } 843 }
850 844
851 845
852 debugf4("%s() create-instances done, idx=%d\n", 846 edac_dbg(4, "create-instances done, idx=%d\n", edac_dev->dev_idx);
853 __func__, edac_dev->dev_idx);
854 847
855 return 0; 848 return 0;
856 849
@@ -873,7 +866,7 @@ err_out:
873 */ 866 */
874void edac_device_remove_sysfs(struct edac_device_ctl_info *edac_dev) 867void edac_device_remove_sysfs(struct edac_device_ctl_info *edac_dev)
875{ 868{
876 debugf0("%s()\n", __func__); 869 edac_dbg(0, "\n");
877 870
878 /* remove any main attributes for this device */ 871 /* remove any main attributes for this device */
879 edac_device_remove_main_sysfs_attributes(edac_dev); 872 edac_device_remove_main_sysfs_attributes(edac_dev);
diff --git a/drivers/edac/edac_mc.c b/drivers/edac/edac_mc.c
index de5ba86e8b89..616d90bcb3a4 100644
--- a/drivers/edac/edac_mc.c
+++ b/drivers/edac/edac_mc.c
@@ -27,70 +27,95 @@
27#include <linux/list.h> 27#include <linux/list.h>
28#include <linux/ctype.h> 28#include <linux/ctype.h>
29#include <linux/edac.h> 29#include <linux/edac.h>
30#include <linux/bitops.h>
30#include <asm/uaccess.h> 31#include <asm/uaccess.h>
31#include <asm/page.h> 32#include <asm/page.h>
32#include <asm/edac.h> 33#include <asm/edac.h>
33#include "edac_core.h" 34#include "edac_core.h"
34#include "edac_module.h" 35#include "edac_module.h"
35 36
37#define CREATE_TRACE_POINTS
38#define TRACE_INCLUDE_PATH ../../include/ras
39#include <ras/ras_event.h>
40
36/* lock to memory controller's control array */ 41/* lock to memory controller's control array */
37static DEFINE_MUTEX(mem_ctls_mutex); 42static DEFINE_MUTEX(mem_ctls_mutex);
38static LIST_HEAD(mc_devices); 43static LIST_HEAD(mc_devices);
39 44
45unsigned edac_dimm_info_location(struct dimm_info *dimm, char *buf,
46 unsigned len)
47{
48 struct mem_ctl_info *mci = dimm->mci;
49 int i, n, count = 0;
50 char *p = buf;
51
52 for (i = 0; i < mci->n_layers; i++) {
53 n = snprintf(p, len, "%s %d ",
54 edac_layer_name[mci->layers[i].type],
55 dimm->location[i]);
56 p += n;
57 len -= n;
58 count += n;
59 if (!len)
60 break;
61 }
62
63 return count;
64}
65
40#ifdef CONFIG_EDAC_DEBUG 66#ifdef CONFIG_EDAC_DEBUG
41 67
42static void edac_mc_dump_channel(struct rank_info *chan) 68static void edac_mc_dump_channel(struct rank_info *chan)
43{ 69{
44 debugf4("\tchannel = %p\n", chan); 70 edac_dbg(4, " channel->chan_idx = %d\n", chan->chan_idx);
45 debugf4("\tchannel->chan_idx = %d\n", chan->chan_idx); 71 edac_dbg(4, " channel = %p\n", chan);
46 debugf4("\tchannel->csrow = %p\n\n", chan->csrow); 72 edac_dbg(4, " channel->csrow = %p\n", chan->csrow);
47 debugf4("\tchannel->dimm = %p\n", chan->dimm); 73 edac_dbg(4, " channel->dimm = %p\n", chan->dimm);
48} 74}
49 75
50static void edac_mc_dump_dimm(struct dimm_info *dimm) 76static void edac_mc_dump_dimm(struct dimm_info *dimm, int number)
51{ 77{
52 int i; 78 char location[80];
53 79
54 debugf4("\tdimm = %p\n", dimm); 80 edac_dimm_info_location(dimm, location, sizeof(location));
55 debugf4("\tdimm->label = '%s'\n", dimm->label); 81
56 debugf4("\tdimm->nr_pages = 0x%x\n", dimm->nr_pages); 82 edac_dbg(4, "%s%i: %smapped as virtual row %d, chan %d\n",
57 debugf4("\tdimm location "); 83 dimm->mci->mem_is_per_rank ? "rank" : "dimm",
58 for (i = 0; i < dimm->mci->n_layers; i++) { 84 number, location, dimm->csrow, dimm->cschannel);
59 printk(KERN_CONT "%d", dimm->location[i]); 85 edac_dbg(4, " dimm = %p\n", dimm);
60 if (i < dimm->mci->n_layers - 1) 86 edac_dbg(4, " dimm->label = '%s'\n", dimm->label);
61 printk(KERN_CONT "."); 87 edac_dbg(4, " dimm->nr_pages = 0x%x\n", dimm->nr_pages);
62 } 88 edac_dbg(4, " dimm->grain = %d\n", dimm->grain);
63 printk(KERN_CONT "\n"); 89 edac_dbg(4, " dimm->nr_pages = 0x%x\n", dimm->nr_pages);
64 debugf4("\tdimm->grain = %d\n", dimm->grain);
65 debugf4("\tdimm->nr_pages = 0x%x\n", dimm->nr_pages);
66} 90}
67 91
68static void edac_mc_dump_csrow(struct csrow_info *csrow) 92static void edac_mc_dump_csrow(struct csrow_info *csrow)
69{ 93{
70 debugf4("\tcsrow = %p\n", csrow); 94 edac_dbg(4, "csrow->csrow_idx = %d\n", csrow->csrow_idx);
71 debugf4("\tcsrow->csrow_idx = %d\n", csrow->csrow_idx); 95 edac_dbg(4, " csrow = %p\n", csrow);
72 debugf4("\tcsrow->first_page = 0x%lx\n", csrow->first_page); 96 edac_dbg(4, " csrow->first_page = 0x%lx\n", csrow->first_page);
73 debugf4("\tcsrow->last_page = 0x%lx\n", csrow->last_page); 97 edac_dbg(4, " csrow->last_page = 0x%lx\n", csrow->last_page);
74 debugf4("\tcsrow->page_mask = 0x%lx\n", csrow->page_mask); 98 edac_dbg(4, " csrow->page_mask = 0x%lx\n", csrow->page_mask);
75 debugf4("\tcsrow->nr_channels = %d\n", csrow->nr_channels); 99 edac_dbg(4, " csrow->nr_channels = %d\n", csrow->nr_channels);
76 debugf4("\tcsrow->channels = %p\n", csrow->channels); 100 edac_dbg(4, " csrow->channels = %p\n", csrow->channels);
77 debugf4("\tcsrow->mci = %p\n\n", csrow->mci); 101 edac_dbg(4, " csrow->mci = %p\n", csrow->mci);
78} 102}
79 103
80static void edac_mc_dump_mci(struct mem_ctl_info *mci) 104static void edac_mc_dump_mci(struct mem_ctl_info *mci)
81{ 105{
82 debugf3("\tmci = %p\n", mci); 106 edac_dbg(3, "\tmci = %p\n", mci);
83 debugf3("\tmci->mtype_cap = %lx\n", mci->mtype_cap); 107 edac_dbg(3, "\tmci->mtype_cap = %lx\n", mci->mtype_cap);
84 debugf3("\tmci->edac_ctl_cap = %lx\n", mci->edac_ctl_cap); 108 edac_dbg(3, "\tmci->edac_ctl_cap = %lx\n", mci->edac_ctl_cap);
85 debugf3("\tmci->edac_cap = %lx\n", mci->edac_cap); 109 edac_dbg(3, "\tmci->edac_cap = %lx\n", mci->edac_cap);
86 debugf4("\tmci->edac_check = %p\n", mci->edac_check); 110 edac_dbg(4, "\tmci->edac_check = %p\n", mci->edac_check);
87 debugf3("\tmci->nr_csrows = %d, csrows = %p\n", 111 edac_dbg(3, "\tmci->nr_csrows = %d, csrows = %p\n",
88 mci->nr_csrows, mci->csrows); 112 mci->nr_csrows, mci->csrows);
89 debugf3("\tmci->nr_dimms = %d, dimms = %p\n", 113 edac_dbg(3, "\tmci->nr_dimms = %d, dimms = %p\n",
90 mci->tot_dimms, mci->dimms); 114 mci->tot_dimms, mci->dimms);
91 debugf3("\tdev = %p\n", mci->dev); 115 edac_dbg(3, "\tdev = %p\n", mci->pdev);
92 debugf3("\tmod_name:ctl_name = %s:%s\n", mci->mod_name, mci->ctl_name); 116 edac_dbg(3, "\tmod_name:ctl_name = %s:%s\n",
93 debugf3("\tpvt_info = %p\n\n", mci->pvt_info); 117 mci->mod_name, mci->ctl_name);
118 edac_dbg(3, "\tpvt_info = %p\n\n", mci->pvt_info);
94} 119}
95 120
96#endif /* CONFIG_EDAC_DEBUG */ 121#endif /* CONFIG_EDAC_DEBUG */
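
The first edac_mc.c hunk introduces edac_dimm_info_location(), which walks the controller's layers and writes "<layer-name> <position>" pairs into a caller-supplied buffer; edac_mc_dump_dimm() above is its first user. A short usage sketch (the "channel 0 slot 1" output is only an example for a two-layer controller):

	char location[80];

	edac_dimm_info_location(dimm, location, sizeof(location));
	/* location now holds e.g. "channel 0 slot 1 " */
	edac_dbg(4, "dimm is mapped at %s\n", location);
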
@@ -205,15 +230,15 @@ struct mem_ctl_info *edac_mc_alloc(unsigned mc_num,
205{ 230{
206 struct mem_ctl_info *mci; 231 struct mem_ctl_info *mci;
207 struct edac_mc_layer *layer; 232 struct edac_mc_layer *layer;
208 struct csrow_info *csi, *csr; 233 struct csrow_info *csr;
209 struct rank_info *chi, *chp, *chan; 234 struct rank_info *chan;
210 struct dimm_info *dimm; 235 struct dimm_info *dimm;
211 u32 *ce_per_layer[EDAC_MAX_LAYERS], *ue_per_layer[EDAC_MAX_LAYERS]; 236 u32 *ce_per_layer[EDAC_MAX_LAYERS], *ue_per_layer[EDAC_MAX_LAYERS];
212 unsigned pos[EDAC_MAX_LAYERS]; 237 unsigned pos[EDAC_MAX_LAYERS];
213 unsigned size, tot_dimms = 1, count = 1; 238 unsigned size, tot_dimms = 1, count = 1;
214 unsigned tot_csrows = 1, tot_channels = 1, tot_errcount = 0; 239 unsigned tot_csrows = 1, tot_channels = 1, tot_errcount = 0;
215 void *pvt, *p, *ptr = NULL; 240 void *pvt, *p, *ptr = NULL;
216 int i, j, err, row, chn, n, len; 241 int i, j, row, chn, n, len, off;
217 bool per_rank = false; 242 bool per_rank = false;
218 243
219 BUG_ON(n_layers > EDAC_MAX_LAYERS || n_layers == 0); 244 BUG_ON(n_layers > EDAC_MAX_LAYERS || n_layers == 0);
@@ -239,26 +264,24 @@ struct mem_ctl_info *edac_mc_alloc(unsigned mc_num,
239 */ 264 */
240 mci = edac_align_ptr(&ptr, sizeof(*mci), 1); 265 mci = edac_align_ptr(&ptr, sizeof(*mci), 1);
241 layer = edac_align_ptr(&ptr, sizeof(*layer), n_layers); 266 layer = edac_align_ptr(&ptr, sizeof(*layer), n_layers);
242 csi = edac_align_ptr(&ptr, sizeof(*csi), tot_csrows);
243 chi = edac_align_ptr(&ptr, sizeof(*chi), tot_csrows * tot_channels);
244 dimm = edac_align_ptr(&ptr, sizeof(*dimm), tot_dimms);
245 for (i = 0; i < n_layers; i++) { 267 for (i = 0; i < n_layers; i++) {
246 count *= layers[i].size; 268 count *= layers[i].size;
247 debugf4("%s: errcount layer %d size %d\n", __func__, i, count); 269 edac_dbg(4, "errcount layer %d size %d\n", i, count);
248 ce_per_layer[i] = edac_align_ptr(&ptr, sizeof(u32), count); 270 ce_per_layer[i] = edac_align_ptr(&ptr, sizeof(u32), count);
249 ue_per_layer[i] = edac_align_ptr(&ptr, sizeof(u32), count); 271 ue_per_layer[i] = edac_align_ptr(&ptr, sizeof(u32), count);
250 tot_errcount += 2 * count; 272 tot_errcount += 2 * count;
251 } 273 }
252 274
253 debugf4("%s: allocating %d error counters\n", __func__, tot_errcount); 275 edac_dbg(4, "allocating %d error counters\n", tot_errcount);
254 pvt = edac_align_ptr(&ptr, sz_pvt, 1); 276 pvt = edac_align_ptr(&ptr, sz_pvt, 1);
255 size = ((unsigned long)pvt) + sz_pvt; 277 size = ((unsigned long)pvt) + sz_pvt;
256 278
257 debugf1("%s(): allocating %u bytes for mci data (%d %s, %d csrows/channels)\n", 279 edac_dbg(1, "allocating %u bytes for mci data (%d %s, %d csrows/channels)\n",
258 __func__, size, 280 size,
259 tot_dimms, 281 tot_dimms,
260 per_rank ? "ranks" : "dimms", 282 per_rank ? "ranks" : "dimms",
261 tot_csrows * tot_channels); 283 tot_csrows * tot_channels);
284
262 mci = kzalloc(size, GFP_KERNEL); 285 mci = kzalloc(size, GFP_KERNEL);
263 if (mci == NULL) 286 if (mci == NULL)
264 return NULL; 287 return NULL;
@@ -267,9 +290,6 @@ struct mem_ctl_info *edac_mc_alloc(unsigned mc_num,
267 * rather than an imaginary chunk of memory located at address 0. 290 * rather than an imaginary chunk of memory located at address 0.
268 */ 291 */
269 layer = (struct edac_mc_layer *)(((char *)mci) + ((unsigned long)layer)); 292 layer = (struct edac_mc_layer *)(((char *)mci) + ((unsigned long)layer));
270 csi = (struct csrow_info *)(((char *)mci) + ((unsigned long)csi));
271 chi = (struct rank_info *)(((char *)mci) + ((unsigned long)chi));
272 dimm = (struct dimm_info *)(((char *)mci) + ((unsigned long)dimm));
273 for (i = 0; i < n_layers; i++) { 293 for (i = 0; i < n_layers; i++) {
274 mci->ce_per_layer[i] = (u32 *)((char *)mci + ((unsigned long)ce_per_layer[i])); 294 mci->ce_per_layer[i] = (u32 *)((char *)mci + ((unsigned long)ce_per_layer[i]));
275 mci->ue_per_layer[i] = (u32 *)((char *)mci + ((unsigned long)ue_per_layer[i])); 295 mci->ue_per_layer[i] = (u32 *)((char *)mci + ((unsigned long)ue_per_layer[i]));
@@ -278,8 +298,6 @@ struct mem_ctl_info *edac_mc_alloc(unsigned mc_num,
278 298
279 /* setup index and various internal pointers */ 299 /* setup index and various internal pointers */
280 mci->mc_idx = mc_num; 300 mci->mc_idx = mc_num;
281 mci->csrows = csi;
282 mci->dimms = dimm;
283 mci->tot_dimms = tot_dimms; 301 mci->tot_dimms = tot_dimms;
284 mci->pvt_info = pvt; 302 mci->pvt_info = pvt;
285 mci->n_layers = n_layers; 303 mci->n_layers = n_layers;
@@ -290,40 +308,57 @@ struct mem_ctl_info *edac_mc_alloc(unsigned mc_num,
290 mci->mem_is_per_rank = per_rank; 308 mci->mem_is_per_rank = per_rank;
291 309
292 /* 310 /*
293 * Fill the csrow struct 311 * Allocate and fill the csrow/channels structs
294 */ 312 */
313 mci->csrows = kcalloc(sizeof(*mci->csrows), tot_csrows, GFP_KERNEL);
314 if (!mci->csrows)
315 goto error;
295 for (row = 0; row < tot_csrows; row++) { 316 for (row = 0; row < tot_csrows; row++) {
296 csr = &csi[row]; 317 csr = kzalloc(sizeof(**mci->csrows), GFP_KERNEL);
318 if (!csr)
319 goto error;
320 mci->csrows[row] = csr;
297 csr->csrow_idx = row; 321 csr->csrow_idx = row;
298 csr->mci = mci; 322 csr->mci = mci;
299 csr->nr_channels = tot_channels; 323 csr->nr_channels = tot_channels;
300 chp = &chi[row * tot_channels]; 324 csr->channels = kcalloc(sizeof(*csr->channels), tot_channels,
301 csr->channels = chp; 325 GFP_KERNEL);
326 if (!csr->channels)
327 goto error;
302 328
303 for (chn = 0; chn < tot_channels; chn++) { 329 for (chn = 0; chn < tot_channels; chn++) {
304 chan = &chp[chn]; 330 chan = kzalloc(sizeof(**csr->channels), GFP_KERNEL);
331 if (!chan)
332 goto error;
333 csr->channels[chn] = chan;
305 chan->chan_idx = chn; 334 chan->chan_idx = chn;
306 chan->csrow = csr; 335 chan->csrow = csr;
307 } 336 }
308 } 337 }
309 338
310 /* 339 /*
311 * Fill the dimm struct 340 * Allocate and fill the dimm structs
312 */ 341 */
342 mci->dimms = kcalloc(sizeof(*mci->dimms), tot_dimms, GFP_KERNEL);
343 if (!mci->dimms)
344 goto error;
345
313 memset(&pos, 0, sizeof(pos)); 346 memset(&pos, 0, sizeof(pos));
314 row = 0; 347 row = 0;
315 chn = 0; 348 chn = 0;
316 debugf4("%s: initializing %d %s\n", __func__, tot_dimms,
317 per_rank ? "ranks" : "dimms");
318 for (i = 0; i < tot_dimms; i++) { 349 for (i = 0; i < tot_dimms; i++) {
319 chan = &csi[row].channels[chn]; 350 chan = mci->csrows[row]->channels[chn];
320 dimm = EDAC_DIMM_PTR(layer, mci->dimms, n_layers, 351 off = EDAC_DIMM_OFF(layer, n_layers, pos[0], pos[1], pos[2]);
321 pos[0], pos[1], pos[2]); 352 if (off < 0 || off >= tot_dimms) {
322 dimm->mci = mci; 353 edac_mc_printk(mci, KERN_ERR, "EDAC core bug: EDAC_DIMM_OFF is trying to do an illegal data access\n");
354 goto error;
355 }
323 356
324 debugf2("%s: %d: %s%zd (%d:%d:%d): row %d, chan %d\n", __func__, 357 dimm = kzalloc(sizeof(**mci->dimms), GFP_KERNEL);
325 i, per_rank ? "rank" : "dimm", (dimm - mci->dimms), 358 if (!dimm)
326 pos[0], pos[1], pos[2], row, chn); 359 goto error;
360 mci->dimms[off] = dimm;
361 dimm->mci = mci;
327 362
328 /* 363 /*
329 * Copy DIMM location and initialize it. 364 * Copy DIMM location and initialize it.
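
For the lookup above, EDAC_DIMM_OFF() collapses the per-layer positions into one flat index into mci->dimms, which the code then range-checks against tot_dimms before trusting it. A sketch of the arithmetic it is assumed to perform for a three-layer controller (not the macro's verbatim body):

	/* assumed expansion of EDAC_DIMM_OFF(layer, 3, pos[0], pos[1], pos[2]) */
	off = (pos[0] * layer[1].size + pos[1]) * layer[2].size + pos[2];
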
@@ -367,16 +402,6 @@ struct mem_ctl_info *edac_mc_alloc(unsigned mc_num,
367 } 402 }
368 403
369 mci->op_state = OP_ALLOC; 404 mci->op_state = OP_ALLOC;
370 INIT_LIST_HEAD(&mci->grp_kobj_list);
371
372 /*
373 * Initialize the 'root' kobj for the edac_mc controller
374 */
375 err = edac_mc_register_sysfs_main_kobj(mci);
376 if (err) {
377 kfree(mci);
378 return NULL;
379 }
380 405
381 /* at this point, the root kobj is valid, and in order to 406 /* at this point, the root kobj is valid, and in order to
382 * 'free' the object, then the function: 407 * 'free' the object, then the function:
@@ -384,7 +409,30 @@ struct mem_ctl_info *edac_mc_alloc(unsigned mc_num,
384 * which will perform kobj unregistration and the actual free 409 * which will perform kobj unregistration and the actual free
385 * will occur during the kobject callback operation 410 * will occur during the kobject callback operation
386 */ 411 */
412
387 return mci; 413 return mci;
414
415error:
416 if (mci->dimms) {
417 for (i = 0; i < tot_dimms; i++)
418 kfree(mci->dimms[i]);
419 kfree(mci->dimms);
420 }
421 if (mci->csrows) {
422 for (row = 0; row < tot_csrows; row++) {
423 csr = mci->csrows[row];
424 if (csr && csr->channels) {
425 for (chn = 0; chn < tot_channels; chn++)
426 kfree(csr->channels[chn]);
427 kfree(csr->channels);
428 }
429 kfree(csr);
430 }
431 kfree(mci->csrows);
432 }
433 kfree(mci);
434
435 return NULL;
388} 436}
389EXPORT_SYMBOL_GPL(edac_mc_alloc); 437EXPORT_SYMBOL_GPL(edac_mc_alloc);
390 438
@@ -395,12 +443,10 @@ EXPORT_SYMBOL_GPL(edac_mc_alloc);
395 */ 443 */
396void edac_mc_free(struct mem_ctl_info *mci) 444void edac_mc_free(struct mem_ctl_info *mci)
397{ 445{
398 debugf1("%s()\n", __func__); 446 edac_dbg(1, "\n");
399 447
400 edac_mc_unregister_sysfs_main_kobj(mci); 448 /* the mci instance is freed here, when the sysfs object is dropped */
401 449 edac_unregister_sysfs(mci);
402 /* free the mci instance memory here */
403 kfree(mci);
404} 450}
405EXPORT_SYMBOL_GPL(edac_mc_free); 451EXPORT_SYMBOL_GPL(edac_mc_free);
406 452
@@ -417,12 +463,12 @@ struct mem_ctl_info *find_mci_by_dev(struct device *dev)
417 struct mem_ctl_info *mci; 463 struct mem_ctl_info *mci;
418 struct list_head *item; 464 struct list_head *item;
419 465
420 debugf3("%s()\n", __func__); 466 edac_dbg(3, "\n");
421 467
422 list_for_each(item, &mc_devices) { 468 list_for_each(item, &mc_devices) {
423 mci = list_entry(item, struct mem_ctl_info, link); 469 mci = list_entry(item, struct mem_ctl_info, link);
424 470
425 if (mci->dev == dev) 471 if (mci->pdev == dev)
426 return mci; 472 return mci;
427 } 473 }
428 474
@@ -485,7 +531,7 @@ static void edac_mc_workq_function(struct work_struct *work_req)
485 */ 531 */
486static void edac_mc_workq_setup(struct mem_ctl_info *mci, unsigned msec) 532static void edac_mc_workq_setup(struct mem_ctl_info *mci, unsigned msec)
487{ 533{
488 debugf0("%s()\n", __func__); 534 edac_dbg(0, "\n");
489 535
490 /* if this instance is not in the POLL state, then simply return */ 536 /* if this instance is not in the POLL state, then simply return */
491 if (mci->op_state != OP_RUNNING_POLL) 537 if (mci->op_state != OP_RUNNING_POLL)
@@ -512,8 +558,7 @@ static void edac_mc_workq_teardown(struct mem_ctl_info *mci)
512 558
513 status = cancel_delayed_work(&mci->work); 559 status = cancel_delayed_work(&mci->work);
514 if (status == 0) { 560 if (status == 0) {
515 debugf0("%s() not canceled, flush the queue\n", 561 edac_dbg(0, "not canceled, flush the queue\n");
516 __func__);
517 562
518 /* workq instance might be running, wait for it */ 563 /* workq instance might be running, wait for it */
519 flush_workqueue(edac_workqueue); 564 flush_workqueue(edac_workqueue);
@@ -574,7 +619,7 @@ static int add_mc_to_global_list(struct mem_ctl_info *mci)
574 619
575 insert_before = &mc_devices; 620 insert_before = &mc_devices;
576 621
577 p = find_mci_by_dev(mci->dev); 622 p = find_mci_by_dev(mci->pdev);
578 if (unlikely(p != NULL)) 623 if (unlikely(p != NULL))
579 goto fail0; 624 goto fail0;
580 625
@@ -596,7 +641,7 @@ static int add_mc_to_global_list(struct mem_ctl_info *mci)
596 641
597fail0: 642fail0:
598 edac_printk(KERN_WARNING, EDAC_MC, 643 edac_printk(KERN_WARNING, EDAC_MC,
599 "%s (%s) %s %s already assigned %d\n", dev_name(p->dev), 644 "%s (%s) %s %s already assigned %d\n", dev_name(p->pdev),
600 edac_dev_name(mci), p->mod_name, p->ctl_name, p->mc_idx); 645 edac_dev_name(mci), p->mod_name, p->ctl_name, p->mc_idx);
601 return 1; 646 return 1;
602 647
@@ -660,7 +705,7 @@ EXPORT_SYMBOL(edac_mc_find);
660/* FIXME - should a warning be printed if no error detection? correction? */ 705/* FIXME - should a warning be printed if no error detection? correction? */
661int edac_mc_add_mc(struct mem_ctl_info *mci) 706int edac_mc_add_mc(struct mem_ctl_info *mci)
662{ 707{
663 debugf0("%s()\n", __func__); 708 edac_dbg(0, "\n");
664 709
665#ifdef CONFIG_EDAC_DEBUG 710#ifdef CONFIG_EDAC_DEBUG
666 if (edac_debug_level >= 3) 711 if (edac_debug_level >= 3)
@@ -670,15 +715,22 @@ int edac_mc_add_mc(struct mem_ctl_info *mci)
670 int i; 715 int i;
671 716
672 for (i = 0; i < mci->nr_csrows; i++) { 717 for (i = 0; i < mci->nr_csrows; i++) {
718 struct csrow_info *csrow = mci->csrows[i];
719 u32 nr_pages = 0;
673 int j; 720 int j;
674 721
675 edac_mc_dump_csrow(&mci->csrows[i]); 722 for (j = 0; j < csrow->nr_channels; j++)
676 for (j = 0; j < mci->csrows[i].nr_channels; j++) 723 nr_pages += csrow->channels[j]->dimm->nr_pages;
677 edac_mc_dump_channel(&mci->csrows[i]. 724 if (!nr_pages)
678 channels[j]); 725 continue;
726 edac_mc_dump_csrow(csrow);
727 for (j = 0; j < csrow->nr_channels; j++)
728 if (csrow->channels[j]->dimm->nr_pages)
729 edac_mc_dump_channel(csrow->channels[j]);
679 } 730 }
680 for (i = 0; i < mci->tot_dimms; i++) 731 for (i = 0; i < mci->tot_dimms; i++)
681 edac_mc_dump_dimm(&mci->dimms[i]); 732 if (mci->dimms[i]->nr_pages)
733 edac_mc_dump_dimm(mci->dimms[i], i);
682 } 734 }
683#endif 735#endif
684 mutex_lock(&mem_ctls_mutex); 736 mutex_lock(&mem_ctls_mutex);
@@ -732,7 +784,7 @@ struct mem_ctl_info *edac_mc_del_mc(struct device *dev)
732{ 784{
733 struct mem_ctl_info *mci; 785 struct mem_ctl_info *mci;
734 786
735 debugf0("%s()\n", __func__); 787 edac_dbg(0, "\n");
736 788
737 mutex_lock(&mem_ctls_mutex); 789 mutex_lock(&mem_ctls_mutex);
738 790
@@ -770,7 +822,7 @@ static void edac_mc_scrub_block(unsigned long page, unsigned long offset,
770 void *virt_addr; 822 void *virt_addr;
771 unsigned long flags = 0; 823 unsigned long flags = 0;
772 824
773 debugf3("%s()\n", __func__); 825 edac_dbg(3, "\n");
774 826
775 /* ECC error page was not in our memory. Ignore it. */ 827 /* ECC error page was not in our memory. Ignore it. */
776 if (!pfn_valid(page)) 828 if (!pfn_valid(page))
@@ -797,26 +849,26 @@ static void edac_mc_scrub_block(unsigned long page, unsigned long offset,
797/* FIXME - should return -1 */ 849/* FIXME - should return -1 */
798int edac_mc_find_csrow_by_page(struct mem_ctl_info *mci, unsigned long page) 850int edac_mc_find_csrow_by_page(struct mem_ctl_info *mci, unsigned long page)
799{ 851{
800 struct csrow_info *csrows = mci->csrows; 852 struct csrow_info **csrows = mci->csrows;
801 int row, i, j, n; 853 int row, i, j, n;
802 854
803 debugf1("MC%d: %s(): 0x%lx\n", mci->mc_idx, __func__, page); 855 edac_dbg(1, "MC%d: 0x%lx\n", mci->mc_idx, page);
804 row = -1; 856 row = -1;
805 857
806 for (i = 0; i < mci->nr_csrows; i++) { 858 for (i = 0; i < mci->nr_csrows; i++) {
807 struct csrow_info *csrow = &csrows[i]; 859 struct csrow_info *csrow = csrows[i];
808 n = 0; 860 n = 0;
809 for (j = 0; j < csrow->nr_channels; j++) { 861 for (j = 0; j < csrow->nr_channels; j++) {
810 struct dimm_info *dimm = csrow->channels[j].dimm; 862 struct dimm_info *dimm = csrow->channels[j]->dimm;
811 n += dimm->nr_pages; 863 n += dimm->nr_pages;
812 } 864 }
813 if (n == 0) 865 if (n == 0)
814 continue; 866 continue;
815 867
816 debugf3("MC%d: %s(): first(0x%lx) page(0x%lx) last(0x%lx) " 868 edac_dbg(3, "MC%d: first(0x%lx) page(0x%lx) last(0x%lx) mask(0x%lx)\n",
817 "mask(0x%lx)\n", mci->mc_idx, __func__, 869 mci->mc_idx,
818 csrow->first_page, page, csrow->last_page, 870 csrow->first_page, page, csrow->last_page,
819 csrow->page_mask); 871 csrow->page_mask);
820 872
821 if ((page >= csrow->first_page) && 873 if ((page >= csrow->first_page) &&
822 (page <= csrow->last_page) && 874 (page <= csrow->last_page) &&
@@ -845,15 +897,16 @@ const char *edac_layer_name[] = {
845EXPORT_SYMBOL_GPL(edac_layer_name); 897EXPORT_SYMBOL_GPL(edac_layer_name);
846 898
847static void edac_inc_ce_error(struct mem_ctl_info *mci, 899static void edac_inc_ce_error(struct mem_ctl_info *mci,
848 bool enable_per_layer_report, 900 bool enable_per_layer_report,
849 const int pos[EDAC_MAX_LAYERS]) 901 const int pos[EDAC_MAX_LAYERS],
902 const u16 count)
850{ 903{
851 int i, index = 0; 904 int i, index = 0;
852 905
853 mci->ce_mc++; 906 mci->ce_mc += count;
854 907
855 if (!enable_per_layer_report) { 908 if (!enable_per_layer_report) {
856 mci->ce_noinfo_count++; 909 mci->ce_noinfo_count += count;
857 return; 910 return;
858 } 911 }
859 912
@@ -861,7 +914,7 @@ static void edac_inc_ce_error(struct mem_ctl_info *mci,
861 if (pos[i] < 0) 914 if (pos[i] < 0)
862 break; 915 break;
863 index += pos[i]; 916 index += pos[i];
864 mci->ce_per_layer[i][index]++; 917 mci->ce_per_layer[i][index] += count;
865 918
866 if (i < mci->n_layers - 1) 919 if (i < mci->n_layers - 1)
867 index *= mci->layers[i + 1].size; 920 index *= mci->layers[i + 1].size;
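
The counter walk above (and its uncorrected-error twin in the next hunk) folds the layer positions into a single flat index as it descends the layers. A worked trace, assuming a two-layer controller with layer sizes {2, 4} and pos = {1, 2}:

	/*
	 * i = 0: index = 1          -> ce_per_layer[0][1] += count; index *= 4
	 * i = 1: index = 4 + 2 = 6  -> ce_per_layer[1][6] += count
	 *
	 * so layer 0 keeps one counter per top-layer entry and layer 1 keeps
	 * one counter per (top, mid) combination.
	 */
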
@@ -870,14 +923,15 @@ static void edac_inc_ce_error(struct mem_ctl_info *mci,
870 923
871static void edac_inc_ue_error(struct mem_ctl_info *mci, 924static void edac_inc_ue_error(struct mem_ctl_info *mci,
872 bool enable_per_layer_report, 925 bool enable_per_layer_report,
873 const int pos[EDAC_MAX_LAYERS]) 926 const int pos[EDAC_MAX_LAYERS],
927 const u16 count)
874{ 928{
875 int i, index = 0; 929 int i, index = 0;
876 930
877 mci->ue_mc++; 931 mci->ue_mc += count;
878 932
879 if (!enable_per_layer_report) { 933 if (!enable_per_layer_report) {
880 mci->ce_noinfo_count++; 934 mci->ce_noinfo_count += count;
881 return; 935 return;
882 } 936 }
883 937
@@ -885,7 +939,7 @@ static void edac_inc_ue_error(struct mem_ctl_info *mci,
885 if (pos[i] < 0) 939 if (pos[i] < 0)
886 break; 940 break;
887 index += pos[i]; 941 index += pos[i];
888 mci->ue_per_layer[i][index]++; 942 mci->ue_per_layer[i][index] += count;
889 943
890 if (i < mci->n_layers - 1) 944 if (i < mci->n_layers - 1)
891 index *= mci->layers[i + 1].size; 945 index *= mci->layers[i + 1].size;
@@ -893,6 +947,7 @@ static void edac_inc_ue_error(struct mem_ctl_info *mci,
893} 947}
894 948
895static void edac_ce_error(struct mem_ctl_info *mci, 949static void edac_ce_error(struct mem_ctl_info *mci,
950 const u16 error_count,
896 const int pos[EDAC_MAX_LAYERS], 951 const int pos[EDAC_MAX_LAYERS],
897 const char *msg, 952 const char *msg,
898 const char *location, 953 const char *location,
@@ -902,23 +957,25 @@ static void edac_ce_error(struct mem_ctl_info *mci,
902 const bool enable_per_layer_report, 957 const bool enable_per_layer_report,
903 const unsigned long page_frame_number, 958 const unsigned long page_frame_number,
904 const unsigned long offset_in_page, 959 const unsigned long offset_in_page,
905 u32 grain) 960 long grain)
906{ 961{
907 unsigned long remapped_page; 962 unsigned long remapped_page;
908 963
909 if (edac_mc_get_log_ce()) { 964 if (edac_mc_get_log_ce()) {
910 if (other_detail && *other_detail) 965 if (other_detail && *other_detail)
911 edac_mc_printk(mci, KERN_WARNING, 966 edac_mc_printk(mci, KERN_WARNING,
912 "CE %s on %s (%s%s - %s)\n", 967 "%d CE %s on %s (%s %s - %s)\n",
968 error_count,
913 msg, label, location, 969 msg, label, location,
914 detail, other_detail); 970 detail, other_detail);
915 else 971 else
916 edac_mc_printk(mci, KERN_WARNING, 972 edac_mc_printk(mci, KERN_WARNING,
917 "CE %s on %s (%s%s)\n", 973 "%d CE %s on %s (%s %s)\n",
974 error_count,
918 msg, label, location, 975 msg, label, location,
919 detail); 976 detail);
920 } 977 }
921 edac_inc_ce_error(mci, enable_per_layer_report, pos); 978 edac_inc_ce_error(mci, enable_per_layer_report, pos, error_count);
922 979
923 if (mci->scrub_mode & SCRUB_SW_SRC) { 980 if (mci->scrub_mode & SCRUB_SW_SRC) {
924 /* 981 /*
@@ -942,6 +999,7 @@ static void edac_ce_error(struct mem_ctl_info *mci,
942} 999}
943 1000
944static void edac_ue_error(struct mem_ctl_info *mci, 1001static void edac_ue_error(struct mem_ctl_info *mci,
1002 const u16 error_count,
945 const int pos[EDAC_MAX_LAYERS], 1003 const int pos[EDAC_MAX_LAYERS],
946 const char *msg, 1004 const char *msg,
947 const char *location, 1005 const char *location,
@@ -953,12 +1011,14 @@ static void edac_ue_error(struct mem_ctl_info *mci,
953 if (edac_mc_get_log_ue()) { 1011 if (edac_mc_get_log_ue()) {
954 if (other_detail && *other_detail) 1012 if (other_detail && *other_detail)
955 edac_mc_printk(mci, KERN_WARNING, 1013 edac_mc_printk(mci, KERN_WARNING,
956 "UE %s on %s (%s%s - %s)\n", 1014 "%d UE %s on %s (%s %s - %s)\n",
1015 error_count,
957 msg, label, location, detail, 1016 msg, label, location, detail,
958 other_detail); 1017 other_detail);
959 else 1018 else
960 edac_mc_printk(mci, KERN_WARNING, 1019 edac_mc_printk(mci, KERN_WARNING,
961 "UE %s on %s (%s%s)\n", 1020 "%d UE %s on %s (%s %s)\n",
1021 error_count,
962 msg, label, location, detail); 1022 msg, label, location, detail);
963 } 1023 }
964 1024
@@ -971,33 +1031,53 @@ static void edac_ue_error(struct mem_ctl_info *mci,
971 msg, label, location, detail); 1031 msg, label, location, detail);
972 } 1032 }
973 1033
974 edac_inc_ue_error(mci, enable_per_layer_report, pos); 1034 edac_inc_ue_error(mci, enable_per_layer_report, pos, error_count);
975} 1035}
976 1036
977#define OTHER_LABEL " or " 1037#define OTHER_LABEL " or "
1038
1039/**
1040 * edac_mc_handle_error - reports a memory event to userspace
1041 *
1042 * @type: severity of the error (CE/UE/Fatal)
1043 * @mci: a struct mem_ctl_info pointer
1044 * @error_count: Number of errors of the same type
1045 * @page_frame_number: mem page where the error occurred
1046 * @offset_in_page: offset of the error inside the page
1047 * @syndrome: ECC syndrome
1048 * @top_layer: Memory layer[0] position
1049 * @mid_layer: Memory layer[1] position
1050 * @low_layer: Memory layer[2] position
1051 * @msg: Message meaningful to the end users that
1052 * explains the event
1053 * @other_detail: Technical details about the event that
1054 * may help hardware manufacturers and
1055 * EDAC developers to analyse the event
1056 */
978void edac_mc_handle_error(const enum hw_event_mc_err_type type, 1057void edac_mc_handle_error(const enum hw_event_mc_err_type type,
979 struct mem_ctl_info *mci, 1058 struct mem_ctl_info *mci,
1059 const u16 error_count,
980 const unsigned long page_frame_number, 1060 const unsigned long page_frame_number,
981 const unsigned long offset_in_page, 1061 const unsigned long offset_in_page,
982 const unsigned long syndrome, 1062 const unsigned long syndrome,
983 const int layer0, 1063 const int top_layer,
984 const int layer1, 1064 const int mid_layer,
985 const int layer2, 1065 const int low_layer,
986 const char *msg, 1066 const char *msg,
987 const char *other_detail, 1067 const char *other_detail)
988 const void *mcelog)
989{ 1068{
990 /* FIXME: too much for stack: move it to some pre-allocated area */ 1069 /* FIXME: too much for stack: move it to some pre-allocated area */
991 char detail[80], location[80]; 1070 char detail[80], location[80];
992 char label[(EDAC_MC_LABEL_LEN + 1 + sizeof(OTHER_LABEL)) * mci->tot_dimms]; 1071 char label[(EDAC_MC_LABEL_LEN + 1 + sizeof(OTHER_LABEL)) * mci->tot_dimms];
993 char *p; 1072 char *p;
994 int row = -1, chan = -1; 1073 int row = -1, chan = -1;
995 int pos[EDAC_MAX_LAYERS] = { layer0, layer1, layer2 }; 1074 int pos[EDAC_MAX_LAYERS] = { top_layer, mid_layer, low_layer };
996 int i; 1075 int i;
997 u32 grain; 1076 long grain;
998 bool enable_per_layer_report = false; 1077 bool enable_per_layer_report = false;
1078 u8 grain_bits;
999 1079
1000 debugf3("MC%d: %s()\n", mci->mc_idx, __func__); 1080 edac_dbg(3, "MC%d\n", mci->mc_idx);
1001 1081
1002 /* 1082 /*
1003 * Check if the event report is consistent and if the memory 1083 * Check if the event report is consistent and if the memory
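
After this hunk, edac_mc_handle_error() takes the error count explicitly and drops the arch-specific mcelog pointer that used to be the last argument. A hedged sketch of a caller under the new signature; mci, pfn, offset, syndrome, chan and rank are placeholders here, not values from any particular driver:

	edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
			     1,			/* error_count */
			     pfn, offset, syndrome,
			     chan, rank, -1,	/* top/mid/low layer; -1 = not known */
			     "ECC read error", "");
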
@@ -1043,13 +1123,13 @@ void edac_mc_handle_error(const enum hw_event_mc_err_type type,
1043 p = label; 1123 p = label;
1044 *p = '\0'; 1124 *p = '\0';
1045 for (i = 0; i < mci->tot_dimms; i++) { 1125 for (i = 0; i < mci->tot_dimms; i++) {
1046 struct dimm_info *dimm = &mci->dimms[i]; 1126 struct dimm_info *dimm = mci->dimms[i];
1047 1127
1048 if (layer0 >= 0 && layer0 != dimm->location[0]) 1128 if (top_layer >= 0 && top_layer != dimm->location[0])
1049 continue; 1129 continue;
1050 if (layer1 >= 0 && layer1 != dimm->location[1]) 1130 if (mid_layer >= 0 && mid_layer != dimm->location[1])
1051 continue; 1131 continue;
1052 if (layer2 >= 0 && layer2 != dimm->location[2]) 1132 if (low_layer >= 0 && low_layer != dimm->location[2])
1053 continue; 1133 continue;
1054 1134
1055 /* get the max grain, over the error match range */ 1135 /* get the max grain, over the error match range */
@@ -1075,11 +1155,9 @@ void edac_mc_handle_error(const enum hw_event_mc_err_type type,
1075 * get csrow/channel of the DIMM, in order to allow 1155 * get csrow/channel of the DIMM, in order to allow
1076 * incrementing the compat API counters 1156 * incrementing the compat API counters
1077 */ 1157 */
1078 debugf4("%s: %s csrows map: (%d,%d)\n", 1158 edac_dbg(4, "%s csrows map: (%d,%d)\n",
1079 __func__, 1159 mci->mem_is_per_rank ? "rank" : "dimm",
1080 mci->mem_is_per_rank ? "rank" : "dimm", 1160 dimm->csrow, dimm->cschannel);
1081 dimm->csrow, dimm->cschannel);
1082
1083 if (row == -1) 1161 if (row == -1)
1084 row = dimm->csrow; 1162 row = dimm->csrow;
1085 else if (row >= 0 && row != dimm->csrow) 1163 else if (row >= 0 && row != dimm->csrow)
@@ -1095,19 +1173,18 @@ void edac_mc_handle_error(const enum hw_event_mc_err_type type,
1095 if (!enable_per_layer_report) { 1173 if (!enable_per_layer_report) {
1096 strcpy(label, "any memory"); 1174 strcpy(label, "any memory");
1097 } else { 1175 } else {
1098 debugf4("%s: csrow/channel to increment: (%d,%d)\n", 1176 edac_dbg(4, "csrow/channel to increment: (%d,%d)\n", row, chan);
1099 __func__, row, chan);
1100 if (p == label) 1177 if (p == label)
1101 strcpy(label, "unknown memory"); 1178 strcpy(label, "unknown memory");
1102 if (type == HW_EVENT_ERR_CORRECTED) { 1179 if (type == HW_EVENT_ERR_CORRECTED) {
1103 if (row >= 0) { 1180 if (row >= 0) {
1104 mci->csrows[row].ce_count++; 1181 mci->csrows[row]->ce_count += error_count;
1105 if (chan >= 0) 1182 if (chan >= 0)
1106 mci->csrows[row].channels[chan].ce_count++; 1183 mci->csrows[row]->channels[chan]->ce_count += error_count;
1107 } 1184 }
1108 } else 1185 } else
1109 if (row >= 0) 1186 if (row >= 0)
1110 mci->csrows[row].ue_count++; 1187 mci->csrows[row]->ue_count += error_count;
1111 } 1188 }
1112 1189
1113 /* Fill the RAM location data */ 1190 /* Fill the RAM location data */
@@ -1120,23 +1197,33 @@ void edac_mc_handle_error(const enum hw_event_mc_err_type type,
1120 edac_layer_name[mci->layers[i].type], 1197 edac_layer_name[mci->layers[i].type],
1121 pos[i]); 1198 pos[i]);
1122 } 1199 }
1200 if (p > location)
1201 *(p - 1) = '\0';
1202
1203 /* Report the error via the trace interface */
1204
1205 grain_bits = fls_long(grain) + 1;
1206 trace_mc_event(type, msg, label, error_count,
1207 mci->mc_idx, top_layer, mid_layer, low_layer,
1208 PAGES_TO_MiB(page_frame_number) | offset_in_page,
1209 grain_bits, syndrome, other_detail);
1123 1210
1124 /* Memory type dependent details about the error */ 1211 /* Memory type dependent details about the error */
1125 if (type == HW_EVENT_ERR_CORRECTED) { 1212 if (type == HW_EVENT_ERR_CORRECTED) {
1126 snprintf(detail, sizeof(detail), 1213 snprintf(detail, sizeof(detail),
1127 "page:0x%lx offset:0x%lx grain:%d syndrome:0x%lx", 1214 "page:0x%lx offset:0x%lx grain:%ld syndrome:0x%lx",
1128 page_frame_number, offset_in_page, 1215 page_frame_number, offset_in_page,
1129 grain, syndrome); 1216 grain, syndrome);
1130 edac_ce_error(mci, pos, msg, location, label, detail, 1217 edac_ce_error(mci, error_count, pos, msg, location, label,
1131 other_detail, enable_per_layer_report, 1218 detail, other_detail, enable_per_layer_report,
1132 page_frame_number, offset_in_page, grain); 1219 page_frame_number, offset_in_page, grain);
1133 } else { 1220 } else {
1134 snprintf(detail, sizeof(detail), 1221 snprintf(detail, sizeof(detail),
1135 "page:0x%lx offset:0x%lx grain:%d", 1222 "page:0x%lx offset:0x%lx grain:%ld",
1136 page_frame_number, offset_in_page, grain); 1223 page_frame_number, offset_in_page, grain);
1137 1224
1138 edac_ue_error(mci, pos, msg, location, label, detail, 1225 edac_ue_error(mci, error_count, pos, msg, location, label,
1139 other_detail, enable_per_layer_report); 1226 detail, other_detail, enable_per_layer_report);
1140 } 1227 }
1141} 1228}
1142EXPORT_SYMBOL_GPL(edac_mc_handle_error); 1229EXPORT_SYMBOL_GPL(edac_mc_handle_error);
diff --git a/drivers/edac/edac_mc_sysfs.c b/drivers/edac/edac_mc_sysfs.c
index f6a29b0eedc8..ed0bc07b8503 100644
--- a/drivers/edac/edac_mc_sysfs.c
+++ b/drivers/edac/edac_mc_sysfs.c
@@ -7,17 +7,21 @@
7 * 7 *
8 * Written Doug Thompson <norsk5@xmission.com> www.softwarebitmaker.com 8 * Written Doug Thompson <norsk5@xmission.com> www.softwarebitmaker.com
9 * 9 *
10 * (c) 2012 - Mauro Carvalho Chehab <mchehab@redhat.com>
11 * The entire API was rewritten and ported to use struct device
12 *
10 */ 13 */
11 14
12#include <linux/ctype.h> 15#include <linux/ctype.h>
13#include <linux/slab.h> 16#include <linux/slab.h>
14#include <linux/edac.h> 17#include <linux/edac.h>
15#include <linux/bug.h> 18#include <linux/bug.h>
19#include <linux/pm_runtime.h>
20#include <linux/uaccess.h>
16 21
17#include "edac_core.h" 22#include "edac_core.h"
18#include "edac_module.h" 23#include "edac_module.h"
19 24
20
21/* MC EDAC Controls, settable by module parameter, and sysfs */ 25/* MC EDAC Controls, settable by module parameter, and sysfs */
22static int edac_mc_log_ue = 1; 26static int edac_mc_log_ue = 1;
23static int edac_mc_log_ce = 1; 27static int edac_mc_log_ce = 1;
@@ -78,6 +82,8 @@ module_param_call(edac_mc_poll_msec, edac_set_poll_msec, param_get_int,
78 &edac_mc_poll_msec, 0644); 82 &edac_mc_poll_msec, 0644);
79MODULE_PARM_DESC(edac_mc_poll_msec, "Polling period in milliseconds"); 83MODULE_PARM_DESC(edac_mc_poll_msec, "Polling period in milliseconds");
80 84
85static struct device *mci_pdev;
86
81/* 87/*
82 * various constants for Memory Controllers 88 * various constants for Memory Controllers
83 */ 89 */
@@ -125,317 +131,526 @@ static const char *edac_caps[] = {
125 [EDAC_S16ECD16ED] = "S16ECD16ED" 131 [EDAC_S16ECD16ED] = "S16ECD16ED"
126}; 132};
127 133
128/* EDAC sysfs CSROW data structures and methods 134#ifdef CONFIG_EDAC_LEGACY_SYSFS
135/*
136 * EDAC sysfs CSROW data structures and methods
137 */
138
139#define to_csrow(k) container_of(k, struct csrow_info, dev)
140
141/*
142 * We need it to avoid namespace conflicts between the legacy API
143 * and the per-dimm/per-rank one
129 */ 144 */
145#define DEVICE_ATTR_LEGACY(_name, _mode, _show, _store) \
146 struct device_attribute dev_attr_legacy_##_name = __ATTR(_name, _mode, _show, _store)
147
148struct dev_ch_attribute {
149 struct device_attribute attr;
150 int channel;
151};
152
153#define DEVICE_CHANNEL(_name, _mode, _show, _store, _var) \
154 struct dev_ch_attribute dev_attr_legacy_##_name = \
155 { __ATTR(_name, _mode, _show, _store), (_var) }
156
157#define to_channel(k) (container_of(k, struct dev_ch_attribute, attr)->channel)
130 158
131/* Set of more default csrow<id> attribute show/store functions */ 159/* Set of more default csrow<id> attribute show/store functions */
132static ssize_t csrow_ue_count_show(struct csrow_info *csrow, char *data, 160static ssize_t csrow_ue_count_show(struct device *dev,
133 int private) 161 struct device_attribute *mattr, char *data)
134{ 162{
163 struct csrow_info *csrow = to_csrow(dev);
164
135 return sprintf(data, "%u\n", csrow->ue_count); 165 return sprintf(data, "%u\n", csrow->ue_count);
136} 166}
137 167
138static ssize_t csrow_ce_count_show(struct csrow_info *csrow, char *data, 168static ssize_t csrow_ce_count_show(struct device *dev,
139 int private) 169 struct device_attribute *mattr, char *data)
140{ 170{
171 struct csrow_info *csrow = to_csrow(dev);
172
141 return sprintf(data, "%u\n", csrow->ce_count); 173 return sprintf(data, "%u\n", csrow->ce_count);
142} 174}
143 175
144static ssize_t csrow_size_show(struct csrow_info *csrow, char *data, 176static ssize_t csrow_size_show(struct device *dev,
145 int private) 177 struct device_attribute *mattr, char *data)
146{ 178{
179 struct csrow_info *csrow = to_csrow(dev);
147 int i; 180 int i;
148 u32 nr_pages = 0; 181 u32 nr_pages = 0;
149 182
150 for (i = 0; i < csrow->nr_channels; i++) 183 for (i = 0; i < csrow->nr_channels; i++)
151 nr_pages += csrow->channels[i].dimm->nr_pages; 184 nr_pages += csrow->channels[i]->dimm->nr_pages;
152
153 return sprintf(data, "%u\n", PAGES_TO_MiB(nr_pages)); 185 return sprintf(data, "%u\n", PAGES_TO_MiB(nr_pages));
154} 186}
155 187
156static ssize_t csrow_mem_type_show(struct csrow_info *csrow, char *data, 188static ssize_t csrow_mem_type_show(struct device *dev,
157 int private) 189 struct device_attribute *mattr, char *data)
158{ 190{
159 return sprintf(data, "%s\n", mem_types[csrow->channels[0].dimm->mtype]); 191 struct csrow_info *csrow = to_csrow(dev);
192
193 return sprintf(data, "%s\n", mem_types[csrow->channels[0]->dimm->mtype]);
160} 194}
161 195
162static ssize_t csrow_dev_type_show(struct csrow_info *csrow, char *data, 196static ssize_t csrow_dev_type_show(struct device *dev,
163 int private) 197 struct device_attribute *mattr, char *data)
164{ 198{
165 return sprintf(data, "%s\n", dev_types[csrow->channels[0].dimm->dtype]); 199 struct csrow_info *csrow = to_csrow(dev);
200
201 return sprintf(data, "%s\n", dev_types[csrow->channels[0]->dimm->dtype]);
166} 202}
167 203
168static ssize_t csrow_edac_mode_show(struct csrow_info *csrow, char *data, 204static ssize_t csrow_edac_mode_show(struct device *dev,
169 int private) 205 struct device_attribute *mattr,
206 char *data)
170{ 207{
171 return sprintf(data, "%s\n", edac_caps[csrow->channels[0].dimm->edac_mode]); 208 struct csrow_info *csrow = to_csrow(dev);
209
210 return sprintf(data, "%s\n", edac_caps[csrow->channels[0]->dimm->edac_mode]);
172} 211}
173 212
174/* show/store functions for DIMM Label attributes */ 213/* show/store functions for DIMM Label attributes */
175static ssize_t channel_dimm_label_show(struct csrow_info *csrow, 214static ssize_t channel_dimm_label_show(struct device *dev,
176 char *data, int channel) 215 struct device_attribute *mattr,
216 char *data)
177{ 217{
218 struct csrow_info *csrow = to_csrow(dev);
219 unsigned chan = to_channel(mattr);
220 struct rank_info *rank = csrow->channels[chan];
221
178 /* if field has not been initialized, there is nothing to send */ 222 /* if field has not been initialized, there is nothing to send */
179 if (!csrow->channels[channel].dimm->label[0]) 223 if (!rank->dimm->label[0])
180 return 0; 224 return 0;
181 225
182 return snprintf(data, EDAC_MC_LABEL_LEN, "%s\n", 226 return snprintf(data, EDAC_MC_LABEL_LEN, "%s\n",
183 csrow->channels[channel].dimm->label); 227 rank->dimm->label);
184} 228}
185 229
186static ssize_t channel_dimm_label_store(struct csrow_info *csrow, 230static ssize_t channel_dimm_label_store(struct device *dev,
187 const char *data, 231 struct device_attribute *mattr,
188 size_t count, int channel) 232 const char *data, size_t count)
189{ 233{
234 struct csrow_info *csrow = to_csrow(dev);
235 unsigned chan = to_channel(mattr);
236 struct rank_info *rank = csrow->channels[chan];
237
190 ssize_t max_size = 0; 238 ssize_t max_size = 0;
191 239
192 max_size = min((ssize_t) count, (ssize_t) EDAC_MC_LABEL_LEN - 1); 240 max_size = min((ssize_t) count, (ssize_t) EDAC_MC_LABEL_LEN - 1);
193 strncpy(csrow->channels[channel].dimm->label, data, max_size); 241 strncpy(rank->dimm->label, data, max_size);
194 csrow->channels[channel].dimm->label[max_size] = '\0'; 242 rank->dimm->label[max_size] = '\0';
195 243
196 return max_size; 244 return max_size;
197} 245}
198 246
199/* show function for dynamic chX_ce_count attribute */ 247/* show function for dynamic chX_ce_count attribute */
200static ssize_t channel_ce_count_show(struct csrow_info *csrow, 248static ssize_t channel_ce_count_show(struct device *dev,
201 char *data, int channel) 249 struct device_attribute *mattr, char *data)
202{ 250{
203 return sprintf(data, "%u\n", csrow->channels[channel].ce_count); 251 struct csrow_info *csrow = to_csrow(dev);
252 unsigned chan = to_channel(mattr);
253 struct rank_info *rank = csrow->channels[chan];
254
255 return sprintf(data, "%u\n", rank->ce_count);
204} 256}
205 257
206/* csrow specific attribute structure */ 258/* csrow<id>/attribute files */
207struct csrowdev_attribute { 259DEVICE_ATTR_LEGACY(size_mb, S_IRUGO, csrow_size_show, NULL);
208 struct attribute attr; 260DEVICE_ATTR_LEGACY(dev_type, S_IRUGO, csrow_dev_type_show, NULL);
209 ssize_t(*show) (struct csrow_info *, char *, int); 261DEVICE_ATTR_LEGACY(mem_type, S_IRUGO, csrow_mem_type_show, NULL);
210 ssize_t(*store) (struct csrow_info *, const char *, size_t, int); 262DEVICE_ATTR_LEGACY(edac_mode, S_IRUGO, csrow_edac_mode_show, NULL);
211 int private; 263DEVICE_ATTR_LEGACY(ue_count, S_IRUGO, csrow_ue_count_show, NULL);
212}; 264DEVICE_ATTR_LEGACY(ce_count, S_IRUGO, csrow_ce_count_show, NULL);
213 265
214#define to_csrow(k) container_of(k, struct csrow_info, kobj) 266/* default attributes of the CSROW<id> object */
215#define to_csrowdev_attr(a) container_of(a, struct csrowdev_attribute, attr) 267static struct attribute *csrow_attrs[] = {
268 &dev_attr_legacy_dev_type.attr,
269 &dev_attr_legacy_mem_type.attr,
270 &dev_attr_legacy_edac_mode.attr,
271 &dev_attr_legacy_size_mb.attr,
272 &dev_attr_legacy_ue_count.attr,
273 &dev_attr_legacy_ce_count.attr,
274 NULL,
275};
216 276
217/* Set of show/store higher level functions for default csrow attributes */ 277static struct attribute_group csrow_attr_grp = {
218static ssize_t csrowdev_show(struct kobject *kobj, 278 .attrs = csrow_attrs,
219 struct attribute *attr, char *buffer) 279};
220{
221 struct csrow_info *csrow = to_csrow(kobj);
222 struct csrowdev_attribute *csrowdev_attr = to_csrowdev_attr(attr);
223 280
224 if (csrowdev_attr->show) 281static const struct attribute_group *csrow_attr_groups[] = {
225 return csrowdev_attr->show(csrow, 282 &csrow_attr_grp,
226 buffer, csrowdev_attr->private); 283 NULL
227 return -EIO; 284};
228}
229 285
230static ssize_t csrowdev_store(struct kobject *kobj, struct attribute *attr, 286static void csrow_attr_release(struct device *dev)
231 const char *buffer, size_t count)
232{ 287{
233 struct csrow_info *csrow = to_csrow(kobj); 288 struct csrow_info *csrow = container_of(dev, struct csrow_info, dev);
234 struct csrowdev_attribute *csrowdev_attr = to_csrowdev_attr(attr);
235
236 if (csrowdev_attr->store)
237 return csrowdev_attr->store(csrow,
238 buffer,
239 count, csrowdev_attr->private);
240 return -EIO;
241}
242 289
243static const struct sysfs_ops csrowfs_ops = { 290 edac_dbg(1, "Releasing csrow device %s\n", dev_name(dev));
244 .show = csrowdev_show, 291 kfree(csrow);
245 .store = csrowdev_store 292}
246};
247 293
248#define CSROWDEV_ATTR(_name,_mode,_show,_store,_private) \ 294static struct device_type csrow_attr_type = {
249static struct csrowdev_attribute attr_##_name = { \ 295 .groups = csrow_attr_groups,
250 .attr = {.name = __stringify(_name), .mode = _mode }, \ 296 .release = csrow_attr_release,
251 .show = _show, \
252 .store = _store, \
253 .private = _private, \
254}; 297};
255 298
256/* default cwrow<id>/attribute files */ 299/*
257CSROWDEV_ATTR(size_mb, S_IRUGO, csrow_size_show, NULL, 0); 300 * possible dynamic channel DIMM Label attribute files
258CSROWDEV_ATTR(dev_type, S_IRUGO, csrow_dev_type_show, NULL, 0); 301 *
259CSROWDEV_ATTR(mem_type, S_IRUGO, csrow_mem_type_show, NULL, 0); 302 */
260CSROWDEV_ATTR(edac_mode, S_IRUGO, csrow_edac_mode_show, NULL, 0);
261CSROWDEV_ATTR(ue_count, S_IRUGO, csrow_ue_count_show, NULL, 0);
262CSROWDEV_ATTR(ce_count, S_IRUGO, csrow_ce_count_show, NULL, 0);
263 303
264/* default attributes of the CSROW<id> object */ 304#define EDAC_NR_CHANNELS 6
265static struct csrowdev_attribute *default_csrow_attr[] = {
266 &attr_dev_type,
267 &attr_mem_type,
268 &attr_edac_mode,
269 &attr_size_mb,
270 &attr_ue_count,
271 &attr_ce_count,
272 NULL,
273};
274 305
275/* possible dynamic channel DIMM Label attribute files */ 306DEVICE_CHANNEL(ch0_dimm_label, S_IRUGO | S_IWUSR,
276CSROWDEV_ATTR(ch0_dimm_label, S_IRUGO | S_IWUSR,
277 channel_dimm_label_show, channel_dimm_label_store, 0); 307 channel_dimm_label_show, channel_dimm_label_store, 0);
278CSROWDEV_ATTR(ch1_dimm_label, S_IRUGO | S_IWUSR, 308DEVICE_CHANNEL(ch1_dimm_label, S_IRUGO | S_IWUSR,
279 channel_dimm_label_show, channel_dimm_label_store, 1); 309 channel_dimm_label_show, channel_dimm_label_store, 1);
280CSROWDEV_ATTR(ch2_dimm_label, S_IRUGO | S_IWUSR, 310DEVICE_CHANNEL(ch2_dimm_label, S_IRUGO | S_IWUSR,
281 channel_dimm_label_show, channel_dimm_label_store, 2); 311 channel_dimm_label_show, channel_dimm_label_store, 2);
282CSROWDEV_ATTR(ch3_dimm_label, S_IRUGO | S_IWUSR, 312DEVICE_CHANNEL(ch3_dimm_label, S_IRUGO | S_IWUSR,
283 channel_dimm_label_show, channel_dimm_label_store, 3); 313 channel_dimm_label_show, channel_dimm_label_store, 3);
284CSROWDEV_ATTR(ch4_dimm_label, S_IRUGO | S_IWUSR, 314DEVICE_CHANNEL(ch4_dimm_label, S_IRUGO | S_IWUSR,
285 channel_dimm_label_show, channel_dimm_label_store, 4); 315 channel_dimm_label_show, channel_dimm_label_store, 4);
286CSROWDEV_ATTR(ch5_dimm_label, S_IRUGO | S_IWUSR, 316DEVICE_CHANNEL(ch5_dimm_label, S_IRUGO | S_IWUSR,
287 channel_dimm_label_show, channel_dimm_label_store, 5); 317 channel_dimm_label_show, channel_dimm_label_store, 5);
288 318
289/* Total possible dynamic DIMM Label attribute file table */ 319/* Total possible dynamic DIMM Label attribute file table */
290static struct csrowdev_attribute *dynamic_csrow_dimm_attr[] = { 320static struct device_attribute *dynamic_csrow_dimm_attr[] = {
291 &attr_ch0_dimm_label, 321 &dev_attr_legacy_ch0_dimm_label.attr,
292 &attr_ch1_dimm_label, 322 &dev_attr_legacy_ch1_dimm_label.attr,
293 &attr_ch2_dimm_label, 323 &dev_attr_legacy_ch2_dimm_label.attr,
294 &attr_ch3_dimm_label, 324 &dev_attr_legacy_ch3_dimm_label.attr,
295 &attr_ch4_dimm_label, 325 &dev_attr_legacy_ch4_dimm_label.attr,
296 &attr_ch5_dimm_label 326 &dev_attr_legacy_ch5_dimm_label.attr
297}; 327};
298 328
299/* possible dynamic channel ce_count attribute files */ 329/* possible dynamic channel ce_count attribute files */
300CSROWDEV_ATTR(ch0_ce_count, S_IRUGO | S_IWUSR, channel_ce_count_show, NULL, 0); 330DEVICE_CHANNEL(ch0_ce_count, S_IRUGO | S_IWUSR,
301CSROWDEV_ATTR(ch1_ce_count, S_IRUGO | S_IWUSR, channel_ce_count_show, NULL, 1); 331 channel_ce_count_show, NULL, 0);
302CSROWDEV_ATTR(ch2_ce_count, S_IRUGO | S_IWUSR, channel_ce_count_show, NULL, 2); 332DEVICE_CHANNEL(ch1_ce_count, S_IRUGO | S_IWUSR,
303CSROWDEV_ATTR(ch3_ce_count, S_IRUGO | S_IWUSR, channel_ce_count_show, NULL, 3); 333 channel_ce_count_show, NULL, 1);
304CSROWDEV_ATTR(ch4_ce_count, S_IRUGO | S_IWUSR, channel_ce_count_show, NULL, 4); 334DEVICE_CHANNEL(ch2_ce_count, S_IRUGO | S_IWUSR,
305CSROWDEV_ATTR(ch5_ce_count, S_IRUGO | S_IWUSR, channel_ce_count_show, NULL, 5); 335 channel_ce_count_show, NULL, 2);
336DEVICE_CHANNEL(ch3_ce_count, S_IRUGO | S_IWUSR,
337 channel_ce_count_show, NULL, 3);
338DEVICE_CHANNEL(ch4_ce_count, S_IRUGO | S_IWUSR,
339 channel_ce_count_show, NULL, 4);
340DEVICE_CHANNEL(ch5_ce_count, S_IRUGO | S_IWUSR,
341 channel_ce_count_show, NULL, 5);
306 342
307/* Total possible dynamic ce_count attribute file table */ 343/* Total possible dynamic ce_count attribute file table */
308static struct csrowdev_attribute *dynamic_csrow_ce_count_attr[] = { 344static struct device_attribute *dynamic_csrow_ce_count_attr[] = {
309 &attr_ch0_ce_count, 345 &dev_attr_legacy_ch0_ce_count.attr,
310 &attr_ch1_ce_count, 346 &dev_attr_legacy_ch1_ce_count.attr,
311 &attr_ch2_ce_count, 347 &dev_attr_legacy_ch2_ce_count.attr,
312 &attr_ch3_ce_count, 348 &dev_attr_legacy_ch3_ce_count.attr,
313 &attr_ch4_ce_count, 349 &dev_attr_legacy_ch4_ce_count.attr,
314 &attr_ch5_ce_count 350 &dev_attr_legacy_ch5_ce_count.attr
315}; 351};
316 352
317#define EDAC_NR_CHANNELS 6 353static inline int nr_pages_per_csrow(struct csrow_info *csrow)
354{
355 int chan, nr_pages = 0;
356
357 for (chan = 0; chan < csrow->nr_channels; chan++)
358 nr_pages += csrow->channels[chan]->dimm->nr_pages;
359
360 return nr_pages;
361}
318 362
319/* Create dynamic CHANNEL files, indexed by 'chan', under specified CSROW */ 363/* Create a CSROW object under specified edac_mc_device */
320static int edac_create_channel_files(struct kobject *kobj, int chan) 364static int edac_create_csrow_object(struct mem_ctl_info *mci,
365 struct csrow_info *csrow, int index)
321{ 366{
322 int err = -ENODEV; 367 int err, chan;
368
369 if (csrow->nr_channels >= EDAC_NR_CHANNELS)
370 return -ENODEV;
371
372 csrow->dev.type = &csrow_attr_type;
373 csrow->dev.bus = &mci->bus;
374 device_initialize(&csrow->dev);
375 csrow->dev.parent = &mci->dev;
376 dev_set_name(&csrow->dev, "csrow%d", index);
377 dev_set_drvdata(&csrow->dev, csrow);
323 378
324 if (chan >= EDAC_NR_CHANNELS) 379 edac_dbg(0, "creating (virtual) csrow node %s\n",
380 dev_name(&csrow->dev));
381
382 err = device_add(&csrow->dev);
383 if (err < 0)
325 return err; 384 return err;
326 385
327 /* create the DIMM label attribute file */ 386 for (chan = 0; chan < csrow->nr_channels; chan++) {
328 err = sysfs_create_file(kobj, 387 /* Only expose populated DIMMs */
329 (struct attribute *) 388 if (!csrow->channels[chan]->dimm->nr_pages)
330 dynamic_csrow_dimm_attr[chan]); 389 continue;
331 390 err = device_create_file(&csrow->dev,
332 if (!err) { 391 dynamic_csrow_dimm_attr[chan]);
333 /* create the CE Count attribute file */ 392 if (err < 0)
334 err = sysfs_create_file(kobj, 393 goto error;
335 (struct attribute *) 394 err = device_create_file(&csrow->dev,
336 dynamic_csrow_ce_count_attr[chan]); 395 dynamic_csrow_ce_count_attr[chan]);
337 } else { 396 if (err < 0) {
338 debugf1("%s() dimm labels and ce_count files created", 397 device_remove_file(&csrow->dev,
339 __func__); 398 dynamic_csrow_dimm_attr[chan]);
399 goto error;
400 }
401 }
402
403 return 0;
404
405error:
406 for (--chan; chan >= 0; chan--) {
407 device_remove_file(&csrow->dev,
408 dynamic_csrow_dimm_attr[chan]);
409 device_remove_file(&csrow->dev,
410 dynamic_csrow_ce_count_attr[chan]);
340 } 411 }
412 put_device(&csrow->dev);
341 413
342 return err; 414 return err;
343} 415}
344 416
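The per-channel dimm_label and ce_count files above are ordinary struct device_attribute objects created one at a time with device_create_file(), so only populated channels get nodes and a failure part-way through has to be unwound by hand. A minimal sketch of that create/unwind idiom, using illustrative names rather than the driver's own:

/* Sketch: create one sysfs file per channel, removing the ones already
 * created if a later device_create_file() call fails. */
static int create_channel_files(struct device *dev, int nr_channels,
                                struct device_attribute **attrs)
{
        int i, err;

        for (i = 0; i < nr_channels; i++) {
                err = device_create_file(dev, attrs[i]);
                if (err)
                        goto unwind;
        }
        return 0;

unwind:
        while (--i >= 0)
                device_remove_file(dev, attrs[i]);
        return err;
}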
345/* No memory to release for this kobj */ 417/* Create all CSROW objects under the specified edac_mc_device */
346static void edac_csrow_instance_release(struct kobject *kobj) 418static int edac_create_csrow_objects(struct mem_ctl_info *mci)
347{ 419{
348 struct mem_ctl_info *mci; 420 int err, i, chan;
349 struct csrow_info *cs; 421 struct csrow_info *csrow;
422
423 for (i = 0; i < mci->nr_csrows; i++) {
424 csrow = mci->csrows[i];
425 if (!nr_pages_per_csrow(csrow))
426 continue;
427 err = edac_create_csrow_object(mci, mci->csrows[i], i);
428 if (err < 0)
429 goto error;
430 }
431 return 0;
350 432
351 debugf1("%s()\n", __func__); 433error:
434 for (--i; i >= 0; i--) {
435 csrow = mci->csrows[i];
436 if (!nr_pages_per_csrow(csrow))
437 continue;
438 for (chan = csrow->nr_channels - 1; chan >= 0; chan--) {
439 if (!csrow->channels[chan]->dimm->nr_pages)
440 continue;
441 device_remove_file(&csrow->dev,
442 dynamic_csrow_dimm_attr[chan]);
443 device_remove_file(&csrow->dev,
444 dynamic_csrow_ce_count_attr[chan]);
445 }
446 put_device(&mci->csrows[i]->dev);
447 }
352 448
353 cs = container_of(kobj, struct csrow_info, kobj); 449 return err;
354 mci = cs->mci; 450}
355 451
356 kobject_put(&mci->edac_mci_kobj); 452static void edac_delete_csrow_objects(struct mem_ctl_info *mci)
453{
454 int i, chan;
455 struct csrow_info *csrow;
456
457 for (i = mci->nr_csrows - 1; i >= 0; i--) {
458 csrow = mci->csrows[i];
459 if (!nr_pages_per_csrow(csrow))
460 continue;
461 for (chan = csrow->nr_channels - 1; chan >= 0; chan--) {
462 if (!csrow->channels[chan]->dimm->nr_pages)
463 continue;
464 edac_dbg(1, "Removing csrow %d channel %d sysfs nodes\n",
465 i, chan);
466 device_remove_file(&csrow->dev,
467 dynamic_csrow_dimm_attr[chan]);
468 device_remove_file(&csrow->dev,
469 dynamic_csrow_ce_count_attr[chan]);
470 }
471 put_device(&mci->csrows[i]->dev);
472 device_del(&mci->csrows[i]->dev);
473 }
357} 474}
475#endif
358 476
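The csrow node above, and the dimm and mci nodes that follow, all use the same driver-model recipe: a struct device embedded in the EDAC object, a struct device_type whose ->release() frees the containing object once the last reference is dropped, and the default sysfs files attached to that type as attribute groups. A generic sketch of the recipe, with made-up names:

/* Sketch of the embedded-device pattern shared by the csrow, dimm and mci nodes. */
struct edac_obj {
        struct device dev;              /* freed via its release(), never kfree()d directly */
        /* ... object payload ... */
};

static void edac_obj_release(struct device *dev)
{
        kfree(container_of(dev, struct edac_obj, dev));
}

static const struct attribute_group *edac_obj_groups[] = { NULL };

static struct device_type edac_obj_type = {
        .groups  = edac_obj_groups,     /* default sysfs files for this node */
        .release = edac_obj_release,
};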
359/* the kobj_type instance for a CSROW */ 477/*
360static struct kobj_type ktype_csrow = { 478 * Per-dimm (or per-rank) devices
361 .release = edac_csrow_instance_release, 479 */
362 .sysfs_ops = &csrowfs_ops, 480
363 .default_attrs = (struct attribute **)default_csrow_attr, 481#define to_dimm(k) container_of(k, struct dimm_info, dev)
482
483/* show/store functions for DIMM Label attributes */
484static ssize_t dimmdev_location_show(struct device *dev,
485 struct device_attribute *mattr, char *data)
486{
487 struct dimm_info *dimm = to_dimm(dev);
488
489 return edac_dimm_info_location(dimm, data, PAGE_SIZE);
490}
491
492static ssize_t dimmdev_label_show(struct device *dev,
493 struct device_attribute *mattr, char *data)
494{
495 struct dimm_info *dimm = to_dimm(dev);
496
497 /* if field has not been initialized, there is nothing to send */
498 if (!dimm->label[0])
499 return 0;
500
501 return snprintf(data, EDAC_MC_LABEL_LEN, "%s\n", dimm->label);
502}
503
504static ssize_t dimmdev_label_store(struct device *dev,
505 struct device_attribute *mattr,
506 const char *data,
507 size_t count)
508{
509 struct dimm_info *dimm = to_dimm(dev);
510
511 ssize_t max_size = 0;
512
513 max_size = min((ssize_t) count, (ssize_t) EDAC_MC_LABEL_LEN - 1);
514 strncpy(dimm->label, data, max_size);
515 dimm->label[max_size] = '\0';
516
517 return max_size;
518}
519
520static ssize_t dimmdev_size_show(struct device *dev,
521 struct device_attribute *mattr, char *data)
522{
523 struct dimm_info *dimm = to_dimm(dev);
524
525 return sprintf(data, "%u\n", PAGES_TO_MiB(dimm->nr_pages));
526}
527
528static ssize_t dimmdev_mem_type_show(struct device *dev,
529 struct device_attribute *mattr, char *data)
530{
531 struct dimm_info *dimm = to_dimm(dev);
532
533 return sprintf(data, "%s\n", mem_types[dimm->mtype]);
534}
535
536static ssize_t dimmdev_dev_type_show(struct device *dev,
537 struct device_attribute *mattr, char *data)
538{
539 struct dimm_info *dimm = to_dimm(dev);
540
541 return sprintf(data, "%s\n", dev_types[dimm->dtype]);
542}
543
544static ssize_t dimmdev_edac_mode_show(struct device *dev,
545 struct device_attribute *mattr,
546 char *data)
547{
548 struct dimm_info *dimm = to_dimm(dev);
549
550 return sprintf(data, "%s\n", edac_caps[dimm->edac_mode]);
551}
552
553/* dimm/rank attribute files */
554static DEVICE_ATTR(dimm_label, S_IRUGO | S_IWUSR,
555 dimmdev_label_show, dimmdev_label_store);
556static DEVICE_ATTR(dimm_location, S_IRUGO, dimmdev_location_show, NULL);
557static DEVICE_ATTR(size, S_IRUGO, dimmdev_size_show, NULL);
558static DEVICE_ATTR(dimm_mem_type, S_IRUGO, dimmdev_mem_type_show, NULL);
559static DEVICE_ATTR(dimm_dev_type, S_IRUGO, dimmdev_dev_type_show, NULL);
560static DEVICE_ATTR(dimm_edac_mode, S_IRUGO, dimmdev_edac_mode_show, NULL);
561
562/* attributes of the dimm<id>/rank<id> object */
563static struct attribute *dimm_attrs[] = {
564 &dev_attr_dimm_label.attr,
565 &dev_attr_dimm_location.attr,
566 &dev_attr_size.attr,
567 &dev_attr_dimm_mem_type.attr,
568 &dev_attr_dimm_dev_type.attr,
569 &dev_attr_dimm_edac_mode.attr,
570 NULL,
364}; 571};
365 572
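The dev_attr_* names in the table above come from DEVICE_ATTR(), which builds a struct device_attribute around the show/store callbacks. Roughly what one of the declarations above expands to (simplified from <linux/device.h>):

/* Approximate expansion of:
 *   static DEVICE_ATTR(size, S_IRUGO, dimmdev_size_show, NULL); */
static struct device_attribute dev_attr_size = {
        .attr  = { .name = "size", .mode = S_IRUGO },
        .show  = dimmdev_size_show,
        .store = NULL,
};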
366/* Create a CSROW object under specified edac_mc_device */ 573static struct attribute_group dimm_attr_grp = {
367static int edac_create_csrow_object(struct mem_ctl_info *mci, 574 .attrs = dimm_attrs,
368 struct csrow_info *csrow, int index) 575};
576
577static const struct attribute_group *dimm_attr_groups[] = {
578 &dimm_attr_grp,
579 NULL
580};
581
582static void dimm_attr_release(struct device *dev)
369{ 583{
370 struct kobject *kobj_mci = &mci->edac_mci_kobj; 584 struct dimm_info *dimm = container_of(dev, struct dimm_info, dev);
371 struct kobject *kobj;
372 int chan;
373 int err;
374 585
375 /* generate ..../edac/mc/mc<id>/csrow<index> */ 586 edac_dbg(1, "Releasing dimm device %s\n", dev_name(dev));
376 memset(&csrow->kobj, 0, sizeof(csrow->kobj)); 587 kfree(dimm);
377 csrow->mci = mci; /* include container up link */ 588}
378 589
379 /* bump the mci instance's kobject's ref count */ 590static struct device_type dimm_attr_type = {
380 kobj = kobject_get(&mci->edac_mci_kobj); 591 .groups = dimm_attr_groups,
381 if (!kobj) { 592 .release = dimm_attr_release,
382 err = -ENODEV; 593};
383 goto err_out; 594
384 } 595/* Create a DIMM object under specified memory controller device */
596static int edac_create_dimm_object(struct mem_ctl_info *mci,
597 struct dimm_info *dimm,
598 int index)
599{
600 int err;
601 dimm->mci = mci;
385 602
386 /* Instantiate the csrow object */ 603 dimm->dev.type = &dimm_attr_type;
387 err = kobject_init_and_add(&csrow->kobj, &ktype_csrow, kobj_mci, 604 dimm->dev.bus = &mci->bus;
388 "csrow%d", index); 605 device_initialize(&dimm->dev);
389 if (err)
390 goto err_release_top_kobj;
391 606
392 /* At this point, to release a csrow kobj, one must 607 dimm->dev.parent = &mci->dev;
393 * call the kobject_put and allow that tear down 608 if (mci->mem_is_per_rank)
394 * to work the releasing 609 dev_set_name(&dimm->dev, "rank%d", index);
395 */ 610 else
611 dev_set_name(&dimm->dev, "dimm%d", index);
612 dev_set_drvdata(&dimm->dev, dimm);
613 pm_runtime_forbid(&mci->dev);
396 614
397 /* Create the dynamic attribute files on this csrow, 615 err = device_add(&dimm->dev);
398 * namely, the DIMM labels and the channel ce_count
399 */
400 for (chan = 0; chan < csrow->nr_channels; chan++) {
401 err = edac_create_channel_files(&csrow->kobj, chan);
402 if (err) {
403 /* special case the unregister here */
404 kobject_put(&csrow->kobj);
405 goto err_out;
406 }
407 }
408 kobject_uevent(&csrow->kobj, KOBJ_ADD);
409 return 0;
410 616
411 /* error unwind stack */ 617 edac_dbg(0, "creating rank/dimm device %s\n", dev_name(&dimm->dev));
412err_release_top_kobj:
413 kobject_put(&mci->edac_mci_kobj);
414 618
415err_out:
416 return err; 619 return err;
417} 620}
418 621
419/* default sysfs methods and data structures for the main MCI kobject */ 622/*
623 * Memory controller device
624 */
625
626#define to_mci(k) container_of(k, struct mem_ctl_info, dev)
420 627
421static ssize_t mci_reset_counters_store(struct mem_ctl_info *mci, 628static ssize_t mci_reset_counters_store(struct device *dev,
629 struct device_attribute *mattr,
422 const char *data, size_t count) 630 const char *data, size_t count)
423{ 631{
424 int row, chan; 632 struct mem_ctl_info *mci = to_mci(dev);
425 633 int cnt, row, chan, i;
426 mci->ue_noinfo_count = 0;
427 mci->ce_noinfo_count = 0;
428 mci->ue_mc = 0; 634 mci->ue_mc = 0;
429 mci->ce_mc = 0; 635 mci->ce_mc = 0;
636 mci->ue_noinfo_count = 0;
637 mci->ce_noinfo_count = 0;
430 638
431 for (row = 0; row < mci->nr_csrows; row++) { 639 for (row = 0; row < mci->nr_csrows; row++) {
432 struct csrow_info *ri = &mci->csrows[row]; 640 struct csrow_info *ri = mci->csrows[row];
433 641
434 ri->ue_count = 0; 642 ri->ue_count = 0;
435 ri->ce_count = 0; 643 ri->ce_count = 0;
436 644
437 for (chan = 0; chan < ri->nr_channels; chan++) 645 for (chan = 0; chan < ri->nr_channels; chan++)
438 ri->channels[chan].ce_count = 0; 646 ri->channels[chan]->ce_count = 0;
647 }
648
649 cnt = 1;
650 for (i = 0; i < mci->n_layers; i++) {
651 cnt *= mci->layers[i].size;
652 memset(mci->ce_per_layer[i], 0, cnt * sizeof(u32));
653 memset(mci->ue_per_layer[i], 0, cnt * sizeof(u32));
439 } 654 }
440 655
441 mci->start_time = jiffies; 656 mci->start_time = jiffies;
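Besides the per-csrow counters, the reset handler now also clears the new per-layer arrays: layer i keeps one u32 counter per location reachable through layers 0..i, so its length is the running product of the layer sizes, exactly what the cnt accumulator computes above. A standalone illustration of that sizing:

/* Illustration only: with layer sizes {2, 3, 4} this returns 2, 6 and 24
 * for layers 0, 1 and 2 - the entry counts of ce_per_layer[i]/ue_per_layer[i]. */
unsigned int layer_entries(const unsigned int *sizes, int layer)
{
        unsigned int cnt = 1;
        int i;

        for (i = 0; i <= layer; i++)
                cnt *= sizes[i];
        return cnt;
}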
@@ -451,9 +666,11 @@ static ssize_t mci_reset_counters_store(struct mem_ctl_info *mci,
451 * Negative value still means that an error has occurred while setting 666 * Negative value still means that an error has occurred while setting
452 * the scrub rate. 667 * the scrub rate.
453 */ 668 */
454static ssize_t mci_sdram_scrub_rate_store(struct mem_ctl_info *mci, 669static ssize_t mci_sdram_scrub_rate_store(struct device *dev,
670 struct device_attribute *mattr,
455 const char *data, size_t count) 671 const char *data, size_t count)
456{ 672{
673 struct mem_ctl_info *mci = to_mci(dev);
457 unsigned long bandwidth = 0; 674 unsigned long bandwidth = 0;
458 int new_bw = 0; 675 int new_bw = 0;
459 676
@@ -476,8 +693,11 @@ static ssize_t mci_sdram_scrub_rate_store(struct mem_ctl_info *mci,
476/* 693/*
477 * ->get_sdram_scrub_rate() return value semantics same as above. 694 * ->get_sdram_scrub_rate() return value semantics same as above.
478 */ 695 */
479static ssize_t mci_sdram_scrub_rate_show(struct mem_ctl_info *mci, char *data) 696static ssize_t mci_sdram_scrub_rate_show(struct device *dev,
697 struct device_attribute *mattr,
698 char *data)
480{ 699{
700 struct mem_ctl_info *mci = to_mci(dev);
481 int bandwidth = 0; 701 int bandwidth = 0;
482 702
483 if (!mci->get_sdram_scrub_rate) 703 if (!mci->get_sdram_scrub_rate)
@@ -493,45 +713,72 @@ static ssize_t mci_sdram_scrub_rate_show(struct mem_ctl_info *mci, char *data)
493} 713}
494 714
495/* default attribute files for the MCI object */ 715/* default attribute files for the MCI object */
496static ssize_t mci_ue_count_show(struct mem_ctl_info *mci, char *data) 716static ssize_t mci_ue_count_show(struct device *dev,
717 struct device_attribute *mattr,
718 char *data)
497{ 719{
720 struct mem_ctl_info *mci = to_mci(dev);
721
498 return sprintf(data, "%d\n", mci->ue_mc); 722 return sprintf(data, "%d\n", mci->ue_mc);
499} 723}
500 724
501static ssize_t mci_ce_count_show(struct mem_ctl_info *mci, char *data) 725static ssize_t mci_ce_count_show(struct device *dev,
726 struct device_attribute *mattr,
727 char *data)
502{ 728{
729 struct mem_ctl_info *mci = to_mci(dev);
730
503 return sprintf(data, "%d\n", mci->ce_mc); 731 return sprintf(data, "%d\n", mci->ce_mc);
504} 732}
505 733
506static ssize_t mci_ce_noinfo_show(struct mem_ctl_info *mci, char *data) 734static ssize_t mci_ce_noinfo_show(struct device *dev,
735 struct device_attribute *mattr,
736 char *data)
507{ 737{
738 struct mem_ctl_info *mci = to_mci(dev);
739
508 return sprintf(data, "%d\n", mci->ce_noinfo_count); 740 return sprintf(data, "%d\n", mci->ce_noinfo_count);
509} 741}
510 742
511static ssize_t mci_ue_noinfo_show(struct mem_ctl_info *mci, char *data) 743static ssize_t mci_ue_noinfo_show(struct device *dev,
744 struct device_attribute *mattr,
745 char *data)
512{ 746{
747 struct mem_ctl_info *mci = to_mci(dev);
748
513 return sprintf(data, "%d\n", mci->ue_noinfo_count); 749 return sprintf(data, "%d\n", mci->ue_noinfo_count);
514} 750}
515 751
516static ssize_t mci_seconds_show(struct mem_ctl_info *mci, char *data) 752static ssize_t mci_seconds_show(struct device *dev,
753 struct device_attribute *mattr,
754 char *data)
517{ 755{
756 struct mem_ctl_info *mci = to_mci(dev);
757
518 return sprintf(data, "%ld\n", (jiffies - mci->start_time) / HZ); 758 return sprintf(data, "%ld\n", (jiffies - mci->start_time) / HZ);
519} 759}
520 760
521static ssize_t mci_ctl_name_show(struct mem_ctl_info *mci, char *data) 761static ssize_t mci_ctl_name_show(struct device *dev,
762 struct device_attribute *mattr,
763 char *data)
522{ 764{
765 struct mem_ctl_info *mci = to_mci(dev);
766
523 return sprintf(data, "%s\n", mci->ctl_name); 767 return sprintf(data, "%s\n", mci->ctl_name);
524} 768}
525 769
526static ssize_t mci_size_mb_show(struct mem_ctl_info *mci, char *data) 770static ssize_t mci_size_mb_show(struct device *dev,
771 struct device_attribute *mattr,
772 char *data)
527{ 773{
774 struct mem_ctl_info *mci = to_mci(dev);
528 int total_pages = 0, csrow_idx, j; 775 int total_pages = 0, csrow_idx, j;
529 776
530 for (csrow_idx = 0; csrow_idx < mci->nr_csrows; csrow_idx++) { 777 for (csrow_idx = 0; csrow_idx < mci->nr_csrows; csrow_idx++) {
531 struct csrow_info *csrow = &mci->csrows[csrow_idx]; 778 struct csrow_info *csrow = mci->csrows[csrow_idx];
532 779
533 for (j = 0; j < csrow->nr_channels; j++) { 780 for (j = 0; j < csrow->nr_channels; j++) {
534 struct dimm_info *dimm = csrow->channels[j].dimm; 781 struct dimm_info *dimm = csrow->channels[j]->dimm;
535 782
536 total_pages += dimm->nr_pages; 783 total_pages += dimm->nr_pages;
537 } 784 }
@@ -540,361 +787,187 @@ static ssize_t mci_size_mb_show(struct mem_ctl_info *mci, char *data)
540 return sprintf(data, "%u\n", PAGES_TO_MiB(total_pages)); 787 return sprintf(data, "%u\n", PAGES_TO_MiB(total_pages));
541} 788}
542 789
543#define to_mci(k) container_of(k, struct mem_ctl_info, edac_mci_kobj) 790static ssize_t mci_max_location_show(struct device *dev,
544#define to_mcidev_attr(a) container_of(a,struct mcidev_sysfs_attribute,attr) 791 struct device_attribute *mattr,
545 792 char *data)
546/* MCI show/store functions for top most object */
547static ssize_t mcidev_show(struct kobject *kobj, struct attribute *attr,
548 char *buffer)
549{ 793{
550 struct mem_ctl_info *mem_ctl_info = to_mci(kobj); 794 struct mem_ctl_info *mci = to_mci(dev);
551 struct mcidev_sysfs_attribute *mcidev_attr = to_mcidev_attr(attr); 795 int i;
552 796 char *p = data;
553 debugf1("%s() mem_ctl_info %p\n", __func__, mem_ctl_info);
554 797
555 if (mcidev_attr->show) 798 for (i = 0; i < mci->n_layers; i++) {
556 return mcidev_attr->show(mem_ctl_info, buffer); 799 p += sprintf(p, "%s %d ",
800 edac_layer_name[mci->layers[i].type],
801 mci->layers[i].size - 1);
802 }
557 803
558 return -EIO; 804 return p - data;
559} 805}
560 806
561static ssize_t mcidev_store(struct kobject *kobj, struct attribute *attr, 807#ifdef CONFIG_EDAC_DEBUG
562 const char *buffer, size_t count) 808static ssize_t edac_fake_inject_write(struct file *file,
809 const char __user *data,
810 size_t count, loff_t *ppos)
563{ 811{
564 struct mem_ctl_info *mem_ctl_info = to_mci(kobj); 812 struct device *dev = file->private_data;
565 struct mcidev_sysfs_attribute *mcidev_attr = to_mcidev_attr(attr); 813 struct mem_ctl_info *mci = to_mci(dev);
566 814 static enum hw_event_mc_err_type type;
567 debugf1("%s() mem_ctl_info %p\n", __func__, mem_ctl_info); 815 u16 errcount = mci->fake_inject_count;
568 816
569 if (mcidev_attr->store) 817 if (!errcount)
570 return mcidev_attr->store(mem_ctl_info, buffer, count); 818 errcount = 1;
819
820 type = mci->fake_inject_ue ? HW_EVENT_ERR_UNCORRECTED
821 : HW_EVENT_ERR_CORRECTED;
822
823 printk(KERN_DEBUG
824 "Generating %d %s fake error%s to %d.%d.%d to test core handling. NOTE: this won't test the driver-specific decoding logic.\n",
825 errcount,
826 (type == HW_EVENT_ERR_UNCORRECTED) ? "UE" : "CE",
827 errcount > 1 ? "s" : "",
828 mci->fake_inject_layer[0],
829 mci->fake_inject_layer[1],
830 mci->fake_inject_layer[2]
831 );
832 edac_mc_handle_error(type, mci, errcount, 0, 0, 0,
833 mci->fake_inject_layer[0],
834 mci->fake_inject_layer[1],
835 mci->fake_inject_layer[2],
836 "FAKE ERROR", "for EDAC testing only");
571 837
572 return -EIO; 838 return count;
573} 839}
574 840
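The injection path ends up in edac_mc_handle_error(), the same entry point real drivers use; its argument order can be read directly off the call above. A trimmed-down sketch of reporting a single corrected error at layer coordinates (0, 0, 0), with page, offset and syndrome left at zero just as the fake injector does:

/* Sketch: report one corrected error, mirroring the fake-inject call above. */
static void report_one_ce(struct mem_ctl_info *mci)
{
        edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
                             1,                /* error count */
                             0, 0, 0,          /* page, offset in page, syndrome */
                             0, 0, 0,          /* top/mid/low layer coordinates */
                             "example error", "reported by sketch code");
}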
575/* Intermediate show/store table */ 841static int debugfs_open(struct inode *inode, struct file *file)
576static const struct sysfs_ops mci_ops = { 842{
577 .show = mcidev_show, 843 file->private_data = inode->i_private;
578 .store = mcidev_store 844 return 0;
579}; 845}
580 846
581#define MCIDEV_ATTR(_name,_mode,_show,_store) \ 847static const struct file_operations debug_fake_inject_fops = {
582static struct mcidev_sysfs_attribute mci_attr_##_name = { \ 848 .open = debugfs_open,
583 .attr = {.name = __stringify(_name), .mode = _mode }, \ 849 .write = edac_fake_inject_write,
584 .show = _show, \ 850 .llseek = generic_file_llseek,
585 .store = _store, \
586}; 851};
852#endif
587 853
588/* default Control file */ 854/* default Control file */
589MCIDEV_ATTR(reset_counters, S_IWUSR, NULL, mci_reset_counters_store); 855DEVICE_ATTR(reset_counters, S_IWUSR, NULL, mci_reset_counters_store);
590 856
591/* default Attribute files */ 857/* default Attribute files */
592MCIDEV_ATTR(mc_name, S_IRUGO, mci_ctl_name_show, NULL); 858DEVICE_ATTR(mc_name, S_IRUGO, mci_ctl_name_show, NULL);
593MCIDEV_ATTR(size_mb, S_IRUGO, mci_size_mb_show, NULL); 859DEVICE_ATTR(size_mb, S_IRUGO, mci_size_mb_show, NULL);
594MCIDEV_ATTR(seconds_since_reset, S_IRUGO, mci_seconds_show, NULL); 860DEVICE_ATTR(seconds_since_reset, S_IRUGO, mci_seconds_show, NULL);
595MCIDEV_ATTR(ue_noinfo_count, S_IRUGO, mci_ue_noinfo_show, NULL); 861DEVICE_ATTR(ue_noinfo_count, S_IRUGO, mci_ue_noinfo_show, NULL);
596MCIDEV_ATTR(ce_noinfo_count, S_IRUGO, mci_ce_noinfo_show, NULL); 862DEVICE_ATTR(ce_noinfo_count, S_IRUGO, mci_ce_noinfo_show, NULL);
597MCIDEV_ATTR(ue_count, S_IRUGO, mci_ue_count_show, NULL); 863DEVICE_ATTR(ue_count, S_IRUGO, mci_ue_count_show, NULL);
598MCIDEV_ATTR(ce_count, S_IRUGO, mci_ce_count_show, NULL); 864DEVICE_ATTR(ce_count, S_IRUGO, mci_ce_count_show, NULL);
865DEVICE_ATTR(max_location, S_IRUGO, mci_max_location_show, NULL);
599 866
600/* memory scrubber attribute file */ 867/* memory scrubber attribute file */
601MCIDEV_ATTR(sdram_scrub_rate, S_IRUGO | S_IWUSR, mci_sdram_scrub_rate_show, 868DEVICE_ATTR(sdram_scrub_rate, S_IRUGO | S_IWUSR, mci_sdram_scrub_rate_show,
602 mci_sdram_scrub_rate_store); 869 mci_sdram_scrub_rate_store);
603 870
604static struct mcidev_sysfs_attribute *mci_attr[] = { 871static struct attribute *mci_attrs[] = {
605 &mci_attr_reset_counters, 872 &dev_attr_reset_counters.attr,
606 &mci_attr_mc_name, 873 &dev_attr_mc_name.attr,
607 &mci_attr_size_mb, 874 &dev_attr_size_mb.attr,
608 &mci_attr_seconds_since_reset, 875 &dev_attr_seconds_since_reset.attr,
609 &mci_attr_ue_noinfo_count, 876 &dev_attr_ue_noinfo_count.attr,
610 &mci_attr_ce_noinfo_count, 877 &dev_attr_ce_noinfo_count.attr,
611 &mci_attr_ue_count, 878 &dev_attr_ue_count.attr,
612 &mci_attr_ce_count, 879 &dev_attr_ce_count.attr,
613 &mci_attr_sdram_scrub_rate, 880 &dev_attr_sdram_scrub_rate.attr,
881 &dev_attr_max_location.attr,
614 NULL 882 NULL
615}; 883};
616 884
885static struct attribute_group mci_attr_grp = {
886 .attrs = mci_attrs,
887};
617 888
618/* 889static const struct attribute_group *mci_attr_groups[] = {
619 * Release of a MC controlling instance 890 &mci_attr_grp,
620 * 891 NULL
621 * each MC control instance has the following resources upon entry: 892};
622 * a) a ref count on the top memctl kobj
623 * b) a ref count on this module
624 *
625 * this function must decrement those ref counts and then
626 * issue a free on the instance's memory
627 */
628static void edac_mci_control_release(struct kobject *kobj)
629{
630 struct mem_ctl_info *mci;
631
632 mci = to_mci(kobj);
633 893
634 debugf0("%s() mci instance idx=%d releasing\n", __func__, mci->mc_idx); 894static void mci_attr_release(struct device *dev)
895{
896 struct mem_ctl_info *mci = container_of(dev, struct mem_ctl_info, dev);
635 897
636 /* decrement the module ref count */ 898 edac_dbg(1, "Releasing csrow device %s\n", dev_name(dev));
637 module_put(mci->owner); 899 kfree(mci);
638} 900}
639 901
640static struct kobj_type ktype_mci = { 902static struct device_type mci_attr_type = {
641 .release = edac_mci_control_release, 903 .groups = mci_attr_groups,
642 .sysfs_ops = &mci_ops, 904 .release = mci_attr_release,
643 .default_attrs = (struct attribute **)mci_attr,
644}; 905};
645 906
646/* EDAC memory controller sysfs kset: 907#ifdef CONFIG_EDAC_DEBUG
647 * /sys/devices/system/edac/mc 908static struct dentry *edac_debugfs;
648 */
649static struct kset *mc_kset;
650 909
651/* 910int __init edac_debugfs_init(void)
652 * edac_mc_register_sysfs_main_kobj
653 *
654 * setups and registers the main kobject for each mci
655 */
656int edac_mc_register_sysfs_main_kobj(struct mem_ctl_info *mci)
657{ 911{
658 struct kobject *kobj_mci; 912 edac_debugfs = debugfs_create_dir("edac", NULL);
659 int err; 913 if (IS_ERR(edac_debugfs)) {
660 914 edac_debugfs = NULL;
661 debugf1("%s()\n", __func__); 915 return -ENOMEM;
662
663 kobj_mci = &mci->edac_mci_kobj;
664
665 /* Init the mci's kobject */
666 memset(kobj_mci, 0, sizeof(*kobj_mci));
667
668 /* Record which module 'owns' this control structure
669 * and bump the ref count of the module
670 */
671 mci->owner = THIS_MODULE;
672
673 /* bump ref count on this module */
674 if (!try_module_get(mci->owner)) {
675 err = -ENODEV;
676 goto fail_out;
677 }
678
679 /* this instance become part of the mc_kset */
680 kobj_mci->kset = mc_kset;
681
682 /* register the mc<id> kobject to the mc_kset */
683 err = kobject_init_and_add(kobj_mci, &ktype_mci, NULL,
684 "mc%d", mci->mc_idx);
685 if (err) {
686 debugf1("%s()Failed to register '.../edac/mc%d'\n",
687 __func__, mci->mc_idx);
688 goto kobj_reg_fail;
689 } 916 }
690 kobject_uevent(kobj_mci, KOBJ_ADD);
691
692 /* At this point, to 'free' the control struct,
693 * edac_mc_unregister_sysfs_main_kobj() must be used
694 */
695
696 debugf1("%s() Registered '.../edac/mc%d' kobject\n",
697 __func__, mci->mc_idx);
698
699 return 0; 917 return 0;
700
701 /* Error exit stack */
702
703kobj_reg_fail:
704 module_put(mci->owner);
705
706fail_out:
707 return err;
708}
709
710/*
711 * edac_mc_unregister_sysfs_main_kobj
712 *
713 * tears down the main mci kobject from the mc_kset
714 */
715void edac_mc_unregister_sysfs_main_kobj(struct mem_ctl_info *mci)
716{
717 debugf1("%s()\n", __func__);
718
719 /* delete the kobj from the mc_kset */
720 kobject_put(&mci->edac_mci_kobj);
721}
722
723#define EDAC_DEVICE_SYMLINK "device"
724
725#define grp_to_mci(k) (container_of(k, struct mcidev_sysfs_group_kobj, kobj)->mci)
726
727/* MCI show/store functions for top most object */
728static ssize_t inst_grp_show(struct kobject *kobj, struct attribute *attr,
729 char *buffer)
730{
731 struct mem_ctl_info *mem_ctl_info = grp_to_mci(kobj);
732 struct mcidev_sysfs_attribute *mcidev_attr = to_mcidev_attr(attr);
733
734 debugf1("%s() mem_ctl_info %p\n", __func__, mem_ctl_info);
735
736 if (mcidev_attr->show)
737 return mcidev_attr->show(mem_ctl_info, buffer);
738
739 return -EIO;
740} 918}
741 919
742static ssize_t inst_grp_store(struct kobject *kobj, struct attribute *attr, 920void __exit edac_debugfs_exit(void)
743 const char *buffer, size_t count)
744{ 921{
745 struct mem_ctl_info *mem_ctl_info = grp_to_mci(kobj); 922 debugfs_remove(edac_debugfs);
746 struct mcidev_sysfs_attribute *mcidev_attr = to_mcidev_attr(attr);
747
748 debugf1("%s() mem_ctl_info %p\n", __func__, mem_ctl_info);
749
750 if (mcidev_attr->store)
751 return mcidev_attr->store(mem_ctl_info, buffer, count);
752
753 return -EIO;
754} 923}
755 924
756/* No memory to release for this kobj */ 925int edac_create_debug_nodes(struct mem_ctl_info *mci)
757static void edac_inst_grp_release(struct kobject *kobj)
758{ 926{
759 struct mcidev_sysfs_group_kobj *grp; 927 struct dentry *d, *parent;
760 struct mem_ctl_info *mci; 928 char name[80];
761 929 int i;
762 debugf1("%s()\n", __func__);
763
764 grp = container_of(kobj, struct mcidev_sysfs_group_kobj, kobj);
765 mci = grp->mci;
766}
767
768/* Intermediate show/store table */
769static struct sysfs_ops inst_grp_ops = {
770 .show = inst_grp_show,
771 .store = inst_grp_store
772};
773
774/* the kobj_type instance for a instance group */
775static struct kobj_type ktype_inst_grp = {
776 .release = edac_inst_grp_release,
777 .sysfs_ops = &inst_grp_ops,
778};
779
780 930
781/* 931 if (!edac_debugfs)
782 * edac_create_mci_instance_attributes 932 return -ENODEV;
783 * create MC driver specific attributes below a specified kobj
784 * This routine calls itself recursively, in order to create an entire
785 * object tree.
786 */
787static int edac_create_mci_instance_attributes(struct mem_ctl_info *mci,
788 const struct mcidev_sysfs_attribute *sysfs_attrib,
789 struct kobject *kobj)
790{
791 int err;
792 933
793 debugf4("%s()\n", __func__); 934 d = debugfs_create_dir(mci->dev.kobj.name, edac_debugfs);
794 935 if (!d)
795 while (sysfs_attrib) { 936 return -ENOMEM;
796 debugf4("%s() sysfs_attrib = %p\n",__func__, sysfs_attrib); 937 parent = d;
797 if (sysfs_attrib->grp) { 938
798 struct mcidev_sysfs_group_kobj *grp_kobj; 939 for (i = 0; i < mci->n_layers; i++) {
799 940 sprintf(name, "fake_inject_%s",
800 grp_kobj = kzalloc(sizeof(*grp_kobj), GFP_KERNEL); 941 edac_layer_name[mci->layers[i].type]);
801 if (!grp_kobj) 942 d = debugfs_create_u8(name, S_IRUGO | S_IWUSR, parent,
802 return -ENOMEM; 943 &mci->fake_inject_layer[i]);
803 944 if (!d)
804 grp_kobj->grp = sysfs_attrib->grp; 945 goto nomem;
805 grp_kobj->mci = mci;
806 list_add_tail(&grp_kobj->list, &mci->grp_kobj_list);
807
808 debugf0("%s() grp %s, mci %p\n", __func__,
809 sysfs_attrib->grp->name, mci);
810
811 err = kobject_init_and_add(&grp_kobj->kobj,
812 &ktype_inst_grp,
813 &mci->edac_mci_kobj,
814 sysfs_attrib->grp->name);
815 if (err < 0) {
816 printk(KERN_ERR "kobject_init_and_add failed: %d\n", err);
817 return err;
818 }
819 err = edac_create_mci_instance_attributes(mci,
820 grp_kobj->grp->mcidev_attr,
821 &grp_kobj->kobj);
822
823 if (err < 0)
824 return err;
825 } else if (sysfs_attrib->attr.name) {
826 debugf4("%s() file %s\n", __func__,
827 sysfs_attrib->attr.name);
828
829 err = sysfs_create_file(kobj, &sysfs_attrib->attr);
830 if (err < 0) {
831 printk(KERN_ERR "sysfs_create_file failed: %d\n", err);
832 return err;
833 }
834 } else
835 break;
836
837 sysfs_attrib++;
838 } 946 }
839 947
840 return 0; 948 d = debugfs_create_bool("fake_inject_ue", S_IRUGO | S_IWUSR, parent,
841} 949 &mci->fake_inject_ue);
950 if (!d)
951 goto nomem;
842 952
843/* 953 d = debugfs_create_u16("fake_inject_count", S_IRUGO | S_IWUSR, parent,
844 * edac_remove_mci_instance_attributes 954 &mci->fake_inject_count);
845 * remove MC driver specific attributes at the topmost level 955 if (!d)
846 * directory of this mci instance. 956 goto nomem;
847 */
848static void edac_remove_mci_instance_attributes(struct mem_ctl_info *mci,
849 const struct mcidev_sysfs_attribute *sysfs_attrib,
850 struct kobject *kobj, int count)
851{
852 struct mcidev_sysfs_group_kobj *grp_kobj, *tmp;
853 957
854 debugf1("%s()\n", __func__); 958 d = debugfs_create_file("fake_inject", S_IWUSR, parent,
855 959 &mci->dev,
856 /* 960 &debug_fake_inject_fops);
857 * loop if there are attributes and until we hit a NULL entry 961 if (!d)
858 * Remove first all the attributes 962 goto nomem;
859 */
860 while (sysfs_attrib) {
861 debugf4("%s() sysfs_attrib = %p\n",__func__, sysfs_attrib);
862 if (sysfs_attrib->grp) {
863 debugf4("%s() seeking for group %s\n",
864 __func__, sysfs_attrib->grp->name);
865 list_for_each_entry(grp_kobj,
866 &mci->grp_kobj_list, list) {
867 debugf4("%s() grp_kobj->grp = %p\n",__func__, grp_kobj->grp);
868 if (grp_kobj->grp == sysfs_attrib->grp) {
869 edac_remove_mci_instance_attributes(mci,
870 grp_kobj->grp->mcidev_attr,
871 &grp_kobj->kobj, count + 1);
872 debugf4("%s() group %s\n", __func__,
873 sysfs_attrib->grp->name);
874 kobject_put(&grp_kobj->kobj);
875 }
876 }
877 debugf4("%s() end of seeking for group %s\n",
878 __func__, sysfs_attrib->grp->name);
879 } else if (sysfs_attrib->attr.name) {
880 debugf4("%s() file %s\n", __func__,
881 sysfs_attrib->attr.name);
882 sysfs_remove_file(kobj, &sysfs_attrib->attr);
883 } else
884 break;
885 sysfs_attrib++;
886 }
887 963
888 /* Remove the group objects */ 964 mci->debugfs = parent;
889 if (count) 965 return 0;
890 return; 966nomem:
891 list_for_each_entry_safe(grp_kobj, tmp, 967 debugfs_remove(mci->debugfs);
892 &mci->grp_kobj_list, list) { 968 return -ENOMEM;
893 list_del(&grp_kobj->list);
894 kfree(grp_kobj);
895 }
896} 969}
897 970#endif
898 971
899/* 972/*
900 * Create a new Memory Controller kobject instance, 973 * Create a new Memory Controller kobject instance,
@@ -906,77 +979,87 @@ static void edac_remove_mci_instance_attributes(struct mem_ctl_info *mci,
906 */ 979 */
907int edac_create_sysfs_mci_device(struct mem_ctl_info *mci) 980int edac_create_sysfs_mci_device(struct mem_ctl_info *mci)
908{ 981{
909 int i, j; 982 int i, err;
910 int err;
911 struct csrow_info *csrow;
912 struct kobject *kobj_mci = &mci->edac_mci_kobj;
913 983
914 debugf0("%s() idx=%d\n", __func__, mci->mc_idx); 984 /*
915 985 * The memory controller needs its own bus, in order to avoid
916 INIT_LIST_HEAD(&mci->grp_kobj_list); 986 * namespace conflicts at /sys/bus/edac.
987 */
988 mci->bus.name = kasprintf(GFP_KERNEL, "mc%d", mci->mc_idx);
989 if (!mci->bus.name)
990 return -ENOMEM;
991 edac_dbg(0, "creating bus %s\n", mci->bus.name);
992 err = bus_register(&mci->bus);
993 if (err < 0)
994 return err;
917 995
918 /* create a symlink for the device */ 996 /* get the /sys/devices/system/edac subsys reference */
919 err = sysfs_create_link(kobj_mci, &mci->dev->kobj, 997 mci->dev.type = &mci_attr_type;
920 EDAC_DEVICE_SYMLINK); 998 device_initialize(&mci->dev);
921 if (err) { 999
922 debugf1("%s() failure to create symlink\n", __func__); 1000 mci->dev.parent = mci_pdev;
923 goto fail0; 1001 mci->dev.bus = &mci->bus;
1002 dev_set_name(&mci->dev, "mc%d", mci->mc_idx);
1003 dev_set_drvdata(&mci->dev, mci);
1004 pm_runtime_forbid(&mci->dev);
1005
1006 edac_dbg(0, "creating device %s\n", dev_name(&mci->dev));
1007 err = device_add(&mci->dev);
1008 if (err < 0) {
1009 bus_unregister(&mci->bus);
1010 kfree(mci->bus.name);
1011 return err;
924 } 1012 }
925 1013
926 /* If the low level driver desires some attributes, 1014 /*
927 * then create them now for the driver. 1015 * Create the dimm/rank devices
928 */ 1016 */
929 if (mci->mc_driver_sysfs_attributes) { 1017 for (i = 0; i < mci->tot_dimms; i++) {
930 err = edac_create_mci_instance_attributes(mci, 1018 struct dimm_info *dimm = mci->dimms[i];
931 mci->mc_driver_sysfs_attributes, 1019 /* Only expose populated DIMMs */
932 &mci->edac_mci_kobj); 1020 if (dimm->nr_pages == 0)
1021 continue;
1022#ifdef CONFIG_EDAC_DEBUG
1023 edac_dbg(1, "creating dimm%d, located at ", i);
1024 if (edac_debug_level >= 1) {
1025 int lay;
1026 for (lay = 0; lay < mci->n_layers; lay++)
1027 printk(KERN_CONT "%s %d ",
1028 edac_layer_name[mci->layers[lay].type],
1029 dimm->location[lay]);
1030 printk(KERN_CONT "\n");
1031 }
1032#endif
1033 err = edac_create_dimm_object(mci, dimm, i);
933 if (err) { 1034 if (err) {
934 debugf1("%s() failure to create mci attributes\n", 1035 edac_dbg(1, "failure: create dimm %d obj\n", i);
935 __func__); 1036 goto fail;
936 goto fail0;
937 } 1037 }
938 } 1038 }
939 1039
940 /* Make directories for each CSROW object under the mc<id> kobject 1040#ifdef CONFIG_EDAC_LEGACY_SYSFS
941 */ 1041 err = edac_create_csrow_objects(mci);
942 for (i = 0; i < mci->nr_csrows; i++) { 1042 if (err < 0)
943 int nr_pages = 0; 1043 goto fail;
944 1044#endif
945 csrow = &mci->csrows[i];
946 for (j = 0; j < csrow->nr_channels; j++)
947 nr_pages += csrow->channels[j].dimm->nr_pages;
948
949 if (nr_pages > 0) {
950 err = edac_create_csrow_object(mci, csrow, i);
951 if (err) {
952 debugf1("%s() failure: create csrow %d obj\n",
953 __func__, i);
954 goto fail1;
955 }
956 }
957 }
958 1045
1046#ifdef CONFIG_EDAC_DEBUG
1047 edac_create_debug_nodes(mci);
1048#endif
959 return 0; 1049 return 0;
960 1050
961fail1: 1051fail:
962 for (i--; i >= 0; i--) { 1052 for (i--; i >= 0; i--) {
963 int nr_pages = 0; 1053 struct dimm_info *dimm = mci->dimms[i];
964 1054 if (dimm->nr_pages == 0)
965 csrow = &mci->csrows[i]; 1055 continue;
966 for (j = 0; j < csrow->nr_channels; j++) 1056 put_device(&dimm->dev);
967 nr_pages += csrow->channels[j].dimm->nr_pages; 1057 device_del(&dimm->dev);
968 if (nr_pages > 0)
969 kobject_put(&mci->csrows[i].kobj);
970 } 1058 }
971 1059 put_device(&mci->dev);
972 /* remove the mci instance's attributes, if any */ 1060 device_del(&mci->dev);
973 edac_remove_mci_instance_attributes(mci, 1061 bus_unregister(&mci->bus);
974 mci->mc_driver_sysfs_attributes, &mci->edac_mci_kobj, 0); 1062 kfree(mci->bus.name);
975
976 /* remove the symlink */
977 sysfs_remove_link(kobj_mci, EDAC_DEVICE_SYMLINK);
978
979fail0:
980 return err; 1063 return err;
981} 1064}
982 1065
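Each controller now owns a private bus named after it, so the dimm/rank and csrow children can be registered without colliding in a shared namespace, and teardown has to undo the same steps in reverse, as edac_unregister_sysfs() further down does. A compressed sketch of that ordering, with error handling and the reference drops elided:

/* Sketch of the registration order used above and its mirror-image teardown. */
static int example_register(struct mem_ctl_info *mci)
{
        mci->bus.name = kasprintf(GFP_KERNEL, "mc%d", mci->mc_idx);
        if (!mci->bus.name)
                return -ENOMEM;
        bus_register(&mci->bus);        /* private bus, one per controller */
        return device_add(&mci->dev);   /* the mc<N> node; children hang off it */
}

static void example_unregister(struct mem_ctl_info *mci)
{
        device_del(&mci->dev);
        bus_unregister(&mci->bus);
        kfree(mci->bus.name);
}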
@@ -985,98 +1068,84 @@ fail0:
985 */ 1068 */
986void edac_remove_sysfs_mci_device(struct mem_ctl_info *mci) 1069void edac_remove_sysfs_mci_device(struct mem_ctl_info *mci)
987{ 1070{
988 struct csrow_info *csrow; 1071 int i;
989 int i, j;
990
991 debugf0("%s()\n", __func__);
992
993 /* remove all csrow kobjects */
994 debugf4("%s() unregister this mci kobj\n", __func__);
995 for (i = 0; i < mci->nr_csrows; i++) {
996 int nr_pages = 0;
997
998 csrow = &mci->csrows[i];
999 for (j = 0; j < csrow->nr_channels; j++)
1000 nr_pages += csrow->channels[j].dimm->nr_pages;
1001 if (nr_pages > 0) {
1002 debugf0("%s() unreg csrow-%d\n", __func__, i);
1003 kobject_put(&mci->csrows[i].kobj);
1004 }
1005 }
1006 1072
1007 /* remove this mci instance's attributes */ 1073 edac_dbg(0, "\n");
1008 if (mci->mc_driver_sysfs_attributes) { 1074
1009 debugf4("%s() unregister mci private attributes\n", __func__); 1075#ifdef CONFIG_EDAC_DEBUG
1010 edac_remove_mci_instance_attributes(mci, 1076 debugfs_remove(mci->debugfs);
1011 mci->mc_driver_sysfs_attributes, 1077#endif
1012 &mci->edac_mci_kobj, 0); 1078#ifdef CONFIG_EDAC_LEGACY_SYSFS
1079 edac_delete_csrow_objects(mci);
1080#endif
1081
1082 for (i = 0; i < mci->tot_dimms; i++) {
1083 struct dimm_info *dimm = mci->dimms[i];
1084 if (dimm->nr_pages == 0)
1085 continue;
1086 edac_dbg(0, "removing device %s\n", dev_name(&dimm->dev));
1087 put_device(&dimm->dev);
1088 device_del(&dimm->dev);
1013 } 1089 }
1014
1015 /* remove the symlink */
1016 debugf4("%s() remove_link\n", __func__);
1017 sysfs_remove_link(&mci->edac_mci_kobj, EDAC_DEVICE_SYMLINK);
1018
1019 /* unregister this instance's kobject */
1020 debugf4("%s() remove_mci_instance\n", __func__);
1021 kobject_put(&mci->edac_mci_kobj);
1022} 1090}
1023 1091
1092void edac_unregister_sysfs(struct mem_ctl_info *mci)
1093{
1094 edac_dbg(1, "Unregistering device %s\n", dev_name(&mci->dev));
1095 put_device(&mci->dev);
1096 device_del(&mci->dev);
1097 bus_unregister(&mci->bus);
1098 kfree(mci->bus.name);
1099}
1024 1100
1101static void mc_attr_release(struct device *dev)
1102{
1103 /*
1104 * There's no container structure here, as this is just the mci
1105 * parent device, used to create the /sys/devices/mc sysfs node.
1106 * So, there are no attributes on it.
1107 */
1108 edac_dbg(1, "Releasing device %s\n", dev_name(dev));
1109 kfree(dev);
1110}
1025 1111
1026 1112static struct device_type mc_attr_type = {
1113 .release = mc_attr_release,
1114};
1027/* 1115/*
1028 * edac_setup_sysfs_mc_kset(void) 1116 * Init/exit code for the module. Basically, creates/removes the edac 'mc' sysfs nodes
1029 *
1030 * Initialize the mc_kset for the 'mc' entry
1031 * This requires creating the top 'mc' directory with a kset
1032 * and its controls/attributes.
1033 *
1034 * To this 'mc' kset, instance 'mci' will be grouped as children.
1035 *
1036 * Return: 0 SUCCESS
1037 * !0 FAILURE error code
1038 */ 1117 */
1039int edac_sysfs_setup_mc_kset(void) 1118int __init edac_mc_sysfs_init(void)
1040{ 1119{
1041 int err = -EINVAL;
1042 struct bus_type *edac_subsys; 1120 struct bus_type *edac_subsys;
1043 1121 int err;
1044 debugf1("%s()\n", __func__);
1045 1122
1046 /* get the /sys/devices/system/edac subsys reference */ 1123 /* get the /sys/devices/system/edac subsys reference */
1047 edac_subsys = edac_get_sysfs_subsys(); 1124 edac_subsys = edac_get_sysfs_subsys();
1048 if (edac_subsys == NULL) { 1125 if (edac_subsys == NULL) {
1049 debugf1("%s() no edac_subsys error=%d\n", __func__, err); 1126 edac_dbg(1, "no edac_subsys\n");
1050 goto fail_out; 1127 return -EINVAL;
1051 } 1128 }
1052 1129
1053 /* Init the MC's kobject */ 1130 mci_pdev = kzalloc(sizeof(*mci_pdev), GFP_KERNEL);
1054 mc_kset = kset_create_and_add("mc", NULL, &edac_subsys->dev_root->kobj);
1055 if (!mc_kset) {
1056 err = -ENOMEM;
1057 debugf1("%s() Failed to register '.../edac/mc'\n", __func__);
1058 goto fail_kset;
1059 }
1060 1131
1061 debugf1("%s() Registered '.../edac/mc' kobject\n", __func__); 1132 mci_pdev->bus = edac_subsys;
1133 mci_pdev->type = &mc_attr_type;
1134 device_initialize(mci_pdev);
1135 dev_set_name(mci_pdev, "mc");
1062 1136
1063 return 0; 1137 err = device_add(mci_pdev);
1138 if (err < 0)
1139 return err;
1064 1140
1065fail_kset: 1141 edac_dbg(0, "device %s created\n", dev_name(mci_pdev));
1066 edac_put_sysfs_subsys();
1067 1142
1068fail_out: 1143 return 0;
1069 return err;
1070} 1144}
1071 1145
1072/* 1146void __exit edac_mc_sysfs_exit(void)
1073 * edac_sysfs_teardown_mc_kset
1074 *
1075 * deconstruct the mc_kset for memory controllers
1076 */
1077void edac_sysfs_teardown_mc_kset(void)
1078{ 1147{
1079 kset_unregister(mc_kset); 1148 put_device(mci_pdev);
1149 device_del(mci_pdev);
1080 edac_put_sysfs_subsys(); 1150 edac_put_sysfs_subsys();
1081} 1151}
1082
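Taken together, the rewrite replaces the old kset/kobject tree with a small device hierarchy rooted at the new "mc" parent device. Summarized as a comment, using the node names from the code above:

/*
 * Device hierarchy created by edac_mc_sysfs.c after this change:
 *
 *   mci_pdev ("mc")            - parent node for every controller
 *     mc<N>  (mci->dev)        - one per mem_ctl_info, on its own mc<N> bus
 *       dimm<M> / rank<M>      - one per populated DIMM or rank
 *       csrow<R>               - legacy nodes, only with CONFIG_EDAC_LEGACY_SYSFS
 */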
diff --git a/drivers/edac/edac_module.c b/drivers/edac/edac_module.c
index 5ddaa86d6a6e..58a28d838f37 100644
--- a/drivers/edac/edac_module.c
+++ b/drivers/edac/edac_module.c
@@ -15,7 +15,7 @@
15#include "edac_core.h" 15#include "edac_core.h"
16#include "edac_module.h" 16#include "edac_module.h"
17 17
18#define EDAC_VERSION "Ver: 2.1.0" 18#define EDAC_VERSION "Ver: 3.0.0"
19 19
20#ifdef CONFIG_EDAC_DEBUG 20#ifdef CONFIG_EDAC_DEBUG
21/* Values of 0 to 4 will generate output */ 21/* Values of 0 to 4 will generate output */
@@ -90,26 +90,21 @@ static int __init edac_init(void)
90 */ 90 */
91 edac_pci_clear_parity_errors(); 91 edac_pci_clear_parity_errors();
92 92
93 /* 93 err = edac_mc_sysfs_init();
94 * now set up the mc_kset under the edac class object
95 */
96 err = edac_sysfs_setup_mc_kset();
97 if (err) 94 if (err)
98 goto error; 95 goto error;
99 96
97 edac_debugfs_init();
98
100 /* Setup/Initialize the workq for this core */ 99 /* Setup/Initialize the workq for this core */
101 err = edac_workqueue_setup(); 100 err = edac_workqueue_setup();
102 if (err) { 101 if (err) {
103 edac_printk(KERN_ERR, EDAC_MC, "init WorkQueue failure\n"); 102 edac_printk(KERN_ERR, EDAC_MC, "init WorkQueue failure\n");
104 goto workq_fail; 103 goto error;
105 } 104 }
106 105
107 return 0; 106 return 0;
108 107
109 /* Error teardown stack */
110workq_fail:
111 edac_sysfs_teardown_mc_kset();
112
113error: 108error:
114 return err; 109 return err;
115} 110}
@@ -120,11 +115,12 @@ error:
120 */ 115 */
121static void __exit edac_exit(void) 116static void __exit edac_exit(void)
122{ 117{
123 debugf0("%s()\n", __func__); 118 edac_dbg(0, "\n");
124 119
125 /* tear down the various subsystems */ 120 /* tear down the various subsystems */
126 edac_workqueue_teardown(); 121 edac_workqueue_teardown();
127 edac_sysfs_teardown_mc_kset(); 122 edac_mc_sysfs_exit();
123 edac_debugfs_exit();
128} 124}
129 125
130/* 126/*
diff --git a/drivers/edac/edac_module.h b/drivers/edac/edac_module.h
index 0ea7d14cb930..3d139c6e7fe3 100644
--- a/drivers/edac/edac_module.h
+++ b/drivers/edac/edac_module.h
@@ -19,12 +19,12 @@
19 * 19 *
20 * edac_mc objects 20 * edac_mc objects
21 */ 21 */
22extern int edac_sysfs_setup_mc_kset(void); 22 /* on edac_mc_sysfs.c */
23extern void edac_sysfs_teardown_mc_kset(void); 23int edac_mc_sysfs_init(void);
24extern int edac_mc_register_sysfs_main_kobj(struct mem_ctl_info *mci); 24void edac_mc_sysfs_exit(void);
25extern void edac_mc_unregister_sysfs_main_kobj(struct mem_ctl_info *mci);
26extern int edac_create_sysfs_mci_device(struct mem_ctl_info *mci); 25extern int edac_create_sysfs_mci_device(struct mem_ctl_info *mci);
27extern void edac_remove_sysfs_mci_device(struct mem_ctl_info *mci); 26extern void edac_remove_sysfs_mci_device(struct mem_ctl_info *mci);
27void edac_unregister_sysfs(struct mem_ctl_info *mci);
28extern int edac_get_log_ue(void); 28extern int edac_get_log_ue(void);
29extern int edac_get_log_ce(void); 29extern int edac_get_log_ce(void);
30extern int edac_get_panic_on_ue(void); 30extern int edac_get_panic_on_ue(void);
@@ -34,6 +34,10 @@ extern int edac_mc_get_panic_on_ue(void);
34extern int edac_get_poll_msec(void); 34extern int edac_get_poll_msec(void);
35extern int edac_mc_get_poll_msec(void); 35extern int edac_mc_get_poll_msec(void);
36 36
37unsigned edac_dimm_info_location(struct dimm_info *dimm, char *buf,
38 unsigned len);
39
40 /* on edac_device.c */
37extern int edac_device_register_sysfs_main_kobj( 41extern int edac_device_register_sysfs_main_kobj(
38 struct edac_device_ctl_info *edac_dev); 42 struct edac_device_ctl_info *edac_dev);
39extern void edac_device_unregister_sysfs_main_kobj( 43extern void edac_device_unregister_sysfs_main_kobj(
@@ -53,6 +57,20 @@ extern void edac_mc_reset_delay_period(int value);
53extern void *edac_align_ptr(void **p, unsigned size, int n_elems); 57extern void *edac_align_ptr(void **p, unsigned size, int n_elems);
54 58
55/* 59/*
60 * EDAC debugfs functions
61 */
62#ifdef CONFIG_EDAC_DEBUG
63int edac_debugfs_init(void);
64void edac_debugfs_exit(void);
65#else
66static inline int edac_debugfs_init(void)
67{
68 return -ENODEV;
69}
70static inline void edac_debugfs_exit(void) {}
71#endif
72
73/*
56 * EDAC PCI functions 74 * EDAC PCI functions
57 */ 75 */
58#ifdef CONFIG_PCI 76#ifdef CONFIG_PCI
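The header keeps callers ifdef-free: with CONFIG_EDAC_DEBUG disabled, edac_debugfs_init()/edac_debugfs_exit() collapse into inline stubs, so edac_module.c can call them unconditionally. The same pattern works for any optional facility; a generic sketch:

/* Generic sketch of the config-gated stub pattern used for the debugfs hooks. */
#ifdef CONFIG_MYFEATURE
int myfeature_init(void);
void myfeature_exit(void);
#else
static inline int myfeature_init(void) { return -ENODEV; }  /* treated as "not built" */
static inline void myfeature_exit(void) { }
#endif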
diff --git a/drivers/edac/edac_pci.c b/drivers/edac/edac_pci.c
index f1ac86649886..ee87ef972ead 100644
--- a/drivers/edac/edac_pci.c
+++ b/drivers/edac/edac_pci.c
@@ -45,7 +45,7 @@ struct edac_pci_ctl_info *edac_pci_alloc_ctl_info(unsigned int sz_pvt,
45 void *p = NULL, *pvt; 45 void *p = NULL, *pvt;
46 unsigned int size; 46 unsigned int size;
47 47
48 debugf1("%s()\n", __func__); 48 edac_dbg(1, "\n");
49 49
50 pci = edac_align_ptr(&p, sizeof(*pci), 1); 50 pci = edac_align_ptr(&p, sizeof(*pci), 1);
51 pvt = edac_align_ptr(&p, 1, sz_pvt); 51 pvt = edac_align_ptr(&p, 1, sz_pvt);
@@ -80,7 +80,7 @@ EXPORT_SYMBOL_GPL(edac_pci_alloc_ctl_info);
80 */ 80 */
81void edac_pci_free_ctl_info(struct edac_pci_ctl_info *pci) 81void edac_pci_free_ctl_info(struct edac_pci_ctl_info *pci)
82{ 82{
83 debugf1("%s()\n", __func__); 83 edac_dbg(1, "\n");
84 84
85 edac_pci_remove_sysfs(pci); 85 edac_pci_remove_sysfs(pci);
86} 86}
@@ -97,7 +97,7 @@ static struct edac_pci_ctl_info *find_edac_pci_by_dev(struct device *dev)
97 struct edac_pci_ctl_info *pci; 97 struct edac_pci_ctl_info *pci;
98 struct list_head *item; 98 struct list_head *item;
99 99
100 debugf1("%s()\n", __func__); 100 edac_dbg(1, "\n");
101 101
102 list_for_each(item, &edac_pci_list) { 102 list_for_each(item, &edac_pci_list) {
103 pci = list_entry(item, struct edac_pci_ctl_info, link); 103 pci = list_entry(item, struct edac_pci_ctl_info, link);
@@ -122,7 +122,7 @@ static int add_edac_pci_to_global_list(struct edac_pci_ctl_info *pci)
122 struct list_head *item, *insert_before; 122 struct list_head *item, *insert_before;
123 struct edac_pci_ctl_info *rover; 123 struct edac_pci_ctl_info *rover;
124 124
125 debugf1("%s()\n", __func__); 125 edac_dbg(1, "\n");
126 126
127 insert_before = &edac_pci_list; 127 insert_before = &edac_pci_list;
128 128
@@ -226,7 +226,7 @@ static void edac_pci_workq_function(struct work_struct *work_req)
226 int msec; 226 int msec;
227 unsigned long delay; 227 unsigned long delay;
228 228
229 debugf3("%s() checking\n", __func__); 229 edac_dbg(3, "checking\n");
230 230
231 mutex_lock(&edac_pci_ctls_mutex); 231 mutex_lock(&edac_pci_ctls_mutex);
232 232
@@ -261,7 +261,7 @@ static void edac_pci_workq_function(struct work_struct *work_req)
261static void edac_pci_workq_setup(struct edac_pci_ctl_info *pci, 261static void edac_pci_workq_setup(struct edac_pci_ctl_info *pci,
262 unsigned int msec) 262 unsigned int msec)
263{ 263{
264 debugf0("%s()\n", __func__); 264 edac_dbg(0, "\n");
265 265
266 INIT_DELAYED_WORK(&pci->work, edac_pci_workq_function); 266 INIT_DELAYED_WORK(&pci->work, edac_pci_workq_function);
267 queue_delayed_work(edac_workqueue, &pci->work, 267 queue_delayed_work(edac_workqueue, &pci->work,
@@ -276,7 +276,7 @@ static void edac_pci_workq_teardown(struct edac_pci_ctl_info *pci)
276{ 276{
277 int status; 277 int status;
278 278
279 debugf0("%s()\n", __func__); 279 edac_dbg(0, "\n");
280 280
281 status = cancel_delayed_work(&pci->work); 281 status = cancel_delayed_work(&pci->work);
282 if (status == 0) 282 if (status == 0)
@@ -293,7 +293,7 @@ static void edac_pci_workq_teardown(struct edac_pci_ctl_info *pci)
293void edac_pci_reset_delay_period(struct edac_pci_ctl_info *pci, 293void edac_pci_reset_delay_period(struct edac_pci_ctl_info *pci,
294 unsigned long value) 294 unsigned long value)
295{ 295{
296 debugf0("%s()\n", __func__); 296 edac_dbg(0, "\n");
297 297
298 edac_pci_workq_teardown(pci); 298 edac_pci_workq_teardown(pci);
299 299
@@ -333,7 +333,7 @@ EXPORT_SYMBOL_GPL(edac_pci_alloc_index);
333 */ 333 */
334int edac_pci_add_device(struct edac_pci_ctl_info *pci, int edac_idx) 334int edac_pci_add_device(struct edac_pci_ctl_info *pci, int edac_idx)
335{ 335{
336 debugf0("%s()\n", __func__); 336 edac_dbg(0, "\n");
337 337
338 pci->pci_idx = edac_idx; 338 pci->pci_idx = edac_idx;
339 pci->start_time = jiffies; 339 pci->start_time = jiffies;
@@ -393,7 +393,7 @@ struct edac_pci_ctl_info *edac_pci_del_device(struct device *dev)
393{ 393{
394 struct edac_pci_ctl_info *pci; 394 struct edac_pci_ctl_info *pci;
395 395
396 debugf0("%s()\n", __func__); 396 edac_dbg(0, "\n");
397 397
398 mutex_lock(&edac_pci_ctls_mutex); 398 mutex_lock(&edac_pci_ctls_mutex);
399 399
@@ -430,7 +430,7 @@ EXPORT_SYMBOL_GPL(edac_pci_del_device);
430 */ 430 */
431static void edac_pci_generic_check(struct edac_pci_ctl_info *pci) 431static void edac_pci_generic_check(struct edac_pci_ctl_info *pci)
432{ 432{
433 debugf4("%s()\n", __func__); 433 edac_dbg(4, "\n");
434 edac_pci_do_parity_check(); 434 edac_pci_do_parity_check();
435} 435}
436 436
@@ -475,7 +475,7 @@ struct edac_pci_ctl_info *edac_pci_create_generic_ctl(struct device *dev,
475 pdata->edac_idx = edac_pci_idx++; 475 pdata->edac_idx = edac_pci_idx++;
476 476
477 if (edac_pci_add_device(pci, pdata->edac_idx) > 0) { 477 if (edac_pci_add_device(pci, pdata->edac_idx) > 0) {
478 debugf3("%s(): failed edac_pci_add_device()\n", __func__); 478 edac_dbg(3, "failed edac_pci_add_device()\n");
479 edac_pci_free_ctl_info(pci); 479 edac_pci_free_ctl_info(pci);
480 return NULL; 480 return NULL;
481 } 481 }
@@ -491,7 +491,7 @@ EXPORT_SYMBOL_GPL(edac_pci_create_generic_ctl);
491 */ 491 */
492void edac_pci_release_generic_ctl(struct edac_pci_ctl_info *pci) 492void edac_pci_release_generic_ctl(struct edac_pci_ctl_info *pci)
493{ 493{
494 debugf0("%s() pci mod=%s\n", __func__, pci->mod_name); 494 edac_dbg(0, "pci mod=%s\n", pci->mod_name);
495 495
496 edac_pci_del_device(pci->dev); 496 edac_pci_del_device(pci->dev);
497 edac_pci_free_ctl_info(pci); 497 edac_pci_free_ctl_info(pci);
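The edac_pci.c changes are mechanical conversions from the old debugfX("%s() ...", __func__) calls to edac_dbg(X, ...), which supplies the function name itself. The real macro lives in edac_core.h; a plausible shape for such a level-filtered macro, assuming only the existing edac_debug_level knob, would be:

/* Plausible shape only - the real edac_dbg() in edac_core.h may differ in detail. */
#define my_edac_dbg(level, fmt, ...)                                    \
do {                                                                    \
        if ((level) <= edac_debug_level)                                \
                printk(KERN_DEBUG "EDAC DEBUG: %s: " fmt,               \
                       __func__, ##__VA_ARGS__);                        \
} while (0)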
diff --git a/drivers/edac/edac_pci_sysfs.c b/drivers/edac/edac_pci_sysfs.c
index 97f5064e3992..e164c555a337 100644
--- a/drivers/edac/edac_pci_sysfs.c
+++ b/drivers/edac/edac_pci_sysfs.c
@@ -78,7 +78,7 @@ static void edac_pci_instance_release(struct kobject *kobj)
78{ 78{
79 struct edac_pci_ctl_info *pci; 79 struct edac_pci_ctl_info *pci;
80 80
81 debugf0("%s()\n", __func__); 81 edac_dbg(0, "\n");
82 82
83 /* Form pointer to containing struct, the pci control struct */ 83 /* Form pointer to containing struct, the pci control struct */
84 pci = to_instance(kobj); 84 pci = to_instance(kobj);
@@ -161,7 +161,7 @@ static int edac_pci_create_instance_kobj(struct edac_pci_ctl_info *pci, int idx)
161 struct kobject *main_kobj; 161 struct kobject *main_kobj;
162 int err; 162 int err;
163 163
164 debugf0("%s()\n", __func__); 164 edac_dbg(0, "\n");
165 165
166 /* First bump the ref count on the top main kobj, which will 166 /* First bump the ref count on the top main kobj, which will
167 * track the number of PCI instances we have, and thus nest 167 * track the number of PCI instances we have, and thus nest
@@ -177,14 +177,13 @@ static int edac_pci_create_instance_kobj(struct edac_pci_ctl_info *pci, int idx)
177 err = kobject_init_and_add(&pci->kobj, &ktype_pci_instance, 177 err = kobject_init_and_add(&pci->kobj, &ktype_pci_instance,
178 edac_pci_top_main_kobj, "pci%d", idx); 178 edac_pci_top_main_kobj, "pci%d", idx);
179 if (err != 0) { 179 if (err != 0) {
180 debugf2("%s() failed to register instance pci%d\n", 180 edac_dbg(2, "failed to register instance pci%d\n", idx);
181 __func__, idx);
182 kobject_put(edac_pci_top_main_kobj); 181 kobject_put(edac_pci_top_main_kobj);
183 goto error_out; 182 goto error_out;
184 } 183 }
185 184
186 kobject_uevent(&pci->kobj, KOBJ_ADD); 185 kobject_uevent(&pci->kobj, KOBJ_ADD);
187 debugf1("%s() Register instance 'pci%d' kobject\n", __func__, idx); 186 edac_dbg(1, "Register instance 'pci%d' kobject\n", idx);
188 187
189 return 0; 188 return 0;
190 189
@@ -201,7 +200,7 @@ error_out:
201static void edac_pci_unregister_sysfs_instance_kobj( 200static void edac_pci_unregister_sysfs_instance_kobj(
202 struct edac_pci_ctl_info *pci) 201 struct edac_pci_ctl_info *pci)
203{ 202{
204 debugf0("%s()\n", __func__); 203 edac_dbg(0, "\n");
205 204
206 /* Unregister the instance kobject and allow its release 205 /* Unregister the instance kobject and allow its release
207 * function release the main reference count and then 206 * function release the main reference count and then
@@ -317,7 +316,7 @@ static struct edac_pci_dev_attribute *edac_pci_attr[] = {
317 */ 316 */
318static void edac_pci_release_main_kobj(struct kobject *kobj) 317static void edac_pci_release_main_kobj(struct kobject *kobj)
319{ 318{
320 debugf0("%s() here to module_put(THIS_MODULE)\n", __func__); 319 edac_dbg(0, "here to module_put(THIS_MODULE)\n");
321 320
322 kfree(kobj); 321 kfree(kobj);
323 322
@@ -345,7 +344,7 @@ static int edac_pci_main_kobj_setup(void)
345 int err; 344 int err;
346 struct bus_type *edac_subsys; 345 struct bus_type *edac_subsys;
347 346
348 debugf0("%s()\n", __func__); 347 edac_dbg(0, "\n");
349 348
350 /* check and count if we have already created the main kobject */ 349 /* check and count if we have already created the main kobject */
351 if (atomic_inc_return(&edac_pci_sysfs_refcount) != 1) 350 if (atomic_inc_return(&edac_pci_sysfs_refcount) != 1)
@@ -356,7 +355,7 @@ static int edac_pci_main_kobj_setup(void)
356 */ 355 */
357 edac_subsys = edac_get_sysfs_subsys(); 356 edac_subsys = edac_get_sysfs_subsys();
358 if (edac_subsys == NULL) { 357 if (edac_subsys == NULL) {
359 debugf1("%s() no edac_subsys\n", __func__); 358 edac_dbg(1, "no edac_subsys\n");
360 err = -ENODEV; 359 err = -ENODEV;
361 goto decrement_count_fail; 360 goto decrement_count_fail;
362 } 361 }
@@ -366,14 +365,14 @@ static int edac_pci_main_kobj_setup(void)
366 * level main kobj for EDAC PCI 365 * level main kobj for EDAC PCI
367 */ 366 */
368 if (!try_module_get(THIS_MODULE)) { 367 if (!try_module_get(THIS_MODULE)) {
369 debugf1("%s() try_module_get() failed\n", __func__); 368 edac_dbg(1, "try_module_get() failed\n");
370 err = -ENODEV; 369 err = -ENODEV;
371 goto mod_get_fail; 370 goto mod_get_fail;
372 } 371 }
373 372
374 edac_pci_top_main_kobj = kzalloc(sizeof(struct kobject), GFP_KERNEL); 373 edac_pci_top_main_kobj = kzalloc(sizeof(struct kobject), GFP_KERNEL);
375 if (!edac_pci_top_main_kobj) { 374 if (!edac_pci_top_main_kobj) {
376 debugf1("Failed to allocate\n"); 375 edac_dbg(1, "Failed to allocate\n");
377 err = -ENOMEM; 376 err = -ENOMEM;
378 goto kzalloc_fail; 377 goto kzalloc_fail;
379 } 378 }
@@ -383,7 +382,7 @@ static int edac_pci_main_kobj_setup(void)
383 &ktype_edac_pci_main_kobj, 382 &ktype_edac_pci_main_kobj,
384 &edac_subsys->dev_root->kobj, "pci"); 383 &edac_subsys->dev_root->kobj, "pci");
385 if (err) { 384 if (err) {
386 debugf1("Failed to register '.../edac/pci'\n"); 385 edac_dbg(1, "Failed to register '.../edac/pci'\n");
387 goto kobject_init_and_add_fail; 386 goto kobject_init_and_add_fail;
388 } 387 }
389 388
@@ -392,7 +391,7 @@ static int edac_pci_main_kobj_setup(void)
392 * must be used, for resources to be cleaned up properly 391 * must be used, for resources to be cleaned up properly
393 */ 392 */
394 kobject_uevent(edac_pci_top_main_kobj, KOBJ_ADD); 393 kobject_uevent(edac_pci_top_main_kobj, KOBJ_ADD);
395 debugf1("Registered '.../edac/pci' kobject\n"); 394 edac_dbg(1, "Registered '.../edac/pci' kobject\n");
396 395
397 return 0; 396 return 0;
398 397
@@ -421,15 +420,14 @@ decrement_count_fail:
421 */ 420 */
422static void edac_pci_main_kobj_teardown(void) 421static void edac_pci_main_kobj_teardown(void)
423{ 422{
424 debugf0("%s()\n", __func__); 423 edac_dbg(0, "\n");
425 424
426 /* Decrement the count and only if no more controller instances 425 /* Decrement the count and only if no more controller instances
427 * are connected perform the unregisteration of the top level 426 * are connected perform the unregisteration of the top level
428 * main kobj 427 * main kobj
429 */ 428 */
430 if (atomic_dec_return(&edac_pci_sysfs_refcount) == 0) { 429 if (atomic_dec_return(&edac_pci_sysfs_refcount) == 0) {
431 debugf0("%s() called kobject_put on main kobj\n", 430 edac_dbg(0, "called kobject_put on main kobj\n");
432 __func__);
433 kobject_put(edac_pci_top_main_kobj); 431 kobject_put(edac_pci_top_main_kobj);
434 } 432 }
435 edac_put_sysfs_subsys(); 433 edac_put_sysfs_subsys();
@@ -446,7 +444,7 @@ int edac_pci_create_sysfs(struct edac_pci_ctl_info *pci)
446 int err; 444 int err;
447 struct kobject *edac_kobj = &pci->kobj; 445 struct kobject *edac_kobj = &pci->kobj;
448 446
449 debugf0("%s() idx=%d\n", __func__, pci->pci_idx); 447 edac_dbg(0, "idx=%d\n", pci->pci_idx);
450 448
451 /* create the top main EDAC PCI kobject, IF needed */ 449 /* create the top main EDAC PCI kobject, IF needed */
452 err = edac_pci_main_kobj_setup(); 450 err = edac_pci_main_kobj_setup();
@@ -460,8 +458,7 @@ int edac_pci_create_sysfs(struct edac_pci_ctl_info *pci)
460 458
461 err = sysfs_create_link(edac_kobj, &pci->dev->kobj, EDAC_PCI_SYMLINK); 459 err = sysfs_create_link(edac_kobj, &pci->dev->kobj, EDAC_PCI_SYMLINK);
462 if (err) { 460 if (err) {
463 debugf0("%s() sysfs_create_link() returned err= %d\n", 461 edac_dbg(0, "sysfs_create_link() returned err= %d\n", err);
464 __func__, err);
465 goto symlink_fail; 462 goto symlink_fail;
466 } 463 }
467 464
@@ -484,7 +481,7 @@ unregister_cleanup:
484 */ 481 */
485void edac_pci_remove_sysfs(struct edac_pci_ctl_info *pci) 482void edac_pci_remove_sysfs(struct edac_pci_ctl_info *pci)
486{ 483{
487 debugf0("%s() index=%d\n", __func__, pci->pci_idx); 484 edac_dbg(0, "index=%d\n", pci->pci_idx);
488 485
489 /* Remove the symlink */ 486 /* Remove the symlink */
490 sysfs_remove_link(&pci->kobj, EDAC_PCI_SYMLINK); 487 sysfs_remove_link(&pci->kobj, EDAC_PCI_SYMLINK);
@@ -496,7 +493,7 @@ void edac_pci_remove_sysfs(struct edac_pci_ctl_info *pci)
496 * if this 'pci' is the last instance. 493 * if this 'pci' is the last instance.
497 * If it is, the main kobject will be unregistered as a result 494 * If it is, the main kobject will be unregistered as a result
498 */ 495 */
499 debugf0("%s() calling edac_pci_main_kobj_teardown()\n", __func__); 496 edac_dbg(0, "calling edac_pci_main_kobj_teardown()\n");
500 edac_pci_main_kobj_teardown(); 497 edac_pci_main_kobj_teardown();
501} 498}
502 499
@@ -572,7 +569,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
572 569
573 local_irq_restore(flags); 570 local_irq_restore(flags);
574 571
575 debugf4("PCI STATUS= 0x%04x %s\n", status, dev_name(&dev->dev)); 572 edac_dbg(4, "PCI STATUS= 0x%04x %s\n", status, dev_name(&dev->dev));
576 573
577 /* check the status reg for errors on boards NOT marked as broken 574 /* check the status reg for errors on boards NOT marked as broken
578 * if broken, we cannot trust any of the status bits 575 * if broken, we cannot trust any of the status bits
@@ -603,13 +600,15 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
603 } 600 }
604 601
605 602
606 debugf4("PCI HEADER TYPE= 0x%02x %s\n", header_type, dev_name(&dev->dev)); 603 edac_dbg(4, "PCI HEADER TYPE= 0x%02x %s\n",
604 header_type, dev_name(&dev->dev));
607 605
608 if ((header_type & 0x7F) == PCI_HEADER_TYPE_BRIDGE) { 606 if ((header_type & 0x7F) == PCI_HEADER_TYPE_BRIDGE) {
609 /* On bridges, need to examine secondary status register */ 607 /* On bridges, need to examine secondary status register */
610 status = get_pci_parity_status(dev, 1); 608 status = get_pci_parity_status(dev, 1);
611 609
612 debugf4("PCI SEC_STATUS= 0x%04x %s\n", status, dev_name(&dev->dev)); 610 edac_dbg(4, "PCI SEC_STATUS= 0x%04x %s\n",
611 status, dev_name(&dev->dev));
613 612
614 /* check the secondary status reg for errors, 613 /* check the secondary status reg for errors,
615 * on NOT broken boards 614 * on NOT broken boards
@@ -671,7 +670,7 @@ void edac_pci_do_parity_check(void)
671{ 670{
672 int before_count; 671 int before_count;
673 672
674 debugf3("%s()\n", __func__); 673 edac_dbg(3, "\n");
675 674
676 /* if policy has PCI check off, leave now */ 675 /* if policy has PCI check off, leave now */
677 if (!check_pci_errors) 676 if (!check_pci_errors)
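
The edac_pci_sysfs.c hunks above are pure logging conversions: every debugfN("%s() ...", __func__, ...) becomes edac_dbg(N, ...), and the explicit __func__ argument disappears because the new macro supplies the function name itself. A minimal sketch of such a macro, assuming the existing edac_debug_level module parameter gates the output (the real definition lives in edac_core.h and differs in detail):

    /* Sketch only; not the in-tree definition. */
    #include <linux/printk.h>

    extern int edac_debug_level;	/* existing EDAC module parameter */

    #define edac_dbg(level, fmt, ...)					\
    do {								\
    	if ((level) <= edac_debug_level)				\
    		pr_debug("EDAC DEBUG: %s: " fmt,			\
    			 __func__, ##__VA_ARGS__);			\
    } while (0)

With the function name centralized in one place, the per-call format strings shrink and several multi-line calls collapse back to a single line, which accounts for most of the churn in this file.
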
diff --git a/drivers/edac/highbank_l2_edac.c b/drivers/edac/highbank_l2_edac.c
new file mode 100644
index 000000000000..e599b00c05a8
--- /dev/null
+++ b/drivers/edac/highbank_l2_edac.c
@@ -0,0 +1,149 @@
1/*
2 * Copyright 2011-2012 Calxeda, Inc.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 *
13 * You should have received a copy of the GNU General Public License along with
14 * this program. If not, see <http://www.gnu.org/licenses/>.
15 */
16#include <linux/types.h>
17#include <linux/kernel.h>
18#include <linux/ctype.h>
19#include <linux/edac.h>
20#include <linux/interrupt.h>
21#include <linux/platform_device.h>
22#include <linux/of_platform.h>
23
24#include "edac_core.h"
25#include "edac_module.h"
26
27#define SR_CLR_SB_ECC_INTR 0x0
28#define SR_CLR_DB_ECC_INTR 0x4
29
30struct hb_l2_drvdata {
31 void __iomem *base;
32 int sb_irq;
33 int db_irq;
34};
35
36static irqreturn_t highbank_l2_err_handler(int irq, void *dev_id)
37{
38 struct edac_device_ctl_info *dci = dev_id;
39 struct hb_l2_drvdata *drvdata = dci->pvt_info;
40
41 if (irq == drvdata->sb_irq) {
42 writel(1, drvdata->base + SR_CLR_SB_ECC_INTR);
43 edac_device_handle_ce(dci, 0, 0, dci->ctl_name);
44 }
45 if (irq == drvdata->db_irq) {
46 writel(1, drvdata->base + SR_CLR_DB_ECC_INTR);
47 edac_device_handle_ue(dci, 0, 0, dci->ctl_name);
48 }
49
50 return IRQ_HANDLED;
51}
52
53static int __devinit highbank_l2_err_probe(struct platform_device *pdev)
54{
55 struct edac_device_ctl_info *dci;
56 struct hb_l2_drvdata *drvdata;
57 struct resource *r;
58 int res = 0;
59
60 dci = edac_device_alloc_ctl_info(sizeof(*drvdata), "cpu",
61 1, "L", 1, 2, NULL, 0, 0);
62 if (!dci)
63 return -ENOMEM;
64
65 drvdata = dci->pvt_info;
66 dci->dev = &pdev->dev;
67 platform_set_drvdata(pdev, dci);
68
69 if (!devres_open_group(&pdev->dev, NULL, GFP_KERNEL))
70 return -ENOMEM;
71
72 r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
73 if (!r) {
74 dev_err(&pdev->dev, "Unable to get mem resource\n");
75 res = -ENODEV;
76 goto err;
77 }
78
79 if (!devm_request_mem_region(&pdev->dev, r->start,
80 resource_size(r), dev_name(&pdev->dev))) {
81 dev_err(&pdev->dev, "Error while requesting mem region\n");
82 res = -EBUSY;
83 goto err;
84 }
85
86 drvdata->base = devm_ioremap(&pdev->dev, r->start, resource_size(r));
87 if (!drvdata->base) {
88 dev_err(&pdev->dev, "Unable to map regs\n");
89 res = -ENOMEM;
90 goto err;
91 }
92
93 drvdata->db_irq = platform_get_irq(pdev, 0);
94 res = devm_request_irq(&pdev->dev, drvdata->db_irq,
95 highbank_l2_err_handler,
96 0, dev_name(&pdev->dev), dci);
97 if (res < 0)
98 goto err;
99
100 drvdata->sb_irq = platform_get_irq(pdev, 1);
101 res = devm_request_irq(&pdev->dev, drvdata->sb_irq,
102 highbank_l2_err_handler,
103 0, dev_name(&pdev->dev), dci);
104 if (res < 0)
105 goto err;
106
107 dci->mod_name = dev_name(&pdev->dev);
108 dci->dev_name = dev_name(&pdev->dev);
109
110 if (edac_device_add_device(dci))
111 goto err;
112
113 devres_close_group(&pdev->dev, NULL);
114 return 0;
115err:
116 devres_release_group(&pdev->dev, NULL);
117 edac_device_free_ctl_info(dci);
118 return res;
119}
120
121static int highbank_l2_err_remove(struct platform_device *pdev)
122{
123 struct edac_device_ctl_info *dci = platform_get_drvdata(pdev);
124
125 edac_device_del_device(&pdev->dev);
126 edac_device_free_ctl_info(dci);
127 return 0;
128}
129
130static const struct of_device_id hb_l2_err_of_match[] = {
131 { .compatible = "calxeda,hb-sregs-l2-ecc", },
132 {},
133};
134MODULE_DEVICE_TABLE(of, hb_l2_err_of_match);
135
136static struct platform_driver highbank_l2_edac_driver = {
137 .probe = highbank_l2_err_probe,
138 .remove = highbank_l2_err_remove,
139 .driver = {
140 .name = "hb_l2_edac",
141 .of_match_table = hb_l2_err_of_match,
142 },
143};
144
145module_platform_driver(highbank_l2_edac_driver);
146
147MODULE_LICENSE("GPL v2");
148MODULE_AUTHOR("Calxeda, Inc.");
149MODULE_DESCRIPTION("EDAC Driver for Calxeda Highbank L2 Cache");
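
The new Highbank L2 driver is deliberately small: one edac_device instance named "cpu", one reporting block, and two interrupt lines whose handler only acks the controller and bumps the CE or UE counter depending on which line fired. Most of that layout is encoded in the allocation call; the restatement below annotates each argument with its assumed meaning (the annotations paraphrase the edac_device API rather than quote edac_core.h):

    /* Illustrative restatement of the call in highbank_l2_err_probe(). */
    dci = edac_device_alloc_ctl_info(sizeof(*drvdata),	/* pvt_info size     */
    				     "cpu",		/* device name       */
    				     1,			/* one instance      */
    				     "L",		/* block name prefix */
    				     1,			/* one block ...     */
    				     2,			/* ... numbered 2,   */
    							/* i.e. block "L2"   */
    				     NULL, 0,		/* no extra attrs    */
    				     0);		/* device index      */

If that reading is right, the counters end up in the edac_device sysfs tree as something like .../edac/cpu/cpu0/L2/{ce_count,ue_count}; the exact layout is defined by edac_device_sysfs.c.
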
diff --git a/drivers/edac/highbank_mc_edac.c b/drivers/edac/highbank_mc_edac.c
new file mode 100644
index 000000000000..c769f477fd22
--- /dev/null
+++ b/drivers/edac/highbank_mc_edac.c
@@ -0,0 +1,264 @@
1/*
2 * Copyright 2011-2012 Calxeda, Inc.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 *
13 * You should have received a copy of the GNU General Public License along with
14 * this program. If not, see <http://www.gnu.org/licenses/>.
15 */
16#include <linux/types.h>
17#include <linux/kernel.h>
18#include <linux/ctype.h>
19#include <linux/edac.h>
20#include <linux/interrupt.h>
21#include <linux/platform_device.h>
22#include <linux/of_platform.h>
23#include <linux/uaccess.h>
24
25#include "edac_core.h"
26#include "edac_module.h"
27
28/* DDR Ctrlr Error Registers */
29#define HB_DDR_ECC_OPT 0x128
30#define HB_DDR_ECC_U_ERR_ADDR 0x130
31#define HB_DDR_ECC_U_ERR_STAT 0x134
32#define HB_DDR_ECC_U_ERR_DATAL 0x138
33#define HB_DDR_ECC_U_ERR_DATAH 0x13c
34#define HB_DDR_ECC_C_ERR_ADDR 0x140
35#define HB_DDR_ECC_C_ERR_STAT 0x144
36#define HB_DDR_ECC_C_ERR_DATAL 0x148
37#define HB_DDR_ECC_C_ERR_DATAH 0x14c
38#define HB_DDR_ECC_INT_STATUS 0x180
39#define HB_DDR_ECC_INT_ACK 0x184
40#define HB_DDR_ECC_U_ERR_ID 0x424
41#define HB_DDR_ECC_C_ERR_ID 0x428
42
43#define HB_DDR_ECC_INT_STAT_CE 0x8
44#define HB_DDR_ECC_INT_STAT_DOUBLE_CE 0x10
45#define HB_DDR_ECC_INT_STAT_UE 0x20
46#define HB_DDR_ECC_INT_STAT_DOUBLE_UE 0x40
47
48#define HB_DDR_ECC_OPT_MODE_MASK 0x3
49#define HB_DDR_ECC_OPT_FWC 0x100
50#define HB_DDR_ECC_OPT_XOR_SHIFT 16
51
52struct hb_mc_drvdata {
53 void __iomem *mc_vbase;
54};
55
56static irqreturn_t highbank_mc_err_handler(int irq, void *dev_id)
57{
58 struct mem_ctl_info *mci = dev_id;
59 struct hb_mc_drvdata *drvdata = mci->pvt_info;
60 u32 status, err_addr;
61
62 /* Read the interrupt status register */
63 status = readl(drvdata->mc_vbase + HB_DDR_ECC_INT_STATUS);
64
65 if (status & HB_DDR_ECC_INT_STAT_UE) {
66 err_addr = readl(drvdata->mc_vbase + HB_DDR_ECC_U_ERR_ADDR);
67 edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1,
68 err_addr >> PAGE_SHIFT,
69 err_addr & ~PAGE_MASK, 0,
70 0, 0, -1,
71 mci->ctl_name, "");
72 }
73 if (status & HB_DDR_ECC_INT_STAT_CE) {
74 u32 syndrome = readl(drvdata->mc_vbase + HB_DDR_ECC_C_ERR_STAT);
75 syndrome = (syndrome >> 8) & 0xff;
76 err_addr = readl(drvdata->mc_vbase + HB_DDR_ECC_C_ERR_ADDR);
77 edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1,
78 err_addr >> PAGE_SHIFT,
79 err_addr & ~PAGE_MASK, syndrome,
80 0, 0, -1,
81 mci->ctl_name, "");
82 }
83
84 /* clear the error, clears the interrupt */
85 writel(status, drvdata->mc_vbase + HB_DDR_ECC_INT_ACK);
86 return IRQ_HANDLED;
87}
88
89#ifdef CONFIG_EDAC_DEBUG
90static ssize_t highbank_mc_err_inject_write(struct file *file,
91 const char __user *data,
92 size_t count, loff_t *ppos)
93{
94 struct mem_ctl_info *mci = file->private_data;
95 struct hb_mc_drvdata *pdata = mci->pvt_info;
96 char buf[32];
97 size_t buf_size;
98 u32 reg;
99 u8 synd;
100
101 buf_size = min(count, (sizeof(buf)-1));
102 if (copy_from_user(buf, data, buf_size))
103 return -EFAULT;
104 buf[buf_size] = 0;
105
106 if (!kstrtou8(buf, 16, &synd)) {
107 reg = readl(pdata->mc_vbase + HB_DDR_ECC_OPT);
108 reg &= HB_DDR_ECC_OPT_MODE_MASK;
109 reg |= (synd << HB_DDR_ECC_OPT_XOR_SHIFT) | HB_DDR_ECC_OPT_FWC;
110 writel(reg, pdata->mc_vbase + HB_DDR_ECC_OPT);
111 }
112
113 return count;
114}
115
116static int debugfs_open(struct inode *inode, struct file *file)
117{
118 file->private_data = inode->i_private;
119 return 0;
120}
121
122static const struct file_operations highbank_mc_debug_inject_fops = {
123 .open = debugfs_open,
124 .write = highbank_mc_err_inject_write,
125 .llseek = generic_file_llseek,
126};
127
128static void __devinit highbank_mc_create_debugfs_nodes(struct mem_ctl_info *mci)
129{
130 if (mci->debugfs)
131 debugfs_create_file("inject_ctrl", S_IWUSR, mci->debugfs, mci,
132 &highbank_mc_debug_inject_fops);
133;
134}
135#else
136static void __devinit highbank_mc_create_debugfs_nodes(struct mem_ctl_info *mci)
137{}
138#endif
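
When CONFIG_EDAC_DEBUG is enabled, the inject_ctrl file created above gives tests a way to provoke ECC events: the written hex byte is parsed as a syndrome and stored in HB_DDR_ECC_OPT together with the force-write-check bit, after which the controller flags an ECC event on a following access (the precise hardware behaviour is Calxeda-specific). The assumed usage is something like 'echo 5a > /sys/kernel/debug/edac/mc0/inject_ctrl', with the exact path set by the new top-level debugfs directory. The register update itself is plain bit arithmetic:

    /* Standalone illustration of the HB_DDR_ECC_OPT update; sample values. */
    #include <stdio.h>

    #define HB_DDR_ECC_OPT_MODE_MASK	0x3
    #define HB_DDR_ECC_OPT_FWC		0x100
    #define HB_DDR_ECC_OPT_XOR_SHIFT	16

    int main(void)
    {
    	unsigned int reg = 0x00000001;	/* sample OPT readback: ECC enabled */
    	unsigned char synd = 0x5a;	/* as if "5a" had been written */

    	reg &= HB_DDR_ECC_OPT_MODE_MASK;		/* keep the ECC mode bits   */
    	reg |= (synd << HB_DDR_ECC_OPT_XOR_SHIFT)	/* syndrome into bits 23:16 */
    	     | HB_DDR_ECC_OPT_FWC;			/* force the write check    */
    	printf("OPT = 0x%08x\n", reg);			/* prints 0x005a0101 */
    	return 0;
    }
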
139
140static int __devinit highbank_mc_probe(struct platform_device *pdev)
141{
142 struct edac_mc_layer layers[2];
143 struct mem_ctl_info *mci;
144 struct hb_mc_drvdata *drvdata;
145 struct dimm_info *dimm;
146 struct resource *r;
147 u32 control;
148 int irq;
149 int res = 0;
150
151 layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
152 layers[0].size = 1;
153 layers[0].is_virt_csrow = true;
154 layers[1].type = EDAC_MC_LAYER_CHANNEL;
155 layers[1].size = 1;
156 layers[1].is_virt_csrow = false;
157 mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers,
158 sizeof(struct hb_mc_drvdata));
159 if (!mci)
160 return -ENOMEM;
161
162 mci->pdev = &pdev->dev;
163 drvdata = mci->pvt_info;
164 platform_set_drvdata(pdev, mci);
165
166 if (!devres_open_group(&pdev->dev, NULL, GFP_KERNEL))
167 return -ENOMEM;
168
169 r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
170 if (!r) {
171 dev_err(&pdev->dev, "Unable to get mem resource\n");
172 res = -ENODEV;
173 goto err;
174 }
175
176 if (!devm_request_mem_region(&pdev->dev, r->start,
177 resource_size(r), dev_name(&pdev->dev))) {
178 dev_err(&pdev->dev, "Error while requesting mem region\n");
179 res = -EBUSY;
180 goto err;
181 }
182
183 drvdata->mc_vbase = devm_ioremap(&pdev->dev,
184 r->start, resource_size(r));
185 if (!drvdata->mc_vbase) {
186 dev_err(&pdev->dev, "Unable to map regs\n");
187 res = -ENOMEM;
188 goto err;
189 }
190
191 control = readl(drvdata->mc_vbase + HB_DDR_ECC_OPT) & 0x3;
192 if (!control || (control == 0x2)) {
193 dev_err(&pdev->dev, "No ECC present, or ECC disabled\n");
194 res = -ENODEV;
195 goto err;
196 }
197
198 irq = platform_get_irq(pdev, 0);
199 res = devm_request_irq(&pdev->dev, irq, highbank_mc_err_handler,
200 0, dev_name(&pdev->dev), mci);
201 if (res < 0) {
202 dev_err(&pdev->dev, "Unable to request irq %d\n", irq);
203 goto err;
204 }
205
206 mci->mtype_cap = MEM_FLAG_DDR3;
207 mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED;
208 mci->edac_cap = EDAC_FLAG_SECDED;
209 mci->mod_name = dev_name(&pdev->dev);
210 mci->mod_ver = "1";
211 mci->ctl_name = dev_name(&pdev->dev);
212 mci->scrub_mode = SCRUB_SW_SRC;
213
214 /* Only a single 4GB DIMM is supported */
215 dimm = *mci->dimms;
216 dimm->nr_pages = (~0UL >> PAGE_SHIFT) + 1;
217 dimm->grain = 8;
218 dimm->dtype = DEV_X8;
219 dimm->mtype = MEM_DDR3;
220 dimm->edac_mode = EDAC_SECDED;
221
222 res = edac_mc_add_mc(mci);
223 if (res < 0)
224 goto err;
225
226 highbank_mc_create_debugfs_nodes(mci);
227
228 devres_close_group(&pdev->dev, NULL);
229 return 0;
230err:
231 devres_release_group(&pdev->dev, NULL);
232 edac_mc_free(mci);
233 return res;
234}
235
236static int highbank_mc_remove(struct platform_device *pdev)
237{
238 struct mem_ctl_info *mci = platform_get_drvdata(pdev);
239
240 edac_mc_del_mc(&pdev->dev);
241 edac_mc_free(mci);
242 return 0;
243}
244
245static const struct of_device_id hb_ddr_ctrl_of_match[] = {
246 { .compatible = "calxeda,hb-ddr-ctrl", },
247 {},
248};
249MODULE_DEVICE_TABLE(of, hb_ddr_ctrl_of_match);
250
251static struct platform_driver highbank_mc_edac_driver = {
252 .probe = highbank_mc_probe,
253 .remove = highbank_mc_remove,
254 .driver = {
255 .name = "hb_mc_edac",
256 .of_match_table = hb_ddr_ctrl_of_match,
257 },
258};
259
260module_platform_driver(highbank_mc_edac_driver);
261
262MODULE_LICENSE("GPL v2");
263MODULE_AUTHOR("Calxeda, Inc.");
264MODULE_DESCRIPTION("EDAC Driver for Calxeda Highbank");
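
highbank_mc_edac.c reports each DDR event with the reworked core call: an explicit error count of 1, a page/offset pair split out of the raw error address, and, for correctable errors, the syndrome taken from bits 15:8 of the C_ERR_STAT register. The address split is the only arithmetic involved; a self-contained sketch, using 4 KiB pages as a stand-in for the kernel's PAGE_SHIFT/PAGE_MASK:

    /* Userspace illustration of the err_addr split done in the IRQ handler. */
    #include <stdio.h>

    #define EX_PAGE_SHIFT	12			/* assume 4 KiB pages */
    #define EX_PAGE_MASK	(~0u << EX_PAGE_SHIFT)

    int main(void)
    {
    	unsigned int err_addr = 0x12345678;	/* sample C_ERR_ADDR value */

    	printf("page   = 0x%x\n", err_addr >> EX_PAGE_SHIFT);	/* 0x12345 */
    	printf("offset = 0x%x\n", err_addr & ~EX_PAGE_MASK);	/* 0x678 */
    	return 0;
    }

Both interrupt sources are acked in one go by writing the status word back to HB_DDR_ECC_INT_ACK, so a burst that sets both the CE and UE bits is reported as two events but cleared once.
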
diff --git a/drivers/edac/i3000_edac.c b/drivers/edac/i3000_edac.c
index 8ad1744faacd..d3d19cc4e9a1 100644
--- a/drivers/edac/i3000_edac.c
+++ b/drivers/edac/i3000_edac.c
@@ -194,7 +194,7 @@ static void i3000_get_error_info(struct mem_ctl_info *mci,
194{ 194{
195 struct pci_dev *pdev; 195 struct pci_dev *pdev;
196 196
197 pdev = to_pci_dev(mci->dev); 197 pdev = to_pci_dev(mci->pdev);
198 198
199 /* 199 /*
200 * This is a mess because there is no atomic way to read all the 200 * This is a mess because there is no atomic way to read all the
@@ -236,7 +236,7 @@ static int i3000_process_error_info(struct mem_ctl_info *mci,
236 int row, multi_chan, channel; 236 int row, multi_chan, channel;
237 unsigned long pfn, offset; 237 unsigned long pfn, offset;
238 238
239 multi_chan = mci->csrows[0].nr_channels - 1; 239 multi_chan = mci->csrows[0]->nr_channels - 1;
240 240
241 if (!(info->errsts & I3000_ERRSTS_BITS)) 241 if (!(info->errsts & I3000_ERRSTS_BITS))
242 return 0; 242 return 0;
@@ -245,9 +245,9 @@ static int i3000_process_error_info(struct mem_ctl_info *mci,
245 return 1; 245 return 1;
246 246
247 if ((info->errsts ^ info->errsts2) & I3000_ERRSTS_BITS) { 247 if ((info->errsts ^ info->errsts2) & I3000_ERRSTS_BITS) {
248 edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 0, 0, 0, 248 edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1, 0, 0, 0,
249 -1, -1, -1, 249 -1, -1, -1,
250 "UE overwrote CE", "", NULL); 250 "UE overwrote CE", "");
251 info->errsts = info->errsts2; 251 info->errsts = info->errsts2;
252 } 252 }
253 253
@@ -258,15 +258,15 @@ static int i3000_process_error_info(struct mem_ctl_info *mci,
258 row = edac_mc_find_csrow_by_page(mci, pfn); 258 row = edac_mc_find_csrow_by_page(mci, pfn);
259 259
260 if (info->errsts & I3000_ERRSTS_UE) 260 if (info->errsts & I3000_ERRSTS_UE)
261 edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 261 edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1,
262 pfn, offset, 0, 262 pfn, offset, 0,
263 row, -1, -1, 263 row, -1, -1,
264 "i3000 UE", "", NULL); 264 "i3000 UE", "");
265 else 265 else
266 edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 266 edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1,
267 pfn, offset, info->derrsyn, 267 pfn, offset, info->derrsyn,
268 row, multi_chan ? channel : 0, -1, 268 row, multi_chan ? channel : 0, -1,
269 "i3000 CE", "", NULL); 269 "i3000 CE", "");
270 270
271 return 1; 271 return 1;
272} 272}
@@ -275,7 +275,7 @@ static void i3000_check(struct mem_ctl_info *mci)
275{ 275{
276 struct i3000_error_info info; 276 struct i3000_error_info info;
277 277
278 debugf1("MC%d: %s()\n", mci->mc_idx, __func__); 278 edac_dbg(1, "MC%d\n", mci->mc_idx);
279 i3000_get_error_info(mci, &info); 279 i3000_get_error_info(mci, &info);
280 i3000_process_error_info(mci, &info, 1); 280 i3000_process_error_info(mci, &info, 1);
281} 281}
@@ -322,7 +322,7 @@ static int i3000_probe1(struct pci_dev *pdev, int dev_idx)
322 unsigned long mchbar; 322 unsigned long mchbar;
323 void __iomem *window; 323 void __iomem *window;
324 324
325 debugf0("MC: %s()\n", __func__); 325 edac_dbg(0, "MC:\n");
326 326
327 pci_read_config_dword(pdev, I3000_MCHBAR, (u32 *) & mchbar); 327 pci_read_config_dword(pdev, I3000_MCHBAR, (u32 *) & mchbar);
328 mchbar &= I3000_MCHBAR_MASK; 328 mchbar &= I3000_MCHBAR_MASK;
@@ -366,9 +366,9 @@ static int i3000_probe1(struct pci_dev *pdev, int dev_idx)
366 if (!mci) 366 if (!mci)
367 return -ENOMEM; 367 return -ENOMEM;
368 368
369 debugf3("MC: %s(): init mci\n", __func__); 369 edac_dbg(3, "MC: init mci\n");
370 370
371 mci->dev = &pdev->dev; 371 mci->pdev = &pdev->dev;
372 mci->mtype_cap = MEM_FLAG_DDR2; 372 mci->mtype_cap = MEM_FLAG_DDR2;
373 373
374 mci->edac_ctl_cap = EDAC_FLAG_SECDED; 374 mci->edac_ctl_cap = EDAC_FLAG_SECDED;
@@ -393,14 +393,13 @@ static int i3000_probe1(struct pci_dev *pdev, int dev_idx)
393 for (last_cumul_size = i = 0; i < mci->nr_csrows; i++) { 393 for (last_cumul_size = i = 0; i < mci->nr_csrows; i++) {
394 u8 value; 394 u8 value;
395 u32 cumul_size; 395 u32 cumul_size;
396 struct csrow_info *csrow = &mci->csrows[i]; 396 struct csrow_info *csrow = mci->csrows[i];
397 397
398 value = drb[i]; 398 value = drb[i];
399 cumul_size = value << (I3000_DRB_SHIFT - PAGE_SHIFT); 399 cumul_size = value << (I3000_DRB_SHIFT - PAGE_SHIFT);
400 if (interleaved) 400 if (interleaved)
401 cumul_size <<= 1; 401 cumul_size <<= 1;
402 debugf3("MC: %s(): (%d) cumul_size 0x%x\n", 402 edac_dbg(3, "MC: (%d) cumul_size 0x%x\n", i, cumul_size);
403 __func__, i, cumul_size);
404 if (cumul_size == last_cumul_size) 403 if (cumul_size == last_cumul_size)
405 continue; 404 continue;
406 405
@@ -410,7 +409,7 @@ static int i3000_probe1(struct pci_dev *pdev, int dev_idx)
410 last_cumul_size = cumul_size; 409 last_cumul_size = cumul_size;
411 410
412 for (j = 0; j < nr_channels; j++) { 411 for (j = 0; j < nr_channels; j++) {
413 struct dimm_info *dimm = csrow->channels[j].dimm; 412 struct dimm_info *dimm = csrow->channels[j]->dimm;
414 413
415 dimm->nr_pages = nr_pages / nr_channels; 414 dimm->nr_pages = nr_pages / nr_channels;
416 dimm->grain = I3000_DEAP_GRAIN; 415 dimm->grain = I3000_DEAP_GRAIN;
@@ -429,7 +428,7 @@ static int i3000_probe1(struct pci_dev *pdev, int dev_idx)
429 428
430 rc = -ENODEV; 429 rc = -ENODEV;
431 if (edac_mc_add_mc(mci)) { 430 if (edac_mc_add_mc(mci)) {
432 debugf3("MC: %s(): failed edac_mc_add_mc()\n", __func__); 431 edac_dbg(3, "MC: failed edac_mc_add_mc()\n");
433 goto fail; 432 goto fail;
434 } 433 }
435 434
@@ -445,7 +444,7 @@ static int i3000_probe1(struct pci_dev *pdev, int dev_idx)
445 } 444 }
446 445
447 /* get this far and it's successful */ 446 /* get this far and it's successful */
448 debugf3("MC: %s(): success\n", __func__); 447 edac_dbg(3, "MC: success\n");
449 return 0; 448 return 0;
450 449
451fail: 450fail:
@@ -461,7 +460,7 @@ static int __devinit i3000_init_one(struct pci_dev *pdev,
461{ 460{
462 int rc; 461 int rc;
463 462
464 debugf0("MC: %s()\n", __func__); 463 edac_dbg(0, "MC:\n");
465 464
466 if (pci_enable_device(pdev) < 0) 465 if (pci_enable_device(pdev) < 0)
467 return -EIO; 466 return -EIO;
@@ -477,7 +476,7 @@ static void __devexit i3000_remove_one(struct pci_dev *pdev)
477{ 476{
478 struct mem_ctl_info *mci; 477 struct mem_ctl_info *mci;
479 478
480 debugf0("%s()\n", __func__); 479 edac_dbg(0, "\n");
481 480
482 if (i3000_pci) 481 if (i3000_pci)
483 edac_pci_release_generic_ctl(i3000_pci); 482 edac_pci_release_generic_ctl(i3000_pci);
@@ -511,7 +510,7 @@ static int __init i3000_init(void)
511{ 510{
512 int pci_rc; 511 int pci_rc;
513 512
514 debugf3("MC: %s()\n", __func__); 513 edac_dbg(3, "MC:\n");
515 514
516 /* Ensure that the OPSTATE is set correctly for POLL or NMI */ 515 /* Ensure that the OPSTATE is set correctly for POLL or NMI */
517 opstate_init(); 516 opstate_init();
@@ -525,14 +524,14 @@ static int __init i3000_init(void)
525 mci_pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 524 mci_pdev = pci_get_device(PCI_VENDOR_ID_INTEL,
526 PCI_DEVICE_ID_INTEL_3000_HB, NULL); 525 PCI_DEVICE_ID_INTEL_3000_HB, NULL);
527 if (!mci_pdev) { 526 if (!mci_pdev) {
528 debugf0("i3000 pci_get_device fail\n"); 527 edac_dbg(0, "i3000 pci_get_device fail\n");
529 pci_rc = -ENODEV; 528 pci_rc = -ENODEV;
530 goto fail1; 529 goto fail1;
531 } 530 }
532 531
533 pci_rc = i3000_init_one(mci_pdev, i3000_pci_tbl); 532 pci_rc = i3000_init_one(mci_pdev, i3000_pci_tbl);
534 if (pci_rc < 0) { 533 if (pci_rc < 0) {
535 debugf0("i3000 init fail\n"); 534 edac_dbg(0, "i3000 init fail\n");
536 pci_rc = -ENODEV; 535 pci_rc = -ENODEV;
537 goto fail1; 536 goto fail1;
538 } 537 }
@@ -552,7 +551,7 @@ fail0:
552 551
553static void __exit i3000_exit(void) 552static void __exit i3000_exit(void)
554{ 553{
555 debugf3("MC: %s()\n", __func__); 554 edac_dbg(3, "MC:\n");
556 555
557 pci_unregister_driver(&i3000_driver); 556 pci_unregister_driver(&i3000_driver);
558 if (!i3000_registered) { 557 if (!i3000_registered) {
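
Every edac_mc_handle_error() call in i3000_edac.c (and in the other drivers touched here) gains a literal 1 right after the mci argument and loses its trailing NULL. Both changes follow from the reworked core API: the error count is now passed explicitly, and the old arch-specific final argument is gone. The resulting prototype is roughly the following (edac_core.h carries the authoritative declaration):

    /* Post-series prototype, paraphrased. */
    void edac_mc_handle_error(const enum hw_event_mc_err_type type,
    			      struct mem_ctl_info *mci,
    			      const u16 error_count,	/* new parameter */
    			      const unsigned long page_frame_number,
    			      const unsigned long offset_in_page,
    			      const unsigned long syndrome,
    			      const int top_layer,
    			      const int mid_layer,
    			      const int low_layer,
    			      const char *msg,
    			      const char *other_detail); /* trailing arch pointer dropped */

i3000 always reports one error at a time, hence the constant 1; drivers whose controllers expose a hardware error count pass that value instead.
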
diff --git a/drivers/edac/i3200_edac.c b/drivers/edac/i3200_edac.c
index bbe43ef71823..47180a08edad 100644
--- a/drivers/edac/i3200_edac.c
+++ b/drivers/edac/i3200_edac.c
@@ -110,10 +110,10 @@ static int how_many_channels(struct pci_dev *pdev)
110 110
111 pci_read_config_byte(pdev, I3200_CAPID0 + 8, &capid0_8b); 111 pci_read_config_byte(pdev, I3200_CAPID0 + 8, &capid0_8b);
112 if (capid0_8b & 0x20) { /* check DCD: Dual Channel Disable */ 112 if (capid0_8b & 0x20) { /* check DCD: Dual Channel Disable */
113 debugf0("In single channel mode.\n"); 113 edac_dbg(0, "In single channel mode\n");
114 return 1; 114 return 1;
115 } else { 115 } else {
116 debugf0("In dual channel mode.\n"); 116 edac_dbg(0, "In dual channel mode\n");
117 return 2; 117 return 2;
118 } 118 }
119} 119}
@@ -159,7 +159,7 @@ static void i3200_clear_error_info(struct mem_ctl_info *mci)
159{ 159{
160 struct pci_dev *pdev; 160 struct pci_dev *pdev;
161 161
162 pdev = to_pci_dev(mci->dev); 162 pdev = to_pci_dev(mci->pdev);
163 163
164 /* 164 /*
165 * Clear any error bits. 165 * Clear any error bits.
@@ -176,7 +176,7 @@ static void i3200_get_and_clear_error_info(struct mem_ctl_info *mci,
176 struct i3200_priv *priv = mci->pvt_info; 176 struct i3200_priv *priv = mci->pvt_info;
177 void __iomem *window = priv->window; 177 void __iomem *window = priv->window;
178 178
179 pdev = to_pci_dev(mci->dev); 179 pdev = to_pci_dev(mci->pdev);
180 180
181 /* 181 /*
182 * This is a mess because there is no atomic way to read all the 182 * This is a mess because there is no atomic way to read all the
@@ -218,25 +218,25 @@ static void i3200_process_error_info(struct mem_ctl_info *mci,
218 return; 218 return;
219 219
220 if ((info->errsts ^ info->errsts2) & I3200_ERRSTS_BITS) { 220 if ((info->errsts ^ info->errsts2) & I3200_ERRSTS_BITS) {
221 edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 0, 0, 0, 221 edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1, 0, 0, 0,
222 -1, -1, -1, "UE overwrote CE", "", NULL); 222 -1, -1, -1, "UE overwrote CE", "");
223 info->errsts = info->errsts2; 223 info->errsts = info->errsts2;
224 } 224 }
225 225
226 for (channel = 0; channel < nr_channels; channel++) { 226 for (channel = 0; channel < nr_channels; channel++) {
227 log = info->eccerrlog[channel]; 227 log = info->eccerrlog[channel];
228 if (log & I3200_ECCERRLOG_UE) { 228 if (log & I3200_ECCERRLOG_UE) {
229 edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 229 edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1,
230 0, 0, 0, 230 0, 0, 0,
231 eccerrlog_row(channel, log), 231 eccerrlog_row(channel, log),
232 -1, -1, 232 -1, -1,
233 "i3000 UE", "", NULL); 233 "i3000 UE", "");
234 } else if (log & I3200_ECCERRLOG_CE) { 234 } else if (log & I3200_ECCERRLOG_CE) {
235 edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 235 edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1,
236 0, 0, eccerrlog_syndrome(log), 236 0, 0, eccerrlog_syndrome(log),
237 eccerrlog_row(channel, log), 237 eccerrlog_row(channel, log),
238 -1, -1, 238 -1, -1,
239 "i3000 UE", "", NULL); 239 "i3000 UE", "");
240 } 240 }
241 } 241 }
242} 242}
@@ -245,7 +245,7 @@ static void i3200_check(struct mem_ctl_info *mci)
245{ 245{
246 struct i3200_error_info info; 246 struct i3200_error_info info;
247 247
248 debugf1("MC%d: %s()\n", mci->mc_idx, __func__); 248 edac_dbg(1, "MC%d\n", mci->mc_idx);
249 i3200_get_and_clear_error_info(mci, &info); 249 i3200_get_and_clear_error_info(mci, &info);
250 i3200_process_error_info(mci, &info); 250 i3200_process_error_info(mci, &info);
251} 251}
@@ -332,7 +332,7 @@ static int i3200_probe1(struct pci_dev *pdev, int dev_idx)
332 void __iomem *window; 332 void __iomem *window;
333 struct i3200_priv *priv; 333 struct i3200_priv *priv;
334 334
335 debugf0("MC: %s()\n", __func__); 335 edac_dbg(0, "MC:\n");
336 336
337 window = i3200_map_mchbar(pdev); 337 window = i3200_map_mchbar(pdev);
338 if (!window) 338 if (!window)
@@ -352,9 +352,9 @@ static int i3200_probe1(struct pci_dev *pdev, int dev_idx)
352 if (!mci) 352 if (!mci)
353 return -ENOMEM; 353 return -ENOMEM;
354 354
355 debugf3("MC: %s(): init mci\n", __func__); 355 edac_dbg(3, "MC: init mci\n");
356 356
357 mci->dev = &pdev->dev; 357 mci->pdev = &pdev->dev;
358 mci->mtype_cap = MEM_FLAG_DDR2; 358 mci->mtype_cap = MEM_FLAG_DDR2;
359 359
360 mci->edac_ctl_cap = EDAC_FLAG_SECDED; 360 mci->edac_ctl_cap = EDAC_FLAG_SECDED;
@@ -379,7 +379,7 @@ static int i3200_probe1(struct pci_dev *pdev, int dev_idx)
379 */ 379 */
380 for (i = 0; i < mci->nr_csrows; i++) { 380 for (i = 0; i < mci->nr_csrows; i++) {
381 unsigned long nr_pages; 381 unsigned long nr_pages;
382 struct csrow_info *csrow = &mci->csrows[i]; 382 struct csrow_info *csrow = mci->csrows[i];
383 383
384 nr_pages = drb_to_nr_pages(drbs, stacked, 384 nr_pages = drb_to_nr_pages(drbs, stacked,
385 i / I3200_RANKS_PER_CHANNEL, 385 i / I3200_RANKS_PER_CHANNEL,
@@ -389,7 +389,7 @@ static int i3200_probe1(struct pci_dev *pdev, int dev_idx)
389 continue; 389 continue;
390 390
391 for (j = 0; j < nr_channels; j++) { 391 for (j = 0; j < nr_channels; j++) {
392 struct dimm_info *dimm = csrow->channels[j].dimm; 392 struct dimm_info *dimm = csrow->channels[j]->dimm;
393 393
394 dimm->nr_pages = nr_pages / nr_channels; 394 dimm->nr_pages = nr_pages / nr_channels;
395 dimm->grain = nr_pages << PAGE_SHIFT; 395 dimm->grain = nr_pages << PAGE_SHIFT;
@@ -403,12 +403,12 @@ static int i3200_probe1(struct pci_dev *pdev, int dev_idx)
403 403
404 rc = -ENODEV; 404 rc = -ENODEV;
405 if (edac_mc_add_mc(mci)) { 405 if (edac_mc_add_mc(mci)) {
406 debugf3("MC: %s(): failed edac_mc_add_mc()\n", __func__); 406 edac_dbg(3, "MC: failed edac_mc_add_mc()\n");
407 goto fail; 407 goto fail;
408 } 408 }
409 409
410 /* get this far and it's successful */ 410 /* get this far and it's successful */
411 debugf3("MC: %s(): success\n", __func__); 411 edac_dbg(3, "MC: success\n");
412 return 0; 412 return 0;
413 413
414fail: 414fail:
@@ -424,7 +424,7 @@ static int __devinit i3200_init_one(struct pci_dev *pdev,
424{ 424{
425 int rc; 425 int rc;
426 426
427 debugf0("MC: %s()\n", __func__); 427 edac_dbg(0, "MC:\n");
428 428
429 if (pci_enable_device(pdev) < 0) 429 if (pci_enable_device(pdev) < 0)
430 return -EIO; 430 return -EIO;
@@ -441,7 +441,7 @@ static void __devexit i3200_remove_one(struct pci_dev *pdev)
441 struct mem_ctl_info *mci; 441 struct mem_ctl_info *mci;
442 struct i3200_priv *priv; 442 struct i3200_priv *priv;
443 443
444 debugf0("%s()\n", __func__); 444 edac_dbg(0, "\n");
445 445
446 mci = edac_mc_del_mc(&pdev->dev); 446 mci = edac_mc_del_mc(&pdev->dev);
447 if (!mci) 447 if (!mci)
@@ -475,7 +475,7 @@ static int __init i3200_init(void)
475{ 475{
476 int pci_rc; 476 int pci_rc;
477 477
478 debugf3("MC: %s()\n", __func__); 478 edac_dbg(3, "MC:\n");
479 479
480 /* Ensure that the OPSTATE is set correctly for POLL or NMI */ 480 /* Ensure that the OPSTATE is set correctly for POLL or NMI */
481 opstate_init(); 481 opstate_init();
@@ -489,14 +489,14 @@ static int __init i3200_init(void)
489 mci_pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 489 mci_pdev = pci_get_device(PCI_VENDOR_ID_INTEL,
490 PCI_DEVICE_ID_INTEL_3200_HB, NULL); 490 PCI_DEVICE_ID_INTEL_3200_HB, NULL);
491 if (!mci_pdev) { 491 if (!mci_pdev) {
492 debugf0("i3200 pci_get_device fail\n"); 492 edac_dbg(0, "i3200 pci_get_device fail\n");
493 pci_rc = -ENODEV; 493 pci_rc = -ENODEV;
494 goto fail1; 494 goto fail1;
495 } 495 }
496 496
497 pci_rc = i3200_init_one(mci_pdev, i3200_pci_tbl); 497 pci_rc = i3200_init_one(mci_pdev, i3200_pci_tbl);
498 if (pci_rc < 0) { 498 if (pci_rc < 0) {
499 debugf0("i3200 init fail\n"); 499 edac_dbg(0, "i3200 init fail\n");
500 pci_rc = -ENODEV; 500 pci_rc = -ENODEV;
501 goto fail1; 501 goto fail1;
502 } 502 }
@@ -516,7 +516,7 @@ fail0:
516 516
517static void __exit i3200_exit(void) 517static void __exit i3200_exit(void)
518{ 518{
519 debugf3("MC: %s()\n", __func__); 519 edac_dbg(3, "MC:\n");
520 520
521 pci_unregister_driver(&i3200_driver); 521 pci_unregister_driver(&i3200_driver);
522 if (!i3200_registered) { 522 if (!i3200_registered) {
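
The i3200 hunks also show the second recurring pattern in this merge: mci->csrows and csrow->channels stop being flat embedded arrays and become arrays of pointers, so '&mci->csrows[i]' turns into 'mci->csrows[i]' and 'channels[j].dimm' into 'channels[j]->dimm'. Each csrow and channel now lives in its own allocation that can be released through its kobject. In outline (names abbreviated; edac.h has the real layout):

    /* Sketch of the container change behind the "." -> "->" churn. */
    struct csrow_info;			/* element type itself unchanged */

    struct mem_ctl_info_before {
    	struct csrow_info *csrows;	/* flat array: &mci->csrows[i]   */
    };

    struct mem_ctl_info_after {
    	struct csrow_info **csrows;	/* pointer array: mci->csrows[i], */
    };					/* each element allocated separately */

    /* The same shift happens one level down: csrow->channels[j] is now a
     * pointer too, hence ".dimm" becoming "->dimm" in the loops above. */

Unrelated to that churn, note that the correctable-error branch here still reports HW_EVENT_ERR_UNCORRECTED with an "i3000 UE" label inside an i3200 driver; that looks like a pre-existing copy/paste slip which this series leaves as it found it.
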
diff --git a/drivers/edac/i5000_edac.c b/drivers/edac/i5000_edac.c
index 11ea835f155a..39c63757c2a1 100644
--- a/drivers/edac/i5000_edac.c
+++ b/drivers/edac/i5000_edac.c
@@ -273,7 +273,7 @@
273#define CHANNELS_PER_BRANCH 2 273#define CHANNELS_PER_BRANCH 2
274#define MAX_BRANCHES 2 274#define MAX_BRANCHES 2
275 275
276/* Defines to extract the vaious fields from the 276/* Defines to extract the various fields from the
277 * MTRx - Memory Technology Registers 277 * MTRx - Memory Technology Registers
278 */ 278 */
279#define MTR_DIMMS_PRESENT(mtr) ((mtr) & (0x1 << 8)) 279#define MTR_DIMMS_PRESENT(mtr) ((mtr) & (0x1 << 8))
@@ -287,22 +287,6 @@
287#define MTR_DIMM_COLS(mtr) ((mtr) & 0x3) 287#define MTR_DIMM_COLS(mtr) ((mtr) & 0x3)
288#define MTR_DIMM_COLS_ADDR_BITS(mtr) (MTR_DIMM_COLS(mtr) + 10) 288#define MTR_DIMM_COLS_ADDR_BITS(mtr) (MTR_DIMM_COLS(mtr) + 10)
289 289
290#ifdef CONFIG_EDAC_DEBUG
291static char *numrow_toString[] = {
292 "8,192 - 13 rows",
293 "16,384 - 14 rows",
294 "32,768 - 15 rows",
295 "reserved"
296};
297
298static char *numcol_toString[] = {
299 "1,024 - 10 columns",
300 "2,048 - 11 columns",
301 "4,096 - 12 columns",
302 "reserved"
303};
304#endif
305
306/* enables the report of miscellaneous messages as CE errors - default off */ 290/* enables the report of miscellaneous messages as CE errors - default off */
307static int misc_messages; 291static int misc_messages;
308 292
@@ -344,7 +328,13 @@ struct i5000_pvt {
344 struct pci_dev *branch_1; /* 22.0 */ 328 struct pci_dev *branch_1; /* 22.0 */
345 329
346 u16 tolm; /* top of low memory */ 330 u16 tolm; /* top of low memory */
347 u64 ambase; /* AMB BAR */ 331 union {
332 u64 ambase; /* AMB BAR */
333 struct {
334 u32 ambase_bottom;
335 u32 ambase_top;
336 } u __packed;
337 };
348 338
349 u16 mir0, mir1, mir2; 339 u16 mir0, mir1, mir2;
350 340
@@ -494,10 +484,9 @@ static void i5000_process_fatal_error_info(struct mem_ctl_info *mci,
494 ras = NREC_RAS(info->nrecmemb); 484 ras = NREC_RAS(info->nrecmemb);
495 cas = NREC_CAS(info->nrecmemb); 485 cas = NREC_CAS(info->nrecmemb);
496 486
497 debugf0("\t\tCSROW= %d Channel= %d " 487 edac_dbg(0, "\t\tCSROW= %d Channel= %d (DRAM Bank= %d rdwr= %s ras= %d cas= %d)\n",
498 "(DRAM Bank= %d rdwr= %s ras= %d cas= %d)\n", 488 rank, channel, bank,
499 rank, channel, bank, 489 rdwr ? "Write" : "Read", ras, cas);
500 rdwr ? "Write" : "Read", ras, cas);
501 490
502 /* Only 1 bit will be on */ 491 /* Only 1 bit will be on */
503 switch (allErrors) { 492 switch (allErrors) {
@@ -536,10 +525,10 @@ static void i5000_process_fatal_error_info(struct mem_ctl_info *mci,
536 bank, ras, cas, allErrors, specific); 525 bank, ras, cas, allErrors, specific);
537 526
538 /* Call the helper to output message */ 527 /* Call the helper to output message */
539 edac_mc_handle_error(HW_EVENT_ERR_FATAL, mci, 0, 0, 0, 528 edac_mc_handle_error(HW_EVENT_ERR_FATAL, mci, 1, 0, 0, 0,
540 channel >> 1, channel & 1, rank, 529 channel >> 1, channel & 1, rank,
541 rdwr ? "Write error" : "Read error", 530 rdwr ? "Write error" : "Read error",
542 msg, NULL); 531 msg);
543} 532}
544 533
545/* 534/*
@@ -574,7 +563,7 @@ static void i5000_process_nonfatal_error_info(struct mem_ctl_info *mci,
574 /* ONLY ONE of the possible error bits will be set, as per the docs */ 563 /* ONLY ONE of the possible error bits will be set, as per the docs */
575 ue_errors = allErrors & FERR_NF_UNCORRECTABLE; 564 ue_errors = allErrors & FERR_NF_UNCORRECTABLE;
576 if (ue_errors) { 565 if (ue_errors) {
577 debugf0("\tUncorrected bits= 0x%x\n", ue_errors); 566 edac_dbg(0, "\tUncorrected bits= 0x%x\n", ue_errors);
578 567
579 branch = EXTRACT_FBDCHAN_INDX(info->ferr_nf_fbd); 568 branch = EXTRACT_FBDCHAN_INDX(info->ferr_nf_fbd);
580 569
@@ -590,11 +579,9 @@ static void i5000_process_nonfatal_error_info(struct mem_ctl_info *mci,
590 ras = NREC_RAS(info->nrecmemb); 579 ras = NREC_RAS(info->nrecmemb);
591 cas = NREC_CAS(info->nrecmemb); 580 cas = NREC_CAS(info->nrecmemb);
592 581
593 debugf0 582 edac_dbg(0, "\t\tCSROW= %d Channels= %d,%d (Branch= %d DRAM Bank= %d rdwr= %s ras= %d cas= %d)\n",
594 ("\t\tCSROW= %d Channels= %d,%d (Branch= %d " 583 rank, channel, channel + 1, branch >> 1, bank,
595 "DRAM Bank= %d rdwr= %s ras= %d cas= %d)\n", 584 rdwr ? "Write" : "Read", ras, cas);
596 rank, channel, channel + 1, branch >> 1, bank,
597 rdwr ? "Write" : "Read", ras, cas);
598 585
599 switch (ue_errors) { 586 switch (ue_errors) {
600 case FERR_NF_M12ERR: 587 case FERR_NF_M12ERR:
@@ -637,16 +624,16 @@ static void i5000_process_nonfatal_error_info(struct mem_ctl_info *mci,
637 rank, bank, ras, cas, ue_errors, specific); 624 rank, bank, ras, cas, ue_errors, specific);
638 625
639 /* Call the helper to output message */ 626 /* Call the helper to output message */
640 edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 0, 0, 0, 627 edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1, 0, 0, 0,
641 channel >> 1, -1, rank, 628 channel >> 1, -1, rank,
642 rdwr ? "Write error" : "Read error", 629 rdwr ? "Write error" : "Read error",
643 msg, NULL); 630 msg);
644 } 631 }
645 632
646 /* Check correctable errors */ 633 /* Check correctable errors */
647 ce_errors = allErrors & FERR_NF_CORRECTABLE; 634 ce_errors = allErrors & FERR_NF_CORRECTABLE;
648 if (ce_errors) { 635 if (ce_errors) {
649 debugf0("\tCorrected bits= 0x%x\n", ce_errors); 636 edac_dbg(0, "\tCorrected bits= 0x%x\n", ce_errors);
650 637
651 branch = EXTRACT_FBDCHAN_INDX(info->ferr_nf_fbd); 638 branch = EXTRACT_FBDCHAN_INDX(info->ferr_nf_fbd);
652 639
@@ -664,10 +651,9 @@ static void i5000_process_nonfatal_error_info(struct mem_ctl_info *mci,
664 ras = REC_RAS(info->recmemb); 651 ras = REC_RAS(info->recmemb);
665 cas = REC_CAS(info->recmemb); 652 cas = REC_CAS(info->recmemb);
666 653
667 debugf0("\t\tCSROW= %d Channel= %d (Branch %d " 654 edac_dbg(0, "\t\tCSROW= %d Channel= %d (Branch %d DRAM Bank= %d rdwr= %s ras= %d cas= %d)\n",
668 "DRAM Bank= %d rdwr= %s ras= %d cas= %d)\n", 655 rank, channel, branch >> 1, bank,
669 rank, channel, branch >> 1, bank, 656 rdwr ? "Write" : "Read", ras, cas);
670 rdwr ? "Write" : "Read", ras, cas);
671 657
672 switch (ce_errors) { 658 switch (ce_errors) {
673 case FERR_NF_M17ERR: 659 case FERR_NF_M17ERR:
@@ -692,10 +678,10 @@ static void i5000_process_nonfatal_error_info(struct mem_ctl_info *mci,
692 specific); 678 specific);
693 679
694 /* Call the helper to output message */ 680 /* Call the helper to output message */
695 edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 0, 0, 0, 681 edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1, 0, 0, 0,
696 channel >> 1, channel % 2, rank, 682 channel >> 1, channel % 2, rank,
697 rdwr ? "Write error" : "Read error", 683 rdwr ? "Write error" : "Read error",
698 msg, NULL); 684 msg);
699 } 685 }
700 686
701 if (!misc_messages) 687 if (!misc_messages)
@@ -738,9 +724,9 @@ static void i5000_process_nonfatal_error_info(struct mem_ctl_info *mci,
738 "Err=%#x (%s)", misc_errors, specific); 724 "Err=%#x (%s)", misc_errors, specific);
739 725
740 /* Call the helper to output message */ 726 /* Call the helper to output message */
741 edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 0, 0, 0, 727 edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1, 0, 0, 0,
742 branch >> 1, -1, -1, 728 branch >> 1, -1, -1,
743 "Misc error", msg, NULL); 729 "Misc error", msg);
744 } 730 }
745} 731}
746 732
@@ -779,7 +765,7 @@ static void i5000_clear_error(struct mem_ctl_info *mci)
779static void i5000_check_error(struct mem_ctl_info *mci) 765static void i5000_check_error(struct mem_ctl_info *mci)
780{ 766{
781 struct i5000_error_info info; 767 struct i5000_error_info info;
782 debugf4("MC%d: %s: %s()\n", mci->mc_idx, __FILE__, __func__); 768 edac_dbg(4, "MC%d\n", mci->mc_idx);
783 i5000_get_error_info(mci, &info); 769 i5000_get_error_info(mci, &info);
784 i5000_process_error_info(mci, &info, 1); 770 i5000_process_error_info(mci, &info, 1);
785} 771}
@@ -850,15 +836,16 @@ static int i5000_get_devices(struct mem_ctl_info *mci, int dev_idx)
850 836
851 pvt->fsb_error_regs = pdev; 837 pvt->fsb_error_regs = pdev;
852 838
853 debugf1("System Address, processor bus- PCI Bus ID: %s %x:%x\n", 839 edac_dbg(1, "System Address, processor bus- PCI Bus ID: %s %x:%x\n",
854 pci_name(pvt->system_address), 840 pci_name(pvt->system_address),
855 pvt->system_address->vendor, pvt->system_address->device); 841 pvt->system_address->vendor, pvt->system_address->device);
856 debugf1("Branchmap, control and errors - PCI Bus ID: %s %x:%x\n", 842 edac_dbg(1, "Branchmap, control and errors - PCI Bus ID: %s %x:%x\n",
857 pci_name(pvt->branchmap_werrors), 843 pci_name(pvt->branchmap_werrors),
858 pvt->branchmap_werrors->vendor, pvt->branchmap_werrors->device); 844 pvt->branchmap_werrors->vendor,
859 debugf1("FSB Error Regs - PCI Bus ID: %s %x:%x\n", 845 pvt->branchmap_werrors->device);
860 pci_name(pvt->fsb_error_regs), 846 edac_dbg(1, "FSB Error Regs - PCI Bus ID: %s %x:%x\n",
861 pvt->fsb_error_regs->vendor, pvt->fsb_error_regs->device); 847 pci_name(pvt->fsb_error_regs),
848 pvt->fsb_error_regs->vendor, pvt->fsb_error_regs->device);
862 849
863 pdev = NULL; 850 pdev = NULL;
864 pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 851 pdev = pci_get_device(PCI_VENDOR_ID_INTEL,
@@ -981,16 +968,25 @@ static void decode_mtr(int slot_row, u16 mtr)
981 968
982 ans = MTR_DIMMS_PRESENT(mtr); 969 ans = MTR_DIMMS_PRESENT(mtr);
983 970
984 debugf2("\tMTR%d=0x%x: DIMMs are %s\n", slot_row, mtr, 971 edac_dbg(2, "\tMTR%d=0x%x: DIMMs are %sPresent\n",
985 ans ? "Present" : "NOT Present"); 972 slot_row, mtr, ans ? "" : "NOT ");
986 if (!ans) 973 if (!ans)
987 return; 974 return;
988 975
989 debugf2("\t\tWIDTH: x%d\n", MTR_DRAM_WIDTH(mtr)); 976 edac_dbg(2, "\t\tWIDTH: x%d\n", MTR_DRAM_WIDTH(mtr));
990 debugf2("\t\tNUMBANK: %d bank(s)\n", MTR_DRAM_BANKS(mtr)); 977 edac_dbg(2, "\t\tNUMBANK: %d bank(s)\n", MTR_DRAM_BANKS(mtr));
991 debugf2("\t\tNUMRANK: %s\n", MTR_DIMM_RANK(mtr) ? "double" : "single"); 978 edac_dbg(2, "\t\tNUMRANK: %s\n",
992 debugf2("\t\tNUMROW: %s\n", numrow_toString[MTR_DIMM_ROWS(mtr)]); 979 MTR_DIMM_RANK(mtr) ? "double" : "single");
993 debugf2("\t\tNUMCOL: %s\n", numcol_toString[MTR_DIMM_COLS(mtr)]); 980 edac_dbg(2, "\t\tNUMROW: %s\n",
981 MTR_DIMM_ROWS(mtr) == 0 ? "8,192 - 13 rows" :
982 MTR_DIMM_ROWS(mtr) == 1 ? "16,384 - 14 rows" :
983 MTR_DIMM_ROWS(mtr) == 2 ? "32,768 - 15 rows" :
984 "reserved");
985 edac_dbg(2, "\t\tNUMCOL: %s\n",
986 MTR_DIMM_COLS(mtr) == 0 ? "1,024 - 10 columns" :
987 MTR_DIMM_COLS(mtr) == 1 ? "2,048 - 11 columns" :
988 MTR_DIMM_COLS(mtr) == 2 ? "4,096 - 12 columns" :
989 "reserved");
994} 990}
995 991
996static void handle_channel(struct i5000_pvt *pvt, int slot, int channel, 992static void handle_channel(struct i5000_pvt *pvt, int slot, int channel,
@@ -1061,7 +1057,7 @@ static void calculate_dimm_size(struct i5000_pvt *pvt)
1061 "--------------------------------"); 1057 "--------------------------------");
1062 p += n; 1058 p += n;
1063 space -= n; 1059 space -= n;
1064 debugf2("%s\n", mem_buffer); 1060 edac_dbg(2, "%s\n", mem_buffer);
1065 p = mem_buffer; 1061 p = mem_buffer;
1066 space = PAGE_SIZE; 1062 space = PAGE_SIZE;
1067 } 1063 }
@@ -1082,7 +1078,7 @@ static void calculate_dimm_size(struct i5000_pvt *pvt)
1082 } 1078 }
1083 p += n; 1079 p += n;
1084 space -= n; 1080 space -= n;
1085 debugf2("%s\n", mem_buffer); 1081 edac_dbg(2, "%s\n", mem_buffer);
1086 p = mem_buffer; 1082 p = mem_buffer;
1087 space = PAGE_SIZE; 1083 space = PAGE_SIZE;
1088 } 1084 }
@@ -1092,7 +1088,7 @@ static void calculate_dimm_size(struct i5000_pvt *pvt)
1092 "--------------------------------"); 1088 "--------------------------------");
1093 p += n; 1089 p += n;
1094 space -= n; 1090 space -= n;
1095 debugf2("%s\n", mem_buffer); 1091 edac_dbg(2, "%s\n", mem_buffer);
1096 p = mem_buffer; 1092 p = mem_buffer;
1097 space = PAGE_SIZE; 1093 space = PAGE_SIZE;
1098 1094
@@ -1105,7 +1101,7 @@ static void calculate_dimm_size(struct i5000_pvt *pvt)
1105 p += n; 1101 p += n;
1106 space -= n; 1102 space -= n;
1107 } 1103 }
1108 debugf2("%s\n", mem_buffer); 1104 edac_dbg(2, "%s\n", mem_buffer);
1109 p = mem_buffer; 1105 p = mem_buffer;
1110 space = PAGE_SIZE; 1106 space = PAGE_SIZE;
1111 1107
@@ -1118,7 +1114,7 @@ static void calculate_dimm_size(struct i5000_pvt *pvt)
1118 } 1114 }
1119 1115
1120 /* output the last message and free buffer */ 1116 /* output the last message and free buffer */
1121 debugf2("%s\n", mem_buffer); 1117 edac_dbg(2, "%s\n", mem_buffer);
1122 kfree(mem_buffer); 1118 kfree(mem_buffer);
1123} 1119}
1124 1120
@@ -1141,24 +1137,25 @@ static void i5000_get_mc_regs(struct mem_ctl_info *mci)
1141 pvt = mci->pvt_info; 1137 pvt = mci->pvt_info;
1142 1138
1143 pci_read_config_dword(pvt->system_address, AMBASE, 1139 pci_read_config_dword(pvt->system_address, AMBASE,
1144 (u32 *) & pvt->ambase); 1140 &pvt->u.ambase_bottom);
1145 pci_read_config_dword(pvt->system_address, AMBASE + sizeof(u32), 1141 pci_read_config_dword(pvt->system_address, AMBASE + sizeof(u32),
1146 ((u32 *) & pvt->ambase) + sizeof(u32)); 1142 &pvt->u.ambase_top);
1147 1143
1148 maxdimmperch = pvt->maxdimmperch; 1144 maxdimmperch = pvt->maxdimmperch;
1149 maxch = pvt->maxch; 1145 maxch = pvt->maxch;
1150 1146
1151 debugf2("AMBASE= 0x%lx MAXCH= %d MAX-DIMM-Per-CH= %d\n", 1147 edac_dbg(2, "AMBASE= 0x%lx MAXCH= %d MAX-DIMM-Per-CH= %d\n",
1152 (long unsigned int)pvt->ambase, pvt->maxch, pvt->maxdimmperch); 1148 (long unsigned int)pvt->ambase, pvt->maxch, pvt->maxdimmperch);
1153 1149
1154 /* Get the Branch Map regs */ 1150 /* Get the Branch Map regs */
1155 pci_read_config_word(pvt->branchmap_werrors, TOLM, &pvt->tolm); 1151 pci_read_config_word(pvt->branchmap_werrors, TOLM, &pvt->tolm);
1156 pvt->tolm >>= 12; 1152 pvt->tolm >>= 12;
1157 debugf2("\nTOLM (number of 256M regions) =%u (0x%x)\n", pvt->tolm, 1153 edac_dbg(2, "TOLM (number of 256M regions) =%u (0x%x)\n",
1158 pvt->tolm); 1154 pvt->tolm, pvt->tolm);
1159 1155
1160 actual_tolm = pvt->tolm << 28; 1156 actual_tolm = pvt->tolm << 28;
1161 debugf2("Actual TOLM byte addr=%u (0x%x)\n", actual_tolm, actual_tolm); 1157 edac_dbg(2, "Actual TOLM byte addr=%u (0x%x)\n",
1158 actual_tolm, actual_tolm);
1162 1159
1163 pci_read_config_word(pvt->branchmap_werrors, MIR0, &pvt->mir0); 1160 pci_read_config_word(pvt->branchmap_werrors, MIR0, &pvt->mir0);
1164 pci_read_config_word(pvt->branchmap_werrors, MIR1, &pvt->mir1); 1161 pci_read_config_word(pvt->branchmap_werrors, MIR1, &pvt->mir1);
@@ -1168,15 +1165,18 @@ static void i5000_get_mc_regs(struct mem_ctl_info *mci)
1168 limit = (pvt->mir0 >> 4) & 0x0FFF; 1165 limit = (pvt->mir0 >> 4) & 0x0FFF;
1169 way0 = pvt->mir0 & 0x1; 1166 way0 = pvt->mir0 & 0x1;
1170 way1 = pvt->mir0 & 0x2; 1167 way1 = pvt->mir0 & 0x2;
1171 debugf2("MIR0: limit= 0x%x WAY1= %u WAY0= %x\n", limit, way1, way0); 1168 edac_dbg(2, "MIR0: limit= 0x%x WAY1= %u WAY0= %x\n",
1169 limit, way1, way0);
1172 limit = (pvt->mir1 >> 4) & 0x0FFF; 1170 limit = (pvt->mir1 >> 4) & 0x0FFF;
1173 way0 = pvt->mir1 & 0x1; 1171 way0 = pvt->mir1 & 0x1;
1174 way1 = pvt->mir1 & 0x2; 1172 way1 = pvt->mir1 & 0x2;
1175 debugf2("MIR1: limit= 0x%x WAY1= %u WAY0= %x\n", limit, way1, way0); 1173 edac_dbg(2, "MIR1: limit= 0x%x WAY1= %u WAY0= %x\n",
1174 limit, way1, way0);
1176 limit = (pvt->mir2 >> 4) & 0x0FFF; 1175 limit = (pvt->mir2 >> 4) & 0x0FFF;
1177 way0 = pvt->mir2 & 0x1; 1176 way0 = pvt->mir2 & 0x1;
1178 way1 = pvt->mir2 & 0x2; 1177 way1 = pvt->mir2 & 0x2;
1179 debugf2("MIR2: limit= 0x%x WAY1= %u WAY0= %x\n", limit, way1, way0); 1178 edac_dbg(2, "MIR2: limit= 0x%x WAY1= %u WAY0= %x\n",
1179 limit, way1, way0);
1180 1180
1181 /* Get the MTR[0-3] regs */ 1181 /* Get the MTR[0-3] regs */
1182 for (slot_row = 0; slot_row < NUM_MTRS; slot_row++) { 1182 for (slot_row = 0; slot_row < NUM_MTRS; slot_row++) {
@@ -1185,31 +1185,31 @@ static void i5000_get_mc_regs(struct mem_ctl_info *mci)
1185 pci_read_config_word(pvt->branch_0, where, 1185 pci_read_config_word(pvt->branch_0, where,
1186 &pvt->b0_mtr[slot_row]); 1186 &pvt->b0_mtr[slot_row]);
1187 1187
1188 debugf2("MTR%d where=0x%x B0 value=0x%x\n", slot_row, where, 1188 edac_dbg(2, "MTR%d where=0x%x B0 value=0x%x\n",
1189 pvt->b0_mtr[slot_row]); 1189 slot_row, where, pvt->b0_mtr[slot_row]);
1190 1190
1191 if (pvt->maxch >= CHANNELS_PER_BRANCH) { 1191 if (pvt->maxch >= CHANNELS_PER_BRANCH) {
1192 pci_read_config_word(pvt->branch_1, where, 1192 pci_read_config_word(pvt->branch_1, where,
1193 &pvt->b1_mtr[slot_row]); 1193 &pvt->b1_mtr[slot_row]);
1194 debugf2("MTR%d where=0x%x B1 value=0x%x\n", slot_row, 1194 edac_dbg(2, "MTR%d where=0x%x B1 value=0x%x\n",
1195 where, pvt->b1_mtr[slot_row]); 1195 slot_row, where, pvt->b1_mtr[slot_row]);
1196 } else { 1196 } else {
1197 pvt->b1_mtr[slot_row] = 0; 1197 pvt->b1_mtr[slot_row] = 0;
1198 } 1198 }
1199 } 1199 }
1200 1200
1201 /* Read and dump branch 0's MTRs */ 1201 /* Read and dump branch 0's MTRs */
1202 debugf2("\nMemory Technology Registers:\n"); 1202 edac_dbg(2, "Memory Technology Registers:\n");
1203 debugf2(" Branch 0:\n"); 1203 edac_dbg(2, " Branch 0:\n");
1204 for (slot_row = 0; slot_row < NUM_MTRS; slot_row++) { 1204 for (slot_row = 0; slot_row < NUM_MTRS; slot_row++) {
1205 decode_mtr(slot_row, pvt->b0_mtr[slot_row]); 1205 decode_mtr(slot_row, pvt->b0_mtr[slot_row]);
1206 } 1206 }
1207 pci_read_config_word(pvt->branch_0, AMB_PRESENT_0, 1207 pci_read_config_word(pvt->branch_0, AMB_PRESENT_0,
1208 &pvt->b0_ambpresent0); 1208 &pvt->b0_ambpresent0);
1209 debugf2("\t\tAMB-Branch 0-present0 0x%x:\n", pvt->b0_ambpresent0); 1209 edac_dbg(2, "\t\tAMB-Branch 0-present0 0x%x:\n", pvt->b0_ambpresent0);
1210 pci_read_config_word(pvt->branch_0, AMB_PRESENT_1, 1210 pci_read_config_word(pvt->branch_0, AMB_PRESENT_1,
1211 &pvt->b0_ambpresent1); 1211 &pvt->b0_ambpresent1);
1212 debugf2("\t\tAMB-Branch 0-present1 0x%x:\n", pvt->b0_ambpresent1); 1212 edac_dbg(2, "\t\tAMB-Branch 0-present1 0x%x:\n", pvt->b0_ambpresent1);
1213 1213
1214 /* Only if we have 2 branchs (4 channels) */ 1214 /* Only if we have 2 branchs (4 channels) */
1215 if (pvt->maxch < CHANNELS_PER_BRANCH) { 1215 if (pvt->maxch < CHANNELS_PER_BRANCH) {
@@ -1217,18 +1217,18 @@ static void i5000_get_mc_regs(struct mem_ctl_info *mci)
1217 pvt->b1_ambpresent1 = 0; 1217 pvt->b1_ambpresent1 = 0;
1218 } else { 1218 } else {
1219 /* Read and dump branch 1's MTRs */ 1219 /* Read and dump branch 1's MTRs */
1220 debugf2(" Branch 1:\n"); 1220 edac_dbg(2, " Branch 1:\n");
1221 for (slot_row = 0; slot_row < NUM_MTRS; slot_row++) { 1221 for (slot_row = 0; slot_row < NUM_MTRS; slot_row++) {
1222 decode_mtr(slot_row, pvt->b1_mtr[slot_row]); 1222 decode_mtr(slot_row, pvt->b1_mtr[slot_row]);
1223 } 1223 }
1224 pci_read_config_word(pvt->branch_1, AMB_PRESENT_0, 1224 pci_read_config_word(pvt->branch_1, AMB_PRESENT_0,
1225 &pvt->b1_ambpresent0); 1225 &pvt->b1_ambpresent0);
1226 debugf2("\t\tAMB-Branch 1-present0 0x%x:\n", 1226 edac_dbg(2, "\t\tAMB-Branch 1-present0 0x%x:\n",
1227 pvt->b1_ambpresent0); 1227 pvt->b1_ambpresent0);
1228 pci_read_config_word(pvt->branch_1, AMB_PRESENT_1, 1228 pci_read_config_word(pvt->branch_1, AMB_PRESENT_1,
1229 &pvt->b1_ambpresent1); 1229 &pvt->b1_ambpresent1);
1230 debugf2("\t\tAMB-Branch 1-present1 0x%x:\n", 1230 edac_dbg(2, "\t\tAMB-Branch 1-present1 0x%x:\n",
1231 pvt->b1_ambpresent1); 1231 pvt->b1_ambpresent1);
1232 } 1232 }
1233 1233
1234 /* Go and determine the size of each DIMM and place in an 1234 /* Go and determine the size of each DIMM and place in an
@@ -1363,10 +1363,9 @@ static int i5000_probe1(struct pci_dev *pdev, int dev_idx)
1363 int num_channels; 1363 int num_channels;
1364 int num_dimms_per_channel; 1364 int num_dimms_per_channel;
1365 1365
1366 debugf0("MC: %s: %s(), pdev bus %u dev=0x%x fn=0x%x\n", 1366 edac_dbg(0, "MC: pdev bus %u dev=0x%x fn=0x%x\n",
1367 __FILE__, __func__, 1367 pdev->bus->number,
1368 pdev->bus->number, 1368 PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));
1369 PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));
1370 1369
1371 /* We only are looking for func 0 of the set */ 1370 /* We only are looking for func 0 of the set */
1372 if (PCI_FUNC(pdev->devfn) != 0) 1371 if (PCI_FUNC(pdev->devfn) != 0)
@@ -1388,8 +1387,8 @@ static int i5000_probe1(struct pci_dev *pdev, int dev_idx)
1388 i5000_get_dimm_and_channel_counts(pdev, &num_dimms_per_channel, 1387 i5000_get_dimm_and_channel_counts(pdev, &num_dimms_per_channel,
1389 &num_channels); 1388 &num_channels);
1390 1389
1391 debugf0("MC: %s(): Number of Branches=2 Channels= %d DIMMS= %d\n", 1390 edac_dbg(0, "MC: Number of Branches=2 Channels= %d DIMMS= %d\n",
1392 __func__, num_channels, num_dimms_per_channel); 1391 num_channels, num_dimms_per_channel);
1393 1392
1394 /* allocate a new MC control structure */ 1393 /* allocate a new MC control structure */
1395 1394
@@ -1406,10 +1405,9 @@ static int i5000_probe1(struct pci_dev *pdev, int dev_idx)
1406 if (mci == NULL) 1405 if (mci == NULL)
1407 return -ENOMEM; 1406 return -ENOMEM;
1408 1407
1409 kobject_get(&mci->edac_mci_kobj); 1408 edac_dbg(0, "MC: mci = %p\n", mci);
1410 debugf0("MC: %s: %s(): mci = %p\n", __FILE__, __func__, mci);
1411 1409
1412 mci->dev = &pdev->dev; /* record ptr to the generic device */ 1410 mci->pdev = &pdev->dev; /* record ptr to the generic device */
1413 1411
1414 pvt = mci->pvt_info; 1412 pvt = mci->pvt_info;
1415 pvt->system_address = pdev; /* Record this device in our private */ 1413 pvt->system_address = pdev; /* Record this device in our private */
@@ -1439,19 +1437,16 @@ static int i5000_probe1(struct pci_dev *pdev, int dev_idx)
1439 /* initialize the MC control structure 'csrows' table 1437 /* initialize the MC control structure 'csrows' table
1440 * with the mapping and control information */ 1438 * with the mapping and control information */
1441 if (i5000_init_csrows(mci)) { 1439 if (i5000_init_csrows(mci)) {
1442 debugf0("MC: Setting mci->edac_cap to EDAC_FLAG_NONE\n" 1440 edac_dbg(0, "MC: Setting mci->edac_cap to EDAC_FLAG_NONE because i5000_init_csrows() returned nonzero value\n");
1443 " because i5000_init_csrows() returned nonzero "
1444 "value\n");
1445 mci->edac_cap = EDAC_FLAG_NONE; /* no csrows found */ 1441 mci->edac_cap = EDAC_FLAG_NONE; /* no csrows found */
1446 } else { 1442 } else {
1447 debugf1("MC: Enable error reporting now\n"); 1443 edac_dbg(1, "MC: Enable error reporting now\n");
1448 i5000_enable_error_reporting(mci); 1444 i5000_enable_error_reporting(mci);
1449 } 1445 }
1450 1446
1451 /* add this new MC control structure to EDAC's list of MCs */ 1447 /* add this new MC control structure to EDAC's list of MCs */
1452 if (edac_mc_add_mc(mci)) { 1448 if (edac_mc_add_mc(mci)) {
1453 debugf0("MC: %s: %s(): failed edac_mc_add_mc()\n", 1449 edac_dbg(0, "MC: failed edac_mc_add_mc()\n");
1454 __FILE__, __func__);
1455 /* FIXME: perhaps some code should go here that disables error 1450 /* FIXME: perhaps some code should go here that disables error
1456 * reporting if we just enabled it 1451 * reporting if we just enabled it
1457 */ 1452 */
@@ -1479,7 +1474,6 @@ fail1:
1479 i5000_put_devices(mci); 1474 i5000_put_devices(mci);
1480 1475
1481fail0: 1476fail0:
1482 kobject_put(&mci->edac_mci_kobj);
1483 edac_mc_free(mci); 1477 edac_mc_free(mci);
1484 return -ENODEV; 1478 return -ENODEV;
1485} 1479}
@@ -1496,7 +1490,7 @@ static int __devinit i5000_init_one(struct pci_dev *pdev,
1496{ 1490{
1497 int rc; 1491 int rc;
1498 1492
1499 debugf0("MC: %s: %s()\n", __FILE__, __func__); 1493 edac_dbg(0, "MC:\n");
1500 1494
1501 /* wake up device */ 1495 /* wake up device */
1502 rc = pci_enable_device(pdev); 1496 rc = pci_enable_device(pdev);
@@ -1515,7 +1509,7 @@ static void __devexit i5000_remove_one(struct pci_dev *pdev)
1515{ 1509{
1516 struct mem_ctl_info *mci; 1510 struct mem_ctl_info *mci;
1517 1511
1518 debugf0("%s: %s()\n", __FILE__, __func__); 1512 edac_dbg(0, "\n");
1519 1513
1520 if (i5000_pci) 1514 if (i5000_pci)
1521 edac_pci_release_generic_ctl(i5000_pci); 1515 edac_pci_release_generic_ctl(i5000_pci);
@@ -1525,7 +1519,6 @@ static void __devexit i5000_remove_one(struct pci_dev *pdev)
1525 1519
1526 /* retrieve references to resources, and free those resources */ 1520 /* retrieve references to resources, and free those resources */
1527 i5000_put_devices(mci); 1521 i5000_put_devices(mci);
1528 kobject_put(&mci->edac_mci_kobj);
1529 edac_mc_free(mci); 1522 edac_mc_free(mci);
1530} 1523}
1531 1524
@@ -1562,7 +1555,7 @@ static int __init i5000_init(void)
1562{ 1555{
1563 int pci_rc; 1556 int pci_rc;
1564 1557
1565 debugf2("MC: %s: %s()\n", __FILE__, __func__); 1558 edac_dbg(2, "MC:\n");
1566 1559
1567 /* Ensure that the OPSTATE is set correctly for POLL or NMI */ 1560 /* Ensure that the OPSTATE is set correctly for POLL or NMI */
1568 opstate_init(); 1561 opstate_init();
@@ -1578,7 +1571,7 @@ static int __init i5000_init(void)
1578 */ 1571 */
1579static void __exit i5000_exit(void) 1572static void __exit i5000_exit(void)
1580{ 1573{
1581 debugf2("MC: %s: %s()\n", __FILE__, __func__); 1574 edac_dbg(2, "MC:\n");
1582 pci_unregister_driver(&i5000_driver); 1575 pci_unregister_driver(&i5000_driver);
1583} 1576}
1584 1577
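The i5000 hunks above all follow the two conversions that run through this whole diff: every debugf0()..debugf4() call becomes edac_dbg(level, fmt, ...), and the hand-rolled __FILE__/__func__ arguments disappear because the new macro prefixes the calling function itself; the probe/remove paths also drop their private kobject_get()/kobject_put() on mci->edac_mci_kobj, leaving only edac_mc_free() on the error path. As a rough, illustrative reconstruction of the macro (not the exact in-tree definition, which lives in edac_core.h; edac_debug_level is the existing verbosity knob):

    /* Illustrative sketch only -- not the exact in-tree definition.  */
    extern int edac_debug_level;    /* EDAC debug verbosity knob */

    #define edac_dbg(level, fmt, ...)                                  \
    do {                                                               \
            if ((level) <= edac_debug_level)                           \
                    printk(KERN_DEBUG "EDAC DEBUG: %s: " fmt,          \
                           __func__, ##__VA_ARGS__);                   \
    } while (0)

    /* Old style: debugf0("MC: %s: %s(): mci = %p\n",
     *                    __FILE__, __func__, mci);
     * New style -- the macro supplies the function name by itself:    */
    edac_dbg(0, "MC: mci = %p\n", mci);
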
diff --git a/drivers/edac/i5100_edac.c b/drivers/edac/i5100_edac.c
index e9e7c2a29dc3..c4b5e5f868e8 100644
--- a/drivers/edac/i5100_edac.c
+++ b/drivers/edac/i5100_edac.c
@@ -431,10 +431,10 @@ static void i5100_handle_ce(struct mem_ctl_info *mci,
431 "bank %u, cas %u, ras %u\n", 431 "bank %u, cas %u, ras %u\n",
432 bank, cas, ras); 432 bank, cas, ras);
433 433
434 edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 434 edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1,
435 0, 0, syndrome, 435 0, 0, syndrome,
436 chan, rank, -1, 436 chan, rank, -1,
437 msg, detail, NULL); 437 msg, detail);
438} 438}
439 439
440static void i5100_handle_ue(struct mem_ctl_info *mci, 440static void i5100_handle_ue(struct mem_ctl_info *mci,
@@ -453,10 +453,10 @@ static void i5100_handle_ue(struct mem_ctl_info *mci,
453 "bank %u, cas %u, ras %u\n", 453 "bank %u, cas %u, ras %u\n",
454 bank, cas, ras); 454 bank, cas, ras);
455 455
456 edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 456 edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1,
457 0, 0, syndrome, 457 0, 0, syndrome,
458 chan, rank, -1, 458 chan, rank, -1,
459 msg, detail, NULL); 459 msg, detail);
460} 460}
461 461
462static void i5100_read_log(struct mem_ctl_info *mci, int chan, 462static void i5100_read_log(struct mem_ctl_info *mci, int chan,
@@ -859,8 +859,8 @@ static void __devinit i5100_init_csrows(struct mem_ctl_info *mci)
859 i5100_rank_to_slot(mci, chan, rank)); 859 i5100_rank_to_slot(mci, chan, rank));
860 } 860 }
861 861
862 debugf2("dimm channel %d, rank %d, size %ld\n", 862 edac_dbg(2, "dimm channel %d, rank %d, size %ld\n",
863 chan, rank, (long)PAGES_TO_MiB(npages)); 863 chan, rank, (long)PAGES_TO_MiB(npages));
864 } 864 }
865} 865}
866 866
@@ -943,7 +943,7 @@ static int __devinit i5100_init_one(struct pci_dev *pdev,
943 goto bail_disable_ch1; 943 goto bail_disable_ch1;
944 } 944 }
945 945
946 mci->dev = &pdev->dev; 946 mci->pdev = &pdev->dev;
947 947
948 priv = mci->pvt_info; 948 priv = mci->pvt_info;
949 priv->ranksperchan = ranksperch; 949 priv->ranksperchan = ranksperch;
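Besides the debug-macro change, the i5100 handlers above show the second API change in this series: edac_mc_handle_error() now takes an explicit event count immediately after the mci pointer (these drivers report one error per call, hence the literal 1), and the trailing arch-specific pointer that was always NULL here is dropped. Read as a sketch, with my own argument labels rather than the kernel's exact parameter names, the new call shape is:

    edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
                         1,                 /* errors counted by this event      */
                         0, 0, syndrome,    /* page frame, page offset, syndrome */
                         chan, rank, -1,    /* location layers: top / mid / low  */
                         msg, detail);      /* log message + driver detail text  */
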
diff --git a/drivers/edac/i5400_edac.c b/drivers/edac/i5400_edac.c
index 6640c29e1885..277246998b80 100644
--- a/drivers/edac/i5400_edac.c
+++ b/drivers/edac/i5400_edac.c
@@ -300,24 +300,6 @@ static inline int extract_fbdchan_indx(u32 x)
300 return (x>>28) & 0x3; 300 return (x>>28) & 0x3;
301} 301}
302 302
303#ifdef CONFIG_EDAC_DEBUG
304/* MTR NUMROW */
305static const char *numrow_toString[] = {
306 "8,192 - 13 rows",
307 "16,384 - 14 rows",
308 "32,768 - 15 rows",
309 "65,536 - 16 rows"
310};
311
312/* MTR NUMCOL */
313static const char *numcol_toString[] = {
314 "1,024 - 10 columns",
315 "2,048 - 11 columns",
316 "4,096 - 12 columns",
317 "reserved"
318};
319#endif
320
321/* Device name and register DID (Device ID) */ 303/* Device name and register DID (Device ID) */
322struct i5400_dev_info { 304struct i5400_dev_info {
323 const char *ctl_name; /* name for this device */ 305 const char *ctl_name; /* name for this device */
@@ -345,7 +327,13 @@ struct i5400_pvt {
345 struct pci_dev *branch_1; /* 22.0 */ 327 struct pci_dev *branch_1; /* 22.0 */
346 328
347 u16 tolm; /* top of low memory */ 329 u16 tolm; /* top of low memory */
348 u64 ambase; /* AMB BAR */ 330 union {
331 u64 ambase; /* AMB BAR */
332 struct {
333 u32 ambase_bottom;
334 u32 ambase_top;
335 } u __packed;
336 };
349 337
350 u16 mir0, mir1; 338 u16 mir0, mir1;
351 339
@@ -560,10 +548,9 @@ static void i5400_proccess_non_recoverable_info(struct mem_ctl_info *mci,
560 ras = nrec_ras(info); 548 ras = nrec_ras(info);
561 cas = nrec_cas(info); 549 cas = nrec_cas(info);
562 550
563 debugf0("\t\tDIMM= %d Channels= %d,%d (Branch= %d " 551 edac_dbg(0, "\t\tDIMM= %d Channels= %d,%d (Branch= %d DRAM Bank= %d Buffer ID = %d rdwr= %s ras= %d cas= %d)\n",
564 "DRAM Bank= %d Buffer ID = %d rdwr= %s ras= %d cas= %d)\n", 552 rank, channel, channel + 1, branch >> 1, bank,
565 rank, channel, channel + 1, branch >> 1, bank, 553 buf_id, rdwr_str(rdwr), ras, cas);
566 buf_id, rdwr_str(rdwr), ras, cas);
567 554
568 /* Only 1 bit will be on */ 555 /* Only 1 bit will be on */
569 errnum = find_first_bit(&allErrors, ARRAY_SIZE(error_name)); 556 errnum = find_first_bit(&allErrors, ARRAY_SIZE(error_name));
@@ -573,10 +560,10 @@ static void i5400_proccess_non_recoverable_info(struct mem_ctl_info *mci,
573 "Bank=%d Buffer ID = %d RAS=%d CAS=%d Err=0x%lx (%s)", 560 "Bank=%d Buffer ID = %d RAS=%d CAS=%d Err=0x%lx (%s)",
574 bank, buf_id, ras, cas, allErrors, error_name[errnum]); 561 bank, buf_id, ras, cas, allErrors, error_name[errnum]);
575 562
576 edac_mc_handle_error(tp_event, mci, 0, 0, 0, 563 edac_mc_handle_error(tp_event, mci, 1, 0, 0, 0,
577 branch >> 1, -1, rank, 564 branch >> 1, -1, rank,
578 rdwr ? "Write error" : "Read error", 565 rdwr ? "Write error" : "Read error",
579 msg, NULL); 566 msg);
580} 567}
581 568
582/* 569/*
@@ -613,7 +600,7 @@ static void i5400_process_nonfatal_error_info(struct mem_ctl_info *mci,
613 600
614 /* Correctable errors */ 601 /* Correctable errors */
615 if (allErrors & ERROR_NF_CORRECTABLE) { 602 if (allErrors & ERROR_NF_CORRECTABLE) {
616 debugf0("\tCorrected bits= 0x%lx\n", allErrors); 603 edac_dbg(0, "\tCorrected bits= 0x%lx\n", allErrors);
617 604
618 branch = extract_fbdchan_indx(info->ferr_nf_fbd); 605 branch = extract_fbdchan_indx(info->ferr_nf_fbd);
619 606
@@ -634,10 +621,9 @@ static void i5400_process_nonfatal_error_info(struct mem_ctl_info *mci,
634 /* Only 1 bit will be on */ 621 /* Only 1 bit will be on */
635 errnum = find_first_bit(&allErrors, ARRAY_SIZE(error_name)); 622 errnum = find_first_bit(&allErrors, ARRAY_SIZE(error_name));
636 623
637 debugf0("\t\tDIMM= %d Channel= %d (Branch %d " 624 edac_dbg(0, "\t\tDIMM= %d Channel= %d (Branch %d DRAM Bank= %d rdwr= %s ras= %d cas= %d)\n",
638 "DRAM Bank= %d rdwr= %s ras= %d cas= %d)\n", 625 rank, channel, branch >> 1, bank,
639 rank, channel, branch >> 1, bank, 626 rdwr_str(rdwr), ras, cas);
640 rdwr_str(rdwr), ras, cas);
641 627
642 /* Form out message */ 628 /* Form out message */
643 snprintf(msg, sizeof(msg), 629 snprintf(msg, sizeof(msg),
@@ -646,10 +632,10 @@ static void i5400_process_nonfatal_error_info(struct mem_ctl_info *mci,
646 branch >> 1, bank, rdwr_str(rdwr), ras, cas, 632 branch >> 1, bank, rdwr_str(rdwr), ras, cas,
647 allErrors, error_name[errnum]); 633 allErrors, error_name[errnum]);
648 634
649 edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 0, 0, 0, 635 edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1, 0, 0, 0,
650 branch >> 1, channel % 2, rank, 636 branch >> 1, channel % 2, rank,
651 rdwr ? "Write error" : "Read error", 637 rdwr ? "Write error" : "Read error",
652 msg, NULL); 638 msg);
653 639
654 return; 640 return;
655 } 641 }
@@ -700,7 +686,7 @@ static void i5400_clear_error(struct mem_ctl_info *mci)
700static void i5400_check_error(struct mem_ctl_info *mci) 686static void i5400_check_error(struct mem_ctl_info *mci)
701{ 687{
702 struct i5400_error_info info; 688 struct i5400_error_info info;
703 debugf4("MC%d: %s: %s()\n", mci->mc_idx, __FILE__, __func__); 689 edac_dbg(4, "MC%d\n", mci->mc_idx);
704 i5400_get_error_info(mci, &info); 690 i5400_get_error_info(mci, &info);
705 i5400_process_error_info(mci, &info); 691 i5400_process_error_info(mci, &info);
706} 692}
@@ -786,15 +772,16 @@ static int i5400_get_devices(struct mem_ctl_info *mci, int dev_idx)
786 } 772 }
787 pvt->fsb_error_regs = pdev; 773 pvt->fsb_error_regs = pdev;
788 774
789 debugf1("System Address, processor bus- PCI Bus ID: %s %x:%x\n", 775 edac_dbg(1, "System Address, processor bus- PCI Bus ID: %s %x:%x\n",
790 pci_name(pvt->system_address), 776 pci_name(pvt->system_address),
791 pvt->system_address->vendor, pvt->system_address->device); 777 pvt->system_address->vendor, pvt->system_address->device);
792 debugf1("Branchmap, control and errors - PCI Bus ID: %s %x:%x\n", 778 edac_dbg(1, "Branchmap, control and errors - PCI Bus ID: %s %x:%x\n",
793 pci_name(pvt->branchmap_werrors), 779 pci_name(pvt->branchmap_werrors),
794 pvt->branchmap_werrors->vendor, pvt->branchmap_werrors->device); 780 pvt->branchmap_werrors->vendor,
795 debugf1("FSB Error Regs - PCI Bus ID: %s %x:%x\n", 781 pvt->branchmap_werrors->device);
796 pci_name(pvt->fsb_error_regs), 782 edac_dbg(1, "FSB Error Regs - PCI Bus ID: %s %x:%x\n",
797 pvt->fsb_error_regs->vendor, pvt->fsb_error_regs->device); 783 pci_name(pvt->fsb_error_regs),
784 pvt->fsb_error_regs->vendor, pvt->fsb_error_regs->device);
798 785
799 pvt->branch_0 = pci_get_device(PCI_VENDOR_ID_INTEL, 786 pvt->branch_0 = pci_get_device(PCI_VENDOR_ID_INTEL,
800 PCI_DEVICE_ID_INTEL_5400_FBD0, NULL); 787 PCI_DEVICE_ID_INTEL_5400_FBD0, NULL);
@@ -882,8 +869,8 @@ static int determine_mtr(struct i5400_pvt *pvt, int dimm, int channel)
882 n = dimm; 869 n = dimm;
883 870
884 if (n >= DIMMS_PER_CHANNEL) { 871 if (n >= DIMMS_PER_CHANNEL) {
885 debugf0("ERROR: trying to access an invalid dimm: %d\n", 872 edac_dbg(0, "ERROR: trying to access an invalid dimm: %d\n",
886 dimm); 873 dimm);
887 return 0; 874 return 0;
888 } 875 }
889 876
@@ -903,20 +890,29 @@ static void decode_mtr(int slot_row, u16 mtr)
903 890
904 ans = MTR_DIMMS_PRESENT(mtr); 891 ans = MTR_DIMMS_PRESENT(mtr);
905 892
906 debugf2("\tMTR%d=0x%x: DIMMs are %s\n", slot_row, mtr, 893 edac_dbg(2, "\tMTR%d=0x%x: DIMMs are %sPresent\n",
907 ans ? "Present" : "NOT Present"); 894 slot_row, mtr, ans ? "" : "NOT ");
908 if (!ans) 895 if (!ans)
909 return; 896 return;
910 897
911 debugf2("\t\tWIDTH: x%d\n", MTR_DRAM_WIDTH(mtr)); 898 edac_dbg(2, "\t\tWIDTH: x%d\n", MTR_DRAM_WIDTH(mtr));
912 899
913 debugf2("\t\tELECTRICAL THROTTLING is %s\n", 900 edac_dbg(2, "\t\tELECTRICAL THROTTLING is %s\n",
914 MTR_DIMMS_ETHROTTLE(mtr) ? "enabled" : "disabled"); 901 MTR_DIMMS_ETHROTTLE(mtr) ? "enabled" : "disabled");
915 902
916 debugf2("\t\tNUMBANK: %d bank(s)\n", MTR_DRAM_BANKS(mtr)); 903 edac_dbg(2, "\t\tNUMBANK: %d bank(s)\n", MTR_DRAM_BANKS(mtr));
917 debugf2("\t\tNUMRANK: %s\n", MTR_DIMM_RANK(mtr) ? "double" : "single"); 904 edac_dbg(2, "\t\tNUMRANK: %s\n",
918 debugf2("\t\tNUMROW: %s\n", numrow_toString[MTR_DIMM_ROWS(mtr)]); 905 MTR_DIMM_RANK(mtr) ? "double" : "single");
919 debugf2("\t\tNUMCOL: %s\n", numcol_toString[MTR_DIMM_COLS(mtr)]); 906 edac_dbg(2, "\t\tNUMROW: %s\n",
907 MTR_DIMM_ROWS(mtr) == 0 ? "8,192 - 13 rows" :
908 MTR_DIMM_ROWS(mtr) == 1 ? "16,384 - 14 rows" :
909 MTR_DIMM_ROWS(mtr) == 2 ? "32,768 - 15 rows" :
910 "65,536 - 16 rows");
911 edac_dbg(2, "\t\tNUMCOL: %s\n",
912 MTR_DIMM_COLS(mtr) == 0 ? "1,024 - 10 columns" :
913 MTR_DIMM_COLS(mtr) == 1 ? "2,048 - 11 columns" :
914 MTR_DIMM_COLS(mtr) == 2 ? "4,096 - 12 columns" :
915 "reserved");
920} 916}
921 917
922static void handle_channel(struct i5400_pvt *pvt, int dimm, int channel, 918static void handle_channel(struct i5400_pvt *pvt, int dimm, int channel,
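The decode_mtr() hunk just above pairs with the removal of the CONFIG_EDAC_DEBUG-only numrow_toString/numcol_toString tables earlier in this file's diff: the row/column descriptions are now produced by inline conditional expressions, so no lookup table has to sit behind an #ifdef. Reduced to the row case (copied in spirit from the new code above):

    /* MTR_DIMM_ROWS() yields 0..3; map the value straight to text. */
    edac_dbg(2, "\t\tNUMROW: %s\n",
             MTR_DIMM_ROWS(mtr) == 0 ? "8,192 - 13 rows"  :
             MTR_DIMM_ROWS(mtr) == 1 ? "16,384 - 14 rows" :
             MTR_DIMM_ROWS(mtr) == 2 ? "32,768 - 15 rows" :
                                       "65,536 - 16 rows");
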
@@ -989,7 +985,7 @@ static void calculate_dimm_size(struct i5400_pvt *pvt)
989 "-------------------------------"); 985 "-------------------------------");
990 p += n; 986 p += n;
991 space -= n; 987 space -= n;
992 debugf2("%s\n", mem_buffer); 988 edac_dbg(2, "%s\n", mem_buffer);
993 p = mem_buffer; 989 p = mem_buffer;
994 space = PAGE_SIZE; 990 space = PAGE_SIZE;
995 } 991 }
@@ -1004,7 +1000,7 @@ static void calculate_dimm_size(struct i5400_pvt *pvt)
1004 p += n; 1000 p += n;
1005 space -= n; 1001 space -= n;
1006 } 1002 }
1007 debugf2("%s\n", mem_buffer); 1003 edac_dbg(2, "%s\n", mem_buffer);
1008 p = mem_buffer; 1004 p = mem_buffer;
1009 space = PAGE_SIZE; 1005 space = PAGE_SIZE;
1010 } 1006 }
@@ -1014,7 +1010,7 @@ static void calculate_dimm_size(struct i5400_pvt *pvt)
1014 "-------------------------------"); 1010 "-------------------------------");
1015 p += n; 1011 p += n;
1016 space -= n; 1012 space -= n;
1017 debugf2("%s\n", mem_buffer); 1013 edac_dbg(2, "%s\n", mem_buffer);
1018 p = mem_buffer; 1014 p = mem_buffer;
1019 space = PAGE_SIZE; 1015 space = PAGE_SIZE;
1020 1016
@@ -1029,7 +1025,7 @@ static void calculate_dimm_size(struct i5400_pvt *pvt)
1029 } 1025 }
1030 1026
1031 space -= n; 1027 space -= n;
1032 debugf2("%s\n", mem_buffer); 1028 edac_dbg(2, "%s\n", mem_buffer);
1033 p = mem_buffer; 1029 p = mem_buffer;
1034 space = PAGE_SIZE; 1030 space = PAGE_SIZE;
1035 1031
@@ -1042,7 +1038,7 @@ static void calculate_dimm_size(struct i5400_pvt *pvt)
1042 } 1038 }
1043 1039
1044 /* output the last message and free buffer */ 1040 /* output the last message and free buffer */
1045 debugf2("%s\n", mem_buffer); 1041 edac_dbg(2, "%s\n", mem_buffer);
1046 kfree(mem_buffer); 1042 kfree(mem_buffer);
1047} 1043}
1048 1044
@@ -1065,25 +1061,25 @@ static void i5400_get_mc_regs(struct mem_ctl_info *mci)
1065 pvt = mci->pvt_info; 1061 pvt = mci->pvt_info;
1066 1062
1067 pci_read_config_dword(pvt->system_address, AMBASE, 1063 pci_read_config_dword(pvt->system_address, AMBASE,
1068 (u32 *) &pvt->ambase); 1064 &pvt->u.ambase_bottom);
1069 pci_read_config_dword(pvt->system_address, AMBASE + sizeof(u32), 1065 pci_read_config_dword(pvt->system_address, AMBASE + sizeof(u32),
1070 ((u32 *) &pvt->ambase) + sizeof(u32)); 1066 &pvt->u.ambase_top);
1071 1067
1072 maxdimmperch = pvt->maxdimmperch; 1068 maxdimmperch = pvt->maxdimmperch;
1073 maxch = pvt->maxch; 1069 maxch = pvt->maxch;
1074 1070
1075 debugf2("AMBASE= 0x%lx MAXCH= %d MAX-DIMM-Per-CH= %d\n", 1071 edac_dbg(2, "AMBASE= 0x%lx MAXCH= %d MAX-DIMM-Per-CH= %d\n",
1076 (long unsigned int)pvt->ambase, pvt->maxch, pvt->maxdimmperch); 1072 (long unsigned int)pvt->ambase, pvt->maxch, pvt->maxdimmperch);
1077 1073
1078 /* Get the Branch Map regs */ 1074 /* Get the Branch Map regs */
1079 pci_read_config_word(pvt->branchmap_werrors, TOLM, &pvt->tolm); 1075 pci_read_config_word(pvt->branchmap_werrors, TOLM, &pvt->tolm);
1080 pvt->tolm >>= 12; 1076 pvt->tolm >>= 12;
1081 debugf2("\nTOLM (number of 256M regions) =%u (0x%x)\n", pvt->tolm, 1077 edac_dbg(2, "\nTOLM (number of 256M regions) =%u (0x%x)\n",
1082 pvt->tolm); 1078 pvt->tolm, pvt->tolm);
1083 1079
1084 actual_tolm = (u32) ((1000l * pvt->tolm) >> (30 - 28)); 1080 actual_tolm = (u32) ((1000l * pvt->tolm) >> (30 - 28));
1085 debugf2("Actual TOLM byte addr=%u.%03u GB (0x%x)\n", 1081 edac_dbg(2, "Actual TOLM byte addr=%u.%03u GB (0x%x)\n",
1086 actual_tolm/1000, actual_tolm % 1000, pvt->tolm << 28); 1082 actual_tolm/1000, actual_tolm % 1000, pvt->tolm << 28);
1087 1083
1088 pci_read_config_word(pvt->branchmap_werrors, MIR0, &pvt->mir0); 1084 pci_read_config_word(pvt->branchmap_werrors, MIR0, &pvt->mir0);
1089 pci_read_config_word(pvt->branchmap_werrors, MIR1, &pvt->mir1); 1085 pci_read_config_word(pvt->branchmap_werrors, MIR1, &pvt->mir1);
@@ -1092,11 +1088,13 @@ static void i5400_get_mc_regs(struct mem_ctl_info *mci)
1092 limit = (pvt->mir0 >> 4) & 0x0fff; 1088 limit = (pvt->mir0 >> 4) & 0x0fff;
1093 way0 = pvt->mir0 & 0x1; 1089 way0 = pvt->mir0 & 0x1;
1094 way1 = pvt->mir0 & 0x2; 1090 way1 = pvt->mir0 & 0x2;
1095 debugf2("MIR0: limit= 0x%x WAY1= %u WAY0= %x\n", limit, way1, way0); 1091 edac_dbg(2, "MIR0: limit= 0x%x WAY1= %u WAY0= %x\n",
1092 limit, way1, way0);
1096 limit = (pvt->mir1 >> 4) & 0xfff; 1093 limit = (pvt->mir1 >> 4) & 0xfff;
1097 way0 = pvt->mir1 & 0x1; 1094 way0 = pvt->mir1 & 0x1;
1098 way1 = pvt->mir1 & 0x2; 1095 way1 = pvt->mir1 & 0x2;
1099 debugf2("MIR1: limit= 0x%x WAY1= %u WAY0= %x\n", limit, way1, way0); 1096 edac_dbg(2, "MIR1: limit= 0x%x WAY1= %u WAY0= %x\n",
1097 limit, way1, way0);
1100 1098
1101 /* Get the set of MTR[0-3] regs by each branch */ 1099 /* Get the set of MTR[0-3] regs by each branch */
1102 for (slot_row = 0; slot_row < DIMMS_PER_CHANNEL; slot_row++) { 1100 for (slot_row = 0; slot_row < DIMMS_PER_CHANNEL; slot_row++) {
@@ -1106,8 +1104,8 @@ static void i5400_get_mc_regs(struct mem_ctl_info *mci)
1106 pci_read_config_word(pvt->branch_0, where, 1104 pci_read_config_word(pvt->branch_0, where,
1107 &pvt->b0_mtr[slot_row]); 1105 &pvt->b0_mtr[slot_row]);
1108 1106
1109 debugf2("MTR%d where=0x%x B0 value=0x%x\n", slot_row, where, 1107 edac_dbg(2, "MTR%d where=0x%x B0 value=0x%x\n",
1110 pvt->b0_mtr[slot_row]); 1108 slot_row, where, pvt->b0_mtr[slot_row]);
1111 1109
1112 if (pvt->maxch < CHANNELS_PER_BRANCH) { 1110 if (pvt->maxch < CHANNELS_PER_BRANCH) {
1113 pvt->b1_mtr[slot_row] = 0; 1111 pvt->b1_mtr[slot_row] = 0;
@@ -1117,22 +1115,22 @@ static void i5400_get_mc_regs(struct mem_ctl_info *mci)
1117 /* Branch 1 set of MTR registers */ 1115 /* Branch 1 set of MTR registers */
1118 pci_read_config_word(pvt->branch_1, where, 1116 pci_read_config_word(pvt->branch_1, where,
1119 &pvt->b1_mtr[slot_row]); 1117 &pvt->b1_mtr[slot_row]);
1120 debugf2("MTR%d where=0x%x B1 value=0x%x\n", slot_row, where, 1118 edac_dbg(2, "MTR%d where=0x%x B1 value=0x%x\n",
1121 pvt->b1_mtr[slot_row]); 1119 slot_row, where, pvt->b1_mtr[slot_row]);
1122 } 1120 }
1123 1121
1124 /* Read and dump branch 0's MTRs */ 1122 /* Read and dump branch 0's MTRs */
1125 debugf2("\nMemory Technology Registers:\n"); 1123 edac_dbg(2, "Memory Technology Registers:\n");
1126 debugf2(" Branch 0:\n"); 1124 edac_dbg(2, " Branch 0:\n");
1127 for (slot_row = 0; slot_row < DIMMS_PER_CHANNEL; slot_row++) 1125 for (slot_row = 0; slot_row < DIMMS_PER_CHANNEL; slot_row++)
1128 decode_mtr(slot_row, pvt->b0_mtr[slot_row]); 1126 decode_mtr(slot_row, pvt->b0_mtr[slot_row]);
1129 1127
1130 pci_read_config_word(pvt->branch_0, AMBPRESENT_0, 1128 pci_read_config_word(pvt->branch_0, AMBPRESENT_0,
1131 &pvt->b0_ambpresent0); 1129 &pvt->b0_ambpresent0);
1132 debugf2("\t\tAMB-Branch 0-present0 0x%x:\n", pvt->b0_ambpresent0); 1130 edac_dbg(2, "\t\tAMB-Branch 0-present0 0x%x:\n", pvt->b0_ambpresent0);
1133 pci_read_config_word(pvt->branch_0, AMBPRESENT_1, 1131 pci_read_config_word(pvt->branch_0, AMBPRESENT_1,
1134 &pvt->b0_ambpresent1); 1132 &pvt->b0_ambpresent1);
1135 debugf2("\t\tAMB-Branch 0-present1 0x%x:\n", pvt->b0_ambpresent1); 1133 edac_dbg(2, "\t\tAMB-Branch 0-present1 0x%x:\n", pvt->b0_ambpresent1);
1136 1134
1137 /* Only if we have 2 branchs (4 channels) */ 1135 /* Only if we have 2 branchs (4 channels) */
1138 if (pvt->maxch < CHANNELS_PER_BRANCH) { 1136 if (pvt->maxch < CHANNELS_PER_BRANCH) {
@@ -1140,18 +1138,18 @@ static void i5400_get_mc_regs(struct mem_ctl_info *mci)
1140 pvt->b1_ambpresent1 = 0; 1138 pvt->b1_ambpresent1 = 0;
1141 } else { 1139 } else {
1142 /* Read and dump branch 1's MTRs */ 1140 /* Read and dump branch 1's MTRs */
1143 debugf2(" Branch 1:\n"); 1141 edac_dbg(2, " Branch 1:\n");
1144 for (slot_row = 0; slot_row < DIMMS_PER_CHANNEL; slot_row++) 1142 for (slot_row = 0; slot_row < DIMMS_PER_CHANNEL; slot_row++)
1145 decode_mtr(slot_row, pvt->b1_mtr[slot_row]); 1143 decode_mtr(slot_row, pvt->b1_mtr[slot_row]);
1146 1144
1147 pci_read_config_word(pvt->branch_1, AMBPRESENT_0, 1145 pci_read_config_word(pvt->branch_1, AMBPRESENT_0,
1148 &pvt->b1_ambpresent0); 1146 &pvt->b1_ambpresent0);
1149 debugf2("\t\tAMB-Branch 1-present0 0x%x:\n", 1147 edac_dbg(2, "\t\tAMB-Branch 1-present0 0x%x:\n",
1150 pvt->b1_ambpresent0); 1148 pvt->b1_ambpresent0);
1151 pci_read_config_word(pvt->branch_1, AMBPRESENT_1, 1149 pci_read_config_word(pvt->branch_1, AMBPRESENT_1,
1152 &pvt->b1_ambpresent1); 1150 &pvt->b1_ambpresent1);
1153 debugf2("\t\tAMB-Branch 1-present1 0x%x:\n", 1151 edac_dbg(2, "\t\tAMB-Branch 1-present1 0x%x:\n",
1154 pvt->b1_ambpresent1); 1152 pvt->b1_ambpresent1);
1155 } 1153 }
1156 1154
1157 /* Go and determine the size of each DIMM and place in an 1155 /* Go and determine the size of each DIMM and place in an
@@ -1203,10 +1201,9 @@ static int i5400_init_dimms(struct mem_ctl_info *mci)
1203 1201
1204 size_mb = pvt->dimm_info[slot][channel].megabytes; 1202 size_mb = pvt->dimm_info[slot][channel].megabytes;
1205 1203
1206 debugf2("%s: dimm%zd (branch %d channel %d slot %d): %d.%03d GB\n", 1204 edac_dbg(2, "dimm (branch %d channel %d slot %d): %d.%03d GB\n",
1207 __func__, dimm - mci->dimms, 1205 channel / 2, channel % 2, slot,
1208 channel / 2, channel % 2, slot, 1206 size_mb / 1000, size_mb % 1000);
1209 size_mb / 1000, size_mb % 1000);
1210 1207
1211 dimm->nr_pages = size_mb << 8; 1208 dimm->nr_pages = size_mb << 8;
1212 dimm->grain = 8; 1209 dimm->grain = 8;
@@ -1227,7 +1224,7 @@ static int i5400_init_dimms(struct mem_ctl_info *mci)
1227 * With such single-DIMM mode, the SDCC algorithm degrades to SECDEC+. 1224 * With such single-DIMM mode, the SDCC algorithm degrades to SECDEC+.
1228 */ 1225 */
1229 if (ndimms == 1) 1226 if (ndimms == 1)
1230 mci->dimms[0].edac_mode = EDAC_SECDED; 1227 mci->dimms[0]->edac_mode = EDAC_SECDED;
1231 1228
1232 return (ndimms == 0); 1229 return (ndimms == 0);
1233} 1230}
@@ -1270,10 +1267,9 @@ static int i5400_probe1(struct pci_dev *pdev, int dev_idx)
1270 if (dev_idx >= ARRAY_SIZE(i5400_devs)) 1267 if (dev_idx >= ARRAY_SIZE(i5400_devs))
1271 return -EINVAL; 1268 return -EINVAL;
1272 1269
1273 debugf0("MC: %s: %s(), pdev bus %u dev=0x%x fn=0x%x\n", 1270 edac_dbg(0, "MC: pdev bus %u dev=0x%x fn=0x%x\n",
1274 __FILE__, __func__, 1271 pdev->bus->number,
1275 pdev->bus->number, 1272 PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));
1276 PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));
1277 1273
1278 /* We only are looking for func 0 of the set */ 1274 /* We only are looking for func 0 of the set */
1279 if (PCI_FUNC(pdev->devfn) != 0) 1275 if (PCI_FUNC(pdev->devfn) != 0)
@@ -1297,9 +1293,9 @@ static int i5400_probe1(struct pci_dev *pdev, int dev_idx)
1297 if (mci == NULL) 1293 if (mci == NULL)
1298 return -ENOMEM; 1294 return -ENOMEM;
1299 1295
1300 debugf0("MC: %s: %s(): mci = %p\n", __FILE__, __func__, mci); 1296 edac_dbg(0, "MC: mci = %p\n", mci);
1301 1297
1302 mci->dev = &pdev->dev; /* record ptr to the generic device */ 1298 mci->pdev = &pdev->dev; /* record ptr to the generic device */
1303 1299
1304 pvt = mci->pvt_info; 1300 pvt = mci->pvt_info;
1305 pvt->system_address = pdev; /* Record this device in our private */ 1301 pvt->system_address = pdev; /* Record this device in our private */
@@ -1329,19 +1325,16 @@ static int i5400_probe1(struct pci_dev *pdev, int dev_idx)
1329 /* initialize the MC control structure 'dimms' table 1325 /* initialize the MC control structure 'dimms' table
1330 * with the mapping and control information */ 1326 * with the mapping and control information */
1331 if (i5400_init_dimms(mci)) { 1327 if (i5400_init_dimms(mci)) {
1332 debugf0("MC: Setting mci->edac_cap to EDAC_FLAG_NONE\n" 1328 edac_dbg(0, "MC: Setting mci->edac_cap to EDAC_FLAG_NONE because i5400_init_dimms() returned nonzero value\n");
1333 " because i5400_init_dimms() returned nonzero "
1334 "value\n");
1335 mci->edac_cap = EDAC_FLAG_NONE; /* no dimms found */ 1329 mci->edac_cap = EDAC_FLAG_NONE; /* no dimms found */
1336 } else { 1330 } else {
1337 debugf1("MC: Enable error reporting now\n"); 1331 edac_dbg(1, "MC: Enable error reporting now\n");
1338 i5400_enable_error_reporting(mci); 1332 i5400_enable_error_reporting(mci);
1339 } 1333 }
1340 1334
1341 /* add this new MC control structure to EDAC's list of MCs */ 1335 /* add this new MC control structure to EDAC's list of MCs */
1342 if (edac_mc_add_mc(mci)) { 1336 if (edac_mc_add_mc(mci)) {
1343 debugf0("MC: %s: %s(): failed edac_mc_add_mc()\n", 1337 edac_dbg(0, "MC: failed edac_mc_add_mc()\n");
1344 __FILE__, __func__);
1345 /* FIXME: perhaps some code should go here that disables error 1338 /* FIXME: perhaps some code should go here that disables error
1346 * reporting if we just enabled it 1339 * reporting if we just enabled it
1347 */ 1340 */
@@ -1385,7 +1378,7 @@ static int __devinit i5400_init_one(struct pci_dev *pdev,
1385{ 1378{
1386 int rc; 1379 int rc;
1387 1380
1388 debugf0("MC: %s: %s()\n", __FILE__, __func__); 1381 edac_dbg(0, "MC:\n");
1389 1382
1390 /* wake up device */ 1383 /* wake up device */
1391 rc = pci_enable_device(pdev); 1384 rc = pci_enable_device(pdev);
@@ -1404,7 +1397,7 @@ static void __devexit i5400_remove_one(struct pci_dev *pdev)
1404{ 1397{
1405 struct mem_ctl_info *mci; 1398 struct mem_ctl_info *mci;
1406 1399
1407 debugf0("%s: %s()\n", __FILE__, __func__); 1400 edac_dbg(0, "\n");
1408 1401
1409 if (i5400_pci) 1402 if (i5400_pci)
1410 edac_pci_release_generic_ctl(i5400_pci); 1403 edac_pci_release_generic_ctl(i5400_pci);
@@ -1450,7 +1443,7 @@ static int __init i5400_init(void)
1450{ 1443{
1451 int pci_rc; 1444 int pci_rc;
1452 1445
1453 debugf2("MC: %s: %s()\n", __FILE__, __func__); 1446 edac_dbg(2, "MC:\n");
1454 1447
1455 /* Ensure that the OPSTATE is set correctly for POLL or NMI */ 1448 /* Ensure that the OPSTATE is set correctly for POLL or NMI */
1456 opstate_init(); 1449 opstate_init();
@@ -1466,7 +1459,7 @@ static int __init i5400_init(void)
1466 */ 1459 */
1467static void __exit i5400_exit(void) 1460static void __exit i5400_exit(void)
1468{ 1461{
1469 debugf2("MC: %s: %s()\n", __FILE__, __func__); 1462 edac_dbg(2, "MC:\n");
1470 pci_unregister_driver(&i5400_driver); 1463 pci_unregister_driver(&i5400_driver);
1471} 1464}
1472 1465
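One more i5400 change worth flagging: filling pvt->ambase used to go through a cast, and the second read's destination -- ((u32 *)&pvt->ambase) + sizeof(u32) -- actually lands 16 bytes past the field, because pointer arithmetic on a u32 * moves in 4-byte steps. The anonymous union added in this diff gives each 32-bit half of the AMB BAR a real lvalue, so both config reads become plain, as sketched below from the new code (the layout assumes the little-endian x86 systems this driver targets):

    union {
            u64 ambase;                     /* AMB BAR as one 64-bit value */
            struct {
                    u32 ambase_bottom;      /* low dword, at AMBASE        */
                    u32 ambase_top;         /* high dword, at AMBASE + 4   */
            } u __packed;
    };

    /* Each read now targets a plain u32: no casts, no pointer math.  */
    pci_read_config_dword(pvt->system_address, AMBASE,
                          &pvt->u.ambase_bottom);
    pci_read_config_dword(pvt->system_address, AMBASE + sizeof(u32),
                          &pvt->u.ambase_top);
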
diff --git a/drivers/edac/i7300_edac.c b/drivers/edac/i7300_edac.c
index 97c22fd650ee..a09d0667f72a 100644
--- a/drivers/edac/i7300_edac.c
+++ b/drivers/edac/i7300_edac.c
@@ -182,24 +182,6 @@ static const u16 mtr_regs[MAX_SLOTS] = {
182#define MTR_DIMM_COLS(mtr) ((mtr) & 0x3) 182#define MTR_DIMM_COLS(mtr) ((mtr) & 0x3)
183#define MTR_DIMM_COLS_ADDR_BITS(mtr) (MTR_DIMM_COLS(mtr) + 10) 183#define MTR_DIMM_COLS_ADDR_BITS(mtr) (MTR_DIMM_COLS(mtr) + 10)
184 184
185#ifdef CONFIG_EDAC_DEBUG
186/* MTR NUMROW */
187static const char *numrow_toString[] = {
188 "8,192 - 13 rows",
189 "16,384 - 14 rows",
190 "32,768 - 15 rows",
191 "65,536 - 16 rows"
192};
193
194/* MTR NUMCOL */
195static const char *numcol_toString[] = {
196 "1,024 - 10 columns",
197 "2,048 - 11 columns",
198 "4,096 - 12 columns",
199 "reserved"
200};
201#endif
202
203/************************************************ 185/************************************************
204 * i7300 Register definitions for error detection 186 * i7300 Register definitions for error detection
205 ************************************************/ 187 ************************************************/
@@ -467,10 +449,10 @@ static void i7300_process_fbd_error(struct mem_ctl_info *mci)
467 "Bank=%d RAS=%d CAS=%d Err=0x%lx (%s))", 449 "Bank=%d RAS=%d CAS=%d Err=0x%lx (%s))",
468 bank, ras, cas, errors, specific); 450 bank, ras, cas, errors, specific);
469 451
470 edac_mc_handle_error(HW_EVENT_ERR_FATAL, mci, 0, 0, 0, 452 edac_mc_handle_error(HW_EVENT_ERR_FATAL, mci, 1, 0, 0, 0,
471 branch, -1, rank, 453 branch, -1, rank,
472 is_wr ? "Write error" : "Read error", 454 is_wr ? "Write error" : "Read error",
473 pvt->tmp_prt_buffer, NULL); 455 pvt->tmp_prt_buffer);
474 456
475 } 457 }
476 458
@@ -513,11 +495,11 @@ static void i7300_process_fbd_error(struct mem_ctl_info *mci)
513 "DRAM-Bank=%d RAS=%d CAS=%d, Err=0x%lx (%s))", 495 "DRAM-Bank=%d RAS=%d CAS=%d, Err=0x%lx (%s))",
514 bank, ras, cas, errors, specific); 496 bank, ras, cas, errors, specific);
515 497
516 edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 0, 0, 498 edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1, 0, 0,
517 syndrome, 499 syndrome,
518 branch >> 1, channel % 2, rank, 500 branch >> 1, channel % 2, rank,
519 is_wr ? "Write error" : "Read error", 501 is_wr ? "Write error" : "Read error",
520 pvt->tmp_prt_buffer, NULL); 502 pvt->tmp_prt_buffer);
521 } 503 }
522 return; 504 return;
523} 505}
@@ -614,9 +596,8 @@ static int decode_mtr(struct i7300_pvt *pvt,
614 mtr = pvt->mtr[slot][branch]; 596 mtr = pvt->mtr[slot][branch];
615 ans = MTR_DIMMS_PRESENT(mtr) ? 1 : 0; 597 ans = MTR_DIMMS_PRESENT(mtr) ? 1 : 0;
616 598
617 debugf2("\tMTR%d CH%d: DIMMs are %s (mtr)\n", 599 edac_dbg(2, "\tMTR%d CH%d: DIMMs are %sPresent (mtr)\n",
618 slot, channel, 600 slot, channel, ans ? "" : "NOT ");
619 ans ? "Present" : "NOT Present");
620 601
621 /* Determine if there is a DIMM present in this DIMM slot */ 602 /* Determine if there is a DIMM present in this DIMM slot */
622 if (!ans) 603 if (!ans)
@@ -638,16 +619,25 @@ static int decode_mtr(struct i7300_pvt *pvt,
638 619
639 dinfo->megabytes = 1 << addrBits; 620 dinfo->megabytes = 1 << addrBits;
640 621
641 debugf2("\t\tWIDTH: x%d\n", MTR_DRAM_WIDTH(mtr)); 622 edac_dbg(2, "\t\tWIDTH: x%d\n", MTR_DRAM_WIDTH(mtr));
642 623
643 debugf2("\t\tELECTRICAL THROTTLING is %s\n", 624 edac_dbg(2, "\t\tELECTRICAL THROTTLING is %s\n",
644 MTR_DIMMS_ETHROTTLE(mtr) ? "enabled" : "disabled"); 625 MTR_DIMMS_ETHROTTLE(mtr) ? "enabled" : "disabled");
645 626
646 debugf2("\t\tNUMBANK: %d bank(s)\n", MTR_DRAM_BANKS(mtr)); 627 edac_dbg(2, "\t\tNUMBANK: %d bank(s)\n", MTR_DRAM_BANKS(mtr));
647 debugf2("\t\tNUMRANK: %s\n", MTR_DIMM_RANKS(mtr) ? "double" : "single"); 628 edac_dbg(2, "\t\tNUMRANK: %s\n",
648 debugf2("\t\tNUMROW: %s\n", numrow_toString[MTR_DIMM_ROWS(mtr)]); 629 MTR_DIMM_RANKS(mtr) ? "double" : "single");
649 debugf2("\t\tNUMCOL: %s\n", numcol_toString[MTR_DIMM_COLS(mtr)]); 630 edac_dbg(2, "\t\tNUMROW: %s\n",
650 debugf2("\t\tSIZE: %d MB\n", dinfo->megabytes); 631 MTR_DIMM_ROWS(mtr) == 0 ? "8,192 - 13 rows" :
632 MTR_DIMM_ROWS(mtr) == 1 ? "16,384 - 14 rows" :
633 MTR_DIMM_ROWS(mtr) == 2 ? "32,768 - 15 rows" :
634 "65,536 - 16 rows");
635 edac_dbg(2, "\t\tNUMCOL: %s\n",
636 MTR_DIMM_COLS(mtr) == 0 ? "1,024 - 10 columns" :
637 MTR_DIMM_COLS(mtr) == 1 ? "2,048 - 11 columns" :
638 MTR_DIMM_COLS(mtr) == 2 ? "4,096 - 12 columns" :
639 "reserved");
640 edac_dbg(2, "\t\tSIZE: %d MB\n", dinfo->megabytes);
651 641
652 /* 642 /*
653 * The type of error detection actually depends of the 643 * The type of error detection actually depends of the
@@ -663,9 +653,9 @@ static int decode_mtr(struct i7300_pvt *pvt,
663 dimm->mtype = MEM_FB_DDR2; 653 dimm->mtype = MEM_FB_DDR2;
664 if (IS_SINGLE_MODE(pvt->mc_settings_a)) { 654 if (IS_SINGLE_MODE(pvt->mc_settings_a)) {
665 dimm->edac_mode = EDAC_SECDED; 655 dimm->edac_mode = EDAC_SECDED;
666 debugf2("\t\tECC code is 8-byte-over-32-byte SECDED+ code\n"); 656 edac_dbg(2, "\t\tECC code is 8-byte-over-32-byte SECDED+ code\n");
667 } else { 657 } else {
668 debugf2("\t\tECC code is on Lockstep mode\n"); 658 edac_dbg(2, "\t\tECC code is on Lockstep mode\n");
669 if (MTR_DRAM_WIDTH(mtr) == 8) 659 if (MTR_DRAM_WIDTH(mtr) == 8)
670 dimm->edac_mode = EDAC_S8ECD8ED; 660 dimm->edac_mode = EDAC_S8ECD8ED;
671 else 661 else
@@ -674,9 +664,9 @@ static int decode_mtr(struct i7300_pvt *pvt,
674 664
675 /* ask what device type on this row */ 665 /* ask what device type on this row */
676 if (MTR_DRAM_WIDTH(mtr) == 8) { 666 if (MTR_DRAM_WIDTH(mtr) == 8) {
677 debugf2("\t\tScrub algorithm for x8 is on %s mode\n", 667 edac_dbg(2, "\t\tScrub algorithm for x8 is on %s mode\n",
678 IS_SCRBALGO_ENHANCED(pvt->mc_settings) ? 668 IS_SCRBALGO_ENHANCED(pvt->mc_settings) ?
679 "enhanced" : "normal"); 669 "enhanced" : "normal");
680 670
681 dimm->dtype = DEV_X8; 671 dimm->dtype = DEV_X8;
682 } else 672 } else
@@ -710,14 +700,14 @@ static void print_dimm_size(struct i7300_pvt *pvt)
710 p += n; 700 p += n;
711 space -= n; 701 space -= n;
712 } 702 }
713 debugf2("%s\n", pvt->tmp_prt_buffer); 703 edac_dbg(2, "%s\n", pvt->tmp_prt_buffer);
714 p = pvt->tmp_prt_buffer; 704 p = pvt->tmp_prt_buffer;
715 space = PAGE_SIZE; 705 space = PAGE_SIZE;
716 n = snprintf(p, space, "-------------------------------" 706 n = snprintf(p, space, "-------------------------------"
717 "------------------------------"); 707 "------------------------------");
718 p += n; 708 p += n;
719 space -= n; 709 space -= n;
720 debugf2("%s\n", pvt->tmp_prt_buffer); 710 edac_dbg(2, "%s\n", pvt->tmp_prt_buffer);
721 p = pvt->tmp_prt_buffer; 711 p = pvt->tmp_prt_buffer;
722 space = PAGE_SIZE; 712 space = PAGE_SIZE;
723 713
@@ -733,7 +723,7 @@ static void print_dimm_size(struct i7300_pvt *pvt)
733 space -= n; 723 space -= n;
734 } 724 }
735 725
736 debugf2("%s\n", pvt->tmp_prt_buffer); 726 edac_dbg(2, "%s\n", pvt->tmp_prt_buffer);
737 p = pvt->tmp_prt_buffer; 727 p = pvt->tmp_prt_buffer;
738 space = PAGE_SIZE; 728 space = PAGE_SIZE;
739 } 729 }
@@ -742,7 +732,7 @@ static void print_dimm_size(struct i7300_pvt *pvt)
742 "------------------------------"); 732 "------------------------------");
743 p += n; 733 p += n;
744 space -= n; 734 space -= n;
745 debugf2("%s\n", pvt->tmp_prt_buffer); 735 edac_dbg(2, "%s\n", pvt->tmp_prt_buffer);
746 p = pvt->tmp_prt_buffer; 736 p = pvt->tmp_prt_buffer;
747 space = PAGE_SIZE; 737 space = PAGE_SIZE;
748#endif 738#endif
@@ -765,7 +755,7 @@ static int i7300_init_csrows(struct mem_ctl_info *mci)
765 755
766 pvt = mci->pvt_info; 756 pvt = mci->pvt_info;
767 757
768 debugf2("Memory Technology Registers:\n"); 758 edac_dbg(2, "Memory Technology Registers:\n");
769 759
770 /* Get the AMB present registers for the four channels */ 760 /* Get the AMB present registers for the four channels */
771 for (branch = 0; branch < MAX_BRANCHES; branch++) { 761 for (branch = 0; branch < MAX_BRANCHES; branch++) {
@@ -774,15 +764,15 @@ static int i7300_init_csrows(struct mem_ctl_info *mci)
774 pci_read_config_word(pvt->pci_dev_2x_0_fbd_branch[branch], 764 pci_read_config_word(pvt->pci_dev_2x_0_fbd_branch[branch],
775 AMBPRESENT_0, 765 AMBPRESENT_0,
776 &pvt->ambpresent[channel]); 766 &pvt->ambpresent[channel]);
777 debugf2("\t\tAMB-present CH%d = 0x%x:\n", 767 edac_dbg(2, "\t\tAMB-present CH%d = 0x%x:\n",
778 channel, pvt->ambpresent[channel]); 768 channel, pvt->ambpresent[channel]);
779 769
780 channel = to_channel(1, branch); 770 channel = to_channel(1, branch);
781 pci_read_config_word(pvt->pci_dev_2x_0_fbd_branch[branch], 771 pci_read_config_word(pvt->pci_dev_2x_0_fbd_branch[branch],
782 AMBPRESENT_1, 772 AMBPRESENT_1,
783 &pvt->ambpresent[channel]); 773 &pvt->ambpresent[channel]);
784 debugf2("\t\tAMB-present CH%d = 0x%x:\n", 774 edac_dbg(2, "\t\tAMB-present CH%d = 0x%x:\n",
785 channel, pvt->ambpresent[channel]); 775 channel, pvt->ambpresent[channel]);
786 } 776 }
787 777
788 /* Get the set of MTR[0-7] regs by each branch */ 778 /* Get the set of MTR[0-7] regs by each branch */
@@ -824,12 +814,11 @@ static int i7300_init_csrows(struct mem_ctl_info *mci)
824static void decode_mir(int mir_no, u16 mir[MAX_MIR]) 814static void decode_mir(int mir_no, u16 mir[MAX_MIR])
825{ 815{
826 if (mir[mir_no] & 3) 816 if (mir[mir_no] & 3)
827 debugf2("MIR%d: limit= 0x%x Branch(es) that participate:" 817 edac_dbg(2, "MIR%d: limit= 0x%x Branch(es) that participate: %s %s\n",
828 " %s %s\n", 818 mir_no,
829 mir_no, 819 (mir[mir_no] >> 4) & 0xfff,
830 (mir[mir_no] >> 4) & 0xfff, 820 (mir[mir_no] & 1) ? "B0" : "",
831 (mir[mir_no] & 1) ? "B0" : "", 821 (mir[mir_no] & 2) ? "B1" : "");
832 (mir[mir_no] & 2) ? "B1" : "");
833} 822}
834 823
835/** 824/**
@@ -849,17 +838,17 @@ static int i7300_get_mc_regs(struct mem_ctl_info *mci)
849 pci_read_config_dword(pvt->pci_dev_16_0_fsb_ctlr, AMBASE, 838 pci_read_config_dword(pvt->pci_dev_16_0_fsb_ctlr, AMBASE,
850 (u32 *) &pvt->ambase); 839 (u32 *) &pvt->ambase);
851 840
852 debugf2("AMBASE= 0x%lx\n", (long unsigned int)pvt->ambase); 841 edac_dbg(2, "AMBASE= 0x%lx\n", (long unsigned int)pvt->ambase);
853 842
854 /* Get the Branch Map regs */ 843 /* Get the Branch Map regs */
855 pci_read_config_word(pvt->pci_dev_16_1_fsb_addr_map, TOLM, &pvt->tolm); 844 pci_read_config_word(pvt->pci_dev_16_1_fsb_addr_map, TOLM, &pvt->tolm);
856 pvt->tolm >>= 12; 845 pvt->tolm >>= 12;
857 debugf2("TOLM (number of 256M regions) =%u (0x%x)\n", pvt->tolm, 846 edac_dbg(2, "TOLM (number of 256M regions) =%u (0x%x)\n",
858 pvt->tolm); 847 pvt->tolm, pvt->tolm);
859 848
860 actual_tolm = (u32) ((1000l * pvt->tolm) >> (30 - 28)); 849 actual_tolm = (u32) ((1000l * pvt->tolm) >> (30 - 28));
861 debugf2("Actual TOLM byte addr=%u.%03u GB (0x%x)\n", 850 edac_dbg(2, "Actual TOLM byte addr=%u.%03u GB (0x%x)\n",
862 actual_tolm/1000, actual_tolm % 1000, pvt->tolm << 28); 851 actual_tolm/1000, actual_tolm % 1000, pvt->tolm << 28);
863 852
864 /* Get memory controller settings */ 853 /* Get memory controller settings */
865 pci_read_config_dword(pvt->pci_dev_16_1_fsb_addr_map, MC_SETTINGS, 854 pci_read_config_dword(pvt->pci_dev_16_1_fsb_addr_map, MC_SETTINGS,
@@ -868,15 +857,15 @@ static int i7300_get_mc_regs(struct mem_ctl_info *mci)
868 &pvt->mc_settings_a); 857 &pvt->mc_settings_a);
869 858
870 if (IS_SINGLE_MODE(pvt->mc_settings_a)) 859 if (IS_SINGLE_MODE(pvt->mc_settings_a))
871 debugf0("Memory controller operating on single mode\n"); 860 edac_dbg(0, "Memory controller operating on single mode\n");
872 else 861 else
873 debugf0("Memory controller operating on %s mode\n", 862 edac_dbg(0, "Memory controller operating on %smirrored mode\n",
874 IS_MIRRORED(pvt->mc_settings) ? "mirrored" : "non-mirrored"); 863 IS_MIRRORED(pvt->mc_settings) ? "" : "non-");
875 864
876 debugf0("Error detection is %s\n", 865 edac_dbg(0, "Error detection is %s\n",
877 IS_ECC_ENABLED(pvt->mc_settings) ? "enabled" : "disabled"); 866 IS_ECC_ENABLED(pvt->mc_settings) ? "enabled" : "disabled");
878 debugf0("Retry is %s\n", 867 edac_dbg(0, "Retry is %s\n",
879 IS_RETRY_ENABLED(pvt->mc_settings) ? "enabled" : "disabled"); 868 IS_RETRY_ENABLED(pvt->mc_settings) ? "enabled" : "disabled");
880 869
881 /* Get Memory Interleave Range registers */ 870 /* Get Memory Interleave Range registers */
882 pci_read_config_word(pvt->pci_dev_16_1_fsb_addr_map, MIR0, 871 pci_read_config_word(pvt->pci_dev_16_1_fsb_addr_map, MIR0,
@@ -970,18 +959,18 @@ static int __devinit i7300_get_devices(struct mem_ctl_info *mci)
970 } 959 }
971 } 960 }
972 961
973 debugf1("System Address, processor bus- PCI Bus ID: %s %x:%x\n", 962 edac_dbg(1, "System Address, processor bus- PCI Bus ID: %s %x:%x\n",
974 pci_name(pvt->pci_dev_16_0_fsb_ctlr), 963 pci_name(pvt->pci_dev_16_0_fsb_ctlr),
975 pvt->pci_dev_16_0_fsb_ctlr->vendor, 964 pvt->pci_dev_16_0_fsb_ctlr->vendor,
976 pvt->pci_dev_16_0_fsb_ctlr->device); 965 pvt->pci_dev_16_0_fsb_ctlr->device);
977 debugf1("Branchmap, control and errors - PCI Bus ID: %s %x:%x\n", 966 edac_dbg(1, "Branchmap, control and errors - PCI Bus ID: %s %x:%x\n",
978 pci_name(pvt->pci_dev_16_1_fsb_addr_map), 967 pci_name(pvt->pci_dev_16_1_fsb_addr_map),
979 pvt->pci_dev_16_1_fsb_addr_map->vendor, 968 pvt->pci_dev_16_1_fsb_addr_map->vendor,
980 pvt->pci_dev_16_1_fsb_addr_map->device); 969 pvt->pci_dev_16_1_fsb_addr_map->device);
981 debugf1("FSB Error Regs - PCI Bus ID: %s %x:%x\n", 970 edac_dbg(1, "FSB Error Regs - PCI Bus ID: %s %x:%x\n",
982 pci_name(pvt->pci_dev_16_2_fsb_err_regs), 971 pci_name(pvt->pci_dev_16_2_fsb_err_regs),
983 pvt->pci_dev_16_2_fsb_err_regs->vendor, 972 pvt->pci_dev_16_2_fsb_err_regs->vendor,
984 pvt->pci_dev_16_2_fsb_err_regs->device); 973 pvt->pci_dev_16_2_fsb_err_regs->device);
985 974
986 pvt->pci_dev_2x_0_fbd_branch[0] = pci_get_device(PCI_VENDOR_ID_INTEL, 975 pvt->pci_dev_2x_0_fbd_branch[0] = pci_get_device(PCI_VENDOR_ID_INTEL,
987 PCI_DEVICE_ID_INTEL_I7300_MCH_FB0, 976 PCI_DEVICE_ID_INTEL_I7300_MCH_FB0,
@@ -1032,10 +1021,9 @@ static int __devinit i7300_init_one(struct pci_dev *pdev,
1032 if (rc == -EIO) 1021 if (rc == -EIO)
1033 return rc; 1022 return rc;
1034 1023
1035 debugf0("MC: " __FILE__ ": %s(), pdev bus %u dev=0x%x fn=0x%x\n", 1024 edac_dbg(0, "MC: pdev bus %u dev=0x%x fn=0x%x\n",
1036 __func__, 1025 pdev->bus->number,
1037 pdev->bus->number, 1026 PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));
1038 PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));
1039 1027
1040 /* We only are looking for func 0 of the set */ 1028 /* We only are looking for func 0 of the set */
1041 if (PCI_FUNC(pdev->devfn) != 0) 1029 if (PCI_FUNC(pdev->devfn) != 0)
@@ -1055,9 +1043,9 @@ static int __devinit i7300_init_one(struct pci_dev *pdev,
1055 if (mci == NULL) 1043 if (mci == NULL)
1056 return -ENOMEM; 1044 return -ENOMEM;
1057 1045
1058 debugf0("MC: " __FILE__ ": %s(): mci = %p\n", __func__, mci); 1046 edac_dbg(0, "MC: mci = %p\n", mci);
1059 1047
1060 mci->dev = &pdev->dev; /* record ptr to the generic device */ 1048 mci->pdev = &pdev->dev; /* record ptr to the generic device */
1061 1049
1062 pvt = mci->pvt_info; 1050 pvt = mci->pvt_info;
1063 pvt->pci_dev_16_0_fsb_ctlr = pdev; /* Record this device in our private */ 1051 pvt->pci_dev_16_0_fsb_ctlr = pdev; /* Record this device in our private */
@@ -1088,19 +1076,16 @@ static int __devinit i7300_init_one(struct pci_dev *pdev,
1088 /* initialize the MC control structure 'csrows' table 1076 /* initialize the MC control structure 'csrows' table
1089 * with the mapping and control information */ 1077 * with the mapping and control information */
1090 if (i7300_get_mc_regs(mci)) { 1078 if (i7300_get_mc_regs(mci)) {
1091 debugf0("MC: Setting mci->edac_cap to EDAC_FLAG_NONE\n" 1079 edac_dbg(0, "MC: Setting mci->edac_cap to EDAC_FLAG_NONE because i7300_init_csrows() returned nonzero value\n");
1092 " because i7300_init_csrows() returned nonzero "
1093 "value\n");
1094 mci->edac_cap = EDAC_FLAG_NONE; /* no csrows found */ 1080 mci->edac_cap = EDAC_FLAG_NONE; /* no csrows found */
1095 } else { 1081 } else {
1096 debugf1("MC: Enable error reporting now\n"); 1082 edac_dbg(1, "MC: Enable error reporting now\n");
1097 i7300_enable_error_reporting(mci); 1083 i7300_enable_error_reporting(mci);
1098 } 1084 }
1099 1085
1100 /* add this new MC control structure to EDAC's list of MCs */ 1086 /* add this new MC control structure to EDAC's list of MCs */
1101 if (edac_mc_add_mc(mci)) { 1087 if (edac_mc_add_mc(mci)) {
1102 debugf0("MC: " __FILE__ 1088 edac_dbg(0, "MC: failed edac_mc_add_mc()\n");
1103 ": %s(): failed edac_mc_add_mc()\n", __func__);
1104 /* FIXME: perhaps some code should go here that disables error 1089 /* FIXME: perhaps some code should go here that disables error
1105 * reporting if we just enabled it 1090 * reporting if we just enabled it
1106 */ 1091 */
@@ -1142,7 +1127,7 @@ static void __devexit i7300_remove_one(struct pci_dev *pdev)
1142 struct mem_ctl_info *mci; 1127 struct mem_ctl_info *mci;
1143 char *tmp; 1128 char *tmp;
1144 1129
1145 debugf0(__FILE__ ": %s()\n", __func__); 1130 edac_dbg(0, "\n");
1146 1131
1147 if (i7300_pci) 1132 if (i7300_pci)
1148 edac_pci_release_generic_ctl(i7300_pci); 1133 edac_pci_release_generic_ctl(i7300_pci);
@@ -1189,7 +1174,7 @@ static int __init i7300_init(void)
1189{ 1174{
1190 int pci_rc; 1175 int pci_rc;
1191 1176
1192 debugf2("MC: " __FILE__ ": %s()\n", __func__); 1177 edac_dbg(2, "\n");
1193 1178
1194 /* Ensure that the OPSTATE is set correctly for POLL or NMI */ 1179 /* Ensure that the OPSTATE is set correctly for POLL or NMI */
1195 opstate_init(); 1180 opstate_init();
@@ -1204,7 +1189,7 @@ static int __init i7300_init(void)
1204 */ 1189 */
1205static void __exit i7300_exit(void) 1190static void __exit i7300_exit(void)
1206{ 1191{
1207 debugf2("MC: " __FILE__ ": %s()\n", __func__); 1192 edac_dbg(2, "\n");
1208 pci_unregister_driver(&i7300_driver); 1193 pci_unregister_driver(&i7300_driver);
1209} 1194}
1210 1195
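A small pattern that repeats in the i7300 and i5400 hunks: rather than splitting a message across string literals or keeping two full variants, the new debug calls use one format string per message and feed in an empty-or-"NOT " prefix, which keeps the text greppable in the source. For example, from decode_mtr() above:

    edac_dbg(2, "\tMTR%d CH%d: DIMMs are %sPresent (mtr)\n",
             slot, channel, ans ? "" : "NOT ");
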
diff --git a/drivers/edac/i7core_edac.c b/drivers/edac/i7core_edac.c
index a499c7ed820a..3672101023bd 100644
--- a/drivers/edac/i7core_edac.c
+++ b/drivers/edac/i7core_edac.c
@@ -248,6 +248,8 @@ struct i7core_dev {
248}; 248};
249 249
250struct i7core_pvt { 250struct i7core_pvt {
251 struct device *addrmatch_dev, *chancounts_dev;
252
251 struct pci_dev *pci_noncore; 253 struct pci_dev *pci_noncore;
252 struct pci_dev *pci_mcr[MAX_MCR_FUNC + 1]; 254 struct pci_dev *pci_mcr[MAX_MCR_FUNC + 1];
253 struct pci_dev *pci_ch[NUM_CHANS][MAX_CHAN_FUNC + 1]; 255 struct pci_dev *pci_ch[NUM_CHANS][MAX_CHAN_FUNC + 1];
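The two struct device pointers added to i7core_pvt here presumably back the sysfs rework later in the i7core diff, where the injection address-match and channel-counter files appear to move from hand-built kobjects onto child devices of the mci. The registration code is outside this excerpt, so the following is only a hedged sketch of the general driver-core pattern such a child device would use; the names (inject_addrmatch, addrmatch_grp, and so on) are illustrative, not necessarily what the driver picks:

    static struct attribute *addrmatch_attrs[] = {
            &dev_attr_channel.attr,    /* DEVICE_ATTRs declared later in this diff */
            &dev_attr_dimm.attr,
            NULL
    };

    static const struct attribute_group addrmatch_grp = {
            .attrs = addrmatch_attrs,
    };

    static const struct attribute_group *addrmatch_groups[] = {
            &addrmatch_grp,
            NULL
    };

    /* Child device parented to the mci; a real version also needs a
     * release() callback before device_add() can safely be used.     */
    pvt->addrmatch_dev = kzalloc(sizeof(*pvt->addrmatch_dev), GFP_KERNEL);
    if (!pvt->addrmatch_dev)
            return -ENOMEM;
    device_initialize(pvt->addrmatch_dev);
    pvt->addrmatch_dev->parent = &mci->dev;
    pvt->addrmatch_dev->groups = addrmatch_groups;
    dev_set_name(pvt->addrmatch_dev, "inject_addrmatch");
    rc = device_add(pvt->addrmatch_dev);
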
@@ -514,29 +516,28 @@ static int get_dimm_config(struct mem_ctl_info *mci)
514 pci_read_config_dword(pdev, MC_MAX_DOD, &pvt->info.max_dod); 516 pci_read_config_dword(pdev, MC_MAX_DOD, &pvt->info.max_dod);
515 pci_read_config_dword(pdev, MC_CHANNEL_MAPPER, &pvt->info.ch_map); 517 pci_read_config_dword(pdev, MC_CHANNEL_MAPPER, &pvt->info.ch_map);
516 518
517 debugf0("QPI %d control=0x%08x status=0x%08x dod=0x%08x map=0x%08x\n", 519 edac_dbg(0, "QPI %d control=0x%08x status=0x%08x dod=0x%08x map=0x%08x\n",
518 pvt->i7core_dev->socket, pvt->info.mc_control, pvt->info.mc_status, 520 pvt->i7core_dev->socket, pvt->info.mc_control,
519 pvt->info.max_dod, pvt->info.ch_map); 521 pvt->info.mc_status, pvt->info.max_dod, pvt->info.ch_map);
520 522
521 if (ECC_ENABLED(pvt)) { 523 if (ECC_ENABLED(pvt)) {
522 debugf0("ECC enabled with x%d SDCC\n", ECCx8(pvt) ? 8 : 4); 524 edac_dbg(0, "ECC enabled with x%d SDCC\n", ECCx8(pvt) ? 8 : 4);
523 if (ECCx8(pvt)) 525 if (ECCx8(pvt))
524 mode = EDAC_S8ECD8ED; 526 mode = EDAC_S8ECD8ED;
525 else 527 else
526 mode = EDAC_S4ECD4ED; 528 mode = EDAC_S4ECD4ED;
527 } else { 529 } else {
528 debugf0("ECC disabled\n"); 530 edac_dbg(0, "ECC disabled\n");
529 mode = EDAC_NONE; 531 mode = EDAC_NONE;
530 } 532 }
531 533
532 /* FIXME: need to handle the error codes */ 534 /* FIXME: need to handle the error codes */
533 debugf0("DOD Max limits: DIMMS: %d, %d-ranked, %d-banked " 535 edac_dbg(0, "DOD Max limits: DIMMS: %d, %d-ranked, %d-banked x%x x 0x%x\n",
534 "x%x x 0x%x\n", 536 numdimms(pvt->info.max_dod),
535 numdimms(pvt->info.max_dod), 537 numrank(pvt->info.max_dod >> 2),
536 numrank(pvt->info.max_dod >> 2), 538 numbank(pvt->info.max_dod >> 4),
537 numbank(pvt->info.max_dod >> 4), 539 numrow(pvt->info.max_dod >> 6),
538 numrow(pvt->info.max_dod >> 6), 540 numcol(pvt->info.max_dod >> 9));
539 numcol(pvt->info.max_dod >> 9));
540 541
541 for (i = 0; i < NUM_CHANS; i++) { 542 for (i = 0; i < NUM_CHANS; i++) {
542 u32 data, dimm_dod[3], value[8]; 543 u32 data, dimm_dod[3], value[8];
@@ -545,11 +546,11 @@ static int get_dimm_config(struct mem_ctl_info *mci)
545 continue; 546 continue;
546 547
547 if (!CH_ACTIVE(pvt, i)) { 548 if (!CH_ACTIVE(pvt, i)) {
548 debugf0("Channel %i is not active\n", i); 549 edac_dbg(0, "Channel %i is not active\n", i);
549 continue; 550 continue;
550 } 551 }
551 if (CH_DISABLED(pvt, i)) { 552 if (CH_DISABLED(pvt, i)) {
552 debugf0("Channel %i is disabled\n", i); 553 edac_dbg(0, "Channel %i is disabled\n", i);
553 continue; 554 continue;
554 } 555 }
555 556
@@ -580,15 +581,14 @@ static int get_dimm_config(struct mem_ctl_info *mci)
580 pci_read_config_dword(pvt->pci_ch[i][1], 581 pci_read_config_dword(pvt->pci_ch[i][1],
581 MC_DOD_CH_DIMM2, &dimm_dod[2]); 582 MC_DOD_CH_DIMM2, &dimm_dod[2]);
582 583
583 debugf0("Ch%d phy rd%d, wr%d (0x%08x): " 584 edac_dbg(0, "Ch%d phy rd%d, wr%d (0x%08x): %s%s%s%cDIMMs\n",
584 "%s%s%s%cDIMMs\n", 585 i,
585 i, 586 RDLCH(pvt->info.ch_map, i), WRLCH(pvt->info.ch_map, i),
586 RDLCH(pvt->info.ch_map, i), WRLCH(pvt->info.ch_map, i), 587 data,
587 data, 588 pvt->channel[i].is_3dimms_present ? "3DIMMS " : "",
588 pvt->channel[i].is_3dimms_present ? "3DIMMS " : "", 589 pvt->channel[i].is_3dimms_present ? "SINGLE_4R " : "",
589 pvt->channel[i].is_3dimms_present ? "SINGLE_4R " : "", 590 pvt->channel[i].has_4rank ? "HAS_4R " : "",
590 pvt->channel[i].has_4rank ? "HAS_4R " : "", 591 (data & REGISTERED_DIMM) ? 'R' : 'U');
591 (data & REGISTERED_DIMM) ? 'R' : 'U');
592 592
593 for (j = 0; j < 3; j++) { 593 for (j = 0; j < 3; j++) {
594 u32 banks, ranks, rows, cols; 594 u32 banks, ranks, rows, cols;
@@ -607,11 +607,10 @@ static int get_dimm_config(struct mem_ctl_info *mci)
607 /* DDR3 has 8 I/O banks */ 607 /* DDR3 has 8 I/O banks */
608 size = (rows * cols * banks * ranks) >> (20 - 3); 608 size = (rows * cols * banks * ranks) >> (20 - 3);
609 609
610 debugf0("\tdimm %d %d Mb offset: %x, " 610 edac_dbg(0, "\tdimm %d %d Mb offset: %x, bank: %d, rank: %d, row: %#x, col: %#x\n",
611 "bank: %d, rank: %d, row: %#x, col: %#x\n", 611 j, size,
612 j, size, 612 RANKOFFSET(dimm_dod[j]),
613 RANKOFFSET(dimm_dod[j]), 613 banks, ranks, rows, cols);
614 banks, ranks, rows, cols);
615 614
616 npages = MiB_TO_PAGES(size); 615 npages = MiB_TO_PAGES(size);
617 616
@@ -647,12 +646,12 @@ static int get_dimm_config(struct mem_ctl_info *mci)
647 pci_read_config_dword(pdev, MC_SAG_CH_5, &value[5]); 646 pci_read_config_dword(pdev, MC_SAG_CH_5, &value[5]);
648 pci_read_config_dword(pdev, MC_SAG_CH_6, &value[6]); 647 pci_read_config_dword(pdev, MC_SAG_CH_6, &value[6]);
649 pci_read_config_dword(pdev, MC_SAG_CH_7, &value[7]); 648 pci_read_config_dword(pdev, MC_SAG_CH_7, &value[7]);
650 debugf1("\t[%i] DIVBY3\tREMOVED\tOFFSET\n", i); 649 edac_dbg(1, "\t[%i] DIVBY3\tREMOVED\tOFFSET\n", i);
651 for (j = 0; j < 8; j++) 650 for (j = 0; j < 8; j++)
652 debugf1("\t\t%#x\t%#x\t%#x\n", 651 edac_dbg(1, "\t\t%#x\t%#x\t%#x\n",
653 (value[j] >> 27) & 0x1, 652 (value[j] >> 27) & 0x1,
654 (value[j] >> 24) & 0x7, 653 (value[j] >> 24) & 0x7,
655 (value[j] & ((1 << 24) - 1))); 654 (value[j] & ((1 << 24) - 1)));
656 } 655 }
657 656
658 return 0; 657 return 0;
@@ -662,6 +661,8 @@ static int get_dimm_config(struct mem_ctl_info *mci)
662 Error insertion routines 661 Error insertion routines
663 ****************************************************************************/ 662 ****************************************************************************/
664 663
664#define to_mci(k) container_of(k, struct mem_ctl_info, dev)
665
665/* The i7core has independent error injection features per channel. 666/* The i7core has independent error injection features per channel.
666 However, to have a simpler code, we don't allow enabling error injection 667 However, to have a simpler code, we don't allow enabling error injection
667 on more than one channel. 668 on more than one channel.
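The to_mci() helper defined in this hunk is what lets the injection sysfs handlers below become ordinary struct device_attribute callbacks: the struct device they receive is the one embedded in struct mem_ctl_info (note the earlier hunks renaming the old mci->dev pointer to mci->pdev, which frees the dev name for that embedded device), so container_of() recovers the mci. The resulting handler shape, mirroring i7core_inject_section_show() below:

    #define to_mci(k) container_of(k, struct mem_ctl_info, dev)

    static ssize_t i7core_inject_section_show(struct device *dev,
                                              struct device_attribute *mattr,
                                              char *data)
    {
            struct mem_ctl_info *mci = to_mci(dev);   /* dev is &mci->dev */
            struct i7core_pvt *pvt = mci->pvt_info;

            return sprintf(data, "0x%08x\n", pvt->inject.section);
    }
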
@@ -691,9 +692,11 @@ static int disable_inject(const struct mem_ctl_info *mci)
691 * bit 0 - refers to the lower 32-byte half cacheline 692 * bit 0 - refers to the lower 32-byte half cacheline
692 * bit 1 - refers to the upper 32-byte half cacheline 693 * bit 1 - refers to the upper 32-byte half cacheline
693 */ 694 */
694static ssize_t i7core_inject_section_store(struct mem_ctl_info *mci, 695static ssize_t i7core_inject_section_store(struct device *dev,
696 struct device_attribute *mattr,
695 const char *data, size_t count) 697 const char *data, size_t count)
696{ 698{
699 struct mem_ctl_info *mci = to_mci(dev);
697 struct i7core_pvt *pvt = mci->pvt_info; 700 struct i7core_pvt *pvt = mci->pvt_info;
698 unsigned long value; 701 unsigned long value;
699 int rc; 702 int rc;
@@ -709,9 +712,11 @@ static ssize_t i7core_inject_section_store(struct mem_ctl_info *mci,
709 return count; 712 return count;
710} 713}
711 714
712static ssize_t i7core_inject_section_show(struct mem_ctl_info *mci, 715static ssize_t i7core_inject_section_show(struct device *dev,
713 char *data) 716 struct device_attribute *mattr,
717 char *data)
714{ 718{
719 struct mem_ctl_info *mci = to_mci(dev);
715 struct i7core_pvt *pvt = mci->pvt_info; 720 struct i7core_pvt *pvt = mci->pvt_info;
716 return sprintf(data, "0x%08x\n", pvt->inject.section); 721 return sprintf(data, "0x%08x\n", pvt->inject.section);
717} 722}
@@ -724,10 +729,12 @@ static ssize_t i7core_inject_section_show(struct mem_ctl_info *mci,
724 * bit 1 - inject ECC error 729 * bit 1 - inject ECC error
725 * bit 2 - inject parity error 730 * bit 2 - inject parity error
726 */ 731 */
727static ssize_t i7core_inject_type_store(struct mem_ctl_info *mci, 732static ssize_t i7core_inject_type_store(struct device *dev,
733 struct device_attribute *mattr,
728 const char *data, size_t count) 734 const char *data, size_t count)
729{ 735{
730 struct i7core_pvt *pvt = mci->pvt_info; 736 struct mem_ctl_info *mci = to_mci(dev);
737struct i7core_pvt *pvt = mci->pvt_info;
731 unsigned long value; 738 unsigned long value;
732 int rc; 739 int rc;
733 740
@@ -742,10 +749,13 @@ static ssize_t i7core_inject_type_store(struct mem_ctl_info *mci,
742 return count; 749 return count;
743} 750}
744 751
745static ssize_t i7core_inject_type_show(struct mem_ctl_info *mci, 752static ssize_t i7core_inject_type_show(struct device *dev,
746 char *data) 753 struct device_attribute *mattr,
754 char *data)
747{ 755{
756 struct mem_ctl_info *mci = to_mci(dev);
748 struct i7core_pvt *pvt = mci->pvt_info; 757 struct i7core_pvt *pvt = mci->pvt_info;
758
749 return sprintf(data, "0x%08x\n", pvt->inject.type); 759 return sprintf(data, "0x%08x\n", pvt->inject.type);
750} 760}
751 761
@@ -759,9 +769,11 @@ static ssize_t i7core_inject_type_show(struct mem_ctl_info *mci,
759 * 23:16 and 31:24). Flipping bits in two symbol pairs will cause an 769 * 23:16 and 31:24). Flipping bits in two symbol pairs will cause an
760 * uncorrectable error to be injected. 770 * uncorrectable error to be injected.
761 */ 771 */
762static ssize_t i7core_inject_eccmask_store(struct mem_ctl_info *mci, 772static ssize_t i7core_inject_eccmask_store(struct device *dev,
763 const char *data, size_t count) 773 struct device_attribute *mattr,
774 const char *data, size_t count)
764{ 775{
776 struct mem_ctl_info *mci = to_mci(dev);
765 struct i7core_pvt *pvt = mci->pvt_info; 777 struct i7core_pvt *pvt = mci->pvt_info;
766 unsigned long value; 778 unsigned long value;
767 int rc; 779 int rc;
@@ -777,10 +789,13 @@ static ssize_t i7core_inject_eccmask_store(struct mem_ctl_info *mci,
777 return count; 789 return count;
778} 790}
779 791
780static ssize_t i7core_inject_eccmask_show(struct mem_ctl_info *mci, 792static ssize_t i7core_inject_eccmask_show(struct device *dev,
781 char *data) 793 struct device_attribute *mattr,
794 char *data)
782{ 795{
796 struct mem_ctl_info *mci = to_mci(dev);
783 struct i7core_pvt *pvt = mci->pvt_info; 797 struct i7core_pvt *pvt = mci->pvt_info;
798
784 return sprintf(data, "0x%08x\n", pvt->inject.eccmask); 799 return sprintf(data, "0x%08x\n", pvt->inject.eccmask);
785} 800}
786 801
@@ -797,14 +812,16 @@ static ssize_t i7core_inject_eccmask_show(struct mem_ctl_info *mci,
797 812
798#define DECLARE_ADDR_MATCH(param, limit) \ 813#define DECLARE_ADDR_MATCH(param, limit) \
799static ssize_t i7core_inject_store_##param( \ 814static ssize_t i7core_inject_store_##param( \
800 struct mem_ctl_info *mci, \ 815 struct device *dev, \
801 const char *data, size_t count) \ 816 struct device_attribute *mattr, \
817 const char *data, size_t count) \
802{ \ 818{ \
819 struct mem_ctl_info *mci = to_mci(dev); \
803 struct i7core_pvt *pvt; \ 820 struct i7core_pvt *pvt; \
804 long value; \ 821 long value; \
805 int rc; \ 822 int rc; \
806 \ 823 \
807 debugf1("%s()\n", __func__); \ 824 edac_dbg(1, "\n"); \
808 pvt = mci->pvt_info; \ 825 pvt = mci->pvt_info; \
809 \ 826 \
810 if (pvt->inject.enable) \ 827 if (pvt->inject.enable) \
@@ -824,13 +841,15 @@ static ssize_t i7core_inject_store_##param( \
824} \ 841} \
825 \ 842 \
826static ssize_t i7core_inject_show_##param( \ 843static ssize_t i7core_inject_show_##param( \
827 struct mem_ctl_info *mci, \ 844 struct device *dev, \
828 char *data) \ 845 struct device_attribute *mattr, \
846 char *data) \
829{ \ 847{ \
848 struct mem_ctl_info *mci = to_mci(dev); \
830 struct i7core_pvt *pvt; \ 849 struct i7core_pvt *pvt; \
831 \ 850 \
832 pvt = mci->pvt_info; \ 851 pvt = mci->pvt_info; \
833 debugf1("%s() pvt=%p\n", __func__, pvt); \ 852 edac_dbg(1, "pvt=%p\n", pvt); \
834 if (pvt->inject.param < 0) \ 853 if (pvt->inject.param < 0) \
835 return sprintf(data, "any\n"); \ 854 return sprintf(data, "any\n"); \
836 else \ 855 else \
@@ -838,14 +857,9 @@ static ssize_t i7core_inject_show_##param( \
838} 857}
839 858
840#define ATTR_ADDR_MATCH(param) \ 859#define ATTR_ADDR_MATCH(param) \
841 { \ 860 static DEVICE_ATTR(param, S_IRUGO | S_IWUSR, \
842 .attr = { \ 861 i7core_inject_show_##param, \
843 .name = #param, \ 862 i7core_inject_store_##param)
844 .mode = (S_IRUGO | S_IWUSR) \
845 }, \
846 .show = i7core_inject_show_##param, \
847 .store = i7core_inject_store_##param, \
848 }
849 863
850DECLARE_ADDR_MATCH(channel, 3); 864DECLARE_ADDR_MATCH(channel, 3);
851DECLARE_ADDR_MATCH(dimm, 3); 865DECLARE_ADDR_MATCH(dimm, 3);
@@ -854,14 +868,21 @@ DECLARE_ADDR_MATCH(bank, 32);
854DECLARE_ADDR_MATCH(page, 0x10000); 868DECLARE_ADDR_MATCH(page, 0x10000);
855DECLARE_ADDR_MATCH(col, 0x4000); 869DECLARE_ADDR_MATCH(col, 0x4000);
856 870
871ATTR_ADDR_MATCH(channel);
872ATTR_ADDR_MATCH(dimm);
873ATTR_ADDR_MATCH(rank);
874ATTR_ADDR_MATCH(bank);
875ATTR_ADDR_MATCH(page);
876ATTR_ADDR_MATCH(col);
877
857static int write_and_test(struct pci_dev *dev, const int where, const u32 val) 878static int write_and_test(struct pci_dev *dev, const int where, const u32 val)
858{ 879{
859 u32 read; 880 u32 read;
860 int count; 881 int count;
861 882
862 debugf0("setting pci %02x:%02x.%x reg=%02x value=%08x\n", 883 edac_dbg(0, "setting pci %02x:%02x.%x reg=%02x value=%08x\n",
863 dev->bus->number, PCI_SLOT(dev->devfn), PCI_FUNC(dev->devfn), 884 dev->bus->number, PCI_SLOT(dev->devfn), PCI_FUNC(dev->devfn),
864 where, val); 885 where, val);
865 886
866 for (count = 0; count < 10; count++) { 887 for (count = 0; count < 10; count++) {
867 if (count) 888 if (count)
@@ -899,9 +920,11 @@ static int write_and_test(struct pci_dev *dev, const int where, const u32 val)
899 * is reliable enough to check if the MC is using the 920 * is reliable enough to check if the MC is using the
900 * three channels. However, this is not clear in the datasheet. 921
901 */ 922 */
902static ssize_t i7core_inject_enable_store(struct mem_ctl_info *mci, 923static ssize_t i7core_inject_enable_store(struct device *dev,
903 const char *data, size_t count) 924 struct device_attribute *mattr,
925 const char *data, size_t count)
904{ 926{
927 struct mem_ctl_info *mci = to_mci(dev);
905 struct i7core_pvt *pvt = mci->pvt_info; 928 struct i7core_pvt *pvt = mci->pvt_info;
906 u32 injectmask; 929 u32 injectmask;
907 u64 mask = 0; 930 u64 mask = 0;
@@ -994,17 +1017,18 @@ static ssize_t i7core_inject_enable_store(struct mem_ctl_info *mci,
994 pci_write_config_dword(pvt->pci_noncore, 1017 pci_write_config_dword(pvt->pci_noncore,
995 MC_CFG_CONTROL, 8); 1018 MC_CFG_CONTROL, 8);
996 1019
997 debugf0("Error inject addr match 0x%016llx, ecc 0x%08x," 1020 edac_dbg(0, "Error inject addr match 0x%016llx, ecc 0x%08x, inject 0x%08x\n",
998 " inject 0x%08x\n", 1021 mask, pvt->inject.eccmask, injectmask);
999 mask, pvt->inject.eccmask, injectmask);
1000 1022
1001 1023
1002 return count; 1024 return count;
1003} 1025}
1004 1026
1005static ssize_t i7core_inject_enable_show(struct mem_ctl_info *mci, 1027static ssize_t i7core_inject_enable_show(struct device *dev,
1006 char *data) 1028 struct device_attribute *mattr,
1029 char *data)
1007{ 1030{
1031 struct mem_ctl_info *mci = to_mci(dev);
1008 struct i7core_pvt *pvt = mci->pvt_info; 1032 struct i7core_pvt *pvt = mci->pvt_info;
1009 u32 injectmask; 1033 u32 injectmask;
1010 1034
@@ -1014,7 +1038,7 @@ static ssize_t i7core_inject_enable_show(struct mem_ctl_info *mci,
1014 pci_read_config_dword(pvt->pci_ch[pvt->inject.channel][0], 1038 pci_read_config_dword(pvt->pci_ch[pvt->inject.channel][0],
1015 MC_CHANNEL_ERROR_INJECT, &injectmask); 1039 MC_CHANNEL_ERROR_INJECT, &injectmask);
1016 1040
1017 debugf0("Inject error read: 0x%018x\n", injectmask); 1041 edac_dbg(0, "Inject error read: 0x%018x\n", injectmask);
1018 1042
1019 if (injectmask & 0x0c) 1043 if (injectmask & 0x0c)
1020 pvt->inject.enable = 1; 1044 pvt->inject.enable = 1;
@@ -1024,12 +1048,14 @@ static ssize_t i7core_inject_enable_show(struct mem_ctl_info *mci,
1024 1048
1025#define DECLARE_COUNTER(param) \ 1049#define DECLARE_COUNTER(param) \
1026static ssize_t i7core_show_counter_##param( \ 1050static ssize_t i7core_show_counter_##param( \
1027 struct mem_ctl_info *mci, \ 1051 struct device *dev, \
1028 char *data) \ 1052 struct device_attribute *mattr, \
1053 char *data) \
1029{ \ 1054{ \
1055 struct mem_ctl_info *mci = to_mci(dev); \
1030 struct i7core_pvt *pvt = mci->pvt_info; \ 1056 struct i7core_pvt *pvt = mci->pvt_info; \
1031 \ 1057 \
1032 debugf1("%s() \n", __func__); \ 1058 edac_dbg(1, "\n"); \
1033 if (!pvt->ce_count_available || (pvt->is_registered)) \ 1059 if (!pvt->ce_count_available || (pvt->is_registered)) \
1034 return sprintf(data, "data unavailable\n"); \ 1060 return sprintf(data, "data unavailable\n"); \
1035 return sprintf(data, "%lu\n", \ 1061 return sprintf(data, "%lu\n", \
@@ -1037,121 +1063,179 @@ static ssize_t i7core_show_counter_##param( \
1037} 1063}
1038 1064
1039#define ATTR_COUNTER(param) \ 1065#define ATTR_COUNTER(param) \
1040 { \ 1066 static DEVICE_ATTR(udimm##param, S_IRUGO | S_IWUSR, \
1041 .attr = { \ 1067 i7core_show_counter_##param, \
1042 .name = __stringify(udimm##param), \ 1068 NULL)
1043 .mode = (S_IRUGO | S_IWUSR) \
1044 }, \
1045 .show = i7core_show_counter_##param \
1046 }
1047 1069
1048DECLARE_COUNTER(0); 1070DECLARE_COUNTER(0);
1049DECLARE_COUNTER(1); 1071DECLARE_COUNTER(1);
1050DECLARE_COUNTER(2); 1072DECLARE_COUNTER(2);
1051 1073
1074ATTR_COUNTER(0);
1075ATTR_COUNTER(1);
1076ATTR_COUNTER(2);
1077
1052/* 1078/*
1053 * Sysfs struct 1079 * inject_addrmatch device sysfs struct
1054 */ 1080 */
1055 1081
1056static const struct mcidev_sysfs_attribute i7core_addrmatch_attrs[] = { 1082static struct attribute *i7core_addrmatch_attrs[] = {
1057 ATTR_ADDR_MATCH(channel), 1083 &dev_attr_channel.attr,
1058 ATTR_ADDR_MATCH(dimm), 1084 &dev_attr_dimm.attr,
1059 ATTR_ADDR_MATCH(rank), 1085 &dev_attr_rank.attr,
1060 ATTR_ADDR_MATCH(bank), 1086 &dev_attr_bank.attr,
1061 ATTR_ADDR_MATCH(page), 1087 &dev_attr_page.attr,
1062 ATTR_ADDR_MATCH(col), 1088 &dev_attr_col.attr,
1063 { } /* End of list */ 1089 NULL
1064}; 1090};
1065 1091
1066static const struct mcidev_sysfs_group i7core_inject_addrmatch = { 1092static struct attribute_group addrmatch_grp = {
1067 .name = "inject_addrmatch", 1093 .attrs = i7core_addrmatch_attrs,
1068 .mcidev_attr = i7core_addrmatch_attrs,
1069}; 1094};
1070 1095
1071static const struct mcidev_sysfs_attribute i7core_udimm_counters_attrs[] = { 1096static const struct attribute_group *addrmatch_groups[] = {
1072 ATTR_COUNTER(0), 1097 &addrmatch_grp,
1073 ATTR_COUNTER(1), 1098 NULL
1074 ATTR_COUNTER(2),
1075 { .attr = { .name = NULL } }
1076}; 1099};
1077 1100
1078static const struct mcidev_sysfs_group i7core_udimm_counters = { 1101static void addrmatch_release(struct device *device)
1079 .name = "all_channel_counts", 1102{
1080 .mcidev_attr = i7core_udimm_counters_attrs, 1103 edac_dbg(1, "Releasing device %s\n", dev_name(device));
1104 kfree(device);
1105}
1106
1107static struct device_type addrmatch_type = {
1108 .groups = addrmatch_groups,
1109 .release = addrmatch_release,
1081}; 1110};
1082 1111
1083static const struct mcidev_sysfs_attribute i7core_sysfs_rdimm_attrs[] = { 1112/*
1084 { 1113 * all_channel_counts sysfs struct
1085 .attr = { 1114 */
1086 .name = "inject_section", 1115
1087 .mode = (S_IRUGO | S_IWUSR) 1116static struct attribute *i7core_udimm_counters_attrs[] = {
1088 }, 1117 &dev_attr_udimm0.attr,
1089 .show = i7core_inject_section_show, 1118 &dev_attr_udimm1.attr,
1090 .store = i7core_inject_section_store, 1119 &dev_attr_udimm2.attr,
1091 }, { 1120 NULL
1092 .attr = {
1093 .name = "inject_type",
1094 .mode = (S_IRUGO | S_IWUSR)
1095 },
1096 .show = i7core_inject_type_show,
1097 .store = i7core_inject_type_store,
1098 }, {
1099 .attr = {
1100 .name = "inject_eccmask",
1101 .mode = (S_IRUGO | S_IWUSR)
1102 },
1103 .show = i7core_inject_eccmask_show,
1104 .store = i7core_inject_eccmask_store,
1105 }, {
1106 .grp = &i7core_inject_addrmatch,
1107 }, {
1108 .attr = {
1109 .name = "inject_enable",
1110 .mode = (S_IRUGO | S_IWUSR)
1111 },
1112 .show = i7core_inject_enable_show,
1113 .store = i7core_inject_enable_store,
1114 },
1115 { } /* End of list */
1116}; 1121};
1117 1122
1118static const struct mcidev_sysfs_attribute i7core_sysfs_udimm_attrs[] = { 1123static struct attribute_group all_channel_counts_grp = {
1119 { 1124 .attrs = i7core_udimm_counters_attrs,
1120 .attr = {
1121 .name = "inject_section",
1122 .mode = (S_IRUGO | S_IWUSR)
1123 },
1124 .show = i7core_inject_section_show,
1125 .store = i7core_inject_section_store,
1126 }, {
1127 .attr = {
1128 .name = "inject_type",
1129 .mode = (S_IRUGO | S_IWUSR)
1130 },
1131 .show = i7core_inject_type_show,
1132 .store = i7core_inject_type_store,
1133 }, {
1134 .attr = {
1135 .name = "inject_eccmask",
1136 .mode = (S_IRUGO | S_IWUSR)
1137 },
1138 .show = i7core_inject_eccmask_show,
1139 .store = i7core_inject_eccmask_store,
1140 }, {
1141 .grp = &i7core_inject_addrmatch,
1142 }, {
1143 .attr = {
1144 .name = "inject_enable",
1145 .mode = (S_IRUGO | S_IWUSR)
1146 },
1147 .show = i7core_inject_enable_show,
1148 .store = i7core_inject_enable_store,
1149 }, {
1150 .grp = &i7core_udimm_counters,
1151 },
1152 { } /* End of list */
1153}; 1125};
1154 1126
1127static const struct attribute_group *all_channel_counts_groups[] = {
1128 &all_channel_counts_grp,
1129 NULL
1130};
1131
1132static void all_channel_counts_release(struct device *device)
1133{
1134 edac_dbg(1, "Releasing device %s\n", dev_name(device));
1135 kfree(device);
1136}
1137
1138static struct device_type all_channel_counts_type = {
1139 .groups = all_channel_counts_groups,
1140 .release = all_channel_counts_release,
1141};
1142
1143/*
1144 * inject sysfs attributes
1145 */
1146
1147static DEVICE_ATTR(inject_section, S_IRUGO | S_IWUSR,
1148 i7core_inject_section_show, i7core_inject_section_store);
1149
1150static DEVICE_ATTR(inject_type, S_IRUGO | S_IWUSR,
1151 i7core_inject_type_show, i7core_inject_type_store);
1152
1153
1154static DEVICE_ATTR(inject_eccmask, S_IRUGO | S_IWUSR,
1155 i7core_inject_eccmask_show, i7core_inject_eccmask_store);
1156
1157static DEVICE_ATTR(inject_enable, S_IRUGO | S_IWUSR,
1158 i7core_inject_enable_show, i7core_inject_enable_store);
1159
1160static int i7core_create_sysfs_devices(struct mem_ctl_info *mci)
1161{
1162 struct i7core_pvt *pvt = mci->pvt_info;
1163 int rc;
1164
1165 rc = device_create_file(&mci->dev, &dev_attr_inject_section);
1166 if (rc < 0)
1167 return rc;
1168 rc = device_create_file(&mci->dev, &dev_attr_inject_type);
1169 if (rc < 0)
1170 return rc;
1171 rc = device_create_file(&mci->dev, &dev_attr_inject_eccmask);
1172 if (rc < 0)
1173 return rc;
1174 rc = device_create_file(&mci->dev, &dev_attr_inject_enable);
1175 if (rc < 0)
1176 return rc;
1177
1178 pvt->addrmatch_dev = kzalloc(sizeof(*pvt->addrmatch_dev), GFP_KERNEL);
1179 if (!pvt->addrmatch_dev)
1180 return rc;
1181
1182 pvt->addrmatch_dev->type = &addrmatch_type;
1183 pvt->addrmatch_dev->bus = mci->dev.bus;
1184 device_initialize(pvt->addrmatch_dev);
1185 pvt->addrmatch_dev->parent = &mci->dev;
1186 dev_set_name(pvt->addrmatch_dev, "inject_addrmatch");
1187 dev_set_drvdata(pvt->addrmatch_dev, mci);
1188
1189 edac_dbg(1, "creating %s\n", dev_name(pvt->addrmatch_dev));
1190
1191 rc = device_add(pvt->addrmatch_dev);
1192 if (rc < 0)
1193 return rc;
1194
1195 if (!pvt->is_registered) {
1196 pvt->chancounts_dev = kzalloc(sizeof(*pvt->chancounts_dev),
1197 GFP_KERNEL);
1198 if (!pvt->chancounts_dev) {
1199 put_device(pvt->addrmatch_dev);
1200 device_del(pvt->addrmatch_dev);
1201 return rc;
1202 }
1203
1204 pvt->chancounts_dev->type = &all_channel_counts_type;
1205 pvt->chancounts_dev->bus = mci->dev.bus;
1206 device_initialize(pvt->chancounts_dev);
1207 pvt->chancounts_dev->parent = &mci->dev;
1208 dev_set_name(pvt->chancounts_dev, "all_channel_counts");
1209 dev_set_drvdata(pvt->chancounts_dev, mci);
1210
1211 edac_dbg(1, "creating %s\n", dev_name(pvt->chancounts_dev));
1212
1213 rc = device_add(pvt->chancounts_dev);
1214 if (rc < 0)
1215 return rc;
1216 }
1217 return 0;
1218}
1219
1220static void i7core_delete_sysfs_devices(struct mem_ctl_info *mci)
1221{
1222 struct i7core_pvt *pvt = mci->pvt_info;
1223
1224 edac_dbg(1, "\n");
1225
1226 device_remove_file(&mci->dev, &dev_attr_inject_section);
1227 device_remove_file(&mci->dev, &dev_attr_inject_type);
1228 device_remove_file(&mci->dev, &dev_attr_inject_eccmask);
1229 device_remove_file(&mci->dev, &dev_attr_inject_enable);
1230
1231 if (!pvt->is_registered) {
1232 put_device(pvt->chancounts_dev);
1233 device_del(pvt->chancounts_dev);
1234 }
1235 put_device(pvt->addrmatch_dev);
1236 device_del(pvt->addrmatch_dev);
1237}
1238
1155/**************************************************************************** 1239/****************************************************************************
1156 Device initialization routines: put/get, init/exit 1240 Device initialization routines: put/get, init/exit
1157 ****************************************************************************/ 1241 ****************************************************************************/
@@ -1164,14 +1248,14 @@ static void i7core_put_devices(struct i7core_dev *i7core_dev)
1164{ 1248{
1165 int i; 1249 int i;
1166 1250
1167 debugf0(__FILE__ ": %s()\n", __func__); 1251 edac_dbg(0, "\n");
1168 for (i = 0; i < i7core_dev->n_devs; i++) { 1252 for (i = 0; i < i7core_dev->n_devs; i++) {
1169 struct pci_dev *pdev = i7core_dev->pdev[i]; 1253 struct pci_dev *pdev = i7core_dev->pdev[i];
1170 if (!pdev) 1254 if (!pdev)
1171 continue; 1255 continue;
1172 debugf0("Removing dev %02x:%02x.%d\n", 1256 edac_dbg(0, "Removing dev %02x:%02x.%d\n",
1173 pdev->bus->number, 1257 pdev->bus->number,
1174 PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn)); 1258 PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));
1175 pci_dev_put(pdev); 1259 pci_dev_put(pdev);
1176 } 1260 }
1177} 1261}
@@ -1214,12 +1298,12 @@ static unsigned i7core_pci_lastbus(void)
1214 1298
1215 while ((b = pci_find_next_bus(b)) != NULL) { 1299 while ((b = pci_find_next_bus(b)) != NULL) {
1216 bus = b->number; 1300 bus = b->number;
1217 debugf0("Found bus %d\n", bus); 1301 edac_dbg(0, "Found bus %d\n", bus);
1218 if (bus > last_bus) 1302 if (bus > last_bus)
1219 last_bus = bus; 1303 last_bus = bus;
1220 } 1304 }
1221 1305
1222 debugf0("Last bus %d\n", last_bus); 1306 edac_dbg(0, "Last bus %d\n", last_bus);
1223 1307
1224 return last_bus; 1308 return last_bus;
1225} 1309}
@@ -1326,10 +1410,10 @@ static int i7core_get_onedevice(struct pci_dev **prev,
1326 return -ENODEV; 1410 return -ENODEV;
1327 } 1411 }
1328 1412
1329 debugf0("Detected socket %d dev %02x:%02x.%d PCI ID %04x:%04x\n", 1413 edac_dbg(0, "Detected socket %d dev %02x:%02x.%d PCI ID %04x:%04x\n",
1330 socket, bus, dev_descr->dev, 1414 socket, bus, dev_descr->dev,
1331 dev_descr->func, 1415 dev_descr->func,
1332 PCI_VENDOR_ID_INTEL, dev_descr->dev_id); 1416 PCI_VENDOR_ID_INTEL, dev_descr->dev_id);
1333 1417
1334 /* 1418 /*
1335 * As stated in drivers/pci/search.c, the reference count for 1419
@@ -1427,13 +1511,13 @@ static int mci_bind_devs(struct mem_ctl_info *mci,
1427 family = "unknown"; 1511 family = "unknown";
1428 pvt->enable_scrub = false; 1512 pvt->enable_scrub = false;
1429 } 1513 }
1430 debugf0("Detected a processor type %s\n", family); 1514 edac_dbg(0, "Detected a processor type %s\n", family);
1431 } else 1515 } else
1432 goto error; 1516 goto error;
1433 1517
1434 debugf0("Associated fn %d.%d, dev = %p, socket %d\n", 1518 edac_dbg(0, "Associated fn %d.%d, dev = %p, socket %d\n",
1435 PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn), 1519 PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn),
1436 pdev, i7core_dev->socket); 1520 pdev, i7core_dev->socket);
1437 1521
1438 if (PCI_SLOT(pdev->devfn) == 3 && 1522 if (PCI_SLOT(pdev->devfn) == 3 &&
1439 PCI_FUNC(pdev->devfn) == 2) 1523 PCI_FUNC(pdev->devfn) == 2)
@@ -1452,18 +1536,6 @@ error:
1452/**************************************************************************** 1536/****************************************************************************
1453 Error check routines 1537 Error check routines
1454 ****************************************************************************/ 1538 ****************************************************************************/
1455static void i7core_rdimm_update_errcount(struct mem_ctl_info *mci,
1456 const int chan,
1457 const int dimm,
1458 const int add)
1459{
1460 int i;
1461
1462 for (i = 0; i < add; i++) {
1463 edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 0, 0, 0,
1464 chan, dimm, -1, "error", "", NULL);
1465 }
1466}
1467 1539
1468static void i7core_rdimm_update_ce_count(struct mem_ctl_info *mci, 1540static void i7core_rdimm_update_ce_count(struct mem_ctl_info *mci,
1469 const int chan, 1541 const int chan,
@@ -1502,12 +1574,17 @@ static void i7core_rdimm_update_ce_count(struct mem_ctl_info *mci,
1502 1574
1503 /*updated the edac core */ 1575 /*updated the edac core */
1504 if (add0 != 0) 1576 if (add0 != 0)
1505 i7core_rdimm_update_errcount(mci, chan, 0, add0); 1577 edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, add0,
1578 0, 0, 0,
1579 chan, 0, -1, "error", "");
1506 if (add1 != 0) 1580 if (add1 != 0)
1507 i7core_rdimm_update_errcount(mci, chan, 1, add1); 1581 edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, add1,
1582 0, 0, 0,
1583 chan, 1, -1, "error", "");
1508 if (add2 != 0) 1584 if (add2 != 0)
1509 i7core_rdimm_update_errcount(mci, chan, 2, add2); 1585 edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, add2,
1510 1586 0, 0, 0,
1587 chan, 2, -1, "error", "");
1511} 1588}
1512 1589
1513static void i7core_rdimm_check_mc_ecc_err(struct mem_ctl_info *mci) 1590static void i7core_rdimm_check_mc_ecc_err(struct mem_ctl_info *mci)
@@ -1530,8 +1607,8 @@ static void i7core_rdimm_check_mc_ecc_err(struct mem_ctl_info *mci)
1530 pci_read_config_dword(pvt->pci_mcr[2], MC_COR_ECC_CNT_5, 1607 pci_read_config_dword(pvt->pci_mcr[2], MC_COR_ECC_CNT_5,
1531 &rcv[2][1]); 1608 &rcv[2][1]);
1532 for (i = 0 ; i < 3; i++) { 1609 for (i = 0 ; i < 3; i++) {
1533 debugf3("MC_COR_ECC_CNT%d = 0x%x; MC_COR_ECC_CNT%d = 0x%x\n", 1610 edac_dbg(3, "MC_COR_ECC_CNT%d = 0x%x; MC_COR_ECC_CNT%d = 0x%x\n",
1534 (i * 2), rcv[i][0], (i * 2) + 1, rcv[i][1]); 1611 (i * 2), rcv[i][0], (i * 2) + 1, rcv[i][1]);
1535 /*if the channel has 3 dimms*/ 1612 /*if the channel has 3 dimms*/
1536 if (pvt->channel[i].dimms > 2) { 1613 if (pvt->channel[i].dimms > 2) {
1537 new0 = DIMM_BOT_COR_ERR(rcv[i][0]); 1614 new0 = DIMM_BOT_COR_ERR(rcv[i][0]);
@@ -1562,7 +1639,7 @@ static void i7core_udimm_check_mc_ecc_err(struct mem_ctl_info *mci)
1562 int new0, new1, new2; 1639 int new0, new1, new2;
1563 1640
1564 if (!pvt->pci_mcr[4]) { 1641 if (!pvt->pci_mcr[4]) {
1565 debugf0("%s MCR registers not found\n", __func__); 1642 edac_dbg(0, "MCR registers not found\n");
1566 return; 1643 return;
1567 } 1644 }
1568 1645
@@ -1626,7 +1703,7 @@ static void i7core_mce_output_error(struct mem_ctl_info *mci,
1626 const struct mce *m) 1703 const struct mce *m)
1627{ 1704{
1628 struct i7core_pvt *pvt = mci->pvt_info; 1705 struct i7core_pvt *pvt = mci->pvt_info;
1629 char *type, *optype, *err, msg[80]; 1706 char *type, *optype, *err;
1630 enum hw_event_mc_err_type tp_event; 1707 enum hw_event_mc_err_type tp_event;
1631 unsigned long error = m->status & 0x1ff0000l; 1708 unsigned long error = m->status & 0x1ff0000l;
1632 bool uncorrected_error = m->mcgstatus & 1ll << 61; 1709 bool uncorrected_error = m->mcgstatus & 1ll << 61;
@@ -1704,20 +1781,18 @@ static void i7core_mce_output_error(struct mem_ctl_info *mci,
1704 err = "unknown"; 1781 err = "unknown";
1705 } 1782 }
1706 1783
1707 snprintf(msg, sizeof(msg), "count=%d %s", core_err_cnt, optype);
1708
1709 /* 1784 /*
1710 * Call the helper to output message 1785 * Call the helper to output message
1711 * FIXME: what to do if core_err_cnt > 1? Currently, it generates 1786 * FIXME: what to do if core_err_cnt > 1? Currently, it generates
1712 * only one event 1787 * only one event
1713 */ 1788 */
1714 if (uncorrected_error || !pvt->is_registered) 1789 if (uncorrected_error || !pvt->is_registered)
1715 edac_mc_handle_error(tp_event, mci, 1790 edac_mc_handle_error(tp_event, mci, core_err_cnt,
1716 m->addr >> PAGE_SHIFT, 1791 m->addr >> PAGE_SHIFT,
1717 m->addr & ~PAGE_MASK, 1792 m->addr & ~PAGE_MASK,
1718 syndrome, 1793 syndrome,
1719 channel, dimm, -1, 1794 channel, dimm, -1,
1720 err, msg, m); 1795 err, optype);
1721} 1796}
1722 1797
1723/* 1798/*
@@ -2094,8 +2169,7 @@ static void i7core_unregister_mci(struct i7core_dev *i7core_dev)
2094 struct i7core_pvt *pvt; 2169 struct i7core_pvt *pvt;
2095 2170
2096 if (unlikely(!mci || !mci->pvt_info)) { 2171 if (unlikely(!mci || !mci->pvt_info)) {
2097 debugf0("MC: " __FILE__ ": %s(): dev = %p\n", 2172 edac_dbg(0, "MC: dev = %p\n", &i7core_dev->pdev[0]->dev);
2098 __func__, &i7core_dev->pdev[0]->dev);
2099 2173
2100 i7core_printk(KERN_ERR, "Couldn't find mci handler\n"); 2174 i7core_printk(KERN_ERR, "Couldn't find mci handler\n");
2101 return; 2175 return;
@@ -2103,8 +2177,7 @@ static void i7core_unregister_mci(struct i7core_dev *i7core_dev)
2103 2177
2104 pvt = mci->pvt_info; 2178 pvt = mci->pvt_info;
2105 2179
2106 debugf0("MC: " __FILE__ ": %s(): mci = %p, dev = %p\n", 2180 edac_dbg(0, "MC: mci = %p, dev = %p\n", mci, &i7core_dev->pdev[0]->dev);
2107 __func__, mci, &i7core_dev->pdev[0]->dev);
2108 2181
2109 /* Disable scrubrate setting */ 2182 /* Disable scrubrate setting */
2110 if (pvt->enable_scrub) 2183 if (pvt->enable_scrub)
@@ -2114,9 +2187,10 @@ static void i7core_unregister_mci(struct i7core_dev *i7core_dev)
2114 i7core_pci_ctl_release(pvt); 2187 i7core_pci_ctl_release(pvt);
2115 2188
2116 /* Remove MC sysfs nodes */ 2189 /* Remove MC sysfs nodes */
2117 edac_mc_del_mc(mci->dev); 2190 i7core_delete_sysfs_devices(mci);
2191 edac_mc_del_mc(mci->pdev);
2118 2192
2119 debugf1("%s: free mci struct\n", mci->ctl_name); 2193 edac_dbg(1, "%s: free mci struct\n", mci->ctl_name);
2120 kfree(mci->ctl_name); 2194 kfree(mci->ctl_name);
2121 edac_mc_free(mci); 2195 edac_mc_free(mci);
2122 i7core_dev->mci = NULL; 2196 i7core_dev->mci = NULL;
@@ -2142,8 +2216,7 @@ static int i7core_register_mci(struct i7core_dev *i7core_dev)
2142 if (unlikely(!mci)) 2216 if (unlikely(!mci))
2143 return -ENOMEM; 2217 return -ENOMEM;
2144 2218
2145 debugf0("MC: " __FILE__ ": %s(): mci = %p, dev = %p\n", 2219 edac_dbg(0, "MC: mci = %p, dev = %p\n", mci, &i7core_dev->pdev[0]->dev);
2146 __func__, mci, &i7core_dev->pdev[0]->dev);
2147 2220
2148 pvt = mci->pvt_info; 2221 pvt = mci->pvt_info;
2149 memset(pvt, 0, sizeof(*pvt)); 2222 memset(pvt, 0, sizeof(*pvt));
@@ -2172,15 +2245,11 @@ static int i7core_register_mci(struct i7core_dev *i7core_dev)
2172 if (unlikely(rc < 0)) 2245 if (unlikely(rc < 0))
2173 goto fail0; 2246 goto fail0;
2174 2247
2175 if (pvt->is_registered)
2176 mci->mc_driver_sysfs_attributes = i7core_sysfs_rdimm_attrs;
2177 else
2178 mci->mc_driver_sysfs_attributes = i7core_sysfs_udimm_attrs;
2179 2248
2180 /* Get dimm basic config */ 2249 /* Get dimm basic config */
2181 get_dimm_config(mci); 2250 get_dimm_config(mci);
2182 /* record ptr to the generic device */ 2251 /* record ptr to the generic device */
2183 mci->dev = &i7core_dev->pdev[0]->dev; 2252 mci->pdev = &i7core_dev->pdev[0]->dev;
2184 /* Set the function pointer to an actual operation function */ 2253 /* Set the function pointer to an actual operation function */
2185 mci->edac_check = i7core_check_error; 2254 mci->edac_check = i7core_check_error;
2186 2255
@@ -2190,8 +2259,7 @@ static int i7core_register_mci(struct i7core_dev *i7core_dev)
2190 2259
2191 /* add this new MC control structure to EDAC's list of MCs */ 2260 /* add this new MC control structure to EDAC's list of MCs */
2192 if (unlikely(edac_mc_add_mc(mci))) { 2261 if (unlikely(edac_mc_add_mc(mci))) {
2193 debugf0("MC: " __FILE__ 2262 edac_dbg(0, "MC: failed edac_mc_add_mc()\n");
2194 ": %s(): failed edac_mc_add_mc()\n", __func__);
2195 /* FIXME: perhaps some code should go here that disables error 2263 /* FIXME: perhaps some code should go here that disables error
2196 * reporting if we just enabled it 2264 * reporting if we just enabled it
2197 */ 2265 */
@@ -2199,6 +2267,12 @@ static int i7core_register_mci(struct i7core_dev *i7core_dev)
2199 rc = -EINVAL; 2267 rc = -EINVAL;
2200 goto fail0; 2268 goto fail0;
2201 } 2269 }
2270 if (i7core_create_sysfs_devices(mci)) {
2271 edac_dbg(0, "MC: failed to create sysfs nodes\n");
2272 edac_mc_del_mc(mci->pdev);
2273 rc = -EINVAL;
2274 goto fail0;
2275 }
2202 2276
2203 /* Default error mask is any memory */ 2277 /* Default error mask is any memory */
2204 pvt->inject.channel = 0; 2278 pvt->inject.channel = 0;
@@ -2298,7 +2372,7 @@ static void __devexit i7core_remove(struct pci_dev *pdev)
2298{ 2372{
2299 struct i7core_dev *i7core_dev; 2373 struct i7core_dev *i7core_dev;
2300 2374
2301 debugf0(__FILE__ ": %s()\n", __func__); 2375 edac_dbg(0, "\n");
2302 2376
2303 /* 2377 /*
2304 * we have trouble here: pdev value for removal will be wrong, since 2378
@@ -2347,7 +2421,7 @@ static int __init i7core_init(void)
2347{ 2421{
2348 int pci_rc; 2422 int pci_rc;
2349 2423
2350 debugf2("MC: " __FILE__ ": %s()\n", __func__); 2424 edac_dbg(2, "\n");
2351 2425
2352 /* Ensure that the OPSTATE is set correctly for POLL or NMI */ 2426 /* Ensure that the OPSTATE is set correctly for POLL or NMI */
2353 opstate_init(); 2427 opstate_init();
@@ -2374,7 +2448,7 @@ static int __init i7core_init(void)
2374 */ 2448 */
2375static void __exit i7core_exit(void) 2449static void __exit i7core_exit(void)
2376{ 2450{
2377 debugf2("MC: " __FILE__ ": %s()\n", __func__); 2451 edac_dbg(2, "\n");
2378 pci_unregister_driver(&i7core_driver); 2452 pci_unregister_driver(&i7core_driver);
2379 mce_unregister_decode_chain(&i7_mce_dec); 2453 mce_unregister_decode_chain(&i7_mce_dec);
2380} 2454}
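
The i7core hunks above drop the EDAC-private mcidev_sysfs_attribute/mcidev_sysfs_group tables in favour of the standard driver-model sysfs interfaces: per-attribute DEVICE_ATTR() definitions, NULL-terminated attribute lists wrapped in an attribute_group and device_type, and explicit child devices for inject_addrmatch and all_channel_counts. A minimal sketch of that pattern follows; "foo" and the example_* names are hypothetical and not part of the patch.

#include <linux/device.h>
#include <linux/sysfs.h>
#include <linux/slab.h>

/* Illustrative only -- names are made up, logic is the bare pattern. */
static ssize_t foo_show(struct device *dev, struct device_attribute *attr,
			char *buf)
{
	return sprintf(buf, "%d\n", 42);	/* report some driver state */
}

static ssize_t foo_store(struct device *dev, struct device_attribute *attr,
			 const char *buf, size_t count)
{
	return count;				/* parse buf and apply it here */
}

static DEVICE_ATTR(foo, S_IRUGO | S_IWUSR, foo_show, foo_store);

static struct attribute *example_attrs[] = {
	&dev_attr_foo.attr,
	NULL					/* list must be NULL-terminated */
};

static struct attribute_group example_grp = {
	.attrs = example_attrs,
};

static const struct attribute_group *example_groups[] = {
	&example_grp,
	NULL
};

static void example_release(struct device *dev)
{
	kfree(dev);				/* device was kzalloc()ed by the driver */
}

static struct device_type example_type = {
	.groups  = example_groups,
	.release = example_release,
};

The release callback is not optional: the child devices are kzalloc()ed by the driver and are only freed when the last reference is dropped, which is why i7core_create_sysfs_devices() above pairs kzalloc() with a type that knows how to free the device.
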
diff --git a/drivers/edac/i82443bxgx_edac.c b/drivers/edac/i82443bxgx_edac.c
index 52072c28a8a6..90f303db5d1d 100644
--- a/drivers/edac/i82443bxgx_edac.c
+++ b/drivers/edac/i82443bxgx_edac.c
@@ -124,7 +124,7 @@ static void i82443bxgx_edacmc_get_error_info(struct mem_ctl_info *mci,
124 *info) 124 *info)
125{ 125{
126 struct pci_dev *pdev; 126 struct pci_dev *pdev;
127 pdev = to_pci_dev(mci->dev); 127 pdev = to_pci_dev(mci->pdev);
128 pci_read_config_dword(pdev, I82443BXGX_EAP, &info->eap); 128 pci_read_config_dword(pdev, I82443BXGX_EAP, &info->eap);
129 if (info->eap & I82443BXGX_EAP_OFFSET_SBE) 129 if (info->eap & I82443BXGX_EAP_OFFSET_SBE)
130 /* Clear error to allow next error to be reported [p.61] */ 130 /* Clear error to allow next error to be reported [p.61] */
@@ -156,19 +156,19 @@ static int i82443bxgx_edacmc_process_error_info(struct mem_ctl_info *mci,
156 if (info->eap & I82443BXGX_EAP_OFFSET_SBE) { 156 if (info->eap & I82443BXGX_EAP_OFFSET_SBE) {
157 error_found = 1; 157 error_found = 1;
158 if (handle_errors) 158 if (handle_errors)
159 edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 159 edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1,
160 page, pageoffset, 0, 160 page, pageoffset, 0,
161 edac_mc_find_csrow_by_page(mci, page), 161 edac_mc_find_csrow_by_page(mci, page),
162 0, -1, mci->ctl_name, "", NULL); 162 0, -1, mci->ctl_name, "");
163 } 163 }
164 164
165 if (info->eap & I82443BXGX_EAP_OFFSET_MBE) { 165 if (info->eap & I82443BXGX_EAP_OFFSET_MBE) {
166 error_found = 1; 166 error_found = 1;
167 if (handle_errors) 167 if (handle_errors)
168 edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 168 edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1,
169 page, pageoffset, 0, 169 page, pageoffset, 0,
170 edac_mc_find_csrow_by_page(mci, page), 170 edac_mc_find_csrow_by_page(mci, page),
171 0, -1, mci->ctl_name, "", NULL); 171 0, -1, mci->ctl_name, "");
172 } 172 }
173 173
174 return error_found; 174 return error_found;
@@ -178,7 +178,7 @@ static void i82443bxgx_edacmc_check(struct mem_ctl_info *mci)
178{ 178{
179 struct i82443bxgx_edacmc_error_info info; 179 struct i82443bxgx_edacmc_error_info info;
180 180
181 debugf1("MC%d: %s: %s()\n", mci->mc_idx, __FILE__, __func__); 181 edac_dbg(1, "MC%d\n", mci->mc_idx);
182 i82443bxgx_edacmc_get_error_info(mci, &info); 182 i82443bxgx_edacmc_get_error_info(mci, &info);
183 i82443bxgx_edacmc_process_error_info(mci, &info, 1); 183 i82443bxgx_edacmc_process_error_info(mci, &info, 1);
184} 184}
@@ -197,18 +197,17 @@ static void i82443bxgx_init_csrows(struct mem_ctl_info *mci,
197 pci_read_config_byte(pdev, I82443BXGX_DRAMC, &dramc); 197 pci_read_config_byte(pdev, I82443BXGX_DRAMC, &dramc);
198 row_high_limit_last = 0; 198 row_high_limit_last = 0;
199 for (index = 0; index < mci->nr_csrows; index++) { 199 for (index = 0; index < mci->nr_csrows; index++) {
200 csrow = &mci->csrows[index]; 200 csrow = mci->csrows[index];
201 dimm = csrow->channels[0].dimm; 201 dimm = csrow->channels[0]->dimm;
202 202
203 pci_read_config_byte(pdev, I82443BXGX_DRB + index, &drbar); 203 pci_read_config_byte(pdev, I82443BXGX_DRB + index, &drbar);
204 debugf1("MC%d: %s: %s() Row=%d DRB = %#0x\n", 204 edac_dbg(1, "MC%d: Row=%d DRB = %#0x\n",
205 mci->mc_idx, __FILE__, __func__, index, drbar); 205 mci->mc_idx, index, drbar);
206 row_high_limit = ((u32) drbar << 23); 206 row_high_limit = ((u32) drbar << 23);
207 /* find the DRAM Chip Select Base address and mask */ 207 /* find the DRAM Chip Select Base address and mask */
208 debugf1("MC%d: %s: %s() Row=%d, " 208 edac_dbg(1, "MC%d: Row=%d, Boundary Address=%#0x, Last = %#0x\n",
209 "Boundary Address=%#0x, Last = %#0x\n", 209 mci->mc_idx, index, row_high_limit,
210 mci->mc_idx, __FILE__, __func__, index, row_high_limit, 210 row_high_limit_last);
211 row_high_limit_last);
212 211
213 /* 440GX goes to 2GB, represented with a DRB of 0. */ 212 /* 440GX goes to 2GB, represented with a DRB of 0. */
214 if (row_high_limit_last && !row_high_limit) 213 if (row_high_limit_last && !row_high_limit)
@@ -241,7 +240,7 @@ static int i82443bxgx_edacmc_probe1(struct pci_dev *pdev, int dev_idx)
241 enum mem_type mtype; 240 enum mem_type mtype;
242 enum edac_type edac_mode; 241 enum edac_type edac_mode;
243 242
244 debugf0("MC: %s: %s()\n", __FILE__, __func__); 243 edac_dbg(0, "MC:\n");
245 244
246 /* Something is really hosed if PCI config space reads from 245 /* Something is really hosed if PCI config space reads from
247 * the MC aren't working. 246 * the MC aren't working.
@@ -259,8 +258,8 @@ static int i82443bxgx_edacmc_probe1(struct pci_dev *pdev, int dev_idx)
259 if (mci == NULL) 258 if (mci == NULL)
260 return -ENOMEM; 259 return -ENOMEM;
261 260
262 debugf0("MC: %s: %s(): mci = %p\n", __FILE__, __func__, mci); 261 edac_dbg(0, "MC: mci = %p\n", mci);
263 mci->dev = &pdev->dev; 262 mci->pdev = &pdev->dev;
264 mci->mtype_cap = MEM_FLAG_EDO | MEM_FLAG_SDR | MEM_FLAG_RDR; 263 mci->mtype_cap = MEM_FLAG_EDO | MEM_FLAG_SDR | MEM_FLAG_RDR;
265 mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_EC | EDAC_FLAG_SECDED; 264 mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_EC | EDAC_FLAG_SECDED;
266 pci_read_config_byte(pdev, I82443BXGX_DRAMC, &dramc); 265 pci_read_config_byte(pdev, I82443BXGX_DRAMC, &dramc);
@@ -275,8 +274,7 @@ static int i82443bxgx_edacmc_probe1(struct pci_dev *pdev, int dev_idx)
275 mtype = MEM_RDR; 274 mtype = MEM_RDR;
276 break; 275 break;
277 default: 276 default:
278 debugf0("Unknown/reserved DRAM type value " 277 edac_dbg(0, "Unknown/reserved DRAM type value in DRAMC register!\n");
279 "in DRAMC register!\n");
280 mtype = -MEM_UNKNOWN; 278 mtype = -MEM_UNKNOWN;
281 } 279 }
282 280
@@ -305,8 +303,7 @@ static int i82443bxgx_edacmc_probe1(struct pci_dev *pdev, int dev_idx)
305 edac_mode = EDAC_SECDED; 303 edac_mode = EDAC_SECDED;
306 break; 304 break;
307 default: 305 default:
308 debugf0("%s(): Unknown/reserved ECC state " 306 edac_dbg(0, "Unknown/reserved ECC state in NBXCFG register!\n");
309 "in NBXCFG register!\n", __func__);
310 edac_mode = EDAC_UNKNOWN; 307 edac_mode = EDAC_UNKNOWN;
311 break; 308 break;
312 } 309 }
@@ -330,7 +327,7 @@ static int i82443bxgx_edacmc_probe1(struct pci_dev *pdev, int dev_idx)
330 mci->ctl_page_to_phys = NULL; 327 mci->ctl_page_to_phys = NULL;
331 328
332 if (edac_mc_add_mc(mci)) { 329 if (edac_mc_add_mc(mci)) {
333 debugf3("%s(): failed edac_mc_add_mc()\n", __func__); 330 edac_dbg(3, "failed edac_mc_add_mc()\n");
334 goto fail; 331 goto fail;
335 } 332 }
336 333
@@ -345,7 +342,7 @@ static int i82443bxgx_edacmc_probe1(struct pci_dev *pdev, int dev_idx)
345 __func__); 342 __func__);
346 } 343 }
347 344
348 debugf3("MC: %s: %s(): success\n", __FILE__, __func__); 345 edac_dbg(3, "MC: success\n");
349 return 0; 346 return 0;
350 347
351fail: 348fail:
@@ -361,7 +358,7 @@ static int __devinit i82443bxgx_edacmc_init_one(struct pci_dev *pdev,
361{ 358{
362 int rc; 359 int rc;
363 360
364 debugf0("MC: %s: %s()\n", __FILE__, __func__); 361 edac_dbg(0, "MC:\n");
365 362
366 /* don't need to call pci_enable_device() */ 363 /* don't need to call pci_enable_device() */
367 rc = i82443bxgx_edacmc_probe1(pdev, ent->driver_data); 364 rc = i82443bxgx_edacmc_probe1(pdev, ent->driver_data);
@@ -376,7 +373,7 @@ static void __devexit i82443bxgx_edacmc_remove_one(struct pci_dev *pdev)
376{ 373{
377 struct mem_ctl_info *mci; 374 struct mem_ctl_info *mci;
378 375
379 debugf0("%s: %s()\n", __FILE__, __func__); 376 edac_dbg(0, "\n");
380 377
381 if (i82443bxgx_pci) 378 if (i82443bxgx_pci)
382 edac_pci_release_generic_ctl(i82443bxgx_pci); 379 edac_pci_release_generic_ctl(i82443bxgx_pci);
@@ -428,7 +425,7 @@ static int __init i82443bxgx_edacmc_init(void)
428 id = &i82443bxgx_pci_tbl[i]; 425 id = &i82443bxgx_pci_tbl[i];
429 } 426 }
430 if (!mci_pdev) { 427 if (!mci_pdev) {
431 debugf0("i82443bxgx pci_get_device fail\n"); 428 edac_dbg(0, "i82443bxgx pci_get_device fail\n");
432 pci_rc = -ENODEV; 429 pci_rc = -ENODEV;
433 goto fail1; 430 goto fail1;
434 } 431 }
@@ -436,7 +433,7 @@ static int __init i82443bxgx_edacmc_init(void)
436 pci_rc = i82443bxgx_edacmc_init_one(mci_pdev, i82443bxgx_pci_tbl); 433 pci_rc = i82443bxgx_edacmc_init_one(mci_pdev, i82443bxgx_pci_tbl);
437 434
438 if (pci_rc < 0) { 435 if (pci_rc < 0) {
439 debugf0("i82443bxgx init fail\n"); 436 edac_dbg(0, "i82443bxgx init fail\n");
440 pci_rc = -ENODEV; 437 pci_rc = -ENODEV;
441 goto fail1; 438 goto fail1;
442 } 439 }
diff --git a/drivers/edac/i82860_edac.c b/drivers/edac/i82860_edac.c
index 08045059d10b..1faa74971513 100644
--- a/drivers/edac/i82860_edac.c
+++ b/drivers/edac/i82860_edac.c
@@ -67,7 +67,7 @@ static void i82860_get_error_info(struct mem_ctl_info *mci,
67{ 67{
68 struct pci_dev *pdev; 68 struct pci_dev *pdev;
69 69
70 pdev = to_pci_dev(mci->dev); 70 pdev = to_pci_dev(mci->pdev);
71 71
72 /* 72 /*
73 * This is a mess because there is no atomic way to read all the 73 * This is a mess because there is no atomic way to read all the
@@ -109,25 +109,25 @@ static int i82860_process_error_info(struct mem_ctl_info *mci,
109 return 1; 109 return 1;
110 110
111 if ((info->errsts ^ info->errsts2) & 0x0003) { 111 if ((info->errsts ^ info->errsts2) & 0x0003) {
112 edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 0, 0, 0, 112 edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1, 0, 0, 0,
113 -1, -1, -1, "UE overwrote CE", "", NULL); 113 -1, -1, -1, "UE overwrote CE", "");
114 info->errsts = info->errsts2; 114 info->errsts = info->errsts2;
115 } 115 }
116 116
117 info->eap >>= PAGE_SHIFT; 117 info->eap >>= PAGE_SHIFT;
118 row = edac_mc_find_csrow_by_page(mci, info->eap); 118 row = edac_mc_find_csrow_by_page(mci, info->eap);
119 dimm = mci->csrows[row].channels[0].dimm; 119 dimm = mci->csrows[row]->channels[0]->dimm;
120 120
121 if (info->errsts & 0x0002) 121 if (info->errsts & 0x0002)
122 edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 122 edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1,
123 info->eap, 0, 0, 123 info->eap, 0, 0,
124 dimm->location[0], dimm->location[1], -1, 124 dimm->location[0], dimm->location[1], -1,
125 "i82860 UE", "", NULL); 125 "i82860 UE", "");
126 else 126 else
127 edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 127 edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1,
128 info->eap, 0, info->derrsyn, 128 info->eap, 0, info->derrsyn,
129 dimm->location[0], dimm->location[1], -1, 129 dimm->location[0], dimm->location[1], -1,
130 "i82860 CE", "", NULL); 130 "i82860 CE", "");
131 131
132 return 1; 132 return 1;
133} 133}
@@ -136,7 +136,7 @@ static void i82860_check(struct mem_ctl_info *mci)
136{ 136{
137 struct i82860_error_info info; 137 struct i82860_error_info info;
138 138
139 debugf1("MC%d: %s()\n", mci->mc_idx, __func__); 139 edac_dbg(1, "MC%d\n", mci->mc_idx);
140 i82860_get_error_info(mci, &info); 140 i82860_get_error_info(mci, &info);
141 i82860_process_error_info(mci, &info, 1); 141 i82860_process_error_info(mci, &info, 1);
142} 142}
@@ -161,14 +161,13 @@ static void i82860_init_csrows(struct mem_ctl_info *mci, struct pci_dev *pdev)
161 * in all eight rows. 161 * in all eight rows.
162 */ 162 */
163 for (index = 0; index < mci->nr_csrows; index++) { 163 for (index = 0; index < mci->nr_csrows; index++) {
164 csrow = &mci->csrows[index]; 164 csrow = mci->csrows[index];
165 dimm = csrow->channels[0].dimm; 165 dimm = csrow->channels[0]->dimm;
166 166
167 pci_read_config_word(pdev, I82860_GBA + index * 2, &value); 167 pci_read_config_word(pdev, I82860_GBA + index * 2, &value);
168 cumul_size = (value & I82860_GBA_MASK) << 168 cumul_size = (value & I82860_GBA_MASK) <<
169 (I82860_GBA_SHIFT - PAGE_SHIFT); 169 (I82860_GBA_SHIFT - PAGE_SHIFT);
170 debugf3("%s(): (%d) cumul_size 0x%x\n", __func__, index, 170 edac_dbg(3, "(%d) cumul_size 0x%x\n", index, cumul_size);
171 cumul_size);
172 171
173 if (cumul_size == last_cumul_size) 172 if (cumul_size == last_cumul_size)
174 continue; /* not populated */ 173 continue; /* not populated */
@@ -210,8 +209,8 @@ static int i82860_probe1(struct pci_dev *pdev, int dev_idx)
210 if (!mci) 209 if (!mci)
211 return -ENOMEM; 210 return -ENOMEM;
212 211
213 debugf3("%s(): init mci\n", __func__); 212 edac_dbg(3, "init mci\n");
214 mci->dev = &pdev->dev; 213 mci->pdev = &pdev->dev;
215 mci->mtype_cap = MEM_FLAG_DDR; 214 mci->mtype_cap = MEM_FLAG_DDR;
216 mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED; 215 mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED;
217 /* I'm not sure about this but I think that all RDRAM is SECDED */ 216
@@ -229,7 +228,7 @@ static int i82860_probe1(struct pci_dev *pdev, int dev_idx)
229 * type of memory controller. The ID is therefore hardcoded to 0. 228 * type of memory controller. The ID is therefore hardcoded to 0.
230 */ 229 */
231 if (edac_mc_add_mc(mci)) { 230 if (edac_mc_add_mc(mci)) {
232 debugf3("%s(): failed edac_mc_add_mc()\n", __func__); 231 edac_dbg(3, "failed edac_mc_add_mc()\n");
233 goto fail; 232 goto fail;
234 } 233 }
235 234
@@ -245,7 +244,7 @@ static int i82860_probe1(struct pci_dev *pdev, int dev_idx)
245 } 244 }
246 245
247 /* get this far and it's successful */ 246 /* get this far and it's successful */
248 debugf3("%s(): success\n", __func__); 247 edac_dbg(3, "success\n");
249 248
250 return 0; 249 return 0;
251 250
@@ -260,7 +259,7 @@ static int __devinit i82860_init_one(struct pci_dev *pdev,
260{ 259{
261 int rc; 260 int rc;
262 261
263 debugf0("%s()\n", __func__); 262 edac_dbg(0, "\n");
264 i82860_printk(KERN_INFO, "i82860 init one\n"); 263 i82860_printk(KERN_INFO, "i82860 init one\n");
265 264
266 if (pci_enable_device(pdev) < 0) 265 if (pci_enable_device(pdev) < 0)
@@ -278,7 +277,7 @@ static void __devexit i82860_remove_one(struct pci_dev *pdev)
278{ 277{
279 struct mem_ctl_info *mci; 278 struct mem_ctl_info *mci;
280 279
281 debugf0("%s()\n", __func__); 280 edac_dbg(0, "\n");
282 281
283 if (i82860_pci) 282 if (i82860_pci)
284 edac_pci_release_generic_ctl(i82860_pci); 283 edac_pci_release_generic_ctl(i82860_pci);
@@ -311,7 +310,7 @@ static int __init i82860_init(void)
311{ 310{
312 int pci_rc; 311 int pci_rc;
313 312
314 debugf3("%s()\n", __func__); 313 edac_dbg(3, "\n");
315 314
316 /* Ensure that the OPSTATE is set correctly for POLL or NMI */ 315 /* Ensure that the OPSTATE is set correctly for POLL or NMI */
317 opstate_init(); 316 opstate_init();
@@ -324,7 +323,7 @@ static int __init i82860_init(void)
324 PCI_DEVICE_ID_INTEL_82860_0, NULL); 323 PCI_DEVICE_ID_INTEL_82860_0, NULL);
325 324
326 if (mci_pdev == NULL) { 325 if (mci_pdev == NULL) {
327 debugf0("860 pci_get_device fail\n"); 326 edac_dbg(0, "860 pci_get_device fail\n");
328 pci_rc = -ENODEV; 327 pci_rc = -ENODEV;
329 goto fail1; 328 goto fail1;
330 } 329 }
@@ -332,7 +331,7 @@ static int __init i82860_init(void)
332 pci_rc = i82860_init_one(mci_pdev, i82860_pci_tbl); 331 pci_rc = i82860_init_one(mci_pdev, i82860_pci_tbl);
333 332
334 if (pci_rc < 0) { 333 if (pci_rc < 0) {
335 debugf0("860 init fail\n"); 334 edac_dbg(0, "860 init fail\n");
336 pci_rc = -ENODEV; 335 pci_rc = -ENODEV;
337 goto fail1; 336 goto fail1;
338 } 337 }
@@ -352,7 +351,7 @@ fail0:
352 351
353static void __exit i82860_exit(void) 352static void __exit i82860_exit(void)
354{ 353{
355 debugf3("%s()\n", __func__); 354 edac_dbg(3, "\n");
356 355
357 pci_unregister_driver(&i82860_driver); 356 pci_unregister_driver(&i82860_driver);
358 357
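
The debugf0()..debugf4() calls in i82860, as in the other drivers in this series, become edac_dbg(level, fmt, ...). The macro prefixes the caller's function name itself, which is why the converted messages drop the explicit "%s()"/__FILE__ arguments, and it is gated on a runtime debug level. A usage sketch follows; the expansion shown in the comment is an approximation, not a quote of the header.

	/* Roughly: if (level <= edac_debug_level)
	 *              edac_printk(KERN_DEBUG, EDAC_DEBUG, "%s: " fmt, __func__, ...);
	 */
	edac_dbg(0, "MC: mci = %p, dev = %p\n", mci, &pdev->dev);
	edac_dbg(3, "(%d) cumul_size 0x%x\n", index, cumul_size);
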
diff --git a/drivers/edac/i82875p_edac.c b/drivers/edac/i82875p_edac.c
index b613e31c16e5..3e416b1a6b53 100644
--- a/drivers/edac/i82875p_edac.c
+++ b/drivers/edac/i82875p_edac.c
@@ -189,7 +189,7 @@ static void i82875p_get_error_info(struct mem_ctl_info *mci,
189{ 189{
190 struct pci_dev *pdev; 190 struct pci_dev *pdev;
191 191
192 pdev = to_pci_dev(mci->dev); 192 pdev = to_pci_dev(mci->pdev);
193 193
194 /* 194 /*
195 * This is a mess because there is no atomic way to read all the 195 * This is a mess because there is no atomic way to read all the
@@ -227,7 +227,7 @@ static int i82875p_process_error_info(struct mem_ctl_info *mci,
227{ 227{
228 int row, multi_chan; 228 int row, multi_chan;
229 229
230 multi_chan = mci->csrows[0].nr_channels - 1; 230 multi_chan = mci->csrows[0]->nr_channels - 1;
231 231
232 if (!(info->errsts & 0x0081)) 232 if (!(info->errsts & 0x0081))
233 return 0; 233 return 0;
@@ -236,9 +236,9 @@ static int i82875p_process_error_info(struct mem_ctl_info *mci,
236 return 1; 236 return 1;
237 237
238 if ((info->errsts ^ info->errsts2) & 0x0081) { 238 if ((info->errsts ^ info->errsts2) & 0x0081) {
239 edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 0, 0, 0, 239 edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1, 0, 0, 0,
240 -1, -1, -1, 240 -1, -1, -1,
241 "UE overwrote CE", "", NULL); 241 "UE overwrote CE", "");
242 info->errsts = info->errsts2; 242 info->errsts = info->errsts2;
243 } 243 }
244 244
@@ -246,15 +246,15 @@ static int i82875p_process_error_info(struct mem_ctl_info *mci,
246 row = edac_mc_find_csrow_by_page(mci, info->eap); 246 row = edac_mc_find_csrow_by_page(mci, info->eap);
247 247
248 if (info->errsts & 0x0080) 248 if (info->errsts & 0x0080)
249 edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 249 edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1,
250 info->eap, 0, 0, 250 info->eap, 0, 0,
251 row, -1, -1, 251 row, -1, -1,
252 "i82875p UE", "", NULL); 252 "i82875p UE", "");
253 else 253 else
254 edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 254 edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1,
255 info->eap, 0, info->derrsyn, 255 info->eap, 0, info->derrsyn,
256 row, multi_chan ? (info->des & 0x1) : 0, 256 row, multi_chan ? (info->des & 0x1) : 0,
257 -1, "i82875p CE", "", NULL); 257 -1, "i82875p CE", "");
258 258
259 return 1; 259 return 1;
260} 260}
@@ -263,7 +263,7 @@ static void i82875p_check(struct mem_ctl_info *mci)
263{ 263{
264 struct i82875p_error_info info; 264 struct i82875p_error_info info;
265 265
266 debugf1("MC%d: %s()\n", mci->mc_idx, __func__); 266 edac_dbg(1, "MC%d\n", mci->mc_idx);
267 i82875p_get_error_info(mci, &info); 267 i82875p_get_error_info(mci, &info);
268 i82875p_process_error_info(mci, &info, 1); 268 i82875p_process_error_info(mci, &info, 1);
269} 269}
@@ -367,12 +367,11 @@ static void i82875p_init_csrows(struct mem_ctl_info *mci,
367 */ 367 */
368 368
369 for (index = 0; index < mci->nr_csrows; index++) { 369 for (index = 0; index < mci->nr_csrows; index++) {
370 csrow = &mci->csrows[index]; 370 csrow = mci->csrows[index];
371 371
372 value = readb(ovrfl_window + I82875P_DRB + index); 372 value = readb(ovrfl_window + I82875P_DRB + index);
373 cumul_size = value << (I82875P_DRB_SHIFT - PAGE_SHIFT); 373 cumul_size = value << (I82875P_DRB_SHIFT - PAGE_SHIFT);
374 debugf3("%s(): (%d) cumul_size 0x%x\n", __func__, index, 374 edac_dbg(3, "(%d) cumul_size 0x%x\n", index, cumul_size);
375 cumul_size);
376 if (cumul_size == last_cumul_size) 375 if (cumul_size == last_cumul_size)
377 continue; /* not populated */ 376 continue; /* not populated */
378 377
@@ -382,7 +381,7 @@ static void i82875p_init_csrows(struct mem_ctl_info *mci,
382 last_cumul_size = cumul_size; 381 last_cumul_size = cumul_size;
383 382
384 for (j = 0; j < nr_chans; j++) { 383 for (j = 0; j < nr_chans; j++) {
385 dimm = csrow->channels[j].dimm; 384 dimm = csrow->channels[j]->dimm;
386 385
387 dimm->nr_pages = nr_pages / nr_chans; 386 dimm->nr_pages = nr_pages / nr_chans;
388 dimm->grain = 1 << 12; /* I82875P_EAP has 4KiB resolution */ 387
@@ -405,7 +404,7 @@ static int i82875p_probe1(struct pci_dev *pdev, int dev_idx)
405 u32 nr_chans; 404 u32 nr_chans;
406 struct i82875p_error_info discard; 405 struct i82875p_error_info discard;
407 406
408 debugf0("%s()\n", __func__); 407 edac_dbg(0, "\n");
409 408
410 ovrfl_pdev = pci_get_device(PCI_VEND_DEV(INTEL, 82875_6), NULL); 409 ovrfl_pdev = pci_get_device(PCI_VEND_DEV(INTEL, 82875_6), NULL);
411 410
@@ -426,11 +425,8 @@ static int i82875p_probe1(struct pci_dev *pdev, int dev_idx)
426 goto fail0; 425 goto fail0;
427 } 426 }
428 427
429 /* Keeps mci available after edac_mc_del_mc() till edac_mc_free() */ 428 edac_dbg(3, "init mci\n");
430 kobject_get(&mci->edac_mci_kobj); 429 mci->pdev = &pdev->dev;
431
432 debugf3("%s(): init mci\n", __func__);
433 mci->dev = &pdev->dev;
434 mci->mtype_cap = MEM_FLAG_DDR; 430 mci->mtype_cap = MEM_FLAG_DDR;
435 mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED; 431 mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED;
436 mci->edac_cap = EDAC_FLAG_UNKNOWN; 432 mci->edac_cap = EDAC_FLAG_UNKNOWN;
@@ -440,7 +436,7 @@ static int i82875p_probe1(struct pci_dev *pdev, int dev_idx)
440 mci->dev_name = pci_name(pdev); 436 mci->dev_name = pci_name(pdev);
441 mci->edac_check = i82875p_check; 437 mci->edac_check = i82875p_check;
442 mci->ctl_page_to_phys = NULL; 438 mci->ctl_page_to_phys = NULL;
443 debugf3("%s(): init pvt\n", __func__); 439 edac_dbg(3, "init pvt\n");
444 pvt = (struct i82875p_pvt *)mci->pvt_info; 440 pvt = (struct i82875p_pvt *)mci->pvt_info;
445 pvt->ovrfl_pdev = ovrfl_pdev; 441 pvt->ovrfl_pdev = ovrfl_pdev;
446 pvt->ovrfl_window = ovrfl_window; 442 pvt->ovrfl_window = ovrfl_window;
@@ -451,7 +447,7 @@ static int i82875p_probe1(struct pci_dev *pdev, int dev_idx)
451 * type of memory controller. The ID is therefore hardcoded to 0. 447 * type of memory controller. The ID is therefore hardcoded to 0.
452 */ 448 */
453 if (edac_mc_add_mc(mci)) { 449 if (edac_mc_add_mc(mci)) {
454 debugf3("%s(): failed edac_mc_add_mc()\n", __func__); 450 edac_dbg(3, "failed edac_mc_add_mc()\n");
455 goto fail1; 451 goto fail1;
456 } 452 }
457 453
@@ -467,11 +463,10 @@ static int i82875p_probe1(struct pci_dev *pdev, int dev_idx)
467 } 463 }
468 464
469 /* get this far and it's successful */ 465 /* get this far and it's successful */
470 debugf3("%s(): success\n", __func__); 466 edac_dbg(3, "success\n");
471 return 0; 467 return 0;
472 468
473fail1: 469fail1:
474 kobject_put(&mci->edac_mci_kobj);
475 edac_mc_free(mci); 470 edac_mc_free(mci);
476 471
477fail0: 472fail0:
@@ -489,7 +484,7 @@ static int __devinit i82875p_init_one(struct pci_dev *pdev,
489{ 484{
490 int rc; 485 int rc;
491 486
492 debugf0("%s()\n", __func__); 487 edac_dbg(0, "\n");
493 i82875p_printk(KERN_INFO, "i82875p init one\n"); 488 i82875p_printk(KERN_INFO, "i82875p init one\n");
494 489
495 if (pci_enable_device(pdev) < 0) 490 if (pci_enable_device(pdev) < 0)
@@ -508,7 +503,7 @@ static void __devexit i82875p_remove_one(struct pci_dev *pdev)
508 struct mem_ctl_info *mci; 503 struct mem_ctl_info *mci;
509 struct i82875p_pvt *pvt = NULL; 504 struct i82875p_pvt *pvt = NULL;
510 505
511 debugf0("%s()\n", __func__); 506 edac_dbg(0, "\n");
512 507
513 if (i82875p_pci) 508 if (i82875p_pci)
514 edac_pci_release_generic_ctl(i82875p_pci); 509 edac_pci_release_generic_ctl(i82875p_pci);
@@ -554,7 +549,7 @@ static int __init i82875p_init(void)
554{ 549{
555 int pci_rc; 550 int pci_rc;
556 551
557 debugf3("%s()\n", __func__); 552 edac_dbg(3, "\n");
558 553
559 /* Ensure that the OPSTATE is set correctly for POLL or NMI */ 554 /* Ensure that the OPSTATE is set correctly for POLL or NMI */
560 opstate_init(); 555 opstate_init();
@@ -569,7 +564,7 @@ static int __init i82875p_init(void)
569 PCI_DEVICE_ID_INTEL_82875_0, NULL); 564 PCI_DEVICE_ID_INTEL_82875_0, NULL);
570 565
571 if (!mci_pdev) { 566 if (!mci_pdev) {
572 debugf0("875p pci_get_device fail\n"); 567 edac_dbg(0, "875p pci_get_device fail\n");
573 pci_rc = -ENODEV; 568 pci_rc = -ENODEV;
574 goto fail1; 569 goto fail1;
575 } 570 }
@@ -577,7 +572,7 @@ static int __init i82875p_init(void)
577 pci_rc = i82875p_init_one(mci_pdev, i82875p_pci_tbl); 572 pci_rc = i82875p_init_one(mci_pdev, i82875p_pci_tbl);
578 573
579 if (pci_rc < 0) { 574 if (pci_rc < 0) {
580 debugf0("875p init fail\n"); 575 edac_dbg(0, "875p init fail\n");
581 pci_rc = -ENODEV; 576 pci_rc = -ENODEV;
582 goto fail1; 577 goto fail1;
583 } 578 }
@@ -597,7 +592,7 @@ fail0:
597 592
598static void __exit i82875p_exit(void) 593static void __exit i82875p_exit(void)
599{ 594{
600 debugf3("%s()\n", __func__); 595 edac_dbg(3, "\n");
601 596
602 i82875p_remove_one(mci_pdev); 597 i82875p_remove_one(mci_pdev);
603 pci_dev_put(mci_pdev); 598 pci_dev_put(mci_pdev);
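
Two data-structure changes recur in the i82875p hunks above and throughout the series: the memory controller's parent device moves from mci->dev to mci->pdev (leaving mci->dev as the embedded struct device used for sysfs), and csrows/channels become arrays of pointers rather than embedded arrays. The resulting access pattern, sketched with illustrative variable names:

	struct pci_dev    *pdev  = to_pci_dev(mci->pdev);	  /* parent is now ->pdev */
	struct csrow_info *csrow = mci->csrows[index];		  /* pointer, not &mci->csrows[index] */
	struct dimm_info  *dimm  = csrow->channels[chan]->dimm;  /* one more level of indirection */
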
diff --git a/drivers/edac/i82975x_edac.c b/drivers/edac/i82975x_edac.c
index 433332c7cdba..069e26c11c4f 100644
--- a/drivers/edac/i82975x_edac.c
+++ b/drivers/edac/i82975x_edac.c
@@ -241,7 +241,7 @@ static void i82975x_get_error_info(struct mem_ctl_info *mci,
241{ 241{
242 struct pci_dev *pdev; 242 struct pci_dev *pdev;
243 243
244 pdev = to_pci_dev(mci->dev); 244 pdev = to_pci_dev(mci->pdev);
245 245
246 /* 246 /*
247 * This is a mess because there is no atomic way to read all the 247 * This is a mess because there is no atomic way to read all the
@@ -288,8 +288,8 @@ static int i82975x_process_error_info(struct mem_ctl_info *mci,
288 return 1; 288 return 1;
289 289
290 if ((info->errsts ^ info->errsts2) & 0x0003) { 290 if ((info->errsts ^ info->errsts2) & 0x0003) {
291 edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 0, 0, 0, 291 edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1, 0, 0, 0,
292 -1, -1, -1, "UE overwrote CE", "", NULL); 292 -1, -1, -1, "UE overwrote CE", "");
293 info->errsts = info->errsts2; 293 info->errsts = info->errsts2;
294 } 294 }
295 295
@@ -308,21 +308,21 @@ static int i82975x_process_error_info(struct mem_ctl_info *mci,
308 (info->xeap & 1) ? 1 : 0, info->eap, (unsigned int) page); 308 (info->xeap & 1) ? 1 : 0, info->eap, (unsigned int) page);
309 return 0; 309 return 0;
310 } 310 }
311 chan = (mci->csrows[row].nr_channels == 1) ? 0 : info->eap & 1; 311 chan = (mci->csrows[row]->nr_channels == 1) ? 0 : info->eap & 1;
312 offst = info->eap 312 offst = info->eap
313 & ((1 << PAGE_SHIFT) - 313 & ((1 << PAGE_SHIFT) -
314 (1 << mci->csrows[row].channels[chan].dimm->grain)); 314 (1 << mci->csrows[row]->channels[chan]->dimm->grain));
315 315
316 if (info->errsts & 0x0002) 316 if (info->errsts & 0x0002)
317 edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 317 edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1,
318 page, offst, 0, 318 page, offst, 0,
319 row, -1, -1, 319 row, -1, -1,
320 "i82975x UE", "", NULL); 320 "i82975x UE", "");
321 else 321 else
322 edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 322 edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1,
323 page, offst, info->derrsyn, 323 page, offst, info->derrsyn,
324 row, chan ? chan : 0, -1, 324 row, chan ? chan : 0, -1,
325 "i82975x CE", "", NULL); 325 "i82975x CE", "");
326 326
327 return 1; 327 return 1;
328} 328}
@@ -331,7 +331,7 @@ static void i82975x_check(struct mem_ctl_info *mci)
331{ 331{
332 struct i82975x_error_info info; 332 struct i82975x_error_info info;
333 333
334 debugf1("MC%d: %s()\n", mci->mc_idx, __func__); 334 edac_dbg(1, "MC%d\n", mci->mc_idx);
335 i82975x_get_error_info(mci, &info); 335 i82975x_get_error_info(mci, &info);
336 i82975x_process_error_info(mci, &info, 1); 336 i82975x_process_error_info(mci, &info, 1);
337} 337}
@@ -394,7 +394,7 @@ static void i82975x_init_csrows(struct mem_ctl_info *mci,
394 */ 394 */
395 395
396 for (index = 0; index < mci->nr_csrows; index++) { 396 for (index = 0; index < mci->nr_csrows; index++) {
397 csrow = &mci->csrows[index]; 397 csrow = mci->csrows[index];
398 398
399 value = readb(mch_window + I82975X_DRB + index + 399 value = readb(mch_window + I82975X_DRB + index +
400 ((index >= 4) ? 0x80 : 0)); 400 ((index >= 4) ? 0x80 : 0));
@@ -406,8 +406,7 @@ static void i82975x_init_csrows(struct mem_ctl_info *mci,
406 */ 406 */
407 if (csrow->nr_channels > 1) 407 if (csrow->nr_channels > 1)
408 cumul_size <<= 1; 408 cumul_size <<= 1;
409 debugf3("%s(): (%d) cumul_size 0x%x\n", __func__, index, 409 edac_dbg(3, "(%d) cumul_size 0x%x\n", index, cumul_size);
410 cumul_size);
411 410
412 nr_pages = cumul_size - last_cumul_size; 411 nr_pages = cumul_size - last_cumul_size;
413 if (!nr_pages) 412 if (!nr_pages)
@@ -421,10 +420,10 @@ static void i82975x_init_csrows(struct mem_ctl_info *mci,
421 */ 420 */
422 dtype = i82975x_dram_type(mch_window, index); 421 dtype = i82975x_dram_type(mch_window, index);
423 for (chan = 0; chan < csrow->nr_channels; chan++) { 422 for (chan = 0; chan < csrow->nr_channels; chan++) {
424 dimm = mci->csrows[index].channels[chan].dimm; 423 dimm = mci->csrows[index]->channels[chan]->dimm;
425 424
426 dimm->nr_pages = nr_pages / csrow->nr_channels; 425 dimm->nr_pages = nr_pages / csrow->nr_channels;
427 strncpy(csrow->channels[chan].dimm->label, 426 strncpy(csrow->channels[chan]->dimm->label,
428 labels[(index >> 1) + (chan * 2)], 427 labels[(index >> 1) + (chan * 2)],
429 EDAC_MC_LABEL_LEN); 428 EDAC_MC_LABEL_LEN);
430 dimm->grain = 1 << 7; /* 128Byte cache-line resolution */ 429 dimm->grain = 1 << 7; /* 128Byte cache-line resolution */
@@ -489,11 +488,11 @@ static int i82975x_probe1(struct pci_dev *pdev, int dev_idx)
489 u8 c1drb[4]; 488 u8 c1drb[4];
490#endif 489#endif
491 490
492 debugf0("%s()\n", __func__); 491 edac_dbg(0, "\n");
493 492
494 pci_read_config_dword(pdev, I82975X_MCHBAR, &mchbar); 493 pci_read_config_dword(pdev, I82975X_MCHBAR, &mchbar);
495 if (!(mchbar & 1)) { 494 if (!(mchbar & 1)) {
496 debugf3("%s(): failed, MCHBAR disabled!\n", __func__); 495 edac_dbg(3, "failed, MCHBAR disabled!\n");
497 goto fail0; 496 goto fail0;
498 } 497 }
499 mchbar &= 0xffffc000; /* bits 31:14 used for 16K window */ 498 mchbar &= 0xffffc000; /* bits 31:14 used for 16K window */
@@ -558,8 +557,8 @@ static int i82975x_probe1(struct pci_dev *pdev, int dev_idx)
558 goto fail1; 557 goto fail1;
559 } 558 }
560 559
561 debugf3("%s(): init mci\n", __func__); 560 edac_dbg(3, "init mci\n");
562 mci->dev = &pdev->dev; 561 mci->pdev = &pdev->dev;
563 mci->mtype_cap = MEM_FLAG_DDR2; 562 mci->mtype_cap = MEM_FLAG_DDR2;
564 mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED; 563 mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED;
565 mci->edac_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED; 564 mci->edac_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED;
@@ -569,7 +568,7 @@ static int i82975x_probe1(struct pci_dev *pdev, int dev_idx)
569 mci->dev_name = pci_name(pdev); 568 mci->dev_name = pci_name(pdev);
570 mci->edac_check = i82975x_check; 569 mci->edac_check = i82975x_check;
571 mci->ctl_page_to_phys = NULL; 570 mci->ctl_page_to_phys = NULL;
572 debugf3("%s(): init pvt\n", __func__); 571 edac_dbg(3, "init pvt\n");
573 pvt = (struct i82975x_pvt *) mci->pvt_info; 572 pvt = (struct i82975x_pvt *) mci->pvt_info;
574 pvt->mch_window = mch_window; 573 pvt->mch_window = mch_window;
575 i82975x_init_csrows(mci, pdev, mch_window); 574 i82975x_init_csrows(mci, pdev, mch_window);
@@ -578,12 +577,12 @@ static int i82975x_probe1(struct pci_dev *pdev, int dev_idx)
578 577
579 /* finalize this instance of memory controller with edac core */ 578 /* finalize this instance of memory controller with edac core */
580 if (edac_mc_add_mc(mci)) { 579 if (edac_mc_add_mc(mci)) {
581 debugf3("%s(): failed edac_mc_add_mc()\n", __func__); 580 edac_dbg(3, "failed edac_mc_add_mc()\n");
582 goto fail2; 581 goto fail2;
583 } 582 }
584 583
585 /* get this far and it's successful */ 584 /* get this far and it's successful */
586 debugf3("%s(): success\n", __func__); 585 edac_dbg(3, "success\n");
587 return 0; 586 return 0;
588 587
589fail2: 588fail2:
@@ -601,7 +600,7 @@ static int __devinit i82975x_init_one(struct pci_dev *pdev,
601{ 600{
602 int rc; 601 int rc;
603 602
604 debugf0("%s()\n", __func__); 603 edac_dbg(0, "\n");
605 604
606 if (pci_enable_device(pdev) < 0) 605 if (pci_enable_device(pdev) < 0)
607 return -EIO; 606 return -EIO;
@@ -619,7 +618,7 @@ static void __devexit i82975x_remove_one(struct pci_dev *pdev)
619 struct mem_ctl_info *mci; 618 struct mem_ctl_info *mci;
620 struct i82975x_pvt *pvt; 619 struct i82975x_pvt *pvt;
621 620
622 debugf0("%s()\n", __func__); 621 edac_dbg(0, "\n");
623 622
624 mci = edac_mc_del_mc(&pdev->dev); 623 mci = edac_mc_del_mc(&pdev->dev);
625 if (mci == NULL) 624 if (mci == NULL)
@@ -655,7 +654,7 @@ static int __init i82975x_init(void)
655{ 654{
656 int pci_rc; 655 int pci_rc;
657 656
658 debugf3("%s()\n", __func__); 657 edac_dbg(3, "\n");
659 658
660 /* Ensure that the OPSTATE is set correctly for POLL or NMI */ 659 /* Ensure that the OPSTATE is set correctly for POLL or NMI */
661 opstate_init(); 660 opstate_init();
@@ -669,7 +668,7 @@ static int __init i82975x_init(void)
669 PCI_DEVICE_ID_INTEL_82975_0, NULL); 668 PCI_DEVICE_ID_INTEL_82975_0, NULL);
670 669
671 if (!mci_pdev) { 670 if (!mci_pdev) {
672 debugf0("i82975x pci_get_device fail\n"); 671 edac_dbg(0, "i82975x pci_get_device fail\n");
673 pci_rc = -ENODEV; 672 pci_rc = -ENODEV;
674 goto fail1; 673 goto fail1;
675 } 674 }
@@ -677,7 +676,7 @@ static int __init i82975x_init(void)
677 pci_rc = i82975x_init_one(mci_pdev, i82975x_pci_tbl); 676 pci_rc = i82975x_init_one(mci_pdev, i82975x_pci_tbl);
678 677
679 if (pci_rc < 0) { 678 if (pci_rc < 0) {
680 debugf0("i82975x init fail\n"); 679 edac_dbg(0, "i82975x init fail\n");
681 pci_rc = -ENODEV; 680 pci_rc = -ENODEV;
682 goto fail1; 681 goto fail1;
683 } 682 }
@@ -697,7 +696,7 @@ fail0:
697 696
698static void __exit i82975x_exit(void) 697static void __exit i82975x_exit(void)
699{ 698{
700 debugf3("%s()\n", __func__); 699 edac_dbg(3, "\n");
701 700
702 pci_unregister_driver(&i82975x_driver); 701 pci_unregister_driver(&i82975x_driver);
703 702
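The i82975x conversion above shows the two changes repeated across this series: edac_mc_handle_error() now takes an explicit error count right after the mci argument and no longer takes a trailing driver-private pointer, and the old debugfX("%s()", __func__) calls become edac_dbg(level, ...), which prepends the function name itself. A minimal sketch of a corrected-error report against the new signature (the helper name and its arguments are illustrative, not part of this patch):

	static void example_report_ce(struct mem_ctl_info *mci,
				      unsigned long page, unsigned long offset,
				      unsigned long syndrome, int row, int chan)
	{
		edac_dbg(3, "reporting CE on row %d, channel %d\n", row, chan);

		/* count = 1; layer indices are row/channel/-1; no driver-private pointer */
		edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1,
				     page, offset, syndrome,
				     row, chan, -1,
				     "example CE", "");
	}
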
diff --git a/drivers/edac/mpc85xx_edac.c b/drivers/edac/mpc85xx_edac.c
index 0e374625f6f8..a1e791ec25d3 100644
--- a/drivers/edac/mpc85xx_edac.c
+++ b/drivers/edac/mpc85xx_edac.c
@@ -49,34 +49,45 @@ static u32 orig_hid1[2];
49 49
50/************************ MC SYSFS parts ***********************************/ 50/************************ MC SYSFS parts ***********************************/
51 51
52static ssize_t mpc85xx_mc_inject_data_hi_show(struct mem_ctl_info *mci, 52#define to_mci(k) container_of(k, struct mem_ctl_info, dev)
53
54static ssize_t mpc85xx_mc_inject_data_hi_show(struct device *dev,
55 struct device_attribute *mattr,
53 char *data) 56 char *data)
54{ 57{
58 struct mem_ctl_info *mci = to_mci(dev);
55 struct mpc85xx_mc_pdata *pdata = mci->pvt_info; 59 struct mpc85xx_mc_pdata *pdata = mci->pvt_info;
56 return sprintf(data, "0x%08x", 60 return sprintf(data, "0x%08x",
57 in_be32(pdata->mc_vbase + 61 in_be32(pdata->mc_vbase +
58 MPC85XX_MC_DATA_ERR_INJECT_HI)); 62 MPC85XX_MC_DATA_ERR_INJECT_HI));
59} 63}
60 64
61static ssize_t mpc85xx_mc_inject_data_lo_show(struct mem_ctl_info *mci, 65static ssize_t mpc85xx_mc_inject_data_lo_show(struct device *dev,
66 struct device_attribute *mattr,
62 char *data) 67 char *data)
63{ 68{
69 struct mem_ctl_info *mci = to_mci(dev);
64 struct mpc85xx_mc_pdata *pdata = mci->pvt_info; 70 struct mpc85xx_mc_pdata *pdata = mci->pvt_info;
65 return sprintf(data, "0x%08x", 71 return sprintf(data, "0x%08x",
66 in_be32(pdata->mc_vbase + 72 in_be32(pdata->mc_vbase +
67 MPC85XX_MC_DATA_ERR_INJECT_LO)); 73 MPC85XX_MC_DATA_ERR_INJECT_LO));
68} 74}
69 75
70static ssize_t mpc85xx_mc_inject_ctrl_show(struct mem_ctl_info *mci, char *data) 76static ssize_t mpc85xx_mc_inject_ctrl_show(struct device *dev,
77 struct device_attribute *mattr,
78 char *data)
71{ 79{
80 struct mem_ctl_info *mci = to_mci(dev);
72 struct mpc85xx_mc_pdata *pdata = mci->pvt_info; 81 struct mpc85xx_mc_pdata *pdata = mci->pvt_info;
73 return sprintf(data, "0x%08x", 82 return sprintf(data, "0x%08x",
74 in_be32(pdata->mc_vbase + MPC85XX_MC_ECC_ERR_INJECT)); 83 in_be32(pdata->mc_vbase + MPC85XX_MC_ECC_ERR_INJECT));
75} 84}
76 85
77static ssize_t mpc85xx_mc_inject_data_hi_store(struct mem_ctl_info *mci, 86static ssize_t mpc85xx_mc_inject_data_hi_store(struct device *dev,
87 struct device_attribute *mattr,
78 const char *data, size_t count) 88 const char *data, size_t count)
79{ 89{
90 struct mem_ctl_info *mci = to_mci(dev);
80 struct mpc85xx_mc_pdata *pdata = mci->pvt_info; 91 struct mpc85xx_mc_pdata *pdata = mci->pvt_info;
81 if (isdigit(*data)) { 92 if (isdigit(*data)) {
82 out_be32(pdata->mc_vbase + MPC85XX_MC_DATA_ERR_INJECT_HI, 93 out_be32(pdata->mc_vbase + MPC85XX_MC_DATA_ERR_INJECT_HI,
@@ -86,9 +97,11 @@ static ssize_t mpc85xx_mc_inject_data_hi_store(struct mem_ctl_info *mci,
86 return 0; 97 return 0;
87} 98}
88 99
89static ssize_t mpc85xx_mc_inject_data_lo_store(struct mem_ctl_info *mci, 100static ssize_t mpc85xx_mc_inject_data_lo_store(struct device *dev,
101 struct device_attribute *mattr,
90 const char *data, size_t count) 102 const char *data, size_t count)
91{ 103{
104 struct mem_ctl_info *mci = to_mci(dev);
92 struct mpc85xx_mc_pdata *pdata = mci->pvt_info; 105 struct mpc85xx_mc_pdata *pdata = mci->pvt_info;
93 if (isdigit(*data)) { 106 if (isdigit(*data)) {
94 out_be32(pdata->mc_vbase + MPC85XX_MC_DATA_ERR_INJECT_LO, 107 out_be32(pdata->mc_vbase + MPC85XX_MC_DATA_ERR_INJECT_LO,
@@ -98,9 +111,11 @@ static ssize_t mpc85xx_mc_inject_data_lo_store(struct mem_ctl_info *mci,
98 return 0; 111 return 0;
99} 112}
100 113
101static ssize_t mpc85xx_mc_inject_ctrl_store(struct mem_ctl_info *mci, 114static ssize_t mpc85xx_mc_inject_ctrl_store(struct device *dev,
102 const char *data, size_t count) 115 struct device_attribute *mattr,
116 const char *data, size_t count)
103{ 117{
118 struct mem_ctl_info *mci = to_mci(dev);
104 struct mpc85xx_mc_pdata *pdata = mci->pvt_info; 119 struct mpc85xx_mc_pdata *pdata = mci->pvt_info;
105 if (isdigit(*data)) { 120 if (isdigit(*data)) {
106 out_be32(pdata->mc_vbase + MPC85XX_MC_ECC_ERR_INJECT, 121 out_be32(pdata->mc_vbase + MPC85XX_MC_ECC_ERR_INJECT,
@@ -110,38 +125,35 @@ static ssize_t mpc85xx_mc_inject_ctrl_store(struct mem_ctl_info *mci,
110 return 0; 125 return 0;
111} 126}
112 127
113static struct mcidev_sysfs_attribute mpc85xx_mc_sysfs_attributes[] = { 128DEVICE_ATTR(inject_data_hi, S_IRUGO | S_IWUSR,
114 { 129 mpc85xx_mc_inject_data_hi_show, mpc85xx_mc_inject_data_hi_store);
115 .attr = { 130DEVICE_ATTR(inject_data_lo, S_IRUGO | S_IWUSR,
116 .name = "inject_data_hi", 131 mpc85xx_mc_inject_data_lo_show, mpc85xx_mc_inject_data_lo_store);
117 .mode = (S_IRUGO | S_IWUSR) 132DEVICE_ATTR(inject_ctrl, S_IRUGO | S_IWUSR,
118 }, 133 mpc85xx_mc_inject_ctrl_show, mpc85xx_mc_inject_ctrl_store);
119 .show = mpc85xx_mc_inject_data_hi_show,
120 .store = mpc85xx_mc_inject_data_hi_store},
121 {
122 .attr = {
123 .name = "inject_data_lo",
124 .mode = (S_IRUGO | S_IWUSR)
125 },
126 .show = mpc85xx_mc_inject_data_lo_show,
127 .store = mpc85xx_mc_inject_data_lo_store},
128 {
129 .attr = {
130 .name = "inject_ctrl",
131 .mode = (S_IRUGO | S_IWUSR)
132 },
133 .show = mpc85xx_mc_inject_ctrl_show,
134 .store = mpc85xx_mc_inject_ctrl_store},
135 134
136 /* End of list */ 135static int mpc85xx_create_sysfs_attributes(struct mem_ctl_info *mci)
137 { 136{
138 .attr = {.name = NULL} 137 int rc;
139 } 138
140}; 139 rc = device_create_file(&mci->dev, &dev_attr_inject_data_hi);
140 if (rc < 0)
141 return rc;
142 rc = device_create_file(&mci->dev, &dev_attr_inject_data_lo);
143 if (rc < 0)
144 return rc;
145 rc = device_create_file(&mci->dev, &dev_attr_inject_ctrl);
146 if (rc < 0)
147 return rc;
141 148
142static void mpc85xx_set_mc_sysfs_attributes(struct mem_ctl_info *mci) 149 return 0;
150}
151
152static void mpc85xx_remove_sysfs_attributes(struct mem_ctl_info *mci)
143{ 153{
144 mci->mc_driver_sysfs_attributes = mpc85xx_mc_sysfs_attributes; 154 device_remove_file(&mci->dev, &dev_attr_inject_data_hi);
155 device_remove_file(&mci->dev, &dev_attr_inject_data_lo);
156 device_remove_file(&mci->dev, &dev_attr_inject_ctrl);
145} 157}
146 158
147/**************************** PCI Err device ***************************/ 159/**************************** PCI Err device ***************************/
@@ -268,7 +280,7 @@ static int __devinit mpc85xx_pci_err_probe(struct platform_device *op)
268 out_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_DR, ~0); 280 out_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_DR, ~0);
269 281
270 if (edac_pci_add_device(pci, pdata->edac_idx) > 0) { 282 if (edac_pci_add_device(pci, pdata->edac_idx) > 0) {
271 debugf3("%s(): failed edac_pci_add_device()\n", __func__); 283 edac_dbg(3, "failed edac_pci_add_device()\n");
272 goto err; 284 goto err;
273 } 285 }
274 286
@@ -291,7 +303,7 @@ static int __devinit mpc85xx_pci_err_probe(struct platform_device *op)
291 } 303 }
292 304
293 devres_remove_group(&op->dev, mpc85xx_pci_err_probe); 305 devres_remove_group(&op->dev, mpc85xx_pci_err_probe);
294 debugf3("%s(): success\n", __func__); 306 edac_dbg(3, "success\n");
295 printk(KERN_INFO EDAC_MOD_STR " PCI err registered\n"); 307 printk(KERN_INFO EDAC_MOD_STR " PCI err registered\n");
296 308
297 return 0; 309 return 0;
@@ -309,7 +321,7 @@ static int mpc85xx_pci_err_remove(struct platform_device *op)
309 struct edac_pci_ctl_info *pci = dev_get_drvdata(&op->dev); 321 struct edac_pci_ctl_info *pci = dev_get_drvdata(&op->dev);
310 struct mpc85xx_pci_pdata *pdata = pci->pvt_info; 322 struct mpc85xx_pci_pdata *pdata = pci->pvt_info;
311 323
312 debugf0("%s()\n", __func__); 324 edac_dbg(0, "\n");
313 325
314 out_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_CAP_DR, 326 out_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_CAP_DR,
315 orig_pci_err_cap_dr); 327 orig_pci_err_cap_dr);
@@ -570,7 +582,7 @@ static int __devinit mpc85xx_l2_err_probe(struct platform_device *op)
570 pdata->edac_idx = edac_dev_idx++; 582 pdata->edac_idx = edac_dev_idx++;
571 583
572 if (edac_device_add_device(edac_dev) > 0) { 584 if (edac_device_add_device(edac_dev) > 0) {
573 debugf3("%s(): failed edac_device_add_device()\n", __func__); 585 edac_dbg(3, "failed edac_device_add_device()\n");
574 goto err; 586 goto err;
575 } 587 }
576 588
@@ -598,7 +610,7 @@ static int __devinit mpc85xx_l2_err_probe(struct platform_device *op)
598 610
599 devres_remove_group(&op->dev, mpc85xx_l2_err_probe); 611 devres_remove_group(&op->dev, mpc85xx_l2_err_probe);
600 612
601 debugf3("%s(): success\n", __func__); 613 edac_dbg(3, "success\n");
602 printk(KERN_INFO EDAC_MOD_STR " L2 err registered\n"); 614 printk(KERN_INFO EDAC_MOD_STR " L2 err registered\n");
603 615
604 return 0; 616 return 0;
@@ -616,7 +628,7 @@ static int mpc85xx_l2_err_remove(struct platform_device *op)
616 struct edac_device_ctl_info *edac_dev = dev_get_drvdata(&op->dev); 628 struct edac_device_ctl_info *edac_dev = dev_get_drvdata(&op->dev);
617 struct mpc85xx_l2_pdata *pdata = edac_dev->pvt_info; 629 struct mpc85xx_l2_pdata *pdata = edac_dev->pvt_info;
618 630
619 debugf0("%s()\n", __func__); 631 edac_dbg(0, "\n");
620 632
621 if (edac_op_state == EDAC_OPSTATE_INT) { 633 if (edac_op_state == EDAC_OPSTATE_INT) {
622 out_be32(pdata->l2_vbase + MPC85XX_L2_ERRINTEN, 0); 634 out_be32(pdata->l2_vbase + MPC85XX_L2_ERRINTEN, 0);
@@ -813,7 +825,7 @@ static void mpc85xx_mc_check(struct mem_ctl_info *mci)
813 pfn = err_addr >> PAGE_SHIFT; 825 pfn = err_addr >> PAGE_SHIFT;
814 826
815 for (row_index = 0; row_index < mci->nr_csrows; row_index++) { 827 for (row_index = 0; row_index < mci->nr_csrows; row_index++) {
816 csrow = &mci->csrows[row_index]; 828 csrow = mci->csrows[row_index];
817 if ((pfn >= csrow->first_page) && (pfn <= csrow->last_page)) 829 if ((pfn >= csrow->first_page) && (pfn <= csrow->last_page))
818 break; 830 break;
819 } 831 }
@@ -854,16 +866,16 @@ static void mpc85xx_mc_check(struct mem_ctl_info *mci)
854 mpc85xx_mc_printk(mci, KERN_ERR, "PFN out of range!\n"); 866 mpc85xx_mc_printk(mci, KERN_ERR, "PFN out of range!\n");
855 867
856 if (err_detect & DDR_EDE_SBE) 868 if (err_detect & DDR_EDE_SBE)
857 edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 869 edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1,
858 pfn, err_addr & ~PAGE_MASK, syndrome, 870 pfn, err_addr & ~PAGE_MASK, syndrome,
859 row_index, 0, -1, 871 row_index, 0, -1,
860 mci->ctl_name, "", NULL); 872 mci->ctl_name, "");
861 873
862 if (err_detect & DDR_EDE_MBE) 874 if (err_detect & DDR_EDE_MBE)
863 edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 875 edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1,
864 pfn, err_addr & ~PAGE_MASK, syndrome, 876 pfn, err_addr & ~PAGE_MASK, syndrome,
865 row_index, 0, -1, 877 row_index, 0, -1,
866 mci->ctl_name, "", NULL); 878 mci->ctl_name, "");
867 879
868 out_be32(pdata->mc_vbase + MPC85XX_MC_ERR_DETECT, err_detect); 880 out_be32(pdata->mc_vbase + MPC85XX_MC_ERR_DETECT, err_detect);
869} 881}
@@ -933,8 +945,8 @@ static void __devinit mpc85xx_init_csrows(struct mem_ctl_info *mci)
933 u32 start; 945 u32 start;
934 u32 end; 946 u32 end;
935 947
936 csrow = &mci->csrows[index]; 948 csrow = mci->csrows[index];
937 dimm = csrow->channels[0].dimm; 949 dimm = csrow->channels[0]->dimm;
938 950
939 cs_bnds = in_be32(pdata->mc_vbase + MPC85XX_MC_CS_BNDS_0 + 951 cs_bnds = in_be32(pdata->mc_vbase + MPC85XX_MC_CS_BNDS_0 +
940 (index * MPC85XX_MC_CS_BNDS_OFS)); 952 (index * MPC85XX_MC_CS_BNDS_OFS));
@@ -990,9 +1002,9 @@ static int __devinit mpc85xx_mc_err_probe(struct platform_device *op)
990 pdata = mci->pvt_info; 1002 pdata = mci->pvt_info;
991 pdata->name = "mpc85xx_mc_err"; 1003 pdata->name = "mpc85xx_mc_err";
992 pdata->irq = NO_IRQ; 1004 pdata->irq = NO_IRQ;
993 mci->dev = &op->dev; 1005 mci->pdev = &op->dev;
994 pdata->edac_idx = edac_mc_idx++; 1006 pdata->edac_idx = edac_mc_idx++;
995 dev_set_drvdata(mci->dev, mci); 1007 dev_set_drvdata(mci->pdev, mci);
996 mci->ctl_name = pdata->name; 1008 mci->ctl_name = pdata->name;
997 mci->dev_name = pdata->name; 1009 mci->dev_name = pdata->name;
998 1010
@@ -1026,7 +1038,7 @@ static int __devinit mpc85xx_mc_err_probe(struct platform_device *op)
1026 goto err; 1038 goto err;
1027 } 1039 }
1028 1040
1029 debugf3("%s(): init mci\n", __func__); 1041 edac_dbg(3, "init mci\n");
1030 mci->mtype_cap = MEM_FLAG_RDDR | MEM_FLAG_RDDR2 | 1042 mci->mtype_cap = MEM_FLAG_RDDR | MEM_FLAG_RDDR2 |
1031 MEM_FLAG_DDR | MEM_FLAG_DDR2; 1043 MEM_FLAG_DDR | MEM_FLAG_DDR2;
1032 mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED; 1044 mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED;
@@ -1041,8 +1053,6 @@ static int __devinit mpc85xx_mc_err_probe(struct platform_device *op)
1041 1053
1042 mci->scrub_mode = SCRUB_SW_SRC; 1054 mci->scrub_mode = SCRUB_SW_SRC;
1043 1055
1044 mpc85xx_set_mc_sysfs_attributes(mci);
1045
1046 mpc85xx_init_csrows(mci); 1056 mpc85xx_init_csrows(mci);
1047 1057
1048 /* store the original error disable bits */ 1058 /* store the original error disable bits */
@@ -1054,7 +1064,13 @@ static int __devinit mpc85xx_mc_err_probe(struct platform_device *op)
1054 out_be32(pdata->mc_vbase + MPC85XX_MC_ERR_DETECT, ~0); 1064 out_be32(pdata->mc_vbase + MPC85XX_MC_ERR_DETECT, ~0);
1055 1065
1056 if (edac_mc_add_mc(mci)) { 1066 if (edac_mc_add_mc(mci)) {
1057 debugf3("%s(): failed edac_mc_add_mc()\n", __func__); 1067 edac_dbg(3, "failed edac_mc_add_mc()\n");
1068 goto err;
1069 }
1070
1071 if (mpc85xx_create_sysfs_attributes(mci)) {
1072 edac_mc_del_mc(mci->pdev);
1073 edac_dbg(3, "failed edac_mc_add_mc()\n");
1058 goto err; 1074 goto err;
1059 } 1075 }
1060 1076
@@ -1088,7 +1104,7 @@ static int __devinit mpc85xx_mc_err_probe(struct platform_device *op)
1088 } 1104 }
1089 1105
1090 devres_remove_group(&op->dev, mpc85xx_mc_err_probe); 1106 devres_remove_group(&op->dev, mpc85xx_mc_err_probe);
1091 debugf3("%s(): success\n", __func__); 1107 edac_dbg(3, "success\n");
1092 printk(KERN_INFO EDAC_MOD_STR " MC err registered\n"); 1108 printk(KERN_INFO EDAC_MOD_STR " MC err registered\n");
1093 1109
1094 return 0; 1110 return 0;
@@ -1106,7 +1122,7 @@ static int mpc85xx_mc_err_remove(struct platform_device *op)
1106 struct mem_ctl_info *mci = dev_get_drvdata(&op->dev); 1122 struct mem_ctl_info *mci = dev_get_drvdata(&op->dev);
1107 struct mpc85xx_mc_pdata *pdata = mci->pvt_info; 1123 struct mpc85xx_mc_pdata *pdata = mci->pvt_info;
1108 1124
1109 debugf0("%s()\n", __func__); 1125 edac_dbg(0, "\n");
1110 1126
1111 if (edac_op_state == EDAC_OPSTATE_INT) { 1127 if (edac_op_state == EDAC_OPSTATE_INT) {
1112 out_be32(pdata->mc_vbase + MPC85XX_MC_ERR_INT_EN, 0); 1128 out_be32(pdata->mc_vbase + MPC85XX_MC_ERR_INT_EN, 0);
@@ -1117,6 +1133,7 @@ static int mpc85xx_mc_err_remove(struct platform_device *op)
1117 orig_ddr_err_disable); 1133 orig_ddr_err_disable);
1118 out_be32(pdata->mc_vbase + MPC85XX_MC_ERR_SBE, orig_ddr_err_sbe); 1134 out_be32(pdata->mc_vbase + MPC85XX_MC_ERR_SBE, orig_ddr_err_sbe);
1119 1135
1136 mpc85xx_remove_sysfs_attributes(mci);
1120 edac_mc_del_mc(&op->dev); 1137 edac_mc_del_mc(&op->dev);
1121 edac_mc_free(mci); 1138 edac_mc_free(mci);
1122 return 0; 1139 return 0;
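mpc85xx is the one driver here that carried private sysfs controls, so it also shows the new way to expose them: instead of hanging a mcidev_sysfs_attribute table off the mci, the driver declares ordinary struct device_attribute entries and creates them on the mci's embedded device after edac_mc_add_mc(), removing them again before edac_mc_del_mc(). A rough sketch of the pattern with a made-up read-only attribute:

	#define to_mci(k) container_of(k, struct mem_ctl_info, dev)

	static ssize_t inject_example_show(struct device *dev,
					   struct device_attribute *attr, char *buf)
	{
		struct mem_ctl_info *mci = to_mci(dev);

		return sprintf(buf, "%s\n", mci->ctl_name);
	}
	static DEVICE_ATTR(inject_example, S_IRUGO, inject_example_show, NULL);

	/* after a successful edac_mc_add_mc(mci): */
	rc = device_create_file(&mci->dev, &dev_attr_inject_example);

	/* and in the remove path, before edac_mc_del_mc(): */
	device_remove_file(&mci->dev, &dev_attr_inject_example);
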
diff --git a/drivers/edac/mv64x60_edac.c b/drivers/edac/mv64x60_edac.c
index b0bb5a3d2527..2b315c2edc3c 100644
--- a/drivers/edac/mv64x60_edac.c
+++ b/drivers/edac/mv64x60_edac.c
@@ -169,7 +169,7 @@ static int __devinit mv64x60_pci_err_probe(struct platform_device *pdev)
169 MV64X60_PCIx_ERR_MASK_VAL); 169 MV64X60_PCIx_ERR_MASK_VAL);
170 170
171 if (edac_pci_add_device(pci, pdata->edac_idx) > 0) { 171 if (edac_pci_add_device(pci, pdata->edac_idx) > 0) {
172 debugf3("%s(): failed edac_pci_add_device()\n", __func__); 172 edac_dbg(3, "failed edac_pci_add_device()\n");
173 goto err; 173 goto err;
174 } 174 }
175 175
@@ -194,7 +194,7 @@ static int __devinit mv64x60_pci_err_probe(struct platform_device *pdev)
194 devres_remove_group(&pdev->dev, mv64x60_pci_err_probe); 194 devres_remove_group(&pdev->dev, mv64x60_pci_err_probe);
195 195
196 /* get this far and it's successful */ 196 /* get this far and it's successful */
197 debugf3("%s(): success\n", __func__); 197 edac_dbg(3, "success\n");
198 198
199 return 0; 199 return 0;
200 200
@@ -210,7 +210,7 @@ static int mv64x60_pci_err_remove(struct platform_device *pdev)
210{ 210{
211 struct edac_pci_ctl_info *pci = platform_get_drvdata(pdev); 211 struct edac_pci_ctl_info *pci = platform_get_drvdata(pdev);
212 212
213 debugf0("%s()\n", __func__); 213 edac_dbg(0, "\n");
214 214
215 edac_pci_del_device(&pdev->dev); 215 edac_pci_del_device(&pdev->dev);
216 216
@@ -336,7 +336,7 @@ static int __devinit mv64x60_sram_err_probe(struct platform_device *pdev)
336 pdata->edac_idx = edac_dev_idx++; 336 pdata->edac_idx = edac_dev_idx++;
337 337
338 if (edac_device_add_device(edac_dev) > 0) { 338 if (edac_device_add_device(edac_dev) > 0) {
339 debugf3("%s(): failed edac_device_add_device()\n", __func__); 339 edac_dbg(3, "failed edac_device_add_device()\n");
340 goto err; 340 goto err;
341 } 341 }
342 342
@@ -363,7 +363,7 @@ static int __devinit mv64x60_sram_err_probe(struct platform_device *pdev)
363 devres_remove_group(&pdev->dev, mv64x60_sram_err_probe); 363 devres_remove_group(&pdev->dev, mv64x60_sram_err_probe);
364 364
365 /* get this far and it's successful */ 365 /* get this far and it's successful */
366 debugf3("%s(): success\n", __func__); 366 edac_dbg(3, "success\n");
367 367
368 return 0; 368 return 0;
369 369
@@ -379,7 +379,7 @@ static int mv64x60_sram_err_remove(struct platform_device *pdev)
379{ 379{
380 struct edac_device_ctl_info *edac_dev = platform_get_drvdata(pdev); 380 struct edac_device_ctl_info *edac_dev = platform_get_drvdata(pdev);
381 381
382 debugf0("%s()\n", __func__); 382 edac_dbg(0, "\n");
383 383
384 edac_device_del_device(&pdev->dev); 384 edac_device_del_device(&pdev->dev);
385 edac_device_free_ctl_info(edac_dev); 385 edac_device_free_ctl_info(edac_dev);
@@ -531,7 +531,7 @@ static int __devinit mv64x60_cpu_err_probe(struct platform_device *pdev)
531 pdata->edac_idx = edac_dev_idx++; 531 pdata->edac_idx = edac_dev_idx++;
532 532
533 if (edac_device_add_device(edac_dev) > 0) { 533 if (edac_device_add_device(edac_dev) > 0) {
534 debugf3("%s(): failed edac_device_add_device()\n", __func__); 534 edac_dbg(3, "failed edac_device_add_device()\n");
535 goto err; 535 goto err;
536 } 536 }
537 537
@@ -558,7 +558,7 @@ static int __devinit mv64x60_cpu_err_probe(struct platform_device *pdev)
558 devres_remove_group(&pdev->dev, mv64x60_cpu_err_probe); 558 devres_remove_group(&pdev->dev, mv64x60_cpu_err_probe);
559 559
560 /* get this far and it's successful */ 560 /* get this far and it's successful */
561 debugf3("%s(): success\n", __func__); 561 edac_dbg(3, "success\n");
562 562
563 return 0; 563 return 0;
564 564
@@ -574,7 +574,7 @@ static int mv64x60_cpu_err_remove(struct platform_device *pdev)
574{ 574{
575 struct edac_device_ctl_info *edac_dev = platform_get_drvdata(pdev); 575 struct edac_device_ctl_info *edac_dev = platform_get_drvdata(pdev);
576 576
577 debugf0("%s()\n", __func__); 577 edac_dbg(0, "\n");
578 578
579 edac_device_del_device(&pdev->dev); 579 edac_device_del_device(&pdev->dev);
580 edac_device_free_ctl_info(edac_dev); 580 edac_device_free_ctl_info(edac_dev);
@@ -611,17 +611,17 @@ static void mv64x60_mc_check(struct mem_ctl_info *mci)
611 611
612 /* first bit clear in ECC Err Reg, 1 bit error, correctable by HW */ 612 /* first bit clear in ECC Err Reg, 1 bit error, correctable by HW */
613 if (!(reg & 0x1)) 613 if (!(reg & 0x1))
614 edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 614 edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1,
615 err_addr >> PAGE_SHIFT, 615 err_addr >> PAGE_SHIFT,
616 err_addr & PAGE_MASK, syndrome, 616 err_addr & PAGE_MASK, syndrome,
617 0, 0, -1, 617 0, 0, -1,
618 mci->ctl_name, "", NULL); 618 mci->ctl_name, "");
619 else /* 2 bit error, UE */ 619 else /* 2 bit error, UE */
620 edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 620 edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1,
621 err_addr >> PAGE_SHIFT, 621 err_addr >> PAGE_SHIFT,
622 err_addr & PAGE_MASK, 0, 622 err_addr & PAGE_MASK, 0,
623 0, 0, -1, 623 0, 0, -1,
624 mci->ctl_name, "", NULL); 624 mci->ctl_name, "");
625 625
626 /* clear the error */ 626 /* clear the error */
627 out_le32(pdata->mc_vbase + MV64X60_SDRAM_ERR_ADDR, 0); 627 out_le32(pdata->mc_vbase + MV64X60_SDRAM_ERR_ADDR, 0);
@@ -670,8 +670,8 @@ static void mv64x60_init_csrows(struct mem_ctl_info *mci,
670 670
671 ctl = in_le32(pdata->mc_vbase + MV64X60_SDRAM_CONFIG); 671 ctl = in_le32(pdata->mc_vbase + MV64X60_SDRAM_CONFIG);
672 672
673 csrow = &mci->csrows[0]; 673 csrow = mci->csrows[0];
674 dimm = csrow->channels[0].dimm; 674 dimm = csrow->channels[0]->dimm;
675 675
676 dimm->nr_pages = pdata->total_mem >> PAGE_SHIFT; 676 dimm->nr_pages = pdata->total_mem >> PAGE_SHIFT;
677 dimm->grain = 8; 677 dimm->grain = 8;
@@ -724,7 +724,7 @@ static int __devinit mv64x60_mc_err_probe(struct platform_device *pdev)
724 } 724 }
725 725
726 pdata = mci->pvt_info; 726 pdata = mci->pvt_info;
727 mci->dev = &pdev->dev; 727 mci->pdev = &pdev->dev;
728 platform_set_drvdata(pdev, mci); 728 platform_set_drvdata(pdev, mci);
729 pdata->name = "mv64x60_mc_err"; 729 pdata->name = "mv64x60_mc_err";
730 pdata->irq = NO_IRQ; 730 pdata->irq = NO_IRQ;
@@ -766,7 +766,7 @@ static int __devinit mv64x60_mc_err_probe(struct platform_device *pdev)
766 goto err2; 766 goto err2;
767 } 767 }
768 768
769 debugf3("%s(): init mci\n", __func__); 769 edac_dbg(3, "init mci\n");
770 mci->mtype_cap = MEM_FLAG_RDDR | MEM_FLAG_DDR; 770 mci->mtype_cap = MEM_FLAG_RDDR | MEM_FLAG_DDR;
771 mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED; 771 mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED;
772 mci->edac_cap = EDAC_FLAG_SECDED; 772 mci->edac_cap = EDAC_FLAG_SECDED;
@@ -790,7 +790,7 @@ static int __devinit mv64x60_mc_err_probe(struct platform_device *pdev)
790 out_le32(pdata->mc_vbase + MV64X60_SDRAM_ERR_ECC_CNTL, ctl); 790 out_le32(pdata->mc_vbase + MV64X60_SDRAM_ERR_ECC_CNTL, ctl);
791 791
792 if (edac_mc_add_mc(mci)) { 792 if (edac_mc_add_mc(mci)) {
793 debugf3("%s(): failed edac_mc_add_mc()\n", __func__); 793 edac_dbg(3, "failed edac_mc_add_mc()\n");
794 goto err; 794 goto err;
795 } 795 }
796 796
@@ -815,7 +815,7 @@ static int __devinit mv64x60_mc_err_probe(struct platform_device *pdev)
815 } 815 }
816 816
817 /* get this far and it's successful */ 817 /* get this far and it's successful */
818 debugf3("%s(): success\n", __func__); 818 edac_dbg(3, "success\n");
819 819
820 return 0; 820 return 0;
821 821
@@ -831,7 +831,7 @@ static int mv64x60_mc_err_remove(struct platform_device *pdev)
831{ 831{
832 struct mem_ctl_info *mci = platform_get_drvdata(pdev); 832 struct mem_ctl_info *mci = platform_get_drvdata(pdev);
833 833
834 debugf0("%s()\n", __func__); 834 edac_dbg(0, "\n");
835 835
836 edac_mc_del_mc(&pdev->dev); 836 edac_mc_del_mc(&pdev->dev);
837 edac_mc_free(mci); 837 edac_mc_free(mci);
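The csrow accesses changed because struct mem_ctl_info no longer embeds the csrow and channel arrays: mci->csrows[i] and csrow->channels[j] are now arrays of pointers, with the per-rank DIMM data reached through ->dimm, and the old mci->dev field becomes mci->pdev now that the mci embeds a struct device of its own. A sketch of walking the layout as these drivers do (the function itself is illustrative):

	static void example_dump_layout(struct mem_ctl_info *mci)
	{
		int row, chan;

		for (row = 0; row < mci->nr_csrows; row++) {
			struct csrow_info *csrow = mci->csrows[row];

			for (chan = 0; chan < csrow->nr_channels; chan++) {
				struct dimm_info *dimm = csrow->channels[chan]->dimm;

				edac_dbg(2, "row %d channel %d: %u pages, grain %u\n",
					 row, chan, dimm->nr_pages, dimm->grain);
			}
		}
	}
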
diff --git a/drivers/edac/pasemi_edac.c b/drivers/edac/pasemi_edac.c
index b095a906a994..2d35b78ada3c 100644
--- a/drivers/edac/pasemi_edac.c
+++ b/drivers/edac/pasemi_edac.c
@@ -74,7 +74,7 @@ static int system_mmc_id;
74 74
75static u32 pasemi_edac_get_error_info(struct mem_ctl_info *mci) 75static u32 pasemi_edac_get_error_info(struct mem_ctl_info *mci)
76{ 76{
77 struct pci_dev *pdev = to_pci_dev(mci->dev); 77 struct pci_dev *pdev = to_pci_dev(mci->pdev);
78 u32 tmp; 78 u32 tmp;
79 79
80 pci_read_config_dword(pdev, MCDEBUG_ERRSTA, 80 pci_read_config_dword(pdev, MCDEBUG_ERRSTA,
@@ -95,7 +95,7 @@ static u32 pasemi_edac_get_error_info(struct mem_ctl_info *mci)
95 95
96static void pasemi_edac_process_error_info(struct mem_ctl_info *mci, u32 errsta) 96static void pasemi_edac_process_error_info(struct mem_ctl_info *mci, u32 errsta)
97{ 97{
98 struct pci_dev *pdev = to_pci_dev(mci->dev); 98 struct pci_dev *pdev = to_pci_dev(mci->pdev);
99 u32 errlog1a; 99 u32 errlog1a;
100 u32 cs; 100 u32 cs;
101 101
@@ -110,16 +110,16 @@ static void pasemi_edac_process_error_info(struct mem_ctl_info *mci, u32 errsta)
110 /* uncorrectable/multi-bit errors */ 110 /* uncorrectable/multi-bit errors */
111 if (errsta & (MCDEBUG_ERRSTA_MBE_STATUS | 111 if (errsta & (MCDEBUG_ERRSTA_MBE_STATUS |
112 MCDEBUG_ERRSTA_RFL_STATUS)) { 112 MCDEBUG_ERRSTA_RFL_STATUS)) {
113 edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 113 edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1,
114 mci->csrows[cs].first_page, 0, 0, 114 mci->csrows[cs]->first_page, 0, 0,
115 cs, 0, -1, mci->ctl_name, "", NULL); 115 cs, 0, -1, mci->ctl_name, "");
116 } 116 }
117 117
118 /* correctable/single-bit errors */ 118 /* correctable/single-bit errors */
119 if (errsta & MCDEBUG_ERRSTA_SBE_STATUS) 119 if (errsta & MCDEBUG_ERRSTA_SBE_STATUS)
120 edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 120 edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1,
121 mci->csrows[cs].first_page, 0, 0, 121 mci->csrows[cs]->first_page, 0, 0,
122 cs, 0, -1, mci->ctl_name, "", NULL); 122 cs, 0, -1, mci->ctl_name, "");
123} 123}
124 124
125static void pasemi_edac_check(struct mem_ctl_info *mci) 125static void pasemi_edac_check(struct mem_ctl_info *mci)
@@ -141,8 +141,8 @@ static int pasemi_edac_init_csrows(struct mem_ctl_info *mci,
141 int index; 141 int index;
142 142
143 for (index = 0; index < mci->nr_csrows; index++) { 143 for (index = 0; index < mci->nr_csrows; index++) {
144 csrow = &mci->csrows[index]; 144 csrow = mci->csrows[index];
145 dimm = csrow->channels[0].dimm; 145 dimm = csrow->channels[0]->dimm;
146 146
147 pci_read_config_dword(pdev, 147 pci_read_config_dword(pdev,
148 MCDRAM_RANKCFG + (index * 12), 148 MCDRAM_RANKCFG + (index * 12),
@@ -225,7 +225,7 @@ static int __devinit pasemi_edac_probe(struct pci_dev *pdev,
225 MCCFG_ERRCOR_ECC_GEN_EN | 225 MCCFG_ERRCOR_ECC_GEN_EN |
226 MCCFG_ERRCOR_ECC_CRR_EN; 226 MCCFG_ERRCOR_ECC_CRR_EN;
227 227
228 mci->dev = &pdev->dev; 228 mci->pdev = &pdev->dev;
229 mci->mtype_cap = MEM_FLAG_DDR | MEM_FLAG_RDDR; 229 mci->mtype_cap = MEM_FLAG_DDR | MEM_FLAG_RDDR;
230 mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_EC | EDAC_FLAG_SECDED; 230 mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_EC | EDAC_FLAG_SECDED;
231 mci->edac_cap = (errcor & MCCFG_ERRCOR_ECC_GEN_EN) ? 231 mci->edac_cap = (errcor & MCCFG_ERRCOR_ECC_GEN_EN) ?
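pasemi only needs the field rename: the device that used to sit in mci->dev is now mci->pdev (the parent that registered the controller), so helpers that recover the PCI device or stash driver data follow suit. Illustrative lines, assuming a PCI-backed controller:

	struct pci_dev *pdev = to_pci_dev(mci->pdev);	/* was to_pci_dev(mci->dev) */

	dev_set_drvdata(mci->pdev, mci);		/* was dev_set_drvdata(mci->dev, mci) */
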
diff --git a/drivers/edac/ppc4xx_edac.c b/drivers/edac/ppc4xx_edac.c
index f3f9fed06ad7..bf0957635991 100644
--- a/drivers/edac/ppc4xx_edac.c
+++ b/drivers/edac/ppc4xx_edac.c
@@ -727,10 +727,10 @@ ppc4xx_edac_handle_ce(struct mem_ctl_info *mci,
727 727
728 for (row = 0; row < mci->nr_csrows; row++) 728 for (row = 0; row < mci->nr_csrows; row++)
729 if (ppc4xx_edac_check_bank_error(status, row)) 729 if (ppc4xx_edac_check_bank_error(status, row))
730 edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 730 edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1,
731 0, 0, 0, 731 0, 0, 0,
732 row, 0, -1, 732 row, 0, -1,
733 message, "", NULL); 733 message, "");
734} 734}
735 735
736/** 736/**
@@ -758,10 +758,10 @@ ppc4xx_edac_handle_ue(struct mem_ctl_info *mci,
758 758
759 for (row = 0; row < mci->nr_csrows; row++) 759 for (row = 0; row < mci->nr_csrows; row++)
760 if (ppc4xx_edac_check_bank_error(status, row)) 760 if (ppc4xx_edac_check_bank_error(status, row))
761 edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 761 edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1,
762 page, offset, 0, 762 page, offset, 0,
763 row, 0, -1, 763 row, 0, -1,
764 message, "", NULL); 764 message, "");
765} 765}
766 766
767/** 767/**
@@ -1027,9 +1027,9 @@ ppc4xx_edac_mc_init(struct mem_ctl_info *mci,
1027 1027
1028 /* Initial driver pointers and private data */ 1028 /* Initial driver pointers and private data */
1029 1029
1030 mci->dev = &op->dev; 1030 mci->pdev = &op->dev;
1031 1031
1032 dev_set_drvdata(mci->dev, mci); 1032 dev_set_drvdata(mci->pdev, mci);
1033 1033
1034 pdata = mci->pvt_info; 1034 pdata = mci->pvt_info;
1035 1035
@@ -1334,7 +1334,7 @@ static int __devinit ppc4xx_edac_probe(struct platform_device *op)
1334 return 0; 1334 return 0;
1335 1335
1336 fail1: 1336 fail1:
1337 edac_mc_del_mc(mci->dev); 1337 edac_mc_del_mc(mci->pdev);
1338 1338
1339 fail: 1339 fail:
1340 edac_mc_free(mci); 1340 edac_mc_free(mci);
@@ -1368,7 +1368,7 @@ ppc4xx_edac_remove(struct platform_device *op)
1368 1368
1369 dcr_unmap(pdata->dcr_host, SDRAM_DCR_RESOURCE_LEN); 1369 dcr_unmap(pdata->dcr_host, SDRAM_DCR_RESOURCE_LEN);
1370 1370
1371 edac_mc_del_mc(mci->dev); 1371 edac_mc_del_mc(mci->pdev);
1372 edac_mc_free(mci); 1372 edac_mc_free(mci);
1373 1373
1374 return 0; 1374 return 0;
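ppc4xx also shows the tear-down order the rename implies: both the probe failure path and the remove path unregister the controller with edac_mc_del_mc() on the parent device before releasing the allocation. A sketch of that shape, with hypothetical labels and return variable:

 fail1:
	edac_mc_del_mc(mci->pdev);	/* unregister from the EDAC core first */
 fail:
	edac_mc_free(mci);		/* then free the mci and its layers */
	return res;
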
diff --git a/drivers/edac/r82600_edac.c b/drivers/edac/r82600_edac.c
index e1cacd164f31..f854debd5533 100644
--- a/drivers/edac/r82600_edac.c
+++ b/drivers/edac/r82600_edac.c
@@ -140,7 +140,7 @@ static void r82600_get_error_info(struct mem_ctl_info *mci,
140{ 140{
141 struct pci_dev *pdev; 141 struct pci_dev *pdev;
142 142
143 pdev = to_pci_dev(mci->dev); 143 pdev = to_pci_dev(mci->pdev);
144 pci_read_config_dword(pdev, R82600_EAP, &info->eapr); 144 pci_read_config_dword(pdev, R82600_EAP, &info->eapr);
145 145
146 if (info->eapr & BIT(0)) 146 if (info->eapr & BIT(0))
@@ -179,11 +179,11 @@ static int r82600_process_error_info(struct mem_ctl_info *mci,
179 error_found = 1; 179 error_found = 1;
180 180
181 if (handle_errors) 181 if (handle_errors)
182 edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 182 edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1,
183 page, 0, syndrome, 183 page, 0, syndrome,
184 edac_mc_find_csrow_by_page(mci, page), 184 edac_mc_find_csrow_by_page(mci, page),
185 0, -1, 185 0, -1,
186 mci->ctl_name, "", NULL); 186 mci->ctl_name, "");
187 } 187 }
188 188
189 if (info->eapr & BIT(1)) { /* UE? */ 189 if (info->eapr & BIT(1)) { /* UE? */
@@ -191,11 +191,11 @@ static int r82600_process_error_info(struct mem_ctl_info *mci,
191 191
192 if (handle_errors) 192 if (handle_errors)
193 /* 82600 doesn't give enough info */ 193 /* 82600 doesn't give enough info */
194 edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 194 edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1,
195 page, 0, 0, 195 page, 0, 0,
196 edac_mc_find_csrow_by_page(mci, page), 196 edac_mc_find_csrow_by_page(mci, page),
197 0, -1, 197 0, -1,
198 mci->ctl_name, "", NULL); 198 mci->ctl_name, "");
199 } 199 }
200 200
201 return error_found; 201 return error_found;
@@ -205,7 +205,7 @@ static void r82600_check(struct mem_ctl_info *mci)
205{ 205{
206 struct r82600_error_info info; 206 struct r82600_error_info info;
207 207
208 debugf1("MC%d: %s()\n", mci->mc_idx, __func__); 208 edac_dbg(1, "MC%d\n", mci->mc_idx);
209 r82600_get_error_info(mci, &info); 209 r82600_get_error_info(mci, &info);
210 r82600_process_error_info(mci, &info, 1); 210 r82600_process_error_info(mci, &info, 1);
211} 211}
@@ -230,19 +230,19 @@ static void r82600_init_csrows(struct mem_ctl_info *mci, struct pci_dev *pdev,
230 row_high_limit_last = 0; 230 row_high_limit_last = 0;
231 231
232 for (index = 0; index < mci->nr_csrows; index++) { 232 for (index = 0; index < mci->nr_csrows; index++) {
233 csrow = &mci->csrows[index]; 233 csrow = mci->csrows[index];
234 dimm = csrow->channels[0].dimm; 234 dimm = csrow->channels[0]->dimm;
235 235
236 /* find the DRAM Chip Select Base address and mask */ 236 /* find the DRAM Chip Select Base address and mask */
237 pci_read_config_byte(pdev, R82600_DRBA + index, &drbar); 237 pci_read_config_byte(pdev, R82600_DRBA + index, &drbar);
238 238
239 debugf1("%s() Row=%d DRBA = %#0x\n", __func__, index, drbar); 239 edac_dbg(1, "Row=%d DRBA = %#0x\n", index, drbar);
240 240
241 row_high_limit = ((u32) drbar << 24); 241 row_high_limit = ((u32) drbar << 24);
242/* row_high_limit = ((u32)drbar << 24) | 0xffffffUL; */ 242/* row_high_limit = ((u32)drbar << 24) | 0xffffffUL; */
243 243
244 debugf1("%s() Row=%d, Boundary Address=%#0x, Last = %#0x\n", 244 edac_dbg(1, "Row=%d, Boundary Address=%#0x, Last = %#0x\n",
245 __func__, index, row_high_limit, row_high_limit_last); 245 index, row_high_limit, row_high_limit_last);
246 246
247 /* Empty row [p.57] */ 247 /* Empty row [p.57] */
248 if (row_high_limit == row_high_limit_last) 248 if (row_high_limit == row_high_limit_last)
@@ -277,14 +277,13 @@ static int r82600_probe1(struct pci_dev *pdev, int dev_idx)
277 u32 sdram_refresh_rate; 277 u32 sdram_refresh_rate;
278 struct r82600_error_info discard; 278 struct r82600_error_info discard;
279 279
280 debugf0("%s()\n", __func__); 280 edac_dbg(0, "\n");
281 pci_read_config_byte(pdev, R82600_DRAMC, &dramcr); 281 pci_read_config_byte(pdev, R82600_DRAMC, &dramcr);
282 pci_read_config_dword(pdev, R82600_EAP, &eapr); 282 pci_read_config_dword(pdev, R82600_EAP, &eapr);
283 scrub_disabled = eapr & BIT(31); 283 scrub_disabled = eapr & BIT(31);
284 sdram_refresh_rate = dramcr & (BIT(0) | BIT(1)); 284 sdram_refresh_rate = dramcr & (BIT(0) | BIT(1));
285 debugf2("%s(): sdram refresh rate = %#0x\n", __func__, 285 edac_dbg(2, "sdram refresh rate = %#0x\n", sdram_refresh_rate);
286 sdram_refresh_rate); 286 edac_dbg(2, "DRAMC register = %#0x\n", dramcr);
287 debugf2("%s(): DRAMC register = %#0x\n", __func__, dramcr);
288 layers[0].type = EDAC_MC_LAYER_CHIP_SELECT; 287 layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
289 layers[0].size = R82600_NR_CSROWS; 288 layers[0].size = R82600_NR_CSROWS;
290 layers[0].is_virt_csrow = true; 289 layers[0].is_virt_csrow = true;
@@ -295,8 +294,8 @@ static int r82600_probe1(struct pci_dev *pdev, int dev_idx)
295 if (mci == NULL) 294 if (mci == NULL)
296 return -ENOMEM; 295 return -ENOMEM;
297 296
298 debugf0("%s(): mci = %p\n", __func__, mci); 297 edac_dbg(0, "mci = %p\n", mci);
299 mci->dev = &pdev->dev; 298 mci->pdev = &pdev->dev;
300 mci->mtype_cap = MEM_FLAG_RDDR | MEM_FLAG_DDR; 299 mci->mtype_cap = MEM_FLAG_RDDR | MEM_FLAG_DDR;
301 mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_EC | EDAC_FLAG_SECDED; 300 mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_EC | EDAC_FLAG_SECDED;
302 /* FIXME try to work out if the chip leads have been used for COM2 301 /* FIXME try to work out if the chip leads have been used for COM2
@@ -311,8 +310,8 @@ static int r82600_probe1(struct pci_dev *pdev, int dev_idx)
311 310
312 if (ecc_enabled(dramcr)) { 311 if (ecc_enabled(dramcr)) {
313 if (scrub_disabled) 312 if (scrub_disabled)
314 debugf3("%s(): mci = %p - Scrubbing disabled! EAP: " 313 edac_dbg(3, "mci = %p - Scrubbing disabled! EAP: %#0x\n",
315 "%#0x\n", __func__, mci, eapr); 314 mci, eapr);
316 } else 315 } else
317 mci->edac_cap = EDAC_FLAG_NONE; 316 mci->edac_cap = EDAC_FLAG_NONE;
318 317
@@ -329,15 +328,14 @@ static int r82600_probe1(struct pci_dev *pdev, int dev_idx)
329 * type of memory controller. The ID is therefore hardcoded to 0. 328 * type of memory controller. The ID is therefore hardcoded to 0.
330 */ 329 */
331 if (edac_mc_add_mc(mci)) { 330 if (edac_mc_add_mc(mci)) {
332 debugf3("%s(): failed edac_mc_add_mc()\n", __func__); 331 edac_dbg(3, "failed edac_mc_add_mc()\n");
333 goto fail; 332 goto fail;
334 } 333 }
335 334
336 /* get this far and it's successful */ 335 /* get this far and it's successful */
337 336
338 if (disable_hardware_scrub) { 337 if (disable_hardware_scrub) {
339 debugf3("%s(): Disabling Hardware Scrub (scrub on error)\n", 338 edac_dbg(3, "Disabling Hardware Scrub (scrub on error)\n");
340 __func__);
341 pci_write_bits32(pdev, R82600_EAP, BIT(31), BIT(31)); 339 pci_write_bits32(pdev, R82600_EAP, BIT(31), BIT(31));
342 } 340 }
343 341
@@ -352,7 +350,7 @@ static int r82600_probe1(struct pci_dev *pdev, int dev_idx)
352 __func__); 350 __func__);
353 } 351 }
354 352
355 debugf3("%s(): success\n", __func__); 353 edac_dbg(3, "success\n");
356 return 0; 354 return 0;
357 355
358fail: 356fail:
@@ -364,7 +362,7 @@ fail:
364static int __devinit r82600_init_one(struct pci_dev *pdev, 362static int __devinit r82600_init_one(struct pci_dev *pdev,
365 const struct pci_device_id *ent) 363 const struct pci_device_id *ent)
366{ 364{
367 debugf0("%s()\n", __func__); 365 edac_dbg(0, "\n");
368 366
369 /* don't need to call pci_enable_device() */ 367 /* don't need to call pci_enable_device() */
370 return r82600_probe1(pdev, ent->driver_data); 368 return r82600_probe1(pdev, ent->driver_data);
@@ -374,7 +372,7 @@ static void __devexit r82600_remove_one(struct pci_dev *pdev)
374{ 372{
375 struct mem_ctl_info *mci; 373 struct mem_ctl_info *mci;
376 374
377 debugf0("%s()\n", __func__); 375 edac_dbg(0, "\n");
378 376
379 if (r82600_pci) 377 if (r82600_pci)
380 edac_pci_release_generic_ctl(r82600_pci); 378 edac_pci_release_generic_ctl(r82600_pci);
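The r82600 probe also keeps the allocation style these drivers were converted to earlier in the series: edac_mc_alloc() takes a description of the location layers (chip-select rows and channels here) rather than raw row/channel counts. A sketch of that call with placeholder sizes and a hypothetical private struct:

	struct edac_mc_layer layers[2];
	struct mem_ctl_info *mci;

	layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
	layers[0].size = 4;			/* chip-select rows */
	layers[0].is_virt_csrow = true;
	layers[1].type = EDAC_MC_LAYER_CHANNEL;
	layers[1].size = 1;			/* channels per row */
	layers[1].is_virt_csrow = false;

	mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers,
			    sizeof(struct example_pvt));
	if (!mci)
		return -ENOMEM;
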
diff --git a/drivers/edac/sb_edac.c b/drivers/edac/sb_edac.c
index 36ad17e79d61..f3b1f9fafa4b 100644
--- a/drivers/edac/sb_edac.c
+++ b/drivers/edac/sb_edac.c
@@ -381,8 +381,8 @@ static inline int numrank(u32 mtr)
381 int ranks = (1 << RANK_CNT_BITS(mtr)); 381 int ranks = (1 << RANK_CNT_BITS(mtr));
382 382
383 if (ranks > 4) { 383 if (ranks > 4) {
384 debugf0("Invalid number of ranks: %d (max = 4) raw value = %x (%04x)", 384 edac_dbg(0, "Invalid number of ranks: %d (max = 4) raw value = %x (%04x)\n",
385 ranks, (unsigned int)RANK_CNT_BITS(mtr), mtr); 385 ranks, (unsigned int)RANK_CNT_BITS(mtr), mtr);
386 return -EINVAL; 386 return -EINVAL;
387 } 387 }
388 388
@@ -394,8 +394,8 @@ static inline int numrow(u32 mtr)
394 int rows = (RANK_WIDTH_BITS(mtr) + 12); 394 int rows = (RANK_WIDTH_BITS(mtr) + 12);
395 395
396 if (rows < 13 || rows > 18) { 396 if (rows < 13 || rows > 18) {
397 debugf0("Invalid number of rows: %d (should be between 14 and 17) raw value = %x (%04x)", 397 edac_dbg(0, "Invalid number of rows: %d (should be between 14 and 17) raw value = %x (%04x)\n",
398 rows, (unsigned int)RANK_WIDTH_BITS(mtr), mtr); 398 rows, (unsigned int)RANK_WIDTH_BITS(mtr), mtr);
399 return -EINVAL; 399 return -EINVAL;
400 } 400 }
401 401
@@ -407,8 +407,8 @@ static inline int numcol(u32 mtr)
407 int cols = (COL_WIDTH_BITS(mtr) + 10); 407 int cols = (COL_WIDTH_BITS(mtr) + 10);
408 408
409 if (cols > 12) { 409 if (cols > 12) {
410 debugf0("Invalid number of cols: %d (max = 4) raw value = %x (%04x)", 410 edac_dbg(0, "Invalid number of cols: %d (max = 4) raw value = %x (%04x)\n",
411 cols, (unsigned int)COL_WIDTH_BITS(mtr), mtr); 411 cols, (unsigned int)COL_WIDTH_BITS(mtr), mtr);
412 return -EINVAL; 412 return -EINVAL;
413 } 413 }
414 414
@@ -475,8 +475,8 @@ static struct pci_dev *get_pdev_slot_func(u8 bus, unsigned slot,
475 475
476 if (PCI_SLOT(sbridge_dev->pdev[i]->devfn) == slot && 476 if (PCI_SLOT(sbridge_dev->pdev[i]->devfn) == slot &&
477 PCI_FUNC(sbridge_dev->pdev[i]->devfn) == func) { 477 PCI_FUNC(sbridge_dev->pdev[i]->devfn) == func) {
478 debugf1("Associated %02x.%02x.%d with %p\n", 478 edac_dbg(1, "Associated %02x.%02x.%d with %p\n",
479 bus, slot, func, sbridge_dev->pdev[i]); 479 bus, slot, func, sbridge_dev->pdev[i]);
480 return sbridge_dev->pdev[i]; 480 return sbridge_dev->pdev[i];
481 } 481 }
482 } 482 }
@@ -523,45 +523,45 @@ static int get_dimm_config(struct mem_ctl_info *mci)
523 523
524 pci_read_config_dword(pvt->pci_br, SAD_CONTROL, &reg); 524 pci_read_config_dword(pvt->pci_br, SAD_CONTROL, &reg);
525 pvt->sbridge_dev->node_id = NODE_ID(reg); 525 pvt->sbridge_dev->node_id = NODE_ID(reg);
526 debugf0("mc#%d: Node ID: %d, source ID: %d\n", 526 edac_dbg(0, "mc#%d: Node ID: %d, source ID: %d\n",
527 pvt->sbridge_dev->mc, 527 pvt->sbridge_dev->mc,
528 pvt->sbridge_dev->node_id, 528 pvt->sbridge_dev->node_id,
529 pvt->sbridge_dev->source_id); 529 pvt->sbridge_dev->source_id);
530 530
531 pci_read_config_dword(pvt->pci_ras, RASENABLES, &reg); 531 pci_read_config_dword(pvt->pci_ras, RASENABLES, &reg);
532 if (IS_MIRROR_ENABLED(reg)) { 532 if (IS_MIRROR_ENABLED(reg)) {
533 debugf0("Memory mirror is enabled\n"); 533 edac_dbg(0, "Memory mirror is enabled\n");
534 pvt->is_mirrored = true; 534 pvt->is_mirrored = true;
535 } else { 535 } else {
536 debugf0("Memory mirror is disabled\n"); 536 edac_dbg(0, "Memory mirror is disabled\n");
537 pvt->is_mirrored = false; 537 pvt->is_mirrored = false;
538 } 538 }
539 539
540 pci_read_config_dword(pvt->pci_ta, MCMTR, &pvt->info.mcmtr); 540 pci_read_config_dword(pvt->pci_ta, MCMTR, &pvt->info.mcmtr);
541 if (IS_LOCKSTEP_ENABLED(pvt->info.mcmtr)) { 541 if (IS_LOCKSTEP_ENABLED(pvt->info.mcmtr)) {
542 debugf0("Lockstep is enabled\n"); 542 edac_dbg(0, "Lockstep is enabled\n");
543 mode = EDAC_S8ECD8ED; 543 mode = EDAC_S8ECD8ED;
544 pvt->is_lockstep = true; 544 pvt->is_lockstep = true;
545 } else { 545 } else {
546 debugf0("Lockstep is disabled\n"); 546 edac_dbg(0, "Lockstep is disabled\n");
547 mode = EDAC_S4ECD4ED; 547 mode = EDAC_S4ECD4ED;
548 pvt->is_lockstep = false; 548 pvt->is_lockstep = false;
549 } 549 }
550 if (IS_CLOSE_PG(pvt->info.mcmtr)) { 550 if (IS_CLOSE_PG(pvt->info.mcmtr)) {
551 debugf0("address map is on closed page mode\n"); 551 edac_dbg(0, "address map is on closed page mode\n");
552 pvt->is_close_pg = true; 552 pvt->is_close_pg = true;
553 } else { 553 } else {
554 debugf0("address map is on open page mode\n"); 554 edac_dbg(0, "address map is on open page mode\n");
555 pvt->is_close_pg = false; 555 pvt->is_close_pg = false;
556 } 556 }
557 557
558 pci_read_config_dword(pvt->pci_ddrio, RANK_CFG_A, &reg); 558 pci_read_config_dword(pvt->pci_ddrio, RANK_CFG_A, &reg);
559 if (IS_RDIMM_ENABLED(reg)) { 559 if (IS_RDIMM_ENABLED(reg)) {
560 /* FIXME: Can also be LRDIMM */ 560 /* FIXME: Can also be LRDIMM */
561 debugf0("Memory is registered\n"); 561 edac_dbg(0, "Memory is registered\n");
562 mtype = MEM_RDDR3; 562 mtype = MEM_RDDR3;
563 } else { 563 } else {
564 debugf0("Memory is unregistered\n"); 564 edac_dbg(0, "Memory is unregistered\n");
565 mtype = MEM_DDR3; 565 mtype = MEM_DDR3;
566 } 566 }
567 567
@@ -576,7 +576,7 @@ static int get_dimm_config(struct mem_ctl_info *mci)
576 i, j, 0); 576 i, j, 0);
577 pci_read_config_dword(pvt->pci_tad[i], 577 pci_read_config_dword(pvt->pci_tad[i],
578 mtr_regs[j], &mtr); 578 mtr_regs[j], &mtr);
579 debugf4("Channel #%d MTR%d = %x\n", i, j, mtr); 579 edac_dbg(4, "Channel #%d MTR%d = %x\n", i, j, mtr);
580 if (IS_DIMM_PRESENT(mtr)) { 580 if (IS_DIMM_PRESENT(mtr)) {
581 pvt->channel[i].dimms++; 581 pvt->channel[i].dimms++;
582 582
@@ -588,10 +588,10 @@ static int get_dimm_config(struct mem_ctl_info *mci)
588 size = (rows * cols * banks * ranks) >> (20 - 3); 588 size = (rows * cols * banks * ranks) >> (20 - 3);
589 npages = MiB_TO_PAGES(size); 589 npages = MiB_TO_PAGES(size);
590 590
591 debugf0("mc#%d: channel %d, dimm %d, %d Mb (%d pages) bank: %d, rank: %d, row: %#x, col: %#x\n", 591 edac_dbg(0, "mc#%d: channel %d, dimm %d, %d Mb (%d pages) bank: %d, rank: %d, row: %#x, col: %#x\n",
592 pvt->sbridge_dev->mc, i, j, 592 pvt->sbridge_dev->mc, i, j,
593 size, npages, 593 size, npages,
594 banks, ranks, rows, cols); 594 banks, ranks, rows, cols);
595 595
596 dimm->nr_pages = npages; 596 dimm->nr_pages = npages;
597 dimm->grain = 32; 597 dimm->grain = 32;
@@ -629,8 +629,7 @@ static void get_memory_layout(const struct mem_ctl_info *mci)
629 tmp_mb = (1 + pvt->tolm) >> 20; 629 tmp_mb = (1 + pvt->tolm) >> 20;
630 630
631 mb = div_u64_rem(tmp_mb, 1000, &kb); 631 mb = div_u64_rem(tmp_mb, 1000, &kb);
632 debugf0("TOLM: %u.%03u GB (0x%016Lx)\n", 632 edac_dbg(0, "TOLM: %u.%03u GB (0x%016Lx)\n", mb, kb, (u64)pvt->tolm);
633 mb, kb, (u64)pvt->tolm);
634 633
635 /* Address range is already 45:25 */ 634 /* Address range is already 45:25 */
636 pci_read_config_dword(pvt->pci_sad1, TOHM, 635 pci_read_config_dword(pvt->pci_sad1, TOHM,
@@ -639,8 +638,7 @@ static void get_memory_layout(const struct mem_ctl_info *mci)
639 tmp_mb = (1 + pvt->tohm) >> 20; 638 tmp_mb = (1 + pvt->tohm) >> 20;
640 639
641 mb = div_u64_rem(tmp_mb, 1000, &kb); 640 mb = div_u64_rem(tmp_mb, 1000, &kb);
642 debugf0("TOHM: %u.%03u GB (0x%016Lx)", 641 edac_dbg(0, "TOHM: %u.%03u GB (0x%016Lx)", mb, kb, (u64)pvt->tohm);
643 mb, kb, (u64)pvt->tohm);
644 642
645 /* 643 /*
646 * Step 2) Get SAD range and SAD Interleave list 644 * Step 2) Get SAD range and SAD Interleave list
@@ -663,13 +661,13 @@ static void get_memory_layout(const struct mem_ctl_info *mci)
663 661
664 tmp_mb = (limit + 1) >> 20; 662 tmp_mb = (limit + 1) >> 20;
665 mb = div_u64_rem(tmp_mb, 1000, &kb); 663 mb = div_u64_rem(tmp_mb, 1000, &kb);
666 debugf0("SAD#%d %s up to %u.%03u GB (0x%016Lx) %s reg=0x%08x\n", 664 edac_dbg(0, "SAD#%d %s up to %u.%03u GB (0x%016Lx) Interleave: %s reg=0x%08x\n",
667 n_sads, 665 n_sads,
668 get_dram_attr(reg), 666 get_dram_attr(reg),
669 mb, kb, 667 mb, kb,
670 ((u64)tmp_mb) << 20L, 668 ((u64)tmp_mb) << 20L,
671 INTERLEAVE_MODE(reg) ? "Interleave: 8:6" : "Interleave: [8:6]XOR[18:16]", 669 INTERLEAVE_MODE(reg) ? "8:6" : "[8:6]XOR[18:16]",
672 reg); 670 reg);
673 prv = limit; 671 prv = limit;
674 672
675 pci_read_config_dword(pvt->pci_sad0, interleave_list[n_sads], 673 pci_read_config_dword(pvt->pci_sad0, interleave_list[n_sads],
@@ -679,8 +677,8 @@ static void get_memory_layout(const struct mem_ctl_info *mci)
679 if (j > 0 && sad_interl == sad_pkg(reg, j)) 677 if (j > 0 && sad_interl == sad_pkg(reg, j))
680 break; 678 break;
681 679
682 debugf0("SAD#%d, interleave #%d: %d\n", 680 edac_dbg(0, "SAD#%d, interleave #%d: %d\n",
683 n_sads, j, sad_pkg(reg, j)); 681 n_sads, j, sad_pkg(reg, j));
684 } 682 }
685 } 683 }
686 684
@@ -697,16 +695,16 @@ static void get_memory_layout(const struct mem_ctl_info *mci)
697 tmp_mb = (limit + 1) >> 20; 695 tmp_mb = (limit + 1) >> 20;
698 696
699 mb = div_u64_rem(tmp_mb, 1000, &kb); 697 mb = div_u64_rem(tmp_mb, 1000, &kb);
700 debugf0("TAD#%d: up to %u.%03u GB (0x%016Lx), socket interleave %d, memory interleave %d, TGT: %d, %d, %d, %d, reg=0x%08x\n", 698 edac_dbg(0, "TAD#%d: up to %u.%03u GB (0x%016Lx), socket interleave %d, memory interleave %d, TGT: %d, %d, %d, %d, reg=0x%08x\n",
701 n_tads, mb, kb, 699 n_tads, mb, kb,
702 ((u64)tmp_mb) << 20L, 700 ((u64)tmp_mb) << 20L,
703 (u32)TAD_SOCK(reg), 701 (u32)TAD_SOCK(reg),
704 (u32)TAD_CH(reg), 702 (u32)TAD_CH(reg),
705 (u32)TAD_TGT0(reg), 703 (u32)TAD_TGT0(reg),
706 (u32)TAD_TGT1(reg), 704 (u32)TAD_TGT1(reg),
707 (u32)TAD_TGT2(reg), 705 (u32)TAD_TGT2(reg),
708 (u32)TAD_TGT3(reg), 706 (u32)TAD_TGT3(reg),
709 reg); 707 reg);
710 prv = limit; 708 prv = limit;
711 } 709 }
712 710
@@ -722,11 +720,11 @@ static void get_memory_layout(const struct mem_ctl_info *mci)
722 &reg); 720 &reg);
723 tmp_mb = TAD_OFFSET(reg) >> 20; 721 tmp_mb = TAD_OFFSET(reg) >> 20;
724 mb = div_u64_rem(tmp_mb, 1000, &kb); 722 mb = div_u64_rem(tmp_mb, 1000, &kb);
725 debugf0("TAD CH#%d, offset #%d: %u.%03u GB (0x%016Lx), reg=0x%08x\n", 723 edac_dbg(0, "TAD CH#%d, offset #%d: %u.%03u GB (0x%016Lx), reg=0x%08x\n",
726 i, j, 724 i, j,
727 mb, kb, 725 mb, kb,
728 ((u64)tmp_mb) << 20L, 726 ((u64)tmp_mb) << 20L,
729 reg); 727 reg);
730 } 728 }
731 } 729 }
732 730
@@ -747,12 +745,12 @@ static void get_memory_layout(const struct mem_ctl_info *mci)
747 tmp_mb = RIR_LIMIT(reg) >> 20; 745 tmp_mb = RIR_LIMIT(reg) >> 20;
748 rir_way = 1 << RIR_WAY(reg); 746 rir_way = 1 << RIR_WAY(reg);
749 mb = div_u64_rem(tmp_mb, 1000, &kb); 747 mb = div_u64_rem(tmp_mb, 1000, &kb);
750 debugf0("CH#%d RIR#%d, limit: %u.%03u GB (0x%016Lx), way: %d, reg=0x%08x\n", 748 edac_dbg(0, "CH#%d RIR#%d, limit: %u.%03u GB (0x%016Lx), way: %d, reg=0x%08x\n",
751 i, j, 749 i, j,
752 mb, kb, 750 mb, kb,
753 ((u64)tmp_mb) << 20L, 751 ((u64)tmp_mb) << 20L,
754 rir_way, 752 rir_way,
755 reg); 753 reg);
756 754
757 for (k = 0; k < rir_way; k++) { 755 for (k = 0; k < rir_way; k++) {
758 pci_read_config_dword(pvt->pci_tad[i], 756 pci_read_config_dword(pvt->pci_tad[i],
@@ -761,12 +759,12 @@ static void get_memory_layout(const struct mem_ctl_info *mci)
761 tmp_mb = RIR_OFFSET(reg) << 6; 759 tmp_mb = RIR_OFFSET(reg) << 6;
762 760
763 mb = div_u64_rem(tmp_mb, 1000, &kb); 761 mb = div_u64_rem(tmp_mb, 1000, &kb);
764 debugf0("CH#%d RIR#%d INTL#%d, offset %u.%03u GB (0x%016Lx), tgt: %d, reg=0x%08x\n", 762 edac_dbg(0, "CH#%d RIR#%d INTL#%d, offset %u.%03u GB (0x%016Lx), tgt: %d, reg=0x%08x\n",
765 i, j, k, 763 i, j, k,
766 mb, kb, 764 mb, kb,
767 ((u64)tmp_mb) << 20L, 765 ((u64)tmp_mb) << 20L,
768 (u32)RIR_RNK_TGT(reg), 766 (u32)RIR_RNK_TGT(reg),
769 reg); 767 reg);
770 } 768 }
771 } 769 }
772 } 770 }
@@ -853,16 +851,16 @@ static int get_memory_error_data(struct mem_ctl_info *mci,
853 if (sad_way > 0 && sad_interl == sad_pkg(reg, sad_way)) 851 if (sad_way > 0 && sad_interl == sad_pkg(reg, sad_way))
854 break; 852 break;
855 sad_interleave[sad_way] = sad_pkg(reg, sad_way); 853 sad_interleave[sad_way] = sad_pkg(reg, sad_way);
856 debugf0("SAD interleave #%d: %d\n", 854 edac_dbg(0, "SAD interleave #%d: %d\n",
857 sad_way, sad_interleave[sad_way]); 855 sad_way, sad_interleave[sad_way]);
858 } 856 }
859 debugf0("mc#%d: Error detected on SAD#%d: address 0x%016Lx < 0x%016Lx, Interleave [%d:6]%s\n", 857 edac_dbg(0, "mc#%d: Error detected on SAD#%d: address 0x%016Lx < 0x%016Lx, Interleave [%d:6]%s\n",
860 pvt->sbridge_dev->mc, 858 pvt->sbridge_dev->mc,
861 n_sads, 859 n_sads,
862 addr, 860 addr,
863 limit, 861 limit,
864 sad_way + 7, 862 sad_way + 7,
865 interleave_mode ? "" : "XOR[18:16]"); 863 interleave_mode ? "" : "XOR[18:16]");
 	if (interleave_mode)
 		idx = ((addr >> 6) ^ (addr >> 16)) & 7;
 	else
@@ -884,8 +882,8 @@ static int get_memory_error_data(struct mem_ctl_info *mci,
 		return -EINVAL;
 	}
 	*socket = sad_interleave[idx];
-	debugf0("SAD interleave index: %d (wayness %d) = CPU socket %d\n",
+	edac_dbg(0, "SAD interleave index: %d (wayness %d) = CPU socket %d\n",
 		idx, sad_way, *socket);
 
 	/*
 	 * Move to the proper node structure, in order to access the
@@ -972,16 +970,16 @@ static int get_memory_error_data(struct mem_ctl_info *mci,
 
 	offset = TAD_OFFSET(tad_offset);
 
-	debugf0("TAD#%d: address 0x%016Lx < 0x%016Lx, socket interleave %d, channel interleave %d (offset 0x%08Lx), index %d, base ch: %d, ch mask: 0x%02lx\n",
+	edac_dbg(0, "TAD#%d: address 0x%016Lx < 0x%016Lx, socket interleave %d, channel interleave %d (offset 0x%08Lx), index %d, base ch: %d, ch mask: 0x%02lx\n",
 		n_tads,
 		addr,
 		limit,
 		(u32)TAD_SOCK(reg),
 		ch_way,
 		offset,
 		idx,
 		base_ch,
 		*channel_mask);
 
 	/* Calculate channel address */
 	/* Remove the TAD offset */
@@ -1017,11 +1015,11 @@ static int get_memory_error_data(struct mem_ctl_info *mci,
 
 		limit = RIR_LIMIT(reg);
 		mb = div_u64_rem(limit >> 20, 1000, &kb);
-		debugf0("RIR#%d, limit: %u.%03u GB (0x%016Lx), way: %d\n",
+		edac_dbg(0, "RIR#%d, limit: %u.%03u GB (0x%016Lx), way: %d\n",
 			n_rir,
 			mb, kb,
 			limit,
 			1 << RIR_WAY(reg));
 		if (ch_addr <= limit)
 			break;
 	}
@@ -1042,12 +1040,12 @@ static int get_memory_error_data(struct mem_ctl_info *mci,
 			      &reg);
 	*rank = RIR_RNK_TGT(reg);
 
-	debugf0("RIR#%d: channel address 0x%08Lx < 0x%08Lx, RIR interleave %d, index %d\n",
+	edac_dbg(0, "RIR#%d: channel address 0x%08Lx < 0x%08Lx, RIR interleave %d, index %d\n",
 		n_rir,
 		ch_addr,
 		limit,
 		rir_way,
 		idx);
 
 	return 0;
 }
@@ -1064,14 +1062,14 @@ static void sbridge_put_devices(struct sbridge_dev *sbridge_dev)
 {
 	int i;
 
-	debugf0(__FILE__ ": %s()\n", __func__);
+	edac_dbg(0, "\n");
 	for (i = 0; i < sbridge_dev->n_devs; i++) {
 		struct pci_dev *pdev = sbridge_dev->pdev[i];
 		if (!pdev)
 			continue;
-		debugf0("Removing dev %02x:%02x.%d\n",
+		edac_dbg(0, "Removing dev %02x:%02x.%d\n",
 			pdev->bus->number,
 			PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));
 		pci_dev_put(pdev);
 	}
 }
@@ -1177,10 +1175,9 @@ static int sbridge_get_onedevice(struct pci_dev **prev,
 		return -ENODEV;
 	}
 
-	debugf0("Detected dev %02x:%d.%d PCI ID %04x:%04x\n",
-		bus, dev_descr->dev,
-		dev_descr->func,
-		PCI_VENDOR_ID_INTEL, dev_descr->dev_id);
+	edac_dbg(0, "Detected dev %02x:%d.%d PCI ID %04x:%04x\n",
+		 bus, dev_descr->dev, dev_descr->func,
+		 PCI_VENDOR_ID_INTEL, dev_descr->dev_id);
 
 	/*
 	 * As stated on drivers/pci/search.c, the reference count for
@@ -1297,10 +1294,10 @@ static int mci_bind_devs(struct mem_ctl_info *mci,
 			goto error;
 		}
 
-		debugf0("Associated PCI %02x.%02d.%d with dev = %p\n",
+		edac_dbg(0, "Associated PCI %02x.%02d.%d with dev = %p\n",
 			sbridge_dev->bus,
 			PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn),
 			pdev);
 	}
 
 	/* Check if everything were registered */
@@ -1435,8 +1432,7 @@ static void sbridge_mce_output_error(struct mem_ctl_info *mci,
 	 * to the group of dimm's where the error may be happening.
 	 */
 	snprintf(msg, sizeof(msg),
-		 "count:%d%s%s area:%s err_code:%04x:%04x socket:%d channel_mask:%ld rank:%d",
-		 core_err_cnt,
+		 "%s%s area:%s err_code:%04x:%04x socket:%d channel_mask:%ld rank:%d",
 		 overflow ? " OVERFLOW" : "",
 		 (uncorrected_error && recoverable) ? " recoverable" : "",
 		 area_type,
@@ -1445,20 +1441,20 @@ static void sbridge_mce_output_error(struct mem_ctl_info *mci,
 		 channel_mask,
 		 rank);
 
-	debugf0("%s", msg);
+	edac_dbg(0, "%s\n", msg);
 
 	/* FIXME: need support for channel mask */
 
 	/* Call the helper to output message */
-	edac_mc_handle_error(tp_event, mci,
+	edac_mc_handle_error(tp_event, mci, core_err_cnt,
 			     m->addr >> PAGE_SHIFT, m->addr & ~PAGE_MASK, 0,
 			     channel, dimm, -1,
-			     optype, msg, m);
+			     optype, msg);
 	return;
err_parsing:
-	edac_mc_handle_error(tp_event, mci, 0, 0, 0,
+	edac_mc_handle_error(tp_event, mci, core_err_cnt, 0, 0, 0,
 			     -1, -1, -1,
-			     msg, "", m);
+			     msg, "");
 
}
 
@@ -1592,8 +1588,7 @@ static void sbridge_unregister_mci(struct sbridge_dev *sbridge_dev)
 	struct sbridge_pvt *pvt;
 
 	if (unlikely(!mci || !mci->pvt_info)) {
-		debugf0("MC: " __FILE__ ": %s(): dev = %p\n",
-			__func__, &sbridge_dev->pdev[0]->dev);
+		edac_dbg(0, "MC: dev = %p\n", &sbridge_dev->pdev[0]->dev);
 
 		sbridge_printk(KERN_ERR, "Couldn't find mci handler\n");
 		return;
@@ -1601,13 +1596,13 @@ static void sbridge_unregister_mci(struct sbridge_dev *sbridge_dev)
 
 	pvt = mci->pvt_info;
 
-	debugf0("MC: " __FILE__ ": %s(): mci = %p, dev = %p\n",
-		__func__, mci, &sbridge_dev->pdev[0]->dev);
+	edac_dbg(0, "MC: mci = %p, dev = %p\n",
+		 mci, &sbridge_dev->pdev[0]->dev);
 
 	/* Remove MC sysfs nodes */
-	edac_mc_del_mc(mci->dev);
+	edac_mc_del_mc(mci->pdev);
 
-	debugf1("%s: free mci struct\n", mci->ctl_name);
+	edac_dbg(1, "%s: free mci struct\n", mci->ctl_name);
 	kfree(mci->ctl_name);
 	edac_mc_free(mci);
 	sbridge_dev->mci = NULL;
@@ -1638,8 +1633,8 @@ static int sbridge_register_mci(struct sbridge_dev *sbridge_dev)
 	if (unlikely(!mci))
 		return -ENOMEM;
 
-	debugf0("MC: " __FILE__ ": %s(): mci = %p, dev = %p\n",
-		__func__, mci, &sbridge_dev->pdev[0]->dev);
+	edac_dbg(0, "MC: mci = %p, dev = %p\n",
+		 mci, &sbridge_dev->pdev[0]->dev);
 
 	pvt = mci->pvt_info;
 	memset(pvt, 0, sizeof(*pvt));
@@ -1670,12 +1665,11 @@ static int sbridge_register_mci(struct sbridge_dev *sbridge_dev)
 	get_memory_layout(mci);
 
 	/* record ptr to the generic device */
-	mci->dev = &sbridge_dev->pdev[0]->dev;
+	mci->pdev = &sbridge_dev->pdev[0]->dev;
 
 	/* add this new MC control structure to EDAC's list of MCs */
 	if (unlikely(edac_mc_add_mc(mci))) {
-		debugf0("MC: " __FILE__
-			": %s(): failed edac_mc_add_mc()\n", __func__);
+		edac_dbg(0, "MC: failed edac_mc_add_mc()\n");
 		rc = -EINVAL;
 		goto fail0;
 	}
@@ -1722,7 +1716,8 @@ static int __devinit sbridge_probe(struct pci_dev *pdev,
 	mc = 0;
 
 	list_for_each_entry(sbridge_dev, &sbridge_edac_list, list) {
-		debugf0("Registering MC#%d (%d of %d)\n", mc, mc + 1, num_mc);
+		edac_dbg(0, "Registering MC#%d (%d of %d)\n",
+			 mc, mc + 1, num_mc);
 		sbridge_dev->mc = mc++;
 		rc = sbridge_register_mci(sbridge_dev);
 		if (unlikely(rc < 0))
@@ -1752,7 +1747,7 @@ static void __devexit sbridge_remove(struct pci_dev *pdev)
{
 	struct sbridge_dev *sbridge_dev;
 
-	debugf0(__FILE__ ": %s()\n", __func__);
+	edac_dbg(0, "\n");
 
 	/*
 	 * we have a trouble here: pdev value for removal will be wrong, since
@@ -1801,7 +1796,7 @@ static int __init sbridge_init(void)
{
 	int pci_rc;
 
-	debugf2("MC: " __FILE__ ": %s()\n", __func__);
+	edac_dbg(2, "\n");
 
 	/* Ensure that the OPSTATE is set correctly for POLL or NMI */
 	opstate_init();
@@ -1825,7 +1820,7 @@ static int __init sbridge_init(void)
  */
static void __exit sbridge_exit(void)
{
-	debugf2("MC: " __FILE__ ": %s()\n", __func__);
+	edac_dbg(2, "\n");
 	pci_unregister_driver(&sbridge_driver);
 	mce_unregister_decode_chain(&sbridge_mce_dec);
}
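
The sb_edac.c hunks above track two interface changes from this series: debugfX() calls become edac_dbg(X, ...), and edac_mc_handle_error() now takes an explicit error count while the trailing arch-specific pointer argument is dropped. Below is a minimal caller sketch, assuming the edac_core.h declarations introduced by this series; the helper name and its parameters are illustrative, not part of the patch set.

#include <linux/edac.h>
#include "edac_core.h"

/* Illustrative only: "my_report_ce" is not part of this patch set. */
static void my_report_ce(struct mem_ctl_info *mci, unsigned long pfn,
			 unsigned long offset, u16 count)
{
	/* debugfX(fmt, ...) is now edac_dbg(level, fmt, ...) */
	edac_dbg(0, "reporting %u corrected error(s)\n", count);

	/*
	 * The error count is passed explicitly, and the old trailing
	 * arch-specific argument (e.g. a struct mce pointer) is gone.
	 */
	edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, count,
			     pfn, offset, 0,	/* page, offset, syndrome */
			     0, 0, -1,		/* top/mid/low layer */
			     mci->ctl_name, "");
}
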
diff --git a/drivers/edac/tile_edac.c b/drivers/edac/tile_edac.c
index 7bb4614730db..1e904b7b79a0 100644
--- a/drivers/edac/tile_edac.c
+++ b/drivers/edac/tile_edac.c
@@ -69,12 +69,12 @@ static void tile_edac_check(struct mem_ctl_info *mci)
 
 	/* Check if the current error count is different from the saved one. */
 	if (mem_error.sbe_count != priv->ce_count) {
-		dev_dbg(mci->dev, "ECC CE err on node %d\n", priv->node);
+		dev_dbg(mci->pdev, "ECC CE err on node %d\n", priv->node);
 		priv->ce_count = mem_error.sbe_count;
-		edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
+		edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1,
 				     0, 0, 0,
 				     0, 0, -1,
-				     mci->ctl_name, "", NULL);
+				     mci->ctl_name, "");
 	}
}
 
@@ -84,10 +84,10 @@ static void tile_edac_check(struct mem_ctl_info *mci)
  */
static int __devinit tile_edac_init_csrows(struct mem_ctl_info *mci)
{
-	struct csrow_info *csrow = &mci->csrows[0];
+	struct csrow_info *csrow = mci->csrows[0];
 	struct tile_edac_priv *priv = mci->pvt_info;
 	struct mshim_mem_info mem_info;
-	struct dimm_info *dimm = csrow->channels[0].dimm;
+	struct dimm_info *dimm = csrow->channels[0]->dimm;
 
 	if (hv_dev_pread(priv->hv_devhdl, 0, (HV_VirtAddr)&mem_info,
 		sizeof(struct mshim_mem_info), MSHIM_MEM_INFO_OFF) !=
@@ -149,7 +149,7 @@ static int __devinit tile_edac_mc_probe(struct platform_device *pdev)
 	priv->node = pdev->id;
 	priv->hv_devhdl = hv_devhdl;
 
-	mci->dev = &pdev->dev;
+	mci->pdev = &pdev->dev;
 	mci->mtype_cap = MEM_FLAG_DDR2;
 	mci->edac_ctl_cap = EDAC_FLAG_SECDED;
 
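
The tile_edac.c hunks (and the x38_edac.c ones that follow) also reflect the reworked memory-controller allocation scheme: mci->csrows[] and csrow->channels[] now hold pointers rather than embedded structures, so per-DIMM data is reached with "->". A short sketch of the resulting accessor pattern, assuming the struct layout from this series; the function name and values assigned are illustrative.

#include <linux/edac.h>
#include "edac_core.h"

/* Illustrative only: "my_init_dimms" is not part of this patch set. */
static void my_init_dimms(struct mem_ctl_info *mci, unsigned long nr_pages)
{
	int i, j;

	for (i = 0; i < mci->nr_csrows; i++) {
		struct csrow_info *csrow = mci->csrows[i];	/* was &mci->csrows[i] */

		for (j = 0; j < csrow->nr_channels; j++) {
			/* channels[] now stores pointers, hence "->dimm" */
			struct dimm_info *dimm = csrow->channels[j]->dimm;

			dimm->nr_pages = nr_pages;
			dimm->grain = nr_pages << PAGE_SHIFT;
		}
	}
}
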
diff --git a/drivers/edac/x38_edac.c b/drivers/edac/x38_edac.c
index 1ac7962d63ea..08a992693e62 100644
--- a/drivers/edac/x38_edac.c
+++ b/drivers/edac/x38_edac.c
@@ -103,10 +103,10 @@ static int how_many_channel(struct pci_dev *pdev)
 
 	pci_read_config_byte(pdev, X38_CAPID0 + 8, &capid0_8b);
 	if (capid0_8b & 0x20) {	/* check DCD: Dual Channel Disable */
-		debugf0("In single channel mode.\n");
+		edac_dbg(0, "In single channel mode\n");
 		x38_channel_num = 1;
 	} else {
-		debugf0("In dual channel mode.\n");
+		edac_dbg(0, "In dual channel mode\n");
 		x38_channel_num = 2;
 	}
 
@@ -151,7 +151,7 @@ static void x38_clear_error_info(struct mem_ctl_info *mci)
{
 	struct pci_dev *pdev;
 
-	pdev = to_pci_dev(mci->dev);
+	pdev = to_pci_dev(mci->pdev);
 
 	/*
 	 * Clear any error bits.
@@ -172,7 +172,7 @@ static void x38_get_and_clear_error_info(struct mem_ctl_info *mci,
 	struct pci_dev *pdev;
 	void __iomem *window = mci->pvt_info;
 
-	pdev = to_pci_dev(mci->dev);
+	pdev = to_pci_dev(mci->pdev);
 
 	/*
 	 * This is a mess because there is no atomic way to read all the
@@ -215,26 +215,26 @@ static void x38_process_error_info(struct mem_ctl_info *mci,
 		return;
 
 	if ((info->errsts ^ info->errsts2) & X38_ERRSTS_BITS) {
-		edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 0, 0, 0,
+		edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1, 0, 0, 0,
 				     -1, -1, -1,
-				     "UE overwrote CE", "", NULL);
+				     "UE overwrote CE", "");
 		info->errsts = info->errsts2;
 	}
 
 	for (channel = 0; channel < x38_channel_num; channel++) {
 		log = info->eccerrlog[channel];
 		if (log & X38_ECCERRLOG_UE) {
-			edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci,
+			edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1,
 					     0, 0, 0,
 					     eccerrlog_row(channel, log),
 					     -1, -1,
-					     "x38 UE", "", NULL);
+					     "x38 UE", "");
 		} else if (log & X38_ECCERRLOG_CE) {
-			edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
+			edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1,
 					     0, 0, eccerrlog_syndrome(log),
 					     eccerrlog_row(channel, log),
 					     -1, -1,
-					     "x38 CE", "", NULL);
+					     "x38 CE", "");
 		}
 	}
}
@@ -243,7 +243,7 @@ static void x38_check(struct mem_ctl_info *mci)
{
 	struct x38_error_info info;
 
-	debugf1("MC%d: %s()\n", mci->mc_idx, __func__);
+	edac_dbg(1, "MC%d\n", mci->mc_idx);
 	x38_get_and_clear_error_info(mci, &info);
 	x38_process_error_info(mci, &info);
}
@@ -331,7 +331,7 @@ static int x38_probe1(struct pci_dev *pdev, int dev_idx)
 	bool stacked;
 	void __iomem *window;
 
-	debugf0("MC: %s()\n", __func__);
+	edac_dbg(0, "MC:\n");
 
 	window = x38_map_mchbar(pdev);
 	if (!window)
@@ -352,9 +352,9 @@ static int x38_probe1(struct pci_dev *pdev, int dev_idx)
 	if (!mci)
 		return -ENOMEM;
 
-	debugf3("MC: %s(): init mci\n", __func__);
+	edac_dbg(3, "MC: init mci\n");
 
-	mci->dev = &pdev->dev;
+	mci->pdev = &pdev->dev;
 	mci->mtype_cap = MEM_FLAG_DDR2;
 
 	mci->edac_ctl_cap = EDAC_FLAG_SECDED;
@@ -378,7 +378,7 @@ static int x38_probe1(struct pci_dev *pdev, int dev_idx)
 	 */
 	for (i = 0; i < mci->nr_csrows; i++) {
 		unsigned long nr_pages;
-		struct csrow_info *csrow = &mci->csrows[i];
+		struct csrow_info *csrow = mci->csrows[i];
 
 		nr_pages = drb_to_nr_pages(drbs, stacked,
 			i / X38_RANKS_PER_CHANNEL,
@@ -388,7 +388,7 @@ static int x38_probe1(struct pci_dev *pdev, int dev_idx)
 			continue;
 
 		for (j = 0; j < x38_channel_num; j++) {
-			struct dimm_info *dimm = csrow->channels[j].dimm;
+			struct dimm_info *dimm = csrow->channels[j]->dimm;
 
 			dimm->nr_pages = nr_pages / x38_channel_num;
 			dimm->grain = nr_pages << PAGE_SHIFT;
@@ -402,12 +402,12 @@ static int x38_probe1(struct pci_dev *pdev, int dev_idx)
 
 	rc = -ENODEV;
 	if (edac_mc_add_mc(mci)) {
-		debugf3("MC: %s(): failed edac_mc_add_mc()\n", __func__);
+		edac_dbg(3, "MC: failed edac_mc_add_mc()\n");
 		goto fail;
 	}
 
 	/* get this far and it's successful */
-	debugf3("MC: %s(): success\n", __func__);
+	edac_dbg(3, "MC: success\n");
 	return 0;
 
fail:
@@ -423,7 +423,7 @@ static int __devinit x38_init_one(struct pci_dev *pdev,
{
 	int rc;
 
-	debugf0("MC: %s()\n", __func__);
+	edac_dbg(0, "MC:\n");
 
 	if (pci_enable_device(pdev) < 0)
 		return -EIO;
@@ -439,7 +439,7 @@ static void __devexit x38_remove_one(struct pci_dev *pdev)
{
 	struct mem_ctl_info *mci;
 
-	debugf0("%s()\n", __func__);
+	edac_dbg(0, "\n");
 
 	mci = edac_mc_del_mc(&pdev->dev);
 	if (!mci)
@@ -472,7 +472,7 @@ static int __init x38_init(void)
{
 	int pci_rc;
 
-	debugf3("MC: %s()\n", __func__);
+	edac_dbg(3, "MC:\n");
 
 	/* Ensure that the OPSTATE is set correctly for POLL or NMI */
 	opstate_init();
@@ -486,14 +486,14 @@ static int __init x38_init(void)
 	mci_pdev = pci_get_device(PCI_VENDOR_ID_INTEL,
 				PCI_DEVICE_ID_INTEL_X38_HB, NULL);
 	if (!mci_pdev) {
-		debugf0("x38 pci_get_device fail\n");
+		edac_dbg(0, "x38 pci_get_device fail\n");
 		pci_rc = -ENODEV;
 		goto fail1;
 	}
 
 	pci_rc = x38_init_one(mci_pdev, x38_pci_tbl);
 	if (pci_rc < 0) {
-		debugf0("x38 init fail\n");
+		edac_dbg(0, "x38 init fail\n");
 		pci_rc = -ENODEV;
 		goto fail1;
 	}
@@ -513,7 +513,7 @@ fail0:
 
static void __exit x38_exit(void)
{
-	debugf3("MC: %s()\n", __func__);
+	edac_dbg(3, "MC:\n");
 
 	pci_unregister_driver(&x38_driver);
 	if (!x38_registered) {