Diffstat (limited to 'drivers/edac')
-rw-r--r--  drivers/edac/Kconfig           |    7
-rw-r--r--  drivers/edac/Makefile          |    1
-rw-r--r--  drivers/edac/e752x_edac.c      |   59
-rw-r--r--  drivers/edac/edac_mc_sysfs.c   |  158
-rw-r--r--  drivers/edac/edac_pci_sysfs.c  |   30
-rw-r--r--  drivers/edac/i5100_edac.c      |  981
-rw-r--r--  drivers/edac/mpc85xx_edac.c    |   67
-rw-r--r--  drivers/edac/mv64x60_edac.c    |   37
8 files changed, 1163 insertions(+), 177 deletions(-)
diff --git a/drivers/edac/Kconfig b/drivers/edac/Kconfig
index 6e6c3c4aea6b..5a11e3cbcae2 100644
--- a/drivers/edac/Kconfig
+++ b/drivers/edac/Kconfig
@@ -123,6 +123,13 @@ config EDAC_I5000
 	  Support for error detection and correction the Intel
 	  Greekcreek/Blackford chipsets.
 
+config EDAC_I5100
+	tristate "Intel San Clemente MCH"
+	depends on EDAC_MM_EDAC && X86 && PCI
+	help
+	  Support for error detection and correction the Intel
+	  San Clemente MCH.
+
 config EDAC_MPC85XX
 	tristate "Freescale MPC85xx"
 	depends on EDAC_MM_EDAC && FSL_SOC && MPC85xx
diff --git a/drivers/edac/Makefile b/drivers/edac/Makefile
index 83807731d4a9..e5e9104b5520 100644
--- a/drivers/edac/Makefile
+++ b/drivers/edac/Makefile
@@ -19,6 +19,7 @@ endif
 
 obj-$(CONFIG_EDAC_AMD76X)		+= amd76x_edac.o
 obj-$(CONFIG_EDAC_I5000)		+= i5000_edac.o
+obj-$(CONFIG_EDAC_I5100)		+= i5100_edac.o
 obj-$(CONFIG_EDAC_E7XXX)		+= e7xxx_edac.o
 obj-$(CONFIG_EDAC_E752X)		+= e752x_edac.o
 obj-$(CONFIG_EDAC_I82443BXGX)		+= i82443bxgx_edac.o
diff --git a/drivers/edac/e752x_edac.c b/drivers/edac/e752x_edac.c
index c94a0eb492cb..facfdb1fa71c 100644
--- a/drivers/edac/e752x_edac.c
+++ b/drivers/edac/e752x_edac.c
@@ -28,6 +28,7 @@
 #define E752X_REVISION	" Ver: 2.0.2 " __DATE__
 #define EDAC_MOD_STR	"e752x_edac"
 
+static int report_non_memory_errors;
 static int force_function_unhide;
 static int sysbus_parity = -1;
 
@@ -117,7 +118,7 @@ static struct edac_pci_ctl_info *e752x_pci;
 #define E752X_BUF_FERR		0x70	/* Memory buffer first error reg (8b) */
 #define E752X_BUF_NERR		0x72	/* Memory buffer next error reg (8b) */
 #define E752X_BUF_ERRMASK	0x74	/* Memory buffer error mask reg (8b) */
-#define E752X_BUF_SMICMD	0x7A	/* Memory buffer SMI command reg (8b) */
+#define E752X_BUF_SMICMD	0x7A	/* Memory buffer SMI cmd reg (8b) */
 #define E752X_DRAM_FERR		0x80	/* DRAM first error register (16b) */
 #define E752X_DRAM_NERR		0x82	/* DRAM next error register (16b) */
 #define E752X_DRAM_ERRMASK	0x84	/* DRAM error mask register (8b) */
@@ -127,7 +128,7 @@ static struct edac_pci_ctl_info *e752x_pci;
 				/* error address register (32b) */
 				/*
 				 * 31    Reserved
-				 * 30:2  CE address (64 byte block 34:6)
+				 * 30:2  CE address (64 byte block 34:6
 				 * 1     Reserved
 				 * 0     HiLoCS
 				 */
@@ -147,11 +148,11 @@ static struct edac_pci_ctl_info *e752x_pci;
 				 * 1     Reserved
 				 * 0     HiLoCS
 				 */
-#define E752X_DRAM_SCRB_ADD	0xA8	/* DRAM first uncorrectable scrub memory */
+#define E752X_DRAM_SCRB_ADD	0xA8	/* DRAM 1st uncorrectable scrub mem */
 				/* error address register (32b) */
 				/*
 				 * 31    Reserved
-				 * 30:2  CE address (64 byte block 34:6)
+				 * 30:2  CE address (64 byte block 34:6
 				 * 1     Reserved
 				 * 0     HiLoCS
 				 */
@@ -394,9 +395,12 @@ static void do_process_ded_retry(struct mem_ctl_info *mci, u16 error,
 	struct e752x_pvt *pvt = (struct e752x_pvt *)mci->pvt_info;
 
 	error_1b = retry_add;
 	page = error_1b >> (PAGE_SHIFT - 4);  /* convert the addr to 4k page */
-	row = pvt->mc_symmetric ? ((page >> 1) & 3) :	/* chip select are bits 14 & 13 */
+
+	/* chip select are bits 14 & 13 */
+	row = pvt->mc_symmetric ? ((page >> 1) & 3) :
 		edac_mc_find_csrow_by_page(mci, page);
+
 	e752x_mc_printk(mci, KERN_WARNING,
 		"CE page 0x%lx, row %d : Memory read retry\n",
 		(long unsigned int)page, row);
@@ -422,12 +426,21 @@ static inline void process_threshold_ce(struct mem_ctl_info *mci, u16 error,
 }
 
 static char *global_message[11] = {
-	"PCI Express C1", "PCI Express C", "PCI Express B1",
-	"PCI Express B", "PCI Express A1", "PCI Express A",
-	"DMA Controler", "HUB or NS Interface", "System Bus",
-	"DRAM Controler", "Internal Buffer"
+	"PCI Express C1",
+	"PCI Express C",
+	"PCI Express B1",
+	"PCI Express B",
+	"PCI Express A1",
+	"PCI Express A",
+	"DMA Controller",
+	"HUB or NS Interface",
+	"System Bus",
+	"DRAM Controller",	/* 9th entry */
+	"Internal Buffer"
 };
 
+#define DRAM_ENTRY	9
+
 static char *fatal_message[2] = { "Non-Fatal ", "Fatal " };
 
 static void do_global_error(int fatal, u32 errors)
@@ -435,9 +448,16 @@ static void do_global_error(int fatal, u32 errors)
 	int i;
 
 	for (i = 0; i < 11; i++) {
-		if (errors & (1 << i))
-			e752x_printk(KERN_WARNING, "%sError %s\n",
-				fatal_message[fatal], global_message[i]);
+		if (errors & (1 << i)) {
+			/* If the error is from DRAM Controller OR
+			 * we are to report ALL errors, then
+			 * report the error
+			 */
+			if ((i == DRAM_ENTRY) || report_non_memory_errors)
+				e752x_printk(KERN_WARNING, "%sError %s\n",
+					fatal_message[fatal],
+					global_message[i]);
+		}
 	}
 }
 
@@ -1021,7 +1041,7 @@ static int e752x_get_devs(struct pci_dev *pdev, int dev_idx,
 	struct pci_dev *dev;
 
 	pvt->bridge_ck = pci_get_device(PCI_VENDOR_ID_INTEL,
-			pvt->dev_info->err_dev, pvt->bridge_ck);
+				pvt->dev_info->err_dev, pvt->bridge_ck);
 
 	if (pvt->bridge_ck == NULL)
 		pvt->bridge_ck = pci_scan_single_device(pdev->bus,
@@ -1034,8 +1054,9 @@ static int e752x_get_devs(struct pci_dev *pdev, int dev_idx,
 		return 1;
 	}
 
-	dev = pci_get_device(PCI_VENDOR_ID_INTEL, e752x_devs[dev_idx].ctl_dev,
-			NULL);
+	dev = pci_get_device(PCI_VENDOR_ID_INTEL,
+			e752x_devs[dev_idx].ctl_dev,
+			NULL);
 
 	if (dev == NULL)
 		goto fail;
@@ -1316,7 +1337,8 @@ MODULE_DESCRIPTION("MC support for Intel e752x/3100 memory controllers");
 
 module_param(force_function_unhide, int, 0444);
 MODULE_PARM_DESC(force_function_unhide, "if BIOS sets Dev0:Fun1 up as hidden:"
-	" 1=force unhide and hope BIOS doesn't fight driver for Dev0:Fun1 access");
+	" 1=force unhide and hope BIOS doesn't fight driver for "
+	"Dev0:Fun1 access");
 
 module_param(edac_op_state, int, 0444);
 MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");
@@ -1324,3 +1346,6 @@ MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");
 module_param(sysbus_parity, int, 0444);
 MODULE_PARM_DESC(sysbus_parity, "0=disable system bus parity checking,"
 		" 1=enable system bus parity checking, default=auto-detect");
+module_param(report_non_memory_errors, int, 0644);
+MODULE_PARM_DESC(report_non_memory_errors, "0=disable non-memory error "
+		"reporting, 1=enable non-memory error reporting");
diff --git a/drivers/edac/edac_mc_sysfs.c b/drivers/edac/edac_mc_sysfs.c
index 021d18795145..ad218fe4942d 100644
--- a/drivers/edac/edac_mc_sysfs.c
+++ b/drivers/edac/edac_mc_sysfs.c
@@ -44,6 +44,25 @@ int edac_mc_get_poll_msec(void)
 	return edac_mc_poll_msec;
 }
 
+static int edac_set_poll_msec(const char *val, struct kernel_param *kp)
+{
+	long l;
+	int ret;
+
+	if (!val)
+		return -EINVAL;
+
+	ret = strict_strtol(val, 0, &l);
+	if (ret == -EINVAL || ((int)l != l))
+		return -EINVAL;
+	*((int *)kp->arg) = l;
+
+	/* notify edac_mc engine to reset the poll period */
+	edac_mc_reset_delay_period(l);
+
+	return 0;
+}
+
 /* Parameter declarations for above */
 module_param(edac_mc_panic_on_ue, int, 0644);
 MODULE_PARM_DESC(edac_mc_panic_on_ue, "Panic on uncorrected error: 0=off 1=on");
@@ -53,7 +72,8 @@ MODULE_PARM_DESC(edac_mc_log_ue,
 module_param(edac_mc_log_ce, int, 0644);
 MODULE_PARM_DESC(edac_mc_log_ce,
 		 "Log correctable error to console: 0=off 1=on");
-module_param(edac_mc_poll_msec, int, 0644);
+module_param_call(edac_mc_poll_msec, edac_set_poll_msec, param_get_int,
+		  &edac_mc_poll_msec, 0644);
 MODULE_PARM_DESC(edac_mc_poll_msec, "Polling period in milliseconds");
 
 /*
@@ -103,16 +123,6 @@ static const char *edac_caps[] = {
 
 
 
-/*
- * /sys/devices/system/edac/mc;
- *	data structures and methods
- */
-static ssize_t memctrl_int_show(void *ptr, char *buffer)
-{
-	int *value = (int *)ptr;
-	return sprintf(buffer, "%u\n", *value);
-}
-
 static ssize_t memctrl_int_store(void *ptr, const char *buffer, size_t count)
 {
 	int *value = (int *)ptr;
@@ -123,23 +133,6 @@ static ssize_t memctrl_int_store(void *ptr, const char *buffer, size_t count)
 	return count;
 }
 
-/*
- * mc poll_msec time value
- */
-static ssize_t poll_msec_int_store(void *ptr, const char *buffer, size_t count)
-{
-	int *value = (int *)ptr;
-
-	if (isdigit(*buffer)) {
-		*value = simple_strtoul(buffer, NULL, 0);
-
-		/* notify edac_mc engine to reset the poll period */
-		edac_mc_reset_delay_period(*value);
-	}
-
-	return count;
-}
-
 
 /* EDAC sysfs CSROW data structures and methods
  */
@@ -185,7 +178,11 @@ static ssize_t csrow_edac_mode_show(struct csrow_info *csrow, char *data,
 static ssize_t channel_dimm_label_show(struct csrow_info *csrow,
 				char *data, int channel)
 {
-	return snprintf(data, EDAC_MC_LABEL_LEN, "%s",
+	/* if field has not been initialized, there is nothing to send */
+	if (!csrow->channels[channel].label[0])
+		return 0;
+
+	return snprintf(data, EDAC_MC_LABEL_LEN, "%s\n",
 			csrow->channels[channel].label);
 }
 
@@ -649,98 +646,10 @@ static struct kobj_type ktype_mci = {
 	.default_attrs = (struct attribute **)mci_attr,
 };
 
-/* show/store, tables, etc for the MC kset */
-
-
-struct memctrl_dev_attribute {
-	struct attribute attr;
-	void *value;
-	ssize_t(*show) (void *, char *);
-	ssize_t(*store) (void *, const char *, size_t);
-};
-
-/* Set of show/store abstract level functions for memory control object */
-static ssize_t memctrl_dev_show(struct kobject *kobj,
-		struct attribute *attr, char *buffer)
-{
-	struct memctrl_dev_attribute *memctrl_dev;
-	memctrl_dev = (struct memctrl_dev_attribute *)attr;
-
-	if (memctrl_dev->show)
-		return memctrl_dev->show(memctrl_dev->value, buffer);
-
-	return -EIO;
-}
-
-static ssize_t memctrl_dev_store(struct kobject *kobj, struct attribute *attr,
-		const char *buffer, size_t count)
-{
-	struct memctrl_dev_attribute *memctrl_dev;
-	memctrl_dev = (struct memctrl_dev_attribute *)attr;
-
-	if (memctrl_dev->store)
-		return memctrl_dev->store(memctrl_dev->value, buffer, count);
-
-	return -EIO;
-}
-
-static struct sysfs_ops memctrlfs_ops = {
-	.show = memctrl_dev_show,
-	.store = memctrl_dev_store
-};
-
-#define MEMCTRL_ATTR(_name, _mode, _show, _store)		\
-static struct memctrl_dev_attribute attr_##_name = {		\
-	.attr = {.name = __stringify(_name), .mode = _mode },	\
-	.value  = &_name,					\
-	.show   = _show,					\
-	.store  = _store,					\
-};
-
-#define MEMCTRL_STRING_ATTR(_name, _data, _mode, _show, _store)	\
-static struct memctrl_dev_attribute attr_##_name = {		\
-	.attr = {.name = __stringify(_name), .mode = _mode },	\
-	.value  = _data,					\
-	.show   = _show,					\
-	.store  = _store,					\
-};
-
-/* csrow<id> control files */
-MEMCTRL_ATTR(edac_mc_panic_on_ue,
-	S_IRUGO | S_IWUSR, memctrl_int_show, memctrl_int_store);
-
-MEMCTRL_ATTR(edac_mc_log_ue,
-	S_IRUGO | S_IWUSR, memctrl_int_show, memctrl_int_store);
-
-MEMCTRL_ATTR(edac_mc_log_ce,
-	S_IRUGO | S_IWUSR, memctrl_int_show, memctrl_int_store);
-
-MEMCTRL_ATTR(edac_mc_poll_msec,
-	S_IRUGO | S_IWUSR, memctrl_int_show, poll_msec_int_store);
-
-/* Base Attributes of the memory ECC object */
-static struct memctrl_dev_attribute *memctrl_attr[] = {
-	&attr_edac_mc_panic_on_ue,
-	&attr_edac_mc_log_ue,
-	&attr_edac_mc_log_ce,
-	&attr_edac_mc_poll_msec,
-	NULL,
-};
-
-
-/* the ktype for the mc_kset internal kobj */
-static struct kobj_type ktype_mc_set_attribs = {
-	.sysfs_ops = &memctrlfs_ops,
-	.default_attrs = (struct attribute **)memctrl_attr,
-};
-
 /* EDAC memory controller sysfs kset:
  *	/sys/devices/system/edac/mc
  */
-static struct kset mc_kset = {
-	.kobj = {.ktype = &ktype_mc_set_attribs },
-};
-
+static struct kset *mc_kset;
 
 /*
  * edac_mc_register_sysfs_main_kobj
@@ -771,7 +680,7 @@ int edac_mc_register_sysfs_main_kobj(struct mem_ctl_info *mci)
 	}
 
 	/* this instance become part of the mc_kset */
-	kobj_mci->kset = &mc_kset;
+	kobj_mci->kset = mc_kset;
 
 	/* register the mc<id> kobject to the mc_kset */
 	err = kobject_init_and_add(kobj_mci, &ktype_mci, NULL,
@@ -1001,12 +910,9 @@ int edac_sysfs_setup_mc_kset(void)
 	}
 
 	/* Init the MC's kobject */
-	kobject_set_name(&mc_kset.kobj, "mc");
-	mc_kset.kobj.parent = &edac_class->kset.kobj;
-
-	/* register the mc_kset */
-	err = kset_register(&mc_kset);
-	if (err) {
+	mc_kset = kset_create_and_add("mc", NULL, &edac_class->kset.kobj);
+	if (!mc_kset) {
+		err = -ENOMEM;
 		debugf1("%s() Failed to register '.../edac/mc'\n", __func__);
 		goto fail_out;
 	}
@@ -1028,6 +934,6 @@ fail_out:
  */
 void edac_sysfs_teardown_mc_kset(void)
 {
-	kset_unregister(&mc_kset);
+	kset_unregister(mc_kset);
 }
 
diff --git a/drivers/edac/edac_pci_sysfs.c b/drivers/edac/edac_pci_sysfs.c
index 2c1fa1bb6df2..5c153dccc95e 100644
--- a/drivers/edac/edac_pci_sysfs.c
+++ b/drivers/edac/edac_pci_sysfs.c
@@ -28,7 +28,7 @@ static int edac_pci_poll_msec = 1000; /* one second workq period */
 static atomic_t pci_parity_count = ATOMIC_INIT(0);
 static atomic_t pci_nonparity_count = ATOMIC_INIT(0);
 
-static struct kobject edac_pci_top_main_kobj;
+static struct kobject *edac_pci_top_main_kobj;
 static atomic_t edac_pci_sysfs_refcount = ATOMIC_INIT(0);
 
 /* getter functions for the data variables */
@@ -83,7 +83,7 @@ static void edac_pci_instance_release(struct kobject *kobj)
 	pci = to_instance(kobj);
 
 	/* decrement reference count on top main kobj */
-	kobject_put(&edac_pci_top_main_kobj);
+	kobject_put(edac_pci_top_main_kobj);
 
 	kfree(pci);	/* Free the control struct */
 }
@@ -166,7 +166,7 @@ static int edac_pci_create_instance_kobj(struct edac_pci_ctl_info *pci, int idx)
 	 * track the number of PCI instances we have, and thus nest
 	 * properly on keeping the module loaded
 	 */
-	main_kobj = kobject_get(&edac_pci_top_main_kobj);
+	main_kobj = kobject_get(edac_pci_top_main_kobj);
 	if (!main_kobj) {
 		err = -ENODEV;
 		goto error_out;
@@ -174,11 +174,11 @@ static int edac_pci_create_instance_kobj(struct edac_pci_ctl_info *pci, int idx)
 
 	/* And now register this new kobject under the main kobj */
 	err = kobject_init_and_add(&pci->kobj, &ktype_pci_instance,
-				   &edac_pci_top_main_kobj, "pci%d", idx);
+				   edac_pci_top_main_kobj, "pci%d", idx);
 	if (err != 0) {
 		debugf2("%s() failed to register instance pci%d\n",
 			__func__, idx);
-		kobject_put(&edac_pci_top_main_kobj);
+		kobject_put(edac_pci_top_main_kobj);
 		goto error_out;
 	}
 
@@ -316,9 +316,10 @@ static struct edac_pci_dev_attribute *edac_pci_attr[] = {
  */
 static void edac_pci_release_main_kobj(struct kobject *kobj)
 {
-
 	debugf0("%s() here to module_put(THIS_MODULE)\n", __func__);
 
+	kfree(kobj);
+
 	/* last reference to top EDAC PCI kobject has been removed,
 	 * NOW release our ref count on the core module
 	 */
@@ -369,8 +370,16 @@ static int edac_pci_main_kobj_setup(void)
 		goto decrement_count_fail;
 	}
 
+	edac_pci_top_main_kobj = kzalloc(sizeof(struct kobject), GFP_KERNEL);
+	if (!edac_pci_top_main_kobj) {
+		debugf1("Failed to allocate\n");
+		err = -ENOMEM;
+		goto kzalloc_fail;
+	}
+
 	/* Instanstiate the pci object */
-	err = kobject_init_and_add(&edac_pci_top_main_kobj, &ktype_edac_pci_main_kobj,
+	err = kobject_init_and_add(edac_pci_top_main_kobj,
+				   &ktype_edac_pci_main_kobj,
 				   &edac_class->kset.kobj, "pci");
 	if (err) {
 		debugf1("Failed to register '.../edac/pci'\n");
@@ -381,13 +390,16 @@ static int edac_pci_main_kobj_setup(void)
 	 * for EDAC PCI, then edac_pci_main_kobj_teardown()
 	 * must be used, for resources to be cleaned up properly
 	 */
-	kobject_uevent(&edac_pci_top_main_kobj, KOBJ_ADD);
+	kobject_uevent(edac_pci_top_main_kobj, KOBJ_ADD);
 	debugf1("Registered '.../edac/pci' kobject\n");
 
 	return 0;
 
 	/* Error unwind statck */
 kobject_init_and_add_fail:
+	kfree(edac_pci_top_main_kobj);
+
+kzalloc_fail:
 	module_put(THIS_MODULE);
 
 decrement_count_fail:
@@ -414,7 +426,7 @@ static void edac_pci_main_kobj_teardown(void)
 	if (atomic_dec_return(&edac_pci_sysfs_refcount) == 0) {
 		debugf0("%s() called kobject_put on main kobj\n",
 			__func__);
-		kobject_put(&edac_pci_top_main_kobj);
+		kobject_put(edac_pci_top_main_kobj);
 	}
 }
 
diff --git a/drivers/edac/i5100_edac.c b/drivers/edac/i5100_edac.c
new file mode 100644
index 000000000000..22db05a67bfb
--- /dev/null
+++ b/drivers/edac/i5100_edac.c
@@ -0,0 +1,981 @@
1/*
2 * Intel 5100 Memory Controllers kernel module
3 *
4 * This file may be distributed under the terms of the
5 * GNU General Public License.
6 *
7 * This module is based on the following document:
8 *
9 * Intel 5100X Chipset Memory Controller Hub (MCH) - Datasheet
10 * http://download.intel.com/design/chipsets/datashts/318378.pdf
11 *
12 */
13#include <linux/module.h>
14#include <linux/init.h>
15#include <linux/pci.h>
16#include <linux/pci_ids.h>
17#include <linux/slab.h>
18#include <linux/edac.h>
19#include <linux/delay.h>
20#include <linux/mmzone.h>
21
22#include "edac_core.h"
23
24/* register addresses */
25
26/* device 16, func 1 */
27#define I5100_MC 0x40 /* Memory Control Register */
28#define I5100_MS 0x44 /* Memory Status Register */
29#define I5100_SPDDATA 0x48 /* Serial Presence Detect Status Reg */
30#define I5100_SPDCMD 0x4c /* Serial Presence Detect Command Reg */
31#define I5100_TOLM 0x6c /* Top of Low Memory */
32#define I5100_MIR0 0x80 /* Memory Interleave Range 0 */
33#define I5100_MIR1 0x84 /* Memory Interleave Range 1 */
34#define I5100_AMIR_0 0x8c /* Adjusted Memory Interleave Range 0 */
35#define I5100_AMIR_1 0x90 /* Adjusted Memory Interleave Range 1 */
36#define I5100_FERR_NF_MEM 0xa0 /* MC First Non Fatal Errors */
37#define I5100_FERR_NF_MEM_M16ERR_MASK (1 << 16)
38#define I5100_FERR_NF_MEM_M15ERR_MASK (1 << 15)
39#define I5100_FERR_NF_MEM_M14ERR_MASK (1 << 14)
40#define I5100_FERR_NF_MEM_M12ERR_MASK (1 << 12)
41#define I5100_FERR_NF_MEM_M11ERR_MASK (1 << 11)
42#define I5100_FERR_NF_MEM_M10ERR_MASK (1 << 10)
43#define I5100_FERR_NF_MEM_M6ERR_MASK (1 << 6)
44#define I5100_FERR_NF_MEM_M5ERR_MASK (1 << 5)
45#define I5100_FERR_NF_MEM_M4ERR_MASK (1 << 4)
46#define I5100_FERR_NF_MEM_M1ERR_MASK 1
47#define I5100_FERR_NF_MEM_ANY_MASK \
48 (I5100_FERR_NF_MEM_M16ERR_MASK | \
49 I5100_FERR_NF_MEM_M15ERR_MASK | \
50 I5100_FERR_NF_MEM_M14ERR_MASK | \
51 I5100_FERR_NF_MEM_M12ERR_MASK | \
52 I5100_FERR_NF_MEM_M11ERR_MASK | \
53 I5100_FERR_NF_MEM_M10ERR_MASK | \
54 I5100_FERR_NF_MEM_M6ERR_MASK | \
55 I5100_FERR_NF_MEM_M5ERR_MASK | \
56 I5100_FERR_NF_MEM_M4ERR_MASK | \
57 I5100_FERR_NF_MEM_M1ERR_MASK)
58#define I5100_NERR_NF_MEM 0xa4 /* MC Next Non-Fatal Errors */
59#define I5100_EMASK_MEM 0xa8 /* MC Error Mask Register */
60
61/* device 21 and 22, func 0 */
62#define I5100_MTR_0 0x154 /* Memory Technology Registers 0-3 */
63#define I5100_DMIR 0x15c /* DIMM Interleave Range */
64#define I5100_VALIDLOG 0x18c /* Valid Log Markers */
65#define I5100_NRECMEMA 0x190 /* Non-Recoverable Memory Error Log Reg A */
66#define I5100_NRECMEMB 0x194 /* Non-Recoverable Memory Error Log Reg B */
67#define I5100_REDMEMA 0x198 /* Recoverable Memory Data Error Log Reg A */
68#define I5100_REDMEMB 0x19c /* Recoverable Memory Data Error Log Reg B */
69#define I5100_RECMEMA 0x1a0 /* Recoverable Memory Error Log Reg A */
70#define I5100_RECMEMB 0x1a4 /* Recoverable Memory Error Log Reg B */
71#define I5100_MTR_4 0x1b0 /* Memory Technology Registers 4,5 */
72
73/* bit field accessors */
74
75static inline u32 i5100_mc_errdeten(u32 mc)
76{
77 return mc >> 5 & 1;
78}
79
80static inline u16 i5100_spddata_rdo(u16 a)
81{
82 return a >> 15 & 1;
83}
84
85static inline u16 i5100_spddata_sbe(u16 a)
86{
87 return a >> 13 & 1;
88}
89
90static inline u16 i5100_spddata_busy(u16 a)
91{
92 return a >> 12 & 1;
93}
94
95static inline u16 i5100_spddata_data(u16 a)
96{
97 return a & ((1 << 8) - 1);
98}
99
100static inline u32 i5100_spdcmd_create(u32 dti, u32 ckovrd, u32 sa, u32 ba,
101 u32 data, u32 cmd)
102{
103 return ((dti & ((1 << 4) - 1)) << 28) |
104 ((ckovrd & 1) << 27) |
105 ((sa & ((1 << 3) - 1)) << 24) |
106 ((ba & ((1 << 8) - 1)) << 16) |
107 ((data & ((1 << 8) - 1)) << 8) |
108 (cmd & 1);
109}
110
111static inline u16 i5100_tolm_tolm(u16 a)
112{
113 return a >> 12 & ((1 << 4) - 1);
114}
115
116static inline u16 i5100_mir_limit(u16 a)
117{
118 return a >> 4 & ((1 << 12) - 1);
119}
120
121static inline u16 i5100_mir_way1(u16 a)
122{
123 return a >> 1 & 1;
124}
125
126static inline u16 i5100_mir_way0(u16 a)
127{
128 return a & 1;
129}
130
131static inline u32 i5100_ferr_nf_mem_chan_indx(u32 a)
132{
133 return a >> 28 & 1;
134}
135
136static inline u32 i5100_ferr_nf_mem_any(u32 a)
137{
138 return a & I5100_FERR_NF_MEM_ANY_MASK;
139}
140
141static inline u32 i5100_nerr_nf_mem_any(u32 a)
142{
143 return i5100_ferr_nf_mem_any(a);
144}
145
146static inline u32 i5100_dmir_limit(u32 a)
147{
148 return a >> 16 & ((1 << 11) - 1);
149}
150
151static inline u32 i5100_dmir_rank(u32 a, u32 i)
152{
153 return a >> (4 * i) & ((1 << 2) - 1);
154}
155
156static inline u16 i5100_mtr_present(u16 a)
157{
158 return a >> 10 & 1;
159}
160
161static inline u16 i5100_mtr_ethrottle(u16 a)
162{
163 return a >> 9 & 1;
164}
165
166static inline u16 i5100_mtr_width(u16 a)
167{
168 return a >> 8 & 1;
169}
170
171static inline u16 i5100_mtr_numbank(u16 a)
172{
173 return a >> 6 & 1;
174}
175
176static inline u16 i5100_mtr_numrow(u16 a)
177{
178 return a >> 2 & ((1 << 2) - 1);
179}
180
181static inline u16 i5100_mtr_numcol(u16 a)
182{
183 return a & ((1 << 2) - 1);
184}
185
186
187static inline u32 i5100_validlog_redmemvalid(u32 a)
188{
189 return a >> 2 & 1;
190}
191
192static inline u32 i5100_validlog_recmemvalid(u32 a)
193{
194 return a >> 1 & 1;
195}
196
197static inline u32 i5100_validlog_nrecmemvalid(u32 a)
198{
199 return a & 1;
200}
201
202static inline u32 i5100_nrecmema_merr(u32 a)
203{
204 return a >> 15 & ((1 << 5) - 1);
205}
206
207static inline u32 i5100_nrecmema_bank(u32 a)
208{
209 return a >> 12 & ((1 << 3) - 1);
210}
211
212static inline u32 i5100_nrecmema_rank(u32 a)
213{
214 return a >> 8 & ((1 << 3) - 1);
215}
216
217static inline u32 i5100_nrecmema_dm_buf_id(u32 a)
218{
219 return a & ((1 << 8) - 1);
220}
221
222static inline u32 i5100_nrecmemb_cas(u32 a)
223{
224 return a >> 16 & ((1 << 13) - 1);
225}
226
227static inline u32 i5100_nrecmemb_ras(u32 a)
228{
229 return a & ((1 << 16) - 1);
230}
231
232static inline u32 i5100_redmemb_ecc_locator(u32 a)
233{
234 return a & ((1 << 18) - 1);
235}
236
237static inline u32 i5100_recmema_merr(u32 a)
238{
239 return i5100_nrecmema_merr(a);
240}
241
242static inline u32 i5100_recmema_bank(u32 a)
243{
244 return i5100_nrecmema_bank(a);
245}
246
247static inline u32 i5100_recmema_rank(u32 a)
248{
249 return i5100_nrecmema_rank(a);
250}
251
252static inline u32 i5100_recmema_dm_buf_id(u32 a)
253{
254 return i5100_nrecmema_dm_buf_id(a);
255}
256
257static inline u32 i5100_recmemb_cas(u32 a)
258{
259 return i5100_nrecmemb_cas(a);
260}
261
262static inline u32 i5100_recmemb_ras(u32 a)
263{
264 return i5100_nrecmemb_ras(a);
265}
266
267/* some generic limits */
268#define I5100_MAX_RANKS_PER_CTLR 6
269#define I5100_MAX_CTLRS 2
270#define I5100_MAX_RANKS_PER_DIMM 4
271#define I5100_DIMM_ADDR_LINES (6 - 3) /* 64 bits / 8 bits per byte */
272#define I5100_MAX_DIMM_SLOTS_PER_CTLR 4
273#define I5100_MAX_RANK_INTERLEAVE 4
274#define I5100_MAX_DMIRS 5
275
276struct i5100_priv {
277 /* ranks on each dimm -- 0 maps to not present -- obtained via SPD */
278 int dimm_numrank[I5100_MAX_CTLRS][I5100_MAX_DIMM_SLOTS_PER_CTLR];
279
280 /*
281 * mainboard chip select map -- maps i5100 chip selects to
282 * DIMM slot chip selects. In the case of only 4 ranks per
283 * controller, the mapping is fairly obvious but not unique.
284 * we map -1 -> NC and assume both controllers use the same
285 * map...
286 *
287 */
288 int dimm_csmap[I5100_MAX_DIMM_SLOTS_PER_CTLR][I5100_MAX_RANKS_PER_DIMM];
289
290 /* memory interleave range */
291 struct {
292 u64 limit;
293 unsigned way[2];
294 } mir[I5100_MAX_CTLRS];
295
296 /* adjusted memory interleave range register */
297 unsigned amir[I5100_MAX_CTLRS];
298
299 /* dimm interleave range */
300 struct {
301 unsigned rank[I5100_MAX_RANK_INTERLEAVE];
302 u64 limit;
303 } dmir[I5100_MAX_CTLRS][I5100_MAX_DMIRS];
304
305 /* memory technology registers... */
306 struct {
307 unsigned present; /* 0 or 1 */
308 unsigned ethrottle; /* 0 or 1 */
309 unsigned width; /* 4 or 8 bits */
310 unsigned numbank; /* 2 or 3 lines */
311 unsigned numrow; /* 13 .. 16 lines */
312 unsigned numcol; /* 11 .. 12 lines */
313 } mtr[I5100_MAX_CTLRS][I5100_MAX_RANKS_PER_CTLR];
314
315 u64 tolm; /* top of low memory in bytes */
316 unsigned ranksperctlr; /* number of ranks per controller */
317
318 struct pci_dev *mc; /* device 16 func 1 */
319 struct pci_dev *ch0mm; /* device 21 func 0 */
320 struct pci_dev *ch1mm; /* device 22 func 0 */
321};
322
323/* map a rank/ctlr to a slot number on the mainboard */
324static int i5100_rank_to_slot(const struct mem_ctl_info *mci,
325 int ctlr, int rank)
326{
327 const struct i5100_priv *priv = mci->pvt_info;
328 int i;
329
330 for (i = 0; i < I5100_MAX_DIMM_SLOTS_PER_CTLR; i++) {
331 int j;
332 const int numrank = priv->dimm_numrank[ctlr][i];
333
334 for (j = 0; j < numrank; j++)
335 if (priv->dimm_csmap[i][j] == rank)
336 return i * 2 + ctlr;
337 }
338
339 return -1;
340}
341
342static const char *i5100_err_msg(unsigned err)
343{
344 static const char *merrs[] = {
345 "unknown", /* 0 */
346 "uncorrectable data ECC on replay", /* 1 */
347 "unknown", /* 2 */
348 "unknown", /* 3 */
349 "aliased uncorrectable demand data ECC", /* 4 */
350 "aliased uncorrectable spare-copy data ECC", /* 5 */
351 "aliased uncorrectable patrol data ECC", /* 6 */
352 "unknown", /* 7 */
353 "unknown", /* 8 */
354 "unknown", /* 9 */
355 "non-aliased uncorrectable demand data ECC", /* 10 */
356 "non-aliased uncorrectable spare-copy data ECC", /* 11 */
357 "non-aliased uncorrectable patrol data ECC", /* 12 */
358 "unknown", /* 13 */
359 "correctable demand data ECC", /* 14 */
360 "correctable spare-copy data ECC", /* 15 */
361 "correctable patrol data ECC", /* 16 */
362 "unknown", /* 17 */
363 "SPD protocol error", /* 18 */
364 "unknown", /* 19 */
365 "spare copy initiated", /* 20 */
366 "spare copy completed", /* 21 */
367 };
368 unsigned i;
369
370 for (i = 0; i < ARRAY_SIZE(merrs); i++)
371 if (1 << i & err)
372 return merrs[i];
373
374 return "none";
375}
376
377/* convert csrow index into a rank (per controller -- 0..5) */
378static int i5100_csrow_to_rank(const struct mem_ctl_info *mci, int csrow)
379{
380 const struct i5100_priv *priv = mci->pvt_info;
381
382 return csrow % priv->ranksperctlr;
383}
384
385/* convert csrow index into a controller (0..1) */
386static int i5100_csrow_to_cntlr(const struct mem_ctl_info *mci, int csrow)
387{
388 const struct i5100_priv *priv = mci->pvt_info;
389
390 return csrow / priv->ranksperctlr;
391}
392
393static unsigned i5100_rank_to_csrow(const struct mem_ctl_info *mci,
394 int ctlr, int rank)
395{
396 const struct i5100_priv *priv = mci->pvt_info;
397
398 return ctlr * priv->ranksperctlr + rank;
399}
400
401static void i5100_handle_ce(struct mem_ctl_info *mci,
402 int ctlr,
403 unsigned bank,
404 unsigned rank,
405 unsigned long syndrome,
406 unsigned cas,
407 unsigned ras,
408 const char *msg)
409{
410 const int csrow = i5100_rank_to_csrow(mci, ctlr, rank);
411
412 printk(KERN_ERR
413 "CE ctlr %d, bank %u, rank %u, syndrome 0x%lx, "
414 "cas %u, ras %u, csrow %u, label \"%s\": %s\n",
415 ctlr, bank, rank, syndrome, cas, ras,
416 csrow, mci->csrows[csrow].channels[0].label, msg);
417
418 mci->ce_count++;
419 mci->csrows[csrow].ce_count++;
420 mci->csrows[csrow].channels[0].ce_count++;
421}
422
423static void i5100_handle_ue(struct mem_ctl_info *mci,
424 int ctlr,
425 unsigned bank,
426 unsigned rank,
427 unsigned long syndrome,
428 unsigned cas,
429 unsigned ras,
430 const char *msg)
431{
432 const int csrow = i5100_rank_to_csrow(mci, ctlr, rank);
433
434 printk(KERN_ERR
435 "UE ctlr %d, bank %u, rank %u, syndrome 0x%lx, "
436 "cas %u, ras %u, csrow %u, label \"%s\": %s\n",
437 ctlr, bank, rank, syndrome, cas, ras,
438 csrow, mci->csrows[csrow].channels[0].label, msg);
439
440 mci->ue_count++;
441 mci->csrows[csrow].ue_count++;
442}
443
444static void i5100_read_log(struct mem_ctl_info *mci, int ctlr,
445 u32 ferr, u32 nerr)
446{
447 struct i5100_priv *priv = mci->pvt_info;
448 struct pci_dev *pdev = (ctlr) ? priv->ch1mm : priv->ch0mm;
449 u32 dw;
450 u32 dw2;
451 unsigned syndrome = 0;
452 unsigned ecc_loc = 0;
453 unsigned merr;
454 unsigned bank;
455 unsigned rank;
456 unsigned cas;
457 unsigned ras;
458
459 pci_read_config_dword(pdev, I5100_VALIDLOG, &dw);
460
461 if (i5100_validlog_redmemvalid(dw)) {
462 pci_read_config_dword(pdev, I5100_REDMEMA, &dw2);
463 syndrome = dw2;
464 pci_read_config_dword(pdev, I5100_REDMEMB, &dw2);
465 ecc_loc = i5100_redmemb_ecc_locator(dw2);
466 }
467
468 if (i5100_validlog_recmemvalid(dw)) {
469 const char *msg;
470
471 pci_read_config_dword(pdev, I5100_RECMEMA, &dw2);
472 merr = i5100_recmema_merr(dw2);
473 bank = i5100_recmema_bank(dw2);
474 rank = i5100_recmema_rank(dw2);
475
476 pci_read_config_dword(pdev, I5100_RECMEMB, &dw2);
477 cas = i5100_recmemb_cas(dw2);
478 ras = i5100_recmemb_ras(dw2);
479
480 /* FIXME: not really sure if this is what merr is...
481 */
482 if (!merr)
483 msg = i5100_err_msg(ferr);
484 else
485 msg = i5100_err_msg(nerr);
486
487 i5100_handle_ce(mci, ctlr, bank, rank, syndrome, cas, ras, msg);
488 }
489
490 if (i5100_validlog_nrecmemvalid(dw)) {
491 const char *msg;
492
493 pci_read_config_dword(pdev, I5100_NRECMEMA, &dw2);
494 merr = i5100_nrecmema_merr(dw2);
495 bank = i5100_nrecmema_bank(dw2);
496 rank = i5100_nrecmema_rank(dw2);
497
498 pci_read_config_dword(pdev, I5100_NRECMEMB, &dw2);
499 cas = i5100_nrecmemb_cas(dw2);
500 ras = i5100_nrecmemb_ras(dw2);
501
502 /* FIXME: not really sure if this is what merr is...
503 */
504 if (!merr)
505 msg = i5100_err_msg(ferr);
506 else
507 msg = i5100_err_msg(nerr);
508
509 i5100_handle_ue(mci, ctlr, bank, rank, syndrome, cas, ras, msg);
510 }
511
512 pci_write_config_dword(pdev, I5100_VALIDLOG, dw);
513}
514
515static void i5100_check_error(struct mem_ctl_info *mci)
516{
517 struct i5100_priv *priv = mci->pvt_info;
518 u32 dw;
519
520
521 pci_read_config_dword(priv->mc, I5100_FERR_NF_MEM, &dw);
522 if (i5100_ferr_nf_mem_any(dw)) {
523 u32 dw2;
524
525 pci_read_config_dword(priv->mc, I5100_NERR_NF_MEM, &dw2);
526 if (dw2)
527 pci_write_config_dword(priv->mc, I5100_NERR_NF_MEM,
528 dw2);
529 pci_write_config_dword(priv->mc, I5100_FERR_NF_MEM, dw);
530
531 i5100_read_log(mci, i5100_ferr_nf_mem_chan_indx(dw),
532 i5100_ferr_nf_mem_any(dw),
533 i5100_nerr_nf_mem_any(dw2));
534 }
535}
536
537static struct pci_dev *pci_get_device_func(unsigned vendor,
538 unsigned device,
539 unsigned func)
540{
541 struct pci_dev *ret = NULL;
542
543 while (1) {
544 ret = pci_get_device(vendor, device, ret);
545
546 if (!ret)
547 break;
548
549 if (PCI_FUNC(ret->devfn) == func)
550 break;
551 }
552
553 return ret;
554}
555
556static unsigned long __devinit i5100_npages(struct mem_ctl_info *mci,
557 int csrow)
558{
559 struct i5100_priv *priv = mci->pvt_info;
560 const unsigned ctlr_rank = i5100_csrow_to_rank(mci, csrow);
561 const unsigned ctlr = i5100_csrow_to_cntlr(mci, csrow);
562 unsigned addr_lines;
563
564 /* dimm present? */
565 if (!priv->mtr[ctlr][ctlr_rank].present)
566 return 0ULL;
567
568 addr_lines =
569 I5100_DIMM_ADDR_LINES +
570 priv->mtr[ctlr][ctlr_rank].numcol +
571 priv->mtr[ctlr][ctlr_rank].numrow +
572 priv->mtr[ctlr][ctlr_rank].numbank;
573
574 return (unsigned long)
575 ((unsigned long long) (1ULL << addr_lines) / PAGE_SIZE);
576}
577
578static void __devinit i5100_init_mtr(struct mem_ctl_info *mci)
579{
580 struct i5100_priv *priv = mci->pvt_info;
581 struct pci_dev *mms[2] = { priv->ch0mm, priv->ch1mm };
582 int i;
583
584 for (i = 0; i < I5100_MAX_CTLRS; i++) {
585 int j;
586 struct pci_dev *pdev = mms[i];
587
588 for (j = 0; j < I5100_MAX_RANKS_PER_CTLR; j++) {
589 const unsigned addr =
590 (j < 4) ? I5100_MTR_0 + j * 2 :
591 I5100_MTR_4 + (j - 4) * 2;
592 u16 w;
593
594 pci_read_config_word(pdev, addr, &w);
595
596 priv->mtr[i][j].present = i5100_mtr_present(w);
597 priv->mtr[i][j].ethrottle = i5100_mtr_ethrottle(w);
598 priv->mtr[i][j].width = 4 + 4 * i5100_mtr_width(w);
599 priv->mtr[i][j].numbank = 2 + i5100_mtr_numbank(w);
600 priv->mtr[i][j].numrow = 13 + i5100_mtr_numrow(w);
601 priv->mtr[i][j].numcol = 10 + i5100_mtr_numcol(w);
602 }
603 }
604}
605
606/*
607 * FIXME: make this into a real i2c adapter (so that dimm-decode
608 * will work)?
609 */
610static int i5100_read_spd_byte(const struct mem_ctl_info *mci,
611 u8 ch, u8 slot, u8 addr, u8 *byte)
612{
613 struct i5100_priv *priv = mci->pvt_info;
614 u16 w;
615 unsigned long et;
616
617 pci_read_config_word(priv->mc, I5100_SPDDATA, &w);
618 if (i5100_spddata_busy(w))
619 return -1;
620
621 pci_write_config_dword(priv->mc, I5100_SPDCMD,
622 i5100_spdcmd_create(0xa, 1, ch * 4 + slot, addr,
623 0, 0));
624
625 /* wait up to 100ms */
626 et = jiffies + HZ / 10;
627 udelay(100);
628 while (1) {
629 pci_read_config_word(priv->mc, I5100_SPDDATA, &w);
630 if (!i5100_spddata_busy(w))
631 break;
632 udelay(100);
633 }
634
635 if (!i5100_spddata_rdo(w) || i5100_spddata_sbe(w))
636 return -1;
637
638 *byte = i5100_spddata_data(w);
639
640 return 0;
641}
642
643/*
644 * fill dimm chip select map
645 *
646 * FIXME:
647 * o only valid for 4 ranks per controller
648 * o not the only way to may chip selects to dimm slots
649 * o investigate if there is some way to obtain this map from the bios
650 */
651static void __devinit i5100_init_dimm_csmap(struct mem_ctl_info *mci)
652{
653 struct i5100_priv *priv = mci->pvt_info;
654 int i;
655
656 WARN_ON(priv->ranksperctlr != 4);
657
658 for (i = 0; i < I5100_MAX_DIMM_SLOTS_PER_CTLR; i++) {
659 int j;
660
661 for (j = 0; j < I5100_MAX_RANKS_PER_DIMM; j++)
662 priv->dimm_csmap[i][j] = -1; /* default NC */
663 }
664
665 /* only 2 chip selects per slot... */
666 priv->dimm_csmap[0][0] = 0;
667 priv->dimm_csmap[0][1] = 3;
668 priv->dimm_csmap[1][0] = 1;
669 priv->dimm_csmap[1][1] = 2;
670 priv->dimm_csmap[2][0] = 2;
671 priv->dimm_csmap[3][0] = 3;
672}
673
674static void __devinit i5100_init_dimm_layout(struct pci_dev *pdev,
675 struct mem_ctl_info *mci)
676{
677 struct i5100_priv *priv = mci->pvt_info;
678 int i;
679
680 for (i = 0; i < I5100_MAX_CTLRS; i++) {
681 int j;
682
683 for (j = 0; j < I5100_MAX_DIMM_SLOTS_PER_CTLR; j++) {
684 u8 rank;
685
686 if (i5100_read_spd_byte(mci, i, j, 5, &rank) < 0)
687 priv->dimm_numrank[i][j] = 0;
688 else
689 priv->dimm_numrank[i][j] = (rank & 3) + 1;
690 }
691 }
692
693 i5100_init_dimm_csmap(mci);
694}
695
696static void __devinit i5100_init_interleaving(struct pci_dev *pdev,
697 struct mem_ctl_info *mci)
698{
699 u16 w;
700 u32 dw;
701 struct i5100_priv *priv = mci->pvt_info;
702 struct pci_dev *mms[2] = { priv->ch0mm, priv->ch1mm };
703 int i;
704
705 pci_read_config_word(pdev, I5100_TOLM, &w);
706 priv->tolm = (u64) i5100_tolm_tolm(w) * 256 * 1024 * 1024;
707
708 pci_read_config_word(pdev, I5100_MIR0, &w);
709 priv->mir[0].limit = (u64) i5100_mir_limit(w) << 28;
710 priv->mir[0].way[1] = i5100_mir_way1(w);
711 priv->mir[0].way[0] = i5100_mir_way0(w);
712
713 pci_read_config_word(pdev, I5100_MIR1, &w);
714 priv->mir[1].limit = (u64) i5100_mir_limit(w) << 28;
715 priv->mir[1].way[1] = i5100_mir_way1(w);
716 priv->mir[1].way[0] = i5100_mir_way0(w);
717
718 pci_read_config_word(pdev, I5100_AMIR_0, &w);
719 priv->amir[0] = w;
720 pci_read_config_word(pdev, I5100_AMIR_1, &w);
721 priv->amir[1] = w;
722
723 for (i = 0; i < I5100_MAX_CTLRS; i++) {
724 int j;
725
726 for (j = 0; j < 5; j++) {
727 int k;
728
729 pci_read_config_dword(mms[i], I5100_DMIR + j * 4, &dw);
730
731 priv->dmir[i][j].limit =
732 (u64) i5100_dmir_limit(dw) << 28;
733 for (k = 0; k < I5100_MAX_RANKS_PER_DIMM; k++)
734 priv->dmir[i][j].rank[k] =
735 i5100_dmir_rank(dw, k);
736 }
737 }
738
739 i5100_init_mtr(mci);
740}
741
742static void __devinit i5100_init_csrows(struct mem_ctl_info *mci)
743{
744 int i;
745 unsigned long total_pages = 0UL;
746 struct i5100_priv *priv = mci->pvt_info;
747
748 for (i = 0; i < mci->nr_csrows; i++) {
749 const unsigned long npages = i5100_npages(mci, i);
750 const unsigned cntlr = i5100_csrow_to_cntlr(mci, i);
751 const unsigned rank = i5100_csrow_to_rank(mci, i);
752
753 if (!npages)
754 continue;
755
756 /*
757 * FIXME: these two are totally bogus -- I don't see how to
758 * map them correctly to this structure...
759 */
760 mci->csrows[i].first_page = total_pages;
761 mci->csrows[i].last_page = total_pages + npages - 1;
762 mci->csrows[i].page_mask = 0UL;
763
764 mci->csrows[i].nr_pages = npages;
765 mci->csrows[i].grain = 32;
766 mci->csrows[i].csrow_idx = i;
767 mci->csrows[i].dtype =
768 (priv->mtr[cntlr][rank].width == 4) ? DEV_X4 : DEV_X8;
769 mci->csrows[i].ue_count = 0;
770 mci->csrows[i].ce_count = 0;
771 mci->csrows[i].mtype = MEM_RDDR2;
772 mci->csrows[i].edac_mode = EDAC_SECDED;
773 mci->csrows[i].mci = mci;
774 mci->csrows[i].nr_channels = 1;
775 mci->csrows[i].channels[0].chan_idx = 0;
776 mci->csrows[i].channels[0].ce_count = 0;
777 mci->csrows[i].channels[0].csrow = mci->csrows + i;
778 snprintf(mci->csrows[i].channels[0].label,
779 sizeof(mci->csrows[i].channels[0].label),
780 "DIMM%u", i5100_rank_to_slot(mci, cntlr, rank));
781
782 total_pages += npages;
783 }
784}
785
786static int __devinit i5100_init_one(struct pci_dev *pdev,
787 const struct pci_device_id *id)
788{
789 int rc;
790 struct mem_ctl_info *mci;
791 struct i5100_priv *priv;
792 struct pci_dev *ch0mm, *ch1mm;
793 int ret = 0;
794 u32 dw;
795 int ranksperch;
796
797 if (PCI_FUNC(pdev->devfn) != 1)
798 return -ENODEV;
799
800 rc = pci_enable_device(pdev);
801 if (rc < 0) {
802 ret = rc;
803 goto bail;
804 }
805
806 /* ECC enabled? */
807 pci_read_config_dword(pdev, I5100_MC, &dw);
808 if (!i5100_mc_errdeten(dw)) {
809 printk(KERN_INFO "i5100_edac: ECC not enabled.\n");
810 ret = -ENODEV;
811 goto bail_pdev;
812 }
813
814 /* figure out how many ranks, from strapped state of 48GB_Mode input */
815 pci_read_config_dword(pdev, I5100_MS, &dw);
816 ranksperch = !!(dw & (1 << 8)) * 2 + 4;
817
818 if (ranksperch != 4) {
819 /* FIXME: get 6 ranks / controller to work - need hw... */
820 printk(KERN_INFO "i5100_edac: unsupported configuration.\n");
821 ret = -ENODEV;
822 goto bail_pdev;
823 }
824
825 /* enable error reporting... */
826 pci_read_config_dword(pdev, I5100_EMASK_MEM, &dw);
827 dw &= ~I5100_FERR_NF_MEM_ANY_MASK;
828 pci_write_config_dword(pdev, I5100_EMASK_MEM, dw);
829
830 /* device 21, func 0, Channel 0 Memory Map, Error Flag/Mask, etc... */
831 ch0mm = pci_get_device_func(PCI_VENDOR_ID_INTEL,
832 PCI_DEVICE_ID_INTEL_5100_21, 0);
833 if (!ch0mm) {
834 ret = -ENODEV;
835 goto bail_pdev;
836 }
837
838 rc = pci_enable_device(ch0mm);
839 if (rc < 0) {
840 ret = rc;
841 goto bail_ch0;
842 }
843
844 /* device 22, func 0, Channel 1 Memory Map, Error Flag/Mask, etc... */
845 ch1mm = pci_get_device_func(PCI_VENDOR_ID_INTEL,
846 PCI_DEVICE_ID_INTEL_5100_22, 0);
847 if (!ch1mm) {
848 ret = -ENODEV;
849 goto bail_disable_ch0;
850 }
851
852 rc = pci_enable_device(ch1mm);
853 if (rc < 0) {
854 ret = rc;
855 goto bail_ch1;
856 }
857
858 mci = edac_mc_alloc(sizeof(*priv), ranksperch * 2, 1, 0);
859 if (!mci) {
860 ret = -ENOMEM;
861 goto bail_disable_ch1;
862 }
863
864 mci->dev = &pdev->dev;
865
866 priv = mci->pvt_info;
867 priv->ranksperctlr = ranksperch;
868 priv->mc = pdev;
869 priv->ch0mm = ch0mm;
870 priv->ch1mm = ch1mm;
871
872 i5100_init_dimm_layout(pdev, mci);
873 i5100_init_interleaving(pdev, mci);
874
875 mci->mtype_cap = MEM_FLAG_FB_DDR2;
876 mci->edac_ctl_cap = EDAC_FLAG_SECDED;
877 mci->edac_cap = EDAC_FLAG_SECDED;
878 mci->mod_name = "i5100_edac.c";
879 mci->mod_ver = "not versioned";
880 mci->ctl_name = "i5100";
881 mci->dev_name = pci_name(pdev);
882 mci->ctl_page_to_phys = NULL;
883
884 mci->edac_check = i5100_check_error;
885
886 i5100_init_csrows(mci);
887
888 /* this strange construction seems to be in every driver, dunno why */
889 switch (edac_op_state) {
890 case EDAC_OPSTATE_POLL:
891 case EDAC_OPSTATE_NMI:
892 break;
893 default:
894 edac_op_state = EDAC_OPSTATE_POLL;
895 break;
896 }
897
898 if (edac_mc_add_mc(mci)) {
899 ret = -ENODEV;
900 goto bail_mc;
901 }
902
903 return ret;
904
905bail_mc:
906 edac_mc_free(mci);
907
908bail_disable_ch1:
909 pci_disable_device(ch1mm);
910
911bail_ch1:
912 pci_dev_put(ch1mm);
913
914bail_disable_ch0:
915 pci_disable_device(ch0mm);
916
917bail_ch0:
918 pci_dev_put(ch0mm);
919
920bail_pdev:
921 pci_disable_device(pdev);
922
923bail:
924 return ret;
925}
926
927static void __devexit i5100_remove_one(struct pci_dev *pdev)
928{
929 struct mem_ctl_info *mci;
930 struct i5100_priv *priv;
931
932 mci = edac_mc_del_mc(&pdev->dev);
933
934 if (!mci)
935 return;
936
937 priv = mci->pvt_info;
938 pci_disable_device(pdev);
939 pci_disable_device(priv->ch0mm);
940 pci_disable_device(priv->ch1mm);
941 pci_dev_put(priv->ch0mm);
942 pci_dev_put(priv->ch1mm);
943
944 edac_mc_free(mci);
945}
946
947static const struct pci_device_id i5100_pci_tbl[] __devinitdata = {
948 /* Device 16, Function 0, Channel 0 Memory Map, Error Flag/Mask, ... */
949 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_5100_16) },
950 { 0, }
951};
952MODULE_DEVICE_TABLE(pci, i5100_pci_tbl);
953
954static struct pci_driver i5100_driver = {
955 .name = KBUILD_BASENAME,
956 .probe = i5100_init_one,
957 .remove = __devexit_p(i5100_remove_one),
958 .id_table = i5100_pci_tbl,
959};
960
961static int __init i5100_init(void)
962{
963 int pci_rc;
964
965 pci_rc = pci_register_driver(&i5100_driver);
966
967 return (pci_rc < 0) ? pci_rc : 0;
968}
969
970static void __exit i5100_exit(void)
971{
972 pci_unregister_driver(&i5100_driver);
973}
974
975module_init(i5100_init);
976module_exit(i5100_exit);
977
978MODULE_LICENSE("GPL");
979MODULE_AUTHOR
980 ("Arthur Jones <ajones@riverbed.com>");
981MODULE_DESCRIPTION("MC Driver for Intel I5100 memory controllers");
diff --git a/drivers/edac/mpc85xx_edac.c b/drivers/edac/mpc85xx_edac.c
index d49361bfe670..2265d9ca1535 100644
--- a/drivers/edac/mpc85xx_edac.c
+++ b/drivers/edac/mpc85xx_edac.c
@@ -195,14 +195,15 @@ static irqreturn_t mpc85xx_pci_isr(int irq, void *dev_id)
 	return IRQ_HANDLED;
 }
 
-static int __devinit mpc85xx_pci_err_probe(struct platform_device *pdev)
+static int __devinit mpc85xx_pci_err_probe(struct of_device *op,
+					   const struct of_device_id *match)
 {
 	struct edac_pci_ctl_info *pci;
 	struct mpc85xx_pci_pdata *pdata;
-	struct resource *r;
+	struct resource r;
 	int res = 0;
 
-	if (!devres_open_group(&pdev->dev, mpc85xx_pci_err_probe, GFP_KERNEL))
+	if (!devres_open_group(&op->dev, mpc85xx_pci_err_probe, GFP_KERNEL))
 		return -ENOMEM;
 
 	pci = edac_pci_alloc_ctl_info(sizeof(*pdata), "mpc85xx_pci_err");
@@ -212,34 +213,37 @@ static int __devinit mpc85xx_pci_err_probe(struct platform_device *pdev)
 	pdata = pci->pvt_info;
 	pdata->name = "mpc85xx_pci_err";
 	pdata->irq = NO_IRQ;
-	platform_set_drvdata(pdev, pci);
-	pci->dev = &pdev->dev;
+	dev_set_drvdata(&op->dev, pci);
+	pci->dev = &op->dev;
 	pci->mod_name = EDAC_MOD_STR;
 	pci->ctl_name = pdata->name;
-	pci->dev_name = pdev->dev.bus_id;
+	pci->dev_name = op->dev.bus_id;
 
 	if (edac_op_state == EDAC_OPSTATE_POLL)
 		pci->edac_check = mpc85xx_pci_check;
 
 	pdata->edac_idx = edac_pci_idx++;
 
-	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	if (!r) {
+	res = of_address_to_resource(op->node, 0, &r);
+	if (res) {
 		printk(KERN_ERR "%s: Unable to get resource for "
 		       "PCI err regs\n", __func__);
 		goto err;
 	}
 
-	if (!devm_request_mem_region(&pdev->dev, r->start,
-				     r->end - r->start + 1, pdata->name)) {
+	/* we only need the error registers */
+	r.start += 0xe00;
+
+	if (!devm_request_mem_region(&op->dev, r.start,
+				     r.end - r.start + 1, pdata->name)) {
 		printk(KERN_ERR "%s: Error while requesting mem region\n",
 		       __func__);
 		res = -EBUSY;
 		goto err;
 	}
 
-	pdata->pci_vbase = devm_ioremap(&pdev->dev, r->start,
-					r->end - r->start + 1);
+	pdata->pci_vbase = devm_ioremap(&op->dev, r.start,
+					r.end - r.start + 1);
 	if (!pdata->pci_vbase) {
 		printk(KERN_ERR "%s: Unable to setup PCI err regs\n", __func__);
 		res = -ENOMEM;
@@ -266,14 +270,15 @@ static int __devinit mpc85xx_pci_err_probe(struct platform_device *pdev)
 	}
 
 	if (edac_op_state == EDAC_OPSTATE_INT) {
-		pdata->irq = platform_get_irq(pdev, 0);
-		res = devm_request_irq(&pdev->dev, pdata->irq,
+		pdata->irq = irq_of_parse_and_map(op->node, 0);
+		res = devm_request_irq(&op->dev, pdata->irq,
 				       mpc85xx_pci_isr, IRQF_DISABLED,
 				       "[EDAC] PCI err", pci);
 		if (res < 0) {
 			printk(KERN_ERR
 			       "%s: Unable to requiest irq %d for "
 			       "MPC85xx PCI err\n", __func__, pdata->irq);
+			irq_dispose_mapping(pdata->irq);
 			res = -ENODEV;
 			goto err2;
 		}
@@ -282,23 +287,23 @@ static int __devinit mpc85xx_pci_err_probe(struct platform_device *pdev)
 			pdata->irq);
 	}
 
-	devres_remove_group(&pdev->dev, mpc85xx_pci_err_probe);
+	devres_remove_group(&op->dev, mpc85xx_pci_err_probe);
 	debugf3("%s(): success\n", __func__);
 	printk(KERN_INFO EDAC_MOD_STR " PCI err registered\n");
 
 	return 0;
 
 err2:
-	edac_pci_del_device(&pdev->dev);
+	edac_pci_del_device(&op->dev);
 err:
 	edac_pci_free_ctl_info(pci);
-	devres_release_group(&pdev->dev, mpc85xx_pci_err_probe);
+	devres_release_group(&op->dev, mpc85xx_pci_err_probe);
 	return res;
 }
 
-static int mpc85xx_pci_err_remove(struct platform_device *pdev)
+static int mpc85xx_pci_err_remove(struct of_device *op)
 {
-	struct edac_pci_ctl_info *pci = platform_get_drvdata(pdev);
+	struct edac_pci_ctl_info *pci = dev_get_drvdata(&op->dev);
 	struct mpc85xx_pci_pdata *pdata = pci->pvt_info;
 
 	debugf0("%s()\n", __func__);
@@ -318,12 +323,26 @@ static int mpc85xx_pci_err_remove(struct platform_device *pdev)
 	return 0;
 }
 
-static struct platform_driver mpc85xx_pci_err_driver = {
+static struct of_device_id mpc85xx_pci_err_of_match[] = {
+	{
+		.compatible = "fsl,mpc8540-pcix",
+	},
+	{
+		.compatible = "fsl,mpc8540-pci",
+	},
+	{},
+};
+
+static struct of_platform_driver mpc85xx_pci_err_driver = {
+	.owner = THIS_MODULE,
+	.name = "mpc85xx_pci_err",
+	.match_table = mpc85xx_pci_err_of_match,
 	.probe = mpc85xx_pci_err_probe,
 	.remove = __devexit_p(mpc85xx_pci_err_remove),
 	.driver = {
 		.name = "mpc85xx_pci_err",
-	}
+		.owner = THIS_MODULE,
+	},
 };
 
 #endif /* CONFIG_PCI */
@@ -1002,7 +1021,7 @@ static int __init mpc85xx_mc_init(void)
 		printk(KERN_WARNING EDAC_MOD_STR "L2 fails to register\n");
 
 #ifdef CONFIG_PCI
-	res = platform_driver_register(&mpc85xx_pci_err_driver);
+	res = of_register_platform_driver(&mpc85xx_pci_err_driver);
 	if (res)
 		printk(KERN_WARNING EDAC_MOD_STR "PCI fails to register\n");
 #endif
@@ -1025,7 +1044,7 @@ static void __exit mpc85xx_mc_exit(void)
 {
 	mtspr(SPRN_HID1, orig_hid1);
 #ifdef CONFIG_PCI
-	platform_driver_unregister(&mpc85xx_pci_err_driver);
+	of_unregister_platform_driver(&mpc85xx_pci_err_driver);
 #endif
 	of_unregister_platform_driver(&mpc85xx_l2_err_driver);
 	of_unregister_platform_driver(&mpc85xx_mc_err_driver);
diff --git a/drivers/edac/mv64x60_edac.c b/drivers/edac/mv64x60_edac.c
index bf071f140a05..083ce8d0c63d 100644
--- a/drivers/edac/mv64x60_edac.c
+++ b/drivers/edac/mv64x60_edac.c
@@ -71,6 +71,35 @@ static irqreturn_t mv64x60_pci_isr(int irq, void *dev_id)
 	return IRQ_HANDLED;
 }
 
+/*
+ * Bit 0 of MV64x60_PCIx_ERR_MASK does not exist on the 64360 and because of
+ * errata FEr-#11 and FEr-##16 for the 64460, it should be 0 on that chip as
+ * well. IOW, don't set bit 0.
+ */
+
+/* Erratum FEr PCI-#16: clear bit 0 of PCI SERRn Mask reg. */
+static int __init mv64x60_pci_fixup(struct platform_device *pdev)
+{
+	struct resource *r;
+	void __iomem *pci_serr;
+
+	r = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+	if (!r) {
+		printk(KERN_ERR "%s: Unable to get resource for "
+		       "PCI err regs\n", __func__);
+		return -ENOENT;
+	}
+
+	pci_serr = ioremap(r->start, r->end - r->start + 1);
+	if (!pci_serr)
+		return -ENOMEM;
+
+	out_le32(pci_serr, in_le32(pci_serr) & ~0x1);
+	iounmap(pci_serr);
+
+	return 0;
+}
+
 static int __devinit mv64x60_pci_err_probe(struct platform_device *pdev)
 {
 	struct edac_pci_ctl_info *pci;
@@ -128,6 +157,12 @@ static int __devinit mv64x60_pci_err_probe(struct platform_device *pdev)
 		goto err;
 	}
 
+	res = mv64x60_pci_fixup(pdev);
+	if (res < 0) {
+		printk(KERN_ERR "%s: PCI fixup failed\n", __func__);
+		goto err;
+	}
+
 	out_le32(pdata->pci_vbase + MV64X60_PCI_ERROR_CAUSE, 0);
 	out_le32(pdata->pci_vbase + MV64X60_PCI_ERROR_MASK, 0);
 	out_le32(pdata->pci_vbase + MV64X60_PCI_ERROR_MASK,
@@ -612,7 +647,7 @@ static void get_total_mem(struct mv64x60_mc_pdata *pdata)
 	if (!np)
 		return;
 
-	reg = get_property(np, "reg", NULL);
+	reg = of_get_property(np, "reg", NULL);
 
 	pdata->total_mem = reg[1];
 }