Diffstat (limited to 'drivers/edac')
 drivers/edac/amd64_edac.c      | 200
 drivers/edac/amd76x_edac.c     |  42
 drivers/edac/cell_edac.c       |  42
 drivers/edac/cpc925_edac.c     |  91
 drivers/edac/e752x_edac.c      | 116
 drivers/edac/e7xxx_edac.c      |  86
 drivers/edac/edac_core.h       |  47
 drivers/edac/edac_device.c     |  27
 drivers/edac/edac_mc.c         | 716
 drivers/edac/edac_mc_sysfs.c   |  70
 drivers/edac/edac_module.h     |   2
 drivers/edac/edac_pci.c        |   6
 drivers/edac/i3000_edac.c      |  49
 drivers/edac/i3200_edac.c      |  56
 drivers/edac/i5000_edac.c      | 236
 drivers/edac/i5100_edac.c      | 106
 drivers/edac/i5400_edac.c      | 265
 drivers/edac/i7300_edac.c      | 115
 drivers/edac/i7core_edac.c     | 270
 drivers/edac/i82443bxgx_edac.c |  41
 drivers/edac/i82860_edac.c     |  55
 drivers/edac/i82875p_edac.c    |  51
 drivers/edac/i82975x_edac.c    |  58
 drivers/edac/mpc85xx_edac.c    |  37
 drivers/edac/mv64x60_edac.c    |  47
 drivers/edac/pasemi_edac.c     |  49
 drivers/edac/ppc4xx_edac.c     |  50
 drivers/edac/r82600_edac.c     |  40
 drivers/edac/sb_edac.c         | 212
 drivers/edac/tile_edac.c       |  33
 drivers/edac/x38_edac.c        |  52
 31 files changed, 1821 insertions(+), 1446 deletions(-)
diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c
index 7ef73c919c5d..7be9b7288e90 100644
--- a/drivers/edac/amd64_edac.c
+++ b/drivers/edac/amd64_edac.c
@@ -715,25 +715,6 @@ static inline u64 input_addr_to_sys_addr(struct mem_ctl_info *mci,
 			   input_addr_to_dram_addr(mci, input_addr));
 }
 
-/*
- * Find the minimum and maximum InputAddr values that map to the given @csrow.
- * Pass back these values in *input_addr_min and *input_addr_max.
- */
-static void find_csrow_limits(struct mem_ctl_info *mci, int csrow,
-			      u64 *input_addr_min, u64 *input_addr_max)
-{
-	struct amd64_pvt *pvt;
-	u64 base, mask;
-
-	pvt = mci->pvt_info;
-	BUG_ON((csrow < 0) || (csrow >= pvt->csels[0].b_cnt));
-
-	get_cs_base_and_mask(pvt, csrow, 0, &base, &mask);
-
-	*input_addr_min = base & ~mask;
-	*input_addr_max = base | mask;
-}
-
 /* Map the Error address to a PAGE and PAGE OFFSET. */
 static inline void error_address_to_page_and_offset(u64 error_address,
 						    u32 *page, u32 *offset)
@@ -1058,6 +1039,37 @@ static void k8_map_sysaddr_to_csrow(struct mem_ctl_info *mci, u64 sys_addr,
 	int channel, csrow;
 	u32 page, offset;
 
+	error_address_to_page_and_offset(sys_addr, &page, &offset);
+
+	/*
+	 * Find out which node the error address belongs to. This may be
+	 * different from the node that detected the error.
+	 */
+	src_mci = find_mc_by_sys_addr(mci, sys_addr);
+	if (!src_mci) {
+		amd64_mc_err(mci, "failed to map error addr 0x%lx to a node\n",
+			     (unsigned long)sys_addr);
+		edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
+				     page, offset, syndrome,
+				     -1, -1, -1,
+				     EDAC_MOD_STR,
+				     "failed to map error addr to a node",
+				     NULL);
+		return;
+	}
+
+	/* Now map the sys_addr to a CSROW */
+	csrow = sys_addr_to_csrow(src_mci, sys_addr);
+	if (csrow < 0) {
+		edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
+				     page, offset, syndrome,
+				     -1, -1, -1,
+				     EDAC_MOD_STR,
+				     "failed to map error addr to a csrow",
+				     NULL);
+		return;
+	}
+
 	/* CHIPKILL enabled */
 	if (pvt->nbcfg & NBCFG_CHIPKILL) {
 		channel = get_channel_from_ecc_syndrome(mci, syndrome);
@@ -1067,9 +1079,15 @@ static void k8_map_sysaddr_to_csrow(struct mem_ctl_info *mci, u64 sys_addr,
 			 * 2 DIMMs is in error. So we need to ID 'both' of them
 			 * as suspect.
 			 */
-			amd64_mc_warn(mci, "unknown syndrome 0x%04x - possible "
-				      "error reporting race\n", syndrome);
-			edac_mc_handle_ce_no_info(mci, EDAC_MOD_STR);
+			amd64_mc_warn(src_mci, "unknown syndrome 0x%04x - "
+				      "possible error reporting race\n",
+				      syndrome);
+			edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
+					     page, offset, syndrome,
+					     csrow, -1, -1,
+					     EDAC_MOD_STR,
+					     "unknown syndrome - possible error reporting race",
+					     NULL);
 			return;
 		}
 	} else {
@@ -1084,28 +1102,10 @@ static void k8_map_sysaddr_to_csrow(struct mem_ctl_info *mci, u64 sys_addr,
 		channel = ((sys_addr & BIT(3)) != 0);
 	}
 
-	/*
-	 * Find out which node the error address belongs to. This may be
-	 * different from the node that detected the error.
-	 */
-	src_mci = find_mc_by_sys_addr(mci, sys_addr);
-	if (!src_mci) {
-		amd64_mc_err(mci, "failed to map error addr 0x%lx to a node\n",
-			     (unsigned long)sys_addr);
-		edac_mc_handle_ce_no_info(mci, EDAC_MOD_STR);
-		return;
-	}
-
-	/* Now map the sys_addr to a CSROW */
-	csrow = sys_addr_to_csrow(src_mci, sys_addr);
-	if (csrow < 0) {
-		edac_mc_handle_ce_no_info(src_mci, EDAC_MOD_STR);
-	} else {
-		error_address_to_page_and_offset(sys_addr, &page, &offset);
-
-		edac_mc_handle_ce(src_mci, page, offset, syndrome, csrow,
-				  channel, EDAC_MOD_STR);
-	}
+	edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, src_mci,
+			     page, offset, syndrome,
+			     csrow, channel, -1,
+			     EDAC_MOD_STR, "", NULL);
 }
 
 static int ddr2_cs_size(unsigned i, bool dct_width)
@@ -1611,15 +1611,20 @@ static void f1x_map_sysaddr_to_csrow(struct mem_ctl_info *mci, u64 sys_addr,
 	u32 page, offset;
 	int nid, csrow, chan = 0;
 
+	error_address_to_page_and_offset(sys_addr, &page, &offset);
+
 	csrow = f1x_translate_sysaddr_to_cs(pvt, sys_addr, &nid, &chan);
 
 	if (csrow < 0) {
-		edac_mc_handle_ce_no_info(mci, EDAC_MOD_STR);
+		edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
+				     page, offset, syndrome,
+				     -1, -1, -1,
+				     EDAC_MOD_STR,
+				     "failed to map error addr to a csrow",
+				     NULL);
 		return;
 	}
 
-	error_address_to_page_and_offset(sys_addr, &page, &offset);
-
 	/*
 	 * We need the syndromes for channel detection only when we're
 	 * ganged. Otherwise @chan should already contain the channel at
@@ -1628,16 +1633,10 @@ static void f1x_map_sysaddr_to_csrow(struct mem_ctl_info *mci, u64 sys_addr,
 	if (dct_ganging_enabled(pvt))
 		chan = get_channel_from_ecc_syndrome(mci, syndrome);
 
-	if (chan >= 0)
-		edac_mc_handle_ce(mci, page, offset, syndrome, csrow, chan,
-				  EDAC_MOD_STR);
-	else
-		/*
-		 * Channel unknown, report all channels on this CSROW as failed.
-		 */
-		for (chan = 0; chan < mci->csrows[csrow].nr_channels; chan++)
-			edac_mc_handle_ce(mci, page, offset, syndrome,
-					  csrow, chan, EDAC_MOD_STR);
+	edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
+			     page, offset, syndrome,
+			     csrow, chan, -1,
+			     EDAC_MOD_STR, "", NULL);
 }
 
 /*
@@ -1918,7 +1917,12 @@ static void amd64_handle_ce(struct mem_ctl_info *mci, struct mce *m)
 	/* Ensure that the Error Address is VALID */
 	if (!(m->status & MCI_STATUS_ADDRV)) {
 		amd64_mc_err(mci, "HW has no ERROR_ADDRESS available\n");
-		edac_mc_handle_ce_no_info(mci, EDAC_MOD_STR);
+		edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
+				     0, 0, 0,
+				     -1, -1, -1,
+				     EDAC_MOD_STR,
+				     "HW has no ERROR_ADDRESS available",
+				     NULL);
 		return;
 	}
 
@@ -1942,11 +1946,17 @@ static void amd64_handle_ue(struct mem_ctl_info *mci, struct mce *m)
 
 	if (!(m->status & MCI_STATUS_ADDRV)) {
 		amd64_mc_err(mci, "HW has no ERROR_ADDRESS available\n");
-		edac_mc_handle_ue_no_info(log_mci, EDAC_MOD_STR);
+		edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci,
+				     0, 0, 0,
+				     -1, -1, -1,
+				     EDAC_MOD_STR,
+				     "HW has no ERROR_ADDRESS available",
+				     NULL);
 		return;
 	}
 
 	sys_addr = get_error_address(m);
+	error_address_to_page_and_offset(sys_addr, &page, &offset);
 
 	/*
 	 * Find out which node the error address belongs to. This may be
@@ -1956,7 +1966,11 @@ static void amd64_handle_ue(struct mem_ctl_info *mci, struct mce *m)
 	if (!src_mci) {
 		amd64_mc_err(mci, "ERROR ADDRESS (0x%lx) NOT mapped to a MC\n",
 			     (unsigned long)sys_addr);
-		edac_mc_handle_ue_no_info(log_mci, EDAC_MOD_STR);
+		edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci,
+				     page, offset, 0,
+				     -1, -1, -1,
+				     EDAC_MOD_STR,
+				     "ERROR ADDRESS NOT mapped to a MC", NULL);
 		return;
 	}
 
@@ -1966,10 +1980,17 @@ static void amd64_handle_ue(struct mem_ctl_info *mci, struct mce *m)
 	if (csrow < 0) {
 		amd64_mc_err(mci, "ERROR_ADDRESS (0x%lx) NOT mapped to CS\n",
 			     (unsigned long)sys_addr);
-		edac_mc_handle_ue_no_info(log_mci, EDAC_MOD_STR);
+		edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci,
+				     page, offset, 0,
+				     -1, -1, -1,
+				     EDAC_MOD_STR,
+				     "ERROR ADDRESS NOT mapped to CS",
+				     NULL);
 	} else {
-		error_address_to_page_and_offset(sys_addr, &page, &offset);
-		edac_mc_handle_ue(log_mci, page, offset, csrow, EDAC_MOD_STR);
+		edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci,
+				     page, offset, 0,
+				     csrow, -1, -1,
+				     EDAC_MOD_STR, "", NULL);
 	}
 }
 
@@ -2171,7 +2192,7 @@ static u32 amd64_csrow_nr_pages(struct amd64_pvt *pvt, u8 dct, int csrow_nr)
 	nr_pages = pvt->ops->dbam_to_cs(pvt, dct, cs_mode) << (20 - PAGE_SHIFT);
 
 	debugf0("  (csrow=%d) DBAM map index= %d\n", csrow_nr, cs_mode);
-	debugf0("    nr_pages= %u  channel-count = %d\n",
+	debugf0("    nr_pages/channel= %u  channel-count = %d\n",
 		nr_pages, pvt->channel_count);
 
 	return nr_pages;
@@ -2185,9 +2206,12 @@ static int init_csrows(struct mem_ctl_info *mci)
 {
 	struct csrow_info *csrow;
 	struct amd64_pvt *pvt = mci->pvt_info;
-	u64 input_addr_min, input_addr_max, sys_addr, base, mask;
+	u64 base, mask;
 	u32 val;
-	int i, empty = 1;
+	int i, j, empty = 1;
+	enum mem_type mtype;
+	enum edac_type edac_mode;
+	int nr_pages = 0;
 
 	amd64_read_pci_cfg(pvt->F3, NBCFG, &val);
 
@@ -2211,41 +2235,32 @@ static int init_csrows(struct mem_ctl_info *mci)
 
 		empty = 0;
 		if (csrow_enabled(i, 0, pvt))
-			csrow->nr_pages = amd64_csrow_nr_pages(pvt, 0, i);
+			nr_pages = amd64_csrow_nr_pages(pvt, 0, i);
 		if (csrow_enabled(i, 1, pvt))
-			csrow->nr_pages += amd64_csrow_nr_pages(pvt, 1, i);
-		find_csrow_limits(mci, i, &input_addr_min, &input_addr_max);
-		sys_addr = input_addr_to_sys_addr(mci, input_addr_min);
-		csrow->first_page = (u32) (sys_addr >> PAGE_SHIFT);
-		sys_addr = input_addr_to_sys_addr(mci, input_addr_max);
-		csrow->last_page = (u32) (sys_addr >> PAGE_SHIFT);
+			nr_pages += amd64_csrow_nr_pages(pvt, 1, i);
 
 		get_cs_base_and_mask(pvt, i, 0, &base, &mask);
-		csrow->page_mask = ~mask;
 		/* 8 bytes of resolution */
 
-		csrow->mtype = amd64_determine_memory_type(pvt, i);
+		mtype = amd64_determine_memory_type(pvt, i);
 
 		debugf1("  for MC node %d csrow %d:\n", pvt->mc_node_id, i);
-		debugf1("    input_addr_min: 0x%lx input_addr_max: 0x%lx\n",
-			(unsigned long)input_addr_min,
-			(unsigned long)input_addr_max);
-		debugf1("    sys_addr: 0x%lx  page_mask: 0x%lx\n",
-			(unsigned long)sys_addr, csrow->page_mask);
-		debugf1("    nr_pages: %u  first_page: 0x%lx "
-			"last_page: 0x%lx\n",
-			(unsigned)csrow->nr_pages,
-			csrow->first_page, csrow->last_page);
+		debugf1("    nr_pages: %u\n", nr_pages * pvt->channel_count);
 
 		/*
 		 * determine whether CHIPKILL or JUST ECC or NO ECC is operating
 		 */
 		if (pvt->nbcfg & NBCFG_ECC_ENABLE)
-			csrow->edac_mode =
-			    (pvt->nbcfg & NBCFG_CHIPKILL) ?
-			    EDAC_S4ECD4ED : EDAC_SECDED;
+			edac_mode = (pvt->nbcfg & NBCFG_CHIPKILL) ?
+				    EDAC_S4ECD4ED : EDAC_SECDED;
 		else
-			csrow->edac_mode = EDAC_NONE;
+			edac_mode = EDAC_NONE;
+
+		for (j = 0; j < pvt->channel_count; j++) {
+			csrow->channels[j].dimm->mtype = mtype;
+			csrow->channels[j].dimm->edac_mode = edac_mode;
+			csrow->channels[j].dimm->nr_pages = nr_pages;
+		}
 	}
 
 	return empty;
@@ -2540,6 +2555,7 @@ static int amd64_init_one_instance(struct pci_dev *F2)
 	struct amd64_pvt *pvt = NULL;
 	struct amd64_family_type *fam_type = NULL;
 	struct mem_ctl_info *mci = NULL;
+	struct edac_mc_layer layers[2];
 	int err = 0, ret;
 	u8 nid = get_node_id(F2);
 
@@ -2574,7 +2590,13 @@ static int amd64_init_one_instance(struct pci_dev *F2)
 		goto err_siblings;
 
 	ret = -ENOMEM;
-	mci = edac_mc_alloc(0, pvt->csels[0].b_cnt, pvt->channel_count, nid);
+	layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
+	layers[0].size = pvt->csels[0].b_cnt;
+	layers[0].is_virt_csrow = true;
+	layers[1].type = EDAC_MC_LAYER_CHANNEL;
+	layers[1].size = pvt->channel_count;
+	layers[1].is_virt_csrow = false;
+	mci = edac_mc_alloc(nid, ARRAY_SIZE(layers), layers, 0);
 	if (!mci)
 		goto err_siblings;
 
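Every amd64_edac.c hunk above applies the same mechanical rewrite, and the drivers below repeat it, so it is worth stating once. A hedged sketch of the three recurring call shapes, with placeholder arguments standing in for whatever each driver decoded (not a verbatim quote of any single hunk):

/*
 * edac_mc_handle_ce(mci, page, offset, syndrome, row, chan, msg);
 *   becomes:
 * edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
 *                      page, offset, syndrome,
 *                      row, chan, -1, msg, "", NULL);
 *
 * edac_mc_handle_ue(mci, page, offset, row, msg);
 *   becomes:
 * edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci,
 *                      page, offset, 0,
 *                      row, -1, -1, msg, "", NULL);
 *
 * edac_mc_handle_ce_no_info(mci, msg);
 *   becomes:
 * edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
 *                      0, 0, 0, -1, -1, -1, msg, "", NULL);
 *
 * A -1 in a layer slot means "position not known"; the third slot is always
 * -1 for the two-layer (csrow + channel) controllers converted here.
 */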
diff --git a/drivers/edac/amd76x_edac.c b/drivers/edac/amd76x_edac.c
index f8fd3c807bde..9774d443fa57 100644
--- a/drivers/edac/amd76x_edac.c
+++ b/drivers/edac/amd76x_edac.c
@@ -29,7 +29,6 @@
 	edac_mc_chipset_printk(mci, level, "amd76x", fmt, ##arg)
 
 #define AMD76X_NR_CSROWS 8
-#define AMD76X_NR_CHANS  1
 #define AMD76X_NR_DIMMS  4
 
 /* AMD 76x register addresses - device 0 function 0 - PCI bridge */
@@ -146,8 +145,10 @@ static int amd76x_process_error_info(struct mem_ctl_info *mci,
 
 		if (handle_errors) {
 			row = (info->ecc_mode_status >> 4) & 0xf;
-			edac_mc_handle_ue(mci, mci->csrows[row].first_page, 0,
-					  row, mci->ctl_name);
+			edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci,
+					     mci->csrows[row].first_page, 0, 0,
+					     row, 0, -1,
+					     mci->ctl_name, "", NULL);
 		}
 	}
 
@@ -159,8 +160,10 @@ static int amd76x_process_error_info(struct mem_ctl_info *mci,
 
 		if (handle_errors) {
 			row = info->ecc_mode_status & 0xf;
-			edac_mc_handle_ce(mci, mci->csrows[row].first_page, 0,
-					  0, row, 0, mci->ctl_name);
+			edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
+					     mci->csrows[row].first_page, 0, 0,
+					     row, 0, -1,
+					     mci->ctl_name, "", NULL);
 		}
 	}
 
@@ -186,11 +189,13 @@ static void amd76x_init_csrows(struct mem_ctl_info *mci, struct pci_dev *pdev,
 			       enum edac_type edac_mode)
 {
 	struct csrow_info *csrow;
+	struct dimm_info *dimm;
 	u32 mba, mba_base, mba_mask, dms;
 	int index;
 
 	for (index = 0; index < mci->nr_csrows; index++) {
 		csrow = &mci->csrows[index];
+		dimm = csrow->channels[0].dimm;
 
 		/* find the DRAM Chip Select Base address and mask */
 		pci_read_config_dword(pdev,
@@ -203,13 +208,13 @@ static void amd76x_init_csrows(struct mem_ctl_info *mci, struct pci_dev *pdev,
 		mba_mask = ((mba & 0xff80) << 16) | 0x7fffffUL;
 		pci_read_config_dword(pdev, AMD76X_DRAM_MODE_STATUS, &dms);
 		csrow->first_page = mba_base >> PAGE_SHIFT;
-		csrow->nr_pages = (mba_mask + 1) >> PAGE_SHIFT;
-		csrow->last_page = csrow->first_page + csrow->nr_pages - 1;
+		dimm->nr_pages = (mba_mask + 1) >> PAGE_SHIFT;
+		csrow->last_page = csrow->first_page + dimm->nr_pages - 1;
 		csrow->page_mask = mba_mask >> PAGE_SHIFT;
-		csrow->grain = csrow->nr_pages << PAGE_SHIFT;
-		csrow->mtype = MEM_RDDR;
-		csrow->dtype = ((dms >> index) & 0x1) ? DEV_X4 : DEV_UNKNOWN;
-		csrow->edac_mode = edac_mode;
+		dimm->grain = dimm->nr_pages << PAGE_SHIFT;
+		dimm->mtype = MEM_RDDR;
+		dimm->dtype = ((dms >> index) & 0x1) ? DEV_X4 : DEV_UNKNOWN;
+		dimm->edac_mode = edac_mode;
 	}
 }
 
@@ -230,7 +235,8 @@ static int amd76x_probe1(struct pci_dev *pdev, int dev_idx)
 		EDAC_SECDED,
 		EDAC_SECDED
 	};
-	struct mem_ctl_info *mci = NULL;
+	struct mem_ctl_info *mci;
+	struct edac_mc_layer layers[2];
 	u32 ems;
 	u32 ems_mode;
 	struct amd76x_error_info discard;
@@ -238,11 +244,17 @@ static int amd76x_probe1(struct pci_dev *pdev, int dev_idx)
 	debugf0("%s()\n", __func__);
 	pci_read_config_dword(pdev, AMD76X_ECC_MODE_STATUS, &ems);
 	ems_mode = (ems >> 10) & 0x3;
-	mci = edac_mc_alloc(0, AMD76X_NR_CSROWS, AMD76X_NR_CHANS, 0);
 
-	if (mci == NULL) {
+	layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
+	layers[0].size = AMD76X_NR_CSROWS;
+	layers[0].is_virt_csrow = true;
+	layers[1].type = EDAC_MC_LAYER_CHANNEL;
+	layers[1].size = 1;
+	layers[1].is_virt_csrow = false;
+	mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers, 0);
+
+	if (mci == NULL)
 		return -ENOMEM;
-	}
 
 	debugf0("%s(): mci = %p\n", __func__, mci);
 	mci->dev = &pdev->dev;
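The probe-side half of the conversion is equally uniform: each driver replaces the old edac_mc_alloc(sz_pvt, nr_csrows, nr_chans, idx) call with an explicit layer description. A minimal sketch of the pattern, assuming a plain csrow-plus-channel controller; the example_* name is a placeholder, not part of the patch:

/* Distilled from the probe hunks in this series; kernel context assumed. */
static struct mem_ctl_info *example_alloc_two_layers(int mc_idx,
						     unsigned nr_csrows,
						     unsigned nr_channels,
						     unsigned sz_pvt)
{
	struct edac_mc_layer layers[2];

	layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;	/* outer layer */
	layers[0].size = nr_csrows;
	layers[0].is_virt_csrow = true;	/* maps 1:1 onto the visible csrows */
	layers[1].type = EDAC_MC_LAYER_CHANNEL;		/* inner layer */
	layers[1].size = nr_channels;
	layers[1].is_virt_csrow = false;

	return edac_mc_alloc(mc_idx, ARRAY_SIZE(layers), layers, sz_pvt);
}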
diff --git a/drivers/edac/cell_edac.c b/drivers/edac/cell_edac.c
index 9a6a274e6925..69ee6aab5c71 100644
--- a/drivers/edac/cell_edac.c
+++ b/drivers/edac/cell_edac.c
@@ -48,8 +48,9 @@ static void cell_edac_count_ce(struct mem_ctl_info *mci, int chan, u64 ar)
 	syndrome = (ar & 0x000000001fe00000ul) >> 21;
 
 	/* TODO: Decoding of the error address */
-	edac_mc_handle_ce(mci, csrow->first_page + pfn, offset,
-			  syndrome, 0, chan, "");
+	edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
+			     csrow->first_page + pfn, offset, syndrome,
+			     0, chan, -1, "", "", NULL);
 }
 
 static void cell_edac_count_ue(struct mem_ctl_info *mci, int chan, u64 ar)
@@ -69,7 +70,9 @@ static void cell_edac_count_ue(struct mem_ctl_info *mci, int chan, u64 ar)
 	offset = address & ~PAGE_MASK;
 
 	/* TODO: Decoding of the error address */
-	edac_mc_handle_ue(mci, csrow->first_page + pfn, offset, 0, "");
+	edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci,
+			     csrow->first_page + pfn, offset, 0,
+			     0, chan, -1, "", "", NULL);
 }
 
 static void cell_edac_check(struct mem_ctl_info *mci)
@@ -124,8 +127,11 @@ static void cell_edac_check(struct mem_ctl_info *mci)
 static void __devinit cell_edac_init_csrows(struct mem_ctl_info *mci)
 {
 	struct csrow_info *csrow = &mci->csrows[0];
+	struct dimm_info *dimm;
 	struct cell_edac_priv *priv = mci->pvt_info;
 	struct device_node *np;
+	int j;
+	u32 nr_pages;
 
 	for (np = NULL;
 	     (np = of_find_node_by_name(np, "memory")) != NULL;) {
@@ -140,15 +146,20 @@ static void __devinit cell_edac_init_csrows(struct mem_ctl_info *mci)
 		if (of_node_to_nid(np) != priv->node)
 			continue;
 		csrow->first_page = r.start >> PAGE_SHIFT;
-		csrow->nr_pages = resource_size(&r) >> PAGE_SHIFT;
-		csrow->last_page = csrow->first_page + csrow->nr_pages - 1;
-		csrow->mtype = MEM_XDR;
-		csrow->edac_mode = EDAC_SECDED;
+		nr_pages = resource_size(&r) >> PAGE_SHIFT;
+		csrow->last_page = csrow->first_page + nr_pages - 1;
+
+		for (j = 0; j < csrow->nr_channels; j++) {
+			dimm = csrow->channels[j].dimm;
+			dimm->mtype = MEM_XDR;
+			dimm->edac_mode = EDAC_SECDED;
+			dimm->nr_pages = nr_pages / csrow->nr_channels;
+		}
 		dev_dbg(mci->dev,
 			"Initialized on node %d, chanmask=0x%x,"
 			" first_page=0x%lx, nr_pages=0x%x\n",
 			priv->node, priv->chanmask,
-			csrow->first_page, csrow->nr_pages);
+			csrow->first_page, nr_pages);
 		break;
 	}
 }
@@ -157,9 +168,10 @@ static int __devinit cell_edac_probe(struct platform_device *pdev)
 {
 	struct cbe_mic_tm_regs __iomem *regs;
 	struct mem_ctl_info *mci;
+	struct edac_mc_layer layers[2];
 	struct cell_edac_priv *priv;
 	u64 reg;
-	int rc, chanmask;
+	int rc, chanmask, num_chans;
 
 	regs = cbe_get_cpu_mic_tm_regs(cbe_node_to_cpu(pdev->id));
 	if (regs == NULL)
@@ -184,8 +196,16 @@ static int __devinit cell_edac_probe(struct platform_device *pdev)
 		in_be64(&regs->mic_fir));
 
 	/* Allocate & init EDAC MC data structure */
-	mci = edac_mc_alloc(sizeof(struct cell_edac_priv), 1,
-			    chanmask == 3 ? 2 : 1, pdev->id);
+	num_chans = chanmask == 3 ? 2 : 1;
+
+	layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
+	layers[0].size = 1;
+	layers[0].is_virt_csrow = true;
+	layers[1].type = EDAC_MC_LAYER_CHANNEL;
+	layers[1].size = num_chans;
+	layers[1].is_virt_csrow = false;
+	mci = edac_mc_alloc(pdev->id, ARRAY_SIZE(layers), layers,
+			    sizeof(struct cell_edac_priv));
 	if (mci == NULL)
 		return -ENOMEM;
 	priv = mci->pvt_info;
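With nr_pages moved from the csrow to the per-channel DIMM structures, cell_edac now accounts each csrow's pages evenly across its channels. A hedged arithmetic sketch of that split (illustrative values, hypothetical helper):

/* A 512 MiB csrow is 0x20000 pages of 4 KiB; on a chanmask == 3 (two-channel)
 * board each channel's DIMM is credited 0x20000 / 2 = 0x10000 pages, so the
 * per-DIMM counts sum back to the csrow size. */
static u32 example_pages_per_dimm(u32 csrow_nr_pages, unsigned nr_channels)
{
	return csrow_nr_pages / nr_channels;
}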
diff --git a/drivers/edac/cpc925_edac.c b/drivers/edac/cpc925_edac.c
index a774c0ddaf5b..e22030a9de66 100644
--- a/drivers/edac/cpc925_edac.c
+++ b/drivers/edac/cpc925_edac.c
@@ -329,9 +329,10 @@ static void cpc925_init_csrows(struct mem_ctl_info *mci)
 {
 	struct cpc925_mc_pdata *pdata = mci->pvt_info;
 	struct csrow_info *csrow;
-	int index;
+	struct dimm_info *dimm;
+	int index, j;
 	u32 mbmr, mbbar, bba;
-	unsigned long row_size, last_nr_pages = 0;
+	unsigned long row_size, nr_pages, last_nr_pages = 0;
 
 	get_total_mem(pdata);
 
@@ -350,36 +351,41 @@ static void cpc925_init_csrows(struct mem_ctl_info *mci)
 
 		row_size = bba * (1UL << 28);	/* 256M */
 		csrow->first_page = last_nr_pages;
-		csrow->nr_pages = row_size >> PAGE_SHIFT;
-		csrow->last_page = csrow->first_page + csrow->nr_pages - 1;
+		nr_pages = row_size >> PAGE_SHIFT;
+		csrow->last_page = csrow->first_page + nr_pages - 1;
 		last_nr_pages = csrow->last_page + 1;
 
-		csrow->mtype = MEM_RDDR;
-		csrow->edac_mode = EDAC_SECDED;
+		for (j = 0; j < csrow->nr_channels; j++) {
+			dimm = csrow->channels[j].dimm;
 
-		switch (csrow->nr_channels) {
-		case 1:	/* Single channel */
-			csrow->grain = 32; /* four-beat burst of 32 bytes */
-			break;
-		case 2:	/* Dual channel */
-		default:
-			csrow->grain = 64; /* four-beat burst of 64 bytes */
-			break;
-		}
-
-		switch ((mbmr & MBMR_MODE_MASK) >> MBMR_MODE_SHIFT) {
-		case 6:	/* 0110, no way to differentiate X8 VS X16 */
-		case 5:	/* 0101 */
-		case 8:	/* 1000 */
-			csrow->dtype = DEV_X16;
-			break;
-		case 7:	/* 0111 */
-		case 9:	/* 1001 */
-			csrow->dtype = DEV_X8;
-			break;
-		default:
-			csrow->dtype = DEV_UNKNOWN;
-			break;
+			dimm->nr_pages = nr_pages / csrow->nr_channels;
+			dimm->mtype = MEM_RDDR;
+			dimm->edac_mode = EDAC_SECDED;
+
+			switch (csrow->nr_channels) {
+			case 1:	/* Single channel */
+				dimm->grain = 32; /* four-beat burst of 32 bytes */
+				break;
+			case 2:	/* Dual channel */
+			default:
+				dimm->grain = 64; /* four-beat burst of 64 bytes */
+				break;
+			}
+
+			switch ((mbmr & MBMR_MODE_MASK) >> MBMR_MODE_SHIFT) {
+			case 6:	/* 0110, no way to differentiate X8 VS X16 */
+			case 5:	/* 0101 */
+			case 8:	/* 1000 */
+				dimm->dtype = DEV_X16;
+				break;
+			case 7:	/* 0111 */
+			case 9:	/* 1001 */
+				dimm->dtype = DEV_X8;
+				break;
+			default:
+				dimm->dtype = DEV_UNKNOWN;
+				break;
+			}
 		}
 	}
 }
@@ -549,13 +555,18 @@ static void cpc925_mc_check(struct mem_ctl_info *mci)
 	if (apiexcp & CECC_EXCP_DETECTED) {
 		cpc925_mc_printk(mci, KERN_INFO, "DRAM CECC Fault\n");
 		channel = cpc925_mc_find_channel(mci, syndrome);
-		edac_mc_handle_ce(mci, pfn, offset, syndrome,
-				  csrow, channel, mci->ctl_name);
+		edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
+				     pfn, offset, syndrome,
+				     csrow, channel, -1,
+				     mci->ctl_name, "", NULL);
 	}
 
 	if (apiexcp & UECC_EXCP_DETECTED) {
 		cpc925_mc_printk(mci, KERN_INFO, "DRAM UECC Fault\n");
-		edac_mc_handle_ue(mci, pfn, offset, csrow, mci->ctl_name);
+		edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci,
+				     pfn, offset, 0,
+				     csrow, -1, -1,
+				     mci->ctl_name, "", NULL);
 	}
 
 	cpc925_mc_printk(mci, KERN_INFO, "Dump registers:\n");
@@ -927,6 +938,7 @@ static int __devinit cpc925_probe(struct platform_device *pdev)
 {
 	static int edac_mc_idx;
 	struct mem_ctl_info *mci;
+	struct edac_mc_layer layers[2];
 	void __iomem *vbase;
 	struct cpc925_mc_pdata *pdata;
 	struct resource *r;
@@ -962,9 +974,16 @@ static int __devinit cpc925_probe(struct platform_device *pdev)
 		goto err2;
 	}
 
-	nr_channels = cpc925_mc_get_channels(vbase);
-	mci = edac_mc_alloc(sizeof(struct cpc925_mc_pdata),
-			CPC925_NR_CSROWS, nr_channels + 1, edac_mc_idx);
+	nr_channels = cpc925_mc_get_channels(vbase) + 1;
+
+	layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
+	layers[0].size = CPC925_NR_CSROWS;
+	layers[0].is_virt_csrow = true;
+	layers[1].type = EDAC_MC_LAYER_CHANNEL;
+	layers[1].size = nr_channels;
+	layers[1].is_virt_csrow = false;
+	mci = edac_mc_alloc(edac_mc_idx, ARRAY_SIZE(layers), layers,
+			    sizeof(struct cpc925_mc_pdata));
 	if (!mci) {
		cpc925_printk(KERN_ERR, "No memory for mem_ctl_info\n");
 		res = -ENOMEM;
diff --git a/drivers/edac/e752x_edac.c b/drivers/edac/e752x_edac.c
index 41223261ede9..3186512c9739 100644
--- a/drivers/edac/e752x_edac.c
+++ b/drivers/edac/e752x_edac.c
@@ -4,7 +4,11 @@
  * This file may be distributed under the terms of the
  * GNU General Public License.
  *
- * See "enum e752x_chips" below for supported chipsets
+ * Implement support for the e7520, e7525, e7320 and i3100 memory controllers.
+ *
+ * Datasheets:
+ *	http://www.intel.in/content/www/in/en/chipsets/e7525-memory-controller-hub-datasheet.html
+ *	ftp://download.intel.com/design/intarch/datashts/31345803.pdf
  *
  * Written by Tom Zimmerman
  *
@@ -13,8 +17,6 @@
  *	Wang Zhenyu at intel.com
  *	Dave Jiang at mvista.com
  *
- * $Id: edac_e752x.c,v 1.5.2.11 2005/10/05 00:43:44 dsp_llnl Exp $
- *
  */
 
 #include <linux/module.h>
@@ -187,6 +189,25 @@ enum e752x_chips {
 	I3100 = 3
 };
 
+/*
+ * These chips support single-rank and dual-rank memories only.
+ *
+ * On e752x chips, the odd rows are present only on dual-rank memories.
+ * Dividing the rank by two will provide the dimm#
+ *
+ * i3100 MC has a different mapping: it supports only 4 ranks.
+ *
+ * The mapping is (from 1 to n):
+ *	slot		single-ranked	double-ranked
+ *	dimm #1 -> rank #4	NA
+ *	dimm #2 -> rank #3	NA
+ *	dimm #3 -> rank #2	Ranks 2 and 3
+ *	dimm #4 -> rank #1	Ranks 1 and 4
+ *
+ * FIXME: The current mapping for i3100 considers that it supports up to 8
+ * ranks/channel, but datasheet says that the MC supports only 4 ranks.
+ */
+
 struct e752x_pvt {
 	struct pci_dev *bridge_ck;
 	struct pci_dev *dev_d0f0;
@@ -350,8 +371,10 @@ static void do_process_ce(struct mem_ctl_info *mci, u16 error_one,
 	channel = !(error_one & 1);
 
 	/* e752x mc reads 34:6 of the DRAM linear address */
-	edac_mc_handle_ce(mci, page, offset_in_page(sec1_add << 4),
-			  sec1_syndrome, row, channel, "e752x CE");
+	edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
+			     page, offset_in_page(sec1_add << 4), sec1_syndrome,
+			     row, channel, -1,
+			     "e752x CE", "", NULL);
 }
 
 static inline void process_ce(struct mem_ctl_info *mci, u16 error_one,
@@ -385,9 +408,12 @@ static void do_process_ue(struct mem_ctl_info *mci, u16 error_one,
 			edac_mc_find_csrow_by_page(mci, block_page);
 
 		/* e752x mc reads 34:6 of the DRAM linear address */
-		edac_mc_handle_ue(mci, block_page,
-				  offset_in_page(error_2b << 4),
-				  row, "e752x UE from Read");
+		edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci,
+				     block_page,
+				     offset_in_page(error_2b << 4), 0,
+				     row, -1, -1,
+				     "e752x UE from Read", "", NULL);
+
 	}
 	if (error_one & 0x0404) {
 		error_2b = scrb_add;
@@ -401,9 +427,11 @@ static void do_process_ue(struct mem_ctl_info *mci, u16 error_one,
 			edac_mc_find_csrow_by_page(mci, block_page);
 
 		/* e752x mc reads 34:6 of the DRAM linear address */
-		edac_mc_handle_ue(mci, block_page,
-				  offset_in_page(error_2b << 4),
-				  row, "e752x UE from Scruber");
+		edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci,
+				     block_page,
+				     offset_in_page(error_2b << 4), 0,
+				     row, -1, -1,
+				     "e752x UE from Scrubber", "", NULL);
 	}
 }
 
@@ -426,7 +454,9 @@ static inline void process_ue_no_info_wr(struct mem_ctl_info *mci,
 		return;
 
 	debugf3("%s()\n", __func__);
-	edac_mc_handle_ue_no_info(mci, "e752x UE log memory write");
+	edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 0, 0, 0,
+			     -1, -1, -1,
+			     "e752x UE log memory write", "", NULL);
 }
 
 static void do_process_ded_retry(struct mem_ctl_info *mci, u16 error,
@@ -1044,7 +1074,7 @@ static void e752x_init_csrows(struct mem_ctl_info *mci, struct pci_dev *pdev,
 	int drc_drbg;		/* DRB granularity 0=64mb, 1=128mb */
 	int drc_ddim;		/* DRAM Data Integrity Mode 0=none, 2=edac */
 	u8 value;
-	u32 dra, drc, cumul_size;
+	u32 dra, drc, cumul_size, i, nr_pages;
 
 	dra = 0;
 	for (index = 0; index < 4; index++) {
@@ -1053,7 +1083,7 @@ static void e752x_init_csrows(struct mem_ctl_info *mci, struct pci_dev *pdev,
 		dra |= dra_reg << (index * 8);
 	}
 	pci_read_config_dword(pdev, E752X_DRC, &drc);
-	drc_chan = dual_channel_active(ddrcsr);
+	drc_chan = dual_channel_active(ddrcsr) ? 1 : 0;
 	drc_drbg = drc_chan + 1;	/* 128 in dual mode, 64 in single */
 	drc_ddim = (drc >> 20) & 0x3;
 
@@ -1078,26 +1108,33 @@ static void e752x_init_csrows(struct mem_ctl_info *mci, struct pci_dev *pdev,
 
 		csrow->first_page = last_cumul_size;
 		csrow->last_page = cumul_size - 1;
-		csrow->nr_pages = cumul_size - last_cumul_size;
+		nr_pages = cumul_size - last_cumul_size;
 		last_cumul_size = cumul_size;
-		csrow->grain = 1 << 12;	/* 4KiB - resolution of CELOG */
-		csrow->mtype = MEM_RDDR;	/* only one type supported */
-		csrow->dtype = mem_dev ? DEV_X4 : DEV_X8;
+
+		for (i = 0; i < csrow->nr_channels; i++) {
+			struct dimm_info *dimm = csrow->channels[i].dimm;
 
-		/*
-		 * if single channel or x8 devices then SECDED
-		 * if dual channel and x4 then S4ECD4ED
-		 */
-		if (drc_ddim) {
-			if (drc_chan && mem_dev) {
-				csrow->edac_mode = EDAC_S4ECD4ED;
-				mci->edac_cap |= EDAC_FLAG_S4ECD4ED;
-			} else {
-				csrow->edac_mode = EDAC_SECDED;
-				mci->edac_cap |= EDAC_FLAG_SECDED;
-			}
-		} else
-			csrow->edac_mode = EDAC_NONE;
+			debugf3("Initializing rank at (%i,%i)\n", index, i);
+			dimm->nr_pages = nr_pages / csrow->nr_channels;
+			dimm->grain = 1 << 12;	/* 4KiB - resolution of CELOG */
+			dimm->mtype = MEM_RDDR;	/* only one type supported */
+			dimm->dtype = mem_dev ? DEV_X4 : DEV_X8;
+
+			/*
+			 * if single channel or x8 devices then SECDED
+			 * if dual channel and x4 then S4ECD4ED
+			 */
+			if (drc_ddim) {
+				if (drc_chan && mem_dev) {
+					dimm->edac_mode = EDAC_S4ECD4ED;
+					mci->edac_cap |= EDAC_FLAG_S4ECD4ED;
+				} else {
+					dimm->edac_mode = EDAC_SECDED;
+					mci->edac_cap |= EDAC_FLAG_SECDED;
+				}
+			} else
+				dimm->edac_mode = EDAC_NONE;
+		}
 	}
 }
 
@@ -1226,6 +1263,7 @@ static int e752x_probe1(struct pci_dev *pdev, int dev_idx)
 	u16 pci_data;
 	u8 stat8;
 	struct mem_ctl_info *mci;
+	struct edac_mc_layer layers[2];
 	struct e752x_pvt *pvt;
 	u16 ddrcsr;
 	int drc_chan;	/* Number of channels 0=1chan,1=2chan */
@@ -1252,11 +1290,15 @@ static int e752x_probe1(struct pci_dev *pdev, int dev_idx)
 	/* Dual channel = 1, Single channel = 0 */
 	drc_chan = dual_channel_active(ddrcsr);
 
-	mci = edac_mc_alloc(sizeof(*pvt), E752X_NR_CSROWS, drc_chan + 1, 0);
-
-	if (mci == NULL) {
+	layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
+	layers[0].size = E752X_NR_CSROWS;
+	layers[0].is_virt_csrow = true;
+	layers[1].type = EDAC_MC_LAYER_CHANNEL;
+	layers[1].size = drc_chan + 1;
+	layers[1].is_virt_csrow = false;
+	mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers, sizeof(*pvt));
+	if (mci == NULL)
 		return -ENOMEM;
-	}
 
 	debugf3("%s(): init mci\n", __func__);
 	mci->mtype_cap = MEM_FLAG_RDDR;
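The new e752x comment block above states the rank-to-DIMM rule in prose; a hedged sketch of the same rule as code, valid for the e752x family only (hypothetical helper, not in the patch): since ranks are exposed as csrows and a dual-rank DIMM occupies two consecutive ranks, the DIMM number falls out of a divide by two.

static inline int example_e752x_rank_to_dimm(int rank)
{
	return rank >> 1;	/* ranks 0,1 -> dimm 0; ranks 2,3 -> dimm 1 */
}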
diff --git a/drivers/edac/e7xxx_edac.c b/drivers/edac/e7xxx_edac.c
index 68dea87b72e6..9a9c1a546797 100644
--- a/drivers/edac/e7xxx_edac.c
+++ b/drivers/edac/e7xxx_edac.c
@@ -10,6 +10,9 @@
  * Based on work by Dan Hollis <goemon at anime dot net> and others.
  *	http://www.anime.net/~goemon/linux-ecc/
  *
+ * Datasheet:
+ *	http://www.intel.com/content/www/us/en/chipsets/e7501-chipset-memory-controller-hub-datasheet.html
+ *
  * Contributors:
  *	Eric Biederman (Linux Networx)
  *	Tom Zimmerman (Linux Networx)
@@ -71,7 +74,7 @@
 #endif				/* PCI_DEVICE_ID_INTEL_7505_1_ERR */
 
 #define E7XXX_NR_CSROWS		8	/* number of csrows */
-#define E7XXX_NR_DIMMS		8	/* FIXME - is this correct? */
+#define E7XXX_NR_DIMMS		8	/* 2 channels, 4 dimms/channel */
 
 /* E7XXX register addresses - device 0 function 0 */
 #define E7XXX_DRB		0x60	/* DRAM row boundary register (8b) */
@@ -216,13 +219,15 @@ static void process_ce(struct mem_ctl_info *mci, struct e7xxx_error_info *info)
 	row = edac_mc_find_csrow_by_page(mci, page);
 	/* convert syndrome to channel */
 	channel = e7xxx_find_channel(syndrome);
-	edac_mc_handle_ce(mci, page, 0, syndrome, row, channel, "e7xxx CE");
+	edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, page, 0, syndrome,
+			     row, channel, -1, "e7xxx CE", "", NULL);
 }
 
 static void process_ce_no_info(struct mem_ctl_info *mci)
 {
 	debugf3("%s()\n", __func__);
-	edac_mc_handle_ce_no_info(mci, "e7xxx CE log register overflow");
+	edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 0, 0, 0, -1, -1, -1,
+			     "e7xxx CE log register overflow", "", NULL);
 }
 
 static void process_ue(struct mem_ctl_info *mci, struct e7xxx_error_info *info)
@@ -236,13 +241,17 @@ static void process_ue(struct mem_ctl_info *mci, struct e7xxx_error_info *info)
 	/* FIXME - should use PAGE_SHIFT */
 	block_page = error_2b >> 6;	/* convert to 4k address */
 	row = edac_mc_find_csrow_by_page(mci, block_page);
-	edac_mc_handle_ue(mci, block_page, 0, row, "e7xxx UE");
+
+	edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, block_page, 0, 0,
+			     row, -1, -1, "e7xxx UE", "", NULL);
 }
 
 static void process_ue_no_info(struct mem_ctl_info *mci)
 {
 	debugf3("%s()\n", __func__);
-	edac_mc_handle_ue_no_info(mci, "e7xxx UE log register overflow");
+
+	edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 0, 0, 0, -1, -1, -1,
+			     "e7xxx UE log register overflow", "", NULL);
 }
 
 static void e7xxx_get_error_info(struct mem_ctl_info *mci,
@@ -347,11 +356,12 @@ static void e7xxx_init_csrows(struct mem_ctl_info *mci, struct pci_dev *pdev,
 			      int dev_idx, u32 drc)
 {
 	unsigned long last_cumul_size;
-	int index;
+	int index, j;
 	u8 value;
-	u32 dra, cumul_size;
+	u32 dra, cumul_size, nr_pages;
 	int drc_chan, drc_drbg, drc_ddim, mem_dev;
 	struct csrow_info *csrow;
+	struct dimm_info *dimm;
 
 	pci_read_config_dword(pdev, E7XXX_DRA, &dra);
 	drc_chan = dual_channel_active(drc, dev_idx);
@@ -379,26 +389,32 @@ static void e7xxx_init_csrows(struct mem_ctl_info *mci, struct pci_dev *pdev,
 
 		csrow->first_page = last_cumul_size;
 		csrow->last_page = cumul_size - 1;
-		csrow->nr_pages = cumul_size - last_cumul_size;
+		nr_pages = cumul_size - last_cumul_size;
 		last_cumul_size = cumul_size;
-		csrow->grain = 1 << 12;	/* 4KiB - resolution of CELOG */
-		csrow->mtype = MEM_RDDR;	/* only one type supported */
-		csrow->dtype = mem_dev ? DEV_X4 : DEV_X8;
+
+		for (j = 0; j < drc_chan + 1; j++) {
+			dimm = csrow->channels[j].dimm;
 
-		/*
-		 * if single channel or x8 devices then SECDED
-		 * if dual channel and x4 then S4ECD4ED
-		 */
-		if (drc_ddim) {
-			if (drc_chan && mem_dev) {
-				csrow->edac_mode = EDAC_S4ECD4ED;
-				mci->edac_cap |= EDAC_FLAG_S4ECD4ED;
-			} else {
-				csrow->edac_mode = EDAC_SECDED;
-				mci->edac_cap |= EDAC_FLAG_SECDED;
-			}
-		} else
-			csrow->edac_mode = EDAC_NONE;
+			dimm->nr_pages = nr_pages / (drc_chan + 1);
+			dimm->grain = 1 << 12;	/* 4KiB - resolution of CELOG */
+			dimm->mtype = MEM_RDDR;	/* only one type supported */
+			dimm->dtype = mem_dev ? DEV_X4 : DEV_X8;
+
+			/*
+			 * if single channel or x8 devices then SECDED
+			 * if dual channel and x4 then S4ECD4ED
+			 */
+			if (drc_ddim) {
+				if (drc_chan && mem_dev) {
+					dimm->edac_mode = EDAC_S4ECD4ED;
+					mci->edac_cap |= EDAC_FLAG_S4ECD4ED;
+				} else {
+					dimm->edac_mode = EDAC_SECDED;
+					mci->edac_cap |= EDAC_FLAG_SECDED;
+				}
+			} else
+				dimm->edac_mode = EDAC_NONE;
+		}
 	}
 }
 
@@ -406,6 +422,7 @@ static int e7xxx_probe1(struct pci_dev *pdev, int dev_idx)
 {
 	u16 pci_data;
 	struct mem_ctl_info *mci = NULL;
+	struct edac_mc_layer layers[2];
 	struct e7xxx_pvt *pvt = NULL;
 	u32 drc;
 	int drc_chan;
@@ -416,8 +433,21 @@ static int e7xxx_probe1(struct pci_dev *pdev, int dev_idx)
 	pci_read_config_dword(pdev, E7XXX_DRC, &drc);
 
 	drc_chan = dual_channel_active(drc, dev_idx);
-	mci = edac_mc_alloc(sizeof(*pvt), E7XXX_NR_CSROWS, drc_chan + 1, 0);
-
+	/*
+	 * According to the datasheet, this device has a maximum of
+	 * 4 DIMMs per channel, either single-rank or dual-rank. So, the
+	 * total amount of dimms is 8 (E7XXX_NR_DIMMS).
+	 * That means that the DIMM is mapped as CSROWs, and the channel
+	 * will map the rank. So, an error to either channel should be
+	 * attributed to the same dimm.
+	 */
+	layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
+	layers[0].size = E7XXX_NR_CSROWS;
+	layers[0].is_virt_csrow = true;
+	layers[1].type = EDAC_MC_LAYER_CHANNEL;
+	layers[1].size = drc_chan + 1;
+	layers[1].is_virt_csrow = false;
+	mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers, sizeof(*pvt));
 	if (mci == NULL)
 		return -ENOMEM;
 
diff --git a/drivers/edac/edac_core.h b/drivers/edac/edac_core.h
index 5b739411d62f..117490d4f835 100644
--- a/drivers/edac/edac_core.h
+++ b/drivers/edac/edac_core.h
@@ -447,8 +447,10 @@ static inline void pci_write_bits32(struct pci_dev *pdev, int offset,
 
 #endif				/* CONFIG_PCI */
 
-extern struct mem_ctl_info *edac_mc_alloc(unsigned sz_pvt, unsigned nr_csrows,
-					  unsigned nr_chans, int edac_index);
+struct mem_ctl_info *edac_mc_alloc(unsigned mc_num,
+				   unsigned n_layers,
+				   struct edac_mc_layer *layers,
+				   unsigned sz_pvt);
 extern int edac_mc_add_mc(struct mem_ctl_info *mci);
 extern void edac_mc_free(struct mem_ctl_info *mci);
 extern struct mem_ctl_info *edac_mc_find(int idx);
@@ -456,35 +458,17 @@ extern struct mem_ctl_info *find_mci_by_dev(struct device *dev);
 extern struct mem_ctl_info *edac_mc_del_mc(struct device *dev);
 extern int edac_mc_find_csrow_by_page(struct mem_ctl_info *mci,
 				      unsigned long page);
-
-/*
- * The no info errors are used when error overflows are reported.
- * There are a limited number of error logging registers that can
- * be exausted.  When all registers are exhausted and an additional
- * error occurs then an error overflow register records that an
- * error occurred and the type of error, but doesn't have any
- * further information.  The ce/ue versions make for cleaner
- * reporting logic and function interface - reduces conditional
- * statement clutter and extra function arguments.
- */
-extern void edac_mc_handle_ce(struct mem_ctl_info *mci,
-			      unsigned long page_frame_number,
-			      unsigned long offset_in_page,
-			      unsigned long syndrome, int row, int channel,
-			      const char *msg);
-extern void edac_mc_handle_ce_no_info(struct mem_ctl_info *mci,
-				      const char *msg);
-extern void edac_mc_handle_ue(struct mem_ctl_info *mci,
-			      unsigned long page_frame_number,
-			      unsigned long offset_in_page, int row,
-			      const char *msg);
-extern void edac_mc_handle_ue_no_info(struct mem_ctl_info *mci,
-				      const char *msg);
-extern void edac_mc_handle_fbd_ue(struct mem_ctl_info *mci, unsigned int csrow,
-				  unsigned int channel0, unsigned int channel1,
-				  char *msg);
-extern void edac_mc_handle_fbd_ce(struct mem_ctl_info *mci, unsigned int csrow,
-				  unsigned int channel, char *msg);
+void edac_mc_handle_error(const enum hw_event_mc_err_type type,
+			  struct mem_ctl_info *mci,
+			  const unsigned long page_frame_number,
+			  const unsigned long offset_in_page,
+			  const unsigned long syndrome,
+			  const int layer0,
+			  const int layer1,
+			  const int layer2,
+			  const char *msg,
+			  const char *other_detail,
+			  const void *mcelog);
 
 /*
  * edac_device APIs
@@ -496,6 +480,7 @@ extern void edac_device_handle_ue(struct edac_device_ctl_info *edac_dev,
 extern void edac_device_handle_ce(struct edac_device_ctl_info *edac_dev,
 				  int inst_nr, int block_nr, const char *msg);
 extern int edac_device_alloc_index(void);
+extern const char *edac_layer_name[];
 
 /*
  * edac_pci APIs
diff --git a/drivers/edac/edac_device.c b/drivers/edac/edac_device.c
index 45b8f4bdd773..ee3f1f810c1e 100644
--- a/drivers/edac/edac_device.c
+++ b/drivers/edac/edac_device.c
@@ -79,7 +79,7 @@ struct edac_device_ctl_info *edac_device_alloc_ctl_info(
 	unsigned total_size;
 	unsigned count;
 	unsigned instance, block, attr;
-	void *pvt;
+	void *pvt, *p;
 	int err;
 
 	debugf4("%s() instances=%d blocks=%d\n",
@@ -92,35 +92,30 @@ struct edac_device_ctl_info *edac_device_alloc_ctl_info(
 	 * to be at least as stringent as what the compiler would
 	 * provide if we could simply hardcode everything into a single struct.
 	 */
-	dev_ctl = (struct edac_device_ctl_info *)NULL;
+	p = NULL;
+	dev_ctl = edac_align_ptr(&p, sizeof(*dev_ctl), 1);
 
 	/* Calc the 'end' offset past end of ONE ctl_info structure
 	 * which will become the start of the 'instance' array
 	 */
-	dev_inst = edac_align_ptr(&dev_ctl[1], sizeof(*dev_inst));
+	dev_inst = edac_align_ptr(&p, sizeof(*dev_inst), nr_instances);
 
 	/* Calc the 'end' offset past the instance array within the ctl_info
 	 * which will become the start of the block array
 	 */
-	dev_blk = edac_align_ptr(&dev_inst[nr_instances], sizeof(*dev_blk));
+	count = nr_instances * nr_blocks;
+	dev_blk = edac_align_ptr(&p, sizeof(*dev_blk), count);
 
 	/* Calc the 'end' offset past the dev_blk array
 	 * which will become the start of the attrib array, if any.
 	 */
-	count = nr_instances * nr_blocks;
-	dev_attrib = edac_align_ptr(&dev_blk[count], sizeof(*dev_attrib));
-
-	/* Check for case of when an attribute array is specified */
-	if (nr_attrib > 0) {
-		/* calc how many nr_attrib we need */
+	/* calc how many nr_attrib we need */
+	if (nr_attrib > 0)
 		count *= nr_attrib;
+	dev_attrib = edac_align_ptr(&p, sizeof(*dev_attrib), count);
 
-		/* Calc the 'end' offset past the attributes array */
-		pvt = edac_align_ptr(&dev_attrib[count], sz_private);
-	} else {
-		/* no attribute array specified */
-		pvt = edac_align_ptr(dev_attrib, sz_private);
-	}
+	/* Calc the 'end' offset past the attributes array */
+	pvt = edac_align_ptr(&p, sz_private, 1);
 
 	/* 'pvt' now points to where the private data area is.
 	 * At this point 'pvt' (like dev_inst,dev_blk and dev_attrib)
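The reworked edac_align_ptr() drives a two-pass, single-allocation layout: a first pass over a NULL base pointer computes aligned offsets (and, as a side effect, the total size), then one allocation provides the chunk and the offsets are rebased into it. A standalone userspace model of the scheme, with hypothetical names and a simplified worst-case alignment rule (the kernel version picks the alignment from the element size):

#include <stdlib.h>

/* Simplified stand-in for edac_align_ptr(): reserve n_elems * size bytes
 * starting at *p, return the aligned start of the reservation, and advance
 * *p past it for the next caller. */
static void *align_ptr(void **p, size_t size, int n_elems)
{
	size_t align = sizeof(long long);	/* worst-case alignment */
	unsigned long r = (unsigned long)*p % align;
	char *ptr = *p;

	*p = ptr + size * n_elems;
	if (r == 0)
		return ptr;
	*p = (char *)*p + (align - r);
	return ptr + (align - r);
}

struct ctl  { int nr_instances; };
struct inst { int id; };

int main(void)
{
	void *p = NULL;	/* first pass: "pointers" are offsets from NULL */
	struct ctl *ctl = align_ptr(&p, sizeof(*ctl), 1);
	struct inst *insts = align_ptr(&p, sizeof(*insts), 4);
	char *base = calloc(1, (size_t)p);	/* (size_t)p == total size */

	if (!base)
		return 1;
	/* second pass: rebase the offsets inside the real chunk */
	ctl = (struct ctl *)(base + (unsigned long)ctl);
	insts = (struct inst *)(base + (unsigned long)insts);
	ctl->nr_instances = 4;
	insts[3].id = 3;
	free(base);
	return 0;
}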
diff --git a/drivers/edac/edac_mc.c b/drivers/edac/edac_mc.c
index feef7733fae7..10f375032e96 100644
--- a/drivers/edac/edac_mc.c
+++ b/drivers/edac/edac_mc.c
@@ -43,9 +43,26 @@ static void edac_mc_dump_channel(struct rank_info *chan)
43{ 43{
44 debugf4("\tchannel = %p\n", chan); 44 debugf4("\tchannel = %p\n", chan);
45 debugf4("\tchannel->chan_idx = %d\n", chan->chan_idx); 45 debugf4("\tchannel->chan_idx = %d\n", chan->chan_idx);
46 debugf4("\tchannel->ce_count = %d\n", chan->ce_count);
47 debugf4("\tchannel->label = '%s'\n", chan->label);
48 debugf4("\tchannel->csrow = %p\n\n", chan->csrow); 46 debugf4("\tchannel->csrow = %p\n\n", chan->csrow);
47 debugf4("\tchannel->dimm = %p\n", chan->dimm);
48}
49
50static void edac_mc_dump_dimm(struct dimm_info *dimm)
51{
52 int i;
53
54 debugf4("\tdimm = %p\n", dimm);
55 debugf4("\tdimm->label = '%s'\n", dimm->label);
56 debugf4("\tdimm->nr_pages = 0x%x\n", dimm->nr_pages);
57 debugf4("\tdimm location ");
58 for (i = 0; i < dimm->mci->n_layers; i++) {
59 printk(KERN_CONT "%d", dimm->location[i]);
60 if (i < dimm->mci->n_layers - 1)
61 printk(KERN_CONT ".");
62 }
63 printk(KERN_CONT "\n");
64 debugf4("\tdimm->grain = %d\n", dimm->grain);
65 debugf4("\tdimm->nr_pages = 0x%x\n", dimm->nr_pages);
49} 66}
50 67
51static void edac_mc_dump_csrow(struct csrow_info *csrow) 68static void edac_mc_dump_csrow(struct csrow_info *csrow)
@@ -55,7 +72,6 @@ static void edac_mc_dump_csrow(struct csrow_info *csrow)
55 debugf4("\tcsrow->first_page = 0x%lx\n", csrow->first_page); 72 debugf4("\tcsrow->first_page = 0x%lx\n", csrow->first_page);
56 debugf4("\tcsrow->last_page = 0x%lx\n", csrow->last_page); 73 debugf4("\tcsrow->last_page = 0x%lx\n", csrow->last_page);
57 debugf4("\tcsrow->page_mask = 0x%lx\n", csrow->page_mask); 74 debugf4("\tcsrow->page_mask = 0x%lx\n", csrow->page_mask);
58 debugf4("\tcsrow->nr_pages = 0x%x\n", csrow->nr_pages);
59 debugf4("\tcsrow->nr_channels = %d\n", csrow->nr_channels); 75 debugf4("\tcsrow->nr_channels = %d\n", csrow->nr_channels);
60 debugf4("\tcsrow->channels = %p\n", csrow->channels); 76 debugf4("\tcsrow->channels = %p\n", csrow->channels);
61 debugf4("\tcsrow->mci = %p\n\n", csrow->mci); 77 debugf4("\tcsrow->mci = %p\n\n", csrow->mci);
@@ -70,6 +86,8 @@ static void edac_mc_dump_mci(struct mem_ctl_info *mci)
70 debugf4("\tmci->edac_check = %p\n", mci->edac_check); 86 debugf4("\tmci->edac_check = %p\n", mci->edac_check);
71 debugf3("\tmci->nr_csrows = %d, csrows = %p\n", 87 debugf3("\tmci->nr_csrows = %d, csrows = %p\n",
72 mci->nr_csrows, mci->csrows); 88 mci->nr_csrows, mci->csrows);
89 debugf3("\tmci->nr_dimms = %d, dimms = %p\n",
90 mci->tot_dimms, mci->dimms);
73 debugf3("\tdev = %p\n", mci->dev); 91 debugf3("\tdev = %p\n", mci->dev);
74 debugf3("\tmod_name:ctl_name = %s:%s\n", mci->mod_name, mci->ctl_name); 92 debugf3("\tmod_name:ctl_name = %s:%s\n", mci->mod_name, mci->ctl_name);
75 debugf3("\tpvt_info = %p\n\n", mci->pvt_info); 93 debugf3("\tpvt_info = %p\n\n", mci->pvt_info);
@@ -101,18 +119,37 @@ const char *edac_mem_types[] = {
101}; 119};
102EXPORT_SYMBOL_GPL(edac_mem_types); 120EXPORT_SYMBOL_GPL(edac_mem_types);
103 121
104/* 'ptr' points to a possibly unaligned item X such that sizeof(X) is 'size'. 122/**
105 * Adjust 'ptr' so that its alignment is at least as stringent as what the 123 * edac_align_ptr - Prepares the pointer offsets for a single-shot allocation
106 * compiler would provide for X and return the aligned result. 124 * @p: pointer to a pointer with the memory offset to be used. At
125 * return, this will be incremented to point to the next offset
126 * @size: Size of the data structure to be reserved
127 * @n_elems: Number of elements that should be reserved
107 * 128 *
108 * If 'size' is a constant, the compiler will optimize this whole function 129 * If 'size' is a constant, the compiler will optimize this whole function
109 * down to either a no-op or the addition of a constant to the value of 'ptr'. 130 * down to either a no-op or the addition of a constant to the value of '*p'.
131 *
132 * The 'p' pointer is needed to keep advancing further in memory, to the
133 * proper offsets, when allocating a struct along with its embedded
134 * structs, as edac_device_alloc_ctl_info() does
135 * above, for example.
136 *
137 * On return, the pointer 'p' will be incremented, ready for the next call
138 * to this function.
110 */ 139 */
111void *edac_align_ptr(void *ptr, unsigned size) 140void *edac_align_ptr(void **p, unsigned size, int n_elems)
112{ 141{
113 unsigned align, r; 142 unsigned align, r;
143 void *ptr = *p;
144
145 *p += size * n_elems;
114 146
115 /* Here we assume that the alignment of a "long long" is the most 147 /*
148 * 'ptr' can possibly point to an unaligned item X such that sizeof(X)
149 * is 'size'. Adjust 'ptr' so that its alignment is at least as
150 * stringent as what the compiler would provide for X and return
151 * the aligned result.
152 * Here we assume that the alignment of a "long long" is the most
116 * stringent alignment that the compiler will ever provide by default. 153 * stringent alignment that the compiler will ever provide by default.
117 * As far as I know, this is a reasonable assumption. 154 * As far as I know, this is a reasonable assumption.
118 */ 155 */
@@ -132,14 +169,18 @@ void *edac_align_ptr(void *ptr, unsigned size)
132 if (r == 0) 169 if (r == 0)
133 return (char *)ptr; 170 return (char *)ptr;
134 171
172 *p += align - r;
173
135 return (void *)(((unsigned long)ptr) + align - r); 174 return (void *)(((unsigned long)ptr) + align - r);
136} 175}
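
A minimal userspace sketch of the same two-pass pattern (align_ptr(), struct ctl and ctl_alloc() are illustrative names, not the kernel API): pass one computes aligned offsets against an imaginary base at address 0, a single allocation then holds everything, and pass two rebases the offsets inside the real chunk.

#include <stdint.h>
#include <stdlib.h>

static void *align_ptr(void **p, size_t size, int n_elems)
{
	uintptr_t align = (size > sizeof(long))  ? sizeof(long long) :
			  (size > sizeof(int))   ? sizeof(long) :
			  (size > sizeof(short)) ? sizeof(int) :
			  (size > sizeof(char))  ? sizeof(short) : 1;
	uintptr_t off = (uintptr_t)*p;

	if (off % align)
		off += align - (off % align);	/* align this item */
	*p = (void *)(off + size * n_elems);	/* advance past it */
	return (void *)off;
}

struct ctl {
	int n;
	int *counters;
};

struct ctl *ctl_alloc(int n)
{
	void *off = NULL, *base;
	/* pass 1: offsets relative to an imaginary base at address 0 */
	struct ctl *c = align_ptr(&off, sizeof(*c), 1);
	int *counters = align_ptr(&off, sizeof(int), n);

	/* one allocation covers the struct and its embedded array */
	base = calloc(1, (size_t)(uintptr_t)off);
	if (!base)
		return NULL;
	/* pass 2: rebase the offsets inside the real chunk */
	c = (struct ctl *)((char *)base + (uintptr_t)c);
	c->counters = (int *)((char *)base + (uintptr_t)counters);
	c->n = n;
	return c;	/* a single free(c) releases everything */
}
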
137 176
138/** 177/**
139 * edac_mc_alloc: Allocate a struct mem_ctl_info structure 178 * edac_mc_alloc: Allocate and partially fill a struct mem_ctl_info structure
140 * @size_pvt: size of private storage needed 179 * @mc_num: Memory controller number
141 * @nr_csrows: Number of CWROWS needed for this MC 180 * @n_layers: Number of MC hierarchy layers
142 * @nr_chans: Number of channels for the MC 181 * @layers: Describes each layer as seen by the Memory Controller
182 * @size_pvt: size of private storage needed
183 *
143 * 184 *
144 * Everything is kmalloc'ed as one big chunk - more efficient. 185 * Everything is kmalloc'ed as one big chunk - more efficient.
145 * Only can be used if all structures have the same lifetime - otherwise 186 * Only can be used if all structures have the same lifetime - otherwise
@@ -147,32 +188,77 @@ void *edac_align_ptr(void *ptr, unsigned size)
147 * 188 *
148 * Use edac_mc_free() to free mc structures allocated by this function. 189 * Use edac_mc_free() to free mc structures allocated by this function.
149 * 190 *
191 * NOTE: drivers handle multi-rank memories in different ways: in some
192 * drivers, one multi-rank memory stick is mapped as one entry, while, in
193 * others, a single multi-rank memory stick would be mapped into several
194 * entries. Currently, this function will allocate multiple struct dimm_info
195 * in such scenarios, as grouping the multiple ranks would require driver changes.
196 *
150 * Returns: 197 * Returns:
151 * NULL allocation failed 198 * On failure: NULL
152 * struct mem_ctl_info pointer 199 * On success: struct mem_ctl_info pointer
153 */ 200 */
154struct mem_ctl_info *edac_mc_alloc(unsigned sz_pvt, unsigned nr_csrows, 201struct mem_ctl_info *edac_mc_alloc(unsigned mc_num,
155 unsigned nr_chans, int edac_index) 202 unsigned n_layers,
203 struct edac_mc_layer *layers,
204 unsigned sz_pvt)
156{ 205{
157 struct mem_ctl_info *mci; 206 struct mem_ctl_info *mci;
158 struct csrow_info *csi, *csrow; 207 struct edac_mc_layer *layer;
208 struct csrow_info *csi, *csr;
159 struct rank_info *chi, *chp, *chan; 209 struct rank_info *chi, *chp, *chan;
160 void *pvt; 210 struct dimm_info *dimm;
161 unsigned size; 211 u32 *ce_per_layer[EDAC_MAX_LAYERS], *ue_per_layer[EDAC_MAX_LAYERS];
162 int row, chn; 212 unsigned pos[EDAC_MAX_LAYERS];
163 int err; 213 unsigned size, tot_dimms = 1, count = 1;
214 unsigned tot_csrows = 1, tot_channels = 1, tot_errcount = 0;
215 void *pvt, *p, *ptr = NULL;
216 int i, j, err, row, chn, n, len;
217 bool per_rank = false;
218
219 BUG_ON(n_layers > EDAC_MAX_LAYERS || n_layers == 0);
220 /*
221 * Calculate the total number of dimms and of csrows/cschannels
222 * needed by the old API emulation mode
223 */
224 for (i = 0; i < n_layers; i++) {
225 tot_dimms *= layers[i].size;
226 if (layers[i].is_virt_csrow)
227 tot_csrows *= layers[i].size;
228 else
229 tot_channels *= layers[i].size;
230
231 if (layers[i].type == EDAC_MC_LAYER_CHIP_SELECT)
232 per_rank = true;
233 }
164 234
165 /* Figure out the offsets of the various items from the start of an mc 235 /* Figure out the offsets of the various items from the start of an mc
166 * structure. We want the alignment of each item to be at least as 236 * structure. We want the alignment of each item to be at least as
167 * stringent as what the compiler would provide if we could simply 237 * stringent as what the compiler would provide if we could simply
168 * hardcode everything into a single struct. 238 * hardcode everything into a single struct.
169 */ 239 */
170 mci = (struct mem_ctl_info *)0; 240 mci = edac_align_ptr(&ptr, sizeof(*mci), 1);
171 csi = edac_align_ptr(&mci[1], sizeof(*csi)); 241 layer = edac_align_ptr(&ptr, sizeof(*layer), n_layers);
172 chi = edac_align_ptr(&csi[nr_csrows], sizeof(*chi)); 242 csi = edac_align_ptr(&ptr, sizeof(*csi), tot_csrows);
173 pvt = edac_align_ptr(&chi[nr_chans * nr_csrows], sz_pvt); 243 chi = edac_align_ptr(&ptr, sizeof(*chi), tot_csrows * tot_channels);
244 dimm = edac_align_ptr(&ptr, sizeof(*dimm), tot_dimms);
245 for (i = 0; i < n_layers; i++) {
246 count *= layers[i].size;
247 debugf4("%s: errcount layer %d size %d\n", __func__, i, count);
248 ce_per_layer[i] = edac_align_ptr(&ptr, sizeof(u32), count);
249 ue_per_layer[i] = edac_align_ptr(&ptr, sizeof(u32), count);
250 tot_errcount += 2 * count;
251 }
252
253 debugf4("%s: allocating %d error counters\n", __func__, tot_errcount);
254 pvt = edac_align_ptr(&ptr, sz_pvt, 1);
174 size = ((unsigned long)pvt) + sz_pvt; 255 size = ((unsigned long)pvt) + sz_pvt;
175 256
257 debugf1("%s(): allocating %u bytes for mci data (%d %s, %d csrows/channels)\n",
258 __func__, size,
259 tot_dimms,
260 per_rank ? "ranks" : "dimms",
261 tot_csrows * tot_channels);
176 mci = kzalloc(size, GFP_KERNEL); 262 mci = kzalloc(size, GFP_KERNEL);
177 if (mci == NULL) 263 if (mci == NULL)
178 return NULL; 264 return NULL;
@@ -180,28 +266,103 @@ struct mem_ctl_info *edac_mc_alloc(unsigned sz_pvt, unsigned nr_csrows,
180 /* Adjust pointers so they point within the memory we just allocated 266 /* Adjust pointers so they point within the memory we just allocated
181 * rather than an imaginary chunk of memory located at address 0. 267 * rather than an imaginary chunk of memory located at address 0.
182 */ 268 */
269 layer = (struct edac_mc_layer *)(((char *)mci) + ((unsigned long)layer));
183 csi = (struct csrow_info *)(((char *)mci) + ((unsigned long)csi)); 270 csi = (struct csrow_info *)(((char *)mci) + ((unsigned long)csi));
184 chi = (struct rank_info *)(((char *)mci) + ((unsigned long)chi)); 271 chi = (struct rank_info *)(((char *)mci) + ((unsigned long)chi));
272 dimm = (struct dimm_info *)(((char *)mci) + ((unsigned long)dimm));
273 for (i = 0; i < n_layers; i++) {
274 mci->ce_per_layer[i] = (u32 *)((char *)mci + ((unsigned long)ce_per_layer[i]));
275 mci->ue_per_layer[i] = (u32 *)((char *)mci + ((unsigned long)ue_per_layer[i]));
276 }
185 pvt = sz_pvt ? (((char *)mci) + ((unsigned long)pvt)) : NULL; 277 pvt = sz_pvt ? (((char *)mci) + ((unsigned long)pvt)) : NULL;
186 278
187 /* setup index and various internal pointers */ 279 /* setup index and various internal pointers */
188 mci->mc_idx = edac_index; 280 mci->mc_idx = mc_num;
189 mci->csrows = csi; 281 mci->csrows = csi;
282 mci->dimms = dimm;
283 mci->tot_dimms = tot_dimms;
190 mci->pvt_info = pvt; 284 mci->pvt_info = pvt;
191 mci->nr_csrows = nr_csrows; 285 mci->n_layers = n_layers;
192 286 mci->layers = layer;
193 for (row = 0; row < nr_csrows; row++) { 287 memcpy(mci->layers, layers, sizeof(*layer) * n_layers);
194 csrow = &csi[row]; 288 mci->nr_csrows = tot_csrows;
195 csrow->csrow_idx = row; 289 mci->num_cschannel = tot_channels;
196 csrow->mci = mci; 290 mci->mem_is_per_rank = per_rank;
197 csrow->nr_channels = nr_chans;
198 chp = &chi[row * nr_chans];
199 csrow->channels = chp;
200 291
201 for (chn = 0; chn < nr_chans; chn++) { 292 /*
293 * Fill the csrow struct
294 */
295 for (row = 0; row < tot_csrows; row++) {
296 csr = &csi[row];
297 csr->csrow_idx = row;
298 csr->mci = mci;
299 csr->nr_channels = tot_channels;
300 chp = &chi[row * tot_channels];
301 csr->channels = chp;
302
303 for (chn = 0; chn < tot_channels; chn++) {
202 chan = &chp[chn]; 304 chan = &chp[chn];
203 chan->chan_idx = chn; 305 chan->chan_idx = chn;
204 chan->csrow = csrow; 306 chan->csrow = csr;
307 }
308 }
309
310 /*
311 * Fill the dimm struct
312 */
313 memset(&pos, 0, sizeof(pos));
314 row = 0;
315 chn = 0;
316 debugf4("%s: initializing %d %s\n", __func__, tot_dimms,
317 per_rank ? "ranks" : "dimms");
318 for (i = 0; i < tot_dimms; i++) {
319 chan = &csi[row].channels[chn];
320 dimm = EDAC_DIMM_PTR(layer, mci->dimms, n_layers,
321 pos[0], pos[1], pos[2]);
322 dimm->mci = mci;
323
324 debugf2("%s: %d: %s%zd (%d:%d:%d): row %d, chan %d\n", __func__,
325 i, per_rank ? "rank" : "dimm", (dimm - mci->dimms),
326 pos[0], pos[1], pos[2], row, chn);
327
328 /*
329 * Initialize the DIMM label and copy its location from pos[].
330 */
331 len = sizeof(dimm->label);
332 p = dimm->label;
333 n = snprintf(p, len, "mc#%u", mc_num);
334 p += n;
335 len -= n;
336 for (j = 0; j < n_layers; j++) {
337 n = snprintf(p, len, "%s#%u",
338 edac_layer_name[layers[j].type],
339 pos[j]);
340 p += n;
341 len -= n;
342 dimm->location[j] = pos[j];
343
344 if (len <= 0)
345 break;
346 }
347
348 /* Link it to the csrows old API data */
349 chan->dimm = dimm;
350 dimm->csrow = row;
351 dimm->cschannel = chn;
352
353 /* Increment csrow location */
354 row++;
355 if (row == tot_csrows) {
356 row = 0;
357 chn++;
358 }
359
360 /* Increment dimm location */
361 for (j = n_layers - 1; j >= 0; j--) {
362 pos[j]++;
363 if (pos[j] < layers[j].size)
364 break;
365 pos[j] = 0;
205 } 366 }
206 } 367 }
207 368
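
The pos[] bookkeeping above works like an odometer: the last layer increments first and carries leftward, so DIMMs are visited in the same row-major order that EDAC_DIMM_PTR() uses to compute a flat index. A standalone sketch with assumed layer sizes {2, 2, 4} (illustrative values, not taken from the patch):

#include <stdio.h>

int main(void)
{
	int size[3] = { 2, 2, 4 };	/* assumed branch/channel/slot sizes */
	int pos[3] = { 0, 0, 0 };
	int tot = size[0] * size[1] * size[2];
	int i, j;

	for (i = 0; i < tot; i++) {
		/* row-major flat index, as EDAC_DIMM_PTR() computes it */
		int index = (pos[0] * size[1] + pos[1]) * size[2] + pos[2];

		printf("dimm %2d <- (%d,%d,%d)\n", index, pos[0], pos[1], pos[2]);

		/* odometer increment: bump the last layer, carry left */
		for (j = 2; j >= 0; j--) {
			pos[j]++;
			if (pos[j] < size[j])
				break;
			pos[j] = 0;
		}
	}
	return 0;
}
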
@@ -490,7 +651,6 @@ EXPORT_SYMBOL(edac_mc_find);
490 * edac_mc_add_mc: Insert the 'mci' structure into the mci global list and 651 * edac_mc_add_mc: Insert the 'mci' structure into the mci global list and
491 * create sysfs entries associated with mci structure 652 * create sysfs entries associated with mci structure
492 * @mci: pointer to the mci structure to be added to the list 653 * @mci: pointer to the mci structure to be added to the list
493 * @mc_idx: A unique numeric identifier to be assigned to the 'mci' structure.
494 * 654 *
495 * Return: 655 * Return:
496 * 0 Success 656 * 0 Success
@@ -517,6 +677,8 @@ int edac_mc_add_mc(struct mem_ctl_info *mci)
517 edac_mc_dump_channel(&mci->csrows[i]. 677 edac_mc_dump_channel(&mci->csrows[i].
518 channels[j]); 678 channels[j]);
519 } 679 }
680 for (i = 0; i < mci->tot_dimms; i++)
681 edac_mc_dump_dimm(&mci->dimms[i]);
520 } 682 }
521#endif 683#endif
522 mutex_lock(&mem_ctls_mutex); 684 mutex_lock(&mem_ctls_mutex);
@@ -636,15 +798,19 @@ static void edac_mc_scrub_block(unsigned long page, unsigned long offset,
636int edac_mc_find_csrow_by_page(struct mem_ctl_info *mci, unsigned long page) 798int edac_mc_find_csrow_by_page(struct mem_ctl_info *mci, unsigned long page)
637{ 799{
638 struct csrow_info *csrows = mci->csrows; 800 struct csrow_info *csrows = mci->csrows;
639 int row, i; 801 int row, i, j, n;
640 802
641 debugf1("MC%d: %s(): 0x%lx\n", mci->mc_idx, __func__, page); 803 debugf1("MC%d: %s(): 0x%lx\n", mci->mc_idx, __func__, page);
642 row = -1; 804 row = -1;
643 805
644 for (i = 0; i < mci->nr_csrows; i++) { 806 for (i = 0; i < mci->nr_csrows; i++) {
645 struct csrow_info *csrow = &csrows[i]; 807 struct csrow_info *csrow = &csrows[i];
646 808 n = 0;
647 if (csrow->nr_pages == 0) 809 for (j = 0; j < csrow->nr_channels; j++) {
810 struct dimm_info *dimm = csrow->channels[j].dimm;
811 n += dimm->nr_pages;
812 }
813 if (n == 0)
648 continue; 814 continue;
649 815
650 debugf3("MC%d: %s(): first(0x%lx) page(0x%lx) last(0x%lx) " 816 debugf3("MC%d: %s(): first(0x%lx) page(0x%lx) last(0x%lx) "
@@ -670,249 +836,307 @@ int edac_mc_find_csrow_by_page(struct mem_ctl_info *mci, unsigned long page)
670} 836}
671EXPORT_SYMBOL_GPL(edac_mc_find_csrow_by_page); 837EXPORT_SYMBOL_GPL(edac_mc_find_csrow_by_page);
672 838
673/* FIXME - setable log (warning/emerg) levels */ 839const char *edac_layer_name[] = {
674/* FIXME - integrate with evlog: http://evlog.sourceforge.net/ */ 840 [EDAC_MC_LAYER_BRANCH] = "branch",
675void edac_mc_handle_ce(struct mem_ctl_info *mci, 841 [EDAC_MC_LAYER_CHANNEL] = "channel",
676 unsigned long page_frame_number, 842 [EDAC_MC_LAYER_SLOT] = "slot",
677 unsigned long offset_in_page, unsigned long syndrome, 843 [EDAC_MC_LAYER_CHIP_SELECT] = "csrow",
678 int row, int channel, const char *msg) 844};
679{ 845EXPORT_SYMBOL_GPL(edac_layer_name);
680 unsigned long remapped_page;
681 846
682 debugf3("MC%d: %s()\n", mci->mc_idx, __func__); 847static void edac_inc_ce_error(struct mem_ctl_info *mci,
848 bool enable_per_layer_report,
849 const int pos[EDAC_MAX_LAYERS])
850{
851 int i, index = 0;
683 852
684 /* FIXME - maybe make panic on INTERNAL ERROR an option */ 853 mci->ce_mc++;
685 if (row >= mci->nr_csrows || row < 0) {
686 /* something is wrong */
687 edac_mc_printk(mci, KERN_ERR,
688 "INTERNAL ERROR: row out of range "
689 "(%d >= %d)\n", row, mci->nr_csrows);
690 edac_mc_handle_ce_no_info(mci, "INTERNAL ERROR");
691 return;
692 }
693 854
694 if (channel >= mci->csrows[row].nr_channels || channel < 0) { 855 if (!enable_per_layer_report) {
695 /* something is wrong */ 856 mci->ce_noinfo_count++;
696 edac_mc_printk(mci, KERN_ERR,
697 "INTERNAL ERROR: channel out of range "
698 "(%d >= %d)\n", channel,
699 mci->csrows[row].nr_channels);
700 edac_mc_handle_ce_no_info(mci, "INTERNAL ERROR");
701 return; 857 return;
702 } 858 }
703 859
704 if (edac_mc_get_log_ce()) 860 for (i = 0; i < mci->n_layers; i++) {
705 /* FIXME - put in DIMM location */ 861 if (pos[i] < 0)
706 edac_mc_printk(mci, KERN_WARNING, 862 break;
707 "CE page 0x%lx, offset 0x%lx, grain %d, syndrome " 863 index += pos[i];
708 "0x%lx, row %d, channel %d, label \"%s\": %s\n", 864 mci->ce_per_layer[i][index]++;
709 page_frame_number, offset_in_page,
710 mci->csrows[row].grain, syndrome, row, channel,
711 mci->csrows[row].channels[channel].label, msg);
712
713 mci->ce_count++;
714 mci->csrows[row].ce_count++;
715 mci->csrows[row].channels[channel].ce_count++;
716
717 if (mci->scrub_mode & SCRUB_SW_SRC) {
718 /*
719 * Some MC's can remap memory so that it is still available
720 * at a different address when PCI devices map into memory.
721 * MC's that can't do this lose the memory where PCI devices
722 * are mapped. This mapping is MC dependent and so we call
723 * back into the MC driver for it to map the MC page to
724 * a physical (CPU) page which can then be mapped to a virtual
725 * page - which can then be scrubbed.
726 */
727 remapped_page = mci->ctl_page_to_phys ?
728 mci->ctl_page_to_phys(mci, page_frame_number) :
729 page_frame_number;
730 865
731 edac_mc_scrub_block(remapped_page, offset_in_page, 866 if (i < mci->n_layers - 1)
732 mci->csrows[row].grain); 867 index *= mci->layers[i + 1].size;
733 } 868 }
734} 869}
735EXPORT_SYMBOL_GPL(edac_mc_handle_ce);
736 870
737void edac_mc_handle_ce_no_info(struct mem_ctl_info *mci, const char *msg) 871static void edac_inc_ue_error(struct mem_ctl_info *mci,
872 bool enable_per_layer_report,
873 const int pos[EDAC_MAX_LAYERS])
738{ 874{
739 if (edac_mc_get_log_ce()) 875 int i, index = 0;
740 edac_mc_printk(mci, KERN_WARNING,
741 "CE - no information available: %s\n", msg);
742 876
743 mci->ce_noinfo_count++; 877 mci->ue_mc++;
744 mci->ce_count++;
745}
746EXPORT_SYMBOL_GPL(edac_mc_handle_ce_no_info);
747 878
748void edac_mc_handle_ue(struct mem_ctl_info *mci, 879 if (!enable_per_layer_report) {
749 unsigned long page_frame_number, 880 mci->ue_noinfo_count++;
750 unsigned long offset_in_page, int row, const char *msg)
751{
752 int len = EDAC_MC_LABEL_LEN * 4;
753 char labels[len + 1];
754 char *pos = labels;
755 int chan;
756 int chars;
757
758 debugf3("MC%d: %s()\n", mci->mc_idx, __func__);
759
760 /* FIXME - maybe make panic on INTERNAL ERROR an option */
761 if (row >= mci->nr_csrows || row < 0) {
762 /* something is wrong */
763 edac_mc_printk(mci, KERN_ERR,
764 "INTERNAL ERROR: row out of range "
765 "(%d >= %d)\n", row, mci->nr_csrows);
766 edac_mc_handle_ue_no_info(mci, "INTERNAL ERROR");
767 return; 881 return;
768 } 882 }
769 883
770 chars = snprintf(pos, len + 1, "%s", 884 for (i = 0; i < mci->n_layers; i++) {
771 mci->csrows[row].channels[0].label); 885 if (pos[i] < 0)
772 len -= chars; 886 break;
773 pos += chars; 887 index += pos[i];
888 mci->ue_per_layer[i][index]++;
774 889
775 for (chan = 1; (chan < mci->csrows[row].nr_channels) && (len > 0); 890 if (i < mci->n_layers - 1)
776 chan++) { 891 index *= mci->layers[i + 1].size;
777 chars = snprintf(pos, len + 1, ":%s",
778 mci->csrows[row].channels[chan].label);
779 len -= chars;
780 pos += chars;
781 } 892 }
893}
782 894
783 if (edac_mc_get_log_ue()) 895static void edac_ce_error(struct mem_ctl_info *mci,
784 edac_mc_printk(mci, KERN_EMERG, 896 const int pos[EDAC_MAX_LAYERS],
785 "UE page 0x%lx, offset 0x%lx, grain %d, row %d, " 897 const char *msg,
786 "labels \"%s\": %s\n", page_frame_number, 898 const char *location,
787 offset_in_page, mci->csrows[row].grain, row, 899 const char *label,
788 labels, msg); 900 const char *detail,
901 const char *other_detail,
902 const bool enable_per_layer_report,
903 const unsigned long page_frame_number,
904 const unsigned long offset_in_page,
905 u32 grain)
906{
907 unsigned long remapped_page;
789 908
790 if (edac_mc_get_panic_on_ue()) 909 if (edac_mc_get_log_ce()) {
791 panic("EDAC MC%d: UE page 0x%lx, offset 0x%lx, grain %d, " 910 if (other_detail && *other_detail)
792 "row %d, labels \"%s\": %s\n", mci->mc_idx, 911 edac_mc_printk(mci, KERN_WARNING,
793 page_frame_number, offset_in_page, 912 "CE %s on %s (%s%s - %s)\n",
794 mci->csrows[row].grain, row, labels, msg); 913 msg, label, location,
914 detail, other_detail);
915 else
916 edac_mc_printk(mci, KERN_WARNING,
917 "CE %s on %s (%s%s)\n",
918 msg, label, location,
919 detail);
920 }
921 edac_inc_ce_error(mci, enable_per_layer_report, pos);
795 922
796 mci->ue_count++; 923 if (mci->scrub_mode & SCRUB_SW_SRC) {
797 mci->csrows[row].ue_count++; 924 /*
925 * Some memory controllers (called MCs below) can remap
926 * memory so that it is still available at a different
927 * address when PCI devices map into memory.
928 * MC's that can't do this, lose the memory where PCI
929 * devices are mapped. This mapping is MC-dependent
930 * and so we call back into the MC driver for it to
931 * map the MC page to a physical (CPU) page which can
932 * then be mapped to a virtual page - which can then
933 * be scrubbed.
934 */
935 remapped_page = mci->ctl_page_to_phys ?
936 mci->ctl_page_to_phys(mci, page_frame_number) :
937 page_frame_number;
938
939 edac_mc_scrub_block(remapped_page,
940 offset_in_page, grain);
941 }
798} 942}
799EXPORT_SYMBOL_GPL(edac_mc_handle_ue);
800 943
801void edac_mc_handle_ue_no_info(struct mem_ctl_info *mci, const char *msg) 944static void edac_ue_error(struct mem_ctl_info *mci,
945 const int pos[EDAC_MAX_LAYERS],
946 const char *msg,
947 const char *location,
948 const char *label,
949 const char *detail,
950 const char *other_detail,
951 const bool enable_per_layer_report)
802{ 952{
803 if (edac_mc_get_panic_on_ue()) 953 if (edac_mc_get_log_ue()) {
804 panic("EDAC MC%d: Uncorrected Error", mci->mc_idx); 954 if (other_detail && *other_detail)
955 edac_mc_printk(mci, KERN_WARNING,
956 "UE %s on %s (%s%s - %s)\n",
957 msg, label, location, detail,
958 other_detail);
959 else
960 edac_mc_printk(mci, KERN_WARNING,
961 "UE %s on %s (%s%s)\n",
962 msg, label, location, detail);
963 }
805 964
806 if (edac_mc_get_log_ue()) 965 if (edac_mc_get_panic_on_ue()) {
807 edac_mc_printk(mci, KERN_WARNING, 966 if (other_detail && *other_detail)
808 "UE - no information available: %s\n", msg); 967 panic("UE %s on %s (%s%s - %s)\n",
809 mci->ue_noinfo_count++; 968 msg, label, location, detail, other_detail);
810 mci->ue_count++; 969 else
970 panic("UE %s on %s (%s%s)\n",
971 msg, label, location, detail);
972 }
973
974 edac_inc_ue_error(mci, enable_per_layer_report, pos);
811} 975}
812EXPORT_SYMBOL_GPL(edac_mc_handle_ue_no_info);
813 976
814/************************************************************* 977#define OTHER_LABEL " or "
815 * On Fully Buffered DIMM modules, this help function is 978void edac_mc_handle_error(const enum hw_event_mc_err_type type,
816 * called to process UE events 979 struct mem_ctl_info *mci,
817 */ 980 const unsigned long page_frame_number,
818void edac_mc_handle_fbd_ue(struct mem_ctl_info *mci, 981 const unsigned long offset_in_page,
819 unsigned int csrow, 982 const unsigned long syndrome,
820 unsigned int channela, 983 const int layer0,
821 unsigned int channelb, char *msg) 984 const int layer1,
985 const int layer2,
986 const char *msg,
987 const char *other_detail,
988 const void *mcelog)
822{ 989{
823 int len = EDAC_MC_LABEL_LEN * 4; 990 /* FIXME: too much for stack: move it to some pre-allocated area */
824 char labels[len + 1]; 991 char detail[80], location[80];
825 char *pos = labels; 992 char label[(EDAC_MC_LABEL_LEN + 1 + sizeof(OTHER_LABEL)) * mci->tot_dimms];
826 int chars; 993 char *p;
994 int row = -1, chan = -1;
995 int pos[EDAC_MAX_LAYERS] = { layer0, layer1, layer2 };
996 int i;
997 u32 grain;
998 bool enable_per_layer_report = false;
827 999
828 if (csrow >= mci->nr_csrows) { 1000 debugf3("MC%d: %s()\n", mci->mc_idx, __func__);
829 /* something is wrong */
830 edac_mc_printk(mci, KERN_ERR,
831 "INTERNAL ERROR: row out of range (%d >= %d)\n",
832 csrow, mci->nr_csrows);
833 edac_mc_handle_ue_no_info(mci, "INTERNAL ERROR");
834 return;
835 }
836 1001
837 if (channela >= mci->csrows[csrow].nr_channels) { 1002 /*
838 /* something is wrong */ 1003 * Check if the event report is consistent and if the memory
839 edac_mc_printk(mci, KERN_ERR, 1004 * location is known. If it is known, enable_per_layer_report will be
840 "INTERNAL ERROR: channel-a out of range " 1005 * true, the DIMM(s) label info will be filled and the per-layer
841 "(%d >= %d)\n", 1006 * error counters will be incremented.
842 channela, mci->csrows[csrow].nr_channels); 1007 */
843 edac_mc_handle_ue_no_info(mci, "INTERNAL ERROR"); 1008 for (i = 0; i < mci->n_layers; i++) {
844 return; 1009 if (pos[i] >= (int)mci->layers[i].size) {
1010 if (type == HW_EVENT_ERR_CORRECTED)
1011 p = "CE";
1012 else
1013 p = "UE";
1014
1015 edac_mc_printk(mci, KERN_ERR,
1016 "INTERNAL ERROR: %s value is out of range (%d >= %d)\n",
1017 edac_layer_name[mci->layers[i].type],
1018 pos[i], mci->layers[i].size);
1019 /*
1020 * Instead of just returning, let's use what's
1021 * known about the error. The increment routines and
1022 * the DIMM filter logic will do the right thing by
1023 * pointing to the likely damaged DIMMs.
1024 */
1025 pos[i] = -1;
1026 }
1027 if (pos[i] >= 0)
1028 enable_per_layer_report = true;
845 } 1029 }
846 1030
847 if (channelb >= mci->csrows[csrow].nr_channels) { 1031 /*
848 /* something is wrong */ 1032 * Get the dimm label/grain that applies to the match criteria.
849 edac_mc_printk(mci, KERN_ERR, 1033 * As the error algorithm may not be able to point to just one memory
850 "INTERNAL ERROR: channel-b out of range " 1034 * stick, the logic here will get all possible labels that could
851 "(%d >= %d)\n", 1035 * pottentially be affected by the error.
852 channelb, mci->csrows[csrow].nr_channels); 1036 * On FB-DIMM memory controllers, for uncorrected errors, it is common
853 edac_mc_handle_ue_no_info(mci, "INTERNAL ERROR"); 1037 * to know only the branch and the MC dimm, but not the channel, as
854 return; 1038 * the memory is arranged in pairs, where each memory of the pair
855 } 1039 * belongs to a separate channel within the same
1040 * branch.
1041 */
1042 grain = 0;
1043 p = label;
1044 *p = '\0';
1045 for (i = 0; i < mci->tot_dimms; i++) {
1046 struct dimm_info *dimm = &mci->dimms[i];
856 1047
857 mci->ue_count++; 1048 if (layer0 >= 0 && layer0 != dimm->location[0])
858 mci->csrows[csrow].ue_count++; 1049 continue;
1050 if (layer1 >= 0 && layer1 != dimm->location[1])
1051 continue;
1052 if (layer2 >= 0 && layer2 != dimm->location[2])
1053 continue;
859 1054
860 /* Generate the DIMM labels from the specified channels */ 1055 /* get the max grain, over the error match range */
861 chars = snprintf(pos, len + 1, "%s", 1056 if (dimm->grain > grain)
862 mci->csrows[csrow].channels[channela].label); 1057 grain = dimm->grain;
863 len -= chars;
864 pos += chars;
865 chars = snprintf(pos, len + 1, "-%s",
866 mci->csrows[csrow].channels[channelb].label);
867 1058
868 if (edac_mc_get_log_ue()) 1059 /*
869 edac_mc_printk(mci, KERN_EMERG, 1060 * If the error is memory-controller wide, there's no need to
870 "UE row %d, channel-a= %d channel-b= %d " 1061 * seek for the affected DIMMs because the whole
871 "labels \"%s\": %s\n", csrow, channela, channelb, 1062 * channel/memory controller/... may be affected.
872 labels, msg); 1063 * Also, don't show errors for empty DIMM slots.
1064 */
1065 if (enable_per_layer_report && dimm->nr_pages) {
1066 if (p != label) {
1067 strcpy(p, OTHER_LABEL);
1068 p += strlen(OTHER_LABEL);
1069 }
1070 strcpy(p, dimm->label);
1071 p += strlen(p);
1072 *p = '\0';
1073
1074 /*
1075 * get csrow/channel of the DIMM, in order to allow
1076 * incrementing the compat API counters
1077 */
1078 debugf4("%s: %s csrows map: (%d,%d)\n",
1079 __func__,
1080 mci->mem_is_per_rank ? "rank" : "dimm",
1081 dimm->csrow, dimm->cschannel);
1082
1083 if (row == -1)
1084 row = dimm->csrow;
1085 else if (row >= 0 && row != dimm->csrow)
1086 row = -2;
1087
1088 if (chan == -1)
1089 chan = dimm->cschannel;
1090 else if (chan >= 0 && chan != dimm->cschannel)
1091 chan = -2;
1092 }
1093 }
873 1094
874 if (edac_mc_get_panic_on_ue()) 1095 if (!enable_per_layer_report) {
875 panic("UE row %d, channel-a= %d channel-b= %d " 1096 strcpy(label, "any memory");
876 "labels \"%s\": %s\n", csrow, channela, 1097 } else {
877 channelb, labels, msg); 1098 debugf4("%s: csrow/channel to increment: (%d,%d)\n",
878} 1099 __func__, row, chan);
879EXPORT_SYMBOL(edac_mc_handle_fbd_ue); 1100 if (p == label)
1101 strcpy(label, "unknown memory");
1102 if (type == HW_EVENT_ERR_CORRECTED) {
1103 if (row >= 0) {
1104 mci->csrows[row].ce_count++;
1105 if (chan >= 0)
1106 mci->csrows[row].channels[chan].ce_count++;
1107 }
1108 } else
1109 if (row >= 0)
1110 mci->csrows[row].ue_count++;
1111 }
880 1112
881/************************************************************* 1113 /* Fill the RAM location data */
882 * On Fully Buffered DIMM modules, this help function is 1114 p = location;
883 * called to process CE events 1115 for (i = 0; i < mci->n_layers; i++) {
884 */ 1116 if (pos[i] < 0)
885void edac_mc_handle_fbd_ce(struct mem_ctl_info *mci, 1117 continue;
886 unsigned int csrow, unsigned int channel, char *msg)
887{
888 1118
889 /* Ensure boundary values */ 1119 p += sprintf(p, "%s:%d ",
890 if (csrow >= mci->nr_csrows) { 1120 edac_layer_name[mci->layers[i].type],
891 /* something is wrong */ 1121 pos[i]);
892 edac_mc_printk(mci, KERN_ERR,
893 "INTERNAL ERROR: row out of range (%d >= %d)\n",
894 csrow, mci->nr_csrows);
895 edac_mc_handle_ce_no_info(mci, "INTERNAL ERROR");
896 return;
897 }
898 if (channel >= mci->csrows[csrow].nr_channels) {
899 /* something is wrong */
900 edac_mc_printk(mci, KERN_ERR,
901 "INTERNAL ERROR: channel out of range (%d >= %d)\n",
902 channel, mci->csrows[csrow].nr_channels);
903 edac_mc_handle_ce_no_info(mci, "INTERNAL ERROR");
904 return;
905 } 1122 }
906 1123
907 if (edac_mc_get_log_ce()) 1124 /* Memory type dependent details about the error */
908 /* FIXME - put in DIMM location */ 1125 if (type == HW_EVENT_ERR_CORRECTED) {
909 edac_mc_printk(mci, KERN_WARNING, 1126 snprintf(detail, sizeof(detail),
910 "CE row %d, channel %d, label \"%s\": %s\n", 1127 "page:0x%lx offset:0x%lx grain:%d syndrome:0x%lx",
911 csrow, channel, 1128 page_frame_number, offset_in_page,
912 mci->csrows[csrow].channels[channel].label, msg); 1129 grain, syndrome);
1130 edac_ce_error(mci, pos, msg, location, label, detail,
1131 other_detail, enable_per_layer_report,
1132 page_frame_number, offset_in_page, grain);
1133 } else {
1134 snprintf(detail, sizeof(detail),
1135 "page:0x%lx offset:0x%lx grain:%d",
1136 page_frame_number, offset_in_page, grain);
913 1137
914 mci->ce_count++; 1138 edac_ue_error(mci, pos, msg, location, label, detail,
915 mci->csrows[csrow].ce_count++; 1139 other_detail, enable_per_layer_report);
916 mci->csrows[csrow].channels[channel].ce_count++; 1140 }
917} 1141}
918EXPORT_SYMBOL(edac_mc_handle_fbd_ce); 1142EXPORT_SYMBOL_GPL(edac_mc_handle_error);
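
The running index in edac_inc_ce_error()/edac_inc_ue_error() folds the location prefix layer by layer: the layer 0 counter is indexed by pos[0], layer 1 by pos[0]*size[1] + pos[1], and so on. A worked sketch with assumed layer sizes {2, 2, 4} and an error at position (1, 0, 3):

#include <stdio.h>

int main(void)
{
	int size[3] = { 2, 2, 4 };	/* assumed layer sizes */
	int pos[3] = { 1, 0, 3 };	/* error at branch 1, channel 0, slot 3 */
	int i, index = 0;

	for (i = 0; i < 3; i++) {
		index += pos[i];
		/* prints 1, then 1*2+0 = 2, then (1*2+0)*4+3 = 11 */
		printf("layer %d counter index: %d\n", i, index);
		if (i < 2)
			index *= size[i + 1];
	}
	return 0;
}
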
diff --git a/drivers/edac/edac_mc_sysfs.c b/drivers/edac/edac_mc_sysfs.c
index e9a28f576d14..f6a29b0eedc8 100644
--- a/drivers/edac/edac_mc_sysfs.c
+++ b/drivers/edac/edac_mc_sysfs.c
@@ -144,25 +144,31 @@ static ssize_t csrow_ce_count_show(struct csrow_info *csrow, char *data,
144static ssize_t csrow_size_show(struct csrow_info *csrow, char *data, 144static ssize_t csrow_size_show(struct csrow_info *csrow, char *data,
145 int private) 145 int private)
146{ 146{
147 return sprintf(data, "%u\n", PAGES_TO_MiB(csrow->nr_pages)); 147 int i;
148 u32 nr_pages = 0;
149
150 for (i = 0; i < csrow->nr_channels; i++)
151 nr_pages += csrow->channels[i].dimm->nr_pages;
152
153 return sprintf(data, "%u\n", PAGES_TO_MiB(nr_pages));
148} 154}
149 155
150static ssize_t csrow_mem_type_show(struct csrow_info *csrow, char *data, 156static ssize_t csrow_mem_type_show(struct csrow_info *csrow, char *data,
151 int private) 157 int private)
152{ 158{
153 return sprintf(data, "%s\n", mem_types[csrow->mtype]); 159 return sprintf(data, "%s\n", mem_types[csrow->channels[0].dimm->mtype]);
154} 160}
155 161
156static ssize_t csrow_dev_type_show(struct csrow_info *csrow, char *data, 162static ssize_t csrow_dev_type_show(struct csrow_info *csrow, char *data,
157 int private) 163 int private)
158{ 164{
159 return sprintf(data, "%s\n", dev_types[csrow->dtype]); 165 return sprintf(data, "%s\n", dev_types[csrow->channels[0].dimm->dtype]);
160} 166}
161 167
162static ssize_t csrow_edac_mode_show(struct csrow_info *csrow, char *data, 168static ssize_t csrow_edac_mode_show(struct csrow_info *csrow, char *data,
163 int private) 169 int private)
164{ 170{
165 return sprintf(data, "%s\n", edac_caps[csrow->edac_mode]); 171 return sprintf(data, "%s\n", edac_caps[csrow->channels[0].dimm->edac_mode]);
166} 172}
167 173
168/* show/store functions for DIMM Label attributes */ 174/* show/store functions for DIMM Label attributes */
@@ -170,11 +176,11 @@ static ssize_t channel_dimm_label_show(struct csrow_info *csrow,
170 char *data, int channel) 176 char *data, int channel)
171{ 177{
172 /* if field has not been initialized, there is nothing to send */ 178 /* if field has not been initialized, there is nothing to send */
173 if (!csrow->channels[channel].label[0]) 179 if (!csrow->channels[channel].dimm->label[0])
174 return 0; 180 return 0;
175 181
176 return snprintf(data, EDAC_MC_LABEL_LEN, "%s\n", 182 return snprintf(data, EDAC_MC_LABEL_LEN, "%s\n",
177 csrow->channels[channel].label); 183 csrow->channels[channel].dimm->label);
178} 184}
179 185
180static ssize_t channel_dimm_label_store(struct csrow_info *csrow, 186static ssize_t channel_dimm_label_store(struct csrow_info *csrow,
@@ -184,8 +190,8 @@ static ssize_t channel_dimm_label_store(struct csrow_info *csrow,
184 ssize_t max_size = 0; 190 ssize_t max_size = 0;
185 191
186 max_size = min((ssize_t) count, (ssize_t) EDAC_MC_LABEL_LEN - 1); 192 max_size = min((ssize_t) count, (ssize_t) EDAC_MC_LABEL_LEN - 1);
187 strncpy(csrow->channels[channel].label, data, max_size); 193 strncpy(csrow->channels[channel].dimm->label, data, max_size);
188 csrow->channels[channel].label[max_size] = '\0'; 194 csrow->channels[channel].dimm->label[max_size] = '\0';
189 195
190 return max_size; 196 return max_size;
191} 197}
@@ -419,8 +425,8 @@ static ssize_t mci_reset_counters_store(struct mem_ctl_info *mci,
419 425
420 mci->ue_noinfo_count = 0; 426 mci->ue_noinfo_count = 0;
421 mci->ce_noinfo_count = 0; 427 mci->ce_noinfo_count = 0;
422 mci->ue_count = 0; 428 mci->ue_mc = 0;
423 mci->ce_count = 0; 429 mci->ce_mc = 0;
424 430
425 for (row = 0; row < mci->nr_csrows; row++) { 431 for (row = 0; row < mci->nr_csrows; row++) {
426 struct csrow_info *ri = &mci->csrows[row]; 432 struct csrow_info *ri = &mci->csrows[row];
@@ -489,12 +495,12 @@ static ssize_t mci_sdram_scrub_rate_show(struct mem_ctl_info *mci, char *data)
489/* default attribute files for the MCI object */ 495/* default attribute files for the MCI object */
490static ssize_t mci_ue_count_show(struct mem_ctl_info *mci, char *data) 496static ssize_t mci_ue_count_show(struct mem_ctl_info *mci, char *data)
491{ 497{
492 return sprintf(data, "%d\n", mci->ue_count); 498 return sprintf(data, "%d\n", mci->ue_mc);
493} 499}
494 500
495static ssize_t mci_ce_count_show(struct mem_ctl_info *mci, char *data) 501static ssize_t mci_ce_count_show(struct mem_ctl_info *mci, char *data)
496{ 502{
497 return sprintf(data, "%d\n", mci->ce_count); 503 return sprintf(data, "%d\n", mci->ce_mc);
498} 504}
499 505
500static ssize_t mci_ce_noinfo_show(struct mem_ctl_info *mci, char *data) 506static ssize_t mci_ce_noinfo_show(struct mem_ctl_info *mci, char *data)
@@ -519,16 +525,16 @@ static ssize_t mci_ctl_name_show(struct mem_ctl_info *mci, char *data)
519 525
520static ssize_t mci_size_mb_show(struct mem_ctl_info *mci, char *data) 526static ssize_t mci_size_mb_show(struct mem_ctl_info *mci, char *data)
521{ 527{
522 int total_pages, csrow_idx; 528 int total_pages = 0, csrow_idx, j;
523 529
524 for (total_pages = csrow_idx = 0; csrow_idx < mci->nr_csrows; 530 for (csrow_idx = 0; csrow_idx < mci->nr_csrows; csrow_idx++) {
525 csrow_idx++) {
526 struct csrow_info *csrow = &mci->csrows[csrow_idx]; 531 struct csrow_info *csrow = &mci->csrows[csrow_idx];
527 532
528 if (!csrow->nr_pages) 533 for (j = 0; j < csrow->nr_channels; j++) {
529 continue; 534 struct dimm_info *dimm = csrow->channels[j].dimm;
530 535
531 total_pages += csrow->nr_pages; 536 total_pages += dimm->nr_pages;
537 }
532 } 538 }
533 539
534 return sprintf(data, "%u\n", PAGES_TO_MiB(total_pages)); 540 return sprintf(data, "%u\n", PAGES_TO_MiB(total_pages));
@@ -900,7 +906,7 @@ static void edac_remove_mci_instance_attributes(struct mem_ctl_info *mci,
900 */ 906 */
901int edac_create_sysfs_mci_device(struct mem_ctl_info *mci) 907int edac_create_sysfs_mci_device(struct mem_ctl_info *mci)
902{ 908{
903 int i; 909 int i, j;
904 int err; 910 int err;
905 struct csrow_info *csrow; 911 struct csrow_info *csrow;
906 struct kobject *kobj_mci = &mci->edac_mci_kobj; 912 struct kobject *kobj_mci = &mci->edac_mci_kobj;
@@ -934,10 +940,13 @@ int edac_create_sysfs_mci_device(struct mem_ctl_info *mci)
934 /* Make directories for each CSROW object under the mc<id> kobject 940 /* Make directories for each CSROW object under the mc<id> kobject
935 */ 941 */
936 for (i = 0; i < mci->nr_csrows; i++) { 942 for (i = 0; i < mci->nr_csrows; i++) {
943 int nr_pages = 0;
944
937 csrow = &mci->csrows[i]; 945 csrow = &mci->csrows[i];
946 for (j = 0; j < csrow->nr_channels; j++)
947 nr_pages += csrow->channels[j].dimm->nr_pages;
938 948
939 /* Only expose populated CSROWs */ 949 if (nr_pages > 0) {
940 if (csrow->nr_pages > 0) {
941 err = edac_create_csrow_object(mci, csrow, i); 950 err = edac_create_csrow_object(mci, csrow, i);
942 if (err) { 951 if (err) {
943 debugf1("%s() failure: create csrow %d obj\n", 952 debugf1("%s() failure: create csrow %d obj\n",
@@ -949,12 +958,15 @@ int edac_create_sysfs_mci_device(struct mem_ctl_info *mci)
949 958
950 return 0; 959 return 0;
951 960
952 /* CSROW error: backout what has already been registered, */
953fail1: 961fail1:
954 for (i--; i >= 0; i--) { 962 for (i--; i >= 0; i--) {
955 if (csrow->nr_pages > 0) { 963 int nr_pages = 0;
964
965 csrow = &mci->csrows[i];
966 for (j = 0; j < csrow->nr_channels; j++)
967 nr_pages += csrow->channels[j].dimm->nr_pages;
968 if (nr_pages > 0)
956 kobject_put(&mci->csrows[i].kobj); 969 kobject_put(&mci->csrows[i].kobj);
957 }
958 } 970 }
959 971
960 /* remove the mci instance's attributes, if any */ 972 /* remove the mci instance's attributes, if any */
@@ -973,14 +985,20 @@ fail0:
973 */ 985 */
974void edac_remove_sysfs_mci_device(struct mem_ctl_info *mci) 986void edac_remove_sysfs_mci_device(struct mem_ctl_info *mci)
975{ 987{
976 int i; 988 struct csrow_info *csrow;
989 int i, j;
977 990
978 debugf0("%s()\n", __func__); 991 debugf0("%s()\n", __func__);
979 992
980 /* remove all csrow kobjects */ 993 /* remove all csrow kobjects */
981 debugf4("%s() unregister this mci kobj\n", __func__); 994 debugf4("%s() unregister this mci kobj\n", __func__);
982 for (i = 0; i < mci->nr_csrows; i++) { 995 for (i = 0; i < mci->nr_csrows; i++) {
983 if (mci->csrows[i].nr_pages > 0) { 996 int nr_pages = 0;
997
998 csrow = &mci->csrows[i];
999 for (j = 0; j < csrow->nr_channels; j++)
1000 nr_pages += csrow->channels[j].dimm->nr_pages;
1001 if (nr_pages > 0) {
984 debugf0("%s() unreg csrow-%d\n", __func__, i); 1002 debugf0("%s() unreg csrow-%d\n", __func__, i);
985 kobject_put(&mci->csrows[i].kobj); 1003 kobject_put(&mci->csrows[i].kobj);
986 } 1004 }
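
The per-csrow page count is now derived by summing the per-channel DIMM page counts; the same loop appears in csrow_size_show(), mci_size_mb_show() and the csrow create/remove paths above. A hypothetical helper that could factor it out (kernel context assumed, types from edac_core.h; not part of this patch):

static inline u32 csrow_nr_pages(const struct csrow_info *csrow)
{
	u32 nr_pages = 0;
	int i;

	for (i = 0; i < csrow->nr_channels; i++)
		nr_pages += csrow->channels[i].dimm->nr_pages;
	return nr_pages;
}
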
diff --git a/drivers/edac/edac_module.h b/drivers/edac/edac_module.h
index 00f81b47a51f..0ea7d14cb930 100644
--- a/drivers/edac/edac_module.h
+++ b/drivers/edac/edac_module.h
@@ -50,7 +50,7 @@ extern void edac_device_reset_delay_period(struct edac_device_ctl_info
50 *edac_dev, unsigned long value); 50 *edac_dev, unsigned long value);
51extern void edac_mc_reset_delay_period(int value); 51extern void edac_mc_reset_delay_period(int value);
52 52
53extern void *edac_align_ptr(void *ptr, unsigned size); 53extern void *edac_align_ptr(void **p, unsigned size, int n_elems);
54 54
55/* 55/*
56 * EDAC PCI functions 56 * EDAC PCI functions
diff --git a/drivers/edac/edac_pci.c b/drivers/edac/edac_pci.c
index 63af1c5673d1..f1ac86649886 100644
--- a/drivers/edac/edac_pci.c
+++ b/drivers/edac/edac_pci.c
@@ -42,13 +42,13 @@ struct edac_pci_ctl_info *edac_pci_alloc_ctl_info(unsigned int sz_pvt,
42 const char *edac_pci_name) 42 const char *edac_pci_name)
43{ 43{
44 struct edac_pci_ctl_info *pci; 44 struct edac_pci_ctl_info *pci;
45 void *pvt; 45 void *p = NULL, *pvt;
46 unsigned int size; 46 unsigned int size;
47 47
48 debugf1("%s()\n", __func__); 48 debugf1("%s()\n", __func__);
49 49
50 pci = (struct edac_pci_ctl_info *)0; 50 pci = edac_align_ptr(&p, sizeof(*pci), 1);
51 pvt = edac_align_ptr(&pci[1], sz_pvt); 51 pvt = edac_align_ptr(&p, sz_pvt, 1);
52 size = ((unsigned long)pvt) + sz_pvt; 52 size = ((unsigned long)pvt) + sz_pvt;
53 53
54 /* Alloc the needed control struct memory */ 54 /* Alloc the needed control struct memory */
diff --git a/drivers/edac/i3000_edac.c b/drivers/edac/i3000_edac.c
index 277689a68841..8ad1744faacd 100644
--- a/drivers/edac/i3000_edac.c
+++ b/drivers/edac/i3000_edac.c
@@ -245,7 +245,9 @@ static int i3000_process_error_info(struct mem_ctl_info *mci,
245 return 1; 245 return 1;
246 246
247 if ((info->errsts ^ info->errsts2) & I3000_ERRSTS_BITS) { 247 if ((info->errsts ^ info->errsts2) & I3000_ERRSTS_BITS) {
248 edac_mc_handle_ce_no_info(mci, "UE overwrote CE"); 248 edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 0, 0, 0,
249 -1, -1, -1,
250 "UE overwrote CE", "", NULL);
249 info->errsts = info->errsts2; 251 info->errsts = info->errsts2;
250 } 252 }
251 253
@@ -256,10 +258,15 @@ static int i3000_process_error_info(struct mem_ctl_info *mci,
256 row = edac_mc_find_csrow_by_page(mci, pfn); 258 row = edac_mc_find_csrow_by_page(mci, pfn);
257 259
258 if (info->errsts & I3000_ERRSTS_UE) 260 if (info->errsts & I3000_ERRSTS_UE)
259 edac_mc_handle_ue(mci, pfn, offset, row, "i3000 UE"); 261 edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci,
262 pfn, offset, 0,
263 row, -1, -1,
264 "i3000 UE", "", NULL);
260 else 265 else
261 edac_mc_handle_ce(mci, pfn, offset, info->derrsyn, row, 266 edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
262 multi_chan ? channel : 0, "i3000 CE"); 267 pfn, offset, info->derrsyn,
268 row, multi_chan ? channel : 0, -1,
269 "i3000 CE", "", NULL);
263 270
264 return 1; 271 return 1;
265} 272}
@@ -304,9 +311,10 @@ static int i3000_is_interleaved(const unsigned char *c0dra,
304static int i3000_probe1(struct pci_dev *pdev, int dev_idx) 311static int i3000_probe1(struct pci_dev *pdev, int dev_idx)
305{ 312{
306 int rc; 313 int rc;
307 int i; 314 int i, j;
308 struct mem_ctl_info *mci = NULL; 315 struct mem_ctl_info *mci = NULL;
309 unsigned long last_cumul_size; 316 struct edac_mc_layer layers[2];
317 unsigned long last_cumul_size, nr_pages;
310 int interleaved, nr_channels; 318 int interleaved, nr_channels;
311 unsigned char dra[I3000_RANKS / 2], drb[I3000_RANKS]; 319 unsigned char dra[I3000_RANKS / 2], drb[I3000_RANKS];
312 unsigned char *c0dra = dra, *c1dra = &dra[I3000_RANKS_PER_CHANNEL / 2]; 320 unsigned char *c0dra = dra, *c1dra = &dra[I3000_RANKS_PER_CHANNEL / 2];
@@ -347,7 +355,14 @@ static int i3000_probe1(struct pci_dev *pdev, int dev_idx)
347 */ 355 */
348 interleaved = i3000_is_interleaved(c0dra, c1dra, c0drb, c1drb); 356 interleaved = i3000_is_interleaved(c0dra, c1dra, c0drb, c1drb);
349 nr_channels = interleaved ? 2 : 1; 357 nr_channels = interleaved ? 2 : 1;
350 mci = edac_mc_alloc(0, I3000_RANKS / nr_channels, nr_channels, 0); 358
359 layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
360 layers[0].size = I3000_RANKS / nr_channels;
361 layers[0].is_virt_csrow = true;
362 layers[1].type = EDAC_MC_LAYER_CHANNEL;
363 layers[1].size = nr_channels;
364 layers[1].is_virt_csrow = false;
365 mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers, 0);
351 if (!mci) 366 if (!mci)
352 return -ENOMEM; 367 return -ENOMEM;
353 368
@@ -386,19 +401,23 @@ static int i3000_probe1(struct pci_dev *pdev, int dev_idx)
386 cumul_size <<= 1; 401 cumul_size <<= 1;
387 debugf3("MC: %s(): (%d) cumul_size 0x%x\n", 402 debugf3("MC: %s(): (%d) cumul_size 0x%x\n",
388 __func__, i, cumul_size); 403 __func__, i, cumul_size);
389 if (cumul_size == last_cumul_size) { 404 if (cumul_size == last_cumul_size)
390 csrow->mtype = MEM_EMPTY;
391 continue; 405 continue;
392 }
393 406
394 csrow->first_page = last_cumul_size; 407 csrow->first_page = last_cumul_size;
395 csrow->last_page = cumul_size - 1; 408 csrow->last_page = cumul_size - 1;
396 csrow->nr_pages = cumul_size - last_cumul_size; 409 nr_pages = cumul_size - last_cumul_size;
397 last_cumul_size = cumul_size; 410 last_cumul_size = cumul_size;
398 csrow->grain = I3000_DEAP_GRAIN; 411
399 csrow->mtype = MEM_DDR2; 412 for (j = 0; j < nr_channels; j++) {
400 csrow->dtype = DEV_UNKNOWN; 413 struct dimm_info *dimm = csrow->channels[j].dimm;
401 csrow->edac_mode = EDAC_UNKNOWN; 414
415 dimm->nr_pages = nr_pages / nr_channels;
416 dimm->grain = I3000_DEAP_GRAIN;
417 dimm->mtype = MEM_DDR2;
418 dimm->dtype = DEV_UNKNOWN;
419 dimm->edac_mode = EDAC_UNKNOWN;
420 }
402 } 421 }
403 422
404 /* 423 /*
diff --git a/drivers/edac/i3200_edac.c b/drivers/edac/i3200_edac.c
index 046808c6357d..bbe43ef71823 100644
--- a/drivers/edac/i3200_edac.c
+++ b/drivers/edac/i3200_edac.c
@@ -23,6 +23,7 @@
23 23
24#define PCI_DEVICE_ID_INTEL_3200_HB 0x29f0 24#define PCI_DEVICE_ID_INTEL_3200_HB 0x29f0
25 25
26#define I3200_DIMMS 4
26#define I3200_RANKS 8 27#define I3200_RANKS 8
27#define I3200_RANKS_PER_CHANNEL 4 28#define I3200_RANKS_PER_CHANNEL 4
28#define I3200_CHANNELS 2 29#define I3200_CHANNELS 2
@@ -217,21 +218,25 @@ static void i3200_process_error_info(struct mem_ctl_info *mci,
217 return; 218 return;
218 219
219 if ((info->errsts ^ info->errsts2) & I3200_ERRSTS_BITS) { 220 if ((info->errsts ^ info->errsts2) & I3200_ERRSTS_BITS) {
220 edac_mc_handle_ce_no_info(mci, "UE overwrote CE"); 221 edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 0, 0, 0,
222 -1, -1, -1, "UE overwrote CE", "", NULL);
221 info->errsts = info->errsts2; 223 info->errsts = info->errsts2;
222 } 224 }
223 225
224 for (channel = 0; channel < nr_channels; channel++) { 226 for (channel = 0; channel < nr_channels; channel++) {
225 log = info->eccerrlog[channel]; 227 log = info->eccerrlog[channel];
226 if (log & I3200_ECCERRLOG_UE) { 228 if (log & I3200_ECCERRLOG_UE) {
227 edac_mc_handle_ue(mci, 0, 0, 229 edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci,
228 eccerrlog_row(channel, log), 230 0, 0, 0,
229 "i3200 UE"); 231 eccerrlog_row(channel, log),
232 -1, -1,
233 "i3000 UE", "", NULL);
230 } else if (log & I3200_ECCERRLOG_CE) { 234 } else if (log & I3200_ECCERRLOG_CE) {
231 edac_mc_handle_ce(mci, 0, 0, 235 edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
232 eccerrlog_syndrome(log), 236 0, 0, eccerrlog_syndrome(log),
233 eccerrlog_row(channel, log), 0, 237 eccerrlog_row(channel, log),
234 "i3200 CE"); 238 -1, -1,
239 "i3200 CE", "", NULL);
235 } 240 }
236 } 241 }
237} 242}
@@ -319,9 +324,9 @@ static unsigned long drb_to_nr_pages(
319static int i3200_probe1(struct pci_dev *pdev, int dev_idx) 324static int i3200_probe1(struct pci_dev *pdev, int dev_idx)
320{ 325{
321 int rc; 326 int rc;
322 int i; 327 int i, j;
323 struct mem_ctl_info *mci = NULL; 328 struct mem_ctl_info *mci = NULL;
324 unsigned long last_page; 329 struct edac_mc_layer layers[2];
325 u16 drbs[I3200_CHANNELS][I3200_RANKS_PER_CHANNEL]; 330 u16 drbs[I3200_CHANNELS][I3200_RANKS_PER_CHANNEL];
326 bool stacked; 331 bool stacked;
327 void __iomem *window; 332 void __iomem *window;
@@ -336,8 +341,14 @@ static int i3200_probe1(struct pci_dev *pdev, int dev_idx)
336 i3200_get_drbs(window, drbs); 341 i3200_get_drbs(window, drbs);
337 nr_channels = how_many_channels(pdev); 342 nr_channels = how_many_channels(pdev);
338 343
339 mci = edac_mc_alloc(sizeof(struct i3200_priv), I3200_RANKS, 344 layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
340 nr_channels, 0); 345 layers[0].size = I3200_DIMMS;
346 layers[0].is_virt_csrow = true;
347 layers[1].type = EDAC_MC_LAYER_CHANNEL;
348 layers[1].size = nr_channels;
349 layers[1].is_virt_csrow = false;
350 mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers,
351 sizeof(struct i3200_priv));
341 if (!mci) 352 if (!mci)
342 return -ENOMEM; 353 return -ENOMEM;
343 354
@@ -366,7 +377,6 @@ static int i3200_probe1(struct pci_dev *pdev, int dev_idx)
366 * cumulative; the last one will contain the total memory 377 * cumulative; the last one will contain the total memory
367 * contained in all ranks. 378 * contained in all ranks.
368 */ 379 */
369 last_page = -1UL;
370 for (i = 0; i < mci->nr_csrows; i++) { 380 for (i = 0; i < mci->nr_csrows; i++) {
371 unsigned long nr_pages; 381 unsigned long nr_pages;
372 struct csrow_info *csrow = &mci->csrows[i]; 382 struct csrow_info *csrow = &mci->csrows[i];
@@ -375,20 +385,18 @@ static int i3200_probe1(struct pci_dev *pdev, int dev_idx)
375 i / I3200_RANKS_PER_CHANNEL, 385 i / I3200_RANKS_PER_CHANNEL,
376 i % I3200_RANKS_PER_CHANNEL); 386 i % I3200_RANKS_PER_CHANNEL);
377 387
378 if (nr_pages == 0) { 388 if (nr_pages == 0)
379 csrow->mtype = MEM_EMPTY;
380 continue; 389 continue;
381 }
382 390
383 csrow->first_page = last_page + 1; 391 for (j = 0; j < nr_channels; j++) {
384 last_page += nr_pages; 392 struct dimm_info *dimm = csrow->channels[j].dimm;
385 csrow->last_page = last_page;
386 csrow->nr_pages = nr_pages;
387 393
388 csrow->grain = nr_pages << PAGE_SHIFT; 394 dimm->nr_pages = nr_pages / nr_channels;
389 csrow->mtype = MEM_DDR2; 395 dimm->grain = nr_pages << PAGE_SHIFT;
390 csrow->dtype = DEV_UNKNOWN; 396 dimm->mtype = MEM_DDR2;
391 csrow->edac_mode = EDAC_UNKNOWN; 397 dimm->dtype = DEV_UNKNOWN;
398 dimm->edac_mode = EDAC_UNKNOWN;
399 }
392 } 400 }
393 401
394 i3200_clear_error_info(mci); 402 i3200_clear_error_info(mci);
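
Note how an interleaved csrow is split across the per-channel DIMM entries: dividing nr_pages by nr_channels means, for example, that a 2 GiB rank (524288 pages of 4 KiB) interleaved over two channels is recorded as two dimm_info entries of 262144 pages each, so the totals summed back by the sysfs code above stay correct.
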
diff --git a/drivers/edac/i5000_edac.c b/drivers/edac/i5000_edac.c
index a2680d8e744b..11ea835f155a 100644
--- a/drivers/edac/i5000_edac.c
+++ b/drivers/edac/i5000_edac.c
@@ -270,7 +270,8 @@
270#define MTR3 0x8C 270#define MTR3 0x8C
271 271
272#define NUM_MTRS 4 272#define NUM_MTRS 4
273#define CHANNELS_PER_BRANCH (2) 273#define CHANNELS_PER_BRANCH 2
274#define MAX_BRANCHES 2
274 275
275/* Defines to extract the various fields from the 276/* Defines to extract the various fields from the
276 * MTRx - Memory Technology Registers 277 * MTRx - Memory Technology Registers
@@ -473,7 +474,6 @@ static void i5000_process_fatal_error_info(struct mem_ctl_info *mci,
473 char msg[EDAC_MC_LABEL_LEN + 1 + 160]; 474 char msg[EDAC_MC_LABEL_LEN + 1 + 160];
474 char *specific = NULL; 475 char *specific = NULL;
475 u32 allErrors; 476 u32 allErrors;
476 int branch;
477 int channel; 477 int channel;
478 int bank; 478 int bank;
479 int rank; 479 int rank;
@@ -485,8 +485,7 @@ static void i5000_process_fatal_error_info(struct mem_ctl_info *mci,
485 if (!allErrors) 485 if (!allErrors)
486 return; /* if no error, return now */ 486 return; /* if no error, return now */
487 487
488 branch = EXTRACT_FBDCHAN_INDX(info->ferr_fat_fbd); 488 channel = EXTRACT_FBDCHAN_INDX(info->ferr_fat_fbd);
489 channel = branch;
490 489
491 /* Use the NON-Recoverable macros to extract data */ 490 /* Use the NON-Recoverable macros to extract data */
492 bank = NREC_BANK(info->nrecmema); 491 bank = NREC_BANK(info->nrecmema);
@@ -495,9 +494,9 @@ static void i5000_process_fatal_error_info(struct mem_ctl_info *mci,
495 ras = NREC_RAS(info->nrecmemb); 494 ras = NREC_RAS(info->nrecmemb);
496 cas = NREC_CAS(info->nrecmemb); 495 cas = NREC_CAS(info->nrecmemb);
497 496
498 debugf0("\t\tCSROW= %d Channels= %d,%d (Branch= %d " 497 debugf0("\t\tCSROW= %d Channel= %d "
499 "DRAM Bank= %d rdwr= %s ras= %d cas= %d)\n", 498 "(DRAM Bank= %d rdwr= %s ras= %d cas= %d)\n",
500 rank, channel, channel + 1, branch >> 1, bank, 499 rank, channel, bank,
501 rdwr ? "Write" : "Read", ras, cas); 500 rdwr ? "Write" : "Read", ras, cas);
502 501
503 /* Only 1 bit will be on */ 502 /* Only 1 bit will be on */
@@ -533,13 +532,14 @@ static void i5000_process_fatal_error_info(struct mem_ctl_info *mci,
533 532
534 /* Form out message */ 533 /* Form out message */
535 snprintf(msg, sizeof(msg), 534 snprintf(msg, sizeof(msg),
536 "(Branch=%d DRAM-Bank=%d RDWR=%s RAS=%d CAS=%d " 535 "Bank=%d RAS=%d CAS=%d FATAL Err=0x%x (%s)",
537 "FATAL Err=0x%x (%s))", 536 bank, ras, cas, allErrors, specific);
538 branch >> 1, bank, rdwr ? "Write" : "Read", ras, cas,
539 allErrors, specific);
540 537
541 /* Call the helper to output message */ 538 /* Call the helper to output message */
542 edac_mc_handle_fbd_ue(mci, rank, channel, channel + 1, msg); 539 edac_mc_handle_error(HW_EVENT_ERR_FATAL, mci, 0, 0, 0,
540 channel >> 1, channel & 1, rank,
541 rdwr ? "Write error" : "Read error",
542 msg, NULL);
543} 543}
544 544
545/* 545/*
@@ -633,13 +633,14 @@ static void i5000_process_nonfatal_error_info(struct mem_ctl_info *mci,
633 633
634 /* Form out message */ 634 /* Form out message */
635 snprintf(msg, sizeof(msg), 635 snprintf(msg, sizeof(msg),
636 "(Branch=%d DRAM-Bank=%d RDWR=%s RAS=%d " 636 "Rank=%d Bank=%d RAS=%d CAS=%d, UE Err=0x%x (%s)",
637 "CAS=%d, UE Err=0x%x (%s))", 637 rank, bank, ras, cas, ue_errors, specific);
638 branch >> 1, bank, rdwr ? "Write" : "Read", ras, cas,
639 ue_errors, specific);
640 638
641 /* Call the helper to output message */ 639 /* Call the helper to output message */
642 edac_mc_handle_fbd_ue(mci, rank, channel, channel + 1, msg); 640 edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 0, 0, 0,
641 channel >> 1, -1, rank,
642 rdwr ? "Write error" : "Read error",
643 msg, NULL);
643 } 644 }
644 645
645 /* Check correctable errors */ 646 /* Check correctable errors */
@@ -685,13 +686,16 @@ static void i5000_process_nonfatal_error_info(struct mem_ctl_info *mci,
685 686
686 /* Form out message */ 687 /* Form out message */
687 snprintf(msg, sizeof(msg), 688 snprintf(msg, sizeof(msg),
688 "(Branch=%d DRAM-Bank=%d RDWR=%s RAS=%d " 689 "Rank=%d Bank=%d RDWR=%s RAS=%d "
689 "CAS=%d, CE Err=0x%x (%s))", branch >> 1, bank, 690 "CAS=%d, CE Err=0x%x (%s))", branch >> 1, bank,
690 rdwr ? "Write" : "Read", ras, cas, ce_errors, 691 rdwr ? "Write" : "Read", ras, cas, ce_errors,
691 specific); 692 specific);
692 693
693 /* Call the helper to output message */ 694 /* Call the helper to output message */
694 edac_mc_handle_fbd_ce(mci, rank, channel, msg); 695 edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 0, 0, 0,
696 channel >> 1, channel % 2, rank,
697 rdwr ? "Write error" : "Read error",
698 msg, NULL);
695 } 699 }
696 700
697 if (!misc_messages) 701 if (!misc_messages)
@@ -731,11 +735,12 @@ static void i5000_process_nonfatal_error_info(struct mem_ctl_info *mci,
731 735
732 /* Form out message */ 736 /* Form out message */
733 snprintf(msg, sizeof(msg), 737 snprintf(msg, sizeof(msg),
734 "(Branch=%d Err=%#x (%s))", branch >> 1, 738 "Err=%#x (%s)", misc_errors, specific);
735 misc_errors, specific);
736 739
737 /* Call the helper to output message */ 740 /* Call the helper to output message */
738 edac_mc_handle_fbd_ce(mci, 0, 0, msg); 741 edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 0, 0, 0,
742 branch >> 1, -1, -1,
743 "Misc error", msg, NULL);
739 } 744 }
740} 745}
741 746
@@ -956,14 +961,14 @@ static int determine_amb_present_reg(struct i5000_pvt *pvt, int channel)
956 * 961 *
957 * return the proper MTR register as determine by the csrow and channel desired 962 * return the proper MTR register as determine by the csrow and channel desired
958 */ 963 */
959static int determine_mtr(struct i5000_pvt *pvt, int csrow, int channel) 964static int determine_mtr(struct i5000_pvt *pvt, int slot, int channel)
960{ 965{
961 int mtr; 966 int mtr;
962 967
963 if (channel < CHANNELS_PER_BRANCH) 968 if (channel < CHANNELS_PER_BRANCH)
964 mtr = pvt->b0_mtr[csrow >> 1]; 969 mtr = pvt->b0_mtr[slot];
965 else 970 else
966 mtr = pvt->b1_mtr[csrow >> 1]; 971 mtr = pvt->b1_mtr[slot];
967 972
968 return mtr; 973 return mtr;
969} 974}
@@ -988,37 +993,34 @@ static void decode_mtr(int slot_row, u16 mtr)
988 debugf2("\t\tNUMCOL: %s\n", numcol_toString[MTR_DIMM_COLS(mtr)]); 993 debugf2("\t\tNUMCOL: %s\n", numcol_toString[MTR_DIMM_COLS(mtr)]);
989} 994}
990 995
991static void handle_channel(struct i5000_pvt *pvt, int csrow, int channel, 996static void handle_channel(struct i5000_pvt *pvt, int slot, int channel,
992 struct i5000_dimm_info *dinfo) 997 struct i5000_dimm_info *dinfo)
993{ 998{
994 int mtr; 999 int mtr;
995 int amb_present_reg; 1000 int amb_present_reg;
996 int addrBits; 1001 int addrBits;
997 1002
998 mtr = determine_mtr(pvt, csrow, channel); 1003 mtr = determine_mtr(pvt, slot, channel);
999 if (MTR_DIMMS_PRESENT(mtr)) { 1004 if (MTR_DIMMS_PRESENT(mtr)) {
1000 amb_present_reg = determine_amb_present_reg(pvt, channel); 1005 amb_present_reg = determine_amb_present_reg(pvt, channel);
1001 1006
1002 /* Determine if there is a DIMM present in this DIMM slot */ 1007 /* Determine if there is a DIMM present in this DIMM slot */
1003 if (amb_present_reg & (1 << (csrow >> 1))) { 1008 if (amb_present_reg) {
1004 dinfo->dual_rank = MTR_DIMM_RANK(mtr); 1009 dinfo->dual_rank = MTR_DIMM_RANK(mtr);
1005 1010
1006 if (!((dinfo->dual_rank == 0) && 1011 /* Start with the number of bits for a Bank
1007 ((csrow & 0x1) == 0x1))) { 1012 * on the DRAM */
1008 /* Start with the number of bits for a Bank 1013 addrBits = MTR_DRAM_BANKS_ADDR_BITS(mtr);
1009 * on the DRAM */ 1014 /* Add the number of ROW bits */
1010 addrBits = MTR_DRAM_BANKS_ADDR_BITS(mtr); 1015 addrBits += MTR_DIMM_ROWS_ADDR_BITS(mtr);
1011 /* Add the number of ROW bits */ 1016 /* add the number of COLUMN bits */
1012 addrBits += MTR_DIMM_ROWS_ADDR_BITS(mtr); 1017 addrBits += MTR_DIMM_COLS_ADDR_BITS(mtr);
1013 /* add the number of COLUMN bits */ 1018
1014 addrBits += MTR_DIMM_COLS_ADDR_BITS(mtr); 1019 addrBits += 6; /* add 64 bits per DIMM */
1015 1020 addrBits -= 20; /* divide by 2^^20 */
1016 addrBits += 6; /* add 64 bits per DIMM */ 1021 addrBits -= 3; /* 8 bits per bytes */
1017 addrBits -= 20; /* divide by 2^^20 */ 1022
1018 addrBits -= 3; /* 8 bits per bytes */ 1023 dinfo->megabytes = 1 << addrBits;
1019
1020 dinfo->megabytes = 1 << addrBits;
1021 }
1022 } 1024 }
1023 } 1025 }
1024} 1026}
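The size arithmetic in handle_channel() is easier to verify with a worked example (the MTR field values below are illustrative, not read from real hardware):

/*
 * Assume 4 banks (2 bank-address bits), 14 row bits, 10 column bits:
 *
 *   addrBits  = 2 + 14 + 10 = 26     addressable 64-bit words
 *   addrBits += 6   ->  32           scale words to bits (x64)
 *   addrBits -= 20  ->  12           divide by 2^20 ("mega")
 *   addrBits -= 3   ->   9           bits to bytes
 *
 *   megabytes = 1 << 9 = 512, i.e. a 512 MB DIMM
 */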
@@ -1032,10 +1034,9 @@ static void handle_channel(struct i5000_pvt *pvt, int csrow, int channel,
1032static void calculate_dimm_size(struct i5000_pvt *pvt) 1034static void calculate_dimm_size(struct i5000_pvt *pvt)
1033{ 1035{
1034 struct i5000_dimm_info *dinfo; 1036 struct i5000_dimm_info *dinfo;
1035 int csrow, max_csrows; 1037 int slot, channel, branch;
1036 char *p, *mem_buffer; 1038 char *p, *mem_buffer;
1037 int space, n; 1039 int space, n;
1038 int channel;
1039 1040
1040 /* ================= Generate some debug output ================= */ 1041 /* ================= Generate some debug output ================= */
1041 space = PAGE_SIZE; 1042 space = PAGE_SIZE;
@@ -1046,22 +1047,17 @@ static void calculate_dimm_size(struct i5000_pvt *pvt)
1046 return; 1047 return;
1047 } 1048 }
1048 1049
1049 n = snprintf(p, space, "\n"); 1050 /* Scan all the actual slots
1050 p += n;
1051 space -= n;
1052
1053 /* Scan all the actual CSROWS (which is # of DIMMS * 2)
1054 * and calculate the information for each DIMM 1051 * and calculate the information for each DIMM
1055 * Start with the highest csrow first, to display it first 1052 * Start with the highest slot first, to display it first
1056 * and work toward the 0th csrow 1053 * and work toward the 0th slot
1057 */ 1054 */
1058 max_csrows = pvt->maxdimmperch * 2; 1055 for (slot = pvt->maxdimmperch - 1; slot >= 0; slot--) {
1059 for (csrow = max_csrows - 1; csrow >= 0; csrow--) {
1060 1056
1061 /* on an odd csrow, first output a 'boundary' marker, 1057 /* on an odd slot, first output a 'boundary' marker,
1062 * then reset the message buffer */ 1058 * then reset the message buffer */
1063 if (csrow & 0x1) { 1059 if (slot & 0x1) {
1064 n = snprintf(p, space, "---------------------------" 1060 n = snprintf(p, space, "--------------------------"
1065 "--------------------------------"); 1061 "--------------------------------");
1066 p += n; 1062 p += n;
1067 space -= n; 1063 space -= n;
@@ -1069,30 +1065,39 @@ static void calculate_dimm_size(struct i5000_pvt *pvt)
1069 p = mem_buffer; 1065 p = mem_buffer;
1070 space = PAGE_SIZE; 1066 space = PAGE_SIZE;
1071 } 1067 }
1072 n = snprintf(p, space, "csrow %2d ", csrow); 1068 n = snprintf(p, space, "slot %2d ", slot);
1073 p += n; 1069 p += n;
1074 space -= n; 1070 space -= n;
1075 1071
1076 for (channel = 0; channel < pvt->maxch; channel++) { 1072 for (channel = 0; channel < pvt->maxch; channel++) {
1077 dinfo = &pvt->dimm_info[csrow][channel]; 1073 dinfo = &pvt->dimm_info[slot][channel];
1078 handle_channel(pvt, csrow, channel, dinfo); 1074 handle_channel(pvt, slot, channel, dinfo);
1079 n = snprintf(p, space, "%4d MB | ", dinfo->megabytes); 1075 if (dinfo->megabytes)
1076 n = snprintf(p, space, "%4d MB %dR| ",
1077 dinfo->megabytes, dinfo->dual_rank + 1);
1078 else
1079 n = snprintf(p, space, "%4d MB | ", 0);
1080 p += n; 1080 p += n;
1081 space -= n; 1081 space -= n;
1082 } 1082 }
1083 n = snprintf(p, space, "\n");
1084 p += n; 1083 p += n;
1085 space -= n; 1084 space -= n;
1085 debugf2("%s\n", mem_buffer);
1086 p = mem_buffer;
1087 space = PAGE_SIZE;
1086 } 1088 }
1087 1089
1088 /* Output the last bottom 'boundary' marker */ 1090 /* Output the last bottom 'boundary' marker */
1089 n = snprintf(p, space, "---------------------------" 1091 n = snprintf(p, space, "--------------------------"
1090 "--------------------------------\n"); 1092 "--------------------------------");
1091 p += n; 1093 p += n;
1092 space -= n; 1094 space -= n;
1095 debugf2("%s\n", mem_buffer);
1096 p = mem_buffer;
1097 space = PAGE_SIZE;
1093 1098
1094 /* now output the 'channel' labels */ 1099 /* now output the 'channel' labels */
1095 n = snprintf(p, space, " "); 1100 n = snprintf(p, space, " ");
1096 p += n; 1101 p += n;
1097 space -= n; 1102 space -= n;
1098 for (channel = 0; channel < pvt->maxch; channel++) { 1103 for (channel = 0; channel < pvt->maxch; channel++) {
@@ -1100,9 +1105,17 @@ static void calculate_dimm_size(struct i5000_pvt *pvt)
1100 p += n; 1105 p += n;
1101 space -= n; 1106 space -= n;
1102 } 1107 }
1103 n = snprintf(p, space, "\n"); 1108 debugf2("%s\n", mem_buffer);
1109 p = mem_buffer;
1110 space = PAGE_SIZE;
1111
1112 n = snprintf(p, space, " ");
1104 p += n; 1113 p += n;
1105 space -= n; 1114 for (branch = 0; branch < MAX_BRANCHES; branch++) {
1115 n = snprintf(p, space, " branch %d | ", branch);
1116 p += n;
1117 space -= n;
1118 }
1106 1119
1107 /* output the last message and free buffer */ 1120 /* output the last message and free buffer */
1108 debugf2("%s\n", mem_buffer); 1121 debugf2("%s\n", mem_buffer);
@@ -1235,13 +1248,13 @@ static void i5000_get_mc_regs(struct mem_ctl_info *mci)
1235static int i5000_init_csrows(struct mem_ctl_info *mci) 1248static int i5000_init_csrows(struct mem_ctl_info *mci)
1236{ 1249{
1237 struct i5000_pvt *pvt; 1250 struct i5000_pvt *pvt;
1238 struct csrow_info *p_csrow; 1251 struct dimm_info *dimm;
1239 int empty, channel_count; 1252 int empty, channel_count;
1240 int max_csrows; 1253 int max_csrows;
1241 int mtr, mtr1; 1254 int mtr;
1242 int csrow_megs; 1255 int csrow_megs;
1243 int channel; 1256 int channel;
1244 int csrow; 1257 int slot;
1245 1258
1246 pvt = mci->pvt_info; 1259 pvt = mci->pvt_info;
1247 1260
@@ -1250,43 +1263,40 @@ static int i5000_init_csrows(struct mem_ctl_info *mci)
1250 1263
1251 empty = 1; /* Assume NO memory */ 1264 empty = 1; /* Assume NO memory */
1252 1265
1253 for (csrow = 0; csrow < max_csrows; csrow++) { 1266 /*
1254 p_csrow = &mci->csrows[csrow]; 1267 * FIXME: The memory layout used to map slot/channel into the
1255 1268 * real memory architecture is weird: branch+slot are "csrows"
1256 p_csrow->csrow_idx = csrow; 1269 * and channel is channel. That required an extra array (dimm_info)
1257 1270 * to map the dimms. A good cleanup would be to remove this array,
1258 /* use branch 0 for the basis */ 1271 * and do a loop here with branch, channel, slot
1259 mtr = pvt->b0_mtr[csrow >> 1]; 1272 */
1260 mtr1 = pvt->b1_mtr[csrow >> 1]; 1273 for (slot = 0; slot < max_csrows; slot++) {
1261 1274 for (channel = 0; channel < pvt->maxch; channel++) {
1262 /* if no DIMMS on this row, continue */
1263 if (!MTR_DIMMS_PRESENT(mtr) && !MTR_DIMMS_PRESENT(mtr1))
1264 continue;
1265 1275
1266 /* FAKE OUT VALUES, FIXME */ 1276 mtr = determine_mtr(pvt, slot, channel);
1267 p_csrow->first_page = 0 + csrow * 20;
1268 p_csrow->last_page = 9 + csrow * 20;
1269 p_csrow->page_mask = 0xFFF;
1270 1277
1271 p_csrow->grain = 8; 1278 if (!MTR_DIMMS_PRESENT(mtr))
1279 continue;
1272 1280
1273 csrow_megs = 0; 1281 dimm = EDAC_DIMM_PTR(mci->layers, mci->dimms, mci->n_layers,
1274 for (channel = 0; channel < pvt->maxch; channel++) { 1282 channel / MAX_BRANCHES,
1275 csrow_megs += pvt->dimm_info[csrow][channel].megabytes; 1283 channel % MAX_BRANCHES, slot);
1276 }
1277 1284
1278 p_csrow->nr_pages = csrow_megs << 8; 1285 csrow_megs = pvt->dimm_info[slot][channel].megabytes;
1286 dimm->grain = 8;
1279 1287
1280 /* Assume DDR2 for now */ 1288 /* Assume DDR2 for now */
1281 p_csrow->mtype = MEM_FB_DDR2; 1289 dimm->mtype = MEM_FB_DDR2;
1282 1290
1283 /* ask what device type on this row */ 1291 /* ask what device type on this row */
1284 if (MTR_DRAM_WIDTH(mtr)) 1292 if (MTR_DRAM_WIDTH(mtr))
1285 p_csrow->dtype = DEV_X8; 1293 dimm->dtype = DEV_X8;
1286 else 1294 else
1287 p_csrow->dtype = DEV_X4; 1295 dimm->dtype = DEV_X4;
1288 1296
1289 p_csrow->edac_mode = EDAC_S8ECD8ED; 1297 dimm->edac_mode = EDAC_S8ECD8ED;
1298 dimm->nr_pages = csrow_megs << 8;
1299 }
1290 1300
1291 empty = 0; 1301 empty = 0;
1292 } 1302 }
@@ -1317,7 +1327,7 @@ static void i5000_enable_error_reporting(struct mem_ctl_info *mci)
1317} 1327}
1318 1328
1319/* 1329/*
1320 * i5000_get_dimm_and_channel_counts(pdev, &num_csrows, &num_channels) 1330 * i5000_get_dimm_and_channel_counts(pdev, &nr_csrows, &num_channels)
1321 * 1331 *
1322 * ask the device how many channels are present and how many CSROWS 1332 * ask the device how many channels are present and how many CSROWS
1323 * as well 1333 * as well
@@ -1332,7 +1342,7 @@ static void i5000_get_dimm_and_channel_counts(struct pci_dev *pdev,
1332 * supported on this memory controller 1342 * supported on this memory controller
1333 */ 1343 */
1334 pci_read_config_byte(pdev, MAXDIMMPERCH, &value); 1344 pci_read_config_byte(pdev, MAXDIMMPERCH, &value);
1335 *num_dimms_per_channel = (int)value *2; 1345 *num_dimms_per_channel = (int)value;
1336 1346
1337 pci_read_config_byte(pdev, MAXCH, &value); 1347 pci_read_config_byte(pdev, MAXCH, &value);
1338 *num_channels = (int)value; 1348 *num_channels = (int)value;
@@ -1348,10 +1358,10 @@ static void i5000_get_dimm_and_channel_counts(struct pci_dev *pdev,
1348static int i5000_probe1(struct pci_dev *pdev, int dev_idx) 1358static int i5000_probe1(struct pci_dev *pdev, int dev_idx)
1349{ 1359{
1350 struct mem_ctl_info *mci; 1360 struct mem_ctl_info *mci;
1361 struct edac_mc_layer layers[3];
1351 struct i5000_pvt *pvt; 1362 struct i5000_pvt *pvt;
1352 int num_channels; 1363 int num_channels;
1353 int num_dimms_per_channel; 1364 int num_dimms_per_channel;
1354 int num_csrows;
1355 1365
1356 debugf0("MC: %s: %s(), pdev bus %u dev=0x%x fn=0x%x\n", 1366 debugf0("MC: %s: %s(), pdev bus %u dev=0x%x fn=0x%x\n",
1357 __FILE__, __func__, 1367 __FILE__, __func__,
@@ -1377,14 +1387,22 @@ static int i5000_probe1(struct pci_dev *pdev, int dev_idx)
1377 */ 1387 */
1378 i5000_get_dimm_and_channel_counts(pdev, &num_dimms_per_channel, 1388 i5000_get_dimm_and_channel_counts(pdev, &num_dimms_per_channel,
1379 &num_channels); 1389 &num_channels);
1380 num_csrows = num_dimms_per_channel * 2;
1381 1390
1382 debugf0("MC: %s(): Number of - Channels= %d DIMMS= %d CSROWS= %d\n", 1391 debugf0("MC: %s(): Number of Branches=2 Channels= %d DIMMS= %d\n",
1383 __func__, num_channels, num_dimms_per_channel, num_csrows); 1392 __func__, num_channels, num_dimms_per_channel);
1384 1393
1385 /* allocate a new MC control structure */ 1394 /* allocate a new MC control structure */
1386 mci = edac_mc_alloc(sizeof(*pvt), num_csrows, num_channels, 0);
1387 1395
1396 layers[0].type = EDAC_MC_LAYER_BRANCH;
1397 layers[0].size = MAX_BRANCHES;
1398 layers[0].is_virt_csrow = false;
1399 layers[1].type = EDAC_MC_LAYER_CHANNEL;
1400 layers[1].size = num_channels / MAX_BRANCHES;
1401 layers[1].is_virt_csrow = false;
1402 layers[2].type = EDAC_MC_LAYER_SLOT;
1403 layers[2].size = num_dimms_per_channel;
1404 layers[2].is_virt_csrow = true;
1405 mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers, sizeof(*pvt));
1388 if (mci == NULL) 1406 if (mci == NULL)
1389 return -ENOMEM; 1407 return -ENOMEM;
1390 1408
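The EDAC_DIMM_PTR() calls used by the converted drivers index the flat mci->dimms array with one coordinate per layer. A rough hand-written equivalent for the three-layer case, to make the row-major arithmetic explicit (a sketch only; the real macro in edac_core.h also handles controllers with one or two layers):

static struct dimm_info *dimm_ptr3(struct mem_ctl_info *mci,
				   int branch, int channel, int slot)
{
	/* row-major index: branch outermost, slot innermost */
	int index = (branch * mci->layers[1].size + channel)
			* mci->layers[2].size + slot;

	return &mci->dimms[index];
}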
diff --git a/drivers/edac/i5100_edac.c b/drivers/edac/i5100_edac.c
index d500749464ea..e9e7c2a29dc3 100644
--- a/drivers/edac/i5100_edac.c
+++ b/drivers/edac/i5100_edac.c
@@ -14,6 +14,11 @@
14 * rows for each respective channel are laid out one after another, 14 * rows for each respective channel are laid out one after another,
15 * the first half belonging to channel 0, the second half belonging 15 * the first half belonging to channel 0, the second half belonging
16 * to channel 1. 16 * to channel 1.
17 *
18 * This driver is for DDR2 DIMMs, and it uses chip select to select among the
 19 * several ranks. However, instead of showing memory as ranks, it outputs
 20 * them as DIMMs. An internal table creates the association between ranks
 21 * and DIMMs.
17 */ 22 */
18#include <linux/module.h> 23#include <linux/module.h>
19#include <linux/init.h> 24#include <linux/init.h>
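The rank-to-DIMM association described above reduces to a flat index split, visible in i5100_csrow_to_chan() below and in the removed i5100_rank_to_csrow(). A sketch of both directions, assuming priv->ranksperchan ranks per channel:

/* flat index -> (channel, rank): indexes 0..ranksperchan-1 sit on
 * channel 0, the next ranksperchan on channel 1 */
chan = index / priv->ranksperchan;
rank = index % priv->ranksperchan;

/* (channel, rank) -> flat index, the inverse computed by the
 * deleted i5100_rank_to_csrow() */
index = chan * priv->ranksperchan + rank;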
@@ -410,14 +415,6 @@ static int i5100_csrow_to_chan(const struct mem_ctl_info *mci, int csrow)
410 return csrow / priv->ranksperchan; 415 return csrow / priv->ranksperchan;
411} 416}
412 417
413static unsigned i5100_rank_to_csrow(const struct mem_ctl_info *mci,
414 int chan, int rank)
415{
416 const struct i5100_priv *priv = mci->pvt_info;
417
418 return chan * priv->ranksperchan + rank;
419}
420
421static void i5100_handle_ce(struct mem_ctl_info *mci, 418static void i5100_handle_ce(struct mem_ctl_info *mci,
422 int chan, 419 int chan,
423 unsigned bank, 420 unsigned bank,
@@ -427,17 +424,17 @@ static void i5100_handle_ce(struct mem_ctl_info *mci,
427 unsigned ras, 424 unsigned ras,
428 const char *msg) 425 const char *msg)
429{ 426{
430 const int csrow = i5100_rank_to_csrow(mci, chan, rank); 427 char detail[80];
431 428
432 printk(KERN_ERR 429 /* Form out message */
433 "CE chan %d, bank %u, rank %u, syndrome 0x%lx, " 430 snprintf(detail, sizeof(detail),
434 "cas %u, ras %u, csrow %u, label \"%s\": %s\n", 431 "bank %u, cas %u, ras %u\n",
435 chan, bank, rank, syndrome, cas, ras, 432 bank, cas, ras);
436 csrow, mci->csrows[csrow].channels[0].label, msg);
437 433
438 mci->ce_count++; 434 edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
439 mci->csrows[csrow].ce_count++; 435 0, 0, syndrome,
440 mci->csrows[csrow].channels[0].ce_count++; 436 chan, rank, -1,
437 msg, detail, NULL);
441} 438}
442 439
443static void i5100_handle_ue(struct mem_ctl_info *mci, 440static void i5100_handle_ue(struct mem_ctl_info *mci,
@@ -449,16 +446,17 @@ static void i5100_handle_ue(struct mem_ctl_info *mci,
449 unsigned ras, 446 unsigned ras,
450 const char *msg) 447 const char *msg)
451{ 448{
452 const int csrow = i5100_rank_to_csrow(mci, chan, rank); 449 char detail[80];
453 450
454 printk(KERN_ERR 451 /* Form out message */
455 "UE chan %d, bank %u, rank %u, syndrome 0x%lx, " 452 snprintf(detail, sizeof(detail),
456 "cas %u, ras %u, csrow %u, label \"%s\": %s\n", 453 "bank %u, cas %u, ras %u\n",
457 chan, bank, rank, syndrome, cas, ras, 454 bank, cas, ras);
458 csrow, mci->csrows[csrow].channels[0].label, msg);
459 455
460 mci->ue_count++; 456 edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci,
461 mci->csrows[csrow].ue_count++; 457 0, 0, syndrome,
458 chan, rank, -1,
459 msg, detail, NULL);
462} 460}
463 461
464static void i5100_read_log(struct mem_ctl_info *mci, int chan, 462static void i5100_read_log(struct mem_ctl_info *mci, int chan,
@@ -835,10 +833,10 @@ static void __devinit i5100_init_interleaving(struct pci_dev *pdev,
835static void __devinit i5100_init_csrows(struct mem_ctl_info *mci) 833static void __devinit i5100_init_csrows(struct mem_ctl_info *mci)
836{ 834{
837 int i; 835 int i;
838 unsigned long total_pages = 0UL;
839 struct i5100_priv *priv = mci->pvt_info; 836 struct i5100_priv *priv = mci->pvt_info;
840 837
841 for (i = 0; i < mci->nr_csrows; i++) { 838 for (i = 0; i < mci->tot_dimms; i++) {
839 struct dimm_info *dimm;
842 const unsigned long npages = i5100_npages(mci, i); 840 const unsigned long npages = i5100_npages(mci, i);
843 const unsigned chan = i5100_csrow_to_chan(mci, i); 841 const unsigned chan = i5100_csrow_to_chan(mci, i);
844 const unsigned rank = i5100_csrow_to_rank(mci, i); 842 const unsigned rank = i5100_csrow_to_rank(mci, i);
@@ -846,33 +844,23 @@ static void __devinit i5100_init_csrows(struct mem_ctl_info *mci)
846 if (!npages) 844 if (!npages)
847 continue; 845 continue;
848 846
849 /* 847 dimm = EDAC_DIMM_PTR(mci->layers, mci->dimms, mci->n_layers,
850 * FIXME: these two are totally bogus -- I don't see how to 848 chan, rank, 0);
851 * map them correctly to this structure... 849
852 */ 850 dimm->nr_pages = npages;
853 mci->csrows[i].first_page = total_pages; 851 if (npages) {
854 mci->csrows[i].last_page = total_pages + npages - 1; 852 dimm->grain = 32;
855 mci->csrows[i].page_mask = 0UL; 853 dimm->dtype = (priv->mtr[chan][rank].width == 4) ?
856 854 DEV_X4 : DEV_X8;
857 mci->csrows[i].nr_pages = npages; 855 dimm->mtype = MEM_RDDR2;
858 mci->csrows[i].grain = 32; 856 dimm->edac_mode = EDAC_SECDED;
859 mci->csrows[i].csrow_idx = i; 857 snprintf(dimm->label, sizeof(dimm->label),
860 mci->csrows[i].dtype = 858 "DIMM%u",
861 (priv->mtr[chan][rank].width == 4) ? DEV_X4 : DEV_X8; 859 i5100_rank_to_slot(mci, chan, rank));
862 mci->csrows[i].ue_count = 0; 860 }
863 mci->csrows[i].ce_count = 0; 861
864 mci->csrows[i].mtype = MEM_RDDR2; 862 debugf2("dimm channel %d, rank %d, size %ld\n",
865 mci->csrows[i].edac_mode = EDAC_SECDED; 863 chan, rank, (long)PAGES_TO_MiB(npages));
866 mci->csrows[i].mci = mci;
867 mci->csrows[i].nr_channels = 1;
868 mci->csrows[i].channels[0].chan_idx = 0;
869 mci->csrows[i].channels[0].ce_count = 0;
870 mci->csrows[i].channels[0].csrow = mci->csrows + i;
871 snprintf(mci->csrows[i].channels[0].label,
872 sizeof(mci->csrows[i].channels[0].label),
873 "DIMM%u", i5100_rank_to_slot(mci, chan, rank));
874
875 total_pages += npages;
876 } 864 }
877} 865}
878 866
@@ -881,6 +869,7 @@ static int __devinit i5100_init_one(struct pci_dev *pdev,
881{ 869{
882 int rc; 870 int rc;
883 struct mem_ctl_info *mci; 871 struct mem_ctl_info *mci;
872 struct edac_mc_layer layers[2];
884 struct i5100_priv *priv; 873 struct i5100_priv *priv;
885 struct pci_dev *ch0mm, *ch1mm; 874 struct pci_dev *ch0mm, *ch1mm;
886 int ret = 0; 875 int ret = 0;
@@ -941,7 +930,14 @@ static int __devinit i5100_init_one(struct pci_dev *pdev,
941 goto bail_ch1; 930 goto bail_ch1;
942 } 931 }
943 932
944 mci = edac_mc_alloc(sizeof(*priv), ranksperch * 2, 1, 0); 933 layers[0].type = EDAC_MC_LAYER_CHANNEL;
934 layers[0].size = 2;
935 layers[0].is_virt_csrow = false;
936 layers[1].type = EDAC_MC_LAYER_SLOT;
937 layers[1].size = ranksperch;
938 layers[1].is_virt_csrow = true;
939 mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers,
940 sizeof(*priv));
945 if (!mci) { 941 if (!mci) {
946 ret = -ENOMEM; 942 ret = -ENOMEM;
947 goto bail_disable_ch1; 943 goto bail_disable_ch1;
diff --git a/drivers/edac/i5400_edac.c b/drivers/edac/i5400_edac.c
index 1869a1018fb5..6640c29e1885 100644
--- a/drivers/edac/i5400_edac.c
+++ b/drivers/edac/i5400_edac.c
@@ -18,6 +18,10 @@
18 * Intel 5400 Chipset Memory Controller Hub (MCH) - Datasheet 18 * Intel 5400 Chipset Memory Controller Hub (MCH) - Datasheet
19 * http://developer.intel.com/design/chipsets/datashts/313070.htm 19 * http://developer.intel.com/design/chipsets/datashts/313070.htm
20 * 20 *
21 * This Memory Controller manages DDR2 FB-DIMMs. It has 2 branches, each with
22 * 2 channels operating in lockstep no-mirror mode. Each channel can have up to
23 * 4 dimm's, each with up to 8GB.
24 *
21 */ 25 */
22 26
23#include <linux/module.h> 27#include <linux/module.h>
@@ -44,12 +48,10 @@
44 edac_mc_chipset_printk(mci, level, "i5400", fmt, ##arg) 48 edac_mc_chipset_printk(mci, level, "i5400", fmt, ##arg)
45 49
46/* Limits for i5400 */ 50/* Limits for i5400 */
47#define NUM_MTRS_PER_BRANCH 4 51#define MAX_BRANCHES 2
48#define CHANNELS_PER_BRANCH 2 52#define CHANNELS_PER_BRANCH 2
49#define MAX_DIMMS_PER_CHANNEL NUM_MTRS_PER_BRANCH 53#define DIMMS_PER_CHANNEL 4
50#define MAX_CHANNELS 4 54#define MAX_CHANNELS (MAX_BRANCHES * CHANNELS_PER_BRANCH)
51/* max possible csrows per channel */
52#define MAX_CSROWS (MAX_DIMMS_PER_CHANNEL)
53 55
54/* Device 16, 56/* Device 16,
55 * Function 0: System Address 57 * Function 0: System Address
@@ -347,16 +349,16 @@ struct i5400_pvt {
347 349
348 u16 mir0, mir1; 350 u16 mir0, mir1;
349 351
350 u16 b0_mtr[NUM_MTRS_PER_BRANCH]; /* Memory Technology Reg */ 352 u16 b0_mtr[DIMMS_PER_CHANNEL]; /* Memory Technology Reg */
351 u16 b0_ambpresent0; /* Branch 0, Channel 0 */ 353 u16 b0_ambpresent0; /* Branch 0, Channel 0 */
352 u16 b0_ambpresent1; /* Branch 0, Channel 1 */ 354 u16 b0_ambpresent1; /* Branch 0, Channel 1 */
353 355
354 u16 b1_mtr[NUM_MTRS_PER_BRANCH]; /* Memory Technology Reg */ 356 u16 b1_mtr[DIMMS_PER_CHANNEL]; /* Memory Technology Reg */
355 u16 b1_ambpresent0; /* Branch 1, Channel 0 */ 357 u16 b1_ambpresent0; /* Branch 1, Channel 0 */
356 u16 b1_ambpresent1; /* Branch 1, Channel 1 */ 358 u16 b1_ambpresent1; /* Branch 1, Channel 1 */
357 359
358 /* DIMM information matrix, allocating architecture maximums */ 360 /* DIMM information matrix, allocating architecture maximums */
359 struct i5400_dimm_info dimm_info[MAX_CSROWS][MAX_CHANNELS]; 361 struct i5400_dimm_info dimm_info[DIMMS_PER_CHANNEL][MAX_CHANNELS];
360 362
361 /* Actual values for this controller */ 363 /* Actual values for this controller */
362 int maxch; /* Max channels */ 364 int maxch; /* Max channels */
@@ -532,13 +534,15 @@ static void i5400_proccess_non_recoverable_info(struct mem_ctl_info *mci,
532 int ras, cas; 534 int ras, cas;
533 int errnum; 535 int errnum;
534 char *type = NULL; 536 char *type = NULL;
537 enum hw_event_mc_err_type tp_event = HW_EVENT_ERR_UNCORRECTED;
535 538
536 if (!allErrors) 539 if (!allErrors)
537 return; /* if no error, return now */ 540 return; /* if no error, return now */
538 541
539 if (allErrors & ERROR_FAT_MASK) 542 if (allErrors & ERROR_FAT_MASK) {
540 type = "FATAL"; 543 type = "FATAL";
541 else if (allErrors & FERR_NF_UNCORRECTABLE) 544 tp_event = HW_EVENT_ERR_FATAL;
545 } else if (allErrors & FERR_NF_UNCORRECTABLE)
542 type = "NON-FATAL uncorrected"; 546 type = "NON-FATAL uncorrected";
543 else 547 else
544 type = "NON-FATAL recoverable"; 548 type = "NON-FATAL recoverable";
@@ -556,7 +560,7 @@ static void i5400_proccess_non_recoverable_info(struct mem_ctl_info *mci,
556 ras = nrec_ras(info); 560 ras = nrec_ras(info);
557 cas = nrec_cas(info); 561 cas = nrec_cas(info);
558 562
559 debugf0("\t\tCSROW= %d Channels= %d,%d (Branch= %d " 563 debugf0("\t\tDIMM= %d Channels= %d,%d (Branch= %d "
560 "DRAM Bank= %d Buffer ID = %d rdwr= %s ras= %d cas= %d)\n", 564 "DRAM Bank= %d Buffer ID = %d rdwr= %s ras= %d cas= %d)\n",
561 rank, channel, channel + 1, branch >> 1, bank, 565 rank, channel, channel + 1, branch >> 1, bank,
562 buf_id, rdwr_str(rdwr), ras, cas); 566 buf_id, rdwr_str(rdwr), ras, cas);
@@ -566,13 +570,13 @@ static void i5400_proccess_non_recoverable_info(struct mem_ctl_info *mci,
566 570
567 /* Form out message */ 571 /* Form out message */
568 snprintf(msg, sizeof(msg), 572 snprintf(msg, sizeof(msg),
569 "%s (Branch=%d DRAM-Bank=%d Buffer ID = %d RDWR=%s " 573 "Bank=%d Buffer ID = %d RAS=%d CAS=%d Err=0x%lx (%s)",
570 "RAS=%d CAS=%d %s Err=0x%lx (%s))", 574 bank, buf_id, ras, cas, allErrors, error_name[errnum]);
571 type, branch >> 1, bank, buf_id, rdwr_str(rdwr), ras, cas,
572 type, allErrors, error_name[errnum]);
573 575
574 /* Call the helper to output message */ 576 edac_mc_handle_error(tp_event, mci, 0, 0, 0,
575 edac_mc_handle_fbd_ue(mci, rank, channel, channel + 1, msg); 577 branch >> 1, -1, rank,
578 rdwr ? "Write error" : "Read error",
579 msg, NULL);
576} 580}
577 581
578/* 582/*
@@ -630,7 +634,7 @@ static void i5400_process_nonfatal_error_info(struct mem_ctl_info *mci,
630 /* Only 1 bit will be on */ 634 /* Only 1 bit will be on */
631 errnum = find_first_bit(&allErrors, ARRAY_SIZE(error_name)); 635 errnum = find_first_bit(&allErrors, ARRAY_SIZE(error_name));
632 636
633 debugf0("\t\tCSROW= %d Channel= %d (Branch %d " 637 debugf0("\t\tDIMM= %d Channel= %d (Branch %d "
634 "DRAM Bank= %d rdwr= %s ras= %d cas= %d)\n", 638 "DRAM Bank= %d rdwr= %s ras= %d cas= %d)\n",
635 rank, channel, branch >> 1, bank, 639 rank, channel, branch >> 1, bank,
636 rdwr_str(rdwr), ras, cas); 640 rdwr_str(rdwr), ras, cas);
@@ -642,8 +646,10 @@ static void i5400_process_nonfatal_error_info(struct mem_ctl_info *mci,
642 branch >> 1, bank, rdwr_str(rdwr), ras, cas, 646 branch >> 1, bank, rdwr_str(rdwr), ras, cas,
643 allErrors, error_name[errnum]); 647 allErrors, error_name[errnum]);
644 648
645 /* Call the helper to output message */ 649 edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 0, 0, 0,
646 edac_mc_handle_fbd_ce(mci, rank, channel, msg); 650 branch >> 1, channel % 2, rank,
651 rdwr ? "Write error" : "Read error",
652 msg, NULL);
647 653
648 return; 654 return;
649 } 655 }
@@ -831,8 +837,8 @@ static int i5400_get_devices(struct mem_ctl_info *mci, int dev_idx)
831/* 837/*
832 * determine_amb_present 838 * determine_amb_present
833 * 839 *
834 * the information is contained in NUM_MTRS_PER_BRANCH different 840 * the information is contained in DIMMS_PER_CHANNEL different
835 * registers determining which of the NUM_MTRS_PER_BRANCH requires 841 * registers determining which of the DIMMS_PER_CHANNEL requires
836 * knowing which channel is in question 842 * knowing which channel is in question
837 * 843 *
838 * 2 branches, each with 2 channels 844 * 2 branches, each with 2 channels
@@ -861,11 +867,11 @@ static int determine_amb_present_reg(struct i5400_pvt *pvt, int channel)
861} 867}
862 868
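determine_amb_present_reg()'s body is not shown in this hunk, but the pvt fields above imply the selection: channels 0 and 1 read branch 0's two AMBPRESENT registers, channels 2 and 3 read branch 1's. A plausible sketch (field names as in the struct; the helper itself is an assumption, not the driver's actual code):

static u16 amb_reg_for(struct i5400_pvt *pvt, int channel)
{
	/* channels 0/1 -> branch 0, channels 2/3 -> branch 1 */
	if (channel < CHANNELS_PER_BRANCH)
		return (channel & 1) ? pvt->b0_ambpresent1
				     : pvt->b0_ambpresent0;
	return (channel & 1) ? pvt->b1_ambpresent1
			     : pvt->b1_ambpresent0;
}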
863/* 869/*
864 * determine_mtr(pvt, csrow, channel) 870 * determine_mtr(pvt, dimm, channel)
865 * 871 *
866 * return the proper MTR register as determined by the csrow and desired channel 872 * return the proper MTR register as determined by the dimm and desired channel
867 */ 873 */
868static int determine_mtr(struct i5400_pvt *pvt, int csrow, int channel) 874static int determine_mtr(struct i5400_pvt *pvt, int dimm, int channel)
869{ 875{
870 int mtr; 876 int mtr;
871 int n; 877 int n;
@@ -873,11 +879,11 @@ static int determine_mtr(struct i5400_pvt *pvt, int csrow, int channel)
873 /* There is one MTR for each slot pair of FB-DIMMs. 879 /* There is one MTR for each slot pair of FB-DIMMs.
874 Each slot pair may be at branch 0 or branch 1. 880 Each slot pair may be at branch 0 or branch 1.
875 */ 881 */
876 n = csrow; 882 n = dimm;
877 883
878 if (n >= NUM_MTRS_PER_BRANCH) { 884 if (n >= DIMMS_PER_CHANNEL) {
879 debugf0("ERROR: trying to access an invalid csrow: %d\n", 885 debugf0("ERROR: trying to access an invalid dimm: %d\n",
880 csrow); 886 dimm);
881 return 0; 887 return 0;
882 } 888 }
883 889
@@ -913,19 +919,19 @@ static void decode_mtr(int slot_row, u16 mtr)
913 debugf2("\t\tNUMCOL: %s\n", numcol_toString[MTR_DIMM_COLS(mtr)]); 919 debugf2("\t\tNUMCOL: %s\n", numcol_toString[MTR_DIMM_COLS(mtr)]);
914} 920}
915 921
916static void handle_channel(struct i5400_pvt *pvt, int csrow, int channel, 922static void handle_channel(struct i5400_pvt *pvt, int dimm, int channel,
917 struct i5400_dimm_info *dinfo) 923 struct i5400_dimm_info *dinfo)
918{ 924{
919 int mtr; 925 int mtr;
920 int amb_present_reg; 926 int amb_present_reg;
921 int addrBits; 927 int addrBits;
922 928
923 mtr = determine_mtr(pvt, csrow, channel); 929 mtr = determine_mtr(pvt, dimm, channel);
924 if (MTR_DIMMS_PRESENT(mtr)) { 930 if (MTR_DIMMS_PRESENT(mtr)) {
925 amb_present_reg = determine_amb_present_reg(pvt, channel); 931 amb_present_reg = determine_amb_present_reg(pvt, channel);
926 932
927 /* Determine if there is a DIMM present in this DIMM slot */ 933 /* Determine if there is a DIMM present in this DIMM slot */
928 if (amb_present_reg & (1 << csrow)) { 934 if (amb_present_reg & (1 << dimm)) {
929 /* Start with the number of bits for a Bank 935 /* Start with the number of bits for a Bank
930 * on the DRAM */ 936 * on the DRAM */
931 addrBits = MTR_DRAM_BANKS_ADDR_BITS(mtr); 937 addrBits = MTR_DRAM_BANKS_ADDR_BITS(mtr);
@@ -954,10 +960,10 @@ static void handle_channel(struct i5400_pvt *pvt, int csrow, int channel,
954static void calculate_dimm_size(struct i5400_pvt *pvt) 960static void calculate_dimm_size(struct i5400_pvt *pvt)
955{ 961{
956 struct i5400_dimm_info *dinfo; 962 struct i5400_dimm_info *dinfo;
957 int csrow, max_csrows; 963 int dimm, max_dimms;
958 char *p, *mem_buffer; 964 char *p, *mem_buffer;
959 int space, n; 965 int space, n;
960 int channel; 966 int channel, branch;
961 967
962 /* ================= Generate some debug output ================= */ 968 /* ================= Generate some debug output ================= */
963 space = PAGE_SIZE; 969 space = PAGE_SIZE;
@@ -968,32 +974,32 @@ static void calculate_dimm_size(struct i5400_pvt *pvt)
968 return; 974 return;
969 } 975 }
970 976
971 /* Scan all the actual CSROWS 977 /* Scan all the actual DIMMS
972 * and calculate the information for each DIMM 978 * and calculate the information for each DIMM
973 * Start with the highest csrow first, to display it first 979 * Start with the highest dimm first, to display it first
974 * and work toward the 0th csrow 980 * and work toward the 0th dimm
975 */ 981 */
976 max_csrows = pvt->maxdimmperch; 982 max_dimms = pvt->maxdimmperch;
977 for (csrow = max_csrows - 1; csrow >= 0; csrow--) { 983 for (dimm = max_dimms - 1; dimm >= 0; dimm--) {
978 984
979 /* on an odd csrow, first output a 'boundary' marker, 985 /* on an odd dimm, first output a 'boundary' marker,
980 * then reset the message buffer */ 986 * then reset the message buffer */
981 if (csrow & 0x1) { 987 if (dimm & 0x1) {
982 n = snprintf(p, space, "---------------------------" 988 n = snprintf(p, space, "---------------------------"
983 "--------------------------------"); 989 "-------------------------------");
984 p += n; 990 p += n;
985 space -= n; 991 space -= n;
986 debugf2("%s\n", mem_buffer); 992 debugf2("%s\n", mem_buffer);
987 p = mem_buffer; 993 p = mem_buffer;
988 space = PAGE_SIZE; 994 space = PAGE_SIZE;
989 } 995 }
990 n = snprintf(p, space, "csrow %2d ", csrow); 996 n = snprintf(p, space, "dimm %2d ", dimm);
991 p += n; 997 p += n;
992 space -= n; 998 space -= n;
993 999
994 for (channel = 0; channel < pvt->maxch; channel++) { 1000 for (channel = 0; channel < pvt->maxch; channel++) {
995 dinfo = &pvt->dimm_info[csrow][channel]; 1001 dinfo = &pvt->dimm_info[dimm][channel];
996 handle_channel(pvt, csrow, channel, dinfo); 1002 handle_channel(pvt, dimm, channel, dinfo);
997 n = snprintf(p, space, "%4d MB | ", dinfo->megabytes); 1003 n = snprintf(p, space, "%4d MB | ", dinfo->megabytes);
998 p += n; 1004 p += n;
999 space -= n; 1005 space -= n;
@@ -1005,7 +1011,7 @@ static void calculate_dimm_size(struct i5400_pvt *pvt)
1005 1011
1006 /* Output the last bottom 'boundary' marker */ 1012 /* Output the last bottom 'boundary' marker */
1007 n = snprintf(p, space, "---------------------------" 1013 n = snprintf(p, space, "---------------------------"
1008 "--------------------------------"); 1014 "-------------------------------");
1009 p += n; 1015 p += n;
1010 space -= n; 1016 space -= n;
1011 debugf2("%s\n", mem_buffer); 1017 debugf2("%s\n", mem_buffer);
@@ -1013,7 +1019,7 @@ static void calculate_dimm_size(struct i5400_pvt *pvt)
1013 space = PAGE_SIZE; 1019 space = PAGE_SIZE;
1014 1020
1015 /* now output the 'channel' labels */ 1021 /* now output the 'channel' labels */
1016 n = snprintf(p, space, " "); 1022 n = snprintf(p, space, " ");
1017 p += n; 1023 p += n;
1018 space -= n; 1024 space -= n;
1019 for (channel = 0; channel < pvt->maxch; channel++) { 1025 for (channel = 0; channel < pvt->maxch; channel++) {
@@ -1022,6 +1028,19 @@ static void calculate_dimm_size(struct i5400_pvt *pvt)
1022 space -= n; 1028 space -= n;
1023 } 1029 }
1024 1030
1031 space -= n;
1032 debugf2("%s\n", mem_buffer);
1033 p = mem_buffer;
1034 space = PAGE_SIZE;
1035
1036 n = snprintf(p, space, " ");
1037 p += n;
1038 for (branch = 0; branch < MAX_BRANCHES; branch++) {
1039 n = snprintf(p, space, " branch %d | ", branch);
1040 p += n;
1041 space -= n;
1042 }
1043
1025 /* output the last message and free buffer */ 1044 /* output the last message and free buffer */
1026 debugf2("%s\n", mem_buffer); 1045 debugf2("%s\n", mem_buffer);
1027 kfree(mem_buffer); 1046 kfree(mem_buffer);
@@ -1080,7 +1099,7 @@ static void i5400_get_mc_regs(struct mem_ctl_info *mci)
1080 debugf2("MIR1: limit= 0x%x WAY1= %u WAY0= %x\n", limit, way1, way0); 1099 debugf2("MIR1: limit= 0x%x WAY1= %u WAY0= %x\n", limit, way1, way0);
1081 1100
1082 /* Get the set of MTR[0-3] regs by each branch */ 1101 /* Get the set of MTR[0-3] regs by each branch */
1083 for (slot_row = 0; slot_row < NUM_MTRS_PER_BRANCH; slot_row++) { 1102 for (slot_row = 0; slot_row < DIMMS_PER_CHANNEL; slot_row++) {
1084 int where = MTR0 + (slot_row * sizeof(u16)); 1103 int where = MTR0 + (slot_row * sizeof(u16));
1085 1104
1086 /* Branch 0 set of MTR registers */ 1105 /* Branch 0 set of MTR registers */
@@ -1105,7 +1124,7 @@ static void i5400_get_mc_regs(struct mem_ctl_info *mci)
1105 /* Read and dump branch 0's MTRs */ 1124 /* Read and dump branch 0's MTRs */
1106 debugf2("\nMemory Technology Registers:\n"); 1125 debugf2("\nMemory Technology Registers:\n");
1107 debugf2(" Branch 0:\n"); 1126 debugf2(" Branch 0:\n");
1108 for (slot_row = 0; slot_row < NUM_MTRS_PER_BRANCH; slot_row++) 1127 for (slot_row = 0; slot_row < DIMMS_PER_CHANNEL; slot_row++)
1109 decode_mtr(slot_row, pvt->b0_mtr[slot_row]); 1128 decode_mtr(slot_row, pvt->b0_mtr[slot_row]);
1110 1129
1111 pci_read_config_word(pvt->branch_0, AMBPRESENT_0, 1130 pci_read_config_word(pvt->branch_0, AMBPRESENT_0,
@@ -1122,7 +1141,7 @@ static void i5400_get_mc_regs(struct mem_ctl_info *mci)
1122 } else { 1141 } else {
1123 /* Read and dump branch 1's MTRs */ 1142 /* Read and dump branch 1's MTRs */
1124 debugf2(" Branch 1:\n"); 1143 debugf2(" Branch 1:\n");
1125 for (slot_row = 0; slot_row < NUM_MTRS_PER_BRANCH; slot_row++) 1144 for (slot_row = 0; slot_row < DIMMS_PER_CHANNEL; slot_row++)
1126 decode_mtr(slot_row, pvt->b1_mtr[slot_row]); 1145 decode_mtr(slot_row, pvt->b1_mtr[slot_row]);
1127 1146
1128 pci_read_config_word(pvt->branch_1, AMBPRESENT_0, 1147 pci_read_config_word(pvt->branch_1, AMBPRESENT_0,
@@ -1141,7 +1160,7 @@ static void i5400_get_mc_regs(struct mem_ctl_info *mci)
1141} 1160}
1142 1161
1143/* 1162/*
1144 * i5400_init_csrows Initialize the 'csrows' table within 1163 * i5400_init_dimms Initialize the 'dimms' table within
1145 * the mci control structure with the 1164 * the mci control structure with the
1146 * addressing of memory. 1165 * addressing of memory.
1147 * 1166 *
@@ -1149,64 +1168,68 @@ static void i5400_get_mc_regs(struct mem_ctl_info *mci)
1149 * 0 success 1168 * 0 success
1150 * 1 no actual memory found on this MC 1169 * 1 no actual memory found on this MC
1151 */ 1170 */
1152static int i5400_init_csrows(struct mem_ctl_info *mci) 1171static int i5400_init_dimms(struct mem_ctl_info *mci)
1153{ 1172{
1154 struct i5400_pvt *pvt; 1173 struct i5400_pvt *pvt;
1155 struct csrow_info *p_csrow; 1174 struct dimm_info *dimm;
1156 int empty, channel_count; 1175 int ndimms, channel_count;
1157 int max_csrows; 1176 int max_dimms;
1158 int mtr; 1177 int mtr;
1159 int csrow_megs; 1178 int size_mb;
1160 int channel; 1179 int channel, slot;
1161 int csrow;
1162 1180
1163 pvt = mci->pvt_info; 1181 pvt = mci->pvt_info;
1164 1182
1165 channel_count = pvt->maxch; 1183 channel_count = pvt->maxch;
1166 max_csrows = pvt->maxdimmperch; 1184 max_dimms = pvt->maxdimmperch;
1167 1185
1168 empty = 1; /* Assume NO memory */ 1186 ndimms = 0;
1169 1187
1170 for (csrow = 0; csrow < max_csrows; csrow++) { 1188 /*
1171 p_csrow = &mci->csrows[csrow]; 1189 * FIXME: remove pvt->dimm_info[slot][channel] and use the 3
1172 1190 * layers here.
1173 p_csrow->csrow_idx = csrow; 1191 */
1174 1192 for (channel = 0; channel < mci->layers[0].size * mci->layers[1].size;
1175 /* use branch 0 for the basis */ 1193 channel++) {
1176 mtr = determine_mtr(pvt, csrow, 0); 1194 for (slot = 0; slot < mci->layers[2].size; slot++) {
1177 1195 mtr = determine_mtr(pvt, slot, channel);
1178 /* if no DIMMS on this row, continue */ 1196
1179 if (!MTR_DIMMS_PRESENT(mtr)) 1197 /* if no DIMMS on this slot, continue */
1180 continue; 1198 if (!MTR_DIMMS_PRESENT(mtr))
1181 1199 continue;
1182 /* FAKE OUT VALUES, FIXME */ 1200
1183 p_csrow->first_page = 0 + csrow * 20; 1201 dimm = EDAC_DIMM_PTR(mci->layers, mci->dimms, mci->n_layers,
1184 p_csrow->last_page = 9 + csrow * 20; 1202 channel / 2, channel % 2, slot);
1185 p_csrow->page_mask = 0xFFF; 1203
1186 1204 size_mb = pvt->dimm_info[slot][channel].megabytes;
1187 p_csrow->grain = 8; 1205
1188 1206 debugf2("%s: dimm%zd (branch %d channel %d slot %d): %d.%03d GB\n",
1189 csrow_megs = 0; 1207 __func__, dimm - mci->dimms,
1190 for (channel = 0; channel < pvt->maxch; channel++) 1208 channel / 2, channel % 2, slot,
1191 csrow_megs += pvt->dimm_info[csrow][channel].megabytes; 1209 size_mb / 1000, size_mb % 1000);
1192 1210
1193 p_csrow->nr_pages = csrow_megs << 8; 1211 dimm->nr_pages = size_mb << 8;
1194 1212 dimm->grain = 8;
1195 /* Assume DDR2 for now */ 1213 dimm->dtype = MTR_DRAM_WIDTH(mtr) ? DEV_X8 : DEV_X4;
1196 p_csrow->mtype = MEM_FB_DDR2; 1214 dimm->mtype = MEM_FB_DDR2;
1197 1215 /*
1198 /* ask what device type on this row */ 1216 * The eccc mechanism is SDDC (aka SECC), with
1199 if (MTR_DRAM_WIDTH(mtr)) 1217 * is similar to Chipkill.
1200 p_csrow->dtype = DEV_X8; 1218 */
1201 else 1219 dimm->edac_mode = MTR_DRAM_WIDTH(mtr) ?
1202 p_csrow->dtype = DEV_X4; 1220 EDAC_S8ECD8ED : EDAC_S4ECD4ED;
1203 1221 ndimms++;
1204 p_csrow->edac_mode = EDAC_S8ECD8ED; 1222 }
1205
1206 empty = 0;
1207 } 1223 }
1208 1224
1209 return empty; 1225 /*
1226 * When just one DIMM is present, it should be at location (0,0,0).
1227 * In such single-DIMM mode, the SDDC algorithm degrades to SECDED+.
1228 */
1229 if (ndimms == 1)
1230 mci->dimms[0].edac_mode = EDAC_SECDED;
1231
1232 return (ndimms == 0);
1210} 1233}
1211 1234
1212/* 1235/*
@@ -1242,9 +1265,7 @@ static int i5400_probe1(struct pci_dev *pdev, int dev_idx)
1242{ 1265{
1243 struct mem_ctl_info *mci; 1266 struct mem_ctl_info *mci;
1244 struct i5400_pvt *pvt; 1267 struct i5400_pvt *pvt;
1245 int num_channels; 1268 struct edac_mc_layer layers[3];
1246 int num_dimms_per_channel;
1247 int num_csrows;
1248 1269
1249 if (dev_idx >= ARRAY_SIZE(i5400_devs)) 1270 if (dev_idx >= ARRAY_SIZE(i5400_devs))
1250 return -EINVAL; 1271 return -EINVAL;
@@ -1258,23 +1279,21 @@ static int i5400_probe1(struct pci_dev *pdev, int dev_idx)
1258 if (PCI_FUNC(pdev->devfn) != 0) 1279 if (PCI_FUNC(pdev->devfn) != 0)
1259 return -ENODEV; 1280 return -ENODEV;
1260 1281
1261 /* As we don't have a motherboard identification routine to determine 1282 /*
1262 * actual number of slots/dimms per channel, we thus utilize the 1283 * allocate a new MC control structure
1263 * resource as specified by the chipset. Thus, we might have 1284 *
1264 * have more DIMMs per channel than actually on the mobo, but this 1285 * This driver uses the DIMM slot as "csrow" and the rest as "channel".
1265 * allows the driver to support up to the chipset max, without
1266 * some fancy mobo determination.
1267 */ 1286 */
1268 num_dimms_per_channel = MAX_DIMMS_PER_CHANNEL; 1287 layers[0].type = EDAC_MC_LAYER_BRANCH;
1269 num_channels = MAX_CHANNELS; 1288 layers[0].size = MAX_BRANCHES;
1270 num_csrows = num_dimms_per_channel; 1289 layers[0].is_virt_csrow = false;
1271 1290 layers[1].type = EDAC_MC_LAYER_CHANNEL;
1272 debugf0("MC: %s(): Number of - Channels= %d DIMMS= %d CSROWS= %d\n", 1291 layers[1].size = CHANNELS_PER_BRANCH;
1273 __func__, num_channels, num_dimms_per_channel, num_csrows); 1292 layers[1].is_virt_csrow = false;
1274 1293 layers[2].type = EDAC_MC_LAYER_SLOT;
1275 /* allocate a new MC control structure */ 1294 layers[2].size = DIMMS_PER_CHANNEL;
1276 mci = edac_mc_alloc(sizeof(*pvt), num_csrows, num_channels, 0); 1295 layers[2].is_virt_csrow = true;
1277 1296 mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers, sizeof(*pvt));
1278 if (mci == NULL) 1297 if (mci == NULL)
1279 return -ENOMEM; 1298 return -ENOMEM;
1280 1299
@@ -1284,8 +1303,8 @@ static int i5400_probe1(struct pci_dev *pdev, int dev_idx)
1284 1303
1285 pvt = mci->pvt_info; 1304 pvt = mci->pvt_info;
1286 pvt->system_address = pdev; /* Record this device in our private */ 1305 pvt->system_address = pdev; /* Record this device in our private */
1287 pvt->maxch = num_channels; 1306 pvt->maxch = MAX_CHANNELS;
1288 pvt->maxdimmperch = num_dimms_per_channel; 1307 pvt->maxdimmperch = DIMMS_PER_CHANNEL;
1289 1308
1290 /* 'get' the pci devices we want to reserve for our use */ 1309 /* 'get' the pci devices we want to reserve for our use */
1291 if (i5400_get_devices(mci, dev_idx)) 1310 if (i5400_get_devices(mci, dev_idx))
@@ -1307,13 +1326,13 @@ static int i5400_probe1(struct pci_dev *pdev, int dev_idx)
1307 /* Set the function pointer to an actual operation function */ 1326 /* Set the function pointer to an actual operation function */
1308 mci->edac_check = i5400_check_error; 1327 mci->edac_check = i5400_check_error;
1309 1328
1310 /* initialize the MC control structure 'csrows' table 1329 /* initialize the MC control structure 'dimms' table
1311 * with the mapping and control information */ 1330 * with the mapping and control information */
1312 if (i5400_init_csrows(mci)) { 1331 if (i5400_init_dimms(mci)) {
1313 debugf0("MC: Setting mci->edac_cap to EDAC_FLAG_NONE\n" 1332 debugf0("MC: Setting mci->edac_cap to EDAC_FLAG_NONE\n"
1314 " because i5400_init_csrows() returned nonzero " 1333 " because i5400_init_dimms() returned nonzero "
1315 "value\n"); 1334 "value\n");
1316 mci->edac_cap = EDAC_FLAG_NONE; /* no csrows found */ 1335 mci->edac_cap = EDAC_FLAG_NONE; /* no dimms found */
1317 } else { 1336 } else {
1318 debugf1("MC: Enable error reporting now\n"); 1337 debugf1("MC: Enable error reporting now\n");
1319 i5400_enable_error_reporting(mci); 1338 i5400_enable_error_reporting(mci);
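A note on the recurring "nr_pages = size_mb << 8" conversion in these drivers: with 4 KiB pages, one MiB holds 2^20 / 2^12 = 2^8 = 256 pages, so shifting the megabyte count left by 8 yields the page count. This is the same relation the MiB_TO_PAGES() helper expresses in the i7300 and i7core hunks below. For example:

/*
 *   2048 MB DIMM -> 2048 << 8 = 524288 pages of 4 KiB
 *   524288 * 4096 bytes = 2 GiB, as expected
 */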
diff --git a/drivers/edac/i7300_edac.c b/drivers/edac/i7300_edac.c
index 3bafa3bca148..97c22fd650ee 100644
--- a/drivers/edac/i7300_edac.c
+++ b/drivers/edac/i7300_edac.c
@@ -464,17 +464,14 @@ static void i7300_process_fbd_error(struct mem_ctl_info *mci)
464 FERR_FAT_FBD, error_reg); 464 FERR_FAT_FBD, error_reg);
465 465
466 snprintf(pvt->tmp_prt_buffer, PAGE_SIZE, 466 snprintf(pvt->tmp_prt_buffer, PAGE_SIZE,
467 "FATAL (Branch=%d DRAM-Bank=%d %s " 467 "Bank=%d RAS=%d CAS=%d Err=0x%lx (%s))",
468 "RAS=%d CAS=%d Err=0x%lx (%s))", 468 bank, ras, cas, errors, specific);
469 branch, bank, 469
470 is_wr ? "RDWR" : "RD", 470 edac_mc_handle_error(HW_EVENT_ERR_FATAL, mci, 0, 0, 0,
471 ras, cas, 471 branch, -1, rank,
472 errors, specific); 472 is_wr ? "Write error" : "Read error",
473 473 pvt->tmp_prt_buffer, NULL);
474 /* Call the helper to output message */ 474
475 edac_mc_handle_fbd_ue(mci, rank, branch << 1,
476 (branch << 1) + 1,
477 pvt->tmp_prt_buffer);
478 } 475 }
479 476
480 /* read in the 1st NON-FATAL error register */ 477 /* read in the 1st NON-FATAL error register */
@@ -513,23 +510,14 @@ static void i7300_process_fbd_error(struct mem_ctl_info *mci)
513 510
514 /* Form out message */ 511 /* Form out message */
515 snprintf(pvt->tmp_prt_buffer, PAGE_SIZE, 512 snprintf(pvt->tmp_prt_buffer, PAGE_SIZE,
516 "Corrected error (Branch=%d, Channel %d), " 513 "DRAM-Bank=%d RAS=%d CAS=%d, Err=0x%lx (%s))",
517 " DRAM-Bank=%d %s " 514 bank, ras, cas, errors, specific);
518 "RAS=%d CAS=%d, CE Err=0x%lx, Syndrome=0x%08x(%s))", 515
519 branch, channel, 516 edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 0, 0,
520 bank, 517 syndrome,
521 is_wr ? "RDWR" : "RD", 518 branch >> 1, channel % 2, rank,
522 ras, cas, 519 is_wr ? "Write error" : "Read error",
523 errors, syndrome, specific); 520 pvt->tmp_prt_buffer, NULL);
524
525 /*
526 * Call the helper to output message
527 * NOTE: Errors are reported per-branch, and not per-channel
528 * Currently, we don't know how to identify the right
529 * channel.
530 */
531 edac_mc_handle_fbd_ce(mci, rank, channel,
532 pvt->tmp_prt_buffer);
533 } 521 }
534 return; 522 return;
535} 523}
@@ -617,8 +605,7 @@ static void i7300_enable_error_reporting(struct mem_ctl_info *mci)
617static int decode_mtr(struct i7300_pvt *pvt, 605static int decode_mtr(struct i7300_pvt *pvt,
618 int slot, int ch, int branch, 606 int slot, int ch, int branch,
619 struct i7300_dimm_info *dinfo, 607 struct i7300_dimm_info *dinfo,
620 struct csrow_info *p_csrow, 608 struct dimm_info *dimm)
621 u32 *nr_pages)
622{ 609{
623 int mtr, ans, addrBits, channel; 610 int mtr, ans, addrBits, channel;
624 611
@@ -650,7 +637,6 @@ static int decode_mtr(struct i7300_pvt *pvt,
650 addrBits -= 3; /* 8 bits per bytes */ 637 addrBits -= 3; /* 8 bits per bytes */
651 638
652 dinfo->megabytes = 1 << addrBits; 639 dinfo->megabytes = 1 << addrBits;
653 *nr_pages = dinfo->megabytes << 8;
654 640
655 debugf2("\t\tWIDTH: x%d\n", MTR_DRAM_WIDTH(mtr)); 641 debugf2("\t\tWIDTH: x%d\n", MTR_DRAM_WIDTH(mtr));
656 642
@@ -663,11 +649,6 @@ static int decode_mtr(struct i7300_pvt *pvt,
663 debugf2("\t\tNUMCOL: %s\n", numcol_toString[MTR_DIMM_COLS(mtr)]); 649 debugf2("\t\tNUMCOL: %s\n", numcol_toString[MTR_DIMM_COLS(mtr)]);
664 debugf2("\t\tSIZE: %d MB\n", dinfo->megabytes); 650 debugf2("\t\tSIZE: %d MB\n", dinfo->megabytes);
665 651
666 p_csrow->grain = 8;
667 p_csrow->mtype = MEM_FB_DDR2;
668 p_csrow->csrow_idx = slot;
669 p_csrow->page_mask = 0;
670
671 /* 652 /*
672 * The type of error detection actually depends of the 653 * The type of error detection actually depends of the
673 * mode of operation. When it is just one single memory chip, at 654 * mode of operation. When it is just one single memory chip, at
@@ -677,15 +658,18 @@ static int decode_mtr(struct i7300_pvt *pvt,
677 * See datasheet Sections 7.3.6 to 7.3.8 658 * See datasheet Sections 7.3.6 to 7.3.8
678 */ 659 */
679 660
661 dimm->nr_pages = MiB_TO_PAGES(dinfo->megabytes);
662 dimm->grain = 8;
663 dimm->mtype = MEM_FB_DDR2;
680 if (IS_SINGLE_MODE(pvt->mc_settings_a)) { 664 if (IS_SINGLE_MODE(pvt->mc_settings_a)) {
681 p_csrow->edac_mode = EDAC_SECDED; 665 dimm->edac_mode = EDAC_SECDED;
682 debugf2("\t\tECC code is 8-byte-over-32-byte SECDED+ code\n"); 666 debugf2("\t\tECC code is 8-byte-over-32-byte SECDED+ code\n");
683 } else { 667 } else {
684 debugf2("\t\tECC code is on Lockstep mode\n"); 668 debugf2("\t\tECC code is on Lockstep mode\n");
685 if (MTR_DRAM_WIDTH(mtr) == 8) 669 if (MTR_DRAM_WIDTH(mtr) == 8)
686 p_csrow->edac_mode = EDAC_S8ECD8ED; 670 dimm->edac_mode = EDAC_S8ECD8ED;
687 else 671 else
688 p_csrow->edac_mode = EDAC_S4ECD4ED; 672 dimm->edac_mode = EDAC_S4ECD4ED;
689 } 673 }
690 674
691 /* ask what device type on this row */ 675 /* ask what device type on this row */
@@ -694,9 +678,9 @@ static int decode_mtr(struct i7300_pvt *pvt,
694 IS_SCRBALGO_ENHANCED(pvt->mc_settings) ? 678 IS_SCRBALGO_ENHANCED(pvt->mc_settings) ?
695 "enhanced" : "normal"); 679 "enhanced" : "normal");
696 680
697 p_csrow->dtype = DEV_X8; 681 dimm->dtype = DEV_X8;
698 } else 682 } else
699 p_csrow->dtype = DEV_X4; 683 dimm->dtype = DEV_X4;
700 684
701 return mtr; 685 return mtr;
702} 686}
@@ -774,11 +758,10 @@ static int i7300_init_csrows(struct mem_ctl_info *mci)
774{ 758{
775 struct i7300_pvt *pvt; 759 struct i7300_pvt *pvt;
776 struct i7300_dimm_info *dinfo; 760 struct i7300_dimm_info *dinfo;
777 struct csrow_info *p_csrow;
778 int rc = -ENODEV; 761 int rc = -ENODEV;
779 int mtr; 762 int mtr;
780 int ch, branch, slot, channel; 763 int ch, branch, slot, channel;
781 u32 last_page = 0, nr_pages; 764 struct dimm_info *dimm;
782 765
783 pvt = mci->pvt_info; 766 pvt = mci->pvt_info;
784 767
@@ -809,25 +792,23 @@ static int i7300_init_csrows(struct mem_ctl_info *mci)
809 pci_read_config_word(pvt->pci_dev_2x_0_fbd_branch[branch], 792 pci_read_config_word(pvt->pci_dev_2x_0_fbd_branch[branch],
810 where, 793 where,
811 &pvt->mtr[slot][branch]); 794 &pvt->mtr[slot][branch]);
812 for (ch = 0; ch < MAX_BRANCHES; ch++) { 795 for (ch = 0; ch < MAX_CH_PER_BRANCH; ch++) {
813 int channel = to_channel(ch, branch); 796 int channel = to_channel(ch, branch);
814 797
798 dimm = EDAC_DIMM_PTR(mci->layers, mci->dimms,
799 mci->n_layers, branch, ch, slot);
800
815 dinfo = &pvt->dimm_info[slot][channel]; 801 dinfo = &pvt->dimm_info[slot][channel];
816 p_csrow = &mci->csrows[slot];
817 802
818 mtr = decode_mtr(pvt, slot, ch, branch, 803 mtr = decode_mtr(pvt, slot, ch, branch,
819 dinfo, p_csrow, &nr_pages); 804 dinfo, dimm);
805
820 /* if no DIMMS on this row, continue */ 806 /* if no DIMMS on this row, continue */
821 if (!MTR_DIMMS_PRESENT(mtr)) 807 if (!MTR_DIMMS_PRESENT(mtr))
822 continue; 808 continue;
823 809
824 /* Update per_csrow memory count */
825 p_csrow->nr_pages += nr_pages;
826 p_csrow->first_page = last_page;
827 last_page += nr_pages;
828 p_csrow->last_page = last_page;
829
830 rc = 0; 810 rc = 0;
811
831 } 812 }
832 } 813 }
833 } 814 }
@@ -1042,10 +1023,8 @@ static int __devinit i7300_init_one(struct pci_dev *pdev,
1042 const struct pci_device_id *id) 1023 const struct pci_device_id *id)
1043{ 1024{
1044 struct mem_ctl_info *mci; 1025 struct mem_ctl_info *mci;
1026 struct edac_mc_layer layers[3];
1045 struct i7300_pvt *pvt; 1027 struct i7300_pvt *pvt;
1046 int num_channels;
1047 int num_dimms_per_channel;
1048 int num_csrows;
1049 int rc; 1028 int rc;
1050 1029
1051 /* wake up device */ 1030 /* wake up device */
@@ -1062,23 +1041,17 @@ static int __devinit i7300_init_one(struct pci_dev *pdev,
1062 if (PCI_FUNC(pdev->devfn) != 0) 1041 if (PCI_FUNC(pdev->devfn) != 0)
1063 return -ENODEV; 1042 return -ENODEV;
1064 1043
1065 /* As we don't have a motherboard identification routine to determine
1066 * actual number of slots/dimms per channel, we thus utilize the
1067 * resource as specified by the chipset. Thus, we might have
1068 * have more DIMMs per channel than actually on the mobo, but this
1069 * allows the driver to support up to the chipset max, without
1070 * some fancy mobo determination.
1071 */
1072 num_dimms_per_channel = MAX_SLOTS;
1073 num_channels = MAX_CHANNELS;
1074 num_csrows = MAX_SLOTS * MAX_CHANNELS;
1075
1076 debugf0("MC: %s(): Number of - Channels= %d DIMMS= %d CSROWS= %d\n",
1077 __func__, num_channels, num_dimms_per_channel, num_csrows);
1078
1079 /* allocate a new MC control structure */ 1044 /* allocate a new MC control structure */
1080 mci = edac_mc_alloc(sizeof(*pvt), num_csrows, num_channels, 0); 1045 layers[0].type = EDAC_MC_LAYER_BRANCH;
1081 1046 layers[0].size = MAX_BRANCHES;
1047 layers[0].is_virt_csrow = false;
1048 layers[1].type = EDAC_MC_LAYER_CHANNEL;
1049 layers[1].size = MAX_CH_PER_BRANCH;
1050 layers[1].is_virt_csrow = true;
1051 layers[2].type = EDAC_MC_LAYER_SLOT;
1052 layers[2].size = MAX_SLOTS;
1053 layers[2].is_virt_csrow = true;
1054 mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers, sizeof(*pvt));
1082 if (mci == NULL) 1055 if (mci == NULL)
1083 return -ENOMEM; 1056 return -ENOMEM;
1084 1057
diff --git a/drivers/edac/i7core_edac.c b/drivers/edac/i7core_edac.c
index 7f1dfcc4e597..d27778f65a5d 100644
--- a/drivers/edac/i7core_edac.c
+++ b/drivers/edac/i7core_edac.c
@@ -221,7 +221,9 @@ struct i7core_inject {
221}; 221};
222 222
223struct i7core_channel { 223struct i7core_channel {
224 u32 ranks; 224 bool is_3dimms_present;
225 bool is_single_4rank;
226 bool has_4rank;
225 u32 dimms; 227 u32 dimms;
226}; 228};
227 229
@@ -257,7 +259,6 @@ struct i7core_pvt {
257 struct i7core_channel channel[NUM_CHANS]; 259 struct i7core_channel channel[NUM_CHANS];
258 260
259 int ce_count_available; 261 int ce_count_available;
260 int csrow_map[NUM_CHANS][MAX_DIMMS];
261 262
262 /* ECC corrected errors counts per udimm */ 263 /* ECC corrected errors counts per udimm */
263 unsigned long udimm_ce_count[MAX_DIMMS]; 264 unsigned long udimm_ce_count[MAX_DIMMS];
@@ -492,116 +493,15 @@ static void free_i7core_dev(struct i7core_dev *i7core_dev)
492/**************************************************************************** 493/****************************************************************************
493 Memory check routines 494 Memory check routines
494 ****************************************************************************/ 495 ****************************************************************************/
495static struct pci_dev *get_pdev_slot_func(u8 socket, unsigned slot,
496 unsigned func)
497{
498 struct i7core_dev *i7core_dev = get_i7core_dev(socket);
499 int i;
500
501 if (!i7core_dev)
502 return NULL;
503
504 for (i = 0; i < i7core_dev->n_devs; i++) {
505 if (!i7core_dev->pdev[i])
506 continue;
507
508 if (PCI_SLOT(i7core_dev->pdev[i]->devfn) == slot &&
509 PCI_FUNC(i7core_dev->pdev[i]->devfn) == func) {
510 return i7core_dev->pdev[i];
511 }
512 }
513
514 return NULL;
515}
516
517/**
518 * i7core_get_active_channels() - gets the number of channels and csrows
519 * @socket: Quick Path Interconnect socket
520 * @channels: Number of channels that will be returned
521 * @csrows: Number of csrows found
522 *
523 * Since EDAC core needs to know in advance the number of available channels
524 * and csrows, in order to allocate memory for csrows/channels, it is needed
525 * to run two similar steps. At the first step, implemented on this function,
526 * it checks the number of csrows/channels present at one socket.
527 * this is used in order to properly allocate the size of mci components.
528 *
529 * It should be noticed that none of the current available datasheets explain
530 * or even mention how csrows are seen by the memory controller. So, we need
531 * to add a fake description for csrows.
532 * So, this driver is attributing one DIMM memory for one csrow.
533 */
534static int i7core_get_active_channels(const u8 socket, unsigned *channels,
535 unsigned *csrows)
536{
537 struct pci_dev *pdev = NULL;
538 int i, j;
539 u32 status, control;
540
541 *channels = 0;
542 *csrows = 0;
543
544 pdev = get_pdev_slot_func(socket, 3, 0);
545 if (!pdev) {
546 i7core_printk(KERN_ERR, "Couldn't find socket %d fn 3.0!!!\n",
547 socket);
548 return -ENODEV;
549 }
550
551 /* Device 3 function 0 reads */
552 pci_read_config_dword(pdev, MC_STATUS, &status);
553 pci_read_config_dword(pdev, MC_CONTROL, &control);
554
555 for (i = 0; i < NUM_CHANS; i++) {
556 u32 dimm_dod[3];
557 /* Check if the channel is active */
558 if (!(control & (1 << (8 + i))))
559 continue;
560
561 /* Check if the channel is disabled */
562 if (status & (1 << i))
563 continue;
564
565 pdev = get_pdev_slot_func(socket, i + 4, 1);
566 if (!pdev) {
567 i7core_printk(KERN_ERR, "Couldn't find socket %d "
568 "fn %d.%d!!!\n",
569 socket, i + 4, 1);
570 return -ENODEV;
571 }
572 /* Devices 4-6 function 1 */
573 pci_read_config_dword(pdev,
574 MC_DOD_CH_DIMM0, &dimm_dod[0]);
575 pci_read_config_dword(pdev,
576 MC_DOD_CH_DIMM1, &dimm_dod[1]);
577 pci_read_config_dword(pdev,
578 MC_DOD_CH_DIMM2, &dimm_dod[2]);
579 496
580 (*channels)++; 497static int get_dimm_config(struct mem_ctl_info *mci)
581
582 for (j = 0; j < 3; j++) {
583 if (!DIMM_PRESENT(dimm_dod[j]))
584 continue;
585 (*csrows)++;
586 }
587 }
588
589 debugf0("Number of active channels on socket %d: %d\n",
590 socket, *channels);
591
592 return 0;
593}
594
595static int get_dimm_config(const struct mem_ctl_info *mci)
596{ 498{
597 struct i7core_pvt *pvt = mci->pvt_info; 499 struct i7core_pvt *pvt = mci->pvt_info;
598 struct csrow_info *csr;
599 struct pci_dev *pdev; 500 struct pci_dev *pdev;
600 int i, j; 501 int i, j;
601 int csrow = 0;
602 unsigned long last_page = 0;
603 enum edac_type mode; 502 enum edac_type mode;
604 enum mem_type mtype; 503 enum mem_type mtype;
504 struct dimm_info *dimm;
605 505
606 /* Get data from the MC register, function 0 */ 506 /* Get data from the MC register, function 0 */
607 pdev = pvt->pci_mcr[0]; 507 pdev = pvt->pci_mcr[0];
@@ -657,21 +557,20 @@ static int get_dimm_config(const struct mem_ctl_info *mci)
657 pci_read_config_dword(pvt->pci_ch[i][0], 557 pci_read_config_dword(pvt->pci_ch[i][0],
658 MC_CHANNEL_DIMM_INIT_PARAMS, &data); 558 MC_CHANNEL_DIMM_INIT_PARAMS, &data);
659 559
660 pvt->channel[i].ranks = (data & QUAD_RANK_PRESENT) ? 560
661 4 : 2; 561 if (data & THREE_DIMMS_PRESENT)
562 pvt->channel[i].is_3dimms_present = true;
563
564 if (data & SINGLE_QUAD_RANK_PRESENT)
565 pvt->channel[i].is_single_4rank = true;
566
567 if (data & QUAD_RANK_PRESENT)
568 pvt->channel[i].has_4rank = true;
662 569
663 if (data & REGISTERED_DIMM) 570 if (data & REGISTERED_DIMM)
664 mtype = MEM_RDDR3; 571 mtype = MEM_RDDR3;
665 else 572 else
666 mtype = MEM_DDR3; 573 mtype = MEM_DDR3;
667#if 0
668 if (data & THREE_DIMMS_PRESENT)
669 pvt->channel[i].dimms = 3;
670 else if (data & SINGLE_QUAD_RANK_PRESENT)
671 pvt->channel[i].dimms = 1;
672 else
673 pvt->channel[i].dimms = 2;
674#endif
675 574
676 /* Devices 4-6 function 1 */ 575 /* Devices 4-6 function 1 */
677 pci_read_config_dword(pvt->pci_ch[i][1], 576 pci_read_config_dword(pvt->pci_ch[i][1],
@@ -682,11 +581,13 @@ static int get_dimm_config(const struct mem_ctl_info *mci)
682 MC_DOD_CH_DIMM2, &dimm_dod[2]); 581 MC_DOD_CH_DIMM2, &dimm_dod[2]);
683 582
684 debugf0("Ch%d phy rd%d, wr%d (0x%08x): " 583 debugf0("Ch%d phy rd%d, wr%d (0x%08x): "
685 "%d ranks, %cDIMMs\n", 584 "%s%s%s%cDIMMs\n",
686 i, 585 i,
687 RDLCH(pvt->info.ch_map, i), WRLCH(pvt->info.ch_map, i), 586 RDLCH(pvt->info.ch_map, i), WRLCH(pvt->info.ch_map, i),
688 data, 587 data,
689 pvt->channel[i].ranks, 588 pvt->channel[i].is_3dimms_present ? "3DIMMS " : "",
589 pvt->channel[i].is_single_4rank ? "SINGLE_4R " : "",
590 pvt->channel[i].has_4rank ? "HAS_4R " : "",
690 (data & REGISTERED_DIMM) ? 'R' : 'U'); 591 (data & REGISTERED_DIMM) ? 'R' : 'U');
691 592
692 for (j = 0; j < 3; j++) { 593 for (j = 0; j < 3; j++) {
@@ -696,6 +597,8 @@ static int get_dimm_config(const struct mem_ctl_info *mci)
696 if (!DIMM_PRESENT(dimm_dod[j])) 597 if (!DIMM_PRESENT(dimm_dod[j]))
697 continue; 598 continue;
698 599
600 dimm = EDAC_DIMM_PTR(mci->layers, mci->dimms, mci->n_layers,
601 i, j, 0);
699 banks = numbank(MC_DOD_NUMBANK(dimm_dod[j])); 602 banks = numbank(MC_DOD_NUMBANK(dimm_dod[j]));
700 ranks = numrank(MC_DOD_NUMRANK(dimm_dod[j])); 603 ranks = numrank(MC_DOD_NUMRANK(dimm_dod[j]));
701 rows = numrow(MC_DOD_NUMROW(dimm_dod[j])); 604 rows = numrow(MC_DOD_NUMROW(dimm_dod[j]));
@@ -704,8 +607,6 @@ static int get_dimm_config(const struct mem_ctl_info *mci)
704 /* DDR3 has 8 I/O banks */ 607 /* DDR3 has 8 I/O banks */
705 size = (rows * cols * banks * ranks) >> (20 - 3); 608 size = (rows * cols * banks * ranks) >> (20 - 3);
706 609
707 pvt->channel[i].dimms++;
708
709 debugf0("\tdimm %d %d MiB offset: %x, " 610 debugf0("\tdimm %d %d MiB offset: %x, "
710 "bank: %d, rank: %d, row: %#x, col: %#x\n", 611 "bank: %d, rank: %d, row: %#x, col: %#x\n",
711 j, size, 612 j, size,
@@ -714,44 +615,28 @@ static int get_dimm_config(const struct mem_ctl_info *mci)
714 615
715 npages = MiB_TO_PAGES(size); 616 npages = MiB_TO_PAGES(size);
716 617
717 csr = &mci->csrows[csrow]; 618 dimm->nr_pages = npages;
718 csr->first_page = last_page + 1;
719 last_page += npages;
720 csr->last_page = last_page;
721 csr->nr_pages = npages;
722
723 csr->page_mask = 0;
724 csr->grain = 8;
725 csr->csrow_idx = csrow;
726 csr->nr_channels = 1;
727
728 csr->channels[0].chan_idx = i;
729 csr->channels[0].ce_count = 0;
730
731 pvt->csrow_map[i][j] = csrow;
732 619
733 switch (banks) { 620 switch (banks) {
734 case 4: 621 case 4:
735 csr->dtype = DEV_X4; 622 dimm->dtype = DEV_X4;
736 break; 623 break;
737 case 8: 624 case 8:
738 csr->dtype = DEV_X8; 625 dimm->dtype = DEV_X8;
739 break; 626 break;
740 case 16: 627 case 16:
741 csr->dtype = DEV_X16; 628 dimm->dtype = DEV_X16;
742 break; 629 break;
743 default: 630 default:
744 csr->dtype = DEV_UNKNOWN; 631 dimm->dtype = DEV_UNKNOWN;
745 } 632 }
746 633
747 csr->edac_mode = mode; 634 snprintf(dimm->label, sizeof(dimm->label),
748 csr->mtype = mtype; 635 "CPU#%uChannel#%u_DIMM#%u",
749 snprintf(csr->channels[0].label, 636 pvt->i7core_dev->socket, i, j);
750 sizeof(csr->channels[0].label), 637 dimm->grain = 8;
751 "CPU#%uChannel#%u_DIMM#%u", 638 dimm->edac_mode = mode;
752 pvt->i7core_dev->socket, i, j); 639 dimm->mtype = mtype;
753
754 csrow++;
755 } 640 }
756 641
757 pci_read_config_dword(pdev, MC_SAG_CH_0, &value[0]); 642 pci_read_config_dword(pdev, MC_SAG_CH_0, &value[0]);
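The size computed above folds the 8-byte data width into the MiB shift: size_in_MiB = (rows * cols * banks * ranks) >> (20 - 3). A worked sketch with a hypothetical DDR3 geometry (the numbers are illustrative, not taken from the patch; 4 KiB pages assumed):

    #include <stdio.h>

    #define PAGE_SHIFT 12
    #define MiB_TO_PAGES(mb) ((mb) << (20 - PAGE_SHIFT))

    int main(void)
    {
            /* Hypothetical geometry: 2^14 rows, 2^10 columns, 8 banks,
             * 2 ranks, 8 bytes (64 bits) moved per column address. */
            unsigned rows = 1 << 14, cols = 1 << 10, banks = 8, ranks = 2;

            /* >> (20 - 3): divide by 2^20 for MiB, multiply by 2^3 bytes */
            unsigned size = (rows * cols * banks * ranks) >> (20 - 3);

            printf("%u MiB -> %lu pages\n", size,
                   (unsigned long)MiB_TO_PAGES(size));
            /* prints: 2048 MiB -> 524288 pages */
            return 0;
    }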
@@ -1567,22 +1452,16 @@ error:
1567/**************************************************************************** 1452/****************************************************************************
1568 Error check routines 1453 Error check routines
1569 ****************************************************************************/ 1454 ****************************************************************************/
1570static void i7core_rdimm_update_csrow(struct mem_ctl_info *mci, 1455static void i7core_rdimm_update_errcount(struct mem_ctl_info *mci,
1571 const int chan, 1456 const int chan,
1572 const int dimm, 1457 const int dimm,
1573 const int add) 1458 const int add)
1574{ 1459{
1575 char *msg; 1460 int i;
1576 struct i7core_pvt *pvt = mci->pvt_info;
1577 int row = pvt->csrow_map[chan][dimm], i;
1578 1461
1579 for (i = 0; i < add; i++) { 1462 for (i = 0; i < add; i++) {
1580 msg = kasprintf(GFP_KERNEL, "Corrected error " 1463 edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 0, 0, 0,
1581 "(Socket=%d channel=%d dimm=%d)", 1464 chan, dimm, -1, "error", "", NULL);
1582 pvt->i7core_dev->socket, chan, dimm);
1583
1584 edac_mc_handle_fbd_ce(mci, row, 0, msg);
1585 kfree (msg);
1586 } 1465 }
1587} 1466}
1588 1467
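Every converted call site in this patch passes the same argument order to the new helper; as best it can be inferred from those call sites, the prototype is roughly the following (edac_core.h carries the authoritative version):

    /* Inferred from the call sites in this patch. The three layer
     * arguments locate the error within the configured layers (here:
     * channel, dimm); -1 marks a position that is unknown or unused. */
    void edac_mc_handle_error(const enum hw_event_mc_err_type type,
                              struct mem_ctl_info *mci,
                              const unsigned long page_frame_number,
                              const unsigned long offset_in_page,
                              const unsigned long syndrome,
                              const int layer0, const int layer1, const int layer2,
                              const char *msg, const char *other_detail,
                              const void *arch_log);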
@@ -1623,11 +1502,11 @@ static void i7core_rdimm_update_ce_count(struct mem_ctl_info *mci,
1623 1502
1624 /* update the edac core */ 1503
1625 if (add0 != 0) 1504 if (add0 != 0)
1626 i7core_rdimm_update_csrow(mci, chan, 0, add0); 1505 i7core_rdimm_update_errcount(mci, chan, 0, add0);
1627 if (add1 != 0) 1506 if (add1 != 0)
1628 i7core_rdimm_update_csrow(mci, chan, 1, add1); 1507 i7core_rdimm_update_errcount(mci, chan, 1, add1);
1629 if (add2 != 0) 1508 if (add2 != 0)
1630 i7core_rdimm_update_csrow(mci, chan, 2, add2); 1509 i7core_rdimm_update_errcount(mci, chan, 2, add2);
1631 1510
1632} 1511}
1633 1512
@@ -1747,20 +1626,30 @@ static void i7core_mce_output_error(struct mem_ctl_info *mci,
1747 const struct mce *m) 1626 const struct mce *m)
1748{ 1627{
1749 struct i7core_pvt *pvt = mci->pvt_info; 1628 struct i7core_pvt *pvt = mci->pvt_info;
1750 char *type, *optype, *err, *msg; 1629 char *type, *optype, *err, msg[80];
1630 enum hw_event_mc_err_type tp_event;
1751 unsigned long error = m->status & 0x1ff0000l; 1631 unsigned long error = m->status & 0x1ff0000l;
1632 bool uncorrected_error = m->status & 1ll << 61; /* MCi_STATUS.UC */
1633 bool ripv = m->mcgstatus & 1;
1752 u32 optypenum = (m->status >> 4) & 0x07; 1634 u32 optypenum = (m->status >> 4) & 0x07;
1753 u32 core_err_cnt = (m->status >> 38) & 0x7fff; 1635 u32 core_err_cnt = (m->status >> 38) & 0x7fff;
1754 u32 dimm = (m->misc >> 16) & 0x3; 1636 u32 dimm = (m->misc >> 16) & 0x3;
1755 u32 channel = (m->misc >> 18) & 0x3; 1637 u32 channel = (m->misc >> 18) & 0x3;
1756 u32 syndrome = m->misc >> 32; 1638 u32 syndrome = m->misc >> 32;
1757 u32 errnum = find_first_bit(&error, 32); 1639 u32 errnum = find_first_bit(&error, 32);
1758 int csrow;
1759 1640
1760 if (m->mcgstatus & 1) 1641 if (uncorrected_error) {
1761 type = "FATAL"; 1642 if (!ripv) {
1762 else 1643 type = "FATAL";
1763 type = "NON_FATAL"; 1644 tp_event = HW_EVENT_ERR_FATAL;
1645 } else {
1646 type = "NON_FATAL";
1647 tp_event = HW_EVENT_ERR_UNCORRECTED;
1648 }
1649 } else {
1650 type = "CORRECTED";
1651 tp_event = HW_EVENT_ERR_CORRECTED;
1652 }
1764 1653
1765 switch (optypenum) { 1654 switch (optypenum) {
1766 case 0: 1655 case 0:
@@ -1815,27 +1704,20 @@ static void i7core_mce_output_error(struct mem_ctl_info *mci,
1815 err = "unknown"; 1704 err = "unknown";
1816 } 1705 }
1817 1706
1818 /* FIXME: should convert addr into bank and rank information */ 1707 snprintf(msg, sizeof(msg), "count=%d %s", core_err_cnt, optype);
1819 msg = kasprintf(GFP_ATOMIC,
1820 "%s (addr = 0x%08llx, cpu=%d, Dimm=%d, Channel=%d, "
1821 "syndrome=0x%08x, count=%d, Err=%08llx:%08llx (%s: %s))\n",
1822 type, (long long) m->addr, m->cpu, dimm, channel,
1823 syndrome, core_err_cnt, (long long)m->status,
1824 (long long)m->misc, optype, err);
1825
1826 debugf0("%s", msg);
1827
1828 csrow = pvt->csrow_map[channel][dimm];
1829 1708
1830 /* Call the helper to output message */ 1709 /*
1831 if (m->mcgstatus & 1) 1710 * Call the helper to output message
1832 edac_mc_handle_fbd_ue(mci, csrow, 0, 1711 * FIXME: what to do if core_err_cnt > 1? Currently, it generates
1833 0 /* FIXME: should be channel here */, msg); 1712 * only one event
1834 else if (!pvt->is_registered) 1713 */
1835 edac_mc_handle_fbd_ce(mci, csrow, 1714 if (uncorrected_error || !pvt->is_registered)
1836 0 /* FIXME: should be channel here */, msg); 1715 edac_mc_handle_error(tp_event, mci,
1837 1716 m->addr >> PAGE_SHIFT,
1838 kfree(msg); 1717 m->addr & ~PAGE_MASK,
1718 syndrome,
1719 channel, dimm, -1,
1720 err, msg, m);
1839} 1721}
1840 1722
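For reference, the machine-check fields picked apart above, restated as a stand-alone sketch; the bit positions are copied from the code itself, not re-derived from a datasheet:

    #include <stdint.h>

    struct i7core_mce_fields {
            uint32_t optype;        /* status[6:4]: operation type */
            uint32_t core_err_cnt;  /* status[52:38]: corrected-error count */
            uint32_t dimm;          /* misc[17:16]: failing DIMM */
            uint32_t channel;       /* misc[19:18]: failing channel */
            uint32_t syndrome;      /* misc[63:32]: ECC syndrome */
    };

    static struct i7core_mce_fields i7core_decode(uint64_t status, uint64_t misc)
    {
            struct i7core_mce_fields f = {
                    .optype        = (status >> 4) & 0x07,
                    .core_err_cnt  = (status >> 38) & 0x7fff,
                    .dimm          = (misc >> 16) & 0x3,
                    .channel       = (misc >> 18) & 0x3,
                    .syndrome      = misc >> 32,
            };
            return f;
    }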
1841/* 1723/*
@@ -2252,15 +2134,19 @@ static int i7core_register_mci(struct i7core_dev *i7core_dev)
2252{ 2134{
2253 struct mem_ctl_info *mci; 2135 struct mem_ctl_info *mci;
2254 struct i7core_pvt *pvt; 2136 struct i7core_pvt *pvt;
2255 int rc, channels, csrows; 2137 int rc;
2256 2138 struct edac_mc_layer layers[2];
2257 /* Check the number of active and not disabled channels */
2258 rc = i7core_get_active_channels(i7core_dev->socket, &channels, &csrows);
2259 if (unlikely(rc < 0))
2260 return rc;
2261 2139
2262 /* allocate a new MC control structure */ 2140 /* allocate a new MC control structure */
2263 mci = edac_mc_alloc(sizeof(*pvt), csrows, channels, i7core_dev->socket); 2141
2142 layers[0].type = EDAC_MC_LAYER_CHANNEL;
2143 layers[0].size = NUM_CHANS;
2144 layers[0].is_virt_csrow = false;
2145 layers[1].type = EDAC_MC_LAYER_SLOT;
2146 layers[1].size = MAX_DIMMS;
2147 layers[1].is_virt_csrow = true;
2148 mci = edac_mc_alloc(i7core_dev->socket, ARRAY_SIZE(layers), layers,
2149 sizeof(*pvt));
2264 if (unlikely(!mci)) 2150 if (unlikely(!mci))
2265 return -ENOMEM; 2151 return -ENOMEM;
2266 2152
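With the two-layer {channel, slot} configuration above, EDAC_DIMM_PTR() linearizes the per-layer coordinates into the flat dimms[] array. A sketch of the equivalent index arithmetic for the two-layer case (the real macro in the EDAC headers also copes with one- and three-layer setups):

    /* Rough equivalent of
     *      dimm = EDAC_DIMM_PTR(mci->layers, mci->dimms, mci->n_layers,
     *                           chan, slot, 0);
     * for two layers: row-major over {layers[0].size, layers[1].size}. */
    static struct dimm_info *dimm_at(struct mem_ctl_info *mci, int chan, int slot)
    {
            int idx = chan * mci->layers[1].size + slot;

            return &mci->dimms[idx];
    }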
diff --git a/drivers/edac/i82443bxgx_edac.c b/drivers/edac/i82443bxgx_edac.c
index 3bf2b2f490e7..52072c28a8a6 100644
--- a/drivers/edac/i82443bxgx_edac.c
+++ b/drivers/edac/i82443bxgx_edac.c
@@ -12,7 +12,7 @@
12 * 440GX fix by Jason Uhlenkott <juhlenko@akamai.com>. 12 * 440GX fix by Jason Uhlenkott <juhlenko@akamai.com>.
13 * 13 *
14 * Written with reference to 82443BX Host Bridge Datasheet: 14 * Written with reference to 82443BX Host Bridge Datasheet:
15 * http://download.intel.com/design/chipsets/datashts/29063301.pdf 15 * http://download.intel.com/design/chipsets/datashts/29063301.pdf
16 * references to this document given in []. 16 * references to this document given in [].
17 * 17 *
18 * This module doesn't support the 440LX, but it may be possible to 18 * This module doesn't support the 440LX, but it may be possible to
@@ -156,19 +156,19 @@ static int i82443bxgx_edacmc_process_error_info(struct mem_ctl_info *mci,
156 if (info->eap & I82443BXGX_EAP_OFFSET_SBE) { 156 if (info->eap & I82443BXGX_EAP_OFFSET_SBE) {
157 error_found = 1; 157 error_found = 1;
158 if (handle_errors) 158 if (handle_errors)
159 edac_mc_handle_ce(mci, page, pageoffset, 159 edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
160 /* 440BX/GX don't make syndrome information 160 page, pageoffset, 0,
161 * available */ 161 edac_mc_find_csrow_by_page(mci, page),
162 0, edac_mc_find_csrow_by_page(mci, page), 0, 162 0, -1, mci->ctl_name, "", NULL);
163 mci->ctl_name);
164 } 163 }
165 164
166 if (info->eap & I82443BXGX_EAP_OFFSET_MBE) { 165 if (info->eap & I82443BXGX_EAP_OFFSET_MBE) {
167 error_found = 1; 166 error_found = 1;
168 if (handle_errors) 167 if (handle_errors)
169 edac_mc_handle_ue(mci, page, pageoffset, 168 edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci,
170 edac_mc_find_csrow_by_page(mci, page), 169 page, pageoffset, 0,
171 mci->ctl_name); 170 edac_mc_find_csrow_by_page(mci, page),
171 0, -1, mci->ctl_name, "", NULL);
172 } 172 }
173 173
174 return error_found; 174 return error_found;
@@ -189,6 +189,7 @@ static void i82443bxgx_init_csrows(struct mem_ctl_info *mci,
189 enum mem_type mtype) 189 enum mem_type mtype)
190{ 190{
191 struct csrow_info *csrow; 191 struct csrow_info *csrow;
192 struct dimm_info *dimm;
192 int index; 193 int index;
193 u8 drbar, dramc; 194 u8 drbar, dramc;
194 u32 row_base, row_high_limit, row_high_limit_last; 195 u32 row_base, row_high_limit, row_high_limit_last;
@@ -197,6 +198,8 @@ static void i82443bxgx_init_csrows(struct mem_ctl_info *mci,
197 row_high_limit_last = 0; 198 row_high_limit_last = 0;
198 for (index = 0; index < mci->nr_csrows; index++) { 199 for (index = 0; index < mci->nr_csrows; index++) {
199 csrow = &mci->csrows[index]; 200 csrow = &mci->csrows[index];
201 dimm = csrow->channels[0].dimm;
202
200 pci_read_config_byte(pdev, I82443BXGX_DRB + index, &drbar); 203 pci_read_config_byte(pdev, I82443BXGX_DRB + index, &drbar);
201 debugf1("MC%d: %s: %s() Row=%d DRB = %#0x\n", 204 debugf1("MC%d: %s: %s() Row=%d DRB = %#0x\n",
202 mci->mc_idx, __FILE__, __func__, index, drbar); 205 mci->mc_idx, __FILE__, __func__, index, drbar);
@@ -217,14 +220,14 @@ static void i82443bxgx_init_csrows(struct mem_ctl_info *mci,
217 row_base = row_high_limit_last; 220 row_base = row_high_limit_last;
218 csrow->first_page = row_base >> PAGE_SHIFT; 221 csrow->first_page = row_base >> PAGE_SHIFT;
219 csrow->last_page = (row_high_limit >> PAGE_SHIFT) - 1; 222 csrow->last_page = (row_high_limit >> PAGE_SHIFT) - 1;
220 csrow->nr_pages = csrow->last_page - csrow->first_page + 1; 223 dimm->nr_pages = csrow->last_page - csrow->first_page + 1;
221 /* EAP reports in 4 kilobyte granularity [61] */ 224
222 csrow->grain = 1 << 12; 225 dimm->grain = 1 << 12;
223 csrow->mtype = mtype; 226 dimm->mtype = mtype;
224 /* I don't think 440BX can tell you device type? FIXME? */ 227 /* I don't think 440BX can tell you device type? FIXME? */
225 csrow->dtype = DEV_UNKNOWN; 228 dimm->dtype = DEV_UNKNOWN;
226 /* Mode is global to all rows on 440BX */ 229 /* Mode is global to all rows on 440BX */
227 csrow->edac_mode = edac_mode; 230 dimm->edac_mode = edac_mode;
228 row_high_limit_last = row_high_limit; 231 row_high_limit_last = row_high_limit;
229 } 232 }
230} 233}
@@ -232,6 +235,7 @@ static void i82443bxgx_init_csrows(struct mem_ctl_info *mci,
232static int i82443bxgx_edacmc_probe1(struct pci_dev *pdev, int dev_idx) 235static int i82443bxgx_edacmc_probe1(struct pci_dev *pdev, int dev_idx)
233{ 236{
234 struct mem_ctl_info *mci; 237 struct mem_ctl_info *mci;
238 struct edac_mc_layer layers[2];
235 u8 dramc; 239 u8 dramc;
236 u32 nbxcfg, ecc_mode; 240 u32 nbxcfg, ecc_mode;
237 enum mem_type mtype; 241 enum mem_type mtype;
@@ -245,8 +249,13 @@ static int i82443bxgx_edacmc_probe1(struct pci_dev *pdev, int dev_idx)
245 if (pci_read_config_dword(pdev, I82443BXGX_NBXCFG, &nbxcfg)) 249 if (pci_read_config_dword(pdev, I82443BXGX_NBXCFG, &nbxcfg))
246 return -EIO; 250 return -EIO;
247 251
248 mci = edac_mc_alloc(0, I82443BXGX_NR_CSROWS, I82443BXGX_NR_CHANS, 0); 252 layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
249 253 layers[0].size = I82443BXGX_NR_CSROWS;
254 layers[0].is_virt_csrow = true;
255 layers[1].type = EDAC_MC_LAYER_CHANNEL;
256 layers[1].size = I82443BXGX_NR_CHANS;
257 layers[1].is_virt_csrow = false;
258 mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers, 0);
250 if (mci == NULL) 259 if (mci == NULL)
251 return -ENOMEM; 260 return -ENOMEM;
252 261
diff --git a/drivers/edac/i82860_edac.c b/drivers/edac/i82860_edac.c
index c779092d18d1..08045059d10b 100644
--- a/drivers/edac/i82860_edac.c
+++ b/drivers/edac/i82860_edac.c
@@ -99,6 +99,7 @@ static int i82860_process_error_info(struct mem_ctl_info *mci,
99 struct i82860_error_info *info, 99 struct i82860_error_info *info,
100 int handle_errors) 100 int handle_errors)
101{ 101{
102 struct dimm_info *dimm;
102 int row; 103 int row;
103 104
104 if (!(info->errsts2 & 0x0003)) 105 if (!(info->errsts2 & 0x0003))
@@ -108,18 +109,25 @@ static int i82860_process_error_info(struct mem_ctl_info *mci,
108 return 1; 109 return 1;
109 110
110 if ((info->errsts ^ info->errsts2) & 0x0003) { 111 if ((info->errsts ^ info->errsts2) & 0x0003) {
111 edac_mc_handle_ce_no_info(mci, "UE overwrote CE"); 112 edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 0, 0, 0,
113 -1, -1, -1, "UE overwrote CE", "", NULL);
112 info->errsts = info->errsts2; 114 info->errsts = info->errsts2;
113 } 115 }
114 116
115 info->eap >>= PAGE_SHIFT; 117 info->eap >>= PAGE_SHIFT;
116 row = edac_mc_find_csrow_by_page(mci, info->eap); 118 row = edac_mc_find_csrow_by_page(mci, info->eap);
119 dimm = mci->csrows[row].channels[0].dimm;
117 120
118 if (info->errsts & 0x0002) 121 if (info->errsts & 0x0002)
119 edac_mc_handle_ue(mci, info->eap, 0, row, "i82860 UE"); 122 edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci,
123 info->eap, 0, 0,
124 dimm->location[0], dimm->location[1], -1,
125 "i82860 UE", "", NULL);
120 else 126 else
121 edac_mc_handle_ce(mci, info->eap, 0, info->derrsyn, row, 0, 127 edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
122 "i82860 UE"); 128 info->eap, 0, info->derrsyn,
129 dimm->location[0], dimm->location[1], -1,
130 "i82860 CE", "", NULL);
123 131
124 return 1; 132 return 1;
125} 133}
@@ -140,6 +148,7 @@ static void i82860_init_csrows(struct mem_ctl_info *mci, struct pci_dev *pdev)
140 u16 value; 148 u16 value;
141 u32 cumul_size; 149 u32 cumul_size;
142 struct csrow_info *csrow; 150 struct csrow_info *csrow;
151 struct dimm_info *dimm;
143 int index; 152 int index;
144 153
145 pci_read_config_word(pdev, I82860_MCHCFG, &mchcfg_ddim); 154 pci_read_config_word(pdev, I82860_MCHCFG, &mchcfg_ddim);
@@ -153,6 +162,8 @@ static void i82860_init_csrows(struct mem_ctl_info *mci, struct pci_dev *pdev)
153 */ 162 */
154 for (index = 0; index < mci->nr_csrows; index++) { 163 for (index = 0; index < mci->nr_csrows; index++) {
155 csrow = &mci->csrows[index]; 164 csrow = &mci->csrows[index];
165 dimm = csrow->channels[0].dimm;
166
156 pci_read_config_word(pdev, I82860_GBA + index * 2, &value); 167 pci_read_config_word(pdev, I82860_GBA + index * 2, &value);
157 cumul_size = (value & I82860_GBA_MASK) << 168 cumul_size = (value & I82860_GBA_MASK) <<
158 (I82860_GBA_SHIFT - PAGE_SHIFT); 169 (I82860_GBA_SHIFT - PAGE_SHIFT);
@@ -164,30 +175,38 @@ static void i82860_init_csrows(struct mem_ctl_info *mci, struct pci_dev *pdev)
164 175
165 csrow->first_page = last_cumul_size; 176 csrow->first_page = last_cumul_size;
166 csrow->last_page = cumul_size - 1; 177 csrow->last_page = cumul_size - 1;
167 csrow->nr_pages = cumul_size - last_cumul_size; 178 dimm->nr_pages = cumul_size - last_cumul_size;
168 last_cumul_size = cumul_size; 179 last_cumul_size = cumul_size;
169 csrow->grain = 1 << 12; /* I82860_EAP has 4KiB resolution */ 180 dimm->grain = 1 << 12; /* I82860_EAP has 4KiB resolution */
170 csrow->mtype = MEM_RMBS; 181 dimm->mtype = MEM_RMBS;
171 csrow->dtype = DEV_UNKNOWN; 182 dimm->dtype = DEV_UNKNOWN;
172 csrow->edac_mode = mchcfg_ddim ? EDAC_SECDED : EDAC_NONE; 183 dimm->edac_mode = mchcfg_ddim ? EDAC_SECDED : EDAC_NONE;
173 } 184 }
174} 185}
175 186
176static int i82860_probe1(struct pci_dev *pdev, int dev_idx) 187static int i82860_probe1(struct pci_dev *pdev, int dev_idx)
177{ 188{
178 struct mem_ctl_info *mci; 189 struct mem_ctl_info *mci;
190 struct edac_mc_layer layers[2];
179 struct i82860_error_info discard; 191 struct i82860_error_info discard;
180 192
181 /* RDRAM has channels but these don't map onto the abstractions that 193 /*
182 edac uses. 194 * RDRAM has channels but these don't map onto the csrow abstraction.
183 The device groups from the GRA registers seem to map reasonably 195 * According to the datasheet, there are 2 Rambus channels, supporting
184 well onto the notion of a chip select row. 196 * up to 16 direct RDRAM devices.
185 There are 16 GRA registers and since the name is associated with 197 * The device groups from the GRA registers seem to map reasonably
186 the channel and the GRA registers map to physical devices so we are 198 * well onto the notion of a chip select row.
187 going to make 1 channel for group. 199 * There are 16 GRA registers and since the name is associated with
200 * the channel and the GRA registers map to physical devices, we are
201 * going to make 1 channel per group.
188 */ 202 */
189 mci = edac_mc_alloc(0, 16, 1, 0); 203 layers[0].type = EDAC_MC_LAYER_CHANNEL;
190 204 layers[0].size = 2;
205 layers[0].is_virt_csrow = true;
206 layers[1].type = EDAC_MC_LAYER_SLOT;
207 layers[1].size = 8;
208 layers[1].is_virt_csrow = true;
209 mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers, 0);
191 if (!mci) 210 if (!mci)
192 return -ENOMEM; 211 return -ENOMEM;
193 212
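The 16 GRA device groups are thus modelled as 2 channels of 8 slots. Assuming the flat group index interleaves the two Rambus channels (an assumption for illustration; the patch itself only fixes the layer sizes), the coordinates would fall out as:

    /* Assumed split of a flat GRA group index (0..15) into the
     * {channel, slot} layers declared above: 2 channels x 8 slots.
     * The interleave order is an illustration, not from the patch. */
    static void gra_index_to_layers(int index, int *chan, int *slot)
    {
            *chan = index & 1;
            *slot = index >> 1;
    }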
diff --git a/drivers/edac/i82875p_edac.c b/drivers/edac/i82875p_edac.c
index 10f15d85fb5e..b613e31c16e5 100644
--- a/drivers/edac/i82875p_edac.c
+++ b/drivers/edac/i82875p_edac.c
@@ -38,7 +38,8 @@
38#endif /* PCI_DEVICE_ID_INTEL_82875_6 */ 38#endif /* PCI_DEVICE_ID_INTEL_82875_6 */
39 39
40/* four csrows in dual channel, eight in single channel */ 40/* four csrows in dual channel, eight in single channel */
41#define I82875P_NR_CSROWS(nr_chans) (8/(nr_chans)) 41#define I82875P_NR_DIMMS 8
42#define I82875P_NR_CSROWS(nr_chans) (I82875P_NR_DIMMS / (nr_chans))
42 43
43/* Intel 82875p register addresses - device 0 function 0 - DRAM Controller */ 44/* Intel 82875p register addresses - device 0 function 0 - DRAM Controller */
44#define I82875P_EAP 0x58 /* Error Address Pointer (32b) 45#define I82875P_EAP 0x58 /* Error Address Pointer (32b)
@@ -235,7 +236,9 @@ static int i82875p_process_error_info(struct mem_ctl_info *mci,
235 return 1; 236 return 1;
236 237
237 if ((info->errsts ^ info->errsts2) & 0x0081) { 238 if ((info->errsts ^ info->errsts2) & 0x0081) {
238 edac_mc_handle_ce_no_info(mci, "UE overwrote CE"); 239 edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 0, 0, 0,
240 -1, -1, -1,
241 "UE overwrote CE", "", NULL);
239 info->errsts = info->errsts2; 242 info->errsts = info->errsts2;
240 } 243 }
241 244
@@ -243,11 +246,15 @@ static int i82875p_process_error_info(struct mem_ctl_info *mci,
243 row = edac_mc_find_csrow_by_page(mci, info->eap); 246 row = edac_mc_find_csrow_by_page(mci, info->eap);
244 247
245 if (info->errsts & 0x0080) 248 if (info->errsts & 0x0080)
246 edac_mc_handle_ue(mci, info->eap, 0, row, "i82875p UE"); 249 edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci,
250 info->eap, 0, 0,
251 row, -1, -1,
252 "i82875p UE", "", NULL);
247 else 253 else
248 edac_mc_handle_ce(mci, info->eap, 0, info->derrsyn, row, 254 edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
249 multi_chan ? (info->des & 0x1) : 0, 255 info->eap, 0, info->derrsyn,
250 "i82875p CE"); 256 row, multi_chan ? (info->des & 0x1) : 0,
257 -1, "i82875p CE", "", NULL);
251 258
252 return 1; 259 return 1;
253} 260}
@@ -342,11 +349,13 @@ static void i82875p_init_csrows(struct mem_ctl_info *mci,
342 void __iomem * ovrfl_window, u32 drc) 349 void __iomem * ovrfl_window, u32 drc)
343{ 350{
344 struct csrow_info *csrow; 351 struct csrow_info *csrow;
352 struct dimm_info *dimm;
353 unsigned nr_chans = dual_channel_active(drc) + 1;
345 unsigned long last_cumul_size; 354 unsigned long last_cumul_size;
346 u8 value; 355 u8 value;
347 u32 drc_ddim; /* DRAM Data Integrity Mode 0=none,2=edac */ 356 u32 drc_ddim; /* DRAM Data Integrity Mode 0=none,2=edac */
348 u32 cumul_size; 357 u32 cumul_size, nr_pages;
349 int index; 358 int index, j;
350 359
351 drc_ddim = (drc >> 18) & 0x1; 360 drc_ddim = (drc >> 18) & 0x1;
352 last_cumul_size = 0; 361 last_cumul_size = 0;
@@ -369,12 +378,18 @@ static void i82875p_init_csrows(struct mem_ctl_info *mci,
369 378
370 csrow->first_page = last_cumul_size; 379 csrow->first_page = last_cumul_size;
371 csrow->last_page = cumul_size - 1; 380 csrow->last_page = cumul_size - 1;
372 csrow->nr_pages = cumul_size - last_cumul_size; 381 nr_pages = cumul_size - last_cumul_size;
373 last_cumul_size = cumul_size; 382 last_cumul_size = cumul_size;
374 csrow->grain = 1 << 12; /* I82875P_EAP has 4KiB resolution */ 383
375 csrow->mtype = MEM_DDR; 384 for (j = 0; j < nr_chans; j++) {
376 csrow->dtype = DEV_UNKNOWN; 385 dimm = csrow->channels[j].dimm;
377 csrow->edac_mode = drc_ddim ? EDAC_SECDED : EDAC_NONE; 386
387 dimm->nr_pages = nr_pages / nr_chans;
388 dimm->grain = 1 << 12; /* I82875P_EAP has 4KiB resolution */
389 dimm->mtype = MEM_DDR;
390 dimm->dtype = DEV_UNKNOWN;
391 dimm->edac_mode = drc_ddim ? EDAC_SECDED : EDAC_NONE;
392 }
378 } 393 }
379} 394}
380 395
@@ -382,6 +397,7 @@ static int i82875p_probe1(struct pci_dev *pdev, int dev_idx)
382{ 397{
383 int rc = -ENODEV; 398 int rc = -ENODEV;
384 struct mem_ctl_info *mci; 399 struct mem_ctl_info *mci;
400 struct edac_mc_layer layers[2];
385 struct i82875p_pvt *pvt; 401 struct i82875p_pvt *pvt;
386 struct pci_dev *ovrfl_pdev; 402 struct pci_dev *ovrfl_pdev;
387 void __iomem *ovrfl_window; 403 void __iomem *ovrfl_window;
@@ -397,9 +413,14 @@ static int i82875p_probe1(struct pci_dev *pdev, int dev_idx)
397 return -ENODEV; 413 return -ENODEV;
398 drc = readl(ovrfl_window + I82875P_DRC); 414 drc = readl(ovrfl_window + I82875P_DRC);
399 nr_chans = dual_channel_active(drc) + 1; 415 nr_chans = dual_channel_active(drc) + 1;
400 mci = edac_mc_alloc(sizeof(*pvt), I82875P_NR_CSROWS(nr_chans),
401 nr_chans, 0);
402 416
417 layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
418 layers[0].size = I82875P_NR_CSROWS(nr_chans);
419 layers[0].is_virt_csrow = true;
420 layers[1].type = EDAC_MC_LAYER_CHANNEL;
421 layers[1].size = nr_chans;
422 layers[1].is_virt_csrow = false;
423 mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers, sizeof(*pvt));
403 if (!mci) { 424 if (!mci) {
404 rc = -ENOMEM; 425 rc = -ENOMEM;
405 goto fail0; 426 goto fail0;
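The renamed macro makes the invariant explicit: csrows times channels always covers the controller's 8 DIMM sites, whichever channel mode is active.

    #define I82875P_NR_DIMMS                8
    #define I82875P_NR_CSROWS(nr_chans)     (I82875P_NR_DIMMS / (nr_chans))

    /* nr_chans == 2 (dual channel):   4 csrows x 2 channels = 8 DIMMs
     * nr_chans == 1 (single channel): 8 csrows x 1 channel  = 8 DIMMs */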
diff --git a/drivers/edac/i82975x_edac.c b/drivers/edac/i82975x_edac.c
index 0cd8368f88f8..433332c7cdba 100644
--- a/drivers/edac/i82975x_edac.c
+++ b/drivers/edac/i82975x_edac.c
@@ -29,7 +29,8 @@
29#define PCI_DEVICE_ID_INTEL_82975_0 0x277c 29#define PCI_DEVICE_ID_INTEL_82975_0 0x277c
30#endif /* PCI_DEVICE_ID_INTEL_82975_0 */ 30#endif /* PCI_DEVICE_ID_INTEL_82975_0 */
31 31
32#define I82975X_NR_CSROWS(nr_chans) (8/(nr_chans)) 32#define I82975X_NR_DIMMS 8
33#define I82975X_NR_CSROWS(nr_chans) (I82975X_NR_DIMMS / (nr_chans))
33 34
34/* Intel 82975X register addresses - device 0 function 0 - DRAM Controller */ 35/* Intel 82975X register addresses - device 0 function 0 - DRAM Controller */
35#define I82975X_EAP 0x58 /* Dram Error Address Pointer (32b) 36#define I82975X_EAP 0x58 /* Dram Error Address Pointer (32b)
@@ -287,7 +288,8 @@ static int i82975x_process_error_info(struct mem_ctl_info *mci,
287 return 1; 288 return 1;
288 289
289 if ((info->errsts ^ info->errsts2) & 0x0003) { 290 if ((info->errsts ^ info->errsts2) & 0x0003) {
290 edac_mc_handle_ce_no_info(mci, "UE overwrote CE"); 291 edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 0, 0, 0,
292 -1, -1, -1, "UE overwrote CE", "", NULL);
291 info->errsts = info->errsts2; 293 info->errsts = info->errsts2;
292 } 294 }
293 295
@@ -309,13 +311,18 @@ static int i82975x_process_error_info(struct mem_ctl_info *mci,
309 chan = (mci->csrows[row].nr_channels == 1) ? 0 : info->eap & 1; 311 chan = (mci->csrows[row].nr_channels == 1) ? 0 : info->eap & 1;
310 offst = info->eap 312 offst = info->eap
311 & ((1 << PAGE_SHIFT) - 313 & ((1 << PAGE_SHIFT) -
312 (1 << mci->csrows[row].grain)); 314 (1 << mci->csrows[row].channels[chan].dimm->grain));
313 315
314 if (info->errsts & 0x0002) 316 if (info->errsts & 0x0002)
315 edac_mc_handle_ue(mci, page, offst , row, "i82975x UE"); 317 edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci,
318 page, offst, 0,
319 row, -1, -1,
320 "i82975x UE", "", NULL);
316 else 321 else
317 edac_mc_handle_ce(mci, page, offst, info->derrsyn, row, 322 edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
318 chan, "i82975x CE"); 323 page, offst, info->derrsyn,
324 row, chan ? chan : 0, -1,
325 "i82975x CE", "", NULL);
319 326
320 return 1; 327 return 1;
321} 328}
@@ -370,8 +377,10 @@ static void i82975x_init_csrows(struct mem_ctl_info *mci,
370 struct csrow_info *csrow; 377 struct csrow_info *csrow;
371 unsigned long last_cumul_size; 378 unsigned long last_cumul_size;
372 u8 value; 379 u8 value;
373 u32 cumul_size; 380 u32 cumul_size, nr_pages;
374 int index, chan; 381 int index, chan;
382 struct dimm_info *dimm;
383 enum dev_type dtype;
375 384
376 last_cumul_size = 0; 385 last_cumul_size = 0;
377 386
@@ -400,28 +409,33 @@ static void i82975x_init_csrows(struct mem_ctl_info *mci,
400 debugf3("%s(): (%d) cumul_size 0x%x\n", __func__, index, 409 debugf3("%s(): (%d) cumul_size 0x%x\n", __func__, index,
401 cumul_size); 410 cumul_size);
402 411
412 nr_pages = cumul_size - last_cumul_size;
413 if (!nr_pages)
414 continue;
415
403 /* 416 /*
404 * Initialise dram labels 417 * Initialise dram labels
405 * index values: 418 * index values:
406 * [0-7] for single-channel; i.e. csrow->nr_channels = 1 419 * [0-7] for single-channel; i.e. csrow->nr_channels = 1
407 * [0-3] for dual-channel; i.e. csrow->nr_channels = 2 420 * [0-3] for dual-channel; i.e. csrow->nr_channels = 2
408 */ 421 */
409 for (chan = 0; chan < csrow->nr_channels; chan++) 422 dtype = i82975x_dram_type(mch_window, index);
410 strncpy(csrow->channels[chan].label, 423 for (chan = 0; chan < csrow->nr_channels; chan++) {
424 dimm = mci->csrows[index].channels[chan].dimm;
425
426 dimm->nr_pages = nr_pages / csrow->nr_channels;
427 strncpy(dimm->label,
411 labels[(index >> 1) + (chan * 2)], 428 labels[(index >> 1) + (chan * 2)],
412 EDAC_MC_LABEL_LEN); 429 EDAC_MC_LABEL_LEN);
413 430 dimm->grain = 1 << 7; /* 128Byte cache-line resolution */
414 if (cumul_size == last_cumul_size) 431 dimm->dtype = dtype;
415 continue; /* not populated */ 432 dimm->mtype = MEM_DDR2; /* I82975x supports only DDR2 */
433 dimm->edac_mode = EDAC_SECDED; /* only supported */
434 }
416 435
417 csrow->first_page = last_cumul_size; 436 csrow->first_page = last_cumul_size;
418 csrow->last_page = cumul_size - 1; 437 csrow->last_page = cumul_size - 1;
419 csrow->nr_pages = cumul_size - last_cumul_size;
420 last_cumul_size = cumul_size; 438 last_cumul_size = cumul_size;
421 csrow->grain = 1 << 7; /* 128Byte cache-line resolution */
422 csrow->mtype = MEM_DDR2; /* I82975x supports only DDR2 */
423 csrow->dtype = i82975x_dram_type(mch_window, index);
424 csrow->edac_mode = EDAC_SECDED; /* only supported */
425 } 439 }
426} 440}
427 441
@@ -463,6 +477,7 @@ static int i82975x_probe1(struct pci_dev *pdev, int dev_idx)
463{ 477{
464 int rc = -ENODEV; 478 int rc = -ENODEV;
465 struct mem_ctl_info *mci; 479 struct mem_ctl_info *mci;
480 struct edac_mc_layer layers[2];
466 struct i82975x_pvt *pvt; 481 struct i82975x_pvt *pvt;
467 void __iomem *mch_window; 482 void __iomem *mch_window;
468 u32 mchbar; 483 u32 mchbar;
@@ -531,8 +546,13 @@ static int i82975x_probe1(struct pci_dev *pdev, int dev_idx)
531 chans = dual_channel_active(mch_window) + 1; 546 chans = dual_channel_active(mch_window) + 1;
532 547
533 /* assuming only one controller, index thus is 0 */ 548 /* assuming only one controller, index thus is 0 */
534 mci = edac_mc_alloc(sizeof(*pvt), I82975X_NR_CSROWS(chans), 549 layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
535 chans, 0); 550 layers[0].size = I82975X_NR_DIMMS;
551 layers[0].is_virt_csrow = true;
552 layers[1].type = EDAC_MC_LAYER_CHANNEL;
553 layers[1].size = I82975X_NR_CSROWS(chans);
554 layers[1].is_virt_csrow = false;
555 mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers, sizeof(*pvt));
536 if (!mci) { 556 if (!mci) {
537 rc = -ENOMEM; 557 rc = -ENOMEM;
538 goto fail1; 558 goto fail1;
diff --git a/drivers/edac/mpc85xx_edac.c b/drivers/edac/mpc85xx_edac.c
index 73464a62adf7..4c402353ba98 100644
--- a/drivers/edac/mpc85xx_edac.c
+++ b/drivers/edac/mpc85xx_edac.c
@@ -854,12 +854,16 @@ static void mpc85xx_mc_check(struct mem_ctl_info *mci)
854 mpc85xx_mc_printk(mci, KERN_ERR, "PFN out of range!\n"); 854 mpc85xx_mc_printk(mci, KERN_ERR, "PFN out of range!\n");
855 855
856 if (err_detect & DDR_EDE_SBE) 856 if (err_detect & DDR_EDE_SBE)
857 edac_mc_handle_ce(mci, pfn, err_addr & ~PAGE_MASK, 857 edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
858 syndrome, row_index, 0, mci->ctl_name); 858 pfn, err_addr & ~PAGE_MASK, syndrome,
859 row_index, 0, -1,
860 mci->ctl_name, "", NULL);
859 861
860 if (err_detect & DDR_EDE_MBE) 862 if (err_detect & DDR_EDE_MBE)
861 edac_mc_handle_ue(mci, pfn, err_addr & ~PAGE_MASK, 863 edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci,
862 row_index, mci->ctl_name); 864 pfn, err_addr & ~PAGE_MASK, syndrome,
865 row_index, 0, -1,
866 mci->ctl_name, "", NULL);
863 867
864 out_be32(pdata->mc_vbase + MPC85XX_MC_ERR_DETECT, err_detect); 868 out_be32(pdata->mc_vbase + MPC85XX_MC_ERR_DETECT, err_detect);
865} 869}
@@ -883,6 +887,7 @@ static void __devinit mpc85xx_init_csrows(struct mem_ctl_info *mci)
883{ 887{
884 struct mpc85xx_mc_pdata *pdata = mci->pvt_info; 888 struct mpc85xx_mc_pdata *pdata = mci->pvt_info;
885 struct csrow_info *csrow; 889 struct csrow_info *csrow;
890 struct dimm_info *dimm;
886 u32 sdram_ctl; 891 u32 sdram_ctl;
887 u32 sdtype; 892 u32 sdtype;
888 enum mem_type mtype; 893 enum mem_type mtype;
@@ -929,6 +934,8 @@ static void __devinit mpc85xx_init_csrows(struct mem_ctl_info *mci)
929 u32 end; 934 u32 end;
930 935
931 csrow = &mci->csrows[index]; 936 csrow = &mci->csrows[index];
937 dimm = csrow->channels[0].dimm;
938
932 cs_bnds = in_be32(pdata->mc_vbase + MPC85XX_MC_CS_BNDS_0 + 939 cs_bnds = in_be32(pdata->mc_vbase + MPC85XX_MC_CS_BNDS_0 +
933 (index * MPC85XX_MC_CS_BNDS_OFS)); 940 (index * MPC85XX_MC_CS_BNDS_OFS));
934 941
@@ -944,19 +951,21 @@ static void __devinit mpc85xx_init_csrows(struct mem_ctl_info *mci)
944 951
945 csrow->first_page = start; 952 csrow->first_page = start;
946 csrow->last_page = end; 953 csrow->last_page = end;
947 csrow->nr_pages = end + 1 - start; 954
948 csrow->grain = 8; 955 dimm->nr_pages = end + 1 - start;
949 csrow->mtype = mtype; 956 dimm->grain = 8;
950 csrow->dtype = DEV_UNKNOWN; 957 dimm->mtype = mtype;
958 dimm->dtype = DEV_UNKNOWN;
951 if (sdram_ctl & DSC_X32_EN) 959 if (sdram_ctl & DSC_X32_EN)
952 csrow->dtype = DEV_X32; 960 dimm->dtype = DEV_X32;
953 csrow->edac_mode = EDAC_SECDED; 961 dimm->edac_mode = EDAC_SECDED;
954 } 962 }
955} 963}
956 964
957static int __devinit mpc85xx_mc_err_probe(struct platform_device *op) 965static int __devinit mpc85xx_mc_err_probe(struct platform_device *op)
958{ 966{
959 struct mem_ctl_info *mci; 967 struct mem_ctl_info *mci;
968 struct edac_mc_layer layers[2];
960 struct mpc85xx_mc_pdata *pdata; 969 struct mpc85xx_mc_pdata *pdata;
961 struct resource r; 970 struct resource r;
962 u32 sdram_ctl; 971 u32 sdram_ctl;
@@ -965,7 +974,13 @@ static int __devinit mpc85xx_mc_err_probe(struct platform_device *op)
965 if (!devres_open_group(&op->dev, mpc85xx_mc_err_probe, GFP_KERNEL)) 974 if (!devres_open_group(&op->dev, mpc85xx_mc_err_probe, GFP_KERNEL))
966 return -ENOMEM; 975 return -ENOMEM;
967 976
968 mci = edac_mc_alloc(sizeof(*pdata), 4, 1, edac_mc_idx); 977 layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
978 layers[0].size = 4;
979 layers[0].is_virt_csrow = true;
980 layers[1].type = EDAC_MC_LAYER_CHANNEL;
981 layers[1].size = 1;
982 layers[1].is_virt_csrow = false;
983 mci = edac_mc_alloc(edac_mc_idx, ARRAY_SIZE(layers), layers, sizeof(*pdata));
969 if (!mci) { 984 if (!mci) {
970 devres_release_group(&op->dev, mpc85xx_mc_err_probe); 985 devres_release_group(&op->dev, mpc85xx_mc_err_probe);
971 return -ENOMEM; 986 return -ENOMEM;
diff --git a/drivers/edac/mv64x60_edac.c b/drivers/edac/mv64x60_edac.c
index 7e5ff367705c..b0bb5a3d2527 100644
--- a/drivers/edac/mv64x60_edac.c
+++ b/drivers/edac/mv64x60_edac.c
@@ -611,12 +611,17 @@ static void mv64x60_mc_check(struct mem_ctl_info *mci)
611 611
612 /* first bit clear in ECC Err Reg, 1 bit error, correctable by HW */ 612 /* first bit clear in ECC Err Reg, 1 bit error, correctable by HW */
613 if (!(reg & 0x1)) 613 if (!(reg & 0x1))
614 edac_mc_handle_ce(mci, err_addr >> PAGE_SHIFT, 614 edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
615 err_addr & PAGE_MASK, syndrome, 0, 0, 615 err_addr >> PAGE_SHIFT,
616 mci->ctl_name); 616 err_addr & ~PAGE_MASK, syndrome,
617 0, 0, -1,
618 mci->ctl_name, "", NULL);
617 else /* 2 bit error, UE */ 619 else /* 2 bit error, UE */
618 edac_mc_handle_ue(mci, err_addr >> PAGE_SHIFT, 620 edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci,
619 err_addr & PAGE_MASK, 0, mci->ctl_name); 621 err_addr >> PAGE_SHIFT,
622 err_addr & ~PAGE_MASK, 0,
623 0, 0, -1,
624 mci->ctl_name, "", NULL);
620 625
621 /* clear the error */ 626 /* clear the error */
622 out_le32(pdata->mc_vbase + MV64X60_SDRAM_ERR_ADDR, 0); 627 out_le32(pdata->mc_vbase + MV64X60_SDRAM_ERR_ADDR, 0);
@@ -656,6 +661,8 @@ static void mv64x60_init_csrows(struct mem_ctl_info *mci,
656 struct mv64x60_mc_pdata *pdata) 661 struct mv64x60_mc_pdata *pdata)
657{ 662{
658 struct csrow_info *csrow; 663 struct csrow_info *csrow;
664 struct dimm_info *dimm;
665
659 u32 devtype; 666 u32 devtype;
660 u32 ctl; 667 u32 ctl;
661 668
@@ -664,35 +671,36 @@ static void mv64x60_init_csrows(struct mem_ctl_info *mci,
664 ctl = in_le32(pdata->mc_vbase + MV64X60_SDRAM_CONFIG); 671 ctl = in_le32(pdata->mc_vbase + MV64X60_SDRAM_CONFIG);
665 672
666 csrow = &mci->csrows[0]; 673 csrow = &mci->csrows[0];
667 csrow->first_page = 0; 674 dimm = csrow->channels[0].dimm;
668 csrow->nr_pages = pdata->total_mem >> PAGE_SHIFT; 675
669 csrow->last_page = csrow->first_page + csrow->nr_pages - 1; 676 dimm->nr_pages = pdata->total_mem >> PAGE_SHIFT;
670 csrow->grain = 8; 677 dimm->grain = 8;
671 678
672 csrow->mtype = (ctl & MV64X60_SDRAM_REGISTERED) ? MEM_RDDR : MEM_DDR; 679 dimm->mtype = (ctl & MV64X60_SDRAM_REGISTERED) ? MEM_RDDR : MEM_DDR;
673 680
674 devtype = (ctl >> 20) & 0x3; 681 devtype = (ctl >> 20) & 0x3;
675 switch (devtype) { 682 switch (devtype) {
676 case 0x0: 683 case 0x0:
677 csrow->dtype = DEV_X32; 684 dimm->dtype = DEV_X32;
678 break; 685 break;
679 case 0x2: /* could be X8 too, but no way to tell */ 686 case 0x2: /* could be X8 too, but no way to tell */
680 csrow->dtype = DEV_X16; 687 dimm->dtype = DEV_X16;
681 break; 688 break;
682 case 0x3: 689 case 0x3:
683 csrow->dtype = DEV_X4; 690 dimm->dtype = DEV_X4;
684 break; 691 break;
685 default: 692 default:
686 csrow->dtype = DEV_UNKNOWN; 693 dimm->dtype = DEV_UNKNOWN;
687 break; 694 break;
688 } 695 }
689 696
690 csrow->edac_mode = EDAC_SECDED; 697 dimm->edac_mode = EDAC_SECDED;
691} 698}
692 699
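The device-width field decoded above lives in bits [21:20] of the SDRAM configuration register. Restated as a small helper (a sketch; encoding 0x1 is simply not decoded by this driver):

    /* ctl is the MV64X60_SDRAM_CONFIG value; width encoding as in the
     * switch above. */
    static enum dev_type mv64x60_dev_width(u32 ctl)
    {
            switch ((ctl >> 20) & 0x3) {
            case 0x0: return DEV_X32;
            case 0x2: return DEV_X16;       /* could be X8; no way to tell */
            case 0x3: return DEV_X4;
            default:  return DEV_UNKNOWN;
            }
    }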
693static int __devinit mv64x60_mc_err_probe(struct platform_device *pdev) 700static int __devinit mv64x60_mc_err_probe(struct platform_device *pdev)
694{ 701{
695 struct mem_ctl_info *mci; 702 struct mem_ctl_info *mci;
703 struct edac_mc_layer layers[2];
696 struct mv64x60_mc_pdata *pdata; 704 struct mv64x60_mc_pdata *pdata;
697 struct resource *r; 705 struct resource *r;
698 u32 ctl; 706 u32 ctl;
@@ -701,7 +709,14 @@ static int __devinit mv64x60_mc_err_probe(struct platform_device *pdev)
701 if (!devres_open_group(&pdev->dev, mv64x60_mc_err_probe, GFP_KERNEL)) 709 if (!devres_open_group(&pdev->dev, mv64x60_mc_err_probe, GFP_KERNEL))
702 return -ENOMEM; 710 return -ENOMEM;
703 711
704 mci = edac_mc_alloc(sizeof(struct mv64x60_mc_pdata), 1, 1, edac_mc_idx); 712 layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
713 layers[0].size = 1;
714 layers[0].is_virt_csrow = true;
715 layers[1].type = EDAC_MC_LAYER_CHANNEL;
716 layers[1].size = 1;
717 layers[1].is_virt_csrow = false;
718 mci = edac_mc_alloc(edac_mc_idx, ARRAY_SIZE(layers), layers,
719 sizeof(struct mv64x60_mc_pdata));
705 if (!mci) { 720 if (!mci) {
706 printk(KERN_ERR "%s: No memory for CPU err\n", __func__); 721 printk(KERN_ERR "%s: No memory for CPU err\n", __func__);
707 devres_release_group(&pdev->dev, mv64x60_mc_err_probe); 722 devres_release_group(&pdev->dev, mv64x60_mc_err_probe);
diff --git a/drivers/edac/pasemi_edac.c b/drivers/edac/pasemi_edac.c
index 7f71ee436744..b095a906a994 100644
--- a/drivers/edac/pasemi_edac.c
+++ b/drivers/edac/pasemi_edac.c
@@ -110,15 +110,16 @@ static void pasemi_edac_process_error_info(struct mem_ctl_info *mci, u32 errsta)
110 /* uncorrectable/multi-bit errors */ 110 /* uncorrectable/multi-bit errors */
111 if (errsta & (MCDEBUG_ERRSTA_MBE_STATUS | 111 if (errsta & (MCDEBUG_ERRSTA_MBE_STATUS |
112 MCDEBUG_ERRSTA_RFL_STATUS)) { 112 MCDEBUG_ERRSTA_RFL_STATUS)) {
113 edac_mc_handle_ue(mci, mci->csrows[cs].first_page, 0, 113 edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci,
114 cs, mci->ctl_name); 114 mci->csrows[cs].first_page, 0, 0,
115 cs, 0, -1, mci->ctl_name, "", NULL);
115 } 116 }
116 117
117 /* correctable/single-bit errors */ 118 /* correctable/single-bit errors */
118 if (errsta & MCDEBUG_ERRSTA_SBE_STATUS) { 119 if (errsta & MCDEBUG_ERRSTA_SBE_STATUS)
119 edac_mc_handle_ce(mci, mci->csrows[cs].first_page, 0, 120 edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
120 0, cs, 0, mci->ctl_name); 121 mci->csrows[cs].first_page, 0, 0,
121 } 122 cs, 0, -1, mci->ctl_name, "", NULL);
122} 123}
123 124
124static void pasemi_edac_check(struct mem_ctl_info *mci) 125static void pasemi_edac_check(struct mem_ctl_info *mci)
@@ -135,11 +136,13 @@ static int pasemi_edac_init_csrows(struct mem_ctl_info *mci,
135 enum edac_type edac_mode) 136 enum edac_type edac_mode)
136{ 137{
137 struct csrow_info *csrow; 138 struct csrow_info *csrow;
139 struct dimm_info *dimm;
138 u32 rankcfg; 140 u32 rankcfg;
139 int index; 141 int index;
140 142
141 for (index = 0; index < mci->nr_csrows; index++) { 143 for (index = 0; index < mci->nr_csrows; index++) {
142 csrow = &mci->csrows[index]; 144 csrow = &mci->csrows[index];
145 dimm = csrow->channels[0].dimm;
143 146
144 pci_read_config_dword(pdev, 147 pci_read_config_dword(pdev,
145 MCDRAM_RANKCFG + (index * 12), 148 MCDRAM_RANKCFG + (index * 12),
@@ -151,20 +154,20 @@ static int pasemi_edac_init_csrows(struct mem_ctl_info *mci,
151 switch ((rankcfg & MCDRAM_RANKCFG_TYPE_SIZE_M) >> 154 switch ((rankcfg & MCDRAM_RANKCFG_TYPE_SIZE_M) >>
152 MCDRAM_RANKCFG_TYPE_SIZE_S) { 155 MCDRAM_RANKCFG_TYPE_SIZE_S) {
153 case 0: 156 case 0:
154 csrow->nr_pages = 128 << (20 - PAGE_SHIFT); 157 dimm->nr_pages = 128 << (20 - PAGE_SHIFT);
155 break; 158 break;
156 case 1: 159 case 1:
157 csrow->nr_pages = 256 << (20 - PAGE_SHIFT); 160 dimm->nr_pages = 256 << (20 - PAGE_SHIFT);
158 break; 161 break;
159 case 2: 162 case 2:
160 case 3: 163 case 3:
161 csrow->nr_pages = 512 << (20 - PAGE_SHIFT); 164 dimm->nr_pages = 512 << (20 - PAGE_SHIFT);
162 break; 165 break;
163 case 4: 166 case 4:
164 csrow->nr_pages = 1024 << (20 - PAGE_SHIFT); 167 dimm->nr_pages = 1024 << (20 - PAGE_SHIFT);
165 break; 168 break;
166 case 5: 169 case 5:
167 csrow->nr_pages = 2048 << (20 - PAGE_SHIFT); 170 dimm->nr_pages = 2048 << (20 - PAGE_SHIFT);
168 break; 171 break;
169 default: 172 default:
170 edac_mc_printk(mci, KERN_ERR, 173 edac_mc_printk(mci, KERN_ERR,
@@ -174,13 +177,13 @@ static int pasemi_edac_init_csrows(struct mem_ctl_info *mci,
174 } 177 }
175 178
176 csrow->first_page = last_page_in_mmc; 179 csrow->first_page = last_page_in_mmc;
177 csrow->last_page = csrow->first_page + csrow->nr_pages - 1; 180 csrow->last_page = csrow->first_page + dimm->nr_pages - 1;
178 last_page_in_mmc += csrow->nr_pages; 181 last_page_in_mmc += dimm->nr_pages;
179 csrow->page_mask = 0; 182 csrow->page_mask = 0;
180 csrow->grain = PASEMI_EDAC_ERROR_GRAIN; 183 dimm->grain = PASEMI_EDAC_ERROR_GRAIN;
181 csrow->mtype = MEM_DDR; 184 dimm->mtype = MEM_DDR;
182 csrow->dtype = DEV_UNKNOWN; 185 dimm->dtype = DEV_UNKNOWN;
183 csrow->edac_mode = edac_mode; 186 dimm->edac_mode = edac_mode;
184 } 187 }
185 return 0; 188 return 0;
186} 189}
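The rank-size cases above encode the megabyte-to-page conversion directly in the shift: with 4 KiB pages, N << (20 - PAGE_SHIFT) is N MiB expressed in pages. A one-line sketch (PAGE_SHIFT == 12 assumed):

    /* 128 MiB -> 128 << 8 = 32768 pages; 2048 MiB -> 524288 pages */
    static unsigned long mib_to_pages(unsigned long mib)
    {
            return mib << (20 - PAGE_SHIFT);
    }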
@@ -189,6 +192,7 @@ static int __devinit pasemi_edac_probe(struct pci_dev *pdev,
189 const struct pci_device_id *ent) 192 const struct pci_device_id *ent)
190{ 193{
191 struct mem_ctl_info *mci = NULL; 194 struct mem_ctl_info *mci = NULL;
195 struct edac_mc_layer layers[2];
192 u32 errctl1, errcor, scrub, mcen; 196 u32 errctl1, errcor, scrub, mcen;
193 197
194 pci_read_config_dword(pdev, MCCFG_MCEN, &mcen); 198 pci_read_config_dword(pdev, MCCFG_MCEN, &mcen);
@@ -205,9 +209,14 @@ static int __devinit pasemi_edac_probe(struct pci_dev *pdev,
205 MCDEBUG_ERRCTL1_RFL_LOG_EN; 209 MCDEBUG_ERRCTL1_RFL_LOG_EN;
206 pci_write_config_dword(pdev, MCDEBUG_ERRCTL1, errctl1); 210 pci_write_config_dword(pdev, MCDEBUG_ERRCTL1, errctl1);
207 211
208 mci = edac_mc_alloc(0, PASEMI_EDAC_NR_CSROWS, PASEMI_EDAC_NR_CHANS, 212 layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
209 system_mmc_id++); 213 layers[0].size = PASEMI_EDAC_NR_CSROWS;
210 214 layers[0].is_virt_csrow = true;
215 layers[1].type = EDAC_MC_LAYER_CHANNEL;
216 layers[1].size = PASEMI_EDAC_NR_CHANS;
217 layers[1].is_virt_csrow = false;
218 mci = edac_mc_alloc(system_mmc_id++, ARRAY_SIZE(layers), layers,
219 0);
211 if (mci == NULL) 220 if (mci == NULL)
212 return -ENOMEM; 221 return -ENOMEM;
213 222
diff --git a/drivers/edac/ppc4xx_edac.c b/drivers/edac/ppc4xx_edac.c
index d427c69bb8b1..f3f9fed06ad7 100644
--- a/drivers/edac/ppc4xx_edac.c
+++ b/drivers/edac/ppc4xx_edac.c
@@ -727,7 +727,10 @@ ppc4xx_edac_handle_ce(struct mem_ctl_info *mci,
727 727
728 for (row = 0; row < mci->nr_csrows; row++) 728 for (row = 0; row < mci->nr_csrows; row++)
729 if (ppc4xx_edac_check_bank_error(status, row)) 729 if (ppc4xx_edac_check_bank_error(status, row))
730 edac_mc_handle_ce_no_info(mci, message); 730 edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
731 0, 0, 0,
732 row, 0, -1,
733 message, "", NULL);
731} 734}
732 735
733/** 736/**
@@ -755,7 +758,10 @@ ppc4xx_edac_handle_ue(struct mem_ctl_info *mci,
755 758
756 for (row = 0; row < mci->nr_csrows; row++) 759 for (row = 0; row < mci->nr_csrows; row++)
757 if (ppc4xx_edac_check_bank_error(status, row)) 760 if (ppc4xx_edac_check_bank_error(status, row))
758 edac_mc_handle_ue(mci, page, offset, row, message); 761 edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci,
762 page, offset, 0,
763 row, 0, -1,
764 message, "", NULL);
759} 765}
760 766
761/** 767/**
@@ -895,9 +901,8 @@ ppc4xx_edac_init_csrows(struct mem_ctl_info *mci, u32 mcopt1)
895 enum mem_type mtype; 901 enum mem_type mtype;
896 enum dev_type dtype; 902 enum dev_type dtype;
897 enum edac_type edac_mode; 903 enum edac_type edac_mode;
898 int row; 904 int row, j;
899 u32 mbxcf, size; 905 u32 mbxcf, size, nr_pages;
900 static u32 ppc4xx_last_page;
901 906
902 /* Establish the memory type and width */ 907 /* Establish the memory type and width */
903 908
@@ -948,7 +953,7 @@ ppc4xx_edac_init_csrows(struct mem_ctl_info *mci, u32 mcopt1)
948 case SDRAM_MBCF_SZ_2GB: 953 case SDRAM_MBCF_SZ_2GB:
949 case SDRAM_MBCF_SZ_4GB: 954 case SDRAM_MBCF_SZ_4GB:
950 case SDRAM_MBCF_SZ_8GB: 955 case SDRAM_MBCF_SZ_8GB:
951 csi->nr_pages = SDRAM_MBCF_SZ_TO_PAGES(size); 956 nr_pages = SDRAM_MBCF_SZ_TO_PAGES(size);
952 break; 957 break;
953 default: 958 default:
954 ppc4xx_edac_mc_printk(KERN_ERR, mci, 959 ppc4xx_edac_mc_printk(KERN_ERR, mci,
@@ -959,10 +964,6 @@ ppc4xx_edac_init_csrows(struct mem_ctl_info *mci, u32 mcopt1)
959 goto done; 964 goto done;
960 } 965 }
961 966
962 csi->first_page = ppc4xx_last_page;
963 csi->last_page = csi->first_page + csi->nr_pages - 1;
964 csi->page_mask = 0;
965
966 /* 967 /*
967 * It's unclear exactly what grain should be set to 968 * It's unclear exactly what grain should be set to
968 * here. The SDRAM_ECCES register allows resolution of 969 * here. The SDRAM_ECCES register allows resolution of
@@ -975,15 +976,17 @@ ppc4xx_edac_init_csrows(struct mem_ctl_info *mci, u32 mcopt1)
975 * possible values would be the PLB width (16), the 976 * possible values would be the PLB width (16), the
976 * page size (PAGE_SIZE) or the memory width (2 or 4). 977 * page size (PAGE_SIZE) or the memory width (2 or 4).
977 */ 978 */
979 for (j = 0; j < csi->nr_channels; j++) {
980 struct dimm_info *dimm = csi->channels[j].dimm;
978 981
979 csi->grain = 1; 982 dimm->nr_pages = nr_pages / csi->nr_channels;
980 983 dimm->grain = 1;
981 csi->mtype = mtype;
982 csi->dtype = dtype;
983 984
984 csi->edac_mode = edac_mode; 985 dimm->mtype = mtype;
986 dimm->dtype = dtype;
985 987
986 ppc4xx_last_page += csi->nr_pages; 988 dimm->edac_mode = edac_mode;
989 }
987 } 990 }
988 991
989 done: 992 done:
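Each bank's pages are now split evenly across the channel DIMMs instead of being accumulated into per-csrow first/last-page bookkeeping. A sketch of the split done in the loop above (bank sizes are powers of two and nr_channels is small, so the integer division loses nothing here):

    static u32 pages_per_dimm(u32 nr_pages, unsigned int nr_channels)
    {
            return nr_pages / nr_channels;
    }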
@@ -1236,6 +1239,7 @@ static int __devinit ppc4xx_edac_probe(struct platform_device *op)
1236 dcr_host_t dcr_host; 1239 dcr_host_t dcr_host;
1237 const struct device_node *np = op->dev.of_node; 1240 const struct device_node *np = op->dev.of_node;
1238 struct mem_ctl_info *mci = NULL; 1241 struct mem_ctl_info *mci = NULL;
1242 struct edac_mc_layer layers[2];
1239 static int ppc4xx_edac_instance; 1243 static int ppc4xx_edac_instance;
1240 1244
1241 /* 1245 /*
@@ -1281,12 +1285,14 @@ static int __devinit ppc4xx_edac_probe(struct platform_device *op)
1281 * controller instance and perform the appropriate 1285 * controller instance and perform the appropriate
1282 * initialization. 1286 * initialization.
1283 */ 1287 */
1284 1288 layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
1285 mci = edac_mc_alloc(sizeof(struct ppc4xx_edac_pdata), 1289 layers[0].size = ppc4xx_edac_nr_csrows;
1286 ppc4xx_edac_nr_csrows, 1290 layers[0].is_virt_csrow = true;
1287 ppc4xx_edac_nr_chans, 1291 layers[1].type = EDAC_MC_LAYER_CHANNEL;
1288 ppc4xx_edac_instance); 1292 layers[1].size = ppc4xx_edac_nr_chans;
1289 1293 layers[1].is_virt_csrow = false;
1294 mci = edac_mc_alloc(ppc4xx_edac_instance, ARRAY_SIZE(layers), layers,
1295 sizeof(struct ppc4xx_edac_pdata));
1290 if (mci == NULL) { 1296 if (mci == NULL) {
1291 ppc4xx_edac_printk(KERN_ERR, "%s: " 1297 ppc4xx_edac_printk(KERN_ERR, "%s: "
1292 "Failed to allocate EDAC MC instance!\n", 1298 "Failed to allocate EDAC MC instance!\n",
diff --git a/drivers/edac/r82600_edac.c b/drivers/edac/r82600_edac.c
index 6d908ad72d64..e1cacd164f31 100644
--- a/drivers/edac/r82600_edac.c
+++ b/drivers/edac/r82600_edac.c
@@ -179,10 +179,11 @@ static int r82600_process_error_info(struct mem_ctl_info *mci,
179 error_found = 1; 179 error_found = 1;
180 180
181 if (handle_errors) 181 if (handle_errors)
182 edac_mc_handle_ce(mci, page, 0, /* not avail */ 182 edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
183 syndrome, 183 page, 0, syndrome,
184 edac_mc_find_csrow_by_page(mci, page), 184 edac_mc_find_csrow_by_page(mci, page),
185 0, mci->ctl_name); 185 0, -1,
186 mci->ctl_name, "", NULL);
186 } 187 }
187 188
188 if (info->eapr & BIT(1)) { /* UE? */ 189 if (info->eapr & BIT(1)) { /* UE? */
@@ -190,9 +191,11 @@ static int r82600_process_error_info(struct mem_ctl_info *mci,
190 191
191 if (handle_errors) 192 if (handle_errors)
192 /* 82600 doesn't give enough info */ 193 /* 82600 doesn't give enough info */
193 edac_mc_handle_ue(mci, page, 0, 194 edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci,
194 edac_mc_find_csrow_by_page(mci, page), 195 page, 0, 0,
195 mci->ctl_name); 196 edac_mc_find_csrow_by_page(mci, page),
197 0, -1,
198 mci->ctl_name, "", NULL);
196 } 199 }
197 200
198 return error_found; 201 return error_found;
@@ -216,6 +219,7 @@ static void r82600_init_csrows(struct mem_ctl_info *mci, struct pci_dev *pdev,
216 u8 dramcr) 219 u8 dramcr)
217{ 220{
218 struct csrow_info *csrow; 221 struct csrow_info *csrow;
222 struct dimm_info *dimm;
219 int index; 223 int index;
220 u8 drbar; /* SDRAM Row Boundary Address Register */ 224 u8 drbar; /* SDRAM Row Boundary Address Register */
221 u32 row_high_limit, row_high_limit_last; 225 u32 row_high_limit, row_high_limit_last;
@@ -227,6 +231,7 @@ static void r82600_init_csrows(struct mem_ctl_info *mci, struct pci_dev *pdev,
227 231
228 for (index = 0; index < mci->nr_csrows; index++) { 232 for (index = 0; index < mci->nr_csrows; index++) {
229 csrow = &mci->csrows[index]; 233 csrow = &mci->csrows[index];
234 dimm = csrow->channels[0].dimm;
230 235
231 /* find the DRAM Chip Select Base address and mask */ 236 /* find the DRAM Chip Select Base address and mask */
232 pci_read_config_byte(pdev, R82600_DRBA + index, &drbar); 237 pci_read_config_byte(pdev, R82600_DRBA + index, &drbar);
@@ -247,16 +252,17 @@ static void r82600_init_csrows(struct mem_ctl_info *mci, struct pci_dev *pdev,
247 252
248 csrow->first_page = row_base >> PAGE_SHIFT; 253 csrow->first_page = row_base >> PAGE_SHIFT;
249 csrow->last_page = (row_high_limit >> PAGE_SHIFT) - 1; 254 csrow->last_page = (row_high_limit >> PAGE_SHIFT) - 1;
250 csrow->nr_pages = csrow->last_page - csrow->first_page + 1; 255
256 dimm->nr_pages = csrow->last_page - csrow->first_page + 1;
251 /* Error address is top 19 bits - so granularity is * 257 /* Error address is top 19 bits - so granularity is *
252 * 14 bits */ 258 * 14 bits */
253 csrow->grain = 1 << 14; 259 dimm->grain = 1 << 14;
254 csrow->mtype = reg_sdram ? MEM_RDDR : MEM_DDR; 260 dimm->mtype = reg_sdram ? MEM_RDDR : MEM_DDR;
255 /* FIXME - check that this is unknowable with this chipset */ 261 /* FIXME - check that this is unknowable with this chipset */
256 csrow->dtype = DEV_UNKNOWN; 262 dimm->dtype = DEV_UNKNOWN;
257 263
258 /* Mode is global on 82600 */ 264 /* Mode is global on 82600 */
259 csrow->edac_mode = ecc_on ? EDAC_SECDED : EDAC_NONE; 265 dimm->edac_mode = ecc_on ? EDAC_SECDED : EDAC_NONE;
260 row_high_limit_last = row_high_limit; 266 row_high_limit_last = row_high_limit;
261 } 267 }
262} 268}
@@ -264,6 +270,7 @@ static void r82600_init_csrows(struct mem_ctl_info *mci, struct pci_dev *pdev,
264static int r82600_probe1(struct pci_dev *pdev, int dev_idx) 270static int r82600_probe1(struct pci_dev *pdev, int dev_idx)
265{ 271{
266 struct mem_ctl_info *mci; 272 struct mem_ctl_info *mci;
273 struct edac_mc_layer layers[2];
267 u8 dramcr; 274 u8 dramcr;
268 u32 eapr; 275 u32 eapr;
269 u32 scrub_disabled; 276 u32 scrub_disabled;
@@ -278,8 +285,13 @@ static int r82600_probe1(struct pci_dev *pdev, int dev_idx)
278 debugf2("%s(): sdram refresh rate = %#0x\n", __func__, 285 debugf2("%s(): sdram refresh rate = %#0x\n", __func__,
279 sdram_refresh_rate); 286 sdram_refresh_rate);
280 debugf2("%s(): DRAMC register = %#0x\n", __func__, dramcr); 287 debugf2("%s(): DRAMC register = %#0x\n", __func__, dramcr);
281 mci = edac_mc_alloc(0, R82600_NR_CSROWS, R82600_NR_CHANS, 0); 288 layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
282 289 layers[0].size = R82600_NR_CSROWS;
290 layers[0].is_virt_csrow = true;
291 layers[1].type = EDAC_MC_LAYER_CHANNEL;
292 layers[1].size = R82600_NR_CHANS;
293 layers[1].is_virt_csrow = false;
294 mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers, 0);
283 if (mci == NULL) 295 if (mci == NULL)
284 return -ENOMEM; 296 return -ENOMEM;
285 297
diff --git a/drivers/edac/sb_edac.c b/drivers/edac/sb_edac.c
index 123204f8e23b..4adaf4b7da99 100644
--- a/drivers/edac/sb_edac.c
+++ b/drivers/edac/sb_edac.c
@@ -314,8 +314,6 @@ struct sbridge_pvt {
314 struct sbridge_info info; 314 struct sbridge_info info;
315 struct sbridge_channel channel[NUM_CHANNELS]; 315 struct sbridge_channel channel[NUM_CHANNELS];
316 316
317 int csrow_map[NUM_CHANNELS][MAX_DIMMS];
318
319 /* Memory type detection */ 317 /* Memory type detection */
320 bool is_mirrored, is_lockstep, is_close_pg; 318 bool is_mirrored, is_lockstep, is_close_pg;
321 319
@@ -487,29 +485,14 @@ static struct pci_dev *get_pdev_slot_func(u8 bus, unsigned slot,
487} 485}
488 486
489/** 487/**
490 * sbridge_get_active_channels() - gets the number of channels and csrows 488 * check_if_ecc_is_active() - Checks if ECC is active
491 * bus: Device bus 489 * @bus: Device bus
492 * @channels: Number of channels that will be returned
493 * @csrows: Number of csrows found
494 *
495 * Since EDAC core needs to know in advance the number of available channels
496 * and csrows, in order to allocate memory for csrows/channels, it is needed
497 * to run two similar steps. At the first step, implemented on this function,
498 * it checks the number of csrows/channels present at one socket, identified
499 * by the associated PCI bus.
500 * this is used in order to properly allocate the size of mci components.
501 * Note: one csrow is one dimm.
502 */ 490 */
503static int sbridge_get_active_channels(const u8 bus, unsigned *channels, 491static int check_if_ecc_is_active(const u8 bus)
504 unsigned *csrows)
505{ 492{
506 struct pci_dev *pdev = NULL; 493 struct pci_dev *pdev = NULL;
507 int i, j;
508 u32 mcmtr; 494 u32 mcmtr;
509 495
510 *channels = 0;
511 *csrows = 0;
512
513 pdev = get_pdev_slot_func(bus, 15, 0); 496 pdev = get_pdev_slot_func(bus, 15, 0);
514 if (!pdev) { 497 if (!pdev) {
515 sbridge_printk(KERN_ERR, "Couldn't find PCI device " 498 sbridge_printk(KERN_ERR, "Couldn't find PCI device "
@@ -523,41 +506,14 @@ static int sbridge_get_active_channels(const u8 bus, unsigned *channels,
523 sbridge_printk(KERN_ERR, "ECC is disabled. Aborting\n"); 506 sbridge_printk(KERN_ERR, "ECC is disabled. Aborting\n");
524 return -ENODEV; 507 return -ENODEV;
525 } 508 }
526
527 for (i = 0; i < NUM_CHANNELS; i++) {
528 u32 mtr;
529
530 /* Device 15 functions 2 - 5 */
531 pdev = get_pdev_slot_func(bus, 15, 2 + i);
532 if (!pdev) {
533 sbridge_printk(KERN_ERR, "Couldn't find PCI device "
534 "%2x.%02d.%d!!!\n",
535 bus, 15, 2 + i);
536 return -ENODEV;
537 }
538 (*channels)++;
539
540 for (j = 0; j < ARRAY_SIZE(mtr_regs); j++) {
541 pci_read_config_dword(pdev, mtr_regs[j], &mtr);
542 debugf1("Bus#%02x channel #%d MTR%d = %x\n", bus, i, j, mtr);
543 if (IS_DIMM_PRESENT(mtr))
544 (*csrows)++;
545 }
546 }
547
548 debugf0("Number of active channels: %d, number of active dimms: %d\n",
549 *channels, *csrows);
550
551 return 0; 509 return 0;
552} 510}
553 511
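Assembled from the hunks above, the reduced helper now only verifies that ECC is enabled before anything is registered. A sketch of its post-patch shape (IS_ECC_ENABLED() is the driver's own macro, and the MCMTR config read sits in context this hunk does not show):

    static int check_if_ecc_is_active(const u8 bus)
    {
            struct pci_dev *pdev;
            u32 mcmtr;

            pdev = get_pdev_slot_func(bus, 15, 0);
            if (!pdev) {
                    sbridge_printk(KERN_ERR, "Couldn't find PCI device "
                                   "%2x.%02d.%d!!!\n", bus, 15, 0);
                    return -ENODEV;
            }

            pci_read_config_dword(pdev, MCMTR, &mcmtr);
            if (!IS_ECC_ENABLED(mcmtr)) {
                    sbridge_printk(KERN_ERR, "ECC is disabled. Aborting\n");
                    return -ENODEV;
            }
            return 0;
    }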
554static int get_dimm_config(const struct mem_ctl_info *mci) 512static int get_dimm_config(struct mem_ctl_info *mci)
555{ 513{
556 struct sbridge_pvt *pvt = mci->pvt_info; 514 struct sbridge_pvt *pvt = mci->pvt_info;
557 struct csrow_info *csr; 515 struct dimm_info *dimm;
558 int i, j, banks, ranks, rows, cols, size, npages; 516 int i, j, banks, ranks, rows, cols, size, npages;
559 int csrow = 0;
560 unsigned long last_page = 0;
561 u32 reg; 517 u32 reg;
562 enum edac_type mode; 518 enum edac_type mode;
563 enum mem_type mtype; 519 enum mem_type mtype;
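With the layer sizes fixed at allocation time by the NUM_CHANNELS and MAX_DIMMS constants, the old two-pass discovery (count channels and csrows first, allocate, then fill) loses its first pass, and the helper shrinks to a pure ECC sanity check. Condensed, the surviving logic is roughly the following sketch (MCMTR and IS_ECC_ENABLED() are the driver's existing register offset and bitfield helper, assumed here):

	static int check_if_ecc_is_active(const u8 bus)
	{
		struct pci_dev *pdev;
		u32 mcmtr;

		/* device 15, function 0 holds the MC target registers */
		pdev = get_pdev_slot_func(bus, 15, 0);
		if (!pdev)
			return -ENODEV;

		pci_read_config_dword(pdev, MCMTR, &mcmtr);
		if (!IS_ECC_ENABLED(mcmtr)) {
			sbridge_printk(KERN_ERR, "ECC is disabled. Aborting\n");
			return -ENODEV;
		}
		return 0;
	}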
@@ -616,6 +572,8 @@ static int get_dimm_config(const struct mem_ctl_info *mci)
616 u32 mtr; 572 u32 mtr;
617 573
618 for (j = 0; j < ARRAY_SIZE(mtr_regs); j++) { 574 for (j = 0; j < ARRAY_SIZE(mtr_regs); j++) {
575 dimm = EDAC_DIMM_PTR(mci->layers, mci->dimms, mci->n_layers,
576 i, j, 0);
619 pci_read_config_dword(pvt->pci_tad[i], 577 pci_read_config_dword(pvt->pci_tad[i],
620 mtr_regs[j], &mtr); 578 mtr_regs[j], &mtr);
621 debugf4("Channel #%d MTR%d = %x\n", i, j, mtr); 579 debugf4("Channel #%d MTR%d = %x\n", i, j, mtr);
@@ -634,29 +592,15 @@ static int get_dimm_config(const struct mem_ctl_info *mci)
634 pvt->sbridge_dev->mc, i, j, 592 pvt->sbridge_dev->mc, i, j,
635 size, npages, 593 size, npages,
636 banks, ranks, rows, cols); 594 banks, ranks, rows, cols);
637 csr = &mci->csrows[csrow]; 595
638 596 dimm->nr_pages = npages;
639 csr->first_page = last_page; 597 dimm->grain = 32;
640 csr->last_page = last_page + npages - 1; 598 dimm->dtype = (banks == 8) ? DEV_X8 : DEV_X4;
641 csr->page_mask = 0UL; /* Unused */ 599 dimm->mtype = mtype;
642 csr->nr_pages = npages; 600 dimm->edac_mode = mode;
643 csr->grain = 32; 601 snprintf(dimm->label, sizeof(dimm->label),
644 csr->csrow_idx = csrow;
645 csr->dtype = (banks == 8) ? DEV_X8 : DEV_X4;
646 csr->ce_count = 0;
647 csr->ue_count = 0;
648 csr->mtype = mtype;
649 csr->edac_mode = mode;
650 csr->nr_channels = 1;
651 csr->channels[0].chan_idx = i;
652 csr->channels[0].ce_count = 0;
653 pvt->csrow_map[i][j] = csrow;
654 snprintf(csr->channels[0].label,
655 sizeof(csr->channels[0].label),
656 "CPU_SrcID#%u_Channel#%u_DIMM#%u", 602 "CPU_SrcID#%u_Channel#%u_DIMM#%u",
657 pvt->sbridge_dev->source_id, i, j); 603 pvt->sbridge_dev->source_id, i, j);
658 last_page += npages;
659 csrow++;
660 } 604 }
661 } 605 }
662 } 606 }
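Everything the removed lines tracked by hand (first_page/last_page accumulation, per-channel fields, the csrow_map lookaside table) is now derived by the EDAC core from per-DIMM data, so the loop body reduces to locating the dimm_info for a (channel, slot) pair and describing the part. A condensed sketch of that body:

	/* channel i, slot j; the third coordinate is unused with two layers */
	dimm = EDAC_DIMM_PTR(mci->layers, mci->dimms, mci->n_layers, i, j, 0);

	dimm->nr_pages = npages;	/* the core derives page ranges from this */
	dimm->grain = 32;		/* reporting granularity, in bytes */
	dimm->dtype = (banks == 8) ? DEV_X8 : DEV_X4;
	dimm->mtype = mtype;
	dimm->edac_mode = mode;
	snprintf(dimm->label, sizeof(dimm->label),
		 "CPU_SrcID#%u_Channel#%u_DIMM#%u",
		 pvt->sbridge_dev->source_id, i, j);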
@@ -844,11 +788,10 @@ static int get_memory_error_data(struct mem_ctl_info *mci,
844 u8 *socket, 788 u8 *socket,
845 long *channel_mask, 789 long *channel_mask,
846 u8 *rank, 790 u8 *rank,
847 char *area_type) 791 char **area_type, char *msg)
848{ 792{
849 struct mem_ctl_info *new_mci; 793 struct mem_ctl_info *new_mci;
850 struct sbridge_pvt *pvt = mci->pvt_info; 794 struct sbridge_pvt *pvt = mci->pvt_info;
851 char msg[256];
852 int n_rir, n_sads, n_tads, sad_way, sck_xch; 795 int n_rir, n_sads, n_tads, sad_way, sck_xch;
853 int sad_interl, idx, base_ch; 796 int sad_interl, idx, base_ch;
854 int interleave_mode; 797 int interleave_mode;
@@ -870,12 +813,10 @@ static int get_memory_error_data(struct mem_ctl_info *mci,
870 */ 813 */
871 if ((addr > (u64) pvt->tolm) && (addr < (1LL << 32))) { 814 if ((addr > (u64) pvt->tolm) && (addr < (1LL << 32))) {
872 sprintf(msg, "Error at TOLM area, on addr 0x%08Lx", addr); 815 sprintf(msg, "Error at TOLM area, on addr 0x%08Lx", addr);
873 edac_mc_handle_ce_no_info(mci, msg);
874 return -EINVAL; 816 return -EINVAL;
875 } 817 }
876 if (addr >= (u64)pvt->tohm) { 818 if (addr >= (u64)pvt->tohm) {
877 sprintf(msg, "Error at MMIOH area, on addr 0x%016Lx", addr); 819 sprintf(msg, "Error at MMIOH area, on addr 0x%016Lx", addr);
878 edac_mc_handle_ce_no_info(mci, msg);
879 return -EINVAL; 820 return -EINVAL;
880 } 821 }
881 822
@@ -892,7 +833,6 @@ static int get_memory_error_data(struct mem_ctl_info *mci,
892 limit = SAD_LIMIT(reg); 833 limit = SAD_LIMIT(reg);
893 if (limit <= prv) { 834 if (limit <= prv) {
894 sprintf(msg, "Can't discover the memory socket"); 835 sprintf(msg, "Can't discover the memory socket");
895 edac_mc_handle_ce_no_info(mci, msg);
896 return -EINVAL; 836 return -EINVAL;
897 } 837 }
898 if (addr <= limit) 838 if (addr <= limit)
@@ -901,10 +841,9 @@ static int get_memory_error_data(struct mem_ctl_info *mci,
901 } 841 }
902 if (n_sads == MAX_SAD) { 842 if (n_sads == MAX_SAD) {
903 sprintf(msg, "Can't discover the memory socket"); 843 sprintf(msg, "Can't discover the memory socket");
904 edac_mc_handle_ce_no_info(mci, msg);
905 return -EINVAL; 844 return -EINVAL;
906 } 845 }
907 area_type = get_dram_attr(reg); 846 *area_type = get_dram_attr(reg);
908 interleave_mode = INTERLEAVE_MODE(reg); 847 interleave_mode = INTERLEAVE_MODE(reg);
909 848
910 pci_read_config_dword(pvt->pci_sad0, interleave_list[n_sads], 849 pci_read_config_dword(pvt->pci_sad0, interleave_list[n_sads],
@@ -942,7 +881,6 @@ static int get_memory_error_data(struct mem_ctl_info *mci,
942 break; 881 break;
943 default: 882 default:
944 sprintf(msg, "Can't discover socket interleave"); 883 sprintf(msg, "Can't discover socket interleave");
945 edac_mc_handle_ce_no_info(mci, msg);
946 return -EINVAL; 884 return -EINVAL;
947 } 885 }
948 *socket = sad_interleave[idx]; 886 *socket = sad_interleave[idx];
@@ -957,7 +895,6 @@ static int get_memory_error_data(struct mem_ctl_info *mci,
957 if (!new_mci) { 895 if (!new_mci) {
958 sprintf(msg, "Struct for socket #%u wasn't initialized", 896 sprintf(msg, "Struct for socket #%u wasn't initialized",
959 *socket); 897 *socket);
960 edac_mc_handle_ce_no_info(mci, msg);
961 return -EINVAL; 898 return -EINVAL;
962 } 899 }
963 mci = new_mci; 900 mci = new_mci;
@@ -973,7 +910,6 @@ static int get_memory_error_data(struct mem_ctl_info *mci,
973 limit = TAD_LIMIT(reg); 910 limit = TAD_LIMIT(reg);
974 if (limit <= prv) { 911 if (limit <= prv) {
975 sprintf(msg, "Can't discover the memory channel"); 912 sprintf(msg, "Can't discover the memory channel");
976 edac_mc_handle_ce_no_info(mci, msg);
977 return -EINVAL; 913 return -EINVAL;
978 } 914 }
979 if (addr <= limit) 915 if (addr <= limit)
@@ -1013,7 +949,6 @@ static int get_memory_error_data(struct mem_ctl_info *mci,
1013 break; 949 break;
1014 default: 950 default:
1015 sprintf(msg, "Can't discover the TAD target"); 951 sprintf(msg, "Can't discover the TAD target");
1016 edac_mc_handle_ce_no_info(mci, msg);
1017 return -EINVAL; 952 return -EINVAL;
1018 } 953 }
1019 *channel_mask = 1 << base_ch; 954 *channel_mask = 1 << base_ch;
@@ -1027,7 +962,6 @@ static int get_memory_error_data(struct mem_ctl_info *mci,
1027 break; 962 break;
1028 default: 963 default:
1029 sprintf(msg, "Invalid mirror set. Can't decode addr"); 964 sprintf(msg, "Invalid mirror set. Can't decode addr");
1030 edac_mc_handle_ce_no_info(mci, msg);
1031 return -EINVAL; 965 return -EINVAL;
1032 } 966 }
1033 } else 967 } else
@@ -1055,7 +989,6 @@ static int get_memory_error_data(struct mem_ctl_info *mci,
1055 if (offset > addr) { 989 if (offset > addr) {
1056 sprintf(msg, "Can't calculate ch addr: TAD offset 0x%08Lx is too high for addr 0x%08Lx!", 990 sprintf(msg, "Can't calculate ch addr: TAD offset 0x%08Lx is too high for addr 0x%08Lx!",
1057 offset, addr); 991 offset, addr);
1058 edac_mc_handle_ce_no_info(mci, msg);
1059 return -EINVAL; 992 return -EINVAL;
1060 } 993 }
1061 addr -= offset; 994 addr -= offset;
@@ -1095,7 +1028,6 @@ static int get_memory_error_data(struct mem_ctl_info *mci,
1095 if (n_rir == MAX_RIR_RANGES) { 1028 if (n_rir == MAX_RIR_RANGES) {
1096 sprintf(msg, "Can't discover the memory rank for ch addr 0x%08Lx", 1029 sprintf(msg, "Can't discover the memory rank for ch addr 0x%08Lx",
1097 ch_addr); 1030 ch_addr);
1098 edac_mc_handle_ce_no_info(mci, msg);
1099 return -EINVAL; 1031 return -EINVAL;
1100 } 1032 }
1101 rir_way = RIR_WAY(reg); 1033 rir_way = RIR_WAY(reg);
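Two independent fixes run through the get_memory_error_data() hunks above. First, every mid-decode edac_mc_handle_ce_no_info() call is gone: on a decode failure the function now only formats the reason into the caller-supplied msg buffer and returns -EINVAL, leaving a single reporting point in the caller. Second, area_type becomes char **: with the old by-value char * parameter, the assignment area_type = get_dram_attr(reg) was lost on return, so the decoded DRAM attribute never reached the caller's log message. The resulting contract, sketched from the caller's side:

	char msg[256];
	char *area_type = NULL;

	/*
	 * On success: socket, channel_mask, rank and area_type are filled in.
	 * On failure: msg holds the reason and the return value is -EINVAL.
	 */
	rc = get_memory_error_data(mci, m->addr, &socket,
				   &channel_mask, &rank, &area_type, msg);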
@@ -1409,7 +1341,8 @@ static void sbridge_mce_output_error(struct mem_ctl_info *mci,
1409{ 1341{
1410 struct mem_ctl_info *new_mci; 1342 struct mem_ctl_info *new_mci;
1411 struct sbridge_pvt *pvt = mci->pvt_info; 1343 struct sbridge_pvt *pvt = mci->pvt_info;
1412 char *type, *optype, *msg, *recoverable_msg; 1344 enum hw_event_mc_err_type tp_event;
1345 char *type, *optype, msg[256];
1413 bool ripv = GET_BITFIELD(m->mcgstatus, 0, 0); 1346 bool ripv = GET_BITFIELD(m->mcgstatus, 0, 0);
1414 bool overflow = GET_BITFIELD(m->status, 62, 62); 1347 bool overflow = GET_BITFIELD(m->status, 62, 62);
1415 bool uncorrected_error = GET_BITFIELD(m->status, 61, 61); 1348 bool uncorrected_error = GET_BITFIELD(m->status, 61, 61);
@@ -1421,13 +1354,21 @@ static void sbridge_mce_output_error(struct mem_ctl_info *mci,
1421 u32 optypenum = GET_BITFIELD(m->status, 4, 6); 1354 u32 optypenum = GET_BITFIELD(m->status, 4, 6);
1422 long channel_mask, first_channel; 1355 long channel_mask, first_channel;
1423 u8 rank, socket; 1356 u8 rank, socket;
1424 int csrow, rc, dimm; 1357 int rc, dimm;
1425 char *area_type = "Unknown"; 1358 char *area_type = NULL;
1426 1359
1427 if (ripv) 1360 if (uncorrected_error) {
1428 type = "NON_FATAL"; 1361 if (ripv) {
1429 else 1362 type = "FATAL";
1430 type = "FATAL"; 1363 tp_event = HW_EVENT_ERR_FATAL;
1364 } else {
1365 type = "NON_FATAL";
1366 tp_event = HW_EVENT_ERR_UNCORRECTED;
1367 }
1368 } else {
1369 type = "CORRECTED";
1370 tp_event = HW_EVENT_ERR_CORRECTED;
1371 }
1431 1372
1432 /* 1373 /*
1433 * According to Table 15-9 of the Intel Architecture spec vol 3A,
@@ -1445,19 +1386,19 @@ static void sbridge_mce_output_error(struct mem_ctl_info *mci,
1445 } else { 1386 } else {
1446 switch (optypenum) { 1387 switch (optypenum) {
1447 case 0: 1388 case 0:
1448 optype = "generic undef request"; 1389 optype = "generic undef request error";
1449 break; 1390 break;
1450 case 1: 1391 case 1:
1451 optype = "memory read"; 1392 optype = "memory read error";
1452 break; 1393 break;
1453 case 2: 1394 case 2:
1454 optype = "memory write"; 1395 optype = "memory write error";
1455 break; 1396 break;
1456 case 3: 1397 case 3:
1457 optype = "addr/cmd"; 1398 optype = "addr/cmd error";
1458 break; 1399 break;
1459 case 4: 1400 case 4:
1460 optype = "memory scrubbing"; 1401 optype = "memory scrubbing error";
1461 break; 1402 break;
1462 default: 1403 default:
1463 optype = "reserved"; 1404 optype = "reserved";
@@ -1466,13 +1407,13 @@ static void sbridge_mce_output_error(struct mem_ctl_info *mci,
1466 } 1407 }
1467 1408
1468 rc = get_memory_error_data(mci, m->addr, &socket, 1409 rc = get_memory_error_data(mci, m->addr, &socket,
1469 &channel_mask, &rank, area_type); 1410 &channel_mask, &rank, &area_type, msg);
1470 if (rc < 0) 1411 if (rc < 0)
1471 return; 1412 goto err_parsing;
1472 new_mci = get_mci_for_node_id(socket); 1413 new_mci = get_mci_for_node_id(socket);
1473 if (!new_mci) { 1414 if (!new_mci) {
1474 edac_mc_handle_ce_no_info(mci, "Error: socket got corrupted!"); 1415 strcpy(msg, "Error: socket got corrupted!");
1475 return; 1416 goto err_parsing;
1476 } 1417 }
1477 mci = new_mci; 1418 mci = new_mci;
1478 pvt = mci->pvt_info; 1419 pvt = mci->pvt_info;
@@ -1486,45 +1427,39 @@ static void sbridge_mce_output_error(struct mem_ctl_info *mci,
1486 else 1427 else
1487 dimm = 2; 1428 dimm = 2;
1488 1429
1489 csrow = pvt->csrow_map[first_channel][dimm];
1490
1491 if (uncorrected_error && recoverable)
1492 recoverable_msg = " recoverable";
1493 else
1494 recoverable_msg = "";
1495 1430
1496 /* 1431 /*
1497 * FIXME: What should we do with "channel" information on mcelog? 1432 * FIXME: On some memory configurations (mirror, lockstep), the
1498 * Probably, we can just discard it, as the channel information 1433 * Memory Controller can't point the error to a single DIMM. The
1499 * comes from the get_memory_error_data() address decoding 1434 * EDAC core should be handling the channel mask, in order to point
1435 * to the group of DIMMs where the error may be located.
1500 */ 1436 */
1501 msg = kasprintf(GFP_ATOMIC, 1437 snprintf(msg, sizeof(msg),
1502 "%d %s error(s): %s on %s area %s%s: cpu=%d Err=%04x:%04x (ch=%d), " 1438 "count:%d%s%s area:%s err_code:%04x:%04x socket:%d channel_mask:%ld rank:%d",
1503 "addr = 0x%08llx => socket=%d, Channel=%ld(mask=%ld), rank=%d\n", 1439 core_err_cnt,
1504 core_err_cnt, 1440 overflow ? " OVERFLOW" : "",
1505 area_type, 1441 (uncorrected_error && recoverable) ? " recoverable" : "",
1506 optype, 1442 area_type,
1507 type, 1443 mscod, errcode,
1508 recoverable_msg, 1444 socket,
1509 overflow ? "OVERFLOW" : "", 1445 channel_mask,
1510 m->cpu, 1446 rank);
1511 mscod, errcode,
1512 channel, /* 1111b means not specified */
1513 (long long) m->addr,
1514 socket,
1515 first_channel, /* This is the real channel on SB */
1516 channel_mask,
1517 rank);
1518 1447
1519 debugf0("%s", msg); 1448 debugf0("%s", msg);
1520 1449
1450 /* FIXME: need support for channel mask */
1451
1521 /* Call the helper to output message */ 1452 /* Call the helper to output message */
1522 if (uncorrected_error) 1453 edac_mc_handle_error(tp_event, mci,
1523 edac_mc_handle_fbd_ue(mci, csrow, 0, 0, msg); 1454 m->addr >> PAGE_SHIFT, m->addr & ~PAGE_MASK, 0,
1524 else 1455 channel, dimm, -1,
1525 edac_mc_handle_fbd_ce(mci, csrow, 0, msg); 1456 optype, msg, m);
1457 return;
1458err_parsing:
1459 edac_mc_handle_error(tp_event, mci, 0, 0, 0,
1460 -1, -1, -1,
1461 msg, "", m);
1526 1462
1527 kfree(msg);
1528} 1463}
1529 1464
1530/* 1465/*
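The reporting itself collapses into a single edac_mc_handle_error() call for all severities, with the severity carried by tp_event instead of being encoded in the choice of fbd_ue/fbd_ce helper, and the kasprintf()/kfree() pair replaced by an on-stack buffer. The argument layout, annotated as this series uses it:

	edac_mc_handle_error(tp_event, mci,	/* CORRECTED/UNCORRECTED/FATAL */
			     m->addr >> PAGE_SHIFT,	/* page frame of the error */
			     m->addr & ~PAGE_MASK,	/* offset within that page */
			     0,				/* syndrome, when known */
			     channel, dimm, -1,		/* position in each layer;
							 * -1 means not known */
			     optype, msg, m);		/* label, free-form detail,
							 * arch-specific log or NULL */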
@@ -1683,16 +1618,25 @@ static void sbridge_unregister_mci(struct sbridge_dev *sbridge_dev)
1683static int sbridge_register_mci(struct sbridge_dev *sbridge_dev) 1618static int sbridge_register_mci(struct sbridge_dev *sbridge_dev)
1684{ 1619{
1685 struct mem_ctl_info *mci; 1620 struct mem_ctl_info *mci;
1621 struct edac_mc_layer layers[2];
1686 struct sbridge_pvt *pvt; 1622 struct sbridge_pvt *pvt;
1687 int rc, channels, csrows; 1623 int rc;
1688 1624
1689 /* Check the number of active and not disabled channels */ 1625 /* Check the number of active and not disabled channels */
1690 rc = sbridge_get_active_channels(sbridge_dev->bus, &channels, &csrows); 1626 rc = check_if_ecc_is_active(sbridge_dev->bus);
1691 if (unlikely(rc < 0)) 1627 if (unlikely(rc < 0))
1692 return rc; 1628 return rc;
1693 1629
1694 /* allocate a new MC control structure */ 1630 /* allocate a new MC control structure */
1695 mci = edac_mc_alloc(sizeof(*pvt), csrows, channels, sbridge_dev->mc); 1631 layers[0].type = EDAC_MC_LAYER_CHANNEL;
1632 layers[0].size = NUM_CHANNELS;
1633 layers[0].is_virt_csrow = false;
1634 layers[1].type = EDAC_MC_LAYER_SLOT;
1635 layers[1].size = MAX_DIMMS;
1636 layers[1].is_virt_csrow = true;
1637 mci = edac_mc_alloc(sbridge_dev->mc, ARRAY_SIZE(layers), layers,
1638 sizeof(*pvt));
1639
1696 if (unlikely(!mci)) 1640 if (unlikely(!mci))
1697 return -ENOMEM; 1641 return -ENOMEM;
1698 1642
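Note that the layer order here is the reverse of the r82600/x38/tile conversions: Sandy Bridge exposes channels and DIMM slots rather than true chip-select rows, so it is the SLOT layer that carries is_virt_csrow = true. Judging from how the flag is set across the drivers in this diff, it tells the core which layer to present as the "csrow" in the legacy sysfs view when no real chip-select layer exists.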
diff --git a/drivers/edac/tile_edac.c b/drivers/edac/tile_edac.c
index e99d00976189..7bb4614730db 100644
--- a/drivers/edac/tile_edac.c
+++ b/drivers/edac/tile_edac.c
@@ -71,7 +71,10 @@ static void tile_edac_check(struct mem_ctl_info *mci)
71 if (mem_error.sbe_count != priv->ce_count) { 71 if (mem_error.sbe_count != priv->ce_count) {
72 dev_dbg(mci->dev, "ECC CE err on node %d\n", priv->node); 72 dev_dbg(mci->dev, "ECC CE err on node %d\n", priv->node);
73 priv->ce_count = mem_error.sbe_count; 73 priv->ce_count = mem_error.sbe_count;
74 edac_mc_handle_ce(mci, 0, 0, 0, 0, 0, mci->ctl_name); 74 edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
75 0, 0, 0,
76 0, 0, -1,
77 mci->ctl_name, "", NULL);
75 } 78 }
76} 79}
77 80
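The polled path keeps its delta check against the hypervisor's running single-bit-error counter; note that a burst of SBEs between two polls is still logged as a single event, since only the inequality is tested. The new call reports at position (csrow 0, channel 0) with no syndrome and no arch-specific log:

	/* page, offset and syndrome are unknown on this MC, hence 0 */
	edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
			     0, 0, 0,
			     0, 0, -1,		/* csrow 0, channel 0 */
			     mci->ctl_name, "", NULL);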
@@ -84,6 +87,7 @@ static int __devinit tile_edac_init_csrows(struct mem_ctl_info *mci)
84 struct csrow_info *csrow = &mci->csrows[0]; 87 struct csrow_info *csrow = &mci->csrows[0];
85 struct tile_edac_priv *priv = mci->pvt_info; 88 struct tile_edac_priv *priv = mci->pvt_info;
86 struct mshim_mem_info mem_info; 89 struct mshim_mem_info mem_info;
90 struct dimm_info *dimm = csrow->channels[0].dimm;
87 91
88 if (hv_dev_pread(priv->hv_devhdl, 0, (HV_VirtAddr)&mem_info, 92 if (hv_dev_pread(priv->hv_devhdl, 0, (HV_VirtAddr)&mem_info,
89 sizeof(struct mshim_mem_info), MSHIM_MEM_INFO_OFF) != 93 sizeof(struct mshim_mem_info), MSHIM_MEM_INFO_OFF) !=
@@ -93,27 +97,25 @@ static int __devinit tile_edac_init_csrows(struct mem_ctl_info *mci)
93 } 97 }
94 98
95 if (mem_info.mem_ecc) 99 if (mem_info.mem_ecc)
96 csrow->edac_mode = EDAC_SECDED; 100 dimm->edac_mode = EDAC_SECDED;
97 else 101 else
98 csrow->edac_mode = EDAC_NONE; 102 dimm->edac_mode = EDAC_NONE;
99 switch (mem_info.mem_type) { 103 switch (mem_info.mem_type) {
100 case DDR2: 104 case DDR2:
101 csrow->mtype = MEM_DDR2; 105 dimm->mtype = MEM_DDR2;
102 break; 106 break;
103 107
104 case DDR3: 108 case DDR3:
105 csrow->mtype = MEM_DDR3; 109 dimm->mtype = MEM_DDR3;
106 break; 110 break;
107 111
108 default: 112 default:
109 return -1; 113 return -1;
110 } 114 }
111 115
112 csrow->first_page = 0; 116 dimm->nr_pages = mem_info.mem_size >> PAGE_SHIFT;
113 csrow->nr_pages = mem_info.mem_size >> PAGE_SHIFT; 117 dimm->grain = TILE_EDAC_ERROR_GRAIN;
114 csrow->last_page = csrow->first_page + csrow->nr_pages - 1; 118 dimm->dtype = DEV_UNKNOWN;
115 csrow->grain = TILE_EDAC_ERROR_GRAIN;
116 csrow->dtype = DEV_UNKNOWN;
117 119
118 return 0; 120 return 0;
119} 121}
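With exactly one channel and one chip-select row, the lone DIMM is reached through csrow->channels[0].dimm, and the explicit first_page/last_page range drops out (the core computes it from nr_pages). A condensed sketch of the fill, with the size conversion spelled out:

	struct dimm_info *dimm = csrow->channels[0].dimm;

	/* the hypervisor reports bytes; the shift converts to MMU pages */
	dimm->nr_pages = mem_info.mem_size >> PAGE_SHIFT;
	dimm->grain = TILE_EDAC_ERROR_GRAIN;
	dimm->dtype = DEV_UNKNOWN;	/* device width is not reported */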
@@ -123,6 +125,7 @@ static int __devinit tile_edac_mc_probe(struct platform_device *pdev)
123 char hv_file[32]; 125 char hv_file[32];
124 int hv_devhdl; 126 int hv_devhdl;
125 struct mem_ctl_info *mci; 127 struct mem_ctl_info *mci;
128 struct edac_mc_layer layers[2];
126 struct tile_edac_priv *priv; 129 struct tile_edac_priv *priv;
127 int rc; 130 int rc;
128 131
@@ -132,8 +135,14 @@ static int __devinit tile_edac_mc_probe(struct platform_device *pdev)
132 return -EINVAL; 135 return -EINVAL;
133 136
134 /* A TILE MC has a single channel and one chip-select row. */ 137 /* A TILE MC has a single channel and one chip-select row. */
135 mci = edac_mc_alloc(sizeof(struct tile_edac_priv), 138 layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
136 TILE_EDAC_NR_CSROWS, TILE_EDAC_NR_CHANS, pdev->id); 139 layers[0].size = TILE_EDAC_NR_CSROWS;
140 layers[0].is_virt_csrow = true;
141 layers[1].type = EDAC_MC_LAYER_CHANNEL;
142 layers[1].size = TILE_EDAC_NR_CHANS;
143 layers[1].is_virt_csrow = false;
144 mci = edac_mc_alloc(pdev->id, ARRAY_SIZE(layers), layers,
145 sizeof(struct tile_edac_priv));
137 if (mci == NULL) 146 if (mci == NULL)
138 return -ENOMEM; 147 return -ENOMEM;
139 priv = mci->pvt_info; 148 priv = mci->pvt_info;
diff --git a/drivers/edac/x38_edac.c b/drivers/edac/x38_edac.c
index a438297389e5..1ac7962d63ea 100644
--- a/drivers/edac/x38_edac.c
+++ b/drivers/edac/x38_edac.c
@@ -215,19 +215,26 @@ static void x38_process_error_info(struct mem_ctl_info *mci,
215 return; 215 return;
216 216
217 if ((info->errsts ^ info->errsts2) & X38_ERRSTS_BITS) { 217 if ((info->errsts ^ info->errsts2) & X38_ERRSTS_BITS) {
218 edac_mc_handle_ce_no_info(mci, "UE overwrote CE"); 218 edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 0, 0, 0,
219 -1, -1, -1,
220 "UE overwrote CE", "", NULL);
219 info->errsts = info->errsts2; 221 info->errsts = info->errsts2;
220 } 222 }
221 223
222 for (channel = 0; channel < x38_channel_num; channel++) { 224 for (channel = 0; channel < x38_channel_num; channel++) {
223 log = info->eccerrlog[channel]; 225 log = info->eccerrlog[channel];
224 if (log & X38_ECCERRLOG_UE) { 226 if (log & X38_ECCERRLOG_UE) {
225 edac_mc_handle_ue(mci, 0, 0, 227 edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci,
226 eccerrlog_row(channel, log), "x38 UE"); 228 0, 0, 0,
229 eccerrlog_row(channel, log),
230 -1, -1,
231 "x38 UE", "", NULL);
227 } else if (log & X38_ECCERRLOG_CE) { 232 } else if (log & X38_ECCERRLOG_CE) {
228 edac_mc_handle_ce(mci, 0, 0, 233 edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
229 eccerrlog_syndrome(log), 234 0, 0, eccerrlog_syndrome(log),
230 eccerrlog_row(channel, log), 0, "x38 CE"); 235 eccerrlog_row(channel, log),
236 -1, -1,
237 "x38 CE", "", NULL);
231 } 238 }
232 } 239 }
233} 240}
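The x38 conversion is a one-to-one mapping of the removed helpers: the rank decoded from ECCERRLOG becomes the first-layer position, the syndrome is only meaningful for the corrected case, and the remaining layer positions stay -1 because ERRSTS does not localize the error further (the old CE call's channel argument of 0 becomes -1, unknown). As a comment-level sketch of the mapping, with the old signatures as the removed calls used them:

	/*
	 * edac_mc_handle_ue(mci, page, offset, row, msg)
	 *   -> edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci,
	 *                           page, offset, 0,
	 *                           row, -1, -1, msg, "", NULL);
	 *
	 * edac_mc_handle_ce(mci, page, offset, syndrome, row, chan, msg)
	 *   -> edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
	 *                           page, offset, syndrome,
	 *                           row, -1, -1, msg, "", NULL);
	 */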
@@ -317,9 +324,9 @@ static unsigned long drb_to_nr_pages(
317static int x38_probe1(struct pci_dev *pdev, int dev_idx) 324static int x38_probe1(struct pci_dev *pdev, int dev_idx)
318{ 325{
319 int rc; 326 int rc;
320 int i; 327 int i, j;
321 struct mem_ctl_info *mci = NULL; 328 struct mem_ctl_info *mci = NULL;
322 unsigned long last_page; 329 struct edac_mc_layer layers[2];
323 u16 drbs[X38_CHANNELS][X38_RANKS_PER_CHANNEL]; 330 u16 drbs[X38_CHANNELS][X38_RANKS_PER_CHANNEL];
324 bool stacked; 331 bool stacked;
325 void __iomem *window; 332 void __iomem *window;
@@ -335,7 +342,13 @@ static int x38_probe1(struct pci_dev *pdev, int dev_idx)
335 how_many_channel(pdev); 342 how_many_channel(pdev);
336 343
337 /* FIXME: unconventional pvt_info usage */ 344 /* FIXME: unconventional pvt_info usage */
338 mci = edac_mc_alloc(0, X38_RANKS, x38_channel_num, 0); 345 layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
346 layers[0].size = X38_RANKS;
347 layers[0].is_virt_csrow = true;
348 layers[1].type = EDAC_MC_LAYER_CHANNEL;
349 layers[1].size = x38_channel_num;
350 layers[1].is_virt_csrow = false;
351 mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers, 0);
339 if (!mci) 352 if (!mci)
340 return -ENOMEM; 353 return -ENOMEM;
341 354
@@ -363,7 +376,6 @@ static int x38_probe1(struct pci_dev *pdev, int dev_idx)
363 * cumulative; the last one will contain the total memory 376 * cumulative; the last one will contain the total memory
364 * contained in all ranks. 377 * contained in all ranks.
365 */ 378 */
366 last_page = -1UL;
367 for (i = 0; i < mci->nr_csrows; i++) { 379 for (i = 0; i < mci->nr_csrows; i++) {
368 unsigned long nr_pages; 380 unsigned long nr_pages;
369 struct csrow_info *csrow = &mci->csrows[i]; 381 struct csrow_info *csrow = &mci->csrows[i];
@@ -372,20 +384,18 @@ static int x38_probe1(struct pci_dev *pdev, int dev_idx)
372 i / X38_RANKS_PER_CHANNEL, 384 i / X38_RANKS_PER_CHANNEL,
373 i % X38_RANKS_PER_CHANNEL); 385 i % X38_RANKS_PER_CHANNEL);
374 386
375 if (nr_pages == 0) { 387 if (nr_pages == 0)
376 csrow->mtype = MEM_EMPTY;
377 continue; 388 continue;
378 }
379 389
380 csrow->first_page = last_page + 1; 390 for (j = 0; j < x38_channel_num; j++) {
381 last_page += nr_pages; 391 struct dimm_info *dimm = csrow->channels[j].dimm;
382 csrow->last_page = last_page;
383 csrow->nr_pages = nr_pages;
384 392
385 csrow->grain = nr_pages << PAGE_SHIFT; 393 dimm->nr_pages = nr_pages / x38_channel_num;
386 csrow->mtype = MEM_DDR2; 394 dimm->grain = nr_pages << PAGE_SHIFT;
387 csrow->dtype = DEV_UNKNOWN; 395 dimm->mtype = MEM_DDR2;
388 csrow->edac_mode = EDAC_UNKNOWN; 396 dimm->dtype = DEV_UNKNOWN;
397 dimm->edac_mode = EDAC_UNKNOWN;
398 }
389 } 399 }
390 400
391 x38_clear_error_info(mci); 401 x38_clear_error_info(mci);
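Since each rank is now described once per channel DIMM rather than as a single cumulative csrow, the per-rank page count is split evenly across the channels: on a two-channel controller, a rank of N pages becomes two dimm_info entries of N/2 pages each, so the totals still match the old accounting. The grain value (nr_pages << PAGE_SHIFT, i.e. the rank size in bytes) is carried over verbatim from the removed csrow code.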