author		Borislav Petkov <borislav.petkov@amd.com>	2010-10-08 12:32:29 -0400
committer	Borislav Petkov <borislav.petkov@amd.com>	2011-03-17 09:46:11 -0400
commit		b2b0c605436e343a9a24f00e7fc8fb89a8316e20 (patch)
tree		afc2d4300b7ad6e8b8c92d02cdf176e55fddc689 /drivers/edac
parent		b6a280bb96e0981a527d26cfb0fad203cb9bd808 (diff)
amd64_edac: Add support for F15h DCT PCI config accesses
F15h "multiplexes" between the configuration space of the two DRAM controllers by toggling D18F1x10C[DctCfgSel] while F10h has a different set of registers for DCT0, and DCT1 in extended PCI config space. Add DCT configuration space accessors per family thus wrapping all the different access prerequisites. Clean up code while at it, shorten names. Signed-off-by: Borislav Petkov <borislav.petkov@amd.com>
Diffstat (limited to 'drivers/edac')
-rw-r--r--	drivers/edac/amd64_edac.c	164
-rw-r--r--	drivers/edac/amd64_edac.h	49
-rw-r--r--	drivers/edac/amd64_edac_inj.c	8
3 files changed, 146 insertions(+), 75 deletions(-)
diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c
index 23e03554f0d3..ae5adac3733f 100644
--- a/drivers/edac/amd64_edac.c
+++ b/drivers/edac/amd64_edac.c
@@ -107,6 +107,79 @@ struct scrubrate {
 	{ 0x00, 0UL},		/* scrubbing off */
 };
 
+static int __amd64_read_pci_cfg_dword(struct pci_dev *pdev, int offset,
+				      u32 *val, const char *func)
+{
+	int err = 0;
+
+	err = pci_read_config_dword(pdev, offset, val);
+	if (err)
+		amd64_warn("%s: error reading F%dx%03x.\n",
+			   func, PCI_FUNC(pdev->devfn), offset);
+
+	return err;
+}
+
+int __amd64_write_pci_cfg_dword(struct pci_dev *pdev, int offset,
+				u32 val, const char *func)
+{
+	int err = 0;
+
+	err = pci_write_config_dword(pdev, offset, val);
+	if (err)
+		amd64_warn("%s: error writing to F%dx%03x.\n",
+			   func, PCI_FUNC(pdev->devfn), offset);
+
+	return err;
+}
+
+/*
+ *
+ * Depending on the family, F2 DCT reads need special handling:
+ *
+ * K8: has a single DCT only
+ *
+ * F10h: each DCT has its own set of regs
+ *	DCT0 -> F2x040..
+ *	DCT1 -> F2x140..
+ *
+ * F15h: we select which DCT we access using F1x10C[DctCfgSel]
+ *
+ */
+static int k8_read_dct_pci_cfg(struct amd64_pvt *pvt, int addr, u32 *val,
+			       const char *func)
+{
+	if (addr >= 0x100)
+		return -EINVAL;
+
+	return __amd64_read_pci_cfg_dword(pvt->F2, addr, val, func);
+}
+
+static int f10_read_dct_pci_cfg(struct amd64_pvt *pvt, int addr, u32 *val,
+				const char *func)
+{
+	return __amd64_read_pci_cfg_dword(pvt->F2, addr, val, func);
+}
+
+static int f15_read_dct_pci_cfg(struct amd64_pvt *pvt, int addr, u32 *val,
+				const char *func)
+{
+	u32 reg = 0;
+	u8 dct  = 0;
+
+	if (addr >= 0x140 && addr <= 0x1a0) {
+		dct   = 1;
+		addr -= 0x100;
+	}
+
+	amd64_read_pci_cfg(pvt->F1, DCT_CFG_SEL, &reg);
+	reg &= 0xfffffffe;
+	reg |= dct;
+	amd64_write_pci_cfg(pvt->F1, DCT_CFG_SEL, reg);
+
+	return __amd64_read_pci_cfg_dword(pvt->F2, addr, val, func);
+}
+
 /*
  * Memory scrubber control interface. For K8, memory scrubbing is handled by
  * hardware and can involve L2 cache, dcache as well as the main memory. With
@@ -824,7 +897,7 @@ static void amd64_dump_dramcfg_low(u32 dclr, int chan)
 }
 
 /* Display and decode various NB registers for debug purposes. */
-static void amd64_dump_misc_regs(struct amd64_pvt *pvt)
+static void dump_misc_regs(struct amd64_pvt *pvt)
 {
 	debugf1("F3xE8 (NB Cap): 0x%08x\n", pvt->nbcap);
 
@@ -864,13 +937,10 @@ static void amd64_dump_misc_regs(struct amd64_pvt *pvt)
 	amd64_dump_dramcfg_low(pvt->dclr1, 1);
 }
 
-/* Read in both of DBAM registers */
 static void amd64_read_dbam_reg(struct amd64_pvt *pvt)
 {
-	amd64_read_pci_cfg(pvt->F2, DBAM0, &pvt->dbam0);
-
-	if (boot_cpu_data.x86 >= 0x10)
-		amd64_read_pci_cfg(pvt->F2, DBAM1, &pvt->dbam1);
+	amd64_read_dct_pci_cfg(pvt, DBAM0, &pvt->dbam0);
+	amd64_read_dct_pci_cfg(pvt, DBAM1, &pvt->dbam1);
 }
 
 /*
@@ -925,7 +995,7 @@ static void amd64_set_dct_base_and_mask(struct amd64_pvt *pvt)
 /*
  * Function 2 Offset F10_DCSB0; read in the DCS Base and DCS Mask hw registers
  */
-static void amd64_read_dct_base_mask(struct amd64_pvt *pvt)
+static void read_dct_base_mask(struct amd64_pvt *pvt)
 {
 	int cs, reg;
 
@@ -933,37 +1003,33 @@ static void amd64_read_dct_base_mask(struct amd64_pvt *pvt)
 
 	for (cs = 0; cs < pvt->cs_count; cs++) {
 		reg = K8_DCSB0 + (cs * 4);
-		if (!amd64_read_pci_cfg(pvt->F2, reg, &pvt->dcsb0[cs]))
+
+		if (!amd64_read_dct_pci_cfg(pvt, reg, &pvt->dcsb0[cs]))
 			debugf0("  DCSB0[%d]=0x%08x reg: F2x%x\n",
 				cs, pvt->dcsb0[cs], reg);
 
-		/* If DCT are NOT ganged, then read in DCT1's base */
-		if (boot_cpu_data.x86 >= 0x10 && !dct_ganging_enabled(pvt)) {
+		if (!dct_ganging_enabled(pvt)) {
 			reg = F10_DCSB1 + (cs * 4);
-			if (!amd64_read_pci_cfg(pvt->F2, reg,
-						&pvt->dcsb1[cs]))
+
+			if (!amd64_read_dct_pci_cfg(pvt, reg, &pvt->dcsb1[cs]))
 				debugf0("  DCSB1[%d]=0x%08x reg: F2x%x\n",
 					cs, pvt->dcsb1[cs], reg);
-		} else {
-			pvt->dcsb1[cs] = 0;
 		}
 	}
 
 	for (cs = 0; cs < pvt->num_dcsm; cs++) {
 		reg = K8_DCSM0 + (cs * 4);
-		if (!amd64_read_pci_cfg(pvt->F2, reg, &pvt->dcsm0[cs]))
+
+		if (!amd64_read_dct_pci_cfg(pvt, reg, &pvt->dcsm0[cs]))
 			debugf0("  DCSM0[%d]=0x%08x reg: F2x%x\n",
 				cs, pvt->dcsm0[cs], reg);
 
-		/* If DCT are NOT ganged, then read in DCT1's mask */
-		if (boot_cpu_data.x86 >= 0x10 && !dct_ganging_enabled(pvt)) {
+		if (!dct_ganging_enabled(pvt)) {
 			reg = F10_DCSM1 + (cs * 4);
-			if (!amd64_read_pci_cfg(pvt->F2, reg,
-						&pvt->dcsm1[cs]))
+
+			if (!amd64_read_dct_pci_cfg(pvt, reg, &pvt->dcsm1[cs]))
 				debugf0("  DCSM1[%d]=0x%08x reg: F2x%x\n",
 					cs, pvt->dcsm1[cs], reg);
-		} else {
-			pvt->dcsm1[cs] = 0;
 		}
 	}
 }
@@ -999,7 +1065,7 @@ static int k8_early_channel_count(struct amd64_pvt *pvt)
 {
 	int flag, err = 0;
 
-	err = amd64_read_pci_cfg(pvt->F2, F10_DCLR_0, &pvt->dclr0);
+	err = amd64_read_dct_pci_cfg(pvt, F10_DCLR_0, &pvt->dclr0);
 	if (err)
 		return err;
 
@@ -1163,7 +1229,7 @@ static int f10_early_channel_count(struct amd64_pvt *pvt)
 	 * both controllers since DIMMs can be placed in either one.
 	 */
 	for (i = 0; i < ARRAY_SIZE(dbams); i++) {
-		if (amd64_read_pci_cfg(pvt->F2, dbams[i], &dbam))
+		if (amd64_read_dct_pci_cfg(pvt, dbams[i], &dbam))
 			goto err_reg;
 
 		for (j = 0; j < 4; j++) {
@@ -1255,12 +1321,9 @@ static void f10_read_dram_base_limit(struct amd64_pvt *pvt, int dram)
 static void f10_read_dram_ctl_register(struct amd64_pvt *pvt)
 {
 
-	if (!amd64_read_pci_cfg(pvt->F2, F10_DCTL_SEL_LOW,
-				&pvt->dram_ctl_select_low)) {
-		debugf0("F2x110 (DCTL Sel. Low): 0x%08x, "
-			"High range addresses at: 0x%x\n",
-			pvt->dram_ctl_select_low,
-			dct_sel_baseaddr(pvt));
+	if (!amd64_read_dct_pci_cfg(pvt, F10_DCTL_SEL_LOW, &pvt->dct_sel_low)) {
+		debugf0("F2x110 (DCTL Sel. Low): 0x%08x, High range addrs at: 0x%x\n",
+			pvt->dct_sel_low, dct_sel_baseaddr(pvt));
 
 		debugf0("  DCT mode: %s, All DCTs on: %s\n",
 			(dct_ganging_enabled(pvt) ? "ganged" : "unganged"),
@@ -1281,8 +1344,7 @@ static void f10_read_dram_ctl_register(struct amd64_pvt *pvt)
 			dct_sel_interleave_addr(pvt));
 	}
 
-	amd64_read_pci_cfg(pvt->F2, F10_DCTL_SEL_HIGH,
-			   &pvt->dram_ctl_select_high);
+	amd64_read_dct_pci_cfg(pvt, F10_DCTL_SEL_HIGH, &pvt->dct_sel_hi);
 }
 
 /*
@@ -1292,7 +1354,7 @@ static void f10_read_dram_ctl_register(struct amd64_pvt *pvt)
 static u32 f10_determine_channel(struct amd64_pvt *pvt, u64 sys_addr,
 				 int hi_range_sel, u32 intlv_en)
 {
-	u32 cs, temp, dct_sel_high = (pvt->dram_ctl_select_low >> 1) & 1;
+	u32 cs, temp, dct_sel_high = (pvt->dct_sel_low >> 1) & 1;
 
 	if (dct_ganging_enabled(pvt))
 		cs = 0;
@@ -1481,7 +1543,7 @@ static int f10_match_to_this_node(struct amd64_pvt *pvt, int dram_range,
 	 */
 	hole_off = (pvt->dhar & 0x0000FF80);
 	hole_valid = (pvt->dhar & 0x1);
-	dct_sel_base_off = (pvt->dram_ctl_select_high & 0xFFFFFC00) << 16;
+	dct_sel_base_off = (pvt->dct_sel_hi & 0xFFFFFC00) << 16;
 
 	debugf1("   HoleOffset=0x%x  HoleValid=0x%x IntlvSel=0x%x\n",
 		hole_off, hole_valid, intlv_sel);
@@ -1668,6 +1730,7 @@ static struct amd64_family_type amd64_family_types[] = {
 			.read_dram_base_limit	= k8_read_dram_base_limit,
 			.map_sysaddr_to_csrow	= k8_map_sysaddr_to_csrow,
 			.dbam_to_cs		= k8_dbam_to_chip_select,
+			.read_dct_pci_cfg	= k8_read_dct_pci_cfg,
 		}
 	},
 	[F10_CPUS] = {
@@ -1681,6 +1744,13 @@ static struct amd64_family_type amd64_family_types[] = {
 			.read_dram_ctl_register	= f10_read_dram_ctl_register,
 			.map_sysaddr_to_csrow	= f10_map_sysaddr_to_csrow,
 			.dbam_to_cs		= f10_dbam_to_chip_select,
+			.read_dct_pci_cfg	= f10_read_dct_pci_cfg,
+		}
+	},
+	[F15_CPUS] = {
+		.ctl_name = "F15h",
+		.ops = {
+			.read_dct_pci_cfg	= f15_read_dct_pci_cfg,
 		}
 	},
 };
@@ -2081,24 +2151,24 @@ static void read_mc_regs(struct amd64_pvt *pvt)
 		}
 	}
 
-	amd64_read_dct_base_mask(pvt);
+	read_dct_base_mask(pvt);
 
 	amd64_read_pci_cfg(pvt->F1, K8_DHAR, &pvt->dhar);
 	amd64_read_dbam_reg(pvt);
 
 	amd64_read_pci_cfg(pvt->F3, F10_ONLINE_SPARE, &pvt->online_spare);
 
-	amd64_read_pci_cfg(pvt->F2, F10_DCLR_0, &pvt->dclr0);
-	amd64_read_pci_cfg(pvt->F2, F10_DCHR_0, &pvt->dchr0);
+	amd64_read_dct_pci_cfg(pvt, F10_DCLR_0, &pvt->dclr0);
+	amd64_read_dct_pci_cfg(pvt, F10_DCHR_0, &pvt->dchr0);
 
-	if (boot_cpu_data.x86 >= 0x10) {
-		if (!dct_ganging_enabled(pvt)) {
-			amd64_read_pci_cfg(pvt->F2, F10_DCLR_1, &pvt->dclr1);
-			amd64_read_pci_cfg(pvt->F2, F10_DCHR_1, &pvt->dchr1);
-		}
-		amd64_read_pci_cfg(pvt->F3, EXT_NB_MCA_CFG, &tmp);
+	if (!dct_ganging_enabled(pvt)) {
+		amd64_read_dct_pci_cfg(pvt, F10_DCLR_1, &pvt->dclr1);
+		amd64_read_dct_pci_cfg(pvt, F10_DCHR_1, &pvt->dchr1);
 	}
 
+	if (boot_cpu_data.x86 >= 0x10)
+		amd64_read_pci_cfg(pvt->F3, EXT_NB_MCA_CFG, &tmp);
+
 	if (boot_cpu_data.x86 == 0x10 &&
 	    boot_cpu_data.x86_model > 7 &&
 	    /* F3x180[EccSymbolSize]=1 => x8 symbols */
@@ -2107,7 +2177,7 @@ static void read_mc_regs(struct amd64_pvt *pvt)
 	else
 		pvt->syn_type = 4;
 
-	amd64_dump_misc_regs(pvt);
+	dump_misc_regs(pvt);
 }
 
 /*
@@ -2342,7 +2412,7 @@ static bool enable_ecc_error_reporting(struct ecc_settings *s, u8 nid,
 	s->nbctl_valid = true;
 
 	value |= mask;
-	pci_write_config_dword(F3, K8_NBCTL, value);
+	amd64_write_pci_cfg(F3, K8_NBCTL, value);
 
 	amd64_read_pci_cfg(F3, K8_NBCFG, &value);
 
@@ -2357,7 +2427,7 @@ static bool enable_ecc_error_reporting(struct ecc_settings *s, u8 nid,
 
 	/* Attempt to turn on DRAM ECC Enable */
 	value |= K8_NBCFG_ECC_ENABLE;
-	pci_write_config_dword(F3, K8_NBCFG, value);
+	amd64_write_pci_cfg(F3, K8_NBCFG, value);
 
 	amd64_read_pci_cfg(F3, K8_NBCFG, &value);
 
@@ -2391,13 +2461,13 @@ static void restore_ecc_error_reporting(struct ecc_settings *s, u8 nid,
 	value &= ~mask;
 	value |= s->old_nbctl;
 
-	pci_write_config_dword(F3, K8_NBCTL, value);
+	amd64_write_pci_cfg(F3, K8_NBCTL, value);
 
 	/* restore previous BIOS DRAM ECC "off" setting we force-enabled */
 	if (!s->flags.nb_ecc_prev) {
 		amd64_read_pci_cfg(F3, K8_NBCFG, &value);
 		value &= ~K8_NBCFG_ECC_ENABLE;
-		pci_write_config_dword(F3, K8_NBCFG, value);
+		amd64_write_pci_cfg(F3, K8_NBCFG, value);
 	}
 
 	/* restore the NB Enable MCGCTL bit */
diff --git a/drivers/edac/amd64_edac.h b/drivers/edac/amd64_edac.h
index 613ec72b0f65..91c266b9f6cf 100644
--- a/drivers/edac/amd64_edac.h
+++ b/drivers/edac/amd64_edac.h
@@ -184,6 +184,7 @@
 					/* NOTE: Extra mask bit vs K8 */
 #define f10_dhar_offset(dhar)	((dhar & F10_DHAR_OFFSET_MASK) << 16)
 
+#define DCT_CFG_SEL	0x10C
 
 /* F10 High BASE/LIMIT registers */
 #define F10_DRAM_BASE_HIGH	0x140
@@ -257,14 +258,14 @@
 
 
 #define F10_DCTL_SEL_LOW	0x110
-#define dct_sel_baseaddr(pvt)		((pvt->dram_ctl_select_low) & 0xFFFFF800)
-#define dct_sel_interleave_addr(pvt)	(((pvt->dram_ctl_select_low) >> 6) & 0x3)
-#define dct_high_range_enabled(pvt)	(pvt->dram_ctl_select_low & BIT(0))
-#define dct_interleave_enabled(pvt)	(pvt->dram_ctl_select_low & BIT(2))
-#define dct_ganging_enabled(pvt)	(pvt->dram_ctl_select_low & BIT(4))
-#define dct_data_intlv_enabled(pvt)	(pvt->dram_ctl_select_low & BIT(5))
-#define dct_dram_enabled(pvt)		(pvt->dram_ctl_select_low & BIT(8))
-#define dct_memory_cleared(pvt)		(pvt->dram_ctl_select_low & BIT(10))
+#define dct_sel_baseaddr(pvt)		((pvt->dct_sel_low) & 0xFFFFF800)
+#define dct_sel_interleave_addr(pvt)	(((pvt->dct_sel_low) >> 6) & 0x3)
+#define dct_high_range_enabled(pvt)	(pvt->dct_sel_low & BIT(0))
+#define dct_interleave_enabled(pvt)	(pvt->dct_sel_low & BIT(2))
+#define dct_ganging_enabled(pvt)	(pvt->dct_sel_low & BIT(4))
+#define dct_data_intlv_enabled(pvt)	(pvt->dct_sel_low & BIT(5))
+#define dct_dram_enabled(pvt)		(pvt->dct_sel_low & BIT(8))
+#define dct_memory_cleared(pvt)		(pvt->dct_sel_low & BIT(10))
 
 #define F10_DCTL_SEL_HIGH	0x114
 
@@ -380,9 +381,11 @@ static inline int get_node_id(struct pci_dev *pdev)
 	return PCI_SLOT(pdev->devfn) - 0x18;
 }
 
-enum amd64_chipset_families {
+enum amd_families {
 	K8_CPUS = 0,
 	F10_CPUS,
+	F15_CPUS,
+	NUM_FAMILIES,
 };
 
 /* Error injection control structure */
@@ -448,9 +451,9 @@ struct amd64_pvt {
 	u64 top_mem;		/* top of memory below 4GB */
 	u64 top_mem2;		/* top of memory above 4GB */
 
-	u32 dram_ctl_select_low;	/* DRAM Controller Select Low Reg */
-	u32 dram_ctl_select_high;	/* DRAM Controller Select High Reg */
+	u32 dct_sel_low;	/* DRAM Controller Select Low Reg */
+	u32 dct_sel_hi;		/* DRAM Controller Select High Reg */
 	u32 online_spare;	/* On-Line spare Reg */
 
 	/* x4 or x8 syndromes in use */
 	u8 syn_type;
@@ -519,6 +522,8 @@ struct low_ops {
 	void (*map_sysaddr_to_csrow)	(struct mem_ctl_info *mci,
 					 struct err_regs *info, u64 SystemAddr);
 	int (*dbam_to_cs)		(struct amd64_pvt *pvt, int cs_mode);
+	int (*read_dct_pci_cfg)		(struct amd64_pvt *pvt, int offset,
+					 u32 *val, const char *func);
 };
 
 struct amd64_family_type {
@@ -527,21 +532,17 @@ struct amd64_family_type {
 	struct low_ops ops;
 };
 
-static inline int amd64_read_pci_cfg_dword(struct pci_dev *pdev, int offset,
-					   u32 *val, const char *func)
-{
-	int err = 0;
-
-	err = pci_read_config_dword(pdev, offset, val);
-	if (err)
-		amd64_warn("%s: error reading F%dx%x.\n",
-			   func, PCI_FUNC(pdev->devfn), offset);
-
-	return err;
-}
-
-#define amd64_read_pci_cfg(pdev, offset, val)	\
-	amd64_read_pci_cfg_dword(pdev, offset, val, __func__)
+int __amd64_write_pci_cfg_dword(struct pci_dev *pdev, int offset,
+				u32 val, const char *func);
+
+#define amd64_read_pci_cfg(pdev, offset, val)	\
+	__amd64_read_pci_cfg_dword(pdev, offset, val, __func__)
+
+#define amd64_write_pci_cfg(pdev, offset, val)	\
+	__amd64_write_pci_cfg_dword(pdev, offset, val, __func__)
+
+#define amd64_read_dct_pci_cfg(pvt, offset, val) \
+	pvt->ops->read_dct_pci_cfg(pvt, offset, val, __func__)
 
 /*
  * For future CPU versions, verify the following as new 'slow' rates appear and
diff --git a/drivers/edac/amd64_edac_inj.c b/drivers/edac/amd64_edac_inj.c
index 688478de1cbd..303f10e03dda 100644
--- a/drivers/edac/amd64_edac_inj.c
+++ b/drivers/edac/amd64_edac_inj.c
@@ -117,13 +117,13 @@ static ssize_t amd64_inject_read_store(struct mem_ctl_info *mci,
 	/* Form value to choose 16-byte section of cacheline */
 	section = F10_NB_ARRAY_DRAM_ECC |
 		  SET_NB_ARRAY_ADDRESS(pvt->injection.section);
-	pci_write_config_dword(pvt->F3, F10_NB_ARRAY_ADDR, section);
+	amd64_write_pci_cfg(pvt->F3, F10_NB_ARRAY_ADDR, section);
 
 	word_bits = SET_NB_DRAM_INJECTION_READ(pvt->injection.word,
 					       pvt->injection.bit_map);
 
 	/* Issue 'word' and 'bit' along with the READ request */
-	pci_write_config_dword(pvt->F3, F10_NB_ARRAY_DATA, word_bits);
+	amd64_write_pci_cfg(pvt->F3, F10_NB_ARRAY_DATA, word_bits);
 
 	debugf0("section=0x%x word_bits=0x%x\n", section, word_bits);
 
@@ -150,13 +150,13 @@ static ssize_t amd64_inject_write_store(struct mem_ctl_info *mci,
 	/* Form value to choose 16-byte section of cacheline */
 	section = F10_NB_ARRAY_DRAM_ECC |
 		  SET_NB_ARRAY_ADDRESS(pvt->injection.section);
-	pci_write_config_dword(pvt->F3, F10_NB_ARRAY_ADDR, section);
+	amd64_write_pci_cfg(pvt->F3, F10_NB_ARRAY_ADDR, section);
 
 	word_bits = SET_NB_DRAM_INJECTION_WRITE(pvt->injection.word,
 						pvt->injection.bit_map);
 
 	/* Issue 'word' and 'bit' along with the READ request */
-	pci_write_config_dword(pvt->F3, F10_NB_ARRAY_DATA, word_bits);
+	amd64_write_pci_cfg(pvt->F3, F10_NB_ARRAY_DATA, word_bits);
 
 	debugf0("section=0x%x word_bits=0x%x\n", section, word_bits);
 