author		Borislav Petkov <borislav.petkov@amd.com>	2009-10-13 13:26:55 -0400
committer	Borislav Petkov <borislav.petkov@amd.com>	2009-12-07 13:14:27 -0500
commit		6ba5dcdc44624677bba0bef1dcb93a524f88f8c1 (patch)
tree		aa2aa4c95882b4451c2bf99c5228d81dd76e641d /drivers
parent		f6d6ae965760906d79ab29bc38507608c5971549 (diff)
amd64_edac: wrap-up pci config read error handling
Add a pci config read wrapper that reports pci config space access errors unconditionally instead of making them visible only on a debug build. This matters for amd64_edac since it relies on all those pci config register values to access the DRAM/DIMM configuration of the nodes.

In addition, the wrapper makes a _lot_ (look at the diffstat!) of error handling code superfluous and improves overall code readability by moving error handling details out of the way.

Signed-off-by: Borislav Petkov <borislav.petkov@amd.com>
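To make the effect of the wrapper concrete, here is a condensed before/after sketch of a typical call site. It is stitched together from the hunks below (the NBCAP read in amd64_read_mc_registers() and the DCLR0 read in f10_early_channel_count()) and is illustrative rather than a complete function:

	/* Before: every read open-codes the error path, and the debugf0()
	 * message is only visible on a debug build. */
	err = pci_read_config_dword(pvt->misc_f3_ctl, K8_NBCAP, &pvt->nbcap);
	if (err)
		goto err_reg;

	/* After: amd64_read_pci_cfg() warns on failure by itself (it wraps
	 * pci_read_config_dword() and prints the caller via __func__), so
	 * most call sites shrink to a single line ... */
	amd64_read_pci_cfg(pvt->misc_f3_ctl, K8_NBCAP, &pvt->nbcap);

	/* ... and the few that still need to bail out simply test the
	 * error code the wrapper passes back. */
	if (amd64_read_pci_cfg(pvt->dram_f2_ctl, F10_DCLR_0, &pvt->dclr0))
		goto err_reg;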
Diffstat (limited to 'drivers')
-rw-r--r--	drivers/edac/amd64_edac.c	213
-rw-r--r--	drivers/edac/amd64_edac.h	16
2 files changed, 70 insertions, 159 deletions
diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c
index 70c7d5f5ba5e..3e5ece6e7c95 100644
--- a/drivers/edac/amd64_edac.c
+++ b/drivers/edac/amd64_edac.c
@@ -164,11 +164,9 @@ static int amd64_get_scrub_rate(struct mem_ctl_info *mci, u32 *bw)
 {
 	struct amd64_pvt *pvt = mci->pvt_info;
 	u32 scrubval = 0;
-	int status = -1, i, ret = 0;
+	int status = -1, i;
 
-	ret = pci_read_config_dword(pvt->misc_f3_ctl, K8_SCRCTRL, &scrubval);
-	if (ret)
-		debugf0("Reading K8_SCRCTRL failed\n");
+	amd64_read_pci_cfg(pvt->misc_f3_ctl, K8_SCRCTRL, &scrubval);
 
 	scrubval = scrubval & 0x001F;
 
@@ -909,26 +907,10 @@ static void amd64_dump_misc_regs(struct amd64_pvt *pvt)
 /* Read in both of DBAM registers */
 static void amd64_read_dbam_reg(struct amd64_pvt *pvt)
 {
-	int err = 0;
-	unsigned int reg;
+	amd64_read_pci_cfg(pvt->dram_f2_ctl, DBAM0, &pvt->dbam0);
 
-	reg = DBAM0;
-	err = pci_read_config_dword(pvt->dram_f2_ctl, reg, &pvt->dbam0);
-	if (err)
-		goto err_reg;
-
-	if (boot_cpu_data.x86 >= 0x10) {
-		reg = DBAM1;
-		err = pci_read_config_dword(pvt->dram_f2_ctl, reg, &pvt->dbam1);
-
-		if (err)
-			goto err_reg;
-	}
-
-	return;
-
-err_reg:
-	debugf0("Error reading F2x%03x.\n", reg);
+	if (boot_cpu_data.x86 >= 0x10)
+		amd64_read_pci_cfg(pvt->dram_f2_ctl, DBAM1, &pvt->dbam1);
 }
 
 /*
@@ -991,28 +973,21 @@ static void amd64_set_dct_base_and_mask(struct amd64_pvt *pvt)
  */
 static void amd64_read_dct_base_mask(struct amd64_pvt *pvt)
 {
-	int cs, reg, err = 0;
+	int cs, reg;
 
 	amd64_set_dct_base_and_mask(pvt);
 
 	for (cs = 0; cs < pvt->cs_count; cs++) {
 		reg = K8_DCSB0 + (cs * 4);
-		err = pci_read_config_dword(pvt->dram_f2_ctl, reg,
-					    &pvt->dcsb0[cs]);
-		if (unlikely(err))
-			debugf0("Reading K8_DCSB0[%d] failed\n", cs);
-		else
+		if (!amd64_read_pci_cfg(pvt->dram_f2_ctl, reg, &pvt->dcsb0[cs]))
 			debugf0(" DCSB0[%d]=0x%08x reg: F2x%x\n",
 				cs, pvt->dcsb0[cs], reg);
 
 		/* If DCT are NOT ganged, then read in DCT1's base */
 		if (boot_cpu_data.x86 >= 0x10 && !dct_ganging_enabled(pvt)) {
 			reg = F10_DCSB1 + (cs * 4);
-			err = pci_read_config_dword(pvt->dram_f2_ctl, reg,
-						    &pvt->dcsb1[cs]);
-			if (unlikely(err))
-				debugf0("Reading F10_DCSB1[%d] failed\n", cs);
-			else
+			if (!amd64_read_pci_cfg(pvt->dram_f2_ctl, reg,
+						&pvt->dcsb1[cs]))
 				debugf0(" DCSB1[%d]=0x%08x reg: F2x%x\n",
 					cs, pvt->dcsb1[cs], reg);
 		} else {
@@ -1022,26 +997,20 @@ static void amd64_read_dct_base_mask(struct amd64_pvt *pvt)
 
 	for (cs = 0; cs < pvt->num_dcsm; cs++) {
 		reg = K8_DCSM0 + (cs * 4);
-		err = pci_read_config_dword(pvt->dram_f2_ctl, reg,
-					    &pvt->dcsm0[cs]);
-		if (unlikely(err))
-			debugf0("Reading K8_DCSM0 failed\n");
-		else
+		if (!amd64_read_pci_cfg(pvt->dram_f2_ctl, reg, &pvt->dcsm0[cs]))
 			debugf0(" DCSM0[%d]=0x%08x reg: F2x%x\n",
 				cs, pvt->dcsm0[cs], reg);
 
 		/* If DCT are NOT ganged, then read in DCT1's mask */
 		if (boot_cpu_data.x86 >= 0x10 && !dct_ganging_enabled(pvt)) {
 			reg = F10_DCSM1 + (cs * 4);
-			err = pci_read_config_dword(pvt->dram_f2_ctl, reg,
-						    &pvt->dcsm1[cs]);
-			if (unlikely(err))
-				debugf0("Reading F10_DCSM1[%d] failed\n", cs);
-			else
+			if (!amd64_read_pci_cfg(pvt->dram_f2_ctl, reg,
+						&pvt->dcsm1[cs]))
 				debugf0(" DCSM1[%d]=0x%08x reg: F2x%x\n",
 					cs, pvt->dcsm1[cs], reg);
-		} else
+		} else {
 			pvt->dcsm1[cs] = 0;
+		}
 	}
 }
 
@@ -1078,7 +1047,7 @@ static int k8_early_channel_count(struct amd64_pvt *pvt)
 {
 	int flag, err = 0;
 
-	err = pci_read_config_dword(pvt->dram_f2_ctl, F10_DCLR_0, &pvt->dclr0);
+	err = amd64_read_pci_cfg(pvt->dram_f2_ctl, F10_DCLR_0, &pvt->dclr0);
 	if (err)
 		return err;
 
@@ -1114,22 +1083,15 @@ static void k8_read_dram_base_limit(struct amd64_pvt *pvt, int dram)
 {
 	u32 low;
 	u32 off = dram << 3; /* 8 bytes between DRAM entries */
-	int err;
 
-	err = pci_read_config_dword(pvt->addr_f1_ctl,
-				    K8_DRAM_BASE_LOW + off, &low);
-	if (err)
-		debugf0("Reading K8_DRAM_BASE_LOW failed\n");
+	amd64_read_pci_cfg(pvt->addr_f1_ctl, K8_DRAM_BASE_LOW + off, &low);
 
 	/* Extract parts into separate data entries */
 	pvt->dram_base[dram] = ((u64) low & 0xFFFF0000) << 8;
 	pvt->dram_IntlvEn[dram] = (low >> 8) & 0x7;
 	pvt->dram_rw_en[dram] = (low & 0x3);
 
-	err = pci_read_config_dword(pvt->addr_f1_ctl,
-				    K8_DRAM_LIMIT_LOW + off, &low);
-	if (err)
-		debugf0("Reading K8_DRAM_LIMIT_LOW failed\n");
+	amd64_read_pci_cfg(pvt->addr_f1_ctl, K8_DRAM_LIMIT_LOW + off, &low);
 
 	/*
 	 * Extract parts into separate data entries. Limit is the HIGHEST memory
@@ -1248,16 +1210,13 @@ static int k8_dbam_map_to_pages(struct amd64_pvt *pvt, int dram_map)
 static int f10_early_channel_count(struct amd64_pvt *pvt)
 {
 	int dbams[] = { DBAM0, DBAM1 };
-	int err = 0, channels = 0;
-	int i, j;
+	int i, j, channels = 0;
 	u32 dbam;
 
-	err = pci_read_config_dword(pvt->dram_f2_ctl, F10_DCLR_0, &pvt->dclr0);
-	if (err)
+	if (amd64_read_pci_cfg(pvt->dram_f2_ctl, F10_DCLR_0, &pvt->dclr0))
 		goto err_reg;
 
-	err = pci_read_config_dword(pvt->dram_f2_ctl, F10_DCLR_1, &pvt->dclr1);
-	if (err)
+	if (amd64_read_pci_cfg(pvt->dram_f2_ctl, F10_DCLR_1, &pvt->dclr1))
 		goto err_reg;
 
 	/* If we are in 128 bit mode, then we are using 2 channels */
@@ -1283,8 +1242,7 @@ static int f10_early_channel_count(struct amd64_pvt *pvt)
 	 * both controllers since DIMMs can be placed in either one.
 	 */
 	for (i = 0; i < ARRAY_SIZE(dbams); i++) {
-		err = pci_read_config_dword(pvt->dram_f2_ctl, dbams[i], &dbam);
-		if (err)
+		if (amd64_read_pci_cfg(pvt->dram_f2_ctl, dbams[i], &dbam))
 			goto err_reg;
 
 		for (j = 0; j < 4; j++) {
@@ -1314,7 +1272,7 @@ static void amd64_setup(struct amd64_pvt *pvt)
 {
 	u32 reg;
 
-	pci_read_config_dword(pvt->misc_f3_ctl, F10_NB_CFG_HIGH, &reg);
+	amd64_read_pci_cfg(pvt->misc_f3_ctl, F10_NB_CFG_HIGH, &reg);
 
 	pvt->flags.cf8_extcfg = !!(reg & F10_NB_CFG_LOW_ENABLE_EXT_CFG);
 	reg |= F10_NB_CFG_LOW_ENABLE_EXT_CFG;
@@ -1326,7 +1284,7 @@ static void amd64_teardown(struct amd64_pvt *pvt)
 {
 	u32 reg;
 
-	pci_read_config_dword(pvt->misc_f3_ctl, F10_NB_CFG_HIGH, &reg);
+	amd64_read_pci_cfg(pvt->misc_f3_ctl, F10_NB_CFG_HIGH, &reg);
 
 	reg &= ~F10_NB_CFG_LOW_ENABLE_EXT_CFG;
 	if (pvt->flags.cf8_extcfg)
@@ -1355,10 +1313,10 @@ static void f10_read_dram_base_limit(struct amd64_pvt *pvt, int dram)
 	high_offset = F10_DRAM_BASE_HIGH + (dram << 3);
 
 	/* read the 'raw' DRAM BASE Address register */
-	pci_read_config_dword(pvt->addr_f1_ctl, low_offset, &low_base);
+	amd64_read_pci_cfg(pvt->addr_f1_ctl, low_offset, &low_base);
 
 	/* Read from the ECS data register */
-	pci_read_config_dword(pvt->addr_f1_ctl, high_offset, &high_base);
+	amd64_read_pci_cfg(pvt->addr_f1_ctl, high_offset, &high_base);
 
 	/* Extract parts into separate data entries */
 	pvt->dram_rw_en[dram] = (low_base & 0x3);
@@ -1375,10 +1333,10 @@ static void f10_read_dram_base_limit(struct amd64_pvt *pvt, int dram)
 	high_offset = F10_DRAM_LIMIT_HIGH + (dram << 3);
 
 	/* read the 'raw' LIMIT registers */
-	pci_read_config_dword(pvt->addr_f1_ctl, low_offset, &low_limit);
+	amd64_read_pci_cfg(pvt->addr_f1_ctl, low_offset, &low_limit);
 
 	/* Read from the ECS data register for the HIGH portion */
-	pci_read_config_dword(pvt->addr_f1_ctl, high_offset, &high_limit);
+	amd64_read_pci_cfg(pvt->addr_f1_ctl, high_offset, &high_limit);
 
 	debugf0(" HW Regs: BASE=0x%08x-%08x LIMIT= 0x%08x-%08x\n",
 		high_base, low_base, high_limit, low_limit);
@@ -1397,13 +1355,9 @@ static void f10_read_dram_base_limit(struct amd64_pvt *pvt, int dram)
 
 static void f10_read_dram_ctl_register(struct amd64_pvt *pvt)
 {
-	int err = 0;
 
-	err = pci_read_config_dword(pvt->dram_f2_ctl, F10_DCTL_SEL_LOW,
-				    &pvt->dram_ctl_select_low);
-	if (err) {
-		debugf0("Reading F2x110 (DCTL Sel. Low) failed\n");
-	} else {
+	if (!amd64_read_pci_cfg(pvt->dram_f2_ctl, F10_DCTL_SEL_LOW,
+				&pvt->dram_ctl_select_low)) {
 		debugf0("F2x110 (DCTL Sel. Low): 0x%08x, "
 			"High range addresses at: 0x%x\n",
 			pvt->dram_ctl_select_low,
@@ -1428,10 +1382,8 @@ static void f10_read_dram_ctl_register(struct amd64_pvt *pvt)
 			dct_sel_interleave_addr(pvt));
 	}
 
-	err = pci_read_config_dword(pvt->dram_f2_ctl, F10_DCTL_SEL_HIGH,
-				    &pvt->dram_ctl_select_high);
-	if (err)
-		debugf0("Reading F2x114 (DCT Sel. High) failed\n");
+	amd64_read_pci_cfg(pvt->dram_f2_ctl, F10_DCTL_SEL_HIGH,
+			   &pvt->dram_ctl_select_high);
 }
 
 /*
@@ -2082,40 +2034,24 @@ static int amd64_get_error_info_regs(struct mem_ctl_info *mci,
 {
 	struct amd64_pvt *pvt;
 	struct pci_dev *misc_f3_ctl;
-	int err = 0;
 
 	pvt = mci->pvt_info;
 	misc_f3_ctl = pvt->misc_f3_ctl;
 
-	err = pci_read_config_dword(misc_f3_ctl, K8_NBSH, &regs->nbsh);
-	if (err)
-		goto err_reg;
+	if (amd64_read_pci_cfg(misc_f3_ctl, K8_NBSH, &regs->nbsh))
+		return 0;
 
 	if (!(regs->nbsh & K8_NBSH_VALID_BIT))
 		return 0;
 
 	/* valid error, read remaining error information registers */
-	err = pci_read_config_dword(misc_f3_ctl, K8_NBSL, &regs->nbsl);
-	if (err)
-		goto err_reg;
-
-	err = pci_read_config_dword(misc_f3_ctl, K8_NBEAL, &regs->nbeal);
-	if (err)
-		goto err_reg;
-
-	err = pci_read_config_dword(misc_f3_ctl, K8_NBEAH, &regs->nbeah);
-	if (err)
-		goto err_reg;
-
-	err = pci_read_config_dword(misc_f3_ctl, K8_NBCFG, &regs->nbcfg);
-	if (err)
-		goto err_reg;
+	if (amd64_read_pci_cfg(misc_f3_ctl, K8_NBSL, &regs->nbsl) ||
+	    amd64_read_pci_cfg(misc_f3_ctl, K8_NBEAL, &regs->nbeal) ||
+	    amd64_read_pci_cfg(misc_f3_ctl, K8_NBEAH, &regs->nbeah) ||
+	    amd64_read_pci_cfg(misc_f3_ctl, K8_NBCFG, &regs->nbcfg))
+		return 0;
 
 	return 1;
-
-err_reg:
-	debugf0("Reading error info register failed\n");
-	return 0;
 }
 
 /*
@@ -2393,7 +2329,7 @@ static void amd64_free_mc_sibling_devices(struct amd64_pvt *pvt)
 static void amd64_read_mc_registers(struct amd64_pvt *pvt)
 {
 	u64 msr_val;
-	int dram, err = 0;
+	int dram;
 
 	/*
 	 * Retrieve TOP_MEM and TOP_MEM2; no masking off of reserved bits since
@@ -2412,9 +2348,7 @@ static void amd64_read_mc_registers(struct amd64_pvt *pvt)
 
 	amd64_cpu_display_info(pvt);
 
-	err = pci_read_config_dword(pvt->misc_f3_ctl, K8_NBCAP, &pvt->nbcap);
-	if (err)
-		goto err_reg;
+	amd64_read_pci_cfg(pvt->misc_f3_ctl, K8_NBCAP, &pvt->nbcap);
 
 	if (pvt->ops->read_dram_ctl_register)
 		pvt->ops->read_dram_ctl_register(pvt);
@@ -2451,44 +2385,20 @@ static void amd64_read_mc_registers(struct amd64_pvt *pvt)
 
 	amd64_read_dct_base_mask(pvt);
 
-	err = pci_read_config_dword(pvt->addr_f1_ctl, K8_DHAR, &pvt->dhar);
-	if (err)
-		goto err_reg;
-
+	amd64_read_pci_cfg(pvt->addr_f1_ctl, K8_DHAR, &pvt->dhar);
 	amd64_read_dbam_reg(pvt);
 
-	err = pci_read_config_dword(pvt->misc_f3_ctl,
-				    F10_ONLINE_SPARE, &pvt->online_spare);
-	if (err)
-		goto err_reg;
+	amd64_read_pci_cfg(pvt->misc_f3_ctl,
+			   F10_ONLINE_SPARE, &pvt->online_spare);
 
-	err = pci_read_config_dword(pvt->dram_f2_ctl, F10_DCLR_0, &pvt->dclr0);
-	if (err)
-		goto err_reg;
-
-	err = pci_read_config_dword(pvt->dram_f2_ctl, F10_DCHR_0, &pvt->dchr0);
-	if (err)
-		goto err_reg;
+	amd64_read_pci_cfg(pvt->dram_f2_ctl, F10_DCLR_0, &pvt->dclr0);
+	amd64_read_pci_cfg(pvt->dram_f2_ctl, F10_DCHR_0, &pvt->dchr0);
 
 	if (!dct_ganging_enabled(pvt)) {
-		err = pci_read_config_dword(pvt->dram_f2_ctl, F10_DCLR_1,
-					    &pvt->dclr1);
-		if (err)
-			goto err_reg;
-
-		err = pci_read_config_dword(pvt->dram_f2_ctl, F10_DCHR_1,
-					    &pvt->dchr1);
-		if (err)
-			goto err_reg;
+		amd64_read_pci_cfg(pvt->dram_f2_ctl, F10_DCLR_1, &pvt->dclr1);
+		amd64_read_pci_cfg(pvt->dram_f2_ctl, F10_DCHR_1, &pvt->dchr1);
 	}
-
 	amd64_dump_misc_regs(pvt);
-
-	return;
-
-err_reg:
-	debugf0("Reading an MC register failed\n");
-
 }
 
 /*
@@ -2562,13 +2472,11 @@ static int amd64_init_csrows(struct mem_ctl_info *mci)
 	struct csrow_info *csrow;
 	struct amd64_pvt *pvt;
 	u64 input_addr_min, input_addr_max, sys_addr;
-	int i, err = 0, empty = 1;
+	int i, empty = 1;
 
 	pvt = mci->pvt_info;
 
-	err = pci_read_config_dword(pvt->misc_f3_ctl, K8_NBCFG, &pvt->nbcfg);
-	if (err)
-		debugf0("Reading K8_NBCFG failed\n");
+	amd64_read_pci_cfg(pvt->misc_f3_ctl, K8_NBCFG, &pvt->nbcfg);
 
 	debugf0("NBCFG= 0x%x CHIPKILL= %s DRAM ECC= %s\n", pvt->nbcfg,
 		(pvt->nbcfg & K8_NBCFG_CHIPKILL) ? "Enabled" : "Disabled",
@@ -2734,7 +2642,6 @@ static int amd64_toggle_ecc_err_reporting(struct amd64_pvt *pvt, bool on)
 static void amd64_enable_ecc_error_reporting(struct mem_ctl_info *mci)
 {
 	struct amd64_pvt *pvt = mci->pvt_info;
-	int err = 0;
 	u32 value, mask = K8_NBCTL_CECCEn | K8_NBCTL_UECCEn;
 
 	if (!ecc_enable_override)
@@ -2744,9 +2651,7 @@ static void amd64_enable_ecc_error_reporting(struct mem_ctl_info *mci)
2744 "'ecc_enable_override' parameter is active, " 2651 "'ecc_enable_override' parameter is active, "
2745 "Enabling AMD ECC hardware now: CAUTION\n"); 2652 "Enabling AMD ECC hardware now: CAUTION\n");
2746 2653
2747 err = pci_read_config_dword(pvt->misc_f3_ctl, K8_NBCTL, &value); 2654 amd64_read_pci_cfg(pvt->misc_f3_ctl, K8_NBCTL, &value);
2748 if (err)
2749 debugf0("Reading K8_NBCTL failed\n");
2750 2655
2751 /* turn on UECCn and CECCEn bits */ 2656 /* turn on UECCn and CECCEn bits */
2752 pvt->old_nbctl = value & mask; 2657 pvt->old_nbctl = value & mask;
@@ -2759,9 +2664,7 @@ static void amd64_enable_ecc_error_reporting(struct mem_ctl_info *mci)
 		amd64_printk(KERN_WARNING, "Error enabling ECC reporting over "
 			     "MCGCTL!\n");
 
-	err = pci_read_config_dword(pvt->misc_f3_ctl, K8_NBCFG, &value);
-	if (err)
-		debugf0("Reading K8_NBCFG failed\n");
+	amd64_read_pci_cfg(pvt->misc_f3_ctl, K8_NBCFG, &value);
 
 	debugf0("NBCFG(1)= 0x%x CHIPKILL= %s ECC_ENABLE= %s\n", value,
 		(value & K8_NBCFG_CHIPKILL) ? "Enabled" : "Disabled",
@@ -2776,9 +2679,7 @@ static void amd64_enable_ecc_error_reporting(struct mem_ctl_info *mci)
 		value |= K8_NBCFG_ECC_ENABLE;
 		pci_write_config_dword(pvt->misc_f3_ctl, K8_NBCFG, value);
 
-		err = pci_read_config_dword(pvt->misc_f3_ctl, K8_NBCFG, &value);
-		if (err)
-			debugf0("Reading K8_NBCFG failed\n");
+		amd64_read_pci_cfg(pvt->misc_f3_ctl, K8_NBCFG, &value);
 
 		if (!(value & K8_NBCFG_ECC_ENABLE)) {
 			amd64_printk(KERN_WARNING,
@@ -2798,15 +2699,12 @@ static void amd64_enable_ecc_error_reporting(struct mem_ctl_info *mci)
 
 static void amd64_restore_ecc_error_reporting(struct amd64_pvt *pvt)
 {
-	int err = 0;
 	u32 value, mask = K8_NBCTL_CECCEn | K8_NBCTL_UECCEn;
 
 	if (!pvt->nbctl_mcgctl_saved)
 		return;
 
-	err = pci_read_config_dword(pvt->misc_f3_ctl, K8_NBCTL, &value);
-	if (err)
-		debugf0("Reading K8_NBCTL failed\n");
+	amd64_read_pci_cfg(pvt->misc_f3_ctl, K8_NBCTL, &value);
 	value &= ~mask;
 	value |= pvt->old_nbctl;
 
@@ -2832,13 +2730,10 @@ static const char *ecc_warning =
 static int amd64_check_ecc_enabled(struct amd64_pvt *pvt)
 {
 	u32 value;
-	int err = 0;
 	u8 ecc_enabled = 0;
 	bool nb_mce_en = false;
 
-	err = pci_read_config_dword(pvt->misc_f3_ctl, K8_NBCFG, &value);
-	if (err)
-		debugf0("Reading K8_NBCTL failed\n");
+	amd64_read_pci_cfg(pvt->misc_f3_ctl, K8_NBCFG, &value);
 
 	ecc_enabled = !!(value & K8_NBCFG_ECC_ENABLE);
 	if (!ecc_enabled)
diff --git a/drivers/edac/amd64_edac.h b/drivers/edac/amd64_edac.h
index bba6c944ff13..16f2df449a09 100644
--- a/drivers/edac/amd64_edac.h
+++ b/drivers/edac/amd64_edac.h
@@ -565,6 +565,22 @@ static inline struct low_ops *family_ops(int index)
 	return &amd64_family_types[index].ops;
 }
 
+static inline int amd64_read_pci_cfg_dword(struct pci_dev *pdev, int offset,
+					   u32 *val, const char *func)
+{
+	int err = 0;
+
+	err = pci_read_config_dword(pdev, offset, val);
+	if (err)
+		amd64_printk(KERN_WARNING, "%s: error reading F%dx%x.\n",
+			     func, PCI_FUNC(pdev->devfn), offset);
+
+	return err;
+}
+
+#define amd64_read_pci_cfg(pdev, offset, val) \
+	amd64_read_pci_cfg_dword(pdev, offset, val, __func__)
+
 /*
  * For future CPU versions, verify the following as new 'slow' rates appear and
  * modify the necessary skip values for the supported CPU.