aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/edac/amd64_edac.c
diff options
context:
space:
mode:
authorBorislav Petkov <borislav.petkov@amd.com>2010-10-08 12:32:29 -0400
committerBorislav Petkov <borislav.petkov@amd.com>2011-03-17 09:46:11 -0400
commitb2b0c605436e343a9a24f00e7fc8fb89a8316e20 (patch)
treeafc2d4300b7ad6e8b8c92d02cdf176e55fddc689 /drivers/edac/amd64_edac.c
parentb6a280bb96e0981a527d26cfb0fad203cb9bd808 (diff)
amd64_edac: Add support for F15h DCT PCI config accesses
F15h "multiplexes" between the configuration space of the two DRAM controllers by toggling D18F1x10C[DctCfgSel] while F10h has a different set of registers for DCT0, and DCT1 in extended PCI config space. Add DCT configuration space accessors per family thus wrapping all the different access prerequisites. Clean up code while at it, shorten names. Signed-off-by: Borislav Petkov <borislav.petkov@amd.com>
Diffstat (limited to 'drivers/edac/amd64_edac.c')
-rw-r--r--drivers/edac/amd64_edac.c164
1 file changed, 117 insertions, 47 deletions
diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c
index 23e03554f0d3..ae5adac3733f 100644
--- a/drivers/edac/amd64_edac.c
+++ b/drivers/edac/amd64_edac.c
@@ -107,6 +107,79 @@ struct scrubrate {
107 { 0x00, 0UL}, /* scrubbing off */ 107 { 0x00, 0UL}, /* scrubbing off */
108}; 108};
109 109
110static int __amd64_read_pci_cfg_dword(struct pci_dev *pdev, int offset,
111 u32 *val, const char *func)
112{
113 int err = 0;
114
115 err = pci_read_config_dword(pdev, offset, val);
116 if (err)
117 amd64_warn("%s: error reading F%dx%03x.\n",
118 func, PCI_FUNC(pdev->devfn), offset);
119
120 return err;
121}
122
123int __amd64_write_pci_cfg_dword(struct pci_dev *pdev, int offset,
124 u32 val, const char *func)
125{
126 int err = 0;
127
128 err = pci_write_config_dword(pdev, offset, val);
129 if (err)
130 amd64_warn("%s: error writing to F%dx%03x.\n",
131 func, PCI_FUNC(pdev->devfn), offset);
132
133 return err;
134}
135
136/*
137 *
138 * Depending on the family, F2 DCT reads need special handling:
139 *
140 * K8: has a single DCT only
141 *
142 * F10h: each DCT has its own set of regs
143 * DCT0 -> F2x040..
144 * DCT1 -> F2x140..
145 *
146 * F15h: we select which DCT we access using F1x10C[DctCfgSel]
147 *
148 */
149static int k8_read_dct_pci_cfg(struct amd64_pvt *pvt, int addr, u32 *val,
150 const char *func)
151{
152 if (addr >= 0x100)
153 return -EINVAL;
154
155 return __amd64_read_pci_cfg_dword(pvt->F2, addr, val, func);
156}
157
158static int f10_read_dct_pci_cfg(struct amd64_pvt *pvt, int addr, u32 *val,
159 const char *func)
160{
161 return __amd64_read_pci_cfg_dword(pvt->F2, addr, val, func);
162}
163
164static int f15_read_dct_pci_cfg(struct amd64_pvt *pvt, int addr, u32 *val,
165 const char *func)
166{
167 u32 reg = 0;
168 u8 dct = 0;
169
170 if (addr >= 0x140 && addr <= 0x1a0) {
171 dct = 1;
172 addr -= 0x100;
173 }
174
175 amd64_read_pci_cfg(pvt->F1, DCT_CFG_SEL, &reg);
176 reg &= 0xfffffffe;
177 reg |= dct;
178 amd64_write_pci_cfg(pvt->F1, DCT_CFG_SEL, reg);
179
180 return __amd64_read_pci_cfg_dword(pvt->F2, addr, val, func);
181}
182
110/* 183/*
111 * Memory scrubber control interface. For K8, memory scrubbing is handled by 184 * Memory scrubber control interface. For K8, memory scrubbing is handled by
112 * hardware and can involve L2 cache, dcache as well as the main memory. With 185 * hardware and can involve L2 cache, dcache as well as the main memory. With
@@ -824,7 +897,7 @@ static void amd64_dump_dramcfg_low(u32 dclr, int chan)
824} 897}
825 898
826/* Display and decode various NB registers for debug purposes. */ 899/* Display and decode various NB registers for debug purposes. */
827static void amd64_dump_misc_regs(struct amd64_pvt *pvt) 900static void dump_misc_regs(struct amd64_pvt *pvt)
828{ 901{
829 debugf1("F3xE8 (NB Cap): 0x%08x\n", pvt->nbcap); 902 debugf1("F3xE8 (NB Cap): 0x%08x\n", pvt->nbcap);
830 903
@@ -864,13 +937,10 @@ static void amd64_dump_misc_regs(struct amd64_pvt *pvt)
864 amd64_dump_dramcfg_low(pvt->dclr1, 1); 937 amd64_dump_dramcfg_low(pvt->dclr1, 1);
865} 938}
866 939
867/* Read in both of DBAM registers */
868static void amd64_read_dbam_reg(struct amd64_pvt *pvt) 940static void amd64_read_dbam_reg(struct amd64_pvt *pvt)
869{ 941{
870 amd64_read_pci_cfg(pvt->F2, DBAM0, &pvt->dbam0); 942 amd64_read_dct_pci_cfg(pvt, DBAM0, &pvt->dbam0);
871 943 amd64_read_dct_pci_cfg(pvt, DBAM1, &pvt->dbam1);
872 if (boot_cpu_data.x86 >= 0x10)
873 amd64_read_pci_cfg(pvt->F2, DBAM1, &pvt->dbam1);
874} 944}
875 945
876/* 946/*
@@ -925,7 +995,7 @@ static void amd64_set_dct_base_and_mask(struct amd64_pvt *pvt)
925/* 995/*
926 * Function 2 Offset F10_DCSB0; read in the DCS Base and DCS Mask hw registers 996 * Function 2 Offset F10_DCSB0; read in the DCS Base and DCS Mask hw registers
927 */ 997 */
928static void amd64_read_dct_base_mask(struct amd64_pvt *pvt) 998static void read_dct_base_mask(struct amd64_pvt *pvt)
929{ 999{
930 int cs, reg; 1000 int cs, reg;
931 1001
@@ -933,37 +1003,33 @@ static void amd64_read_dct_base_mask(struct amd64_pvt *pvt)
933 1003
934 for (cs = 0; cs < pvt->cs_count; cs++) { 1004 for (cs = 0; cs < pvt->cs_count; cs++) {
935 reg = K8_DCSB0 + (cs * 4); 1005 reg = K8_DCSB0 + (cs * 4);
936 if (!amd64_read_pci_cfg(pvt->F2, reg, &pvt->dcsb0[cs])) 1006
1007 if (!amd64_read_dct_pci_cfg(pvt, reg, &pvt->dcsb0[cs]))
937 debugf0(" DCSB0[%d]=0x%08x reg: F2x%x\n", 1008 debugf0(" DCSB0[%d]=0x%08x reg: F2x%x\n",
938 cs, pvt->dcsb0[cs], reg); 1009 cs, pvt->dcsb0[cs], reg);
939 1010
940 /* If DCT are NOT ganged, then read in DCT1's base */ 1011 if (!dct_ganging_enabled(pvt)) {
941 if (boot_cpu_data.x86 >= 0x10 && !dct_ganging_enabled(pvt)) {
942 reg = F10_DCSB1 + (cs * 4); 1012 reg = F10_DCSB1 + (cs * 4);
943 if (!amd64_read_pci_cfg(pvt->F2, reg, 1013
944 &pvt->dcsb1[cs])) 1014 if (!amd64_read_dct_pci_cfg(pvt, reg, &pvt->dcsb1[cs]))
945 debugf0(" DCSB1[%d]=0x%08x reg: F2x%x\n", 1015 debugf0(" DCSB1[%d]=0x%08x reg: F2x%x\n",
946 cs, pvt->dcsb1[cs], reg); 1016 cs, pvt->dcsb1[cs], reg);
947 } else {
948 pvt->dcsb1[cs] = 0;
949 } 1017 }
950 } 1018 }
951 1019
952 for (cs = 0; cs < pvt->num_dcsm; cs++) { 1020 for (cs = 0; cs < pvt->num_dcsm; cs++) {
953 reg = K8_DCSM0 + (cs * 4); 1021 reg = K8_DCSM0 + (cs * 4);
954 if (!amd64_read_pci_cfg(pvt->F2, reg, &pvt->dcsm0[cs])) 1022
1023 if (!amd64_read_dct_pci_cfg(pvt, reg, &pvt->dcsm0[cs]))
955 debugf0(" DCSM0[%d]=0x%08x reg: F2x%x\n", 1024 debugf0(" DCSM0[%d]=0x%08x reg: F2x%x\n",
956 cs, pvt->dcsm0[cs], reg); 1025 cs, pvt->dcsm0[cs], reg);
957 1026
958 /* If DCT are NOT ganged, then read in DCT1's mask */ 1027 if (!dct_ganging_enabled(pvt)) {
959 if (boot_cpu_data.x86 >= 0x10 && !dct_ganging_enabled(pvt)) {
960 reg = F10_DCSM1 + (cs * 4); 1028 reg = F10_DCSM1 + (cs * 4);
961 if (!amd64_read_pci_cfg(pvt->F2, reg, 1029
962 &pvt->dcsm1[cs])) 1030 if (!amd64_read_dct_pci_cfg(pvt, reg, &pvt->dcsm1[cs]))
963 debugf0(" DCSM1[%d]=0x%08x reg: F2x%x\n", 1031 debugf0(" DCSM1[%d]=0x%08x reg: F2x%x\n",
964 cs, pvt->dcsm1[cs], reg); 1032 cs, pvt->dcsm1[cs], reg);
965 } else {
966 pvt->dcsm1[cs] = 0;
967 } 1033 }
968 } 1034 }
969} 1035}
@@ -999,7 +1065,7 @@ static int k8_early_channel_count(struct amd64_pvt *pvt)
999{ 1065{
1000 int flag, err = 0; 1066 int flag, err = 0;
1001 1067
1002 err = amd64_read_pci_cfg(pvt->F2, F10_DCLR_0, &pvt->dclr0); 1068 err = amd64_read_dct_pci_cfg(pvt, F10_DCLR_0, &pvt->dclr0);
1003 if (err) 1069 if (err)
1004 return err; 1070 return err;
1005 1071
@@ -1163,7 +1229,7 @@ static int f10_early_channel_count(struct amd64_pvt *pvt)
1163 * both controllers since DIMMs can be placed in either one. 1229 * both controllers since DIMMs can be placed in either one.
1164 */ 1230 */
1165 for (i = 0; i < ARRAY_SIZE(dbams); i++) { 1231 for (i = 0; i < ARRAY_SIZE(dbams); i++) {
1166 if (amd64_read_pci_cfg(pvt->F2, dbams[i], &dbam)) 1232 if (amd64_read_dct_pci_cfg(pvt, dbams[i], &dbam))
1167 goto err_reg; 1233 goto err_reg;
1168 1234
1169 for (j = 0; j < 4; j++) { 1235 for (j = 0; j < 4; j++) {
@@ -1255,12 +1321,9 @@ static void f10_read_dram_base_limit(struct amd64_pvt *pvt, int dram)
1255static void f10_read_dram_ctl_register(struct amd64_pvt *pvt) 1321static void f10_read_dram_ctl_register(struct amd64_pvt *pvt)
1256{ 1322{
1257 1323
1258 if (!amd64_read_pci_cfg(pvt->F2, F10_DCTL_SEL_LOW, 1324 if (!amd64_read_dct_pci_cfg(pvt, F10_DCTL_SEL_LOW, &pvt->dct_sel_low)) {
1259 &pvt->dram_ctl_select_low)) { 1325 debugf0("F2x110 (DCTL Sel. Low): 0x%08x, High range addrs at: 0x%x\n",
1260 debugf0("F2x110 (DCTL Sel. Low): 0x%08x, " 1326 pvt->dct_sel_low, dct_sel_baseaddr(pvt));
1261 "High range addresses at: 0x%x\n",
1262 pvt->dram_ctl_select_low,
1263 dct_sel_baseaddr(pvt));
1264 1327
1265 debugf0(" DCT mode: %s, All DCTs on: %s\n", 1328 debugf0(" DCT mode: %s, All DCTs on: %s\n",
1266 (dct_ganging_enabled(pvt) ? "ganged" : "unganged"), 1329 (dct_ganging_enabled(pvt) ? "ganged" : "unganged"),
@@ -1281,8 +1344,7 @@ static void f10_read_dram_ctl_register(struct amd64_pvt *pvt)
1281 dct_sel_interleave_addr(pvt)); 1344 dct_sel_interleave_addr(pvt));
1282 } 1345 }
1283 1346
1284 amd64_read_pci_cfg(pvt->F2, F10_DCTL_SEL_HIGH, 1347 amd64_read_dct_pci_cfg(pvt, F10_DCTL_SEL_HIGH, &pvt->dct_sel_hi);
1285 &pvt->dram_ctl_select_high);
1286} 1348}
1287 1349
1288/* 1350/*
@@ -1292,7 +1354,7 @@ static void f10_read_dram_ctl_register(struct amd64_pvt *pvt)
1292static u32 f10_determine_channel(struct amd64_pvt *pvt, u64 sys_addr, 1354static u32 f10_determine_channel(struct amd64_pvt *pvt, u64 sys_addr,
1293 int hi_range_sel, u32 intlv_en) 1355 int hi_range_sel, u32 intlv_en)
1294{ 1356{
1295 u32 cs, temp, dct_sel_high = (pvt->dram_ctl_select_low >> 1) & 1; 1357 u32 cs, temp, dct_sel_high = (pvt->dct_sel_low >> 1) & 1;
1296 1358
1297 if (dct_ganging_enabled(pvt)) 1359 if (dct_ganging_enabled(pvt))
1298 cs = 0; 1360 cs = 0;
@@ -1481,7 +1543,7 @@ static int f10_match_to_this_node(struct amd64_pvt *pvt, int dram_range,
1481 */ 1543 */
1482 hole_off = (pvt->dhar & 0x0000FF80); 1544 hole_off = (pvt->dhar & 0x0000FF80);
1483 hole_valid = (pvt->dhar & 0x1); 1545 hole_valid = (pvt->dhar & 0x1);
1484 dct_sel_base_off = (pvt->dram_ctl_select_high & 0xFFFFFC00) << 16; 1546 dct_sel_base_off = (pvt->dct_sel_hi & 0xFFFFFC00) << 16;
1485 1547
1486 debugf1(" HoleOffset=0x%x HoleValid=0x%x IntlvSel=0x%x\n", 1548 debugf1(" HoleOffset=0x%x HoleValid=0x%x IntlvSel=0x%x\n",
1487 hole_off, hole_valid, intlv_sel); 1549 hole_off, hole_valid, intlv_sel);
@@ -1668,6 +1730,7 @@ static struct amd64_family_type amd64_family_types[] = {
1668 .read_dram_base_limit = k8_read_dram_base_limit, 1730 .read_dram_base_limit = k8_read_dram_base_limit,
1669 .map_sysaddr_to_csrow = k8_map_sysaddr_to_csrow, 1731 .map_sysaddr_to_csrow = k8_map_sysaddr_to_csrow,
1670 .dbam_to_cs = k8_dbam_to_chip_select, 1732 .dbam_to_cs = k8_dbam_to_chip_select,
1733 .read_dct_pci_cfg = k8_read_dct_pci_cfg,
1671 } 1734 }
1672 }, 1735 },
1673 [F10_CPUS] = { 1736 [F10_CPUS] = {
@@ -1681,6 +1744,13 @@ static struct amd64_family_type amd64_family_types[] = {
1681 .read_dram_ctl_register = f10_read_dram_ctl_register, 1744 .read_dram_ctl_register = f10_read_dram_ctl_register,
1682 .map_sysaddr_to_csrow = f10_map_sysaddr_to_csrow, 1745 .map_sysaddr_to_csrow = f10_map_sysaddr_to_csrow,
1683 .dbam_to_cs = f10_dbam_to_chip_select, 1746 .dbam_to_cs = f10_dbam_to_chip_select,
1747 .read_dct_pci_cfg = f10_read_dct_pci_cfg,
1748 }
1749 },
1750 [F15_CPUS] = {
1751 .ctl_name = "F15h",
1752 .ops = {
1753 .read_dct_pci_cfg = f15_read_dct_pci_cfg,
1684 } 1754 }
1685 }, 1755 },
1686}; 1756};
@@ -2081,24 +2151,24 @@ static void read_mc_regs(struct amd64_pvt *pvt)
2081 } 2151 }
2082 } 2152 }
2083 2153
2084 amd64_read_dct_base_mask(pvt); 2154 read_dct_base_mask(pvt);
2085 2155
2086 amd64_read_pci_cfg(pvt->F1, K8_DHAR, &pvt->dhar); 2156 amd64_read_pci_cfg(pvt->F1, K8_DHAR, &pvt->dhar);
2087 amd64_read_dbam_reg(pvt); 2157 amd64_read_dbam_reg(pvt);
2088 2158
2089 amd64_read_pci_cfg(pvt->F3, F10_ONLINE_SPARE, &pvt->online_spare); 2159 amd64_read_pci_cfg(pvt->F3, F10_ONLINE_SPARE, &pvt->online_spare);
2090 2160
2091 amd64_read_pci_cfg(pvt->F2, F10_DCLR_0, &pvt->dclr0); 2161 amd64_read_dct_pci_cfg(pvt, F10_DCLR_0, &pvt->dclr0);
2092 amd64_read_pci_cfg(pvt->F2, F10_DCHR_0, &pvt->dchr0); 2162 amd64_read_dct_pci_cfg(pvt, F10_DCHR_0, &pvt->dchr0);
2093 2163
2094 if (boot_cpu_data.x86 >= 0x10) { 2164 if (!dct_ganging_enabled(pvt)) {
2095 if (!dct_ganging_enabled(pvt)) { 2165 amd64_read_dct_pci_cfg(pvt, F10_DCLR_1, &pvt->dclr1);
2096 amd64_read_pci_cfg(pvt->F2, F10_DCLR_1, &pvt->dclr1); 2166 amd64_read_dct_pci_cfg(pvt, F10_DCHR_1, &pvt->dchr1);
2097 amd64_read_pci_cfg(pvt->F2, F10_DCHR_1, &pvt->dchr1);
2098 }
2099 amd64_read_pci_cfg(pvt->F3, EXT_NB_MCA_CFG, &tmp);
2100 } 2167 }
2101 2168
2169 if (boot_cpu_data.x86 >= 0x10)
2170 amd64_read_pci_cfg(pvt->F3, EXT_NB_MCA_CFG, &tmp);
2171
2102 if (boot_cpu_data.x86 == 0x10 && 2172 if (boot_cpu_data.x86 == 0x10 &&
2103 boot_cpu_data.x86_model > 7 && 2173 boot_cpu_data.x86_model > 7 &&
2104 /* F3x180[EccSymbolSize]=1 => x8 symbols */ 2174 /* F3x180[EccSymbolSize]=1 => x8 symbols */
@@ -2107,7 +2177,7 @@ static void read_mc_regs(struct amd64_pvt *pvt)
2107 else 2177 else
2108 pvt->syn_type = 4; 2178 pvt->syn_type = 4;
2109 2179
2110 amd64_dump_misc_regs(pvt); 2180 dump_misc_regs(pvt);
2111} 2181}
2112 2182
2113/* 2183/*
@@ -2342,7 +2412,7 @@ static bool enable_ecc_error_reporting(struct ecc_settings *s, u8 nid,
2342 s->nbctl_valid = true; 2412 s->nbctl_valid = true;
2343 2413
2344 value |= mask; 2414 value |= mask;
2345 pci_write_config_dword(F3, K8_NBCTL, value); 2415 amd64_write_pci_cfg(F3, K8_NBCTL, value);
2346 2416
2347 amd64_read_pci_cfg(F3, K8_NBCFG, &value); 2417 amd64_read_pci_cfg(F3, K8_NBCFG, &value);
2348 2418
@@ -2357,7 +2427,7 @@ static bool enable_ecc_error_reporting(struct ecc_settings *s, u8 nid,
2357 2427
2358 /* Attempt to turn on DRAM ECC Enable */ 2428 /* Attempt to turn on DRAM ECC Enable */
2359 value |= K8_NBCFG_ECC_ENABLE; 2429 value |= K8_NBCFG_ECC_ENABLE;
2360 pci_write_config_dword(F3, K8_NBCFG, value); 2430 amd64_write_pci_cfg(F3, K8_NBCFG, value);
2361 2431
2362 amd64_read_pci_cfg(F3, K8_NBCFG, &value); 2432 amd64_read_pci_cfg(F3, K8_NBCFG, &value);
2363 2433
@@ -2391,13 +2461,13 @@ static void restore_ecc_error_reporting(struct ecc_settings *s, u8 nid,
2391 value &= ~mask; 2461 value &= ~mask;
2392 value |= s->old_nbctl; 2462 value |= s->old_nbctl;
2393 2463
2394 pci_write_config_dword(F3, K8_NBCTL, value); 2464 amd64_write_pci_cfg(F3, K8_NBCTL, value);
2395 2465
2396 /* restore previous BIOS DRAM ECC "off" setting we force-enabled */ 2466 /* restore previous BIOS DRAM ECC "off" setting we force-enabled */
2397 if (!s->flags.nb_ecc_prev) { 2467 if (!s->flags.nb_ecc_prev) {
2398 amd64_read_pci_cfg(F3, K8_NBCFG, &value); 2468 amd64_read_pci_cfg(F3, K8_NBCFG, &value);
2399 value &= ~K8_NBCFG_ECC_ENABLE; 2469 value &= ~K8_NBCFG_ECC_ENABLE;
2400 pci_write_config_dword(F3, K8_NBCFG, value); 2470 amd64_write_pci_cfg(F3, K8_NBCFG, value);
2401 } 2471 }
2402 2472
2403 /* restore the NB Enable MCGCTL bit */ 2473 /* restore the NB Enable MCGCTL bit */