path: root/drivers/edac
author    Borislav Petkov <borislav.petkov@amd.com>    2010-10-01 14:11:07 -0400
committer Borislav Petkov <borislav.petkov@amd.com>    2011-01-07 05:33:54 -0500
commit    8d5b5d9c7b86e44fda29a367db3ccd2815a52f7c (patch)
tree      3444029260ba184d6726bd46c48d9fa87a2fc3a5 /drivers/edac
parent    b8cfa02f833a614e80f851747c4ce14989a4cfd0 (diff)
amd64_edac: Rename CPU PCI devices
Rename variables representing PCI devices to their BKDG names for faster
search and shorter, clearer code.

Signed-off-by: Borislav Petkov <borislav.petkov@amd.com>
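In short, the patch trades the long descriptive member names for the PCI function numbers used throughout the BKDG, so identifiers line up with register references such as F2x110 or F3x40. A condensed before/after sketch (illustrative only; the _old/_new suffixes are added here for contrast, and the authoritative change is the amd64_edac.h hunk further down):

/* Before: role-describing names for the node's PCI functions. */
struct amd64_pvt_old {
	struct pci_dev *addr_f1_ctl;	/* function 1: address map */
	struct pci_dev *dram_f2_ctl;	/* function 2: DRAM controller */
	struct pci_dev *misc_f3_ctl;	/* function 3: misc control */
};

/* After: the BKDG function names. */
struct amd64_pvt_new {
	struct pci_dev *F1, *F2, *F3;
};

/* The per-family device-ID fields are shortened the same way. */
struct amd64_family_type_old { u16 addr_f1_ctl, misc_f3_ctl; };
struct amd64_family_type_new { u16 f1_id, f3_id; };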
Diffstat (limited to 'drivers/edac')
-rw-r--r--  drivers/edac/amd64_edac.c      | 153
-rw-r--r--  drivers/edac/amd64_edac.h      |   7
-rw-r--r--  drivers/edac/amd64_edac_inj.c  |  12
3 files changed, 77 insertions(+), 95 deletions(-)
diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c
index 0caea2b76659..26fb0f962605 100644
--- a/drivers/edac/amd64_edac.c
+++ b/drivers/edac/amd64_edac.c
@@ -163,7 +163,7 @@ static int amd64_set_scrub_rate(struct mem_ctl_info *mci, u32 bw)
 {
 	struct amd64_pvt *pvt = mci->pvt_info;
 
-	return __amd64_set_scrub_rate(pvt->misc_f3_ctl, bw, pvt->min_scrubrate);
+	return __amd64_set_scrub_rate(pvt->F3, bw, pvt->min_scrubrate);
 }
 
 static int amd64_get_scrub_rate(struct mem_ctl_info *mci, u32 *bw)
@@ -172,7 +172,7 @@ static int amd64_get_scrub_rate(struct mem_ctl_info *mci, u32 *bw)
 	u32 scrubval = 0;
 	int status = -1, i;
 
-	amd64_read_pci_cfg(pvt->misc_f3_ctl, K8_SCRCTRL, &scrubval);
+	amd64_read_pci_cfg(pvt->F3, K8_SCRCTRL, &scrubval);
 
 	scrubval = scrubval & 0x001F;
 
@@ -882,10 +882,10 @@ static void amd64_dump_misc_regs(struct amd64_pvt *pvt)
 /* Read in both of DBAM registers */
 static void amd64_read_dbam_reg(struct amd64_pvt *pvt)
 {
-	amd64_read_pci_cfg(pvt->dram_f2_ctl, DBAM0, &pvt->dbam0);
+	amd64_read_pci_cfg(pvt->F2, DBAM0, &pvt->dbam0);
 
 	if (boot_cpu_data.x86 >= 0x10)
-		amd64_read_pci_cfg(pvt->dram_f2_ctl, DBAM1, &pvt->dbam1);
+		amd64_read_pci_cfg(pvt->F2, DBAM1, &pvt->dbam1);
 }
 
 /*
@@ -948,14 +948,14 @@ static void amd64_read_dct_base_mask(struct amd64_pvt *pvt)
 
 	for (cs = 0; cs < pvt->cs_count; cs++) {
 		reg = K8_DCSB0 + (cs * 4);
-		if (!amd64_read_pci_cfg(pvt->dram_f2_ctl, reg, &pvt->dcsb0[cs]))
+		if (!amd64_read_pci_cfg(pvt->F2, reg, &pvt->dcsb0[cs]))
 			debugf0("  DCSB0[%d]=0x%08x reg: F2x%x\n",
 				cs, pvt->dcsb0[cs], reg);
 
 		/* If DCT are NOT ganged, then read in DCT1's base */
 		if (boot_cpu_data.x86 >= 0x10 && !dct_ganging_enabled(pvt)) {
 			reg = F10_DCSB1 + (cs * 4);
-			if (!amd64_read_pci_cfg(pvt->dram_f2_ctl, reg,
+			if (!amd64_read_pci_cfg(pvt->F2, reg,
 						&pvt->dcsb1[cs]))
 				debugf0("  DCSB1[%d]=0x%08x reg: F2x%x\n",
 					cs, pvt->dcsb1[cs], reg);
@@ -966,14 +966,14 @@ static void amd64_read_dct_base_mask(struct amd64_pvt *pvt)
 
 	for (cs = 0; cs < pvt->num_dcsm; cs++) {
 		reg = K8_DCSM0 + (cs * 4);
-		if (!amd64_read_pci_cfg(pvt->dram_f2_ctl, reg, &pvt->dcsm0[cs]))
+		if (!amd64_read_pci_cfg(pvt->F2, reg, &pvt->dcsm0[cs]))
 			debugf0("  DCSM0[%d]=0x%08x reg: F2x%x\n",
 				cs, pvt->dcsm0[cs], reg);
 
 		/* If DCT are NOT ganged, then read in DCT1's mask */
 		if (boot_cpu_data.x86 >= 0x10 && !dct_ganging_enabled(pvt)) {
 			reg = F10_DCSM1 + (cs * 4);
-			if (!amd64_read_pci_cfg(pvt->dram_f2_ctl, reg,
+			if (!amd64_read_pci_cfg(pvt->F2, reg,
 						&pvt->dcsm1[cs]))
 				debugf0("  DCSM1[%d]=0x%08x reg: F2x%x\n",
 					cs, pvt->dcsm1[cs], reg);
@@ -1014,7 +1014,7 @@ static int k8_early_channel_count(struct amd64_pvt *pvt)
 {
 	int flag, err = 0;
 
-	err = amd64_read_pci_cfg(pvt->dram_f2_ctl, F10_DCLR_0, &pvt->dclr0);
+	err = amd64_read_pci_cfg(pvt->F2, F10_DCLR_0, &pvt->dclr0);
 	if (err)
 		return err;
 
@@ -1050,14 +1050,14 @@ static void k8_read_dram_base_limit(struct amd64_pvt *pvt, int dram)
 	u32 low;
 	u32 off = dram << 3;	/* 8 bytes between DRAM entries */
 
-	amd64_read_pci_cfg(pvt->addr_f1_ctl, K8_DRAM_BASE_LOW + off, &low);
+	amd64_read_pci_cfg(pvt->F1, K8_DRAM_BASE_LOW + off, &low);
 
 	/* Extract parts into separate data entries */
 	pvt->dram_base[dram] = ((u64) low & 0xFFFF0000) << 8;
 	pvt->dram_IntlvEn[dram] = (low >> 8) & 0x7;
 	pvt->dram_rw_en[dram] = (low & 0x3);
 
-	amd64_read_pci_cfg(pvt->addr_f1_ctl, K8_DRAM_LIMIT_LOW + off, &low);
+	amd64_read_pci_cfg(pvt->F1, K8_DRAM_LIMIT_LOW + off, &low);
 
 	/*
 	 * Extract parts into separate data entries. Limit is the HIGHEST memory
@@ -1180,7 +1180,7 @@ static int f10_early_channel_count(struct amd64_pvt *pvt)
 	 * both controllers since DIMMs can be placed in either one.
 	 */
 	for (i = 0; i < ARRAY_SIZE(dbams); i++) {
-		if (amd64_read_pci_cfg(pvt->dram_f2_ctl, dbams[i], &dbam))
+		if (amd64_read_pci_cfg(pvt->F2, dbams[i], &dbam))
 			goto err_reg;
 
 		for (j = 0; j < 4; j++) {
@@ -1220,11 +1220,11 @@ static void amd64_setup(struct amd64_pvt *pvt)
 {
 	u32 reg;
 
-	amd64_read_pci_cfg(pvt->misc_f3_ctl, F10_NB_CFG_HIGH, &reg);
+	amd64_read_pci_cfg(pvt->F3, F10_NB_CFG_HIGH, &reg);
 
 	pvt->flags.cf8_extcfg = !!(reg & F10_NB_CFG_LOW_ENABLE_EXT_CFG);
 	reg |= F10_NB_CFG_LOW_ENABLE_EXT_CFG;
-	pci_write_config_dword(pvt->misc_f3_ctl, F10_NB_CFG_HIGH, reg);
+	pci_write_config_dword(pvt->F3, F10_NB_CFG_HIGH, reg);
 }
 
 /* Restore the extended configuration access via 0xCF8 feature */
@@ -1232,12 +1232,12 @@ static void amd64_teardown(struct amd64_pvt *pvt)
 {
 	u32 reg;
 
-	amd64_read_pci_cfg(pvt->misc_f3_ctl, F10_NB_CFG_HIGH, &reg);
+	amd64_read_pci_cfg(pvt->F3, F10_NB_CFG_HIGH, &reg);
 
 	reg &= ~F10_NB_CFG_LOW_ENABLE_EXT_CFG;
 	if (pvt->flags.cf8_extcfg)
 		reg |= F10_NB_CFG_LOW_ENABLE_EXT_CFG;
-	pci_write_config_dword(pvt->misc_f3_ctl, F10_NB_CFG_HIGH, reg);
+	pci_write_config_dword(pvt->F3, F10_NB_CFG_HIGH, reg);
 }
 
 static u64 f10_get_error_address(struct mem_ctl_info *mci,
@@ -1261,10 +1261,10 @@ static void f10_read_dram_base_limit(struct amd64_pvt *pvt, int dram)
 	high_offset = F10_DRAM_BASE_HIGH + (dram << 3);
 
 	/* read the 'raw' DRAM BASE Address register */
-	amd64_read_pci_cfg(pvt->addr_f1_ctl, low_offset, &low_base);
+	amd64_read_pci_cfg(pvt->F1, low_offset, &low_base);
 
 	/* Read from the ECS data register */
-	amd64_read_pci_cfg(pvt->addr_f1_ctl, high_offset, &high_base);
+	amd64_read_pci_cfg(pvt->F1, high_offset, &high_base);
 
 	/* Extract parts into separate data entries */
 	pvt->dram_rw_en[dram] = (low_base & 0x3);
@@ -1281,10 +1281,10 @@ static void f10_read_dram_base_limit(struct amd64_pvt *pvt, int dram)
 	high_offset = F10_DRAM_LIMIT_HIGH + (dram << 3);
 
 	/* read the 'raw' LIMIT registers */
-	amd64_read_pci_cfg(pvt->addr_f1_ctl, low_offset, &low_limit);
+	amd64_read_pci_cfg(pvt->F1, low_offset, &low_limit);
 
 	/* Read from the ECS data register for the HIGH portion */
-	amd64_read_pci_cfg(pvt->addr_f1_ctl, high_offset, &high_limit);
+	amd64_read_pci_cfg(pvt->F1, high_offset, &high_limit);
 
 	pvt->dram_DstNode[dram] = (low_limit & 0x7);
 	pvt->dram_IntlvSel[dram] = (low_limit >> 8) & 0x7;
@@ -1301,7 +1301,7 @@ static void f10_read_dram_base_limit(struct amd64_pvt *pvt, int dram)
 static void f10_read_dram_ctl_register(struct amd64_pvt *pvt)
 {
 
-	if (!amd64_read_pci_cfg(pvt->dram_f2_ctl, F10_DCTL_SEL_LOW,
+	if (!amd64_read_pci_cfg(pvt->F2, F10_DCTL_SEL_LOW,
 				&pvt->dram_ctl_select_low)) {
 		debugf0("F2x110 (DCTL Sel. Low): 0x%08x, "
 			"High range addresses at: 0x%x\n",
@@ -1327,7 +1327,7 @@ static void f10_read_dram_ctl_register(struct amd64_pvt *pvt)
 			dct_sel_interleave_addr(pvt));
 	}
 
-	amd64_read_pci_cfg(pvt->dram_f2_ctl, F10_DCTL_SEL_HIGH,
+	amd64_read_pci_cfg(pvt->F2, F10_DCTL_SEL_HIGH,
 			   &pvt->dram_ctl_select_high);
 }
 
@@ -1707,8 +1707,8 @@ static void amd64_debug_display_dimm_sizes(int ctrl, struct amd64_pvt *pvt)
 static struct amd64_family_type amd64_family_types[] = {
 	[K8_CPUS] = {
 		.ctl_name = "K8",
-		.addr_f1_ctl = PCI_DEVICE_ID_AMD_K8_NB_ADDRMAP,
-		.misc_f3_ctl = PCI_DEVICE_ID_AMD_K8_NB_MISC,
+		.f1_id = PCI_DEVICE_ID_AMD_K8_NB_ADDRMAP,
+		.f3_id = PCI_DEVICE_ID_AMD_K8_NB_MISC,
 		.ops = {
 			.early_channel_count = k8_early_channel_count,
 			.get_error_address = k8_get_error_address,
@@ -1719,8 +1719,8 @@ static struct amd64_family_type amd64_family_types[] = {
 	},
 	[F10_CPUS] = {
 		.ctl_name = "F10h",
-		.addr_f1_ctl = PCI_DEVICE_ID_AMD_10H_NB_MAP,
-		.misc_f3_ctl = PCI_DEVICE_ID_AMD_10H_NB_MISC,
+		.f1_id = PCI_DEVICE_ID_AMD_10H_NB_MAP,
+		.f3_id = PCI_DEVICE_ID_AMD_10H_NB_MISC,
 		.ops = {
 			.early_channel_count = f10_early_channel_count,
 			.get_error_address = f10_get_error_address,
@@ -2035,53 +2035,44 @@ void amd64_decode_bus_error(int node_id, struct mce *m, u32 nbcfg)
 }
 
 /*
- * Use pvt->dram_f2_ctl which contains the F2 CPU PCI device to get the related
+ * Use pvt->F2 which contains the F2 CPU PCI device to get the related
  * F1 (AddrMap) and F3 (Misc) devices. Return negative value on error.
  */
 static int amd64_reserve_mc_sibling_devices(struct amd64_pvt *pvt, u16 f1_id,
 					    u16 f3_id)
 {
 	/* Reserve the ADDRESS MAP Device */
-	pvt->addr_f1_ctl = pci_get_related_function(pvt->dram_f2_ctl->vendor,
-						    f1_id,
-						    pvt->dram_f2_ctl);
-
-	if (!pvt->addr_f1_ctl) {
+	pvt->F1 = pci_get_related_function(pvt->F2->vendor, f1_id, pvt->F2);
+	if (!pvt->F1) {
 		amd64_printk(KERN_ERR, "error address map device not found: "
 			     "vendor %x device 0x%x (broken BIOS?)\n",
 			     PCI_VENDOR_ID_AMD, f1_id);
 		return -ENODEV;
 	}
 
 	/* Reserve the MISC Device */
-	pvt->misc_f3_ctl = pci_get_related_function(pvt->dram_f2_ctl->vendor,
-						    f3_id,
-						    pvt->dram_f2_ctl);
+	pvt->F3 = pci_get_related_function(pvt->F2->vendor, f3_id, pvt->F2);
+	if (!pvt->F3) {
+		pci_dev_put(pvt->F1);
+		pvt->F1 = NULL;
 
-	if (!pvt->misc_f3_ctl) {
-		pci_dev_put(pvt->addr_f1_ctl);
-		pvt->addr_f1_ctl = NULL;
-
-		amd64_printk(KERN_ERR, "error miscellaneous device not found: "
-			     "vendor %x device 0x%x (broken BIOS?)\n",
-			     PCI_VENDOR_ID_AMD, f3_id);
+		amd64_printk(KERN_ERR, "error F3 device not found: "
+			     "vendor %x device 0x%x (broken BIOS?)\n",
+			     PCI_VENDOR_ID_AMD, f3_id);
 
 		return -ENODEV;
 	}
-
-	debugf1("    Addr Map device PCI Bus ID:\t%s\n",
-		pci_name(pvt->addr_f1_ctl));
-	debugf1("    DRAM MEM-CTL PCI Bus ID:\t%s\n",
-		pci_name(pvt->dram_f2_ctl));
-	debugf1("    Misc device PCI Bus ID:\t%s\n",
-		pci_name(pvt->misc_f3_ctl));
+	debugf1("F1: %s\n", pci_name(pvt->F1));
+	debugf1("F2: %s\n", pci_name(pvt->F2));
+	debugf1("F3: %s\n", pci_name(pvt->F3));
 
 	return 0;
 }
 
 static void amd64_free_mc_sibling_devices(struct amd64_pvt *pvt)
 {
-	pci_dev_put(pvt->addr_f1_ctl);
-	pci_dev_put(pvt->misc_f3_ctl);
+	pci_dev_put(pvt->F1);
+	pci_dev_put(pvt->F3);
 }
 
 /*
@@ -2109,7 +2100,7 @@ static void amd64_read_mc_registers(struct amd64_pvt *pvt)
 	} else
 		debugf0("  TOP_MEM2 disabled.\n");
 
-	amd64_read_pci_cfg(pvt->misc_f3_ctl, K8_NBCAP, &pvt->nbcap);
+	amd64_read_pci_cfg(pvt->F3, K8_NBCAP, &pvt->nbcap);
 
 	if (pvt->ops->read_dram_ctl_register)
 		pvt->ops->read_dram_ctl_register(pvt);
@@ -2146,21 +2137,20 @@ static void amd64_read_mc_registers(struct amd64_pvt *pvt)
 
 	amd64_read_dct_base_mask(pvt);
 
-	amd64_read_pci_cfg(pvt->addr_f1_ctl, K8_DHAR, &pvt->dhar);
+	amd64_read_pci_cfg(pvt->F1, K8_DHAR, &pvt->dhar);
 	amd64_read_dbam_reg(pvt);
 
-	amd64_read_pci_cfg(pvt->misc_f3_ctl,
-			   F10_ONLINE_SPARE, &pvt->online_spare);
+	amd64_read_pci_cfg(pvt->F3, F10_ONLINE_SPARE, &pvt->online_spare);
 
-	amd64_read_pci_cfg(pvt->dram_f2_ctl, F10_DCLR_0, &pvt->dclr0);
-	amd64_read_pci_cfg(pvt->dram_f2_ctl, F10_DCHR_0, &pvt->dchr0);
+	amd64_read_pci_cfg(pvt->F2, F10_DCLR_0, &pvt->dclr0);
+	amd64_read_pci_cfg(pvt->F2, F10_DCHR_0, &pvt->dchr0);
 
 	if (boot_cpu_data.x86 >= 0x10) {
 		if (!dct_ganging_enabled(pvt)) {
-			amd64_read_pci_cfg(pvt->dram_f2_ctl, F10_DCLR_1, &pvt->dclr1);
-			amd64_read_pci_cfg(pvt->dram_f2_ctl, F10_DCHR_1, &pvt->dchr1);
+			amd64_read_pci_cfg(pvt->F2, F10_DCLR_1, &pvt->dclr1);
+			amd64_read_pci_cfg(pvt->F2, F10_DCHR_1, &pvt->dchr1);
 		}
-		amd64_read_pci_cfg(pvt->misc_f3_ctl, EXT_NB_MCA_CFG, &tmp);
+		amd64_read_pci_cfg(pvt->F3, EXT_NB_MCA_CFG, &tmp);
 	}
 
 	if (boot_cpu_data.x86 == 0x10 &&
@@ -2249,7 +2239,7 @@ static int amd64_init_csrows(struct mem_ctl_info *mci)
 
 	pvt = mci->pvt_info;
 
-	amd64_read_pci_cfg(pvt->misc_f3_ctl, K8_NBCFG, &pvt->nbcfg);
+	amd64_read_pci_cfg(pvt->F3, K8_NBCFG, &pvt->nbcfg);
 
 	debugf0("NBCFG= 0x%x CHIPKILL= %s DRAM ECC= %s\n", pvt->nbcfg,
 		(pvt->nbcfg & K8_NBCFG_CHIPKILL) ? "Enabled" : "Disabled",
@@ -2394,20 +2384,20 @@ static void amd64_enable_ecc_error_reporting(struct mem_ctl_info *mci)
 	struct amd64_pvt *pvt = mci->pvt_info;
 	u32 value, mask = K8_NBCTL_CECCEn | K8_NBCTL_UECCEn;
 
-	amd64_read_pci_cfg(pvt->misc_f3_ctl, K8_NBCTL, &value);
+	amd64_read_pci_cfg(pvt->F3, K8_NBCTL, &value);
 
 	/* turn on UECCn and CECCEn bits */
 	pvt->old_nbctl = value & mask;
 	pvt->nbctl_mcgctl_saved = 1;
 
 	value |= mask;
-	pci_write_config_dword(pvt->misc_f3_ctl, K8_NBCTL, value);
+	pci_write_config_dword(pvt->F3, K8_NBCTL, value);
 
 	if (amd64_toggle_ecc_err_reporting(pvt, ON))
 		amd64_printk(KERN_WARNING, "Error enabling ECC reporting over "
 			     "MCGCTL!\n");
 
-	amd64_read_pci_cfg(pvt->misc_f3_ctl, K8_NBCFG, &value);
+	amd64_read_pci_cfg(pvt->F3, K8_NBCFG, &value);
 
 	debugf0("NBCFG(1)= 0x%x CHIPKILL= %s ECC_ENABLE= %s\n", value,
 		(value & K8_NBCFG_CHIPKILL) ? "Enabled" : "Disabled",
@@ -2422,9 +2412,9 @@ static void amd64_enable_ecc_error_reporting(struct mem_ctl_info *mci)
 
 	/* Attempt to turn on DRAM ECC Enable */
 	value |= K8_NBCFG_ECC_ENABLE;
-	pci_write_config_dword(pvt->misc_f3_ctl, K8_NBCFG, value);
+	pci_write_config_dword(pvt->F3, K8_NBCFG, value);
 
-	amd64_read_pci_cfg(pvt->misc_f3_ctl, K8_NBCFG, &value);
+	amd64_read_pci_cfg(pvt->F3, K8_NBCFG, &value);
 
 	if (!(value & K8_NBCFG_ECC_ENABLE)) {
 		amd64_printk(KERN_WARNING,
@@ -2452,17 +2442,17 @@ static void amd64_restore_ecc_error_reporting(struct amd64_pvt *pvt)
 	if (!pvt->nbctl_mcgctl_saved)
 		return;
 
-	amd64_read_pci_cfg(pvt->misc_f3_ctl, K8_NBCTL, &value);
+	amd64_read_pci_cfg(pvt->F3, K8_NBCTL, &value);
 	value &= ~mask;
 	value |= pvt->old_nbctl;
 
-	pci_write_config_dword(pvt->misc_f3_ctl, K8_NBCTL, value);
+	pci_write_config_dword(pvt->F3, K8_NBCTL, value);
 
 	/* restore previous BIOS DRAM ECC "off" setting which we force-enabled */
 	if (!pvt->flags.nb_ecc_prev) {
-		amd64_read_pci_cfg(pvt->misc_f3_ctl, K8_NBCFG, &value);
+		amd64_read_pci_cfg(pvt->F3, K8_NBCFG, &value);
 		value &= ~K8_NBCFG_ECC_ENABLE;
-		pci_write_config_dword(pvt->misc_f3_ctl, K8_NBCFG, value);
+		pci_write_config_dword(pvt->F3, K8_NBCFG, value);
 	}
 
 	/* restore the NB Enable MCGCTL bit */
@@ -2488,13 +2478,13 @@ static int amd64_check_ecc_enabled(struct amd64_pvt *pvt)
 	u8 ecc_enabled = 0;
 	bool nb_mce_en = false;
 
-	amd64_read_pci_cfg(pvt->misc_f3_ctl, K8_NBCFG, &value);
+	amd64_read_pci_cfg(pvt->F3, K8_NBCFG, &value);
 
 	ecc_enabled = !!(value & K8_NBCFG_ECC_ENABLE);
 	if (!ecc_enabled)
 		amd64_printk(KERN_NOTICE, "This node reports that Memory ECC "
 			     "is currently disabled, set F3x%x[22] (%s).\n",
-			     K8_NBCFG, pci_name(pvt->misc_f3_ctl));
+			     K8_NBCFG, pci_name(pvt->F3));
 	else
 		amd64_printk(KERN_INFO, "ECC is enabled by BIOS.\n");
 
@@ -2554,7 +2544,7 @@ static void amd64_setup_mci_misc_attributes(struct mem_ctl_info *mci)
 	mci->mod_name = EDAC_MOD_STR;
 	mci->mod_ver = EDAC_AMD64_VERSION;
 	mci->ctl_name = pvt->ctl_name;
-	mci->dev_name = pci_name(pvt->dram_f2_ctl);
+	mci->dev_name = pci_name(pvt->F2);
 	mci->ctl_page_to_phys = NULL;
 
 	/* memory scrubber interface */
@@ -2611,7 +2601,7 @@ static struct amd64_family_type *amd64_per_family_init(struct amd64_pvt *pvt)
  * later come back in a finish-setup function to perform that final
  * initialization. See also amd64_init_2nd_stage() for that.
  */
-static int amd64_probe_one_instance(struct pci_dev *dram_f2_ctl)
+static int amd64_probe_one_instance(struct pci_dev *F2)
 {
 	struct amd64_pvt *pvt = NULL;
 	struct amd64_family_type *fam_type = NULL;
@@ -2622,8 +2612,8 @@ static int amd64_probe_one_instance(struct pci_dev *dram_f2_ctl)
 	if (!pvt)
 		goto err_exit;
 
-	pvt->mc_node_id = get_node_id(dram_f2_ctl);
-	pvt->dram_f2_ctl = dram_f2_ctl;
+	pvt->mc_node_id = get_node_id(F2);
+	pvt->F2 = F2;
 
 	ret = -EINVAL;
 	fam_type = amd64_per_family_init(pvt);
@@ -2631,8 +2621,8 @@ static int amd64_probe_one_instance(struct pci_dev *dram_f2_ctl)
 		goto err_free;
 
 	ret = -ENODEV;
-	err = amd64_reserve_mc_sibling_devices(pvt, fam_type->addr_f1_ctl,
-					       fam_type->misc_f3_ctl);
+	err = amd64_reserve_mc_sibling_devices(pvt, fam_type->f1_id,
+					       fam_type->f3_id);
 	if (err)
 		goto err_free;
 
@@ -2695,7 +2685,7 @@ static int amd64_init_2nd_stage(struct amd64_pvt *pvt)
 
 	mci->pvt_info = pvt;
 
-	mci->dev = &pvt->dram_f2_ctl->dev;
+	mci->dev = &pvt->F2->dev;
 	amd64_setup_mci_misc_attributes(mci);
 
 	if (amd64_init_csrows(mci))
@@ -2839,8 +2829,7 @@ static void amd64_setup_pci_device(void)
 
 	pvt = mci->pvt_info;
 	amd64_ctl_pci =
-		edac_pci_create_generic_ctl(&pvt->dram_f2_ctl->dev,
-					    EDAC_MOD_STR);
+		edac_pci_create_generic_ctl(&pvt->F2->dev, EDAC_MOD_STR);
 
 	if (!amd64_ctl_pci) {
 		pr_warning("%s(): Unable to create PCI control\n",
diff --git a/drivers/edac/amd64_edac.h b/drivers/edac/amd64_edac.h
index 007b68a436c5..76760a8043ca 100644
--- a/drivers/edac/amd64_edac.h
+++ b/drivers/edac/amd64_edac.h
@@ -386,9 +386,7 @@ struct amd64_pvt {
 	struct low_ops *ops;
 
 	/* pci_device handles which we utilize */
-	struct pci_dev *addr_f1_ctl;
-	struct pci_dev *dram_f2_ctl;
-	struct pci_dev *misc_f3_ctl;
+	struct pci_dev *F1, *F2, *F3;
 
 	int mc_node_id;		/* MC index of this MC node */
 	int ext_model;		/* extended model value of this node */
@@ -518,8 +516,7 @@ struct low_ops {
 
 struct amd64_family_type {
 	const char *ctl_name;
-	u16 addr_f1_ctl;
-	u16 misc_f3_ctl;
+	u16 f1_id, f3_id;
 	struct low_ops ops;
 };
 
diff --git a/drivers/edac/amd64_edac_inj.c b/drivers/edac/amd64_edac_inj.c
index 29f1f7a612d9..523ce4a6edc6 100644
--- a/drivers/edac/amd64_edac_inj.c
+++ b/drivers/edac/amd64_edac_inj.c
@@ -122,15 +122,13 @@ static ssize_t amd64_inject_read_store(struct mem_ctl_info *mci,
 	/* Form value to choose 16-byte section of cacheline */
 	section = F10_NB_ARRAY_DRAM_ECC |
 		  SET_NB_ARRAY_ADDRESS(pvt->injection.section);
-	pci_write_config_dword(pvt->misc_f3_ctl,
-			       F10_NB_ARRAY_ADDR, section);
+	pci_write_config_dword(pvt->F3, F10_NB_ARRAY_ADDR, section);
 
 	word_bits = SET_NB_DRAM_INJECTION_READ(pvt->injection.word,
 					       pvt->injection.bit_map);
 
 	/* Issue 'word' and 'bit' along with the READ request */
-	pci_write_config_dword(pvt->misc_f3_ctl,
-			       F10_NB_ARRAY_DATA, word_bits);
+	pci_write_config_dword(pvt->F3, F10_NB_ARRAY_DATA, word_bits);
 
 	debugf0("section=0x%x word_bits=0x%x\n", section, word_bits);
 
@@ -157,15 +155,13 @@ static ssize_t amd64_inject_write_store(struct mem_ctl_info *mci,
 	/* Form value to choose 16-byte section of cacheline */
 	section = F10_NB_ARRAY_DRAM_ECC |
 		  SET_NB_ARRAY_ADDRESS(pvt->injection.section);
-	pci_write_config_dword(pvt->misc_f3_ctl,
-			       F10_NB_ARRAY_ADDR, section);
+	pci_write_config_dword(pvt->F3, F10_NB_ARRAY_ADDR, section);
 
 	word_bits = SET_NB_DRAM_INJECTION_WRITE(pvt->injection.word,
 						pvt->injection.bit_map);
 
 	/* Issue 'word' and 'bit' along with the READ request */
-	pci_write_config_dword(pvt->misc_f3_ctl,
-			       F10_NB_ARRAY_DATA, word_bits);
+	pci_write_config_dword(pvt->F3, F10_NB_ARRAY_DATA, word_bits);
 
 	debugf0("section=0x%x word_bits=0x%x\n", section, word_bits);
 