author	Aravind Gopalakrishnan <Aravind.Gopalakrishnan@amd.com>	2014-10-30 07:16:09 -0400
committer	Borislav Petkov <bp@suse.de>	2014-10-30 08:42:48 -0400
commit	a597d2a5d9820dbbadd70583170c48c7290427df (patch)
tree	95b92ca110f29c6d66d9f0d9b0b0eacf06616959 /drivers
parent	5c43cbdf78b55f9de3e3e9546c9f4e909d1d31be (diff)
amd64_edac: Add F15h M60h support
This patch adds support for ECC error decoding for the F15h M60h processor.
Aside from the usual changes, it adds support for some new features in the
processor:

 - DDR4 (unbuffered, registered) and LRDIMM DDR3 support
   - relevant debug messages have been modified/added to report these
     memory types

 - new dbam_to_cs mappers
   - for F15h M60h with LRDIMMs, we need a 'multiplier' value to find
     cs_size. This multiplier value is obtained from the per-DIMM DCSM
     register. So, change the interface to accept a 'cs_mask_nr' value
     to facilitate this calculation.

 - switch-casing determine_memory_type()
   - done to cleanse the function of too many if-else statements and
     improve readability
   - it is now called early, in read_mc_regs(), to cache dram_type

Misc cleanup:
 - amd64_pci_table[] is condensed by using the PCI_VDEVICE macro.

Testing details:
Tested the patch by injecting 'ECC' type errors using mce_amd_inj;
error decoding works fine.

Signed-off-by: Aravind Gopalakrishnan <Aravind.Gopalakrishnan@amd.com>
Link: http://lkml.kernel.org/r/1414617483-4941-1-git-send-email-Aravind.Gopalakrishnan@amd.com
[ Boris: determine_memory_type() cleanups ]
Signed-off-by: Borislav Petkov <bp@suse.de>
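For orientation before the patch body, here is a minimal standalone sketch of the LRDIMM chip-select sizing rule described above: the rank multiplier comes from the per-DIMM DCSM value and the DBAM nibble (cs_mode) is mapped to a size in MB. It mirrors the logic of the new ddr3_lrdimm_cs_size()/f15_m60h_dbam_to_chip_select() helpers in the diff below; the function name lrdimm_cs_size_mb and the example values are illustrative assumptions, not driver code.

```c
#include <stdio.h>

/*
 * Sketch only (not the driver code): size in MB of one chip select for an
 * F15h M60h LRDIMM configuration. 'cs_mode' is the DBAM nibble for the DIMM,
 * 'dcsm' the per-DIMM DCS mask register value holding the rank multiplier.
 */
static int lrdimm_cs_size_mb(unsigned cs_mode, unsigned dcsm)
{
	unsigned rank_multiply = dcsm & 0xf;	/* rank multiplier field */
	unsigned shift;

	if (rank_multiply == 3)			/* encoding 3 means 4x */
		rank_multiply = 4;

	if (cs_mode < 4 || cs_mode == 6)	/* reserved encodings */
		return -1;

	if (cs_mode == 12)
		shift = 7;
	else if (!(cs_mode & 0x1))
		shift = cs_mode >> 1;
	else
		shift = (cs_mode + 1) >> 1;

	return rank_multiply * (128 << shift);	/* size in MB */
}

int main(void)
{
	/* e.g. cs_mode 10 with a 2x rank multiplier -> 8192 MB */
	printf("%d MB\n", lrdimm_cs_size_mb(10, 0x2));
	return 0;
}
```

This is also why the dbam_to_cs op grows a 'cs_mask_nr' argument in the patch: the caller has to tell the mapper which DCSM to read the multiplier from.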
Diffstat (limited to 'drivers')
-rw-r--r--	drivers/edac/amd64_edac.c	255
-rw-r--r--	drivers/edac/amd64_edac.h	15
2 files changed, 188 insertions(+), 82 deletions(-)
diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c
index bbd65149cdb2..1a1d7c43a20f 100644
--- a/drivers/edac/amd64_edac.c
+++ b/drivers/edac/amd64_edac.c
@@ -692,9 +692,19 @@ static void debug_dump_dramcfg_low(struct amd64_pvt *pvt, u32 dclr, int chan)
 {
 	edac_dbg(1, "F2x%d90 (DRAM Cfg Low): 0x%08x\n", chan, dclr);
 
-	edac_dbg(1, " DIMM type: %sbuffered; all DIMMs support ECC: %s\n",
-		 (dclr & BIT(16)) ? "un" : "",
-		 (dclr & BIT(19)) ? "yes" : "no");
+	if (pvt->dram_type == MEM_LRDDR3) {
+		u32 dcsm = pvt->csels[chan].csmasks[0];
+		/*
+		 * It's assumed all LRDIMMs in a DCT are going to be of
+		 * same 'type' until proven otherwise. So, use a cs
+		 * value of '0' here to get dcsm value.
+		 */
+		edac_dbg(1, " LRDIMM %dx rank multiply\n", (dcsm & 0x3));
+	}
+
+	edac_dbg(1, "All DIMMs support ECC:%s\n",
+		 (dclr & BIT(19)) ? "yes" : "no");
+
 
 	edac_dbg(1, " PAR/ERR parity: %s\n",
 		 (dclr & BIT(8)) ? "enabled" : "disabled");
@@ -756,7 +766,7 @@ static void prep_chip_selects(struct amd64_pvt *pvt)
 	if (pvt->fam == 0xf && pvt->ext_model < K8_REV_F) {
 		pvt->csels[0].b_cnt = pvt->csels[1].b_cnt = 8;
 		pvt->csels[0].m_cnt = pvt->csels[1].m_cnt = 8;
-	} else if (pvt->fam == 0x15 && pvt->model >= 0x30) {
+	} else if (pvt->fam == 0x15 && pvt->model == 0x30) {
 		pvt->csels[0].b_cnt = pvt->csels[1].b_cnt = 4;
 		pvt->csels[0].m_cnt = pvt->csels[1].m_cnt = 2;
 	} else {
@@ -813,25 +823,63 @@ static void read_dct_base_mask(struct amd64_pvt *pvt)
 	}
 }
 
-static enum mem_type determine_memory_type(struct amd64_pvt *pvt, int cs)
+static void determine_memory_type(struct amd64_pvt *pvt)
 {
-	enum mem_type type;
+	u32 dram_ctrl, dcsm;
 
-	/* F15h supports only DDR3 */
-	if (pvt->fam >= 0x15)
-		type = (pvt->dclr0 & BIT(16)) ? MEM_DDR3 : MEM_RDDR3;
-	else if (pvt->fam == 0x10 || pvt->ext_model >= K8_REV_F) {
+	switch (pvt->fam) {
+	case 0xf:
+		if (pvt->ext_model >= K8_REV_F)
+			goto ddr3;
+
+		pvt->dram_type = (pvt->dclr0 & BIT(18)) ? MEM_DDR : MEM_RDDR;
+		return;
+
+	case 0x10:
 		if (pvt->dchr0 & DDR3_MODE)
-			type = (pvt->dclr0 & BIT(16)) ? MEM_DDR3 : MEM_RDDR3;
+			goto ddr3;
+
+		pvt->dram_type = (pvt->dclr0 & BIT(16)) ? MEM_DDR2 : MEM_RDDR2;
+		return;
+
+	case 0x15:
+		if (pvt->model < 0x60)
+			goto ddr3;
+
+		/*
+		 * Model 0x60h needs special handling:
+		 *
+		 * We use a Chip Select value of '0' to obtain dcsm.
+		 * Theoretically, it is possible to populate LRDIMMs of different
+		 * 'Rank' value on a DCT. But this is not the common case. So,
+		 * it's reasonable to assume all DIMMs are going to be of same
+		 * 'type' until proven otherwise.
+		 */
+		amd64_read_dct_pci_cfg(pvt, 0, DRAM_CONTROL, &dram_ctrl);
+		dcsm = pvt->csels[0].csmasks[0];
+
+		if (((dram_ctrl >> 8) & 0x7) == 0x2)
+			pvt->dram_type = MEM_DDR4;
+		else if (pvt->dclr0 & BIT(16))
+			pvt->dram_type = MEM_DDR3;
+		else if (dcsm & 0x3)
+			pvt->dram_type = MEM_LRDDR3;
 		else
-			type = (pvt->dclr0 & BIT(16)) ? MEM_DDR2 : MEM_RDDR2;
-	} else {
-		type = (pvt->dclr0 & BIT(18)) ? MEM_DDR : MEM_RDDR;
-	}
+			pvt->dram_type = MEM_RDDR3;
 
-	amd64_info("CS%d: %s\n", cs, edac_mem_types[type]);
+		return;
+
+	case 0x16:
+		goto ddr3;
+
+	default:
+		WARN(1, KERN_ERR "%s: Family??? 0x%x\n", __func__, pvt->fam);
+		pvt->dram_type = MEM_EMPTY;
+	}
+	return;
 
-	return type;
+ddr3:
+	pvt->dram_type = (pvt->dclr0 & BIT(16)) ? MEM_DDR3 : MEM_RDDR3;
 }
 
 /* Get the number of DCT channels the memory controller is using. */
@@ -958,8 +1006,12 @@ static void read_dram_base_limit_regs(struct amd64_pvt *pvt, unsigned range)
 	if (WARN_ON(!nb))
 		return;
 
-	pci_func = (pvt->model == 0x30) ? PCI_DEVICE_ID_AMD_15H_M30H_NB_F1
-					: PCI_DEVICE_ID_AMD_15H_NB_F1;
+	if (pvt->model == 0x60)
+		pci_func = PCI_DEVICE_ID_AMD_15H_M60H_NB_F1;
+	else if (pvt->model == 0x30)
+		pci_func = PCI_DEVICE_ID_AMD_15H_M30H_NB_F1;
+	else
+		pci_func = PCI_DEVICE_ID_AMD_15H_NB_F1;
 
 	f1 = pci_get_related_function(nb->misc->vendor, pci_func, nb->misc);
 	if (WARN_ON(!f1))
@@ -1049,7 +1101,7 @@ static int ddr2_cs_size(unsigned i, bool dct_width)
 }
 
 static int k8_dbam_to_chip_select(struct amd64_pvt *pvt, u8 dct,
-				  unsigned cs_mode)
+				  unsigned cs_mode, int cs_mask_nr)
 {
 	u32 dclr = dct ? pvt->dclr1 : pvt->dclr0;
 
@@ -1167,8 +1219,43 @@ static int ddr3_cs_size(unsigned i, bool dct_width)
 	return cs_size;
 }
 
+static int ddr3_lrdimm_cs_size(unsigned i, unsigned rank_multiply)
+{
+	unsigned shift = 0;
+	int cs_size = 0;
+
+	if (i < 4 || i == 6)
+		cs_size = -1;
+	else if (i == 12)
+		shift = 7;
+	else if (!(i & 0x1))
+		shift = i >> 1;
+	else
+		shift = (i + 1) >> 1;
+
+	if (cs_size != -1)
+		cs_size = rank_multiply * (128 << shift);
+
+	return cs_size;
+}
+
+static int ddr4_cs_size(unsigned i)
+{
+	int cs_size = 0;
+
+	if (i == 0)
+		cs_size = -1;
+	else if (i == 1)
+		cs_size = 1024;
+	else
+		/* Min cs_size = 1G */
+		cs_size = 1024 * (1 << (i >> 1));
+
+	return cs_size;
+}
+
 static int f10_dbam_to_chip_select(struct amd64_pvt *pvt, u8 dct,
-				   unsigned cs_mode)
+				   unsigned cs_mode, int cs_mask_nr)
 {
 	u32 dclr = dct ? pvt->dclr1 : pvt->dclr0;
 
@@ -1184,18 +1271,49 @@ static int f10_dbam_to_chip_select(struct amd64_pvt *pvt, u8 dct,
  * F15h supports only 64bit DCT interfaces
  */
 static int f15_dbam_to_chip_select(struct amd64_pvt *pvt, u8 dct,
-				   unsigned cs_mode)
+				   unsigned cs_mode, int cs_mask_nr)
 {
 	WARN_ON(cs_mode > 12);
 
 	return ddr3_cs_size(cs_mode, false);
 }
 
+/* F15h M60h supports DDR4 mapping as well.. */
+static int f15_m60h_dbam_to_chip_select(struct amd64_pvt *pvt, u8 dct,
+					unsigned cs_mode, int cs_mask_nr)
+{
+	int cs_size;
+	u32 dcsm = pvt->csels[dct].csmasks[cs_mask_nr];
+
+	WARN_ON(cs_mode > 12);
+
+	if (pvt->dram_type == MEM_DDR4) {
+		if (cs_mode > 9)
+			return -1;
+
+		cs_size = ddr4_cs_size(cs_mode);
+	} else if (pvt->dram_type == MEM_LRDDR3) {
+		unsigned rank_multiply = dcsm & 0xf;
+
+		if (rank_multiply == 3)
+			rank_multiply = 4;
+		cs_size = ddr3_lrdimm_cs_size(cs_mode, rank_multiply);
+	} else {
+		/* Minimum cs size is 512mb for F15hM60h*/
+		if (cs_mode == 0x1)
+			return -1;
+
+		cs_size = ddr3_cs_size(cs_mode, false);
+	}
+
+	return cs_size;
+}
+
 /*
  * F16h and F15h model 30h have only limited cs_modes.
  */
 static int f16_dbam_to_chip_select(struct amd64_pvt *pvt, u8 dct,
-				   unsigned cs_mode)
+				   unsigned cs_mode, int cs_mask_nr)
 {
 	WARN_ON(cs_mode > 12);
 
@@ -1757,13 +1875,20 @@ static void debug_display_dimm_sizes(struct amd64_pvt *pvt, u8 ctrl)
 
 		size0 = 0;
 		if (dcsb[dimm*2] & DCSB_CS_ENABLE)
+			/* For f15m60h, need multiplier for LRDIMM cs_size
+			 * calculation. We pass 'dimm' value to the dbam_to_cs
+			 * mapper so we can find the multiplier from the
+			 * corresponding DCSM.
+			 */
 			size0 = pvt->ops->dbam_to_cs(pvt, ctrl,
-						     DBAM_DIMM(dimm, dbam));
+						     DBAM_DIMM(dimm, dbam),
+						     dimm);
 
 		size1 = 0;
 		if (dcsb[dimm*2 + 1] & DCSB_CS_ENABLE)
 			size1 = pvt->ops->dbam_to_cs(pvt, ctrl,
-						     DBAM_DIMM(dimm, dbam));
+						     DBAM_DIMM(dimm, dbam),
+						     dimm);
 
 		amd64_info(EDAC_MC ": %d: %5dMB %d: %5dMB\n",
 			   dimm * 2, size0,
@@ -1812,6 +1937,16 @@ static struct amd64_family_type family_types[] = {
 			.dbam_to_cs		= f16_dbam_to_chip_select,
 		}
 	},
+	[F15_M60H_CPUS] = {
+		.ctl_name = "F15h_M60h",
+		.f1_id = PCI_DEVICE_ID_AMD_15H_M60H_NB_F1,
+		.f3_id = PCI_DEVICE_ID_AMD_15H_M60H_NB_F3,
+		.ops = {
+			.early_channel_count	= f1x_early_channel_count,
+			.map_sysaddr_to_csrow	= f1x_map_sysaddr_to_csrow,
+			.dbam_to_cs		= f15_m60h_dbam_to_chip_select,
+		}
+	},
 	[F16_CPUS] = {
 		.ctl_name = "F16h",
 		.f1_id = PCI_DEVICE_ID_AMD_16H_NB_F1,
@@ -2175,6 +2310,8 @@ static void read_mc_regs(struct amd64_pvt *pvt)
 	}
 
 	pvt->ecc_sym_sz = 4;
+	determine_memory_type(pvt);
+	edac_dbg(1, "  DIMM type: %s\n", edac_mem_types[pvt->dram_type]);
 
 	if (pvt->fam >= 0x10) {
 		amd64_read_pci_cfg(pvt->F3, EXT_NB_MCA_CFG, &tmp);
@@ -2238,7 +2375,8 @@ static u32 get_csrow_nr_pages(struct amd64_pvt *pvt, u8 dct, int csrow_nr)
 	 */
 	cs_mode = DBAM_DIMM(csrow_nr / 2, dbam);
 
-	nr_pages = pvt->ops->dbam_to_cs(pvt, dct, cs_mode) << (20 - PAGE_SHIFT);
+	nr_pages = pvt->ops->dbam_to_cs(pvt, dct, cs_mode, (csrow_nr / 2))
+							   << (20 - PAGE_SHIFT);
 
 	edac_dbg(0, "csrow: %d, channel: %d, DBAM idx: %d\n",
 		 csrow_nr, dct, cs_mode);
@@ -2257,7 +2395,6 @@ static int init_csrows(struct mem_ctl_info *mci)
 	struct csrow_info *csrow;
 	struct dimm_info *dimm;
 	enum edac_type edac_mode;
-	enum mem_type mtype;
 	int i, j, empty = 1;
 	int nr_pages = 0;
 	u32 val;
@@ -2302,8 +2439,6 @@ static int init_csrows(struct mem_ctl_info *mci)
 			nr_pages += row_dct1_pages;
 		}
 
-		mtype = determine_memory_type(pvt, i);
-
 		edac_dbg(1, "Total csrow%d pages: %u\n", i, nr_pages);
 
 		/*
@@ -2317,7 +2452,7 @@ static int init_csrows(struct mem_ctl_info *mci)
 
 		for (j = 0; j < pvt->channel_count; j++) {
 			dimm = csrow->channels[j]->dimm;
-			dimm->mtype = mtype;
+			dimm->mtype = pvt->dram_type;
 			dimm->edac_mode = edac_mode;
 		}
 	}
@@ -2604,6 +2739,10 @@ static struct amd64_family_type *per_family_init(struct amd64_pvt *pvt)
 			fam_type = &family_types[F15_M30H_CPUS];
 			pvt->ops = &family_types[F15_M30H_CPUS].ops;
 			break;
+		} else if (pvt->model == 0x60) {
+			fam_type = &family_types[F15_M60H_CPUS];
+			pvt->ops = &family_types[F15_M60H_CPUS].ops;
+			break;
 		}
 
 		fam_type = &family_types[F15_CPUS];
@@ -2828,55 +2967,13 @@ static void remove_one_instance(struct pci_dev *pdev)
  * inquiry this table to see if this driver is for a given device found.
  */
 static const struct pci_device_id amd64_pci_table[] = {
-	{
-		.vendor		= PCI_VENDOR_ID_AMD,
-		.device		= PCI_DEVICE_ID_AMD_K8_NB_MEMCTL,
-		.subvendor	= PCI_ANY_ID,
-		.subdevice	= PCI_ANY_ID,
-		.class		= 0,
-		.class_mask	= 0,
-	},
-	{
-		.vendor		= PCI_VENDOR_ID_AMD,
-		.device		= PCI_DEVICE_ID_AMD_10H_NB_DRAM,
-		.subvendor	= PCI_ANY_ID,
-		.subdevice	= PCI_ANY_ID,
-		.class		= 0,
-		.class_mask	= 0,
-	},
-	{
-		.vendor		= PCI_VENDOR_ID_AMD,
-		.device		= PCI_DEVICE_ID_AMD_15H_NB_F2,
-		.subvendor	= PCI_ANY_ID,
-		.subdevice	= PCI_ANY_ID,
-		.class		= 0,
-		.class_mask	= 0,
-	},
-	{
-		.vendor		= PCI_VENDOR_ID_AMD,
-		.device		= PCI_DEVICE_ID_AMD_15H_M30H_NB_F2,
-		.subvendor	= PCI_ANY_ID,
-		.subdevice	= PCI_ANY_ID,
-		.class		= 0,
-		.class_mask	= 0,
-	},
-	{
-		.vendor		= PCI_VENDOR_ID_AMD,
-		.device		= PCI_DEVICE_ID_AMD_16H_NB_F2,
-		.subvendor	= PCI_ANY_ID,
-		.subdevice	= PCI_ANY_ID,
-		.class		= 0,
-		.class_mask	= 0,
-	},
-	{
-		.vendor		= PCI_VENDOR_ID_AMD,
-		.device		= PCI_DEVICE_ID_AMD_16H_M30H_NB_F2,
-		.subvendor	= PCI_ANY_ID,
-		.subdevice	= PCI_ANY_ID,
-		.class		= 0,
-		.class_mask	= 0,
-	},
-
+	{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_K8_NB_MEMCTL) },
+	{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_10H_NB_DRAM) },
+	{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_15H_NB_F2) },
+	{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_15H_M30H_NB_F2) },
+	{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_15H_M60H_NB_F2) },
+	{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_16H_NB_F2) },
+	{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_16H_M30H_NB_F2) },
 	{0, }
 };
 MODULE_DEVICE_TABLE(pci, amd64_pci_table);
diff --git a/drivers/edac/amd64_edac.h b/drivers/edac/amd64_edac.h
index 55fb5941c6d4..d8468c667925 100644
--- a/drivers/edac/amd64_edac.h
+++ b/drivers/edac/amd64_edac.h
@@ -162,10 +162,12 @@
 /*
  * PCI-defined configuration space registers
  */
-#define PCI_DEVICE_ID_AMD_15H_M30H_NB_F1 0x141b
-#define PCI_DEVICE_ID_AMD_15H_M30H_NB_F2 0x141c
 #define PCI_DEVICE_ID_AMD_15H_NB_F1	0x1601
 #define PCI_DEVICE_ID_AMD_15H_NB_F2	0x1602
+#define PCI_DEVICE_ID_AMD_15H_M30H_NB_F1 0x141b
+#define PCI_DEVICE_ID_AMD_15H_M30H_NB_F2 0x141c
+#define PCI_DEVICE_ID_AMD_15H_M60H_NB_F1 0x1571
+#define PCI_DEVICE_ID_AMD_15H_M60H_NB_F2 0x1572
 #define PCI_DEVICE_ID_AMD_16H_NB_F1	0x1531
 #define PCI_DEVICE_ID_AMD_16H_NB_F2	0x1532
 #define PCI_DEVICE_ID_AMD_16H_M30H_NB_F1 0x1581
@@ -221,6 +223,8 @@
 
 #define csrow_enabled(i, dct, pvt)	((pvt)->csels[(dct)].csbases[(i)] & DCSB_CS_ENABLE)
 
+#define DRAM_CONTROL			0x78
+
 #define DBAM0				0x80
 #define DBAM1				0x180
 
@@ -301,6 +305,7 @@ enum amd_families {
 	F10_CPUS,
 	F15_CPUS,
 	F15_M30H_CPUS,
+	F15_M60H_CPUS,
 	F16_CPUS,
 	F16_M30H_CPUS,
 	NUM_FAMILIES,
@@ -379,6 +384,9 @@ struct amd64_pvt {
 
 	/* place to store error injection parameters prior to issue */
 	struct error_injection injection;
+
+	/* cache the dram_type */
+	enum mem_type dram_type;
 };
 
 enum err_codes {
@@ -480,7 +488,8 @@ struct low_ops {
 	int (*early_channel_count)	(struct amd64_pvt *pvt);
 	void (*map_sysaddr_to_csrow)	(struct mem_ctl_info *mci, u64 sys_addr,
 					 struct err_info *);
-	int (*dbam_to_cs)		(struct amd64_pvt *pvt, u8 dct, unsigned cs_mode);
+	int (*dbam_to_cs)		(struct amd64_pvt *pvt, u8 dct,
+					 unsigned cs_mode, int cs_mask_nr);
 };
 
 struct amd64_family_type {