-rw-r--r--  drivers/edac/amd64_edac.c  194
-rw-r--r--  drivers/edac/amd64_edac.h   34
2 files changed, 108 insertions(+), 120 deletions(-)
diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c
index ed9b07a4cf8f..7e6705449dc9 100644
--- a/drivers/edac/amd64_edac.c
+++ b/drivers/edac/amd64_edac.c
@@ -19,26 +19,48 @@ static struct mem_ctl_info *mci_lookup[EDAC_MAX_NUMNODES];
 static struct amd64_pvt *pvt_lookup[EDAC_MAX_NUMNODES];
 
 /*
- * See F2x80 for K8 and F2x[1,0]80 for Fam10 and later. The table below is only
- * for DDR2 DRAM mapping.
+ * Address to DRAM bank mapping: see F2x80 for K8 and F2x[1,0]80 for Fam10 and
+ * later.
  */
-u32 revf_quad_ddr2_shift[] = {
-	0,	/* 0000b NULL DIMM (128mb) */
-	28,	/* 0001b 256mb */
-	29,	/* 0010b 512mb */
-	29,	/* 0011b 512mb */
-	29,	/* 0100b 512mb */
-	30,	/* 0101b 1gb */
-	30,	/* 0110b 1gb */
-	31,	/* 0111b 2gb */
-	31,	/* 1000b 2gb */
-	32,	/* 1001b 4gb */
-	32,	/* 1010b 4gb */
-	33,	/* 1011b 8gb */
-	0,	/* 1100b future */
-	0,	/* 1101b future */
-	0,	/* 1110b future */
-	0	/* 1111b future */
+static int ddr2_dbam_revCG[] = {
+			   [0]		= 32,
+			   [1]		= 64,
+			   [2]		= 128,
+			   [3]		= 256,
+			   [4]		= 512,
+			   [5]		= 1024,
+			   [6]		= 2048,
+};
+
+static int ddr2_dbam_revD[] = {
+			   [0]		= 32,
+			   [1]		= 64,
+			   [2 ... 3]	= 128,
+			   [4]		= 256,
+			   [5]		= 512,
+			   [6]		= 256,
+			   [7]		= 512,
+			   [8 ... 9]	= 1024,
+			   [10]		= 2048,
+};
+
+static int ddr2_dbam[] = { [0]		= 128,
+			   [1]		= 256,
+			   [2 ... 4]	= 512,
+			   [5 ... 6]	= 1024,
+			   [7 ... 8]	= 2048,
+			   [9 ... 10]	= 4096,
+			   [11]		= 8192,
+};
+
+static int ddr3_dbam[] = { [0]		= -1,
+			   [1]		= 256,
+			   [2]		= 512,
+			   [3 ... 4]	= -1,
+			   [5 ... 6]	= 1024,
+			   [7 ... 8]	= 2048,
+			   [9 ... 10]	= 4096,
+			   [11]		= 8192,
+};
 
 /*
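/*
 * A standalone sanity sketch (not part of the patch): the new tables give
 * the chip-select size in MB directly, indexed by the 4-bit DBAM value,
 * where the removed revf_quad_ddr2_shift[] stored log2 of the size in
 * bytes. For the populated entries the two encodings agree, since
 * 128MB << (shift - 27) == size_mb. The arrays below are copies for
 * illustration only.
 */
#include <assert.h>

static const unsigned old_shift[] = { 0, 28, 29, 29, 29, 30, 30,
				      31, 31, 32, 32, 33 };
static const int      new_mb[]	   = { 128, 256, 512, 512, 512, 1024, 1024,
				      2048, 2048, 4096, 4096, 8192 };

static void check_tables(void)
{
	/* index 0 changed meaning (empty DIMM -> 128MB), so start at 1 */
	for (int i = 1; i < 12; i++)
		assert((128 << (old_shift[i] - 27)) == new_mb[i]);
}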
@@ -187,7 +209,7 @@ static int amd64_get_scrub_rate(struct mem_ctl_info *mci, u32 *bw)
 /* Map from a CSROW entry to the mask entry that operates on it */
 static inline u32 amd64_map_to_dcs_mask(struct amd64_pvt *pvt, int csrow)
 {
-	if (boot_cpu_data.x86 == 0xf && pvt->ext_model < OPTERON_CPU_REV_F)
+	if (boot_cpu_data.x86 == 0xf && pvt->ext_model < K8_REV_F)
 		return csrow;
 	else
 		return csrow >> 1;
@@ -435,7 +457,7 @@ int amd64_get_dram_hole_info(struct mem_ctl_info *mci, u64 *hole_base,
 	u64 base;
 
 	/* only revE and later have the DRAM Hole Address Register */
-	if (boot_cpu_data.x86 == 0xf && pvt->ext_model < OPTERON_CPU_REV_E) {
+	if (boot_cpu_data.x86 == 0xf && pvt->ext_model < K8_REV_E) {
 		debugf1("  revision %d for node %d does not support DHAR\n",
 			pvt->ext_model, pvt->mc_node_id);
 		return 1;
@@ -795,7 +817,7 @@ static void amd64_cpu_display_info(struct amd64_pvt *pvt)
 		edac_printk(KERN_DEBUG, EDAC_MC, "F10h CPU detected\n");
 	else if (boot_cpu_data.x86 == 0xf)
 		edac_printk(KERN_DEBUG, EDAC_MC, "%s detected\n",
-			(pvt->ext_model >= OPTERON_CPU_REV_F) ?
+			(pvt->ext_model >= K8_REV_F) ?
 			"Rev F or later" : "Rev E or earlier");
 	else
 		/* we'll hardly ever ever get here */
@@ -811,7 +833,7 @@ static enum edac_type amd64_determine_edac_cap(struct amd64_pvt *pvt)
 	int bit;
 	enum dev_type edac_cap = EDAC_FLAG_NONE;
 
-	bit = (boot_cpu_data.x86 > 0xf || pvt->ext_model >= OPTERON_CPU_REV_F)
+	bit = (boot_cpu_data.x86 > 0xf || pvt->ext_model >= K8_REV_F)
 		? 19
 		: 17;
 
@@ -936,7 +958,7 @@ static void amd64_read_dbam_reg(struct amd64_pvt *pvt)
 static void amd64_set_dct_base_and_mask(struct amd64_pvt *pvt)
 {
 
-	if (boot_cpu_data.x86 == 0xf && pvt->ext_model < OPTERON_CPU_REV_F) {
+	if (boot_cpu_data.x86 == 0xf && pvt->ext_model < K8_REV_F) {
 		pvt->dcsb_base		= REV_E_DCSB_BASE_BITS;
 		pvt->dcsm_mask		= REV_E_DCSM_MASK_BITS;
 		pvt->dcs_mask_notused	= REV_E_DCS_NOTUSED_BITS;
@@ -1009,7 +1031,7 @@ static enum mem_type amd64_determine_memory_type(struct amd64_pvt *pvt)
 {
 	enum mem_type type;
 
-	if (boot_cpu_data.x86 >= 0x10 || pvt->ext_model >= OPTERON_CPU_REV_F) {
+	if (boot_cpu_data.x86 >= 0x10 || pvt->ext_model >= K8_REV_F) {
 		/* Rev F and later */
 		type = (pvt->dclr0 & BIT(16)) ? MEM_DDR2 : MEM_RDDR2;
 	} else {
@@ -1042,7 +1064,7 @@ static int k8_early_channel_count(struct amd64_pvt *pvt)
 	if (err)
 		return err;
 
-	if ((boot_cpu_data.x86_model >> 4) >= OPTERON_CPU_REV_F) {
+	if ((boot_cpu_data.x86_model >> 4) >= K8_REV_F) {
 		/* RevF (NPT) and later */
 		flag = pvt->dclr0 & F10_WIDTH_128;
 	} else {
@@ -1158,36 +1180,18 @@ static void k8_map_sysaddr_to_csrow(struct mem_ctl_info *mci,
 	}
 }
 
-/*
- * determrine the number of PAGES in for this DIMM's size based on its DRAM
- * Address Mapping.
- *
- * First step is to calc the number of bits to shift a value of 1 left to
- * indicate show many pages. Start with the DBAM value as the starting bits,
- * then proceed to adjust those shift bits, based on CPU rev and the table.
- * See BKDG on the DBAM
- */
-static int k8_dbam_map_to_pages(struct amd64_pvt *pvt, int dram_map)
+static int k8_dbam_to_chip_select(struct amd64_pvt *pvt, int cs_mode)
 {
-	int nr_pages;
+	int *dbam_map;
 
-	if (pvt->ext_model >= OPTERON_CPU_REV_F) {
-		nr_pages = 1 << (revf_quad_ddr2_shift[dram_map] - PAGE_SHIFT);
-	} else {
-		/*
-		 * RevE and less section; this line is tricky. It collapses the
-		 * table used by RevD and later to one that matches revisions CG
-		 * and earlier.
-		 */
-		dram_map -= (pvt->ext_model >= OPTERON_CPU_REV_D) ?
-			(dram_map > 8 ? 4 : (dram_map > 5 ?
-			3 : (dram_map > 2 ? 1 : 0))) : 0;
-
-		/* 25 shift is 32MiB minimum DIMM size in RevE and prior */
-		nr_pages = 1 << (dram_map + 25 - PAGE_SHIFT);
-	}
+	if (pvt->ext_model >= K8_REV_F)
+		dbam_map = ddr2_dbam;
+	else if (pvt->ext_model >= K8_REV_D)
+		dbam_map = ddr2_dbam_revD;
+	else
+		dbam_map = ddr2_dbam_revCG;
 
-	return nr_pages;
+	return dbam_map[cs_mode];
 }
 
 /*
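/*
 * Why three tables: a standalone sketch of the removed RevD/E collapsing
 * arithmetic, in MB units (the old 25-bit shift minus the 20-bit MB
 * shift; an assumption for illustration, not patch code). The adjustment
 * that used to be applied to dram_map is exactly what ddr2_dbam_revD[]
 * now spells out entry by entry.
 */
static int old_revD_size_mb(int dram_map)
{
	dram_map -= dram_map > 8 ? 4 :
		    (dram_map > 5 ? 3 : (dram_map > 2 ? 1 : 0));

	/* 25-bit shift == 32MiB minimum DIMM size; "- 20" converts to MB */
	return 1 << (dram_map + 25 - 20);
}
/* e.g. old_revD_size_mb(6) == 256 == ddr2_dbam_revD[6] */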
@@ -1249,9 +1253,16 @@ err_reg:
 
 }
 
-static int f10_dbam_map_to_pages(struct amd64_pvt *pvt, int dram_map)
+static int f10_dbam_to_chip_select(struct amd64_pvt *pvt, int cs_mode)
 {
-	return 1 << (revf_quad_ddr2_shift[dram_map] - PAGE_SHIFT);
+	int *dbam_map;
+
+	if (pvt->dchr0 & DDR3_MODE || pvt->dchr1 & DDR3_MODE)
+		dbam_map = ddr3_dbam;
+	else
+		dbam_map = ddr2_dbam;
+
+	return dbam_map[cs_mode];
 }
 
 /* Enable extended configuration access via 0xCF8 feature */
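/*
 * Minimal illustration of the DDR3 test above (BIT() as in the kernel;
 * the dchr arguments are stand-ins): DDR3_MODE is bit 8 of F10_DCHR_x,
 * so either DCT running DDR3 selects the ddr3_dbam table.
 */
#define BIT(n)		(1U << (n))
#define DDR3_MODE	BIT(8)

static int dct_ddr3(unsigned int dchr0, unsigned int dchr1)
{
	return (dchr0 & DDR3_MODE) || (dchr1 & DDR3_MODE);
}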
@@ -1706,23 +1717,6 @@ static void f10_map_sysaddr_to_csrow(struct mem_ctl_info *mci,
 }
 
 /*
- * Input (@index) is the DBAM DIMM value (1 of 4) used as an index into a shift
- * table (revf_quad_ddr2_shift) which starts at 128MB DIMM size. Index of 0
- * indicates an empty DIMM slot, as reported by Hardware on empty slots.
- *
- * Normalize to 128MB by subracting 27 bit shift.
- */
-static int map_dbam_to_csrow_size(int index)
-{
-	int mega_bytes = 0;
-
-	if (index > 0 && index <= DBAM_MAX_VALUE)
-		mega_bytes = ((128 << (revf_quad_ddr2_shift[index]-27)));
-
-	return mega_bytes;
-}
-
-/*
  * debug routine to display the memory sizes of all logical DIMMs and its
  * CSROWs as well
  */
@@ -1734,7 +1728,7 @@ static void amd64_debug_display_dimm_sizes(int ctrl, struct amd64_pvt *pvt)
 
 	if (boot_cpu_data.x86 == 0xf) {
 		/* K8 families < revF not supported yet */
-		if (pvt->ext_model < OPTERON_CPU_REV_F)
+		if (pvt->ext_model < K8_REV_F)
 			return;
 		else
 			WARN_ON(ctrl != 0);
@@ -1753,11 +1747,11 @@ static void amd64_debug_display_dimm_sizes(int ctrl, struct amd64_pvt *pvt)
 
 		size0 = 0;
 		if (dcsb[dimm*2] & K8_DCSB_CS_ENABLE)
-			size0 = map_dbam_to_csrow_size(DBAM_DIMM(dimm, dbam));
+			size0 = pvt->ops->dbam_to_cs(pvt, DBAM_DIMM(dimm, dbam));
 
 		size1 = 0;
 		if (dcsb[dimm*2 + 1] & K8_DCSB_CS_ENABLE)
-			size1 = map_dbam_to_csrow_size(DBAM_DIMM(dimm, dbam));
+			size1 = pvt->ops->dbam_to_cs(pvt, DBAM_DIMM(dimm, dbam));
 
 		edac_printk(KERN_DEBUG, EDAC_MC, " %d: %5dMB %d: %5dMB\n",
 			    dimm * 2, size0, dimm * 2 + 1, size1);
@@ -1780,8 +1774,8 @@ static int f10_probe_valid_hardware(struct amd64_pvt *pvt)
 	 * If we are on a DDR3 machine, we don't know yet if
 	 * we support that properly at this time
 	 */
-	if ((pvt->dchr0 & F10_DCHR_Ddr3Mode) ||
-	    (pvt->dchr1 & F10_DCHR_Ddr3Mode)) {
+	if ((pvt->dchr0 & DDR3_MODE) ||
+	    (pvt->dchr1 & DDR3_MODE)) {
 
 		amd64_printk(KERN_WARNING,
 			"%s() This machine is running with DDR3 memory. "
@@ -1817,11 +1811,11 @@ static struct amd64_family_type amd64_family_types[] = {
 		.addr_f1_ctl = PCI_DEVICE_ID_AMD_K8_NB_ADDRMAP,
 		.misc_f3_ctl = PCI_DEVICE_ID_AMD_K8_NB_MISC,
 		.ops = {
-			.early_channel_count = k8_early_channel_count,
-			.get_error_address = k8_get_error_address,
-			.read_dram_base_limit = k8_read_dram_base_limit,
-			.map_sysaddr_to_csrow = k8_map_sysaddr_to_csrow,
-			.dbam_map_to_pages = k8_dbam_map_to_pages,
+			.early_channel_count	= k8_early_channel_count,
+			.get_error_address	= k8_get_error_address,
+			.read_dram_base_limit	= k8_read_dram_base_limit,
+			.map_sysaddr_to_csrow	= k8_map_sysaddr_to_csrow,
+			.dbam_to_cs		= k8_dbam_to_chip_select,
 		}
 	},
 	[F10_CPUS] = {
@@ -1829,13 +1823,13 @@ static struct amd64_family_type amd64_family_types[] = {
 		.addr_f1_ctl = PCI_DEVICE_ID_AMD_10H_NB_MAP,
 		.misc_f3_ctl = PCI_DEVICE_ID_AMD_10H_NB_MISC,
 		.ops = {
-			.probe_valid_hardware = f10_probe_valid_hardware,
-			.early_channel_count = f10_early_channel_count,
-			.get_error_address = f10_get_error_address,
-			.read_dram_base_limit = f10_read_dram_base_limit,
-			.read_dram_ctl_register = f10_read_dram_ctl_register,
-			.map_sysaddr_to_csrow = f10_map_sysaddr_to_csrow,
-			.dbam_map_to_pages = f10_dbam_map_to_pages,
+			.probe_valid_hardware	= f10_probe_valid_hardware,
+			.early_channel_count	= f10_early_channel_count,
+			.get_error_address	= f10_get_error_address,
+			.read_dram_base_limit	= f10_read_dram_base_limit,
+			.read_dram_ctl_register	= f10_read_dram_ctl_register,
+			.map_sysaddr_to_csrow	= f10_map_sysaddr_to_csrow,
+			.dbam_to_cs		= f10_dbam_to_chip_select,
 		}
 	},
 	[F11_CPUS] = {
@@ -1843,13 +1837,13 @@ static struct amd64_family_type amd64_family_types[] = {
 		.addr_f1_ctl = PCI_DEVICE_ID_AMD_11H_NB_MAP,
 		.misc_f3_ctl = PCI_DEVICE_ID_AMD_11H_NB_MISC,
 		.ops = {
-			.probe_valid_hardware = f10_probe_valid_hardware,
-			.early_channel_count = f10_early_channel_count,
-			.get_error_address = f10_get_error_address,
-			.read_dram_base_limit = f10_read_dram_base_limit,
-			.read_dram_ctl_register = f10_read_dram_ctl_register,
-			.map_sysaddr_to_csrow = f10_map_sysaddr_to_csrow,
-			.dbam_map_to_pages = f10_dbam_map_to_pages,
+			.probe_valid_hardware	= f10_probe_valid_hardware,
+			.early_channel_count	= f10_early_channel_count,
+			.get_error_address	= f10_get_error_address,
+			.read_dram_base_limit	= f10_read_dram_base_limit,
+			.read_dram_ctl_register	= f10_read_dram_ctl_register,
+			.map_sysaddr_to_csrow	= f10_map_sysaddr_to_csrow,
+			.dbam_to_cs		= f10_dbam_to_chip_select,
 		}
 	},
 };
@@ -2425,7 +2419,7 @@ static void amd64_read_mc_registers(struct amd64_pvt *pvt)
  */
 static u32 amd64_csrow_nr_pages(int csrow_nr, struct amd64_pvt *pvt)
 {
-	u32 dram_map, nr_pages;
+	u32 cs_mode, nr_pages;
 
 	/*
 	 * The math on this doesn't look right on the surface because x/2*4 can
@@ -2434,9 +2428,9 @@ static u32 amd64_csrow_nr_pages(int csrow_nr, struct amd64_pvt *pvt)
 	 * number of bits to shift the DBAM register to extract the proper CSROW
 	 * field.
 	 */
-	dram_map = (pvt->dbam0 >> ((csrow_nr / 2) * 4)) & 0xF;
+	cs_mode = (pvt->dbam0 >> ((csrow_nr / 2) * 4)) & 0xF;
 
-	nr_pages = pvt->ops->dbam_map_to_pages(pvt, dram_map);
+	nr_pages = pvt->ops->dbam_to_cs(pvt, cs_mode) << (20 - PAGE_SHIFT);
 
 	/*
 	 * If dual channel then double the memory size of single channel.
@@ -2444,7 +2438,7 @@ static u32 amd64_csrow_nr_pages(int csrow_nr, struct amd64_pvt *pvt)
 	 */
 	nr_pages <<= (pvt->channel_count - 1);
 
-	debugf0("  (csrow=%d) DBAM map index= %d\n", csrow_nr, dram_map);
+	debugf0("  (csrow=%d) DBAM map index= %d\n", csrow_nr, cs_mode);
 	debugf0("    nr_pages= %u  channel-count = %d\n",
 		nr_pages, pvt->channel_count);
 
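/*
 * Worked example of the new conversion (assuming 4KiB pages, so
 * PAGE_SHIFT == 12): dbam_to_cs() now returns MB, MB -> bytes is << 20,
 * bytes -> pages is >> PAGE_SHIFT, hence the single << (20 - PAGE_SHIFT).
 */
#define PAGE_SHIFT 12	/* assumption: 4KiB pages */

static unsigned int mb_to_pages(int size_mb)
{
	return size_mb << (20 - PAGE_SHIFT);	/* 1024MB -> 262144 pages */
}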
diff --git a/drivers/edac/amd64_edac.h b/drivers/edac/amd64_edac.h
index 24e280423de0..f8c187ea6e38 100644
--- a/drivers/edac/amd64_edac.h
+++ b/drivers/edac/amd64_edac.h
@@ -135,13 +135,9 @@
 #define EDAC_MAX_NUMNODES 8
 
 /* Extended Model from CPUID, for CPU Revision numbers */
-#define OPTERON_CPU_LE_REV_C	0
-#define OPTERON_CPU_REV_D	1
-#define OPTERON_CPU_REV_E	2
-
-/* NPT processors have the following Extended Models */
-#define OPTERON_CPU_REV_F	4
-#define OPTERON_CPU_REV_FA	5
+#define K8_REV_D		1
+#define K8_REV_E		2
+#define K8_REV_F		4
 
 /* Hardware limit on ChipSelect rows per MC and processors per system */
 #define MAX_CS_COUNT		8
@@ -243,7 +239,7 @@
 #define F10_DCHR_1		0x194
 
 #define F10_DCHR_FOUR_RANK_DIMM	BIT(18)
-#define F10_DCHR_Ddr3Mode	BIT(8)
+#define DDR3_MODE		BIT(8)
 #define F10_DCHR_MblMode	BIT(6)
 
 
@@ -501,7 +497,6 @@ struct scrubrate {
 };
 
 extern struct scrubrate scrubrates[23];
-extern u32 revf_quad_ddr2_shift[16];
 extern const char *tt_msgs[4];
 extern const char *ll_msgs[4];
 extern const char *rrrr_msgs[16];
@@ -531,17 +526,16 @@ extern struct mcidev_sysfs_attribute amd64_dbg_attrs[NUM_DBG_ATTRS],
  * functions and per device encoding/decoding logic.
  */
 struct low_ops {
-	int (*probe_valid_hardware)(struct amd64_pvt *pvt);
-	int (*early_channel_count)(struct amd64_pvt *pvt);
+	int (*probe_valid_hardware)	(struct amd64_pvt *pvt);
+	int (*early_channel_count)	(struct amd64_pvt *pvt);
 
-	u64 (*get_error_address)(struct mem_ctl_info *mci,
-			struct err_regs *info);
-	void (*read_dram_base_limit)(struct amd64_pvt *pvt, int dram);
-	void (*read_dram_ctl_register)(struct amd64_pvt *pvt);
-	void (*map_sysaddr_to_csrow)(struct mem_ctl_info *mci,
-			struct err_regs *info,
-			u64 SystemAddr);
-	int (*dbam_map_to_pages)(struct amd64_pvt *pvt, int dram_map);
+	u64 (*get_error_address)	(struct mem_ctl_info *mci,
+					 struct err_regs *info);
+	void (*read_dram_base_limit)	(struct amd64_pvt *pvt, int dram);
+	void (*read_dram_ctl_register)	(struct amd64_pvt *pvt);
+	void (*map_sysaddr_to_csrow)	(struct mem_ctl_info *mci,
+					 struct err_regs *info, u64 SystemAddr);
+	int (*dbam_to_cs)		(struct amd64_pvt *pvt, int cs_mode);
 };
 
 struct amd64_family_type {
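/*
 * How the indirection is consumed, reduced to a self-contained sketch
 * (the types and helper below are stand-ins; only the member name
 * dbam_to_cs comes from the patch): callers stay family-agnostic and
 * the per-family dbam_to_cs does the table lookup.
 */
struct amd64_pvt;

struct low_ops {
	int (*dbam_to_cs)(struct amd64_pvt *pvt, int cs_mode);
};

static int csrow_size_mb(struct low_ops *ops, struct amd64_pvt *pvt,
			 int cs_mode)
{
	return ops->dbam_to_cs(pvt, cs_mode);	/* size in MB, or -1 */
}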