author		Borislav Petkov <borislav.petkov@amd.com>	2011-01-18 13:16:08 -0500
committer	Borislav Petkov <borislav.petkov@amd.com>	2011-03-17 09:46:24 -0400
commit		41d8bfaba70311c2fa0666554ef160ea8ffc9daf (patch)
tree		87b236d16240872304638ed0011bb2a9c244c0ee /drivers/edac
parent		5a5d237169152d4d7e4b6105eab15831829fb8e7 (diff)
amd64_edac: Improve DRAM address mapping
Drop the static tables which map the bits in F2x80 to a chip select size in favor of functions doing the mapping with some bit fiddling. Also, add F15h support.

Signed-off-by: Borislav Petkov <borislav.petkov@amd.com>
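As a quick sanity check (not part of the patch), the standalone sketch below recomputes the DDR2 chip select sizes the way the new ddr2_cs_size() does and compares them with the ddr2_dbam[] table that the patch drops; for a 64-bit DCT (dct_width == false) the two agree for cs_mode 0..11, and a 128-bit DCT simply doubles each size.

/*
 * Standalone sanity-check sketch, not part of the patch: recompute the DDR2
 * chip select sizes with the new bit fiddling and compare them against the
 * dropped ddr2_dbam[] table.
 */
#include <stdbool.h>
#include <stdio.h>

/* same bit fiddling as the patch's ddr2_cs_size() */
static int ddr2_cs_size(unsigned int i, bool dct_width)
{
	unsigned int shift = 0;

	if (i <= 2)
		shift = i;
	else if (!(i & 0x1))
		shift = i >> 1;
	else
		shift = (i + 1) >> 1;

	return 128 << (shift + !!dct_width);
}

/* the dropped table, written out flat (sizes in MB per chip select) */
static const int ddr2_dbam[] = { 128, 256, 512, 512, 512, 1024,
				 1024, 2048, 2048, 4096, 4096, 8192 };

int main(void)
{
	unsigned int i;

	for (i = 0; i < 12; i++)
		printf("cs_mode %2u: table %4d MB, function %4d MB\n",
		       i, ddr2_dbam[i], ddr2_cs_size(i, false));
	return 0;
}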
Diffstat (limited to 'drivers/edac')
-rw-r--r--	drivers/edac/amd64_edac.c	| 149
-rw-r--r--	drivers/edac/amd64_edac.h	|   4
2 files changed, 83 insertions(+), 70 deletions(-)
diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c
index b85487d4de8d..eb6b6bace683 100644
--- a/drivers/edac/amd64_edac.c
+++ b/drivers/edac/amd64_edac.c
@@ -25,59 +25,12 @@ static struct mem_ctl_info **mcis;
 static struct ecc_settings **ecc_stngs;
 
 /*
- * Address to DRAM bank mapping: see F2x80 for K8 and F2x[1,0]80 for Fam10 and
- * later.
- */
-static int ddr2_dbam_revCG[] = {
-			   [0]		= 32,
-			   [1]		= 64,
-			   [2]		= 128,
-			   [3]		= 256,
-			   [4]		= 512,
-			   [5]		= 1024,
-			   [6]		= 2048,
-};
-
-static int ddr2_dbam_revD[] = {
-			   [0]		= 32,
-			   [1]		= 64,
-			   [2 ... 3]	= 128,
-			   [4]		= 256,
-			   [5]		= 512,
-			   [6]		= 256,
-			   [7]		= 512,
-			   [8 ... 9]	= 1024,
-			   [10]		= 2048,
-};
-
-static int ddr2_dbam[] = { [0]		= 128,
-			   [1]		= 256,
-			   [2 ... 4]	= 512,
-			   [5 ... 6]	= 1024,
-			   [7 ... 8]	= 2048,
-			   [9 ... 10]	= 4096,
-			   [11]		= 8192,
-};
-
-static int ddr3_dbam[] = { [0]		= -1,
-			   [1]		= 256,
-			   [2]		= 512,
-			   [3 ... 4]	= -1,
-			   [5 ... 6]	= 1024,
-			   [7 ... 8]	= 2048,
-			   [9 ... 10]	= 4096,
-			   [11]		= 8192,
-};
-
-/*
  * Valid scrub rates for the K8 hardware memory scrubber. We map the scrubbing
  * bandwidth to a valid bit pattern. The 'set' operation finds the 'matching-
  * or higher value'.
  *
  *FIXME: Produce a better mapping/linearisation.
  */
-
-
 struct scrubrate {
 	u32 scrubval;		/* bit pattern for scrub rate */
 	u32 bandwidth;		/* bandwidth consumed (bytes/sec) */
@@ -962,7 +915,7 @@ static int k8_early_channel_count(struct amd64_pvt *pvt)
 
 	if (pvt->ext_model >= K8_REV_F)
 		/* RevF (NPT) and later */
-		flag = pvt->dclr0 & F10_WIDTH_128;
+		flag = pvt->dclr0 & WIDTH_128;
 	else
 		/* RevE and earlier */
 		flag = pvt->dclr0 & REVE_WIDTH_128;
@@ -1062,18 +1015,41 @@ static void k8_map_sysaddr_to_csrow(struct mem_ctl_info *mci, u64 sys_addr,
 	}
 }
 
-static int k8_dbam_to_chip_select(struct amd64_pvt *pvt, int cs_mode)
+static int ddr2_cs_size(unsigned i, bool dct_width)
 {
-	int *dbam_map;
+	unsigned shift = 0;
 
-	if (pvt->ext_model >= K8_REV_F)
-		dbam_map = ddr2_dbam;
-	else if (pvt->ext_model >= K8_REV_D)
-		dbam_map = ddr2_dbam_revD;
+	if (i <= 2)
+		shift = i;
+	else if (!(i & 0x1))
+		shift = i >> 1;
 	else
-		dbam_map = ddr2_dbam_revCG;
+		shift = (i + 1) >> 1;
+
+	return 128 << (shift + !!dct_width);
+}
+
+static int k8_dbam_to_chip_select(struct amd64_pvt *pvt, u8 dct,
+				  unsigned cs_mode)
+{
+	u32 dclr = dct ? pvt->dclr1 : pvt->dclr0;
+
+	if (pvt->ext_model >= K8_REV_F) {
+		WARN_ON(cs_mode > 11);
+		return ddr2_cs_size(cs_mode, dclr & WIDTH_128);
+	}
+	else if (pvt->ext_model >= K8_REV_D) {
+		WARN_ON(cs_mode > 10);
 
-	return dbam_map[cs_mode];
+		if (cs_mode == 3 || cs_mode == 8)
+			return 32 << (cs_mode - 1);
+		else
+			return 32 << cs_mode;
+	}
+	else {
+		WARN_ON(cs_mode > 6);
+		return 32 << cs_mode;
+	}
 }
 
 /*
@@ -1089,7 +1065,7 @@ static int f1x_early_channel_count(struct amd64_pvt *pvt)
 	int i, j, channels = 0;
 
 	/* On F10h, if we are in 128 bit mode, then we are using 2 channels */
-	if (boot_cpu_data.x86 == 0x10 && (pvt->dclr0 & F10_WIDTH_128))
+	if (boot_cpu_data.x86 == 0x10 && (pvt->dclr0 & WIDTH_128))
 		return 2;
 
 	/*
@@ -1126,16 +1102,50 @@ static int f1x_early_channel_count(struct amd64_pvt *pvt)
 	return channels;
 }
 
-static int f10_dbam_to_chip_select(struct amd64_pvt *pvt, int cs_mode)
+static int ddr3_cs_size(unsigned i, bool dct_width)
+{
+	unsigned shift = 0;
+	int cs_size = 0;
+
+	if (i == 0 || i == 3 || i == 4)
+		cs_size = -1;
+	else if (i <= 2)
+		shift = i;
+	else if (i == 12)
+		shift = 7;
+	else if (!(i & 0x1))
+		shift = i >> 1;
+	else
+		shift = (i + 1) >> 1;
+
+	if (cs_size != -1)
+		cs_size = (128 * (1 << !!dct_width)) << shift;
+
+	return cs_size;
+}
+
+static int f10_dbam_to_chip_select(struct amd64_pvt *pvt, u8 dct,
+				   unsigned cs_mode)
 {
-	int *dbam_map;
+	u32 dclr = dct ? pvt->dclr1 : pvt->dclr0;
+
+	WARN_ON(cs_mode > 11);
 
 	if (pvt->dchr0 & DDR3_MODE || pvt->dchr1 & DDR3_MODE)
-		dbam_map = ddr3_dbam;
+		return ddr3_cs_size(cs_mode, dclr & WIDTH_128);
 	else
-		dbam_map = ddr2_dbam;
+		return ddr2_cs_size(cs_mode, dclr & WIDTH_128);
+}
+
+/*
+ * F15h supports only 64bit DCT interfaces
+ */
+static int f15_dbam_to_chip_select(struct amd64_pvt *pvt, u8 dct,
+				   unsigned cs_mode)
+{
+	WARN_ON(cs_mode > 12);
 
-	return dbam_map[cs_mode];
+	return ddr3_cs_size(cs_mode, false);
 }
 
 static void read_dram_ctl_register(struct amd64_pvt *pvt)
@@ -1528,7 +1538,7 @@ static void amd64_debug_display_dimm_sizes(int ctrl, struct amd64_pvt *pvt)
 	u32 dbam = ctrl ? pvt->dbam1 : pvt->dbam0;
 
 	if (boot_cpu_data.x86 == 0xf) {
-		if (pvt->dclr0 & F10_WIDTH_128)
+		if (pvt->dclr0 & WIDTH_128)
 			factor = 1;
 
 		/* K8 families < revF not supported yet */
@@ -1551,11 +1561,13 @@ static void amd64_debug_display_dimm_sizes(int ctrl, struct amd64_pvt *pvt)
 
 		size0 = 0;
 		if (dcsb[dimm*2] & DCSB_CS_ENABLE)
-			size0 = pvt->ops->dbam_to_cs(pvt, DBAM_DIMM(dimm, dbam));
+			size0 = pvt->ops->dbam_to_cs(pvt, ctrl,
+						     DBAM_DIMM(dimm, dbam));
 
 		size1 = 0;
 		if (dcsb[dimm*2 + 1] & DCSB_CS_ENABLE)
-			size1 = pvt->ops->dbam_to_cs(pvt, DBAM_DIMM(dimm, dbam));
+			size1 = pvt->ops->dbam_to_cs(pvt, ctrl,
+						     DBAM_DIMM(dimm, dbam));
 
 		amd64_info(EDAC_MC ": %d: %5dMB %d: %5dMB\n",
 			    dimm * 2, size0 << factor,
@@ -1591,6 +1603,7 @@ static struct amd64_family_type amd64_family_types[] = {
 		.ops = {
 			.early_channel_count	= f1x_early_channel_count,
 			.map_sysaddr_to_csrow	= f1x_map_sysaddr_to_csrow,
+			.dbam_to_cs		= f15_dbam_to_chip_select,
 			.read_dct_pci_cfg	= f15_read_dct_pci_cfg,
 		}
 	},
@@ -2030,7 +2043,7 @@ static void read_mc_regs(struct amd64_pvt *pvt)
  * encompasses
  *
  */
-static u32 amd64_csrow_nr_pages(int csrow_nr, struct amd64_pvt *pvt)
+static u32 amd64_csrow_nr_pages(struct amd64_pvt *pvt, u8 dct, int csrow_nr)
 {
 	u32 cs_mode, nr_pages;
 
@@ -2043,7 +2056,7 @@ static u32 amd64_csrow_nr_pages(int csrow_nr, struct amd64_pvt *pvt)
 	 */
 	cs_mode = (pvt->dbam0 >> ((csrow_nr / 2) * 4)) & 0xF;
 
-	nr_pages = pvt->ops->dbam_to_cs(pvt, cs_mode) << (20 - PAGE_SHIFT);
+	nr_pages = pvt->ops->dbam_to_cs(pvt, dct, cs_mode) << (20 - PAGE_SHIFT);
 
 	/*
 	 * If dual channel then double the memory size of single channel.
@@ -2091,7 +2104,7 @@ static int init_csrows(struct mem_ctl_info *mci)
 			    i, pvt->mc_node_id);
 
 		empty = 0;
-		csrow->nr_pages = amd64_csrow_nr_pages(i, pvt);
+		csrow->nr_pages = amd64_csrow_nr_pages(pvt, 0, i);
 		find_csrow_limits(mci, i, &input_addr_min, &input_addr_max);
 		sys_addr = input_addr_to_sys_addr(mci, input_addr_min);
 		csrow->first_page = (u32) (sys_addr >> PAGE_SHIFT);
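A similar standalone check (again, not part of the patch) for the DDR3 path: with dct_width == false the bit fiddling in ddr3_cs_size() reproduces the dropped ddr3_dbam[] table for cs_mode 0..11, while the new cs_mode 12 encoding, reachable only via f15_dbam_to_chip_select() above, evaluates to 16384 MB.

/*
 * Standalone sketch, not part of the patch: compare the new DDR3 mapping
 * against the dropped ddr3_dbam[] table for a 64-bit DCT.
 */
#include <stdbool.h>
#include <stdio.h>

/* mirrors the patch's ddr3_cs_size() */
static int ddr3_cs_size(unsigned int i, bool dct_width)
{
	unsigned int shift = 0;
	int cs_size = 0;

	if (i == 0 || i == 3 || i == 4)
		cs_size = -1;		/* -1: no valid size, as in the old table */
	else if (i <= 2)
		shift = i;
	else if (i == 12)
		shift = 7;		/* encoding only reachable on F15h */
	else if (!(i & 0x1))
		shift = i >> 1;
	else
		shift = (i + 1) >> 1;

	if (cs_size != -1)
		cs_size = (128 * (1 << !!dct_width)) << shift;

	return cs_size;
}

/* the dropped table, written out flat (sizes in MB, -1 == invalid) */
static const int ddr3_dbam[] = { -1, 256, 512, -1, -1, 1024,
				 1024, 2048, 2048, 4096, 4096, 8192 };

int main(void)
{
	unsigned int i;

	for (i = 0; i < 12; i++)
		printf("cs_mode %2u: table %5d, function %5d\n",
		       i, ddr3_dbam[i], ddr3_cs_size(i, false));
	printf("cs_mode 12: function %5d (F15h only)\n", ddr3_cs_size(12, false));
	return 0;
}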
diff --git a/drivers/edac/amd64_edac.h b/drivers/edac/amd64_edac.h
index e14a8d0ad19f..8e431ab6a983 100644
--- a/drivers/edac/amd64_edac.h
+++ b/drivers/edac/amd64_edac.h
@@ -221,7 +221,7 @@
 #define DCLR0				0x90
 #define DCLR1				0x190
 #define REVE_WIDTH_128			BIT(16)
-#define F10_WIDTH_128			BIT(11)
+#define WIDTH_128			BIT(11)
 
 #define DCHR0				0x94
 #define DCHR1				0x194
@@ -445,7 +445,7 @@ struct low_ops {
 	int (*early_channel_count)	(struct amd64_pvt *pvt);
 	void (*map_sysaddr_to_csrow)	(struct mem_ctl_info *mci, u64 sys_addr,
 					 u16 syndrome);
-	int (*dbam_to_cs)		(struct amd64_pvt *pvt, int cs_mode);
+	int (*dbam_to_cs)		(struct amd64_pvt *pvt, u8 dct, unsigned cs_mode);
 	int (*read_dct_pci_cfg)		(struct amd64_pvt *pvt, int offset,
 					 u32 *val, const char *func);
 };
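To illustrate the new op signature in struct low_ops, here is a hypothetical caller sketch (the helper name dct_cs_size_mb is illustrative and assumes the amd64_edac.h definitions; pvt->dbam0/dbam1, the cs_mode extraction and the dbam_to_cs call are taken from the patch): the extra dct argument is forwarded to the per-family callback, which now selects the matching per-DCT DCLR/DBAM state itself, as k8/f10_dbam_to_chip_select() do with dct ? pvt->dclr1 : pvt->dclr0.

/*
 * Hypothetical caller sketch, not from the driver: look up the chip select
 * size (in MB) for a csrow pair on a given DCT via the new op signature.
 */
static int dct_cs_size_mb(struct amd64_pvt *pvt, u8 dct, int csrow_nr)
{
	u32 dbam = dct ? pvt->dbam1 : pvt->dbam0;
	unsigned int cs_mode = (dbam >> ((csrow_nr / 2) * 4)) & 0xF;

	/* before this patch: pvt->ops->dbam_to_cs(pvt, cs_mode) */
	return pvt->ops->dbam_to_cs(pvt, dct, cs_mode);
}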