author	Linus Torvalds <torvalds@linux-foundation.org>	2011-03-17 20:21:32 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2011-03-17 20:21:32 -0400
commit	978ca164bd9f30bd51f71dad86d8c3797f7add76 (patch)
tree	e7cbd50aa6b2709ea27a59bc2adafe2ff27e8a33	/drivers/edac/amd64_edac.c
parent	02e4c627d862427653fc088ce299746ea7d85600 (diff)
parent	d34a6ecd45c1362d388af8d83ed329c609d1712b (diff)
Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/bp/bp
* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/bp/bp: (38 commits)
  amd64_edac: Fix decode_syndrome types
  amd64_edac: Fix DCT argument type
  amd64_edac: Fix ranges signedness
  amd64_edac: Drop local variable
  amd64_edac: Fix PCI config addressing types
  amd64_edac: Fix DRAM base macros
  amd64_edac: Fix node id signedness
  amd64_edac: Drop redundant declarations
  amd64_edac: Enable driver on F15h
  amd64_edac: Adjust ECC symbol size to F15h
  amd64_edac: Simplify scrubrate setting
  PCI: Rename CPU PCI id define
  amd64_edac: Improve DRAM address mapping
  amd64_edac: Sanitize ->read_dram_ctl_register
  amd64_edac: Adjust sys_addr to chip select conversion routine to F15h
  amd64_edac: Beef up early exit reporting
  amd64_edac: Revamp online spare handling
  amd64_edac: Fix channel interleave removal
  amd64_edac: Correct node interleaving removal
  amd64_edac: Add support for interleaved region swapping
  ...

Fix up trivial conflict in include/linux/pci_ids.h due to AMD_15H_NB_MISC
being renamed as AMD_15H_NB_F3 next to the new AMD_15H_NB_LINK entry.
Diffstat (limited to 'drivers/edac/amd64_edac.c')
-rw-r--r--	drivers/edac/amd64_edac.c	| 1442
1 file changed, 678 insertions, 764 deletions
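
The replacement code in the patch below leans heavily on a driver-local GENMASK(lo, hi) helper to carve bit ranges out of registers and addresses (e.g. GENMASK(0, 39) for the low 40 address bits, or GENMASK(21, 31) | GENMASK(9, 15) for the chip-select base bits). The macro itself lives in amd64_edac.h and is not part of this diff; a minimal sketch consistent with its uses here — an inclusive bit range from lo to hi — would be:

	/*
	 * Hypothetical sketch, not the hunk's own definition: build a 64-bit
	 * mask with bits lo..hi set, inclusive.  GENMASK(0, 39) -> 0xffffffffff.
	 */
	#define GENMASK(LO, HI)	(((1ULL << ((HI) - (LO) + 1)) - 1) << (LO))

Note the argument order (low bit first), the opposite of the later kernel-wide GENMASK(h, l) helper.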
diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c
index 23e03554f0d3..0be30e978c85 100644
--- a/drivers/edac/amd64_edac.c
+++ b/drivers/edac/amd64_edac.c
@@ -25,59 +25,12 @@ static struct mem_ctl_info **mcis;
25static struct ecc_settings **ecc_stngs; 25static struct ecc_settings **ecc_stngs;
26 26
27/* 27/*
28 * Address to DRAM bank mapping: see F2x80 for K8 and F2x[1,0]80 for Fam10 and
29 * later.
30 */
31static int ddr2_dbam_revCG[] = {
32 [0] = 32,
33 [1] = 64,
34 [2] = 128,
35 [3] = 256,
36 [4] = 512,
37 [5] = 1024,
38 [6] = 2048,
39};
40
41static int ddr2_dbam_revD[] = {
42 [0] = 32,
43 [1] = 64,
44 [2 ... 3] = 128,
45 [4] = 256,
46 [5] = 512,
47 [6] = 256,
48 [7] = 512,
49 [8 ... 9] = 1024,
50 [10] = 2048,
51};
52
53static int ddr2_dbam[] = { [0] = 128,
54 [1] = 256,
55 [2 ... 4] = 512,
56 [5 ... 6] = 1024,
57 [7 ... 8] = 2048,
58 [9 ... 10] = 4096,
59 [11] = 8192,
60};
61
62static int ddr3_dbam[] = { [0] = -1,
63 [1] = 256,
64 [2] = 512,
65 [3 ... 4] = -1,
66 [5 ... 6] = 1024,
67 [7 ... 8] = 2048,
68 [9 ... 10] = 4096,
69 [11] = 8192,
70};
71
72/*
73 * Valid scrub rates for the K8 hardware memory scrubber. We map the scrubbing 28 * Valid scrub rates for the K8 hardware memory scrubber. We map the scrubbing
74 * bandwidth to a valid bit pattern. The 'set' operation finds the 'matching- 29 * bandwidth to a valid bit pattern. The 'set' operation finds the 'matching-
75 * or higher value'. 30 * or higher value'.
76 * 31 *
77 *FIXME: Produce a better mapping/linearisation. 32 *FIXME: Produce a better mapping/linearisation.
78 */ 33 */
79
80
81struct scrubrate { 34struct scrubrate {
82 u32 scrubval; /* bit pattern for scrub rate */ 35 u32 scrubval; /* bit pattern for scrub rate */
83 u32 bandwidth; /* bandwidth consumed (bytes/sec) */ 36 u32 bandwidth; /* bandwidth consumed (bytes/sec) */
@@ -107,6 +60,79 @@ struct scrubrate {
107 { 0x00, 0UL}, /* scrubbing off */ 60 { 0x00, 0UL}, /* scrubbing off */
108}; 61};
109 62
63static int __amd64_read_pci_cfg_dword(struct pci_dev *pdev, int offset,
64 u32 *val, const char *func)
65{
66 int err = 0;
67
68 err = pci_read_config_dword(pdev, offset, val);
69 if (err)
70 amd64_warn("%s: error reading F%dx%03x.\n",
71 func, PCI_FUNC(pdev->devfn), offset);
72
73 return err;
74}
75
76int __amd64_write_pci_cfg_dword(struct pci_dev *pdev, int offset,
77 u32 val, const char *func)
78{
79 int err = 0;
80
81 err = pci_write_config_dword(pdev, offset, val);
82 if (err)
83 amd64_warn("%s: error writing to F%dx%03x.\n",
84 func, PCI_FUNC(pdev->devfn), offset);
85
86 return err;
87}
88
89/*
90 *
91 * Depending on the family, F2 DCT reads need special handling:
92 *
93 * K8: has a single DCT only
94 *
95 * F10h: each DCT has its own set of regs
96 * DCT0 -> F2x040..
97 * DCT1 -> F2x140..
98 *
99 * F15h: we select which DCT we access using F1x10C[DctCfgSel]
100 *
101 */
102static int k8_read_dct_pci_cfg(struct amd64_pvt *pvt, int addr, u32 *val,
103 const char *func)
104{
105 if (addr >= 0x100)
106 return -EINVAL;
107
108 return __amd64_read_pci_cfg_dword(pvt->F2, addr, val, func);
109}
110
111static int f10_read_dct_pci_cfg(struct amd64_pvt *pvt, int addr, u32 *val,
112 const char *func)
113{
114 return __amd64_read_pci_cfg_dword(pvt->F2, addr, val, func);
115}
116
117static int f15_read_dct_pci_cfg(struct amd64_pvt *pvt, int addr, u32 *val,
118 const char *func)
119{
120 u32 reg = 0;
121 u8 dct = 0;
122
123 if (addr >= 0x140 && addr <= 0x1a0) {
124 dct = 1;
125 addr -= 0x100;
126 }
127
128 amd64_read_pci_cfg(pvt->F1, DCT_CFG_SEL, &reg);
129 reg &= 0xfffffffe;
130 reg |= dct;
131 amd64_write_pci_cfg(pvt->F1, DCT_CFG_SEL, reg);
132
133 return __amd64_read_pci_cfg_dword(pvt->F2, addr, val, func);
134}
135
110/* 136/*
111 * Memory scrubber control interface. For K8, memory scrubbing is handled by 137 * Memory scrubber control interface. For K8, memory scrubbing is handled by
112 * hardware and can involve L2 cache, dcache as well as the main memory. With 138 * hardware and can involve L2 cache, dcache as well as the main memory. With
@@ -156,7 +182,7 @@ static int __amd64_set_scrub_rate(struct pci_dev *ctl, u32 new_bw, u32 min_rate)
156 182
157 scrubval = scrubrates[i].scrubval; 183 scrubval = scrubrates[i].scrubval;
158 184
159 pci_write_bits32(ctl, K8_SCRCTRL, scrubval, 0x001F); 185 pci_write_bits32(ctl, SCRCTRL, scrubval, 0x001F);
160 186
161 if (scrubval) 187 if (scrubval)
162 return scrubrates[i].bandwidth; 188 return scrubrates[i].bandwidth;
@@ -167,8 +193,12 @@ static int __amd64_set_scrub_rate(struct pci_dev *ctl, u32 new_bw, u32 min_rate)
167static int amd64_set_scrub_rate(struct mem_ctl_info *mci, u32 bw) 193static int amd64_set_scrub_rate(struct mem_ctl_info *mci, u32 bw)
168{ 194{
169 struct amd64_pvt *pvt = mci->pvt_info; 195 struct amd64_pvt *pvt = mci->pvt_info;
196 u32 min_scrubrate = 0x5;
197
198 if (boot_cpu_data.x86 == 0xf)
199 min_scrubrate = 0x0;
170 200
171 return __amd64_set_scrub_rate(pvt->F3, bw, pvt->min_scrubrate); 201 return __amd64_set_scrub_rate(pvt->F3, bw, min_scrubrate);
172} 202}
173 203
174static int amd64_get_scrub_rate(struct mem_ctl_info *mci) 204static int amd64_get_scrub_rate(struct mem_ctl_info *mci)
@@ -177,7 +207,7 @@ static int amd64_get_scrub_rate(struct mem_ctl_info *mci)
177 u32 scrubval = 0; 207 u32 scrubval = 0;
178 int i, retval = -EINVAL; 208 int i, retval = -EINVAL;
179 209
180 amd64_read_pci_cfg(pvt->F3, K8_SCRCTRL, &scrubval); 210 amd64_read_pci_cfg(pvt->F3, SCRCTRL, &scrubval);
181 211
182 scrubval = scrubval & 0x001F; 212 scrubval = scrubval & 0x001F;
183 213
@@ -192,63 +222,14 @@ static int amd64_get_scrub_rate(struct mem_ctl_info *mci)
192 return retval; 222 return retval;
193} 223}
194 224
195/* Map from a CSROW entry to the mask entry that operates on it */
196static inline u32 amd64_map_to_dcs_mask(struct amd64_pvt *pvt, int csrow)
197{
198 if (boot_cpu_data.x86 == 0xf && pvt->ext_model < K8_REV_F)
199 return csrow;
200 else
201 return csrow >> 1;
202}
203
204/* return the 'base' address the i'th CS entry of the 'dct' DRAM controller */
205static u32 amd64_get_dct_base(struct amd64_pvt *pvt, int dct, int csrow)
206{
207 if (dct == 0)
208 return pvt->dcsb0[csrow];
209 else
210 return pvt->dcsb1[csrow];
211}
212
213/*
214 * Return the 'mask' address the i'th CS entry. This function is needed because
215 * there number of DCSM registers on Rev E and prior vs Rev F and later is
216 * different.
217 */
218static u32 amd64_get_dct_mask(struct amd64_pvt *pvt, int dct, int csrow)
219{
220 if (dct == 0)
221 return pvt->dcsm0[amd64_map_to_dcs_mask(pvt, csrow)];
222 else
223 return pvt->dcsm1[amd64_map_to_dcs_mask(pvt, csrow)];
224}
225
226
227/* 225/*
228 * In *base and *limit, pass back the full 40-bit base and limit physical 226 * returns true if the SysAddr given by sys_addr matches the
229 * addresses for the node given by node_id. This information is obtained from 227 * DRAM base/limit associated with node_id
230 * DRAM Base (section 3.4.4.1) and DRAM Limit (section 3.4.4.2) registers. The
231 * base and limit addresses are of type SysAddr, as defined at the start of
232 * section 3.4.4 (p. 70). They are the lowest and highest physical addresses
233 * in the address range they represent.
234 */ 228 */
235static void amd64_get_base_and_limit(struct amd64_pvt *pvt, int node_id, 229static bool amd64_base_limit_match(struct amd64_pvt *pvt, u64 sys_addr,
236 u64 *base, u64 *limit) 230 unsigned nid)
237{ 231{
238 *base = pvt->dram_base[node_id]; 232 u64 addr;
239 *limit = pvt->dram_limit[node_id];
240}
241
242/*
243 * Return 1 if the SysAddr given by sys_addr matches the base/limit associated
244 * with node_id
245 */
246static int amd64_base_limit_match(struct amd64_pvt *pvt,
247 u64 sys_addr, int node_id)
248{
249 u64 base, limit, addr;
250
251 amd64_get_base_and_limit(pvt, node_id, &base, &limit);
252 233
253 /* The K8 treats this as a 40-bit value. However, bits 63-40 will be 234 /* The K8 treats this as a 40-bit value. However, bits 63-40 will be
254 * all ones if the most significant implemented address bit is 1. 235 * all ones if the most significant implemented address bit is 1.
@@ -258,7 +239,8 @@ static int amd64_base_limit_match(struct amd64_pvt *pvt,
258 */ 239 */
259 addr = sys_addr & 0x000000ffffffffffull; 240 addr = sys_addr & 0x000000ffffffffffull;
260 241
261 return (addr >= base) && (addr <= limit); 242 return ((addr >= get_dram_base(pvt, nid)) &&
243 (addr <= get_dram_limit(pvt, nid)));
262} 244}
263 245
264/* 246/*
@@ -271,7 +253,7 @@ static struct mem_ctl_info *find_mc_by_sys_addr(struct mem_ctl_info *mci,
271 u64 sys_addr) 253 u64 sys_addr)
272{ 254{
273 struct amd64_pvt *pvt; 255 struct amd64_pvt *pvt;
274 int node_id; 256 unsigned node_id;
275 u32 intlv_en, bits; 257 u32 intlv_en, bits;
276 258
277 /* 259 /*
@@ -285,10 +267,10 @@ static struct mem_ctl_info *find_mc_by_sys_addr(struct mem_ctl_info *mci,
285 * registers. Therefore we arbitrarily choose to read it from the 267 * registers. Therefore we arbitrarily choose to read it from the
286 * register for node 0. 268 * register for node 0.
287 */ 269 */
288 intlv_en = pvt->dram_IntlvEn[0]; 270 intlv_en = dram_intlv_en(pvt, 0);
289 271
290 if (intlv_en == 0) { 272 if (intlv_en == 0) {
291 for (node_id = 0; node_id < DRAM_REG_COUNT; node_id++) { 273 for (node_id = 0; node_id < DRAM_RANGES; node_id++) {
292 if (amd64_base_limit_match(pvt, sys_addr, node_id)) 274 if (amd64_base_limit_match(pvt, sys_addr, node_id))
293 goto found; 275 goto found;
294 } 276 }
@@ -305,10 +287,10 @@ static struct mem_ctl_info *find_mc_by_sys_addr(struct mem_ctl_info *mci,
305 bits = (((u32) sys_addr) >> 12) & intlv_en; 287 bits = (((u32) sys_addr) >> 12) & intlv_en;
306 288
307 for (node_id = 0; ; ) { 289 for (node_id = 0; ; ) {
308 if ((pvt->dram_IntlvSel[node_id] & intlv_en) == bits) 290 if ((dram_intlv_sel(pvt, node_id) & intlv_en) == bits)
309 break; /* intlv_sel field matches */ 291 break; /* intlv_sel field matches */
310 292
311 if (++node_id >= DRAM_REG_COUNT) 293 if (++node_id >= DRAM_RANGES)
312 goto err_no_match; 294 goto err_no_match;
313 } 295 }
314 296
@@ -321,7 +303,7 @@ static struct mem_ctl_info *find_mc_by_sys_addr(struct mem_ctl_info *mci,
321 } 303 }
322 304
323found: 305found:
324 return edac_mc_find(node_id); 306 return edac_mc_find((int)node_id);
325 307
326err_no_match: 308err_no_match:
327 debugf2("sys_addr 0x%lx doesn't match any node\n", 309 debugf2("sys_addr 0x%lx doesn't match any node\n",
@@ -331,37 +313,50 @@ err_no_match:
331} 313}
332 314
333/* 315/*
334 * Extract the DRAM CS base address from selected csrow register. 316 * compute the CS base address of the @csrow on the DRAM controller @dct.
317 * For details see F2x[5C:40] in the processor's BKDG
335 */ 318 */
336static u64 base_from_dct_base(struct amd64_pvt *pvt, int csrow) 319static void get_cs_base_and_mask(struct amd64_pvt *pvt, int csrow, u8 dct,
320 u64 *base, u64 *mask)
337{ 321{
338 return ((u64) (amd64_get_dct_base(pvt, 0, csrow) & pvt->dcsb_base)) << 322 u64 csbase, csmask, base_bits, mask_bits;
339 pvt->dcs_shift; 323 u8 addr_shift;
340}
341 324
342/* 325 if (boot_cpu_data.x86 == 0xf && pvt->ext_model < K8_REV_F) {
343 * Extract the mask from the dcsb0[csrow] entry in a CPU revision-specific way. 326 csbase = pvt->csels[dct].csbases[csrow];
344 */ 327 csmask = pvt->csels[dct].csmasks[csrow];
345static u64 mask_from_dct_mask(struct amd64_pvt *pvt, int csrow) 328 base_bits = GENMASK(21, 31) | GENMASK(9, 15);
346{ 329 mask_bits = GENMASK(21, 29) | GENMASK(9, 15);
347 u64 dcsm_bits, other_bits; 330 addr_shift = 4;
348 u64 mask; 331 } else {
349 332 csbase = pvt->csels[dct].csbases[csrow];
350 /* Extract bits from DRAM CS Mask. */ 333 csmask = pvt->csels[dct].csmasks[csrow >> 1];
351 dcsm_bits = amd64_get_dct_mask(pvt, 0, csrow) & pvt->dcsm_mask; 334 addr_shift = 8;
352 335
353 other_bits = pvt->dcsm_mask; 336 if (boot_cpu_data.x86 == 0x15)
354 other_bits = ~(other_bits << pvt->dcs_shift); 337 base_bits = mask_bits = GENMASK(19,30) | GENMASK(5,13);
338 else
339 base_bits = mask_bits = GENMASK(19,28) | GENMASK(5,13);
340 }
355 341
356 /* 342 *base = (csbase & base_bits) << addr_shift;
357 * The extracted bits from DCSM belong in the spaces represented by
358 * the cleared bits in other_bits.
359 */
360 mask = (dcsm_bits << pvt->dcs_shift) | other_bits;
361 343
362 return mask; 344 *mask = ~0ULL;
345 /* poke holes for the csmask */
346 *mask &= ~(mask_bits << addr_shift);
347 /* OR them in */
348 *mask |= (csmask & mask_bits) << addr_shift;
363} 349}
364 350
351#define for_each_chip_select(i, dct, pvt) \
352 for (i = 0; i < pvt->csels[dct].b_cnt; i++)
353
354#define chip_select_base(i, dct, pvt) \
355 pvt->csels[dct].csbases[i]
356
357#define for_each_chip_select_mask(i, dct, pvt) \
358 for (i = 0; i < pvt->csels[dct].m_cnt; i++)
359
365/* 360/*
366 * @input_addr is an InputAddr associated with the node given by mci. Return the 361 * @input_addr is an InputAddr associated with the node given by mci. Return the
367 * csrow that input_addr maps to, or -1 on failure (no csrow claims input_addr). 362 * csrow that input_addr maps to, or -1 on failure (no csrow claims input_addr).
@@ -374,19 +369,13 @@ static int input_addr_to_csrow(struct mem_ctl_info *mci, u64 input_addr)
374 369
375 pvt = mci->pvt_info; 370 pvt = mci->pvt_info;
376 371
377 /* 372 for_each_chip_select(csrow, 0, pvt) {
378 * Here we use the DRAM CS Base and DRAM CS Mask registers. For each CS 373 if (!csrow_enabled(csrow, 0, pvt))
379 * base/mask register pair, test the condition shown near the start of
380 * section 3.5.4 (p. 84, BKDG #26094, K8, revA-E).
381 */
382 for (csrow = 0; csrow < pvt->cs_count; csrow++) {
383
384 /* This DRAM chip select is disabled on this node */
385 if ((pvt->dcsb0[csrow] & K8_DCSB_CS_ENABLE) == 0)
386 continue; 374 continue;
387 375
388 base = base_from_dct_base(pvt, csrow); 376 get_cs_base_and_mask(pvt, csrow, 0, &base, &mask);
389 mask = ~mask_from_dct_mask(pvt, csrow); 377
378 mask = ~mask;
390 379
391 if ((input_addr & mask) == (base & mask)) { 380 if ((input_addr & mask) == (base & mask)) {
392 debugf2("InputAddr 0x%lx matches csrow %d (node %d)\n", 381 debugf2("InputAddr 0x%lx matches csrow %d (node %d)\n",
@@ -396,7 +385,6 @@ static int input_addr_to_csrow(struct mem_ctl_info *mci, u64 input_addr)
396 return csrow; 385 return csrow;
397 } 386 }
398 } 387 }
399
400 debugf2("no matching csrow for InputAddr 0x%lx (MC node %d)\n", 388 debugf2("no matching csrow for InputAddr 0x%lx (MC node %d)\n",
401 (unsigned long)input_addr, pvt->mc_node_id); 389 (unsigned long)input_addr, pvt->mc_node_id);
402 390
@@ -404,19 +392,6 @@ static int input_addr_to_csrow(struct mem_ctl_info *mci, u64 input_addr)
404} 392}
405 393
406/* 394/*
407 * Return the base value defined by the DRAM Base register for the node
408 * represented by mci. This function returns the full 40-bit value despite the
409 * fact that the register only stores bits 39-24 of the value. See section
410 * 3.4.4.1 (BKDG #26094, K8, revA-E)
411 */
412static inline u64 get_dram_base(struct mem_ctl_info *mci)
413{
414 struct amd64_pvt *pvt = mci->pvt_info;
415
416 return pvt->dram_base[pvt->mc_node_id];
417}
418
419/*
420 * Obtain info from the DRAM Hole Address Register (section 3.4.8, pub #26094) 395 * Obtain info from the DRAM Hole Address Register (section 3.4.8, pub #26094)
421 * for the node represented by mci. Info is passed back in *hole_base, 396 * for the node represented by mci. Info is passed back in *hole_base,
422 * *hole_offset, and *hole_size. Function returns 0 if info is valid or 1 if 397 * *hole_offset, and *hole_size. Function returns 0 if info is valid or 1 if
@@ -445,14 +420,13 @@ int amd64_get_dram_hole_info(struct mem_ctl_info *mci, u64 *hole_base,
445 return 1; 420 return 1;
446 } 421 }
447 422
448 /* only valid for Fam10h */ 423 /* valid for Fam10h and above */
449 if (boot_cpu_data.x86 == 0x10 && 424 if (boot_cpu_data.x86 >= 0x10 && !dhar_mem_hoist_valid(pvt)) {
450 (pvt->dhar & F10_DRAM_MEM_HOIST_VALID) == 0) {
451 debugf1(" Dram Memory Hoisting is DISABLED on this system\n"); 425 debugf1(" Dram Memory Hoisting is DISABLED on this system\n");
452 return 1; 426 return 1;
453 } 427 }
454 428
455 if ((pvt->dhar & DHAR_VALID) == 0) { 429 if (!dhar_valid(pvt)) {
456 debugf1(" Dram Memory Hoisting is DISABLED on this node %d\n", 430 debugf1(" Dram Memory Hoisting is DISABLED on this node %d\n",
457 pvt->mc_node_id); 431 pvt->mc_node_id);
458 return 1; 432 return 1;
@@ -476,15 +450,15 @@ int amd64_get_dram_hole_info(struct mem_ctl_info *mci, u64 *hole_base,
476 * addresses in the hole so that they start at 0x100000000. 450 * addresses in the hole so that they start at 0x100000000.
477 */ 451 */
478 452
479 base = dhar_base(pvt->dhar); 453 base = dhar_base(pvt);
480 454
481 *hole_base = base; 455 *hole_base = base;
482 *hole_size = (0x1ull << 32) - base; 456 *hole_size = (0x1ull << 32) - base;
483 457
484 if (boot_cpu_data.x86 > 0xf) 458 if (boot_cpu_data.x86 > 0xf)
485 *hole_offset = f10_dhar_offset(pvt->dhar); 459 *hole_offset = f10_dhar_offset(pvt);
486 else 460 else
487 *hole_offset = k8_dhar_offset(pvt->dhar); 461 *hole_offset = k8_dhar_offset(pvt);
488 462
489 debugf1(" DHAR info for node %d base 0x%lx offset 0x%lx size 0x%lx\n", 463 debugf1(" DHAR info for node %d base 0x%lx offset 0x%lx size 0x%lx\n",
490 pvt->mc_node_id, (unsigned long)*hole_base, 464 pvt->mc_node_id, (unsigned long)*hole_base,
@@ -525,10 +499,11 @@ EXPORT_SYMBOL_GPL(amd64_get_dram_hole_info);
525 */ 499 */
526static u64 sys_addr_to_dram_addr(struct mem_ctl_info *mci, u64 sys_addr) 500static u64 sys_addr_to_dram_addr(struct mem_ctl_info *mci, u64 sys_addr)
527{ 501{
502 struct amd64_pvt *pvt = mci->pvt_info;
528 u64 dram_base, hole_base, hole_offset, hole_size, dram_addr; 503 u64 dram_base, hole_base, hole_offset, hole_size, dram_addr;
529 int ret = 0; 504 int ret = 0;
530 505
531 dram_base = get_dram_base(mci); 506 dram_base = get_dram_base(pvt, pvt->mc_node_id);
532 507
533 ret = amd64_get_dram_hole_info(mci, &hole_base, &hole_offset, 508 ret = amd64_get_dram_hole_info(mci, &hole_base, &hole_offset,
534 &hole_size); 509 &hole_size);
@@ -556,7 +531,7 @@ static u64 sys_addr_to_dram_addr(struct mem_ctl_info *mci, u64 sys_addr)
556 * section 3.4.2 of AMD publication 24592: AMD x86-64 Architecture 531 * section 3.4.2 of AMD publication 24592: AMD x86-64 Architecture
557 * Programmer's Manual Volume 1 Application Programming. 532 * Programmer's Manual Volume 1 Application Programming.
558 */ 533 */
559 dram_addr = (sys_addr & 0xffffffffffull) - dram_base; 534 dram_addr = (sys_addr & GENMASK(0, 39)) - dram_base;
560 535
561 debugf2("using DRAM Base register to translate SysAddr 0x%lx to " 536 debugf2("using DRAM Base register to translate SysAddr 0x%lx to "
562 "DramAddr 0x%lx\n", (unsigned long)sys_addr, 537 "DramAddr 0x%lx\n", (unsigned long)sys_addr,
@@ -592,9 +567,9 @@ static u64 dram_addr_to_input_addr(struct mem_ctl_info *mci, u64 dram_addr)
592 * See the start of section 3.4.4 (p. 70, BKDG #26094, K8, revA-E) 567 * See the start of section 3.4.4 (p. 70, BKDG #26094, K8, revA-E)
593 * concerning translating a DramAddr to an InputAddr. 568 * concerning translating a DramAddr to an InputAddr.
594 */ 569 */
595 intlv_shift = num_node_interleave_bits(pvt->dram_IntlvEn[0]); 570 intlv_shift = num_node_interleave_bits(dram_intlv_en(pvt, 0));
596 input_addr = ((dram_addr >> intlv_shift) & 0xffffff000ull) + 571 input_addr = ((dram_addr >> intlv_shift) & GENMASK(12, 35)) +
597 (dram_addr & 0xfff); 572 (dram_addr & 0xfff);
598 573
599 debugf2(" Intlv Shift=%d DramAddr=0x%lx maps to InputAddr=0x%lx\n", 574 debugf2(" Intlv Shift=%d DramAddr=0x%lx maps to InputAddr=0x%lx\n",
600 intlv_shift, (unsigned long)dram_addr, 575 intlv_shift, (unsigned long)dram_addr,
@@ -628,7 +603,7 @@ static u64 sys_addr_to_input_addr(struct mem_ctl_info *mci, u64 sys_addr)
628static u64 input_addr_to_dram_addr(struct mem_ctl_info *mci, u64 input_addr) 603static u64 input_addr_to_dram_addr(struct mem_ctl_info *mci, u64 input_addr)
629{ 604{
630 struct amd64_pvt *pvt; 605 struct amd64_pvt *pvt;
631 int node_id, intlv_shift; 606 unsigned node_id, intlv_shift;
632 u64 bits, dram_addr; 607 u64 bits, dram_addr;
633 u32 intlv_sel; 608 u32 intlv_sel;
634 609
@@ -642,10 +617,10 @@ static u64 input_addr_to_dram_addr(struct mem_ctl_info *mci, u64 input_addr)
642 */ 617 */
643 pvt = mci->pvt_info; 618 pvt = mci->pvt_info;
644 node_id = pvt->mc_node_id; 619 node_id = pvt->mc_node_id;
645 BUG_ON((node_id < 0) || (node_id > 7));
646 620
647 intlv_shift = num_node_interleave_bits(pvt->dram_IntlvEn[0]); 621 BUG_ON(node_id > 7);
648 622
623 intlv_shift = num_node_interleave_bits(dram_intlv_en(pvt, 0));
649 if (intlv_shift == 0) { 624 if (intlv_shift == 0) {
650 debugf1(" InputAddr 0x%lx translates to DramAddr of " 625 debugf1(" InputAddr 0x%lx translates to DramAddr of "
651 "same value\n", (unsigned long)input_addr); 626 "same value\n", (unsigned long)input_addr);
@@ -653,10 +628,10 @@ static u64 input_addr_to_dram_addr(struct mem_ctl_info *mci, u64 input_addr)
653 return input_addr; 628 return input_addr;
654 } 629 }
655 630
656 bits = ((input_addr & 0xffffff000ull) << intlv_shift) + 631 bits = ((input_addr & GENMASK(12, 35)) << intlv_shift) +
657 (input_addr & 0xfff); 632 (input_addr & 0xfff);
658 633
659 intlv_sel = pvt->dram_IntlvSel[node_id] & ((1 << intlv_shift) - 1); 634 intlv_sel = dram_intlv_sel(pvt, node_id) & ((1 << intlv_shift) - 1);
660 dram_addr = bits + (intlv_sel << 12); 635 dram_addr = bits + (intlv_sel << 12);
661 636
662 debugf1("InputAddr 0x%lx translates to DramAddr 0x%lx " 637 debugf1("InputAddr 0x%lx translates to DramAddr 0x%lx "
@@ -673,7 +648,7 @@ static u64 input_addr_to_dram_addr(struct mem_ctl_info *mci, u64 input_addr)
673static u64 dram_addr_to_sys_addr(struct mem_ctl_info *mci, u64 dram_addr) 648static u64 dram_addr_to_sys_addr(struct mem_ctl_info *mci, u64 dram_addr)
674{ 649{
675 struct amd64_pvt *pvt = mci->pvt_info; 650 struct amd64_pvt *pvt = mci->pvt_info;
676 u64 hole_base, hole_offset, hole_size, base, limit, sys_addr; 651 u64 hole_base, hole_offset, hole_size, base, sys_addr;
677 int ret = 0; 652 int ret = 0;
678 653
679 ret = amd64_get_dram_hole_info(mci, &hole_base, &hole_offset, 654 ret = amd64_get_dram_hole_info(mci, &hole_base, &hole_offset,
@@ -691,7 +666,7 @@ static u64 dram_addr_to_sys_addr(struct mem_ctl_info *mci, u64 dram_addr)
691 } 666 }
692 } 667 }
693 668
694 amd64_get_base_and_limit(pvt, pvt->mc_node_id, &base, &limit); 669 base = get_dram_base(pvt, pvt->mc_node_id);
695 sys_addr = dram_addr + base; 670 sys_addr = dram_addr + base;
696 671
697 /* 672 /*
@@ -736,13 +711,12 @@ static void find_csrow_limits(struct mem_ctl_info *mci, int csrow,
736 u64 base, mask; 711 u64 base, mask;
737 712
738 pvt = mci->pvt_info; 713 pvt = mci->pvt_info;
739 BUG_ON((csrow < 0) || (csrow >= pvt->cs_count)); 714 BUG_ON((csrow < 0) || (csrow >= pvt->csels[0].b_cnt));
740 715
741 base = base_from_dct_base(pvt, csrow); 716 get_cs_base_and_mask(pvt, csrow, 0, &base, &mask);
742 mask = mask_from_dct_mask(pvt, csrow);
743 717
744 *input_addr_min = base & ~mask; 718 *input_addr_min = base & ~mask;
745 *input_addr_max = base | mask | pvt->dcs_mask_notused; 719 *input_addr_max = base | mask;
746} 720}
747 721
748/* Map the Error address to a PAGE and PAGE OFFSET. */ 722/* Map the Error address to a PAGE and PAGE OFFSET. */
@@ -775,18 +749,13 @@ static int sys_addr_to_csrow(struct mem_ctl_info *mci, u64 sys_addr)
775 749
776static int get_channel_from_ecc_syndrome(struct mem_ctl_info *, u16); 750static int get_channel_from_ecc_syndrome(struct mem_ctl_info *, u16);
777 751
778static u16 extract_syndrome(struct err_regs *err)
779{
780 return ((err->nbsh >> 15) & 0xff) | ((err->nbsl >> 16) & 0xff00);
781}
782
783/* 752/*
784 * Determine if the DIMMs have ECC enabled. ECC is enabled ONLY if all the DIMMs 753 * Determine if the DIMMs have ECC enabled. ECC is enabled ONLY if all the DIMMs
785 * are ECC capable. 754 * are ECC capable.
786 */ 755 */
787static enum edac_type amd64_determine_edac_cap(struct amd64_pvt *pvt) 756static enum edac_type amd64_determine_edac_cap(struct amd64_pvt *pvt)
788{ 757{
789 int bit; 758 u8 bit;
790 enum dev_type edac_cap = EDAC_FLAG_NONE; 759 enum dev_type edac_cap = EDAC_FLAG_NONE;
791 760
792 bit = (boot_cpu_data.x86 > 0xf || pvt->ext_model >= K8_REV_F) 761 bit = (boot_cpu_data.x86 > 0xf || pvt->ext_model >= K8_REV_F)
@@ -799,8 +768,7 @@ static enum edac_type amd64_determine_edac_cap(struct amd64_pvt *pvt)
799 return edac_cap; 768 return edac_cap;
800} 769}
801 770
802 771static void amd64_debug_display_dimm_sizes(struct amd64_pvt *, u8);
803static void amd64_debug_display_dimm_sizes(int ctrl, struct amd64_pvt *pvt);
804 772
805static void amd64_dump_dramcfg_low(u32 dclr, int chan) 773static void amd64_dump_dramcfg_low(u32 dclr, int chan)
806{ 774{
@@ -813,8 +781,9 @@ static void amd64_dump_dramcfg_low(u32 dclr, int chan)
813 debugf1(" PAR/ERR parity: %s\n", 781 debugf1(" PAR/ERR parity: %s\n",
814 (dclr & BIT(8)) ? "enabled" : "disabled"); 782 (dclr & BIT(8)) ? "enabled" : "disabled");
815 783
816 debugf1(" DCT 128bit mode width: %s\n", 784 if (boot_cpu_data.x86 == 0x10)
817 (dclr & BIT(11)) ? "128b" : "64b"); 785 debugf1(" DCT 128bit mode width: %s\n",
786 (dclr & BIT(11)) ? "128b" : "64b");
818 787
819 debugf1(" x4 logical DIMMs present: L0: %s L1: %s L2: %s L3: %s\n", 788 debugf1(" x4 logical DIMMs present: L0: %s L1: %s L2: %s L3: %s\n",
820 (dclr & BIT(12)) ? "yes" : "no", 789 (dclr & BIT(12)) ? "yes" : "no",
@@ -824,16 +793,16 @@ static void amd64_dump_dramcfg_low(u32 dclr, int chan)
824} 793}
825 794
826/* Display and decode various NB registers for debug purposes. */ 795/* Display and decode various NB registers for debug purposes. */
827static void amd64_dump_misc_regs(struct amd64_pvt *pvt) 796static void dump_misc_regs(struct amd64_pvt *pvt)
828{ 797{
829 debugf1("F3xE8 (NB Cap): 0x%08x\n", pvt->nbcap); 798 debugf1("F3xE8 (NB Cap): 0x%08x\n", pvt->nbcap);
830 799
831 debugf1(" NB two channel DRAM capable: %s\n", 800 debugf1(" NB two channel DRAM capable: %s\n",
832 (pvt->nbcap & K8_NBCAP_DCT_DUAL) ? "yes" : "no"); 801 (pvt->nbcap & NBCAP_DCT_DUAL) ? "yes" : "no");
833 802
834 debugf1(" ECC capable: %s, ChipKill ECC capable: %s\n", 803 debugf1(" ECC capable: %s, ChipKill ECC capable: %s\n",
835 (pvt->nbcap & K8_NBCAP_SECDED) ? "yes" : "no", 804 (pvt->nbcap & NBCAP_SECDED) ? "yes" : "no",
836 (pvt->nbcap & K8_NBCAP_CHIPKILL) ? "yes" : "no"); 805 (pvt->nbcap & NBCAP_CHIPKILL) ? "yes" : "no");
837 806
838 amd64_dump_dramcfg_low(pvt->dclr0, 0); 807 amd64_dump_dramcfg_low(pvt->dclr0, 0);
839 808
@@ -841,130 +810,84 @@ static void amd64_dump_misc_regs(struct amd64_pvt *pvt)
841 810
842 debugf1("F1xF0 (DRAM Hole Address): 0x%08x, base: 0x%08x, " 811 debugf1("F1xF0 (DRAM Hole Address): 0x%08x, base: 0x%08x, "
843 "offset: 0x%08x\n", 812 "offset: 0x%08x\n",
844 pvt->dhar, 813 pvt->dhar, dhar_base(pvt),
845 dhar_base(pvt->dhar), 814 (boot_cpu_data.x86 == 0xf) ? k8_dhar_offset(pvt)
846 (boot_cpu_data.x86 == 0xf) ? k8_dhar_offset(pvt->dhar) 815 : f10_dhar_offset(pvt));
847 : f10_dhar_offset(pvt->dhar));
848 816
849 debugf1(" DramHoleValid: %s\n", 817 debugf1(" DramHoleValid: %s\n", dhar_valid(pvt) ? "yes" : "no");
850 (pvt->dhar & DHAR_VALID) ? "yes" : "no");
851 818
852 amd64_debug_display_dimm_sizes(0, pvt); 819 amd64_debug_display_dimm_sizes(pvt, 0);
853 820
854 /* everything below this point is Fam10h and above */ 821 /* everything below this point is Fam10h and above */
855 if (boot_cpu_data.x86 == 0xf) 822 if (boot_cpu_data.x86 == 0xf)
856 return; 823 return;
857 824
858 amd64_debug_display_dimm_sizes(1, pvt); 825 amd64_debug_display_dimm_sizes(pvt, 1);
859 826
860 amd64_info("using %s syndromes.\n", ((pvt->syn_type == 8) ? "x8" : "x4")); 827 amd64_info("using %s syndromes.\n", ((pvt->ecc_sym_sz == 8) ? "x8" : "x4"));
861 828
862 /* Only if NOT ganged does dclr1 have valid info */ 829 /* Only if NOT ganged does dclr1 have valid info */
863 if (!dct_ganging_enabled(pvt)) 830 if (!dct_ganging_enabled(pvt))
864 amd64_dump_dramcfg_low(pvt->dclr1, 1); 831 amd64_dump_dramcfg_low(pvt->dclr1, 1);
865} 832}
866 833
867/* Read in both of DBAM registers */
868static void amd64_read_dbam_reg(struct amd64_pvt *pvt)
869{
870 amd64_read_pci_cfg(pvt->F2, DBAM0, &pvt->dbam0);
871
872 if (boot_cpu_data.x86 >= 0x10)
873 amd64_read_pci_cfg(pvt->F2, DBAM1, &pvt->dbam1);
874}
875
876/* 834/*
877 * NOTE: CPU Revision Dependent code: Rev E and Rev F 835 * see BKDG, F2x[1,0][5C:40], F2[1,0][6C:60]
878 *
879 * Set the DCSB and DCSM mask values depending on the CPU revision value. Also
880 * set the shift factor for the DCSB and DCSM values.
881 *
882 * ->dcs_mask_notused, RevE:
883 *
884 * To find the max InputAddr for the csrow, start with the base address and set
885 * all bits that are "don't care" bits in the test at the start of section
886 * 3.5.4 (p. 84).
887 *
888 * The "don't care" bits are all set bits in the mask and all bits in the gaps
889 * between bit ranges [35:25] and [19:13]. The value REV_E_DCS_NOTUSED_BITS
890 * represents bits [24:20] and [12:0], which are all bits in the above-mentioned
891 * gaps.
892 *
893 * ->dcs_mask_notused, RevF and later:
894 *
895 * To find the max InputAddr for the csrow, start with the base address and set
896 * all bits that are "don't care" bits in the test at the start of NPT section
897 * 4.5.4 (p. 87).
898 *
899 * The "don't care" bits are all set bits in the mask and all bits in the gaps
900 * between bit ranges [36:27] and [21:13].
901 *
902 * The value REV_F_F1Xh_DCS_NOTUSED_BITS represents bits [26:22] and [12:0],
903 * which are all bits in the above-mentioned gaps.
904 */ 836 */
905static void amd64_set_dct_base_and_mask(struct amd64_pvt *pvt) 837static void prep_chip_selects(struct amd64_pvt *pvt)
906{ 838{
907
908 if (boot_cpu_data.x86 == 0xf && pvt->ext_model < K8_REV_F) { 839 if (boot_cpu_data.x86 == 0xf && pvt->ext_model < K8_REV_F) {
909 pvt->dcsb_base = REV_E_DCSB_BASE_BITS; 840 pvt->csels[0].b_cnt = pvt->csels[1].b_cnt = 8;
910 pvt->dcsm_mask = REV_E_DCSM_MASK_BITS; 841 pvt->csels[0].m_cnt = pvt->csels[1].m_cnt = 8;
911 pvt->dcs_mask_notused = REV_E_DCS_NOTUSED_BITS;
912 pvt->dcs_shift = REV_E_DCS_SHIFT;
913 pvt->cs_count = 8;
914 pvt->num_dcsm = 8;
915 } else { 842 } else {
916 pvt->dcsb_base = REV_F_F1Xh_DCSB_BASE_BITS; 843 pvt->csels[0].b_cnt = pvt->csels[1].b_cnt = 8;
917 pvt->dcsm_mask = REV_F_F1Xh_DCSM_MASK_BITS; 844 pvt->csels[0].m_cnt = pvt->csels[1].m_cnt = 4;
918 pvt->dcs_mask_notused = REV_F_F1Xh_DCS_NOTUSED_BITS;
919 pvt->dcs_shift = REV_F_F1Xh_DCS_SHIFT;
920 pvt->cs_count = 8;
921 pvt->num_dcsm = 4;
922 } 845 }
923} 846}
924 847
925/* 848/*
926 * Function 2 Offset F10_DCSB0; read in the DCS Base and DCS Mask hw registers 849 * Function 2 Offset F10_DCSB0; read in the DCS Base and DCS Mask registers
927 */ 850 */
928static void amd64_read_dct_base_mask(struct amd64_pvt *pvt) 851static void read_dct_base_mask(struct amd64_pvt *pvt)
929{ 852{
930 int cs, reg; 853 int cs;
931 854
932 amd64_set_dct_base_and_mask(pvt); 855 prep_chip_selects(pvt);
933 856
934 for (cs = 0; cs < pvt->cs_count; cs++) { 857 for_each_chip_select(cs, 0, pvt) {
935 reg = K8_DCSB0 + (cs * 4); 858 int reg0 = DCSB0 + (cs * 4);
936 if (!amd64_read_pci_cfg(pvt->F2, reg, &pvt->dcsb0[cs])) 859 int reg1 = DCSB1 + (cs * 4);
860 u32 *base0 = &pvt->csels[0].csbases[cs];
861 u32 *base1 = &pvt->csels[1].csbases[cs];
862
863 if (!amd64_read_dct_pci_cfg(pvt, reg0, base0))
937 debugf0(" DCSB0[%d]=0x%08x reg: F2x%x\n", 864 debugf0(" DCSB0[%d]=0x%08x reg: F2x%x\n",
938 cs, pvt->dcsb0[cs], reg); 865 cs, *base0, reg0);
939 866
940 /* If DCT are NOT ganged, then read in DCT1's base */ 867 if (boot_cpu_data.x86 == 0xf || dct_ganging_enabled(pvt))
941 if (boot_cpu_data.x86 >= 0x10 && !dct_ganging_enabled(pvt)) { 868 continue;
942 reg = F10_DCSB1 + (cs * 4); 869
943 if (!amd64_read_pci_cfg(pvt->F2, reg, 870 if (!amd64_read_dct_pci_cfg(pvt, reg1, base1))
944 &pvt->dcsb1[cs])) 871 debugf0(" DCSB1[%d]=0x%08x reg: F2x%x\n",
945 debugf0(" DCSB1[%d]=0x%08x reg: F2x%x\n", 872 cs, *base1, reg1);
946 cs, pvt->dcsb1[cs], reg);
947 } else {
948 pvt->dcsb1[cs] = 0;
949 }
950 } 873 }
951 874
952 for (cs = 0; cs < pvt->num_dcsm; cs++) { 875 for_each_chip_select_mask(cs, 0, pvt) {
953 reg = K8_DCSM0 + (cs * 4); 876 int reg0 = DCSM0 + (cs * 4);
954 if (!amd64_read_pci_cfg(pvt->F2, reg, &pvt->dcsm0[cs])) 877 int reg1 = DCSM1 + (cs * 4);
878 u32 *mask0 = &pvt->csels[0].csmasks[cs];
879 u32 *mask1 = &pvt->csels[1].csmasks[cs];
880
881 if (!amd64_read_dct_pci_cfg(pvt, reg0, mask0))
955 debugf0(" DCSM0[%d]=0x%08x reg: F2x%x\n", 882 debugf0(" DCSM0[%d]=0x%08x reg: F2x%x\n",
956 cs, pvt->dcsm0[cs], reg); 883 cs, *mask0, reg0);
957 884
958 /* If DCT are NOT ganged, then read in DCT1's mask */ 885 if (boot_cpu_data.x86 == 0xf || dct_ganging_enabled(pvt))
959 if (boot_cpu_data.x86 >= 0x10 && !dct_ganging_enabled(pvt)) { 886 continue;
960 reg = F10_DCSM1 + (cs * 4); 887
961 if (!amd64_read_pci_cfg(pvt->F2, reg, 888 if (!amd64_read_dct_pci_cfg(pvt, reg1, mask1))
962 &pvt->dcsm1[cs])) 889 debugf0(" DCSM1[%d]=0x%08x reg: F2x%x\n",
963 debugf0(" DCSM1[%d]=0x%08x reg: F2x%x\n", 890 cs, *mask1, reg1);
964 cs, pvt->dcsm1[cs], reg);
965 } else {
966 pvt->dcsm1[cs] = 0;
967 }
968 } 891 }
969} 892}
970 893
@@ -972,7 +895,10 @@ static enum mem_type amd64_determine_memory_type(struct amd64_pvt *pvt, int cs)
972{ 895{
973 enum mem_type type; 896 enum mem_type type;
974 897
975 if (boot_cpu_data.x86 >= 0x10 || pvt->ext_model >= K8_REV_F) { 898 /* F15h supports only DDR3 */
899 if (boot_cpu_data.x86 >= 0x15)
900 type = (pvt->dclr0 & BIT(16)) ? MEM_DDR3 : MEM_RDDR3;
901 else if (boot_cpu_data.x86 == 0x10 || pvt->ext_model >= K8_REV_F) {
976 if (pvt->dchr0 & DDR3_MODE) 902 if (pvt->dchr0 & DDR3_MODE)
977 type = (pvt->dclr0 & BIT(16)) ? MEM_DDR3 : MEM_RDDR3; 903 type = (pvt->dclr0 & BIT(16)) ? MEM_DDR3 : MEM_RDDR3;
978 else 904 else
@@ -986,26 +912,14 @@ static enum mem_type amd64_determine_memory_type(struct amd64_pvt *pvt, int cs)
986 return type; 912 return type;
987} 913}
988 914
989/* 915/* Get the number of DCT channels the memory controller is using. */
990 * Read the DRAM Configuration Low register. It differs between CG, D & E revs
991 * and the later RevF memory controllers (DDR vs DDR2)
992 *
993 * Return:
994 * number of memory channels in operation
995 * Pass back:
996 * contents of the DCL0_LOW register
997 */
998static int k8_early_channel_count(struct amd64_pvt *pvt) 916static int k8_early_channel_count(struct amd64_pvt *pvt)
999{ 917{
1000 int flag, err = 0; 918 int flag;
1001
1002 err = amd64_read_pci_cfg(pvt->F2, F10_DCLR_0, &pvt->dclr0);
1003 if (err)
1004 return err;
1005 919
1006 if (pvt->ext_model >= K8_REV_F) 920 if (pvt->ext_model >= K8_REV_F)
1007 /* RevF (NPT) and later */ 921 /* RevF (NPT) and later */
1008 flag = pvt->dclr0 & F10_WIDTH_128; 922 flag = pvt->dclr0 & WIDTH_128;
1009 else 923 else
1010 /* RevE and earlier */ 924 /* RevE and earlier */
1011 flag = pvt->dclr0 & REVE_WIDTH_128; 925 flag = pvt->dclr0 & REVE_WIDTH_128;
@@ -1016,55 +930,47 @@ static int k8_early_channel_count(struct amd64_pvt *pvt)
1016 return (flag) ? 2 : 1; 930 return (flag) ? 2 : 1;
1017} 931}
1018 932
1019/* extract the ERROR ADDRESS for the K8 CPUs */ 933/* On F10h and later ErrAddr is MC4_ADDR[47:1] */
1020static u64 k8_get_error_address(struct mem_ctl_info *mci, 934static u64 get_error_address(struct mce *m)
1021 struct err_regs *info)
1022{ 935{
1023 return (((u64) (info->nbeah & 0xff)) << 32) + 936 u8 start_bit = 1;
1024 (info->nbeal & ~0x03); 937 u8 end_bit = 47;
938
939 if (boot_cpu_data.x86 == 0xf) {
940 start_bit = 3;
941 end_bit = 39;
942 }
943
944 return m->addr & GENMASK(start_bit, end_bit);
1025} 945}
1026 946
1027/* 947static void read_dram_base_limit_regs(struct amd64_pvt *pvt, unsigned range)
1028 * Read the Base and Limit registers for K8 based Memory controllers; extract
1029 * fields from the 'raw' reg into separate data fields
1030 *
1031 * Isolates: BASE, LIMIT, IntlvEn, IntlvSel, RW_EN
1032 */
1033static void k8_read_dram_base_limit(struct amd64_pvt *pvt, int dram)
1034{ 948{
1035 u32 low; 949 int off = range << 3;
1036 u32 off = dram << 3; /* 8 bytes between DRAM entries */
1037 950
1038 amd64_read_pci_cfg(pvt->F1, K8_DRAM_BASE_LOW + off, &low); 951 amd64_read_pci_cfg(pvt->F1, DRAM_BASE_LO + off, &pvt->ranges[range].base.lo);
952 amd64_read_pci_cfg(pvt->F1, DRAM_LIMIT_LO + off, &pvt->ranges[range].lim.lo);
1039 953
1040 /* Extract parts into separate data entries */ 954 if (boot_cpu_data.x86 == 0xf)
1041 pvt->dram_base[dram] = ((u64) low & 0xFFFF0000) << 8; 955 return;
1042 pvt->dram_IntlvEn[dram] = (low >> 8) & 0x7;
1043 pvt->dram_rw_en[dram] = (low & 0x3);
1044 956
1045 amd64_read_pci_cfg(pvt->F1, K8_DRAM_LIMIT_LOW + off, &low); 957 if (!dram_rw(pvt, range))
958 return;
1046 959
1047 /* 960 amd64_read_pci_cfg(pvt->F1, DRAM_BASE_HI + off, &pvt->ranges[range].base.hi);
1048 * Extract parts into separate data entries. Limit is the HIGHEST memory 961 amd64_read_pci_cfg(pvt->F1, DRAM_LIMIT_HI + off, &pvt->ranges[range].lim.hi);
1049 * location of the region, so lower 24 bits need to be all ones
1050 */
1051 pvt->dram_limit[dram] = (((u64) low & 0xFFFF0000) << 8) | 0x00FFFFFF;
1052 pvt->dram_IntlvSel[dram] = (low >> 8) & 0x7;
1053 pvt->dram_DstNode[dram] = (low & 0x7);
1054} 962}
1055 963
1056static void k8_map_sysaddr_to_csrow(struct mem_ctl_info *mci, 964static void k8_map_sysaddr_to_csrow(struct mem_ctl_info *mci, u64 sys_addr,
1057 struct err_regs *err_info, u64 sys_addr) 965 u16 syndrome)
1058{ 966{
1059 struct mem_ctl_info *src_mci; 967 struct mem_ctl_info *src_mci;
968 struct amd64_pvt *pvt = mci->pvt_info;
1060 int channel, csrow; 969 int channel, csrow;
1061 u32 page, offset; 970 u32 page, offset;
1062 u16 syndrome;
1063
1064 syndrome = extract_syndrome(err_info);
1065 971
1066 /* CHIPKILL enabled */ 972 /* CHIPKILL enabled */
1067 if (err_info->nbcfg & K8_NBCFG_CHIPKILL) { 973 if (pvt->nbcfg & NBCFG_CHIPKILL) {
1068 channel = get_channel_from_ecc_syndrome(mci, syndrome); 974 channel = get_channel_from_ecc_syndrome(mci, syndrome);
1069 if (channel < 0) { 975 if (channel < 0) {
1070 /* 976 /*
@@ -1113,18 +1019,41 @@ static void k8_map_sysaddr_to_csrow(struct mem_ctl_info *mci,
1113 } 1019 }
1114} 1020}
1115 1021
1116static int k8_dbam_to_chip_select(struct amd64_pvt *pvt, int cs_mode) 1022static int ddr2_cs_size(unsigned i, bool dct_width)
1117{ 1023{
1118 int *dbam_map; 1024 unsigned shift = 0;
1119 1025
1120 if (pvt->ext_model >= K8_REV_F) 1026 if (i <= 2)
1121 dbam_map = ddr2_dbam; 1027 shift = i;
1122 else if (pvt->ext_model >= K8_REV_D) 1028 else if (!(i & 0x1))
1123 dbam_map = ddr2_dbam_revD; 1029 shift = i >> 1;
1124 else 1030 else
1125 dbam_map = ddr2_dbam_revCG; 1031 shift = (i + 1) >> 1;
1126 1032
1127 return dbam_map[cs_mode]; 1033 return 128 << (shift + !!dct_width);
1034}
1035
1036static int k8_dbam_to_chip_select(struct amd64_pvt *pvt, u8 dct,
1037 unsigned cs_mode)
1038{
1039 u32 dclr = dct ? pvt->dclr1 : pvt->dclr0;
1040
1041 if (pvt->ext_model >= K8_REV_F) {
1042 WARN_ON(cs_mode > 11);
1043 return ddr2_cs_size(cs_mode, dclr & WIDTH_128);
1044 }
1045 else if (pvt->ext_model >= K8_REV_D) {
1046 WARN_ON(cs_mode > 10);
1047
1048 if (cs_mode == 3 || cs_mode == 8)
1049 return 32 << (cs_mode - 1);
1050 else
1051 return 32 << cs_mode;
1052 }
1053 else {
1054 WARN_ON(cs_mode > 6);
1055 return 32 << cs_mode;
1056 }
1128} 1057}
1129 1058
1130/* 1059/*
@@ -1135,17 +1064,13 @@ static int k8_dbam_to_chip_select(struct amd64_pvt *pvt, int cs_mode)
1135 * Pass back: 1064 * Pass back:
1136 * contents of the DCL0_LOW register 1065 * contents of the DCL0_LOW register
1137 */ 1066 */
1138static int f10_early_channel_count(struct amd64_pvt *pvt) 1067static int f1x_early_channel_count(struct amd64_pvt *pvt)
1139{ 1068{
1140 int dbams[] = { DBAM0, DBAM1 };
1141 int i, j, channels = 0; 1069 int i, j, channels = 0;
1142 u32 dbam;
1143 1070
1144 /* If we are in 128 bit mode, then we are using 2 channels */ 1071 /* On F10h, if we are in 128 bit mode, then we are using 2 channels */
1145 if (pvt->dclr0 & F10_WIDTH_128) { 1072 if (boot_cpu_data.x86 == 0x10 && (pvt->dclr0 & WIDTH_128))
1146 channels = 2; 1073 return 2;
1147 return channels;
1148 }
1149 1074
1150 /* 1075 /*
1151 * Need to check if in unganged mode: In such, there are 2 channels, 1076 * Need to check if in unganged mode: In such, there are 2 channels,
@@ -1162,9 +1087,8 @@ static int f10_early_channel_count(struct amd64_pvt *pvt)
1162 * is more than just one DIMM present in unganged mode. Need to check 1087 * is more than just one DIMM present in unganged mode. Need to check
1163 * both controllers since DIMMs can be placed in either one. 1088 * both controllers since DIMMs can be placed in either one.
1164 */ 1089 */
1165 for (i = 0; i < ARRAY_SIZE(dbams); i++) { 1090 for (i = 0; i < 2; i++) {
1166 if (amd64_read_pci_cfg(pvt->F2, dbams[i], &dbam)) 1091 u32 dbam = (i ? pvt->dbam1 : pvt->dbam0);
1167 goto err_reg;
1168 1092
1169 for (j = 0; j < 4; j++) { 1093 for (j = 0; j < 4; j++) {
1170 if (DBAM_DIMM(j, dbam) > 0) { 1094 if (DBAM_DIMM(j, dbam) > 0) {
@@ -1180,216 +1104,191 @@ static int f10_early_channel_count(struct amd64_pvt *pvt)
1180 amd64_info("MCT channel count: %d\n", channels); 1104 amd64_info("MCT channel count: %d\n", channels);
1181 1105
1182 return channels; 1106 return channels;
1183
1184err_reg:
1185 return -1;
1186
1187} 1107}
1188 1108
1189static int f10_dbam_to_chip_select(struct amd64_pvt *pvt, int cs_mode) 1109static int ddr3_cs_size(unsigned i, bool dct_width)
1190{ 1110{
1191 int *dbam_map; 1111 unsigned shift = 0;
1112 int cs_size = 0;
1192 1113
1193 if (pvt->dchr0 & DDR3_MODE || pvt->dchr1 & DDR3_MODE) 1114 if (i == 0 || i == 3 || i == 4)
1194 dbam_map = ddr3_dbam; 1115 cs_size = -1;
1116 else if (i <= 2)
1117 shift = i;
1118 else if (i == 12)
1119 shift = 7;
1120 else if (!(i & 0x1))
1121 shift = i >> 1;
1195 else 1122 else
1196 dbam_map = ddr2_dbam; 1123 shift = (i + 1) >> 1;
1124
1125 if (cs_size != -1)
1126 cs_size = (128 * (1 << !!dct_width)) << shift;
1197 1127
1198 return dbam_map[cs_mode]; 1128 return cs_size;
1199} 1129}
1200 1130
1201static u64 f10_get_error_address(struct mem_ctl_info *mci, 1131static int f10_dbam_to_chip_select(struct amd64_pvt *pvt, u8 dct,
1202 struct err_regs *info) 1132 unsigned cs_mode)
1203{ 1133{
1204 return (((u64) (info->nbeah & 0xffff)) << 32) + 1134 u32 dclr = dct ? pvt->dclr1 : pvt->dclr0;
1205 (info->nbeal & ~0x01); 1135
1136 WARN_ON(cs_mode > 11);
1137
1138 if (pvt->dchr0 & DDR3_MODE || pvt->dchr1 & DDR3_MODE)
1139 return ddr3_cs_size(cs_mode, dclr & WIDTH_128);
1140 else
1141 return ddr2_cs_size(cs_mode, dclr & WIDTH_128);
1206} 1142}
1207 1143
1208/* 1144/*
1209 * Read the Base and Limit registers for F10 based Memory controllers. Extract 1145 * F15h supports only 64bit DCT interfaces
1210 * fields from the 'raw' reg into separate data fields.
1211 *
1212 * Isolates: BASE, LIMIT, IntlvEn, IntlvSel, RW_EN.
1213 */ 1146 */
1214static void f10_read_dram_base_limit(struct amd64_pvt *pvt, int dram) 1147static int f15_dbam_to_chip_select(struct amd64_pvt *pvt, u8 dct,
1148 unsigned cs_mode)
1215{ 1149{
1216 u32 high_offset, low_offset, high_base, low_base, high_limit, low_limit; 1150 WARN_ON(cs_mode > 12);
1217
1218 low_offset = K8_DRAM_BASE_LOW + (dram << 3);
1219 high_offset = F10_DRAM_BASE_HIGH + (dram << 3);
1220
1221 /* read the 'raw' DRAM BASE Address register */
1222 amd64_read_pci_cfg(pvt->F1, low_offset, &low_base);
1223 amd64_read_pci_cfg(pvt->F1, high_offset, &high_base);
1224
1225 /* Extract parts into separate data entries */
1226 pvt->dram_rw_en[dram] = (low_base & 0x3);
1227
1228 if (pvt->dram_rw_en[dram] == 0)
1229 return;
1230
1231 pvt->dram_IntlvEn[dram] = (low_base >> 8) & 0x7;
1232
1233 pvt->dram_base[dram] = (((u64)high_base & 0x000000FF) << 40) |
1234 (((u64)low_base & 0xFFFF0000) << 8);
1235
1236 low_offset = K8_DRAM_LIMIT_LOW + (dram << 3);
1237 high_offset = F10_DRAM_LIMIT_HIGH + (dram << 3);
1238
1239 /* read the 'raw' LIMIT registers */
1240 amd64_read_pci_cfg(pvt->F1, low_offset, &low_limit);
1241 amd64_read_pci_cfg(pvt->F1, high_offset, &high_limit);
1242
1243 pvt->dram_DstNode[dram] = (low_limit & 0x7);
1244 pvt->dram_IntlvSel[dram] = (low_limit >> 8) & 0x7;
1245 1151
1246 /* 1152 return ddr3_cs_size(cs_mode, false);
1247 * Extract address values and form a LIMIT address. Limit is the HIGHEST
1248 * memory location of the region, so low 24 bits need to be all ones.
1249 */
1250 pvt->dram_limit[dram] = (((u64)high_limit & 0x000000FF) << 40) |
1251 (((u64) low_limit & 0xFFFF0000) << 8) |
1252 0x00FFFFFF;
1253} 1153}
1254 1154
1255static void f10_read_dram_ctl_register(struct amd64_pvt *pvt) 1155static void read_dram_ctl_register(struct amd64_pvt *pvt)
1256{ 1156{
1257 1157
1258 if (!amd64_read_pci_cfg(pvt->F2, F10_DCTL_SEL_LOW, 1158 if (boot_cpu_data.x86 == 0xf)
1259 &pvt->dram_ctl_select_low)) { 1159 return;
1260 debugf0("F2x110 (DCTL Sel. Low): 0x%08x, " 1160
1261 "High range addresses at: 0x%x\n", 1161 if (!amd64_read_dct_pci_cfg(pvt, DCT_SEL_LO, &pvt->dct_sel_lo)) {
1262 pvt->dram_ctl_select_low, 1162 debugf0("F2x110 (DCTSelLow): 0x%08x, High range addrs at: 0x%x\n",
1263 dct_sel_baseaddr(pvt)); 1163 pvt->dct_sel_lo, dct_sel_baseaddr(pvt));
1264 1164
1265 debugf0(" DCT mode: %s, All DCTs on: %s\n", 1165 debugf0(" DCTs operate in %s mode.\n",
1266 (dct_ganging_enabled(pvt) ? "ganged" : "unganged"), 1166 (dct_ganging_enabled(pvt) ? "ganged" : "unganged"));
1267 (dct_dram_enabled(pvt) ? "yes" : "no"));
1268 1167
1269 if (!dct_ganging_enabled(pvt)) 1168 if (!dct_ganging_enabled(pvt))
1270 debugf0(" Address range split per DCT: %s\n", 1169 debugf0(" Address range split per DCT: %s\n",
1271 (dct_high_range_enabled(pvt) ? "yes" : "no")); 1170 (dct_high_range_enabled(pvt) ? "yes" : "no"));
1272 1171
1273 debugf0(" DCT data interleave for ECC: %s, " 1172 debugf0(" data interleave for ECC: %s, "
1274 "DRAM cleared since last warm reset: %s\n", 1173 "DRAM cleared since last warm reset: %s\n",
1275 (dct_data_intlv_enabled(pvt) ? "enabled" : "disabled"), 1174 (dct_data_intlv_enabled(pvt) ? "enabled" : "disabled"),
1276 (dct_memory_cleared(pvt) ? "yes" : "no")); 1175 (dct_memory_cleared(pvt) ? "yes" : "no"));
1277 1176
1278 debugf0(" DCT channel interleave: %s, " 1177 debugf0(" channel interleave: %s, "
1279 "DCT interleave bits selector: 0x%x\n", 1178 "interleave bits selector: 0x%x\n",
1280 (dct_interleave_enabled(pvt) ? "enabled" : "disabled"), 1179 (dct_interleave_enabled(pvt) ? "enabled" : "disabled"),
1281 dct_sel_interleave_addr(pvt)); 1180 dct_sel_interleave_addr(pvt));
1282 } 1181 }
1283 1182
1284 amd64_read_pci_cfg(pvt->F2, F10_DCTL_SEL_HIGH, 1183 amd64_read_dct_pci_cfg(pvt, DCT_SEL_HI, &pvt->dct_sel_hi);
1285 &pvt->dram_ctl_select_high);
1286} 1184}
1287 1185
1288/* 1186/*
1289 * determine channel based on the interleaving mode: F10h BKDG, 2.8.9 Memory 1187 * Determine channel (DCT) based on the interleaving mode: F10h BKDG, 2.8.9 Memory
1290 * Interleaving Modes. 1188 * Interleaving Modes.
1291 */ 1189 */
1292static u32 f10_determine_channel(struct amd64_pvt *pvt, u64 sys_addr, 1190static u8 f1x_determine_channel(struct amd64_pvt *pvt, u64 sys_addr,
1293 int hi_range_sel, u32 intlv_en) 1191 bool hi_range_sel, u8 intlv_en)
1294{ 1192{
1295 u32 cs, temp, dct_sel_high = (pvt->dram_ctl_select_low >> 1) & 1; 1193 u8 dct_sel_high = (pvt->dct_sel_lo >> 1) & 1;
1296 1194
1297 if (dct_ganging_enabled(pvt)) 1195 if (dct_ganging_enabled(pvt))
1298 cs = 0; 1196 return 0;
1299 else if (hi_range_sel)
1300 cs = dct_sel_high;
1301 else if (dct_interleave_enabled(pvt)) {
1302 /*
1303 * see F2x110[DctSelIntLvAddr] - channel interleave mode
1304 */
1305 if (dct_sel_interleave_addr(pvt) == 0)
1306 cs = sys_addr >> 6 & 1;
1307 else if ((dct_sel_interleave_addr(pvt) >> 1) & 1) {
1308 temp = hweight_long((u32) ((sys_addr >> 16) & 0x1F)) % 2;
1309 1197
1310 if (dct_sel_interleave_addr(pvt) & 1) 1198 if (hi_range_sel)
1311 cs = (sys_addr >> 9 & 1) ^ temp; 1199 return dct_sel_high;
1312 else
1313 cs = (sys_addr >> 6 & 1) ^ temp;
1314 } else if (intlv_en & 4)
1315 cs = sys_addr >> 15 & 1;
1316 else if (intlv_en & 2)
1317 cs = sys_addr >> 14 & 1;
1318 else if (intlv_en & 1)
1319 cs = sys_addr >> 13 & 1;
1320 else
1321 cs = sys_addr >> 12 & 1;
1322 } else if (dct_high_range_enabled(pvt) && !dct_ganging_enabled(pvt))
1323 cs = ~dct_sel_high & 1;
1324 else
1325 cs = 0;
1326 1200
1327 return cs; 1201 /*
1328} 1202 * see F2x110[DctSelIntLvAddr] - channel interleave mode
1203 */
1204 if (dct_interleave_enabled(pvt)) {
1205 u8 intlv_addr = dct_sel_interleave_addr(pvt);
1329 1206
1330static inline u32 f10_map_intlv_en_to_shift(u32 intlv_en) 1207 /* return DCT select function: 0=DCT0, 1=DCT1 */
1331{ 1208 if (!intlv_addr)
1332 if (intlv_en == 1) 1209 return sys_addr >> 6 & 1;
1333 return 1; 1210
1334 else if (intlv_en == 3) 1211 if (intlv_addr & 0x2) {
1335 return 2; 1212 u8 shift = intlv_addr & 0x1 ? 9 : 6;
1336 else if (intlv_en == 7) 1213 u32 temp = hweight_long((u32) ((sys_addr >> 16) & 0x1F)) % 2;
1337 return 3; 1214
1215 return ((sys_addr >> shift) & 1) ^ temp;
1216 }
1217
1218 return (sys_addr >> (12 + hweight8(intlv_en))) & 1;
1219 }
1220
1221 if (dct_high_range_enabled(pvt))
1222 return ~dct_sel_high & 1;
1338 1223
1339 return 0; 1224 return 0;
1340} 1225}
1341 1226
1342/* See F10h BKDG, 2.8.10.2 DctSelBaseOffset Programming */ 1227/* Convert the sys_addr to the normalized DCT address */
1343static inline u64 f10_get_base_addr_offset(u64 sys_addr, int hi_range_sel, 1228static u64 f1x_get_norm_dct_addr(struct amd64_pvt *pvt, unsigned range,
1344 u32 dct_sel_base_addr, 1229 u64 sys_addr, bool hi_rng,
1345 u64 dct_sel_base_off, 1230 u32 dct_sel_base_addr)
1346 u32 hole_valid, u32 hole_off,
1347 u64 dram_base)
1348{ 1231{
1349 u64 chan_off; 1232 u64 chan_off;
1233 u64 dram_base = get_dram_base(pvt, range);
1234 u64 hole_off = f10_dhar_offset(pvt);
1235 u64 dct_sel_base_off = (pvt->dct_sel_hi & 0xFFFFFC00) << 16;
1350 1236
1351 if (hi_range_sel) { 1237 if (hi_rng) {
1352 if (!(dct_sel_base_addr & 0xFFFF0000) && 1238 /*
1353 hole_valid && (sys_addr >= 0x100000000ULL)) 1239 * if
1354 chan_off = hole_off << 16; 1240 * base address of high range is below 4Gb
1241 * (bits [47:27] at [31:11])
1242 * DRAM address space on this DCT is hoisted above 4Gb &&
1243 * sys_addr > 4Gb
1244 *
1245 * remove hole offset from sys_addr
1246 * else
1247 * remove high range offset from sys_addr
1248 */
1249 if ((!(dct_sel_base_addr >> 16) ||
1250 dct_sel_base_addr < dhar_base(pvt)) &&
1251 dhar_valid(pvt) &&
1252 (sys_addr >= BIT_64(32)))
1253 chan_off = hole_off;
1355 else 1254 else
1356 chan_off = dct_sel_base_off; 1255 chan_off = dct_sel_base_off;
1357 } else { 1256 } else {
1358 if (hole_valid && (sys_addr >= 0x100000000ULL)) 1257 /*
1359 chan_off = hole_off << 16; 1258 * if
1259 * we have a valid hole &&
1260 * sys_addr > 4Gb
1261 *
1262 * remove hole
1263 * else
1264 * remove dram base to normalize to DCT address
1265 */
1266 if (dhar_valid(pvt) && (sys_addr >= BIT_64(32)))
1267 chan_off = hole_off;
1360 else 1268 else
1361 chan_off = dram_base & 0xFFFFF8000000ULL; 1269 chan_off = dram_base;
1362 } 1270 }
1363 1271
1364 return (sys_addr & 0x0000FFFFFFFFFFC0ULL) - 1272 return (sys_addr & GENMASK(6,47)) - (chan_off & GENMASK(23,47));
1365 (chan_off & 0x0000FFFFFF800000ULL);
1366} 1273}
1367 1274
1368/* Hack for the time being - Can we get this from BIOS?? */
1369#define CH0SPARE_RANK 0
1370#define CH1SPARE_RANK 1
1371
1372/* 1275/*
1373 * checks if the csrow passed in is marked as SPARED, if so returns the new 1276 * checks if the csrow passed in is marked as SPARED, if so returns the new
1374 * spare row 1277 * spare row
1375 */ 1278 */
1376static inline int f10_process_possible_spare(int csrow, 1279static int f10_process_possible_spare(struct amd64_pvt *pvt, u8 dct, int csrow)
1377 u32 cs, struct amd64_pvt *pvt) 1280{
1378{ 1281 int tmp_cs;
1379 u32 swap_done; 1282
1380 u32 bad_dram_cs; 1283 if (online_spare_swap_done(pvt, dct) &&
1381 1284 csrow == online_spare_bad_dramcs(pvt, dct)) {
1382 /* Depending on channel, isolate respective SPARING info */ 1285
1383 if (cs) { 1286 for_each_chip_select(tmp_cs, dct, pvt) {
1384 swap_done = F10_ONLINE_SPARE_SWAPDONE1(pvt->online_spare); 1287 if (chip_select_base(tmp_cs, dct, pvt) & 0x2) {
1385 bad_dram_cs = F10_ONLINE_SPARE_BADDRAM_CS1(pvt->online_spare); 1288 csrow = tmp_cs;
1386 if (swap_done && (csrow == bad_dram_cs)) 1289 break;
1387 csrow = CH1SPARE_RANK; 1290 }
1388 } else { 1291 }
1389 swap_done = F10_ONLINE_SPARE_SWAPDONE0(pvt->online_spare);
1390 bad_dram_cs = F10_ONLINE_SPARE_BADDRAM_CS0(pvt->online_spare);
1391 if (swap_done && (csrow == bad_dram_cs))
1392 csrow = CH0SPARE_RANK;
1393 } 1292 }
1394 return csrow; 1293 return csrow;
1395} 1294}
@@ -1402,11 +1301,11 @@ static inline int f10_process_possible_spare(int csrow,
1402 * -EINVAL: NOT FOUND 1301 * -EINVAL: NOT FOUND
1403 * 0..csrow = Chip-Select Row 1302 * 0..csrow = Chip-Select Row
1404 */ 1303 */
1405static int f10_lookup_addr_in_dct(u32 in_addr, u32 nid, u32 cs) 1304static int f1x_lookup_addr_in_dct(u64 in_addr, u32 nid, u8 dct)
1406{ 1305{
1407 struct mem_ctl_info *mci; 1306 struct mem_ctl_info *mci;
1408 struct amd64_pvt *pvt; 1307 struct amd64_pvt *pvt;
1409 u32 cs_base, cs_mask; 1308 u64 cs_base, cs_mask;
1410 int cs_found = -EINVAL; 1309 int cs_found = -EINVAL;
1411 int csrow; 1310 int csrow;
1412 1311
@@ -1416,39 +1315,25 @@ static int f10_lookup_addr_in_dct(u32 in_addr, u32 nid, u32 cs)
1416 1315
1417 pvt = mci->pvt_info; 1316 pvt = mci->pvt_info;
1418 1317
1419 debugf1("InputAddr=0x%x channelselect=%d\n", in_addr, cs); 1318 debugf1("input addr: 0x%llx, DCT: %d\n", in_addr, dct);
1420
1421 for (csrow = 0; csrow < pvt->cs_count; csrow++) {
1422 1319
1423 cs_base = amd64_get_dct_base(pvt, cs, csrow); 1320 for_each_chip_select(csrow, dct, pvt) {
1424 if (!(cs_base & K8_DCSB_CS_ENABLE)) 1321 if (!csrow_enabled(csrow, dct, pvt))
1425 continue; 1322 continue;
1426 1323
1427 /* 1324 get_cs_base_and_mask(pvt, csrow, dct, &cs_base, &cs_mask);
1428 * We have an ENABLED CSROW, Isolate just the MASK bits of the
1429 * target: [28:19] and [13:5], which map to [36:27] and [21:13]
1430 * of the actual address.
1431 */
1432 cs_base &= REV_F_F1Xh_DCSB_BASE_BITS;
1433
1434 /*
1435 * Get the DCT Mask, and ENABLE the reserved bits: [18:16] and
1436 * [4:0] to become ON. Then mask off bits [28:0] ([36:8])
1437 */
1438 cs_mask = amd64_get_dct_mask(pvt, cs, csrow);
1439 1325
1440 debugf1(" CSROW=%d CSBase=0x%x RAW CSMask=0x%x\n", 1326 debugf1(" CSROW=%d CSBase=0x%llx CSMask=0x%llx\n",
1441 csrow, cs_base, cs_mask); 1327 csrow, cs_base, cs_mask);
1442 1328
1443 cs_mask = (cs_mask | 0x0007C01F) & 0x1FFFFFFF; 1329 cs_mask = ~cs_mask;
1444 1330
1445 debugf1(" Final CSMask=0x%x\n", cs_mask); 1331 debugf1(" (InputAddr & ~CSMask)=0x%llx "
1446 debugf1(" (InputAddr & ~CSMask)=0x%x " 1332 "(CSBase & ~CSMask)=0x%llx\n",
1447 "(CSBase & ~CSMask)=0x%x\n", 1333 (in_addr & cs_mask), (cs_base & cs_mask));
1448 (in_addr & ~cs_mask), (cs_base & ~cs_mask));
1449 1334
1450 if ((in_addr & ~cs_mask) == (cs_base & ~cs_mask)) { 1335 if ((in_addr & cs_mask) == (cs_base & cs_mask)) {
1451 cs_found = f10_process_possible_spare(csrow, cs, pvt); 1336 cs_found = f10_process_possible_spare(pvt, dct, csrow);
1452 1337
1453 debugf1(" MATCH csrow=%d\n", cs_found); 1338 debugf1(" MATCH csrow=%d\n", cs_found);
1454 break; 1339 break;
@@ -1457,38 +1342,75 @@ static int f10_lookup_addr_in_dct(u32 in_addr, u32 nid, u32 cs)
1457 return cs_found; 1342 return cs_found;
1458} 1343}
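The rewritten lookup drops the revision-specific mask handling: for every enabled chip select it fetches base and mask, then declares a match when the address bits outside the mask equal the chip-select base. A self-contained sketch of that comparison, over a made-up two-rank layout rather than real DCSB/DCSM contents:

#include <stdio.h>
#include <stdint.h>

struct cs { uint64_t base, mask; int enabled; };

/* Hypothetical layout: two enabled 1GB ranks (mask covers the don't-care bits). */
static const struct cs csels[] = {
	{ 0x00000000ULL, 0x3fffffffULL, 1 },
	{ 0x40000000ULL, 0x3fffffffULL, 1 },
};

static int lookup_addr_in_dct(uint64_t in_addr)
{
	for (unsigned i = 0; i < sizeof(csels) / sizeof(csels[0]); i++) {
		if (!csels[i].enabled)
			continue;

		/* match when the bits not covered by the mask agree with the base */
		if ((in_addr & ~csels[i].mask) == (csels[i].base & ~csels[i].mask))
			return (int)i;
	}
	return -1;	/* "not found", -EINVAL in the driver */
}

int main(void)
{
	uint64_t addr = 0x50000000ULL;

	printf("0x%llx -> csrow %d\n", (unsigned long long)addr, lookup_addr_in_dct(addr));
	return 0;
}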
1459 1344
1460/* For a given @dram_range, check if @sys_addr falls within it. */ 1345/*
1461static int f10_match_to_this_node(struct amd64_pvt *pvt, int dram_range, 1346 * See F2x10C. Non-interleaved graphics framebuffer memory under the 16G is
1462 u64 sys_addr, int *nid, int *chan_sel) 1347 * swapped with a region located at the bottom of memory so that the GPU can use
1348 * the interleaved region and thus two channels.
1349 */
1350static u64 f1x_swap_interleaved_region(struct amd64_pvt *pvt, u64 sys_addr)
1463{ 1351{
1464 int node_id, cs_found = -EINVAL, high_range = 0; 1352 u32 swap_reg, swap_base, swap_limit, rgn_size, tmp_addr;
1465 u32 intlv_en, intlv_sel, intlv_shift, hole_off;
1466 u32 hole_valid, tmp, dct_sel_base, channel;
1467 u64 dram_base, chan_addr, dct_sel_base_off;
1468 1353
1469 dram_base = pvt->dram_base[dram_range]; 1354 if (boot_cpu_data.x86 == 0x10) {
1470 intlv_en = pvt->dram_IntlvEn[dram_range]; 1355 /* only revC3 and revE have that feature */
1356 if (boot_cpu_data.x86_model < 4 ||
1357 (boot_cpu_data.x86_model < 0xa &&
1358 boot_cpu_data.x86_mask < 3))
1359 return sys_addr;
1360 }
1471 1361
1472 node_id = pvt->dram_DstNode[dram_range]; 1362 amd64_read_dct_pci_cfg(pvt, SWAP_INTLV_REG, &swap_reg);
1473 intlv_sel = pvt->dram_IntlvSel[dram_range];
1474 1363
1475 debugf1("(dram=%d) Base=0x%llx SystemAddr= 0x%llx Limit=0x%llx\n", 1364 if (!(swap_reg & 0x1))
1476 dram_range, dram_base, sys_addr, pvt->dram_limit[dram_range]); 1365 return sys_addr;
1477 1366
1478 /* 1367 swap_base = (swap_reg >> 3) & 0x7f;
1479 * This assumes that one node's DHAR is the same as all the other 1368 swap_limit = (swap_reg >> 11) & 0x7f;
1480 * nodes' DHAR. 1369 rgn_size = (swap_reg >> 20) & 0x7f;
1481 */ 1370 tmp_addr = sys_addr >> 27;
1482 hole_off = (pvt->dhar & 0x0000FF80);
1483 hole_valid = (pvt->dhar & 0x1);
1484 dct_sel_base_off = (pvt->dram_ctl_select_high & 0xFFFFFC00) << 16;
1485 1371
1486 debugf1(" HoleOffset=0x%x HoleValid=0x%x IntlvSel=0x%x\n", 1372 if (!(sys_addr >> 34) &&
1487 hole_off, hole_valid, intlv_sel); 1373 (((tmp_addr >= swap_base) &&
1374 (tmp_addr <= swap_limit)) ||
1375 (tmp_addr < rgn_size)))
1376 return sys_addr ^ (u64)swap_base << 27;
1377
1378 return sys_addr;
1379}
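f1x_swap_interleaved_region() works on 128MB granules: address bits [33:27] are compared against the swap window taken from F2x10C, and a hit is remapped by XORing the swap base into those bits. A hardware-free sketch of the same arithmetic, with made-up register field values:

#include <stdio.h>
#include <stdint.h>

/* Simplified model of the F2x10C swap; @swap_reg is a hypothetical value. */
static uint64_t swap_interleaved_region(uint64_t sys_addr, uint32_t swap_reg)
{
	uint32_t swap_base  = (swap_reg >>  3) & 0x7f;
	uint32_t swap_limit = (swap_reg >> 11) & 0x7f;
	uint32_t rgn_size   = (swap_reg >> 20) & 0x7f;
	uint32_t tmp_addr   = sys_addr >> 27;	/* 128MB granularity */

	if (!(swap_reg & 0x1))			/* swap not enabled */
		return sys_addr;

	if (!(sys_addr >> 34) &&
	    ((tmp_addr >= swap_base && tmp_addr <= swap_limit) ||
	     tmp_addr < rgn_size))
		return sys_addr ^ ((uint64_t)swap_base << 27);

	return sys_addr;
}

int main(void)
{
	/* enable=1, base=2, limit=3, size=2 (all hypothetical) */
	uint32_t reg  = 0x1 | (2 << 3) | (3 << 11) | (2 << 20);
	uint64_t addr = 0x10000000ULL;		/* lands in 128MB chunk 2 */

	printf("0x%llx -> 0x%llx\n",
	       (unsigned long long)addr,
	       (unsigned long long)swap_interleaved_region(addr, reg));
	return 0;
}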
1380
1381/* For a given @dram_range, check if @sys_addr falls within it. */
1382static int f1x_match_to_this_node(struct amd64_pvt *pvt, unsigned range,
1383 u64 sys_addr, int *nid, int *chan_sel)
1384{
1385 int cs_found = -EINVAL;
1386 u64 chan_addr;
1387 u32 dct_sel_base;
1388 u8 channel;
1389 bool high_range = false;
1390
1391 u8 node_id = dram_dst_node(pvt, range);
1392 u8 intlv_en = dram_intlv_en(pvt, range);
1393 u32 intlv_sel = dram_intlv_sel(pvt, range);
1394
1395 debugf1("(range %d) SystemAddr= 0x%llx Limit=0x%llx\n",
1396 range, sys_addr, get_dram_limit(pvt, range));
1397
1398 if (dhar_valid(pvt) &&
1399 dhar_base(pvt) <= sys_addr &&
1400 sys_addr < BIT_64(32)) {
1401 amd64_warn("Huh? Address is in the MMIO hole: 0x%016llx\n",
1402 sys_addr);
1403 return -EINVAL;
1404 }
1488 1405
1489 if (intlv_en && 1406 if (intlv_en &&
1490 (intlv_sel != ((sys_addr >> 12) & intlv_en))) 1407 (intlv_sel != ((sys_addr >> 12) & intlv_en))) {
1408 amd64_warn("Botched intlv bits, en: 0x%x, sel: 0x%x\n",
1409 intlv_en, intlv_sel);
1491 return -EINVAL; 1410 return -EINVAL;
1411 }
1412
1413 sys_addr = f1x_swap_interleaved_region(pvt, sys_addr);
1492 1414
1493 dct_sel_base = dct_sel_baseaddr(pvt); 1415 dct_sel_base = dct_sel_baseaddr(pvt);
1494 1416
@@ -1499,38 +1421,41 @@ static int f10_match_to_this_node(struct amd64_pvt *pvt, int dram_range,
1499 if (dct_high_range_enabled(pvt) && 1421 if (dct_high_range_enabled(pvt) &&
1500 !dct_ganging_enabled(pvt) && 1422 !dct_ganging_enabled(pvt) &&
1501 ((sys_addr >> 27) >= (dct_sel_base >> 11))) 1423 ((sys_addr >> 27) >= (dct_sel_base >> 11)))
1502 high_range = 1; 1424 high_range = true;
1503
1504 channel = f10_determine_channel(pvt, sys_addr, high_range, intlv_en);
1505 1425
1506 chan_addr = f10_get_base_addr_offset(sys_addr, high_range, dct_sel_base, 1426 channel = f1x_determine_channel(pvt, sys_addr, high_range, intlv_en);
1507 dct_sel_base_off, hole_valid,
1508 hole_off, dram_base);
1509 1427
1510 intlv_shift = f10_map_intlv_en_to_shift(intlv_en); 1428 chan_addr = f1x_get_norm_dct_addr(pvt, range, sys_addr,
1429 high_range, dct_sel_base);
1511 1430
1512 /* remove Node ID (in case of memory interleaving) */ 1431 /* Remove node interleaving, see F1x120 */
1513 tmp = chan_addr & 0xFC0; 1432 if (intlv_en)
1433 chan_addr = ((chan_addr >> (12 + hweight8(intlv_en))) << 12) |
1434 (chan_addr & 0xfff);
1514 1435
1515 chan_addr = ((chan_addr >> intlv_shift) & 0xFFFFFFFFF000ULL) | tmp; 1436 /* remove channel interleave */
1516
1517 /* remove channel interleave and hash */
1518 if (dct_interleave_enabled(pvt) && 1437 if (dct_interleave_enabled(pvt) &&
1519 !dct_high_range_enabled(pvt) && 1438 !dct_high_range_enabled(pvt) &&
1520 !dct_ganging_enabled(pvt)) { 1439 !dct_ganging_enabled(pvt)) {
1521 if (dct_sel_interleave_addr(pvt) != 1) 1440
1522 chan_addr = (chan_addr >> 1) & 0xFFFFFFFFFFFFFFC0ULL; 1441 if (dct_sel_interleave_addr(pvt) != 1) {
1523 else { 1442 if (dct_sel_interleave_addr(pvt) == 0x3)
1524 tmp = chan_addr & 0xFC0; 1443 /* hash 9 */
1525 chan_addr = ((chan_addr & 0xFFFFFFFFFFFFC000ULL) >> 1) 1444 chan_addr = ((chan_addr >> 10) << 9) |
1526 | tmp; 1445 (chan_addr & 0x1ff);
1527 } 1446 else
1447 /* A[6] or hash 6 */
1448 chan_addr = ((chan_addr >> 7) << 6) |
1449 (chan_addr & 0x3f);
1450 } else
1451 /* A[12] */
1452 chan_addr = ((chan_addr >> 13) << 12) |
1453 (chan_addr & 0xfff);
1528 } 1454 }
1529 1455
1530 debugf1(" (ChannelAddrLong=0x%llx) >> 8 becomes InputAddr=0x%x\n", 1456 debugf1(" Normalized DCT addr: 0x%llx\n", chan_addr);
1531 chan_addr, (u32)(chan_addr >> 8));
1532 1457
1533 cs_found = f10_lookup_addr_in_dct(chan_addr >> 8, node_id, channel); 1458 cs_found = f1x_lookup_addr_in_dct(chan_addr, node_id, channel);
1534 1459
1535 if (cs_found >= 0) { 1460 if (cs_found >= 0) {
1536 *nid = node_id; 1461 *nid = node_id;
@@ -1539,23 +1464,21 @@ static int f10_match_to_this_node(struct amd64_pvt *pvt, int dram_range,
1539 return cs_found; 1464 return cs_found;
1540} 1465}
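Both interleave-removal steps above are instances of one operation: delete some address bits at a given position and close the gap, leaving a contiguous DCT address. Node interleaving removes hweight8(intlv_en) bits at bit 12 (see F1x120); channel interleaving removes a single bit at 12, 9, or 6 depending on DctSelIntLvAddr. A standalone sketch of that bit removal, with example values:

#include <stdio.h>
#include <stdint.h>

/* Drop @nbits address bits starting at bit @pos and close the gap. */
static uint64_t remove_bits(uint64_t addr, unsigned pos, unsigned nbits)
{
	return ((addr >> (pos + nbits)) << pos) | (addr & ((1ULL << pos) - 1));
}

static unsigned popcount8(uint8_t v)
{
	unsigned n = 0;

	for (; v; v >>= 1)
		n += v & 1;
	return n;
}

int main(void)
{
	uint64_t chan_addr = 0x12345678ULL;	/* hypothetical channel address */
	uint8_t  intlv_en  = 0x3;		/* 4-node interleave -> 2 bits */

	/* remove the node-interleave bits above bit 12 */
	chan_addr = remove_bits(chan_addr, 12, popcount8(intlv_en));

	/* remove the channel-interleave bit, here A[12] as an example */
	chan_addr = remove_bits(chan_addr, 12, 1);

	printf("normalized DCT addr: 0x%llx\n", (unsigned long long)chan_addr);
	return 0;
}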
1541 1466
1542static int f10_translate_sysaddr_to_cs(struct amd64_pvt *pvt, u64 sys_addr, 1467static int f1x_translate_sysaddr_to_cs(struct amd64_pvt *pvt, u64 sys_addr,
1543 int *node, int *chan_sel) 1468 int *node, int *chan_sel)
1544{ 1469{
1545 int dram_range, cs_found = -EINVAL; 1470 int cs_found = -EINVAL;
1546 u64 dram_base, dram_limit; 1471 unsigned range;
1547 1472
1548 for (dram_range = 0; dram_range < DRAM_REG_COUNT; dram_range++) { 1473 for (range = 0; range < DRAM_RANGES; range++) {
1549 1474
1550 if (!pvt->dram_rw_en[dram_range]) 1475 if (!dram_rw(pvt, range))
1551 continue; 1476 continue;
1552 1477
1553 dram_base = pvt->dram_base[dram_range]; 1478 if ((get_dram_base(pvt, range) <= sys_addr) &&
1554 dram_limit = pvt->dram_limit[dram_range]; 1479 (get_dram_limit(pvt, range) >= sys_addr)) {
1555 1480
1556 if ((dram_base <= sys_addr) && (sys_addr <= dram_limit)) { 1481 cs_found = f1x_match_to_this_node(pvt, range,
1557
1558 cs_found = f10_match_to_this_node(pvt, dram_range,
1559 sys_addr, node, 1482 sys_addr, node,
1560 chan_sel); 1483 chan_sel);
1561 if (cs_found >= 0) 1484 if (cs_found >= 0)
@@ -1572,16 +1495,14 @@ static int f10_translate_sysaddr_to_cs(struct amd64_pvt *pvt, u64 sys_addr,
1572 * The @sys_addr is usually an error address received from the hardware 1495 * The @sys_addr is usually an error address received from the hardware
1573 * (MCX_ADDR). 1496 * (MCX_ADDR).
1574 */ 1497 */
1575static void f10_map_sysaddr_to_csrow(struct mem_ctl_info *mci, 1498static void f1x_map_sysaddr_to_csrow(struct mem_ctl_info *mci, u64 sys_addr,
1576 struct err_regs *err_info, 1499 u16 syndrome)
1577 u64 sys_addr)
1578{ 1500{
1579 struct amd64_pvt *pvt = mci->pvt_info; 1501 struct amd64_pvt *pvt = mci->pvt_info;
1580 u32 page, offset; 1502 u32 page, offset;
1581 int nid, csrow, chan = 0; 1503 int nid, csrow, chan = 0;
1582 u16 syndrome;
1583 1504
1584 csrow = f10_translate_sysaddr_to_cs(pvt, sys_addr, &nid, &chan); 1505 csrow = f1x_translate_sysaddr_to_cs(pvt, sys_addr, &nid, &chan);
1585 1506
1586 if (csrow < 0) { 1507 if (csrow < 0) {
1587 edac_mc_handle_ce_no_info(mci, EDAC_MOD_STR); 1508 edac_mc_handle_ce_no_info(mci, EDAC_MOD_STR);
@@ -1590,14 +1511,12 @@ static void f10_map_sysaddr_to_csrow(struct mem_ctl_info *mci,
1590 1511
1591 error_address_to_page_and_offset(sys_addr, &page, &offset); 1512 error_address_to_page_and_offset(sys_addr, &page, &offset);
1592 1513
1593 syndrome = extract_syndrome(err_info);
1594
1595 /* 1514 /*
1596 * We need the syndromes for channel detection only when we're 1515 * We need the syndromes for channel detection only when we're
1597 * ganged. Otherwise @chan should already contain the channel at 1516 * ganged. Otherwise @chan should already contain the channel at
1598 * this point. 1517 * this point.
1599 */ 1518 */
1600 if (dct_ganging_enabled(pvt) && (pvt->nbcfg & K8_NBCFG_CHIPKILL)) 1519 if (dct_ganging_enabled(pvt))
1601 chan = get_channel_from_ecc_syndrome(mci, syndrome); 1520 chan = get_channel_from_ecc_syndrome(mci, syndrome);
1602 1521
1603 if (chan >= 0) 1522 if (chan >= 0)
@@ -1614,16 +1533,16 @@ static void f10_map_sysaddr_to_csrow(struct mem_ctl_info *mci,
1614 1533
1615/* 1534/*
1616 * debug routine to display the memory sizes of all logical DIMMs and its 1535 * debug routine to display the memory sizes of all logical DIMMs and its
1617 * CSROWs as well 1536 * CSROWs
1618 */ 1537 */
1619static void amd64_debug_display_dimm_sizes(int ctrl, struct amd64_pvt *pvt) 1538static void amd64_debug_display_dimm_sizes(struct amd64_pvt *pvt, u8 ctrl)
1620{ 1539{
1621 int dimm, size0, size1, factor = 0; 1540 int dimm, size0, size1, factor = 0;
1622 u32 dbam; 1541 u32 *dcsb = ctrl ? pvt->csels[1].csbases : pvt->csels[0].csbases;
1623 u32 *dcsb; 1542 u32 dbam = ctrl ? pvt->dbam1 : pvt->dbam0;
1624 1543
1625 if (boot_cpu_data.x86 == 0xf) { 1544 if (boot_cpu_data.x86 == 0xf) {
1626 if (pvt->dclr0 & F10_WIDTH_128) 1545 if (pvt->dclr0 & WIDTH_128)
1627 factor = 1; 1546 factor = 1;
1628 1547
1629 /* K8 families < revF not supported yet */ 1548 /* K8 families < revF not supported yet */
@@ -1634,7 +1553,8 @@ static void amd64_debug_display_dimm_sizes(int ctrl, struct amd64_pvt *pvt)
1634 } 1553 }
1635 1554
1636 dbam = (ctrl && !dct_ganging_enabled(pvt)) ? pvt->dbam1 : pvt->dbam0; 1555 dbam = (ctrl && !dct_ganging_enabled(pvt)) ? pvt->dbam1 : pvt->dbam0;
1637 dcsb = (ctrl && !dct_ganging_enabled(pvt)) ? pvt->dcsb1 : pvt->dcsb0; 1556 dcsb = (ctrl && !dct_ganging_enabled(pvt)) ? pvt->csels[1].csbases
1557 : pvt->csels[0].csbases;
1638 1558
1639 debugf1("F2x%d80 (DRAM Bank Address Mapping): 0x%08x\n", ctrl, dbam); 1559 debugf1("F2x%d80 (DRAM Bank Address Mapping): 0x%08x\n", ctrl, dbam);
1640 1560
@@ -1644,12 +1564,14 @@ static void amd64_debug_display_dimm_sizes(int ctrl, struct amd64_pvt *pvt)
1644 for (dimm = 0; dimm < 4; dimm++) { 1564 for (dimm = 0; dimm < 4; dimm++) {
1645 1565
1646 size0 = 0; 1566 size0 = 0;
1647 if (dcsb[dimm*2] & K8_DCSB_CS_ENABLE) 1567 if (dcsb[dimm*2] & DCSB_CS_ENABLE)
1648 size0 = pvt->ops->dbam_to_cs(pvt, DBAM_DIMM(dimm, dbam)); 1568 size0 = pvt->ops->dbam_to_cs(pvt, ctrl,
1569 DBAM_DIMM(dimm, dbam));
1649 1570
1650 size1 = 0; 1571 size1 = 0;
1651 if (dcsb[dimm*2 + 1] & K8_DCSB_CS_ENABLE) 1572 if (dcsb[dimm*2 + 1] & DCSB_CS_ENABLE)
1652 size1 = pvt->ops->dbam_to_cs(pvt, DBAM_DIMM(dimm, dbam)); 1573 size1 = pvt->ops->dbam_to_cs(pvt, ctrl,
1574 DBAM_DIMM(dimm, dbam));
1653 1575
1654 amd64_info(EDAC_MC ": %d: %5dMB %d: %5dMB\n", 1576 amd64_info(EDAC_MC ": %d: %5dMB %d: %5dMB\n",
1655 dimm * 2, size0 << factor, 1577 dimm * 2, size0 << factor,
@@ -1664,10 +1586,9 @@ static struct amd64_family_type amd64_family_types[] = {
1664 .f3_id = PCI_DEVICE_ID_AMD_K8_NB_MISC, 1586 .f3_id = PCI_DEVICE_ID_AMD_K8_NB_MISC,
1665 .ops = { 1587 .ops = {
1666 .early_channel_count = k8_early_channel_count, 1588 .early_channel_count = k8_early_channel_count,
1667 .get_error_address = k8_get_error_address,
1668 .read_dram_base_limit = k8_read_dram_base_limit,
1669 .map_sysaddr_to_csrow = k8_map_sysaddr_to_csrow, 1589 .map_sysaddr_to_csrow = k8_map_sysaddr_to_csrow,
1670 .dbam_to_cs = k8_dbam_to_chip_select, 1590 .dbam_to_cs = k8_dbam_to_chip_select,
1591 .read_dct_pci_cfg = k8_read_dct_pci_cfg,
1671 } 1592 }
1672 }, 1593 },
1673 [F10_CPUS] = { 1594 [F10_CPUS] = {
@@ -1675,12 +1596,21 @@ static struct amd64_family_type amd64_family_types[] = {
1675 .f1_id = PCI_DEVICE_ID_AMD_10H_NB_MAP, 1596 .f1_id = PCI_DEVICE_ID_AMD_10H_NB_MAP,
1676 .f3_id = PCI_DEVICE_ID_AMD_10H_NB_MISC, 1597 .f3_id = PCI_DEVICE_ID_AMD_10H_NB_MISC,
1677 .ops = { 1598 .ops = {
1678 .early_channel_count = f10_early_channel_count, 1599 .early_channel_count = f1x_early_channel_count,
1679 .get_error_address = f10_get_error_address, 1600 .map_sysaddr_to_csrow = f1x_map_sysaddr_to_csrow,
1680 .read_dram_base_limit = f10_read_dram_base_limit,
1681 .read_dram_ctl_register = f10_read_dram_ctl_register,
1682 .map_sysaddr_to_csrow = f10_map_sysaddr_to_csrow,
1683 .dbam_to_cs = f10_dbam_to_chip_select, 1601 .dbam_to_cs = f10_dbam_to_chip_select,
1602 .read_dct_pci_cfg = f10_read_dct_pci_cfg,
1603 }
1604 },
1605 [F15_CPUS] = {
1606 .ctl_name = "F15h",
1607 .f1_id = PCI_DEVICE_ID_AMD_15H_NB_F1,
1608 .f3_id = PCI_DEVICE_ID_AMD_15H_NB_F3,
1609 .ops = {
1610 .early_channel_count = f1x_early_channel_count,
1611 .map_sysaddr_to_csrow = f1x_map_sysaddr_to_csrow,
1612 .dbam_to_cs = f15_dbam_to_chip_select,
1613 .read_dct_pci_cfg = f15_read_dct_pci_cfg,
1684 } 1614 }
1685 }, 1615 },
1686}; 1616};
@@ -1770,15 +1700,15 @@ static u16 x8_vectors[] = {
1770 0x0100, 0x0200, 0x0400, 0x0800, 0x1000, 0x2000, 0x4000, 0x8000, 1700 0x0100, 0x0200, 0x0400, 0x0800, 0x1000, 0x2000, 0x4000, 0x8000,
1771}; 1701};
1772 1702
1773static int decode_syndrome(u16 syndrome, u16 *vectors, int num_vecs, 1703static int decode_syndrome(u16 syndrome, u16 *vectors, unsigned num_vecs,
1774 int v_dim) 1704 unsigned v_dim)
1775{ 1705{
1776 unsigned int i, err_sym; 1706 unsigned int i, err_sym;
1777 1707
1778 for (err_sym = 0; err_sym < num_vecs / v_dim; err_sym++) { 1708 for (err_sym = 0; err_sym < num_vecs / v_dim; err_sym++) {
1779 u16 s = syndrome; 1709 u16 s = syndrome;
1780 int v_idx = err_sym * v_dim; 1710 unsigned v_idx = err_sym * v_dim;
1781 int v_end = (err_sym + 1) * v_dim; 1711 unsigned v_end = (err_sym + 1) * v_dim;
1782 1712
1783 /* walk over all 16 bits of the syndrome */ 1713 /* walk over all 16 bits of the syndrome */
1784 for (i = 1; i < (1U << 16); i <<= 1) { 1714 for (i = 1; i < (1U << 16); i <<= 1) {
@@ -1850,51 +1780,50 @@ static int get_channel_from_ecc_syndrome(struct mem_ctl_info *mci, u16 syndrome)
1850 struct amd64_pvt *pvt = mci->pvt_info; 1780 struct amd64_pvt *pvt = mci->pvt_info;
1851 int err_sym = -1; 1781 int err_sym = -1;
1852 1782
1853 if (pvt->syn_type == 8) 1783 if (pvt->ecc_sym_sz == 8)
1854 err_sym = decode_syndrome(syndrome, x8_vectors, 1784 err_sym = decode_syndrome(syndrome, x8_vectors,
1855 ARRAY_SIZE(x8_vectors), 1785 ARRAY_SIZE(x8_vectors),
1856 pvt->syn_type); 1786 pvt->ecc_sym_sz);
1857 else if (pvt->syn_type == 4) 1787 else if (pvt->ecc_sym_sz == 4)
1858 err_sym = decode_syndrome(syndrome, x4_vectors, 1788 err_sym = decode_syndrome(syndrome, x4_vectors,
1859 ARRAY_SIZE(x4_vectors), 1789 ARRAY_SIZE(x4_vectors),
1860 pvt->syn_type); 1790 pvt->ecc_sym_sz);
1861 else { 1791 else {
1862 amd64_warn("Illegal syndrome type: %u\n", pvt->syn_type); 1792 amd64_warn("Illegal syndrome type: %u\n", pvt->ecc_sym_sz);
1863 return err_sym; 1793 return err_sym;
1864 } 1794 }
1865 1795
1866 return map_err_sym_to_channel(err_sym, pvt->syn_type); 1796 return map_err_sym_to_channel(err_sym, pvt->ecc_sym_sz);
1867} 1797}
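The x4/x8 vector tables give, per error symbol, the syndromes its bit flips generate; decode_syndrome() looks for the symbol whose vectors can reproduce the observed syndrome, and map_err_sym_to_channel() then turns that symbol into a channel. A toy model of the search, brute-forcing XOR combinations over a made-up table instead of the driver's tables and its optimized bit walk:

#include <stdio.h>
#include <stdint.h>

/* Illustrative generator syndromes: `dim` entries per error symbol. */
static const uint16_t vectors[] = {
	0x0001, 0x0002, 0x0004, 0x0008,		/* symbol 0 */
	0x0010, 0x0020, 0x0040, 0x0080,		/* symbol 1 */
};

static int decode_syndrome(uint16_t syndrome, const uint16_t *vecs,
			   unsigned num_vecs, unsigned dim)
{
	for (unsigned sym = 0; sym < num_vecs / dim; sym++) {
		/* try every XOR combination of this symbol's generators */
		for (unsigned comb = 0; comb < (1U << dim); comb++) {
			uint16_t s = 0;

			for (unsigned b = 0; b < dim; b++)
				if (comb & (1U << b))
					s ^= vecs[sym * dim + b];

			if (syndrome && s == syndrome)
				return sym;
		}
	}
	return -1;	/* no symbol reproduces this syndrome */
}

int main(void)
{
	printf("syndrome 0x0030 -> symbol %d\n",
	       decode_syndrome(0x0030, vectors, 8, 4));
	return 0;
}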
1868 1798
1869/* 1799/*
1870 * Handle any Correctable Errors (CEs) that have occurred. Check for valid ERROR 1800 * Handle any Correctable Errors (CEs) that have occurred. Check for valid ERROR
1871 * ADDRESS and process. 1801 * ADDRESS and process.
1872 */ 1802 */
1873static void amd64_handle_ce(struct mem_ctl_info *mci, 1803static void amd64_handle_ce(struct mem_ctl_info *mci, struct mce *m)
1874 struct err_regs *info)
1875{ 1804{
1876 struct amd64_pvt *pvt = mci->pvt_info; 1805 struct amd64_pvt *pvt = mci->pvt_info;
1877 u64 sys_addr; 1806 u64 sys_addr;
1807 u16 syndrome;
1878 1808
1879 /* Ensure that the Error Address is VALID */ 1809 /* Ensure that the Error Address is VALID */
1880 if (!(info->nbsh & K8_NBSH_VALID_ERROR_ADDR)) { 1810 if (!(m->status & MCI_STATUS_ADDRV)) {
1881 amd64_mc_err(mci, "HW has no ERROR_ADDRESS available\n"); 1811 amd64_mc_err(mci, "HW has no ERROR_ADDRESS available\n");
1882 edac_mc_handle_ce_no_info(mci, EDAC_MOD_STR); 1812 edac_mc_handle_ce_no_info(mci, EDAC_MOD_STR);
1883 return; 1813 return;
1884 } 1814 }
1885 1815
1886 sys_addr = pvt->ops->get_error_address(mci, info); 1816 sys_addr = get_error_address(m);
1817 syndrome = extract_syndrome(m->status);
1887 1818
1888 amd64_mc_err(mci, "CE ERROR_ADDRESS= 0x%llx\n", sys_addr); 1819 amd64_mc_err(mci, "CE ERROR_ADDRESS= 0x%llx\n", sys_addr);
1889 1820
1890 pvt->ops->map_sysaddr_to_csrow(mci, info, sys_addr); 1821 pvt->ops->map_sysaddr_to_csrow(mci, sys_addr, syndrome);
1891} 1822}
1892 1823
1893/* Handle any Un-correctable Errors (UEs) */ 1824/* Handle any Un-correctable Errors (UEs) */
1894static void amd64_handle_ue(struct mem_ctl_info *mci, 1825static void amd64_handle_ue(struct mem_ctl_info *mci, struct mce *m)
1895 struct err_regs *info)
1896{ 1826{
1897 struct amd64_pvt *pvt = mci->pvt_info;
1898 struct mem_ctl_info *log_mci, *src_mci = NULL; 1827 struct mem_ctl_info *log_mci, *src_mci = NULL;
1899 int csrow; 1828 int csrow;
1900 u64 sys_addr; 1829 u64 sys_addr;
@@ -1902,13 +1831,13 @@ static void amd64_handle_ue(struct mem_ctl_info *mci,
1902 1831
1903 log_mci = mci; 1832 log_mci = mci;
1904 1833
1905 if (!(info->nbsh & K8_NBSH_VALID_ERROR_ADDR)) { 1834 if (!(m->status & MCI_STATUS_ADDRV)) {
1906 amd64_mc_err(mci, "HW has no ERROR_ADDRESS available\n"); 1835 amd64_mc_err(mci, "HW has no ERROR_ADDRESS available\n");
1907 edac_mc_handle_ue_no_info(log_mci, EDAC_MOD_STR); 1836 edac_mc_handle_ue_no_info(log_mci, EDAC_MOD_STR);
1908 return; 1837 return;
1909 } 1838 }
1910 1839
1911 sys_addr = pvt->ops->get_error_address(mci, info); 1840 sys_addr = get_error_address(m);
1912 1841
1913 /* 1842 /*
1914 * Find out which node the error address belongs to. This may be 1843 * Find out which node the error address belongs to. This may be
@@ -1936,14 +1865,14 @@ static void amd64_handle_ue(struct mem_ctl_info *mci,
1936} 1865}
1937 1866
1938static inline void __amd64_decode_bus_error(struct mem_ctl_info *mci, 1867static inline void __amd64_decode_bus_error(struct mem_ctl_info *mci,
1939 struct err_regs *info) 1868 struct mce *m)
1940{ 1869{
1941 u16 ec = EC(info->nbsl); 1870 u16 ec = EC(m->status);
1942 u8 xec = XEC(info->nbsl, 0x1f); 1871 u8 xec = XEC(m->status, 0x1f);
1943 int ecc_type = (info->nbsh >> 13) & 0x3; 1872 u8 ecc_type = (m->status >> 45) & 0x3;
1944 1873
1945 /* Bail early out if this was an 'observed' error */ 1874 /* Bail early out if this was an 'observed' error */
1946 if (PP(ec) == K8_NBSL_PP_OBS) 1875 if (PP(ec) == NBSL_PP_OBS)
1947 return; 1876 return;
1948 1877
1949 /* Do only ECC errors */ 1878 /* Do only ECC errors */
@@ -1951,34 +1880,16 @@ static inline void __amd64_decode_bus_error(struct mem_ctl_info *mci,
1951 return; 1880 return;
1952 1881
1953 if (ecc_type == 2) 1882 if (ecc_type == 2)
1954 amd64_handle_ce(mci, info); 1883 amd64_handle_ce(mci, m);
1955 else if (ecc_type == 1) 1884 else if (ecc_type == 1)
1956 amd64_handle_ue(mci, info); 1885 amd64_handle_ue(mci, m);
1957} 1886}
1958 1887
1959void amd64_decode_bus_error(int node_id, struct mce *m, u32 nbcfg) 1888void amd64_decode_bus_error(int node_id, struct mce *m, u32 nbcfg)
1960{ 1889{
1961 struct mem_ctl_info *mci = mcis[node_id]; 1890 struct mem_ctl_info *mci = mcis[node_id];
1962 struct err_regs regs;
1963
1964 regs.nbsl = (u32) m->status;
1965 regs.nbsh = (u32)(m->status >> 32);
1966 regs.nbeal = (u32) m->addr;
1967 regs.nbeah = (u32)(m->addr >> 32);
1968 regs.nbcfg = nbcfg;
1969
1970 __amd64_decode_bus_error(mci, &regs);
1971
1972 /*
1973 * Check the UE bit of the NB status high register, if set generate some
1974 * logs. If NOT a GART error, then process the event as a NO-INFO event.
1975 * If it was a GART error, skip that process.
1976 *
1977 * FIXME: this should go somewhere else, if at all.
1978 */
1979 if (regs.nbsh & K8_NBSH_UC_ERR && !report_gart_errors)
1980 edac_mc_handle_ue_no_info(mci, "UE bit is set");
1981 1891
1892 __amd64_decode_bus_error(mci, m);
1982} 1893}
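With struct err_regs gone, everything the handlers need now comes straight out of the MCE record: the ADDRV bit gates use of the error address, the low 16 bits of the status give the error code, bits [20:16] the extended error code, and bits [46:45] say whether this is a CE or a UE. A small sketch decoding those fields from a hypothetical MCi_STATUS value:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t status = 0x9c00410000000152ULL;	/* hypothetical MCi_STATUS */

	unsigned addr_valid = (status >> 58) & 0x1;	/* MCI_STATUS_ADDRV */
	uint16_t ec	    = status & 0xffff;		/* error code */
	uint8_t  xec	    = (status >> 16) & 0x1f;	/* extended error code */
	uint8_t  ecc_type   = (status >> 45) & 0x3;	/* 2 = CE, 1 = UE */

	printf("ADDRV=%u ec=0x%04x xec=0x%02x ecc_type=%u\n",
	       addr_valid, ec, xec, ecc_type);
	return 0;
}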
1983 1894
1984/* 1895/*
@@ -2027,9 +1938,10 @@ static void free_mc_sibling_devs(struct amd64_pvt *pvt)
2027 */ 1938 */
2028static void read_mc_regs(struct amd64_pvt *pvt) 1939static void read_mc_regs(struct amd64_pvt *pvt)
2029{ 1940{
1941 struct cpuinfo_x86 *c = &boot_cpu_data;
2030 u64 msr_val; 1942 u64 msr_val;
2031 u32 tmp; 1943 u32 tmp;
2032 int dram; 1944 unsigned range;
2033 1945
2034 /* 1946 /*
2035 * Retrieve TOP_MEM and TOP_MEM2; no masking off of reserved bits since 1947 * Retrieve TOP_MEM and TOP_MEM2; no masking off of reserved bits since
@@ -2046,75 +1958,66 @@ static void read_mc_regs(struct amd64_pvt *pvt)
2046 } else 1958 } else
2047 debugf0(" TOP_MEM2 disabled.\n"); 1959 debugf0(" TOP_MEM2 disabled.\n");
2048 1960
2049 amd64_read_pci_cfg(pvt->F3, K8_NBCAP, &pvt->nbcap); 1961 amd64_read_pci_cfg(pvt->F3, NBCAP, &pvt->nbcap);
2050 1962
2051 if (pvt->ops->read_dram_ctl_register) 1963 read_dram_ctl_register(pvt);
2052 pvt->ops->read_dram_ctl_register(pvt);
2053 1964
2054 for (dram = 0; dram < DRAM_REG_COUNT; dram++) { 1965 for (range = 0; range < DRAM_RANGES; range++) {
2055 /* 1966 u8 rw;
2056 * Call CPU specific READ function to get the DRAM Base and
2057 * Limit values from the DCT.
2058 */
2059 pvt->ops->read_dram_base_limit(pvt, dram);
2060 1967
2061 /* 1968 /* read settings for this DRAM range */
2062 * Only print out debug info on rows with both R and W Enabled. 1969 read_dram_base_limit_regs(pvt, range);
2063 * Normal processing, compiler should optimize this whole 'if' 1970
2064 * debug output block away. 1971 rw = dram_rw(pvt, range);
2065 */ 1972 if (!rw)
2066 if (pvt->dram_rw_en[dram] != 0) { 1973 continue;
2067 debugf1(" DRAM-BASE[%d]: 0x%016llx " 1974
2068 "DRAM-LIMIT: 0x%016llx\n", 1975 debugf1(" DRAM range[%d], base: 0x%016llx; limit: 0x%016llx\n",
2069 dram, 1976 range,
2070 pvt->dram_base[dram], 1977 get_dram_base(pvt, range),
2071 pvt->dram_limit[dram]); 1978 get_dram_limit(pvt, range));
2072 1979
2073 debugf1(" IntlvEn=%s %s %s " 1980 debugf1(" IntlvEn=%s; Range access: %s%s IntlvSel=%d DstNode=%d\n",
2074 "IntlvSel=%d DstNode=%d\n", 1981 dram_intlv_en(pvt, range) ? "Enabled" : "Disabled",
2075 pvt->dram_IntlvEn[dram] ? 1982 (rw & 0x1) ? "R" : "-",
2076 "Enabled" : "Disabled", 1983 (rw & 0x2) ? "W" : "-",
2077 (pvt->dram_rw_en[dram] & 0x2) ? "W" : "!W", 1984 dram_intlv_sel(pvt, range),
2078 (pvt->dram_rw_en[dram] & 0x1) ? "R" : "!R", 1985 dram_dst_node(pvt, range));
2079 pvt->dram_IntlvSel[dram],
2080 pvt->dram_DstNode[dram]);
2081 }
2082 } 1986 }
2083 1987
2084 amd64_read_dct_base_mask(pvt); 1988 read_dct_base_mask(pvt);
2085 1989
2086 amd64_read_pci_cfg(pvt->F1, K8_DHAR, &pvt->dhar); 1990 amd64_read_pci_cfg(pvt->F1, DHAR, &pvt->dhar);
2087 amd64_read_dbam_reg(pvt); 1991 amd64_read_dct_pci_cfg(pvt, DBAM0, &pvt->dbam0);
2088 1992
2089 amd64_read_pci_cfg(pvt->F3, F10_ONLINE_SPARE, &pvt->online_spare); 1993 amd64_read_pci_cfg(pvt->F3, F10_ONLINE_SPARE, &pvt->online_spare);
2090 1994
2091 amd64_read_pci_cfg(pvt->F2, F10_DCLR_0, &pvt->dclr0); 1995 amd64_read_dct_pci_cfg(pvt, DCLR0, &pvt->dclr0);
2092 amd64_read_pci_cfg(pvt->F2, F10_DCHR_0, &pvt->dchr0); 1996 amd64_read_dct_pci_cfg(pvt, DCHR0, &pvt->dchr0);
2093 1997
2094 if (boot_cpu_data.x86 >= 0x10) { 1998 if (!dct_ganging_enabled(pvt)) {
2095 if (!dct_ganging_enabled(pvt)) { 1999 amd64_read_dct_pci_cfg(pvt, DCLR1, &pvt->dclr1);
2096 amd64_read_pci_cfg(pvt->F2, F10_DCLR_1, &pvt->dclr1); 2000 amd64_read_dct_pci_cfg(pvt, DCHR1, &pvt->dchr1);
2097 amd64_read_pci_cfg(pvt->F2, F10_DCHR_1, &pvt->dchr1);
2098 }
2099 amd64_read_pci_cfg(pvt->F3, EXT_NB_MCA_CFG, &tmp);
2100 } 2001 }
2101 2002
2102 if (boot_cpu_data.x86 == 0x10 && 2003 pvt->ecc_sym_sz = 4;
2103 boot_cpu_data.x86_model > 7 &&
2104 /* F3x180[EccSymbolSize]=1 => x8 symbols */
2105 tmp & BIT(25))
2106 pvt->syn_type = 8;
2107 else
2108 pvt->syn_type = 4;
2109 2004
2110 amd64_dump_misc_regs(pvt); 2005 if (c->x86 >= 0x10) {
2006 amd64_read_pci_cfg(pvt->F3, EXT_NB_MCA_CFG, &tmp);
2007 amd64_read_dct_pci_cfg(pvt, DBAM1, &pvt->dbam1);
2008
2009 /* F10h, revD and later can do x8 ECC too */
2010 if ((c->x86 > 0x10 || c->x86_model > 7) && tmp & BIT(25))
2011 pvt->ecc_sym_sz = 8;
2012 }
2013 dump_misc_regs(pvt);
2111} 2014}
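The ECC symbol size now defaults to 4 and is bumped to 8 only when the CPU is F10h rev D or later (model > 7) or a newer family and F3x180 bit 25 (EccSymbolSize) is set. A small sketch of that decision with hypothetical family/model/register inputs:

#include <stdio.h>
#include <stdint.h>

static unsigned ecc_symbol_size(unsigned family, unsigned model,
				uint32_t ext_nb_mca_cfg)
{
	unsigned sym_sz = 4;			/* default: x4 syndromes */

	if (family >= 0x10) {
		/* F10h revD and later, or any newer family, can do x8 */
		if ((family > 0x10 || model > 7) &&
		    (ext_nb_mca_cfg & (1U << 25)))	/* EccSymbolSize */
			sym_sz = 8;
	}
	return sym_sz;
}

int main(void)
{
	printf("F10h model 9, bit25 set -> x%u\n", ecc_symbol_size(0x10, 9, 1U << 25));
	printf("F10h model 4, bit25 set -> x%u\n", ecc_symbol_size(0x10, 4, 1U << 25));
	return 0;
}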
2112 2015
2113/* 2016/*
2114 * NOTE: CPU Revision Dependent code 2017 * NOTE: CPU Revision Dependent code
2115 * 2018 *
2116 * Input: 2019 * Input:
2117 * @csrow_nr ChipSelect Row Number (0..pvt->cs_count-1) 2020 * @csrow_nr ChipSelect Row Number (0..NUM_CHIPSELECTS-1)
2118 * k8 private pointer to --> 2021 * k8 private pointer to -->
2119 * DRAM Bank Address mapping register 2022 * DRAM Bank Address mapping register
2120 * node_id 2023 * node_id
@@ -2144,7 +2047,7 @@ static void read_mc_regs(struct amd64_pvt *pvt)
2144 * encompasses 2047 * encompasses
2145 * 2048 *
2146 */ 2049 */
2147static u32 amd64_csrow_nr_pages(int csrow_nr, struct amd64_pvt *pvt) 2050static u32 amd64_csrow_nr_pages(struct amd64_pvt *pvt, u8 dct, int csrow_nr)
2148{ 2051{
2149 u32 cs_mode, nr_pages; 2052 u32 cs_mode, nr_pages;
2150 2053
@@ -2157,7 +2060,7 @@ static u32 amd64_csrow_nr_pages(int csrow_nr, struct amd64_pvt *pvt)
2157 */ 2060 */
2158 cs_mode = (pvt->dbam0 >> ((csrow_nr / 2) * 4)) & 0xF; 2061 cs_mode = (pvt->dbam0 >> ((csrow_nr / 2) * 4)) & 0xF;
2159 2062
2160 nr_pages = pvt->ops->dbam_to_cs(pvt, cs_mode) << (20 - PAGE_SHIFT); 2063 nr_pages = pvt->ops->dbam_to_cs(pvt, dct, cs_mode) << (20 - PAGE_SHIFT);
2161 2064
2162 /* 2065 /*
2163 * If dual channel then double the memory size of single channel. 2066 * If dual channel then double the memory size of single channel.
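Each DBAM nibble describes a csrow pair, so the cs_mode for csrow N is nibble N/2; the per-family dbam_to_cs() hook turns that mode into megabytes, which are then shifted into pages. A simplified sketch with a made-up DBAM value and an illustrative mode-to-MB table (not the driver's DDR2/DDR3 tables):

#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT 12

/* Illustrative cs_mode -> megabytes mapping, standing in for dbam_to_cs(). */
static const int cs_mode_to_mb[] = { 128, 256, 512, 512, 512, 1024, 1024, 2048 };

static uint32_t csrow_nr_pages(uint32_t dbam, int csrow_nr)
{
	/* each DBAM nibble covers a pair of csrows */
	uint32_t cs_mode = (dbam >> ((csrow_nr / 2) * 4)) & 0xF;

	/* megabytes -> pages: shift by (20 - PAGE_SHIFT) */
	return (uint32_t)cs_mode_to_mb[cs_mode] << (20 - PAGE_SHIFT);
}

int main(void)
{
	uint32_t dbam = 0x00005210;		/* hypothetical F2x80 contents */

	for (int cs = 0; cs < 8; cs += 2)
		printf("csrow %d: %u pages\n", cs, csrow_nr_pages(dbam, cs));
	return 0;
}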
@@ -2180,23 +2083,22 @@ static int init_csrows(struct mem_ctl_info *mci)
2180{ 2083{
2181 struct csrow_info *csrow; 2084 struct csrow_info *csrow;
2182 struct amd64_pvt *pvt = mci->pvt_info; 2085 struct amd64_pvt *pvt = mci->pvt_info;
2183 u64 input_addr_min, input_addr_max, sys_addr; 2086 u64 input_addr_min, input_addr_max, sys_addr, base, mask;
2184 u32 val; 2087 u32 val;
2185 int i, empty = 1; 2088 int i, empty = 1;
2186 2089
2187 amd64_read_pci_cfg(pvt->F3, K8_NBCFG, &val); 2090 amd64_read_pci_cfg(pvt->F3, NBCFG, &val);
2188 2091
2189 pvt->nbcfg = val; 2092 pvt->nbcfg = val;
2190 pvt->ctl_error_info.nbcfg = val;
2191 2093
2192 debugf0("node %d, NBCFG=0x%08x[ChipKillEccCap: %d|DramEccEn: %d]\n", 2094 debugf0("node %d, NBCFG=0x%08x[ChipKillEccCap: %d|DramEccEn: %d]\n",
2193 pvt->mc_node_id, val, 2095 pvt->mc_node_id, val,
2194 !!(val & K8_NBCFG_CHIPKILL), !!(val & K8_NBCFG_ECC_ENABLE)); 2096 !!(val & NBCFG_CHIPKILL), !!(val & NBCFG_ECC_ENABLE));
2195 2097
2196 for (i = 0; i < pvt->cs_count; i++) { 2098 for_each_chip_select(i, 0, pvt) {
2197 csrow = &mci->csrows[i]; 2099 csrow = &mci->csrows[i];
2198 2100
2199 if ((pvt->dcsb0[i] & K8_DCSB_CS_ENABLE) == 0) { 2101 if (!csrow_enabled(i, 0, pvt)) {
2200 debugf1("----CSROW %d EMPTY for node %d\n", i, 2102 debugf1("----CSROW %d EMPTY for node %d\n", i,
2201 pvt->mc_node_id); 2103 pvt->mc_node_id);
2202 continue; 2104 continue;
@@ -2206,13 +2108,15 @@ static int init_csrows(struct mem_ctl_info *mci)
2206 i, pvt->mc_node_id); 2108 i, pvt->mc_node_id);
2207 2109
2208 empty = 0; 2110 empty = 0;
2209 csrow->nr_pages = amd64_csrow_nr_pages(i, pvt); 2111 csrow->nr_pages = amd64_csrow_nr_pages(pvt, 0, i);
2210 find_csrow_limits(mci, i, &input_addr_min, &input_addr_max); 2112 find_csrow_limits(mci, i, &input_addr_min, &input_addr_max);
2211 sys_addr = input_addr_to_sys_addr(mci, input_addr_min); 2113 sys_addr = input_addr_to_sys_addr(mci, input_addr_min);
2212 csrow->first_page = (u32) (sys_addr >> PAGE_SHIFT); 2114 csrow->first_page = (u32) (sys_addr >> PAGE_SHIFT);
2213 sys_addr = input_addr_to_sys_addr(mci, input_addr_max); 2115 sys_addr = input_addr_to_sys_addr(mci, input_addr_max);
2214 csrow->last_page = (u32) (sys_addr >> PAGE_SHIFT); 2116 csrow->last_page = (u32) (sys_addr >> PAGE_SHIFT);
2215 csrow->page_mask = ~mask_from_dct_mask(pvt, i); 2117
2118 get_cs_base_and_mask(pvt, i, 0, &base, &mask);
2119 csrow->page_mask = ~mask;
2216 /* 8 bytes of resolution */ 2120 /* 8 bytes of resolution */
2217 2121
2218 csrow->mtype = amd64_determine_memory_type(pvt, i); 2122 csrow->mtype = amd64_determine_memory_type(pvt, i);
@@ -2231,9 +2135,9 @@ static int init_csrows(struct mem_ctl_info *mci)
2231 /* 2135 /*
2232 * determine whether CHIPKILL or JUST ECC or NO ECC is operating 2136 * determine whether CHIPKILL or JUST ECC or NO ECC is operating
2233 */ 2137 */
2234 if (pvt->nbcfg & K8_NBCFG_ECC_ENABLE) 2138 if (pvt->nbcfg & NBCFG_ECC_ENABLE)
2235 csrow->edac_mode = 2139 csrow->edac_mode =
2236 (pvt->nbcfg & K8_NBCFG_CHIPKILL) ? 2140 (pvt->nbcfg & NBCFG_CHIPKILL) ?
2237 EDAC_S4ECD4ED : EDAC_SECDED; 2141 EDAC_S4ECD4ED : EDAC_SECDED;
2238 else 2142 else
2239 csrow->edac_mode = EDAC_NONE; 2143 csrow->edac_mode = EDAC_NONE;
@@ -2243,7 +2147,7 @@ static int init_csrows(struct mem_ctl_info *mci)
2243} 2147}
2244 2148
2245/* get all cores on this DCT */ 2149/* get all cores on this DCT */
2246static void get_cpus_on_this_dct_cpumask(struct cpumask *mask, int nid) 2150static void get_cpus_on_this_dct_cpumask(struct cpumask *mask, unsigned nid)
2247{ 2151{
2248 int cpu; 2152 int cpu;
2249 2153
@@ -2253,7 +2157,7 @@ static void get_cpus_on_this_dct_cpumask(struct cpumask *mask, int nid)
2253} 2157}
2254 2158
2255/* check MCG_CTL on all the cpus on this node */ 2159/* check MCG_CTL on all the cpus on this node */
2256static bool amd64_nb_mce_bank_enabled_on_node(int nid) 2160static bool amd64_nb_mce_bank_enabled_on_node(unsigned nid)
2257{ 2161{
2258 cpumask_var_t mask; 2162 cpumask_var_t mask;
2259 int cpu, nbe; 2163 int cpu, nbe;
@@ -2270,7 +2174,7 @@ static bool amd64_nb_mce_bank_enabled_on_node(int nid)
2270 2174
2271 for_each_cpu(cpu, mask) { 2175 for_each_cpu(cpu, mask) {
2272 struct msr *reg = per_cpu_ptr(msrs, cpu); 2176 struct msr *reg = per_cpu_ptr(msrs, cpu);
2273 nbe = reg->l & K8_MSR_MCGCTL_NBE; 2177 nbe = reg->l & MSR_MCGCTL_NBE;
2274 2178
2275 debugf0("core: %u, MCG_CTL: 0x%llx, NB MSR is %s\n", 2179 debugf0("core: %u, MCG_CTL: 0x%llx, NB MSR is %s\n",
2276 cpu, reg->q, 2180 cpu, reg->q,
@@ -2305,16 +2209,16 @@ static int toggle_ecc_err_reporting(struct ecc_settings *s, u8 nid, bool on)
2305 struct msr *reg = per_cpu_ptr(msrs, cpu); 2209 struct msr *reg = per_cpu_ptr(msrs, cpu);
2306 2210
2307 if (on) { 2211 if (on) {
2308 if (reg->l & K8_MSR_MCGCTL_NBE) 2212 if (reg->l & MSR_MCGCTL_NBE)
2309 s->flags.nb_mce_enable = 1; 2213 s->flags.nb_mce_enable = 1;
2310 2214
2311 reg->l |= K8_MSR_MCGCTL_NBE; 2215 reg->l |= MSR_MCGCTL_NBE;
2312 } else { 2216 } else {
2313 /* 2217 /*
2314 * Turn off NB MCE reporting only when it was off before 2218 * Turn off NB MCE reporting only when it was off before
2315 */ 2219 */
2316 if (!s->flags.nb_mce_enable) 2220 if (!s->flags.nb_mce_enable)
2317 reg->l &= ~K8_MSR_MCGCTL_NBE; 2221 reg->l &= ~MSR_MCGCTL_NBE;
2318 } 2222 }
2319 } 2223 }
2320 wrmsr_on_cpus(cmask, MSR_IA32_MCG_CTL, msrs); 2224 wrmsr_on_cpus(cmask, MSR_IA32_MCG_CTL, msrs);
@@ -2328,40 +2232,38 @@ static bool enable_ecc_error_reporting(struct ecc_settings *s, u8 nid,
2328 struct pci_dev *F3) 2232 struct pci_dev *F3)
2329{ 2233{
2330 bool ret = true; 2234 bool ret = true;
2331 u32 value, mask = K8_NBCTL_CECCEn | K8_NBCTL_UECCEn; 2235 u32 value, mask = 0x3; /* UECC/CECC enable */
2332 2236
2333 if (toggle_ecc_err_reporting(s, nid, ON)) { 2237 if (toggle_ecc_err_reporting(s, nid, ON)) {
2334 amd64_warn("Error enabling ECC reporting over MCGCTL!\n"); 2238 amd64_warn("Error enabling ECC reporting over MCGCTL!\n");
2335 return false; 2239 return false;
2336 } 2240 }
2337 2241
2338 amd64_read_pci_cfg(F3, K8_NBCTL, &value); 2242 amd64_read_pci_cfg(F3, NBCTL, &value);
2339 2243
2340 /* turn on UECCEn and CECCEn bits */
2341 s->old_nbctl = value & mask; 2244 s->old_nbctl = value & mask;
2342 s->nbctl_valid = true; 2245 s->nbctl_valid = true;
2343 2246
2344 value |= mask; 2247 value |= mask;
2345 pci_write_config_dword(F3, K8_NBCTL, value); 2248 amd64_write_pci_cfg(F3, NBCTL, value);
2346 2249
2347 amd64_read_pci_cfg(F3, K8_NBCFG, &value); 2250 amd64_read_pci_cfg(F3, NBCFG, &value);
2348 2251
2349 debugf0("1: node %d, NBCFG=0x%08x[ChipKillEccCap: %d|DramEccEn: %d]\n", 2252 debugf0("1: node %d, NBCFG=0x%08x[DramEccEn: %d]\n",
2350 nid, value, 2253 nid, value, !!(value & NBCFG_ECC_ENABLE));
2351 !!(value & K8_NBCFG_CHIPKILL), !!(value & K8_NBCFG_ECC_ENABLE));
2352 2254
2353 if (!(value & K8_NBCFG_ECC_ENABLE)) { 2255 if (!(value & NBCFG_ECC_ENABLE)) {
2354 amd64_warn("DRAM ECC disabled on this node, enabling...\n"); 2256 amd64_warn("DRAM ECC disabled on this node, enabling...\n");
2355 2257
2356 s->flags.nb_ecc_prev = 0; 2258 s->flags.nb_ecc_prev = 0;
2357 2259
2358 /* Attempt to turn on DRAM ECC Enable */ 2260 /* Attempt to turn on DRAM ECC Enable */
2359 value |= K8_NBCFG_ECC_ENABLE; 2261 value |= NBCFG_ECC_ENABLE;
2360 pci_write_config_dword(F3, K8_NBCFG, value); 2262 amd64_write_pci_cfg(F3, NBCFG, value);
2361 2263
2362 amd64_read_pci_cfg(F3, K8_NBCFG, &value); 2264 amd64_read_pci_cfg(F3, NBCFG, &value);
2363 2265
2364 if (!(value & K8_NBCFG_ECC_ENABLE)) { 2266 if (!(value & NBCFG_ECC_ENABLE)) {
2365 amd64_warn("Hardware rejected DRAM ECC enable," 2267 amd64_warn("Hardware rejected DRAM ECC enable,"
2366 "check memory DIMM configuration.\n"); 2268 "check memory DIMM configuration.\n");
2367 ret = false; 2269 ret = false;
@@ -2372,9 +2274,8 @@ static bool enable_ecc_error_reporting(struct ecc_settings *s, u8 nid,
2372 s->flags.nb_ecc_prev = 1; 2274 s->flags.nb_ecc_prev = 1;
2373 } 2275 }
2374 2276
2375 debugf0("2: node %d, NBCFG=0x%08x[ChipKillEccCap: %d|DramEccEn: %d]\n", 2277 debugf0("2: node %d, NBCFG=0x%08x[DramEccEn: %d]\n",
2376 nid, value, 2278 nid, value, !!(value & NBCFG_ECC_ENABLE));
2377 !!(value & K8_NBCFG_CHIPKILL), !!(value & K8_NBCFG_ECC_ENABLE));
2378 2279
2379 return ret; 2280 return ret;
2380} 2281}
@@ -2382,22 +2283,23 @@ static bool enable_ecc_error_reporting(struct ecc_settings *s, u8 nid,
2382static void restore_ecc_error_reporting(struct ecc_settings *s, u8 nid, 2283static void restore_ecc_error_reporting(struct ecc_settings *s, u8 nid,
2383 struct pci_dev *F3) 2284 struct pci_dev *F3)
2384{ 2285{
2385 u32 value, mask = K8_NBCTL_CECCEn | K8_NBCTL_UECCEn; 2286 u32 value, mask = 0x3; /* UECC/CECC enable */
2287
2386 2288
2387 if (!s->nbctl_valid) 2289 if (!s->nbctl_valid)
2388 return; 2290 return;
2389 2291
2390 amd64_read_pci_cfg(F3, K8_NBCTL, &value); 2292 amd64_read_pci_cfg(F3, NBCTL, &value);
2391 value &= ~mask; 2293 value &= ~mask;
2392 value |= s->old_nbctl; 2294 value |= s->old_nbctl;
2393 2295
2394 pci_write_config_dword(F3, K8_NBCTL, value); 2296 amd64_write_pci_cfg(F3, NBCTL, value);
2395 2297
2396 /* restore previous BIOS DRAM ECC "off" setting we force-enabled */ 2298 /* restore previous BIOS DRAM ECC "off" setting we force-enabled */
2397 if (!s->flags.nb_ecc_prev) { 2299 if (!s->flags.nb_ecc_prev) {
2398 amd64_read_pci_cfg(F3, K8_NBCFG, &value); 2300 amd64_read_pci_cfg(F3, NBCFG, &value);
2399 value &= ~K8_NBCFG_ECC_ENABLE; 2301 value &= ~NBCFG_ECC_ENABLE;
2400 pci_write_config_dword(F3, K8_NBCFG, value); 2302 amd64_write_pci_cfg(F3, NBCFG, value);
2401 } 2303 }
2402 2304
2403 /* restore the NB Enable MCGCTL bit */ 2305 /* restore the NB Enable MCGCTL bit */
@@ -2423,9 +2325,9 @@ static bool ecc_enabled(struct pci_dev *F3, u8 nid)
2423 u8 ecc_en = 0; 2325 u8 ecc_en = 0;
2424 bool nb_mce_en = false; 2326 bool nb_mce_en = false;
2425 2327
2426 amd64_read_pci_cfg(F3, K8_NBCFG, &value); 2328 amd64_read_pci_cfg(F3, NBCFG, &value);
2427 2329
2428 ecc_en = !!(value & K8_NBCFG_ECC_ENABLE); 2330 ecc_en = !!(value & NBCFG_ECC_ENABLE);
2429 amd64_info("DRAM ECC %s.\n", (ecc_en ? "enabled" : "disabled")); 2331 amd64_info("DRAM ECC %s.\n", (ecc_en ? "enabled" : "disabled"));
2430 2332
2431 nb_mce_en = amd64_nb_mce_bank_enabled_on_node(nid); 2333 nb_mce_en = amd64_nb_mce_bank_enabled_on_node(nid);
@@ -2463,23 +2365,24 @@ static void set_mc_sysfs_attrs(struct mem_ctl_info *mci)
2463 mci->mc_driver_sysfs_attributes = sysfs_attrs; 2365 mci->mc_driver_sysfs_attributes = sysfs_attrs;
2464} 2366}
2465 2367
2466static void setup_mci_misc_attrs(struct mem_ctl_info *mci) 2368static void setup_mci_misc_attrs(struct mem_ctl_info *mci,
2369 struct amd64_family_type *fam)
2467{ 2370{
2468 struct amd64_pvt *pvt = mci->pvt_info; 2371 struct amd64_pvt *pvt = mci->pvt_info;
2469 2372
2470 mci->mtype_cap = MEM_FLAG_DDR2 | MEM_FLAG_RDDR2; 2373 mci->mtype_cap = MEM_FLAG_DDR2 | MEM_FLAG_RDDR2;
2471 mci->edac_ctl_cap = EDAC_FLAG_NONE; 2374 mci->edac_ctl_cap = EDAC_FLAG_NONE;
2472 2375
2473 if (pvt->nbcap & K8_NBCAP_SECDED) 2376 if (pvt->nbcap & NBCAP_SECDED)
2474 mci->edac_ctl_cap |= EDAC_FLAG_SECDED; 2377 mci->edac_ctl_cap |= EDAC_FLAG_SECDED;
2475 2378
2476 if (pvt->nbcap & K8_NBCAP_CHIPKILL) 2379 if (pvt->nbcap & NBCAP_CHIPKILL)
2477 mci->edac_ctl_cap |= EDAC_FLAG_S4ECD4ED; 2380 mci->edac_ctl_cap |= EDAC_FLAG_S4ECD4ED;
2478 2381
2479 mci->edac_cap = amd64_determine_edac_cap(pvt); 2382 mci->edac_cap = amd64_determine_edac_cap(pvt);
2480 mci->mod_name = EDAC_MOD_STR; 2383 mci->mod_name = EDAC_MOD_STR;
2481 mci->mod_ver = EDAC_AMD64_VERSION; 2384 mci->mod_ver = EDAC_AMD64_VERSION;
2482 mci->ctl_name = pvt->ctl_name; 2385 mci->ctl_name = fam->ctl_name;
2483 mci->dev_name = pci_name(pvt->F2); 2386 mci->dev_name = pci_name(pvt->F2);
2484 mci->ctl_page_to_phys = NULL; 2387 mci->ctl_page_to_phys = NULL;
2485 2388
@@ -2500,14 +2403,16 @@ static struct amd64_family_type *amd64_per_family_init(struct amd64_pvt *pvt)
2500 case 0xf: 2403 case 0xf:
2501 fam_type = &amd64_family_types[K8_CPUS]; 2404 fam_type = &amd64_family_types[K8_CPUS];
2502 pvt->ops = &amd64_family_types[K8_CPUS].ops; 2405 pvt->ops = &amd64_family_types[K8_CPUS].ops;
2503 pvt->ctl_name = fam_type->ctl_name;
2504 pvt->min_scrubrate = K8_MIN_SCRUB_RATE_BITS;
2505 break; 2406 break;
2407
2506 case 0x10: 2408 case 0x10:
2507 fam_type = &amd64_family_types[F10_CPUS]; 2409 fam_type = &amd64_family_types[F10_CPUS];
2508 pvt->ops = &amd64_family_types[F10_CPUS].ops; 2410 pvt->ops = &amd64_family_types[F10_CPUS].ops;
2509 pvt->ctl_name = fam_type->ctl_name; 2411 break;
2510 pvt->min_scrubrate = F10_MIN_SCRUB_RATE_BITS; 2412
2413 case 0x15:
2414 fam_type = &amd64_family_types[F15_CPUS];
2415 pvt->ops = &amd64_family_types[F15_CPUS].ops;
2511 break; 2416 break;
2512 2417
2513 default: 2418 default:
@@ -2517,7 +2422,7 @@ static struct amd64_family_type *amd64_per_family_init(struct amd64_pvt *pvt)
2517 2422
2518 pvt->ext_model = boot_cpu_data.x86_model >> 4; 2423 pvt->ext_model = boot_cpu_data.x86_model >> 4;
2519 2424
2520 amd64_info("%s %sdetected (node %d).\n", pvt->ctl_name, 2425 amd64_info("%s %sdetected (node %d).\n", fam_type->ctl_name,
2521 (fam == 0xf ? 2426 (fam == 0xf ?
2522 (pvt->ext_model >= K8_REV_F ? "revF or later " 2427 (pvt->ext_model >= K8_REV_F ? "revF or later "
2523 : "revE or earlier ") 2428 : "revE or earlier ")
@@ -2564,14 +2469,14 @@ static int amd64_init_one_instance(struct pci_dev *F2)
2564 goto err_siblings; 2469 goto err_siblings;
2565 2470
2566 ret = -ENOMEM; 2471 ret = -ENOMEM;
2567 mci = edac_mc_alloc(0, pvt->cs_count, pvt->channel_count, nid); 2472 mci = edac_mc_alloc(0, pvt->csels[0].b_cnt, pvt->channel_count, nid);
2568 if (!mci) 2473 if (!mci)
2569 goto err_siblings; 2474 goto err_siblings;
2570 2475
2571 mci->pvt_info = pvt; 2476 mci->pvt_info = pvt;
2572 mci->dev = &pvt->F2->dev; 2477 mci->dev = &pvt->F2->dev;
2573 2478
2574 setup_mci_misc_attrs(mci); 2479 setup_mci_misc_attrs(mci, fam_type);
2575 2480
2576 if (init_csrows(mci)) 2481 if (init_csrows(mci))
2577 mci->edac_cap = EDAC_FLAG_NONE; 2482 mci->edac_cap = EDAC_FLAG_NONE;
@@ -2714,6 +2619,15 @@ static const struct pci_device_id amd64_pci_table[] __devinitdata = {
2714 .class = 0, 2619 .class = 0,
2715 .class_mask = 0, 2620 .class_mask = 0,
2716 }, 2621 },
2622 {
2623 .vendor = PCI_VENDOR_ID_AMD,
2624 .device = PCI_DEVICE_ID_AMD_15H_NB_F2,
2625 .subvendor = PCI_ANY_ID,
2626 .subdevice = PCI_ANY_ID,
2627 .class = 0,
2628 .class_mask = 0,
2629 },
2630
2717 {0, } 2631 {0, }
2718}; 2632};
2719MODULE_DEVICE_TABLE(pci, amd64_pci_table); 2633MODULE_DEVICE_TABLE(pci, amd64_pci_table);
@@ -2754,7 +2668,7 @@ static int __init amd64_edac_init(void)
2754{ 2668{
2755 int err = -ENODEV; 2669 int err = -ENODEV;
2756 2670
2757 edac_printk(KERN_INFO, EDAC_MOD_STR, EDAC_AMD64_VERSION "\n"); 2671 printk(KERN_INFO "AMD64 EDAC driver v%s\n", EDAC_AMD64_VERSION);
2758 2672
2759 opstate_init(); 2673 opstate_init();
2760 2674