author | Linus Torvalds <torvalds@linux-foundation.org> | 2011-03-17 20:21:32 -0400
---|---|---
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2011-03-17 20:21:32 -0400
commit | 978ca164bd9f30bd51f71dad86d8c3797f7add76 (patch) |
tree | e7cbd50aa6b2709ea27a59bc2adafe2ff27e8a33 | /drivers/edac
parent | 02e4c627d862427653fc088ce299746ea7d85600 (diff) |
parent | d34a6ecd45c1362d388af8d83ed329c609d1712b (diff) |
Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/bp/bp
* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/bp/bp: (38 commits)
amd64_edac: Fix decode_syndrome types
amd64_edac: Fix DCT argument type
amd64_edac: Fix ranges signedness
amd64_edac: Drop local variable
amd64_edac: Fix PCI config addressing types
amd64_edac: Fix DRAM base macros
amd64_edac: Fix node id signedness
amd64_edac: Drop redundant declarations
amd64_edac: Enable driver on F15h
amd64_edac: Adjust ECC symbol size to F15h
amd64_edac: Simplify scrubrate setting
PCI: Rename CPU PCI id define
amd64_edac: Improve DRAM address mapping
amd64_edac: Sanitize ->read_dram_ctl_register
amd64_edac: Adjust sys_addr to chip select conversion routine to F15h
amd64_edac: Beef up early exit reporting
amd64_edac: Revamp online spare handling
amd64_edac: Fix channel interleave removal
amd64_edac: Correct node interleaving removal
amd64_edac: Add support for interleaved region swapping
...
Fix up trivial conflict in include/linux/pci_ids.h due to
AMD_15H_NB_MISC being renamed as AMD_15H_NB_F3 next to the new
AMD_15H_NB_LINK entry.
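For reference, the conflict amounts to two trees editing the same block of PCI device ID defines. A hypothetical sketch of the resolved include/linux/pci_ids.h region; the 0x160x values are assumptions from memory, not taken from this page:

```c
/* illustrative only: resolved pci_ids.h block after the merge */
#define PCI_DEVICE_ID_AMD_15H_NB_F3	0x1603	/* was PCI_DEVICE_ID_AMD_15H_NB_MISC */
#define PCI_DEVICE_ID_AMD_15H_NB_LINK	0x1604	/* new entry from the other branch */
```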
Diffstat (limited to 'drivers/edac')
-rw-r--r--  drivers/edac/amd64_edac.c     | 1442
-rw-r--r--  drivers/edac/amd64_edac.h     |  369
-rw-r--r--  drivers/edac/amd64_edac_inj.c |    8
-rw-r--r--  drivers/edac/edac_mc_sysfs.c  |   26
-rw-r--r--  drivers/edac/mce_amd.c        |    8
-rw-r--r--  drivers/edac/mce_amd.h        |   28

6 files changed, 840 insertions, 1041 deletions
diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c
index 23e03554f0d3..0be30e978c85 100644
--- a/drivers/edac/amd64_edac.c
+++ b/drivers/edac/amd64_edac.c
@@ -25,59 +25,12 @@ static struct mem_ctl_info **mcis;
 static struct ecc_settings **ecc_stngs;
 
 /*
- * Address to DRAM bank mapping: see F2x80 for K8 and F2x[1,0]80 for Fam10 and
- * later.
- */
-static int ddr2_dbam_revCG[] = {
-	[0] = 32,
-	[1] = 64,
-	[2] = 128,
-	[3] = 256,
-	[4] = 512,
-	[5] = 1024,
-	[6] = 2048,
-};
-
-static int ddr2_dbam_revD[] = {
-	[0] = 32,
-	[1] = 64,
-	[2 ... 3] = 128,
-	[4] = 256,
-	[5] = 512,
-	[6] = 256,
-	[7] = 512,
-	[8 ... 9] = 1024,
-	[10] = 2048,
-};
-
-static int ddr2_dbam[] = { [0] = 128,
-	[1] = 256,
-	[2 ... 4] = 512,
-	[5 ... 6] = 1024,
-	[7 ... 8] = 2048,
-	[9 ... 10] = 4096,
-	[11] = 8192,
-};
-
-static int ddr3_dbam[] = { [0] = -1,
-	[1] = 256,
-	[2] = 512,
-	[3 ... 4] = -1,
-	[5 ... 6] = 1024,
-	[7 ... 8] = 2048,
-	[9 ... 10] = 4096,
-	[11] = 8192,
-};
-
-/*
  * Valid scrub rates for the K8 hardware memory scrubber. We map the scrubbing
  * bandwidth to a valid bit pattern. The 'set' operation finds the 'matching-
  * or higher value'.
  *
  *FIXME: Produce a better mapping/linearisation.
  */
-
-
 struct scrubrate {
 	u32 scrubval;	/* bit pattern for scrub rate */
 	u32 bandwidth;	/* bandwidth consumed (bytes/sec) */
@@ -107,6 +60,79 @@ struct scrubrate {
 	{ 0x00, 0UL},	/* scrubbing off */
 };
 
+static int __amd64_read_pci_cfg_dword(struct pci_dev *pdev, int offset,
+				      u32 *val, const char *func)
+{
+	int err = 0;
+
+	err = pci_read_config_dword(pdev, offset, val);
+	if (err)
+		amd64_warn("%s: error reading F%dx%03x.\n",
+			   func, PCI_FUNC(pdev->devfn), offset);
+
+	return err;
+}
+
+int __amd64_write_pci_cfg_dword(struct pci_dev *pdev, int offset,
+				u32 val, const char *func)
+{
+	int err = 0;
+
+	err = pci_write_config_dword(pdev, offset, val);
+	if (err)
+		amd64_warn("%s: error writing to F%dx%03x.\n",
+			   func, PCI_FUNC(pdev->devfn), offset);
+
+	return err;
+}
+
+/*
+ *
+ * Depending on the family, F2 DCT reads need special handling:
+ *
+ * K8: has a single DCT only
+ *
+ * F10h: each DCT has its own set of regs
+ *	DCT0 -> F2x040..
+ *	DCT1 -> F2x140..
+ *
+ * F15h: we select which DCT we access using F1x10C[DctCfgSel]
+ *
+ */
+static int k8_read_dct_pci_cfg(struct amd64_pvt *pvt, int addr, u32 *val,
+			       const char *func)
+{
+	if (addr >= 0x100)
+		return -EINVAL;
+
+	return __amd64_read_pci_cfg_dword(pvt->F2, addr, val, func);
+}
+
+static int f10_read_dct_pci_cfg(struct amd64_pvt *pvt, int addr, u32 *val,
+				const char *func)
+{
+	return __amd64_read_pci_cfg_dword(pvt->F2, addr, val, func);
+}
+
+static int f15_read_dct_pci_cfg(struct amd64_pvt *pvt, int addr, u32 *val,
+				const char *func)
+{
+	u32 reg = 0;
+	u8 dct  = 0;
+
+	if (addr >= 0x140 && addr <= 0x1a0) {
+		dct   = 1;
+		addr -= 0x100;
+	}
+
+	amd64_read_pci_cfg(pvt->F1, DCT_CFG_SEL, &reg);
+	reg &= 0xfffffffe;
+	reg |= dct;
+	amd64_write_pci_cfg(pvt->F1, DCT_CFG_SEL, reg);
+
+	return __amd64_read_pci_cfg_dword(pvt->F2, addr, val, func);
+}
+
 /*
  * Memory scrubber control interface. For K8, memory scrubbing is handled by
  * hardware and can involve L2 cache, dcache as well as the main memory. With
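The F15h helper above selects the target DCT through F1x10C[DctCfgSel] and then reads DCT1 registers through DCT0's register window. A standalone sketch of just the address remapping step, with the offsets taken from the code above:

```c
#include <stdio.h>

int main(void)
{
	int addr = 0x158;		/* a DCT1 register, F2x158 */
	int dct  = 0;

	if (addr >= 0x140 && addr <= 0x1a0) {
		dct   = 1;		/* program F1x10C[DctCfgSel] = 1 */
		addr -= 0x100;		/* then access via the DCT0 window */
	}
	printf("DctCfgSel=%d, read F2x%03x\n", dct, addr); /* DctCfgSel=1, F2x058 */
	return 0;
}
```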
@@ -156,7 +182,7 @@ static int __amd64_set_scrub_rate(struct pci_dev *ctl, u32 new_bw, u32 min_rate)
 
 	scrubval = scrubrates[i].scrubval;
 
-	pci_write_bits32(ctl, K8_SCRCTRL, scrubval, 0x001F);
+	pci_write_bits32(ctl, SCRCTRL, scrubval, 0x001F);
 
 	if (scrubval)
 		return scrubrates[i].bandwidth;
@@ -167,8 +193,12 @@ static int __amd64_set_scrub_rate(struct pci_dev *ctl, u32 new_bw, u32 min_rate)
 static int amd64_set_scrub_rate(struct mem_ctl_info *mci, u32 bw)
 {
 	struct amd64_pvt *pvt = mci->pvt_info;
+	u32 min_scrubrate = 0x5;
+
+	if (boot_cpu_data.x86 == 0xf)
+		min_scrubrate = 0x0;
 
-	return __amd64_set_scrub_rate(pvt->F3, bw, pvt->min_scrubrate);
+	return __amd64_set_scrub_rate(pvt->F3, bw, min_scrubrate);
 }
 
 static int amd64_get_scrub_rate(struct mem_ctl_info *mci)
@@ -177,7 +207,7 @@ static int amd64_get_scrub_rate(struct mem_ctl_info *mci)
 	u32 scrubval = 0;
 	int i, retval = -EINVAL;
 
-	amd64_read_pci_cfg(pvt->F3, K8_SCRCTRL, &scrubval);
+	amd64_read_pci_cfg(pvt->F3, SCRCTRL, &scrubval);
 
 	scrubval = scrubval & 0x001F;
 
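As the comment above the scrubrates[] table says, the 'set' operation walks the table (ordered from highest to lowest bandwidth) and stops at the first entry that does not exceed the request. A minimal standalone sketch of that selection; the table here is abbreviated and its values are illustrative, not the driver's real ones:

```c
#include <stdio.h>

struct scrubrate { unsigned scrubval; unsigned long bandwidth; };

/* abbreviated stand-in for the driver's table, highest bandwidth first */
static struct scrubrate scrubrates[] = {
	{ 0x01, 1600000000UL },
	{ 0x08,   12000000UL },
	{ 0x10,     750000UL },
	{ 0x00,          0UL },	/* scrubbing off */
};

int main(void)
{
	unsigned long new_bw = 20000000;	/* requested bytes/sec */
	unsigned i, n = sizeof(scrubrates) / sizeof(scrubrates[0]);

	/* stop at the first entry that no longer exceeds the request;
	 * falling off the end lands on the "scrubbing off" entry */
	for (i = 0; i < n - 1; i++)
		if (scrubrates[i].bandwidth <= new_bw)
			break;

	printf("scrubval=0x%02x (%lu B/s)\n",
	       scrubrates[i].scrubval, scrubrates[i].bandwidth);
	return 0;
}
```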
@@ -192,63 +222,14 @@ static int amd64_get_scrub_rate(struct mem_ctl_info *mci)
 	return retval;
 }
 
-/* Map from a CSROW entry to the mask entry that operates on it */
-static inline u32 amd64_map_to_dcs_mask(struct amd64_pvt *pvt, int csrow)
-{
-	if (boot_cpu_data.x86 == 0xf && pvt->ext_model < K8_REV_F)
-		return csrow;
-	else
-		return csrow >> 1;
-}
-
-/* return the 'base' address the i'th CS entry of the 'dct' DRAM controller */
-static u32 amd64_get_dct_base(struct amd64_pvt *pvt, int dct, int csrow)
-{
-	if (dct == 0)
-		return pvt->dcsb0[csrow];
-	else
-		return pvt->dcsb1[csrow];
-}
-
-/*
- * Return the 'mask' address the i'th CS entry. This function is needed because
- * there number of DCSM registers on Rev E and prior vs Rev F and later is
- * different.
- */
-static u32 amd64_get_dct_mask(struct amd64_pvt *pvt, int dct, int csrow)
-{
-	if (dct == 0)
-		return pvt->dcsm0[amd64_map_to_dcs_mask(pvt, csrow)];
-	else
-		return pvt->dcsm1[amd64_map_to_dcs_mask(pvt, csrow)];
-}
-
-
 /*
- * In *base and *limit, pass back the full 40-bit base and limit physical
- * addresses for the node given by node_id. This information is obtained from
- * DRAM Base (section 3.4.4.1) and DRAM Limit (section 3.4.4.2) registers. The
- * base and limit addresses are of type SysAddr, as defined at the start of
- * section 3.4.4 (p. 70). They are the lowest and highest physical addresses
- * in the address range they represent.
+ * returns true if the SysAddr given by sys_addr matches the
+ * DRAM base/limit associated with node_id
  */
-static void amd64_get_base_and_limit(struct amd64_pvt *pvt, int node_id,
-				     u64 *base, u64 *limit)
+static bool amd64_base_limit_match(struct amd64_pvt *pvt, u64 sys_addr,
+				   unsigned nid)
 {
-	*base = pvt->dram_base[node_id];
-	*limit = pvt->dram_limit[node_id];
-}
-
-/*
- * Return 1 if the SysAddr given by sys_addr matches the base/limit associated
- * with node_id
- */
-static int amd64_base_limit_match(struct amd64_pvt *pvt,
-				  u64 sys_addr, int node_id)
-{
-	u64 base, limit, addr;
-
-	amd64_get_base_and_limit(pvt, node_id, &base, &limit);
+	u64 addr;
 
 	/* The K8 treats this as a 40-bit value. However, bits 63-40 will be
 	 * all ones if the most significant implemented address bit is 1.
@@ -258,7 +239,8 @@ static int amd64_base_limit_match(struct amd64_pvt *pvt,
 	 */
 	addr = sys_addr & 0x000000ffffffffffull;
 
-	return (addr >= base) && (addr <= limit);
+	return ((addr >= get_dram_base(pvt, nid)) &&
+		(addr <= get_dram_limit(pvt, nid)));
 }
 
 /*
@@ -271,7 +253,7 @@ static struct mem_ctl_info *find_mc_by_sys_addr(struct mem_ctl_info *mci,
 						u64 sys_addr)
 {
 	struct amd64_pvt *pvt;
-	int node_id;
+	unsigned node_id;
 	u32 intlv_en, bits;
 
 	/*
@@ -285,10 +267,10 @@ static struct mem_ctl_info *find_mc_by_sys_addr(struct mem_ctl_info *mci,
 	 * registers. Therefore we arbitrarily choose to read it from the
 	 * register for node 0.
 	 */
-	intlv_en = pvt->dram_IntlvEn[0];
+	intlv_en = dram_intlv_en(pvt, 0);
 
 	if (intlv_en == 0) {
-		for (node_id = 0; node_id < DRAM_REG_COUNT; node_id++) {
+		for (node_id = 0; node_id < DRAM_RANGES; node_id++) {
 			if (amd64_base_limit_match(pvt, sys_addr, node_id))
 				goto found;
 		}
@@ -305,10 +287,10 @@ static struct mem_ctl_info *find_mc_by_sys_addr(struct mem_ctl_info *mci,
 	bits = (((u32) sys_addr) >> 12) & intlv_en;
 
 	for (node_id = 0; ; ) {
-		if ((pvt->dram_IntlvSel[node_id] & intlv_en) == bits)
+		if ((dram_intlv_sel(pvt, node_id) & intlv_en) == bits)
 			break;	/* intlv_sel field matches */
 
-		if (++node_id >= DRAM_REG_COUNT)
+		if (++node_id >= DRAM_RANGES)
 			goto err_no_match;
 	}
 
@@ -321,7 +303,7 @@ static struct mem_ctl_info *find_mc_by_sys_addr(struct mem_ctl_info *mci,
 	}
 
 found:
-	return edac_mc_find(node_id);
+	return edac_mc_find((int)node_id);
 
 err_no_match:
 	debugf2("sys_addr 0x%lx doesn't match any node\n",
@@ -331,37 +313,50 @@ err_no_match:
 }
 
 /*
- * Extract the DRAM CS base address from selected csrow register.
+ * compute the CS base address of the @csrow on the DRAM controller @dct.
+ * For details see F2x[5C:40] in the processor's BKDG
  */
-static u64 base_from_dct_base(struct amd64_pvt *pvt, int csrow)
+static void get_cs_base_and_mask(struct amd64_pvt *pvt, int csrow, u8 dct,
+				 u64 *base, u64 *mask)
 {
-	return ((u64) (amd64_get_dct_base(pvt, 0, csrow) & pvt->dcsb_base)) <<
-				pvt->dcs_shift;
-}
+	u64 csbase, csmask, base_bits, mask_bits;
+	u8 addr_shift;
 
-/*
- * Extract the mask from the dcsb0[csrow] entry in a CPU revision-specific way.
- */
-static u64 mask_from_dct_mask(struct amd64_pvt *pvt, int csrow)
-{
-	u64 dcsm_bits, other_bits;
-	u64 mask;
-
-	/* Extract bits from DRAM CS Mask. */
-	dcsm_bits = amd64_get_dct_mask(pvt, 0, csrow) & pvt->dcsm_mask;
-
-	other_bits = pvt->dcsm_mask;
-	other_bits = ~(other_bits << pvt->dcs_shift);
+	if (boot_cpu_data.x86 == 0xf && pvt->ext_model < K8_REV_F) {
+		csbase		= pvt->csels[dct].csbases[csrow];
+		csmask		= pvt->csels[dct].csmasks[csrow];
+		base_bits	= GENMASK(21, 31) | GENMASK(9, 15);
+		mask_bits	= GENMASK(21, 29) | GENMASK(9, 15);
+		addr_shift	= 4;
+	} else {
+		csbase		= pvt->csels[dct].csbases[csrow];
+		csmask		= pvt->csels[dct].csmasks[csrow >> 1];
+		addr_shift	= 8;
 
-	/*
-	 * The extracted bits from DCSM belong in the spaces represented by
-	 * the cleared bits in other_bits.
-	 */
-	mask = (dcsm_bits << pvt->dcs_shift) | other_bits;
+		if (boot_cpu_data.x86 == 0x15)
+			base_bits = mask_bits = GENMASK(19,30) | GENMASK(5,13);
+		else
+			base_bits = mask_bits = GENMASK(19,28) | GENMASK(5,13);
+	}
 
-	return mask;
+	*base  = (csbase & base_bits) << addr_shift;
 
+	*mask  = ~0ULL;
+	/* poke holes for the csmask */
+	*mask &= ~(mask_bits << addr_shift);
+	/* OR them in */
+	*mask |= (csmask & mask_bits) << addr_shift;
 }
 
+#define for_each_chip_select(i, dct, pvt) \
+	for (i = 0; i < pvt->csels[dct].b_cnt; i++)
+
+#define chip_select_base(i, dct, pvt) \
+	pvt->csels[dct].csbases[i]
+
+#define for_each_chip_select_mask(i, dct, pvt) \
+	for (i = 0; i < pvt->csels[dct].m_cnt; i++)
+
 /*
  * @input_addr is an InputAddr associated with the node given by mci. Return the
  * csrow that input_addr maps to, or -1 on failure (no csrow claims input_addr).
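One thing worth flagging for readers: GENMASK() here is the driver's own helper (introduced in amd64_edac.h by this series), and its argument order is (lo, hi), the reverse of the kernel-wide GENMASK(hi, lo) that exists in later kernels. A standalone sketch, assuming that definition:

```c
#include <stdio.h>

/* assumed driver-local helper: set bits lo..hi inclusive, (lo, hi) order */
#define GENMASK(lo, hi) (((1ULL << ((hi) - (lo) + 1)) - 1) << (lo))

int main(void)
{
	/* K8 rev <F csbase significant bits: [31:21] and [15:9] */
	unsigned long long base_bits = GENMASK(21, 31) | GENMASK(9, 15);

	printf("base_bits = 0x%llx\n", base_bits);	/* 0xffe0fe00 */
	return 0;
}
```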
@@ -374,19 +369,13 @@ static int input_addr_to_csrow(struct mem_ctl_info *mci, u64 input_addr)
 
 	pvt = mci->pvt_info;
 
-	/*
-	 * Here we use the DRAM CS Base and DRAM CS Mask registers. For each CS
-	 * base/mask register pair, test the condition shown near the start of
-	 * section 3.5.4 (p. 84, BKDG #26094, K8, revA-E).
-	 */
-	for (csrow = 0; csrow < pvt->cs_count; csrow++) {
-
-		/* This DRAM chip select is disabled on this node */
-		if ((pvt->dcsb0[csrow] & K8_DCSB_CS_ENABLE) == 0)
+	for_each_chip_select(csrow, 0, pvt) {
+		if (!csrow_enabled(csrow, 0, pvt))
 			continue;
 
-		base = base_from_dct_base(pvt, csrow);
-		mask = ~mask_from_dct_mask(pvt, csrow);
+		get_cs_base_and_mask(pvt, csrow, 0, &base, &mask);
+
+		mask = ~mask;
 
 		if ((input_addr & mask) == (base & mask)) {
 			debugf2("InputAddr 0x%lx matches csrow %d (node %d)\n",
@@ -396,7 +385,6 @@ static int input_addr_to_csrow(struct mem_ctl_info *mci, u64 input_addr)
 			return csrow;
 		}
 	}
-
 	debugf2("no matching csrow for InputAddr 0x%lx (MC node %d)\n",
 		(unsigned long)input_addr, pvt->mc_node_id);
 
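The match test in input_addr_to_csrow() compares only the bits that the chip-select mask leaves significant; the inverted csmask bits are "don't care". A toy standalone example of the test, with invented values:

```c
#include <stdio.h>

int main(void)
{
	/* toy values, not taken from real hardware */
	unsigned long long base  = 0x100000000ULL;	/* csrow base address */
	unsigned long long mask  = ~0x3fffffffULL;	/* significant bits only */
	unsigned long long input = 0x100000000ULL + 0x12345678ULL;

	/* don't-care bits are masked off on both sides before comparing */
	if ((input & mask) == (base & mask))
		printf("InputAddr 0x%llx matches this csrow\n", input);
	return 0;
}
```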
@@ -404,19 +392,6 @@ static int input_addr_to_csrow(struct mem_ctl_info *mci, u64 input_addr)
 }
 
 /*
- * Return the base value defined by the DRAM Base register for the node
- * represented by mci. This function returns the full 40-bit value despite the
- * fact that the register only stores bits 39-24 of the value. See section
- * 3.4.4.1 (BKDG #26094, K8, revA-E)
- */
-static inline u64 get_dram_base(struct mem_ctl_info *mci)
-{
-	struct amd64_pvt *pvt = mci->pvt_info;
-
-	return pvt->dram_base[pvt->mc_node_id];
-}
-
-/*
  * Obtain info from the DRAM Hole Address Register (section 3.4.8, pub #26094)
  * for the node represented by mci. Info is passed back in *hole_base,
  * *hole_offset, and *hole_size. Function returns 0 if info is valid or 1 if
@@ -445,14 +420,13 @@ int amd64_get_dram_hole_info(struct mem_ctl_info *mci, u64 *hole_base,
 		return 1;
 	}
 
-	/* only valid for Fam10h */
-	if (boot_cpu_data.x86 == 0x10 &&
-	    (pvt->dhar & F10_DRAM_MEM_HOIST_VALID) == 0) {
+	/* valid for Fam10h and above */
+	if (boot_cpu_data.x86 >= 0x10 && !dhar_mem_hoist_valid(pvt)) {
 		debugf1("  Dram Memory Hoisting is DISABLED on this system\n");
 		return 1;
 	}
 
-	if ((pvt->dhar & DHAR_VALID) == 0) {
+	if (!dhar_valid(pvt)) {
 		debugf1("  Dram Memory Hoisting is DISABLED on this node %d\n",
 			pvt->mc_node_id);
 		return 1;
@@ -476,15 +450,15 @@ int amd64_get_dram_hole_info(struct mem_ctl_info *mci, u64 *hole_base,
 	 * addresses in the hole so that they start at 0x100000000.
 	 */
 
-	base = dhar_base(pvt->dhar);
+	base = dhar_base(pvt);
 
 	*hole_base = base;
 	*hole_size = (0x1ull << 32) - base;
 
 	if (boot_cpu_data.x86 > 0xf)
-		*hole_offset = f10_dhar_offset(pvt->dhar);
+		*hole_offset = f10_dhar_offset(pvt);
 	else
-		*hole_offset = k8_dhar_offset(pvt->dhar);
+		*hole_offset = k8_dhar_offset(pvt);
 
 	debugf1("  DHAR info for node %d base 0x%lx offset 0x%lx size 0x%lx\n",
 		pvt->mc_node_id, (unsigned long)*hole_base,
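Since hoisted DRAM always resumes at 4 GB, the hole size in amd64_get_dram_hole_info() follows directly from the hole base. A small standalone illustration of that arithmetic (the base value is invented):

```c
#include <stdio.h>

int main(void)
{
	/* illustrative: DHAR base of 0xc0000000 (3 GB) */
	unsigned long long hole_base = 0xc0000000ULL;
	unsigned long long hole_size = (1ULL << 32) - hole_base;

	/* the hole spans [3G, 4G): 1 GB that DRAM addressing skips over */
	printf("hole: base=0x%llx size=0x%llx\n", hole_base, hole_size);
	return 0;
}
```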
@@ -525,10 +499,11 @@ EXPORT_SYMBOL_GPL(amd64_get_dram_hole_info);
  */
 static u64 sys_addr_to_dram_addr(struct mem_ctl_info *mci, u64 sys_addr)
 {
+	struct amd64_pvt *pvt = mci->pvt_info;
 	u64 dram_base, hole_base, hole_offset, hole_size, dram_addr;
 	int ret = 0;
 
-	dram_base = get_dram_base(mci);
+	dram_base = get_dram_base(pvt, pvt->mc_node_id);
 
 	ret = amd64_get_dram_hole_info(mci, &hole_base, &hole_offset,
 				       &hole_size);
@@ -556,7 +531,7 @@ static u64 sys_addr_to_dram_addr(struct mem_ctl_info *mci, u64 sys_addr)
 	 * section 3.4.2 of AMD publication 24592: AMD x86-64 Architecture
 	 * Programmer's Manual Volume 1 Application Programming.
 	 */
-	dram_addr = (sys_addr & 0xffffffffffull) - dram_base;
+	dram_addr = (sys_addr & GENMASK(0, 39)) - dram_base;
 
 	debugf2("using DRAM Base register to translate SysAddr 0x%lx to "
 		"DramAddr 0x%lx\n", (unsigned long)sys_addr,
@@ -592,9 +567,9 @@ static u64 dram_addr_to_input_addr(struct mem_ctl_info *mci, u64 dram_addr)
 	 * See the start of section 3.4.4 (p. 70, BKDG #26094, K8, revA-E)
 	 * concerning translating a DramAddr to an InputAddr.
 	 */
-	intlv_shift = num_node_interleave_bits(pvt->dram_IntlvEn[0]);
-	input_addr = ((dram_addr >> intlv_shift) & 0xffffff000ull) +
+	intlv_shift = num_node_interleave_bits(dram_intlv_en(pvt, 0));
+	input_addr = ((dram_addr >> intlv_shift) & GENMASK(12, 35)) +
 		      (dram_addr & 0xfff);
 
 	debugf2("  Intlv Shift=%d DramAddr=0x%lx maps to InputAddr=0x%lx\n",
 		intlv_shift, (unsigned long)dram_addr,
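dram_addr_to_input_addr() strips the node-interleave bits sitting just above the 4 KB page offset: bits [35:12] shift down while bits [11:0] stay put. A standalone worked example using the same formula:

```c
#include <stdio.h>

#define GENMASK(lo, hi) (((1ULL << ((hi) - (lo) + 1)) - 1) << (lo))

int main(void)
{
	unsigned long long dram_addr = 0x5678;	/* toy DramAddr */
	int intlv_shift = 1;			/* two-node interleave: 1 bit */
	unsigned long long input_addr;

	/* drop the interleave bit above bit 11, keep the page offset */
	input_addr = ((dram_addr >> intlv_shift) & GENMASK(12, 35)) +
		     (dram_addr & 0xfff);

	/* 0x5678 >> 1 = 0x2b3c; keep bits [35:12] = 0x2000; + 0x678 = 0x2678 */
	printf("DramAddr 0x%llx -> InputAddr 0x%llx\n", dram_addr, input_addr);
	return 0;
}
```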
@@ -628,7 +603,7 @@ static u64 sys_addr_to_input_addr(struct mem_ctl_info *mci, u64 sys_addr)
 static u64 input_addr_to_dram_addr(struct mem_ctl_info *mci, u64 input_addr)
 {
 	struct amd64_pvt *pvt;
-	int node_id, intlv_shift;
+	unsigned node_id, intlv_shift;
 	u64 bits, dram_addr;
 	u32 intlv_sel;
 
@@ -642,10 +617,10 @@ static u64 input_addr_to_dram_addr(struct mem_ctl_info *mci, u64 input_addr)
 	 */
 	pvt = mci->pvt_info;
 	node_id = pvt->mc_node_id;
-	BUG_ON((node_id < 0) || (node_id > 7));
 
-	intlv_shift = num_node_interleave_bits(pvt->dram_IntlvEn[0]);
+	BUG_ON(node_id > 7);
 
+	intlv_shift = num_node_interleave_bits(dram_intlv_en(pvt, 0));
 	if (intlv_shift == 0) {
 		debugf1("    InputAddr 0x%lx translates to DramAddr of "
 			"same value\n", (unsigned long)input_addr);
@@ -653,10 +628,10 @@ static u64 input_addr_to_dram_addr(struct mem_ctl_info *mci, u64 input_addr)
 		return input_addr;
 	}
 
-	bits = ((input_addr & 0xffffff000ull) << intlv_shift) +
+	bits = ((input_addr & GENMASK(12, 35)) << intlv_shift) +
 	       (input_addr & 0xfff);
 
-	intlv_sel = pvt->dram_IntlvSel[node_id] & ((1 << intlv_shift) - 1);
+	intlv_sel = dram_intlv_sel(pvt, node_id) & ((1 << intlv_shift) - 1);
 	dram_addr = bits + (intlv_sel << 12);
 
 	debugf1("InputAddr 0x%lx translates to DramAddr 0x%lx "
@@ -673,7 +648,7 @@ static u64 input_addr_to_dram_addr(struct mem_ctl_info *mci, u64 input_addr)
 static u64 dram_addr_to_sys_addr(struct mem_ctl_info *mci, u64 dram_addr)
 {
 	struct amd64_pvt *pvt = mci->pvt_info;
-	u64 hole_base, hole_offset, hole_size, base, limit, sys_addr;
+	u64 hole_base, hole_offset, hole_size, base, sys_addr;
 	int ret = 0;
 
 	ret = amd64_get_dram_hole_info(mci, &hole_base, &hole_offset,
@@ -691,7 +666,7 @@ static u64 dram_addr_to_sys_addr(struct mem_ctl_info *mci, u64 dram_addr)
 		}
 	}
 
-	amd64_get_base_and_limit(pvt, pvt->mc_node_id, &base, &limit);
+	base = get_dram_base(pvt, pvt->mc_node_id);
 	sys_addr = dram_addr + base;
 
 	/*
@@ -736,13 +711,12 @@ static void find_csrow_limits(struct mem_ctl_info *mci, int csrow,
 	u64 base, mask;
 
 	pvt = mci->pvt_info;
-	BUG_ON((csrow < 0) || (csrow >= pvt->cs_count));
+	BUG_ON((csrow < 0) || (csrow >= pvt->csels[0].b_cnt));
 
-	base = base_from_dct_base(pvt, csrow);
-	mask = mask_from_dct_mask(pvt, csrow);
+	get_cs_base_and_mask(pvt, csrow, 0, &base, &mask);
 
 	*input_addr_min = base & ~mask;
-	*input_addr_max = base | mask | pvt->dcs_mask_notused;
+	*input_addr_max = base | mask;
 }
 
 /* Map the Error address to a PAGE and PAGE OFFSET. */
@@ -775,18 +749,13 @@ static int sys_addr_to_csrow(struct mem_ctl_info *mci, u64 sys_addr)
 
 static int get_channel_from_ecc_syndrome(struct mem_ctl_info *, u16);
 
-static u16 extract_syndrome(struct err_regs *err)
-{
-	return ((err->nbsh >> 15) & 0xff) | ((err->nbsl >> 16) & 0xff00);
-}
-
 /*
  * Determine if the DIMMs have ECC enabled. ECC is enabled ONLY if all the DIMMs
  * are ECC capable.
  */
 static enum edac_type amd64_determine_edac_cap(struct amd64_pvt *pvt)
 {
-	int bit;
+	u8 bit;
 	enum dev_type edac_cap = EDAC_FLAG_NONE;
 
 	bit = (boot_cpu_data.x86 > 0xf || pvt->ext_model >= K8_REV_F)
@@ -799,8 +768,7 @@ static enum edac_type amd64_determine_edac_cap(struct amd64_pvt *pvt)
 	return edac_cap;
 }
 
-
-static void amd64_debug_display_dimm_sizes(int ctrl, struct amd64_pvt *pvt);
+static void amd64_debug_display_dimm_sizes(struct amd64_pvt *, u8);
 
 static void amd64_dump_dramcfg_low(u32 dclr, int chan)
 {
@@ -813,8 +781,9 @@ static void amd64_dump_dramcfg_low(u32 dclr, int chan)
 	debugf1("  PAR/ERR parity: %s\n",
 		(dclr & BIT(8)) ?  "enabled" : "disabled");
 
-	debugf1("  DCT 128bit mode width: %s\n",
-		(dclr & BIT(11)) ?  "128b" : "64b");
+	if (boot_cpu_data.x86 == 0x10)
+		debugf1("  DCT 128bit mode width: %s\n",
+			(dclr & BIT(11)) ?  "128b" : "64b");
 
 	debugf1("  x4 logical DIMMs present: L0: %s L1: %s L2: %s L3: %s\n",
 		(dclr & BIT(12)) ?  "yes" : "no",
@@ -824,16 +793,16 @@ static void amd64_dump_dramcfg_low(u32 dclr, int chan)
 }
 
 /* Display and decode various NB registers for debug purposes. */
-static void amd64_dump_misc_regs(struct amd64_pvt *pvt)
+static void dump_misc_regs(struct amd64_pvt *pvt)
 {
 	debugf1("F3xE8 (NB Cap): 0x%08x\n", pvt->nbcap);
 
 	debugf1("  NB two channel DRAM capable: %s\n",
-		(pvt->nbcap & K8_NBCAP_DCT_DUAL) ? "yes" : "no");
+		(pvt->nbcap & NBCAP_DCT_DUAL) ? "yes" : "no");
 
 	debugf1("  ECC capable: %s, ChipKill ECC capable: %s\n",
-		(pvt->nbcap & K8_NBCAP_SECDED) ? "yes" : "no",
-		(pvt->nbcap & K8_NBCAP_CHIPKILL) ? "yes" : "no");
+		(pvt->nbcap & NBCAP_SECDED) ? "yes" : "no",
+		(pvt->nbcap & NBCAP_CHIPKILL) ? "yes" : "no");
 
 	amd64_dump_dramcfg_low(pvt->dclr0, 0);
 
@@ -841,130 +810,84 @@
 
 	debugf1("F1xF0 (DRAM Hole Address): 0x%08x, base: 0x%08x, "
 		"offset: 0x%08x\n",
-		pvt->dhar,
-		dhar_base(pvt->dhar),
-		(boot_cpu_data.x86 == 0xf) ? k8_dhar_offset(pvt->dhar)
-				   : f10_dhar_offset(pvt->dhar));
+		pvt->dhar, dhar_base(pvt),
+		(boot_cpu_data.x86 == 0xf) ? k8_dhar_offset(pvt)
+				   : f10_dhar_offset(pvt));
 
-	debugf1("  DramHoleValid: %s\n",
-		(pvt->dhar & DHAR_VALID) ?  "yes" : "no");
+	debugf1("  DramHoleValid: %s\n", dhar_valid(pvt) ? "yes" : "no");
 
-	amd64_debug_display_dimm_sizes(0, pvt);
+	amd64_debug_display_dimm_sizes(pvt, 0);
 
 	/* everything below this point is Fam10h and above */
 	if (boot_cpu_data.x86 == 0xf)
 		return;
 
-	amd64_debug_display_dimm_sizes(1, pvt);
+	amd64_debug_display_dimm_sizes(pvt, 1);
 
-	amd64_info("using %s syndromes.\n", ((pvt->syn_type == 8) ? "x8" : "x4"));
+	amd64_info("using %s syndromes.\n", ((pvt->ecc_sym_sz == 8) ? "x8" : "x4"));
 
 	/* Only if NOT ganged does dclr1 have valid info */
 	if (!dct_ganging_enabled(pvt))
 		amd64_dump_dramcfg_low(pvt->dclr1, 1);
 }
 
-/* Read in both of DBAM registers */
-static void amd64_read_dbam_reg(struct amd64_pvt *pvt)
-{
-	amd64_read_pci_cfg(pvt->F2, DBAM0, &pvt->dbam0);
-
-	if (boot_cpu_data.x86 >= 0x10)
-		amd64_read_pci_cfg(pvt->F2, DBAM1, &pvt->dbam1);
-}
-
 /*
- * NOTE: CPU Revision Dependent code: Rev E and Rev F
- *
- * Set the DCSB and DCSM mask values depending on the CPU revision value. Also
- * set the shift factor for the DCSB and DCSM values.
- *
- * ->dcs_mask_notused, RevE:
- *
- * To find the max InputAddr for the csrow, start with the base address and set
- * all bits that are "don't care" bits in the test at the start of section
- * 3.5.4 (p. 84).
- *
- * The "don't care" bits are all set bits in the mask and all bits in the gaps
- * between bit ranges [35:25] and [19:13]. The value REV_E_DCS_NOTUSED_BITS
- * represents bits [24:20] and [12:0], which are all bits in the above-mentioned
- * gaps.
- *
- * ->dcs_mask_notused, RevF and later:
- *
- * To find the max InputAddr for the csrow, start with the base address and set
- * all bits that are "don't care" bits in the test at the start of NPT section
- * 4.5.4 (p. 87).
- *
- * The "don't care" bits are all set bits in the mask and all bits in the gaps
- * between bit ranges [36:27] and [21:13].
- *
- * The value REV_F_F1Xh_DCS_NOTUSED_BITS represents bits [26:22] and [12:0],
- * which are all bits in the above-mentioned gaps.
+ * see BKDG, F2x[1,0][5C:40], F2[1,0][6C:60]
  */
-static void amd64_set_dct_base_and_mask(struct amd64_pvt *pvt)
+static void prep_chip_selects(struct amd64_pvt *pvt)
 {
-
 	if (boot_cpu_data.x86 == 0xf && pvt->ext_model < K8_REV_F) {
-		pvt->dcsb_base		= REV_E_DCSB_BASE_BITS;
-		pvt->dcsm_mask		= REV_E_DCSM_MASK_BITS;
-		pvt->dcs_mask_notused	= REV_E_DCS_NOTUSED_BITS;
-		pvt->dcs_shift		= REV_E_DCS_SHIFT;
-		pvt->cs_count		= 8;
-		pvt->num_dcsm		= 8;
+		pvt->csels[0].b_cnt = pvt->csels[1].b_cnt = 8;
+		pvt->csels[0].m_cnt = pvt->csels[1].m_cnt = 8;
 	} else {
-		pvt->dcsb_base		= REV_F_F1Xh_DCSB_BASE_BITS;
-		pvt->dcsm_mask		= REV_F_F1Xh_DCSM_MASK_BITS;
-		pvt->dcs_mask_notused	= REV_F_F1Xh_DCS_NOTUSED_BITS;
-		pvt->dcs_shift		= REV_F_F1Xh_DCS_SHIFT;
-		pvt->cs_count		= 8;
-		pvt->num_dcsm		= 4;
+		pvt->csels[0].b_cnt = pvt->csels[1].b_cnt = 8;
+		pvt->csels[0].m_cnt = pvt->csels[1].m_cnt = 4;
 	}
 }
 
 /*
- * Function 2 Offset F10_DCSB0; read in the DCS Base and DCS Mask hw registers
+ * Function 2 Offset F10_DCSB0; read in the DCS Base and DCS Mask registers
  */
-static void amd64_read_dct_base_mask(struct amd64_pvt *pvt)
+static void read_dct_base_mask(struct amd64_pvt *pvt)
 {
-	int cs, reg;
+	int cs;
 
-	amd64_set_dct_base_and_mask(pvt);
+	prep_chip_selects(pvt);
 
-	for (cs = 0; cs < pvt->cs_count; cs++) {
-		reg = K8_DCSB0 + (cs * 4);
-		if (!amd64_read_pci_cfg(pvt->F2, reg, &pvt->dcsb0[cs]))
+	for_each_chip_select(cs, 0, pvt) {
+		int reg0   = DCSB0 + (cs * 4);
+		int reg1   = DCSB1 + (cs * 4);
+		u32 *base0 = &pvt->csels[0].csbases[cs];
+		u32 *base1 = &pvt->csels[1].csbases[cs];
+
+		if (!amd64_read_dct_pci_cfg(pvt, reg0, base0))
 			debugf0("  DCSB0[%d]=0x%08x reg: F2x%x\n",
-				cs, pvt->dcsb0[cs], reg);
+				cs, *base0, reg0);
 
-		/* If DCT are NOT ganged, then read in DCT1's base */
-		if (boot_cpu_data.x86 >= 0x10 && !dct_ganging_enabled(pvt)) {
-			reg = F10_DCSB1 + (cs * 4);
-			if (!amd64_read_pci_cfg(pvt->F2, reg,
-						&pvt->dcsb1[cs]))
-				debugf0("  DCSB1[%d]=0x%08x reg: F2x%x\n",
-					cs, pvt->dcsb1[cs], reg);
-		} else {
-			pvt->dcsb1[cs] = 0;
-		}
+		if (boot_cpu_data.x86 == 0xf || dct_ganging_enabled(pvt))
+			continue;
+
+		if (!amd64_read_dct_pci_cfg(pvt, reg1, base1))
+			debugf0("  DCSB1[%d]=0x%08x reg: F2x%x\n",
+				cs, *base1, reg1);
 	}
 
-	for (cs = 0; cs < pvt->num_dcsm; cs++) {
-		reg = K8_DCSM0 + (cs * 4);
-		if (!amd64_read_pci_cfg(pvt->F2, reg, &pvt->dcsm0[cs]))
+	for_each_chip_select_mask(cs, 0, pvt) {
+		int reg0   = DCSM0 + (cs * 4);
+		int reg1   = DCSM1 + (cs * 4);
+		u32 *mask0 = &pvt->csels[0].csmasks[cs];
+		u32 *mask1 = &pvt->csels[1].csmasks[cs];
+
+		if (!amd64_read_dct_pci_cfg(pvt, reg0, mask0))
 			debugf0("  DCSM0[%d]=0x%08x reg: F2x%x\n",
-				cs, pvt->dcsm0[cs], reg);
+				cs, *mask0, reg0);
 
-		/* If DCT are NOT ganged, then read in DCT1's mask */
-		if (boot_cpu_data.x86 >= 0x10 && !dct_ganging_enabled(pvt)) {
-			reg = F10_DCSM1 + (cs * 4);
-			if (!amd64_read_pci_cfg(pvt->F2, reg,
-						&pvt->dcsm1[cs]))
-				debugf0("  DCSM1[%d]=0x%08x reg: F2x%x\n",
-					cs, pvt->dcsm1[cs], reg);
-		} else {
-			pvt->dcsm1[cs] = 0;
-		}
+		if (boot_cpu_data.x86 == 0xf || dct_ganging_enabled(pvt))
+			continue;
+
+		if (!amd64_read_dct_pci_cfg(pvt, reg1, mask1))
+			debugf0("  DCSM1[%d]=0x%08x reg: F2x%x\n",
+				cs, *mask1, reg1);
 	}
 }
 
@@ -972,7 +895,10 @@ static enum mem_type amd64_determine_memory_type(struct amd64_pvt *pvt, int cs)
 {
 	enum mem_type type;
 
-	if (boot_cpu_data.x86 >= 0x10 || pvt->ext_model >= K8_REV_F) {
+	/* F15h supports only DDR3 */
+	if (boot_cpu_data.x86 >= 0x15)
+		type = (pvt->dclr0 & BIT(16)) ?	MEM_DDR3 : MEM_RDDR3;
+	else if (boot_cpu_data.x86 == 0x10 || pvt->ext_model >= K8_REV_F) {
 		if (pvt->dchr0 & DDR3_MODE)
 			type = (pvt->dclr0 & BIT(16)) ?	MEM_DDR3 : MEM_RDDR3;
 		else
@@ -986,26 +912,14 @@ static enum mem_type amd64_determine_memory_type(struct amd64_pvt *pvt, int cs)
 	return type;
 }
 
-/*
- * Read the DRAM Configuration Low register. It differs between CG, D & E revs
- * and the later RevF memory controllers (DDR vs DDR2)
- *
- * Return:
- *	number of memory channels in operation
- * Pass back:
- *	contents of the DCL0_LOW register
- */
+/* Get the number of DCT channels the memory controller is using. */
 static int k8_early_channel_count(struct amd64_pvt *pvt)
 {
-	int flag, err = 0;
-
-	err = amd64_read_pci_cfg(pvt->F2, F10_DCLR_0, &pvt->dclr0);
-	if (err)
-		return err;
+	int flag;
 
 	if (pvt->ext_model >= K8_REV_F)
 		/* RevF (NPT) and later */
-		flag = pvt->dclr0 & F10_WIDTH_128;
+		flag = pvt->dclr0 & WIDTH_128;
 	else
 		/* RevE and earlier */
 		flag = pvt->dclr0 & REVE_WIDTH_128;
@@ -1016,55 +930,47 @@ static int k8_early_channel_count(struct amd64_pvt *pvt)
 	return (flag) ? 2 : 1;
 }
 
-/* extract the ERROR ADDRESS for the K8 CPUs */
-static u64 k8_get_error_address(struct mem_ctl_info *mci,
-				struct err_regs *info)
+/* On F10h and later ErrAddr is MC4_ADDR[47:1] */
+static u64 get_error_address(struct mce *m)
 {
-	return (((u64) (info->nbeah & 0xff)) << 32) +
-		(info->nbeal & ~0x03);
+	u8 start_bit = 1;
+	u8 end_bit   = 47;
+
+	if (boot_cpu_data.x86 == 0xf) {
+		start_bit = 3;
+		end_bit   = 39;
+	}
+
+	return m->addr & GENMASK(start_bit, end_bit);
 }
 
-/*
- * Read the Base and Limit registers for K8 based Memory controllers; extract
- * fields from the 'raw' reg into separate data fields
- *
- * Isolates: BASE, LIMIT, IntlvEn, IntlvSel, RW_EN
- */
-static void k8_read_dram_base_limit(struct amd64_pvt *pvt, int dram)
+static void read_dram_base_limit_regs(struct amd64_pvt *pvt, unsigned range)
 {
-	u32 low;
-	u32 off = dram << 3;	/* 8 bytes between DRAM entries */
+	int off = range << 3;
 
-	amd64_read_pci_cfg(pvt->F1, K8_DRAM_BASE_LOW + off, &low);
+	amd64_read_pci_cfg(pvt->F1, DRAM_BASE_LO + off,  &pvt->ranges[range].base.lo);
+	amd64_read_pci_cfg(pvt->F1, DRAM_LIMIT_LO + off, &pvt->ranges[range].lim.lo);
 
-	/* Extract parts into separate data entries */
-	pvt->dram_base[dram] = ((u64) low & 0xFFFF0000) << 8;
-	pvt->dram_IntlvEn[dram] = (low >> 8) & 0x7;
-	pvt->dram_rw_en[dram] = (low & 0x3);
+	if (boot_cpu_data.x86 == 0xf)
+		return;
 
-	amd64_read_pci_cfg(pvt->F1, K8_DRAM_LIMIT_LOW + off, &low);
+	if (!dram_rw(pvt, range))
+		return;
 
-	/*
-	 * Extract parts into separate data entries. Limit is the HIGHEST memory
-	 * location of the region, so lower 24 bits need to be all ones
-	 */
-	pvt->dram_limit[dram] = (((u64) low & 0xFFFF0000) << 8) | 0x00FFFFFF;
-	pvt->dram_IntlvSel[dram] = (low >> 8) & 0x7;
-	pvt->dram_DstNode[dram] = (low & 0x7);
+	amd64_read_pci_cfg(pvt->F1, DRAM_BASE_HI + off,  &pvt->ranges[range].base.hi);
+	amd64_read_pci_cfg(pvt->F1, DRAM_LIMIT_HI + off, &pvt->ranges[range].lim.hi);
 }
 
-static void k8_map_sysaddr_to_csrow(struct mem_ctl_info *mci,
-				    struct err_regs *err_info, u64 sys_addr)
+static void k8_map_sysaddr_to_csrow(struct mem_ctl_info *mci, u64 sys_addr,
+				    u16 syndrome)
 {
 	struct mem_ctl_info *src_mci;
+	struct amd64_pvt *pvt = mci->pvt_info;
 	int channel, csrow;
 	u32 page, offset;
-	u16 syndrome;
-
-	syndrome = extract_syndrome(err_info);
 
 	/* CHIPKILL enabled */
-	if (err_info->nbcfg & K8_NBCFG_CHIPKILL) {
+	if (pvt->nbcfg & NBCFG_CHIPKILL) {
 		channel = get_channel_from_ecc_syndrome(mci, syndrome);
 		if (channel < 0) {
 			/*
@@ -1113,18 +1019,41 @@ static void k8_map_sysaddr_to_csrow(struct mem_ctl_info *mci,
 	}
 }
 
-static int k8_dbam_to_chip_select(struct amd64_pvt *pvt, int cs_mode)
+static int ddr2_cs_size(unsigned i, bool dct_width)
 {
-	int *dbam_map;
+	unsigned shift = 0;
 
-	if (pvt->ext_model >= K8_REV_F)
-		dbam_map = ddr2_dbam;
-	else if (pvt->ext_model >= K8_REV_D)
-		dbam_map = ddr2_dbam_revD;
+	if (i <= 2)
+		shift = i;
+	else if (!(i & 0x1))
+		shift = i >> 1;
 	else
-		dbam_map = ddr2_dbam_revCG;
+		shift = (i + 1) >> 1;
 
-	return dbam_map[cs_mode];
+	return 128 << (shift + !!dct_width);
+}
+
+static int k8_dbam_to_chip_select(struct amd64_pvt *pvt, u8 dct,
+				  unsigned cs_mode)
+{
+	u32 dclr = dct ? pvt->dclr1 : pvt->dclr0;
+
+	if (pvt->ext_model >= K8_REV_F) {
+		WARN_ON(cs_mode > 11);
+		return ddr2_cs_size(cs_mode, dclr & WIDTH_128);
+	}
+	else if (pvt->ext_model >= K8_REV_D) {
+		WARN_ON(cs_mode > 10);
+
+		if (cs_mode == 3 || cs_mode == 8)
+			return 32 << (cs_mode - 1);
+		else
+			return 32 << cs_mode;
+	}
+	else {
+		WARN_ON(cs_mode > 6);
+		return 32 << cs_mode;
+	}
 }
 
 /*
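The cs_mode lookup tables deleted at the top of this patch are now computed. As a sanity check (standalone, not part of the patch), ddr2_cs_size() with a 64-bit DCT reproduces the removed RevF ddr2_dbam[] table exactly:

```c
#include <stdio.h>
#include <stdbool.h>

static int ddr2_cs_size(unsigned i, bool dct_width)
{
	unsigned shift = 0;

	if (i <= 2)
		shift = i;
	else if (!(i & 0x1))
		shift = i >> 1;
	else
		shift = (i + 1) >> 1;

	return 128 << (shift + !!dct_width);
}

int main(void)
{
	/* the removed ddr2_dbam[] table this replaces (sizes in MB) */
	static const int ddr2_dbam[] = { 128, 256, 512, 512, 512,
					 1024, 1024, 2048, 2048,
					 4096, 4096, 8192 };
	unsigned i;

	for (i = 0; i < 12; i++)
		if (ddr2_cs_size(i, false) != ddr2_dbam[i])
			printf("mismatch at cs_mode %u\n", i);
	printf("check done\n");
	return 0;
}
```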
@@ -1135,17 +1064,13 @@ static int k8_dbam_to_chip_select(struct amd64_pvt *pvt, int cs_mode) | |||
1135 | * Pass back: | 1064 | * Pass back: |
1136 | * contents of the DCL0_LOW register | 1065 | * contents of the DCL0_LOW register |
1137 | */ | 1066 | */ |
1138 | static int f10_early_channel_count(struct amd64_pvt *pvt) | 1067 | static int f1x_early_channel_count(struct amd64_pvt *pvt) |
1139 | { | 1068 | { |
1140 | int dbams[] = { DBAM0, DBAM1 }; | ||
1141 | int i, j, channels = 0; | 1069 | int i, j, channels = 0; |
1142 | u32 dbam; | ||
1143 | 1070 | ||
1144 | /* If we are in 128 bit mode, then we are using 2 channels */ | 1071 | /* On F10h, if we are in 128 bit mode, then we are using 2 channels */ |
1145 | if (pvt->dclr0 & F10_WIDTH_128) { | 1072 | if (boot_cpu_data.x86 == 0x10 && (pvt->dclr0 & WIDTH_128)) |
1146 | channels = 2; | 1073 | return 2; |
1147 | return channels; | ||
1148 | } | ||
1149 | 1074 | ||
1150 | /* | 1075 | /* |
1151 | * Need to check if in unganged mode: In such, there are 2 channels, | 1076 | * Need to check if in unganged mode: In such, there are 2 channels, |
@@ -1162,9 +1087,8 @@ static int f10_early_channel_count(struct amd64_pvt *pvt) | |||
1162 | * is more than just one DIMM present in unganged mode. Need to check | 1087 | * is more than just one DIMM present in unganged mode. Need to check |
1163 | * both controllers since DIMMs can be placed in either one. | 1088 | * both controllers since DIMMs can be placed in either one. |
1164 | */ | 1089 | */ |
1165 | for (i = 0; i < ARRAY_SIZE(dbams); i++) { | 1090 | for (i = 0; i < 2; i++) { |
1166 | if (amd64_read_pci_cfg(pvt->F2, dbams[i], &dbam)) | 1091 | u32 dbam = (i ? pvt->dbam1 : pvt->dbam0); |
1167 | goto err_reg; | ||
1168 | 1092 | ||
1169 | for (j = 0; j < 4; j++) { | 1093 | for (j = 0; j < 4; j++) { |
1170 | if (DBAM_DIMM(j, dbam) > 0) { | 1094 | if (DBAM_DIMM(j, dbam) > 0) { |
@@ -1180,216 +1104,191 @@ static int f10_early_channel_count(struct amd64_pvt *pvt) | |||
1180 | amd64_info("MCT channel count: %d\n", channels); | 1104 | amd64_info("MCT channel count: %d\n", channels); |
1181 | 1105 | ||
1182 | return channels; | 1106 | return channels; |
1183 | |||
1184 | err_reg: | ||
1185 | return -1; | ||
1186 | |||
1187 | } | 1107 | } |
1188 | 1108 | ||
1189 | static int f10_dbam_to_chip_select(struct amd64_pvt *pvt, int cs_mode) | 1109 | static int ddr3_cs_size(unsigned i, bool dct_width) |
1190 | { | 1110 | { |
1191 | int *dbam_map; | 1111 | unsigned shift = 0; |
1112 | int cs_size = 0; | ||
1192 | 1113 | ||
1193 | if (pvt->dchr0 & DDR3_MODE || pvt->dchr1 & DDR3_MODE) | 1114 | if (i == 0 || i == 3 || i == 4) |
1194 | dbam_map = ddr3_dbam; | 1115 | cs_size = -1; |
1116 | else if (i <= 2) | ||
1117 | shift = i; | ||
1118 | else if (i == 12) | ||
1119 | shift = 7; | ||
1120 | else if (!(i & 0x1)) | ||
1121 | shift = i >> 1; | ||
1195 | else | 1122 | else |
1196 | dbam_map = ddr2_dbam; | 1123 | shift = (i + 1) >> 1; |
1124 | |||
1125 | if (cs_size != -1) | ||
1126 | cs_size = (128 * (1 << !!dct_width)) << shift; | ||
1197 | 1127 | ||
1198 | return dbam_map[cs_mode]; | 1128 | return cs_size; |
1199 | } | 1129 | } |
1200 | 1130 | ||
1201 | static u64 f10_get_error_address(struct mem_ctl_info *mci, | 1131 | static int f10_dbam_to_chip_select(struct amd64_pvt *pvt, u8 dct, |
1202 | struct err_regs *info) | 1132 | unsigned cs_mode) |
1203 | { | 1133 | { |
1204 | return (((u64) (info->nbeah & 0xffff)) << 32) + | 1134 | u32 dclr = dct ? pvt->dclr1 : pvt->dclr0; |
1205 | (info->nbeal & ~0x01); | 1135 | |
1136 | WARN_ON(cs_mode > 11); | ||
1137 | |||
1138 | if (pvt->dchr0 & DDR3_MODE || pvt->dchr1 & DDR3_MODE) | ||
1139 | return ddr3_cs_size(cs_mode, dclr & WIDTH_128); | ||
1140 | else | ||
1141 | return ddr2_cs_size(cs_mode, dclr & WIDTH_128); | ||
1206 | } | 1142 | } |
1207 | 1143 | ||
1208 | /* | 1144 | /* |
1209 | * Read the Base and Limit registers for F10 based Memory controllers. Extract | 1145 | * F15h supports only 64bit DCT interfaces |
1210 | * fields from the 'raw' reg into separate data fields. | ||
1211 | * | ||
1212 | * Isolates: BASE, LIMIT, IntlvEn, IntlvSel, RW_EN. | ||
1213 | */ | 1146 | */ |
1214 | static void f10_read_dram_base_limit(struct amd64_pvt *pvt, int dram) | 1147 | static int f15_dbam_to_chip_select(struct amd64_pvt *pvt, u8 dct, |
1148 | unsigned cs_mode) | ||
1215 | { | 1149 | { |
1216 | u32 high_offset, low_offset, high_base, low_base, high_limit, low_limit; | 1150 | WARN_ON(cs_mode > 12); |
1217 | |||
1218 | low_offset = K8_DRAM_BASE_LOW + (dram << 3); | ||
1219 | high_offset = F10_DRAM_BASE_HIGH + (dram << 3); | ||
1220 | |||
1221 | /* read the 'raw' DRAM BASE Address register */ | ||
1222 | amd64_read_pci_cfg(pvt->F1, low_offset, &low_base); | ||
1223 | amd64_read_pci_cfg(pvt->F1, high_offset, &high_base); | ||
1224 | |||
1225 | /* Extract parts into separate data entries */ | ||
1226 | pvt->dram_rw_en[dram] = (low_base & 0x3); | ||
1227 | |||
1228 | if (pvt->dram_rw_en[dram] == 0) | ||
1229 | return; | ||
1230 | |||
1231 | pvt->dram_IntlvEn[dram] = (low_base >> 8) & 0x7; | ||
1232 | |||
1233 | pvt->dram_base[dram] = (((u64)high_base & 0x000000FF) << 40) | | ||
1234 | (((u64)low_base & 0xFFFF0000) << 8); | ||
1235 | |||
1236 | low_offset = K8_DRAM_LIMIT_LOW + (dram << 3); | ||
1237 | high_offset = F10_DRAM_LIMIT_HIGH + (dram << 3); | ||
1238 | |||
1239 | /* read the 'raw' LIMIT registers */ | ||
1240 | amd64_read_pci_cfg(pvt->F1, low_offset, &low_limit); | ||
1241 | amd64_read_pci_cfg(pvt->F1, high_offset, &high_limit); | ||
1242 | |||
1243 | pvt->dram_DstNode[dram] = (low_limit & 0x7); | ||
1244 | pvt->dram_IntlvSel[dram] = (low_limit >> 8) & 0x7; | ||
1245 | 1151 | ||
1246 | /* | 1152 | return ddr3_cs_size(cs_mode, false); |
1247 | * Extract address values and form a LIMIT address. Limit is the HIGHEST | ||
1248 | * memory location of the region, so low 24 bits need to be all ones. | ||
1249 | */ | ||
1250 | pvt->dram_limit[dram] = (((u64)high_limit & 0x000000FF) << 40) | | ||
1251 | (((u64) low_limit & 0xFFFF0000) << 8) | | ||
1252 | 0x00FFFFFF; | ||
1253 | } | 1153 | } |
1254 | 1154 | ||
1255 | static void f10_read_dram_ctl_register(struct amd64_pvt *pvt) | 1155 | static void read_dram_ctl_register(struct amd64_pvt *pvt) |
1256 | { | 1156 | { |
1257 | 1157 | ||
1258 | if (!amd64_read_pci_cfg(pvt->F2, F10_DCTL_SEL_LOW, | 1158 | if (boot_cpu_data.x86 == 0xf) |
1259 | &pvt->dram_ctl_select_low)) { | 1159 | return; |
1260 | debugf0("F2x110 (DCTL Sel. Low): 0x%08x, " | 1160 | |
1261 | "High range addresses at: 0x%x\n", | 1161 | if (!amd64_read_dct_pci_cfg(pvt, DCT_SEL_LO, &pvt->dct_sel_lo)) { |
1262 | pvt->dram_ctl_select_low, | 1162 | debugf0("F2x110 (DCTSelLow): 0x%08x, High range addrs at: 0x%x\n", |
1263 | dct_sel_baseaddr(pvt)); | 1163 | pvt->dct_sel_lo, dct_sel_baseaddr(pvt)); |
1264 | 1164 | ||
1265 | debugf0(" DCT mode: %s, All DCTs on: %s\n", | 1165 | debugf0(" DCTs operate in %s mode.\n", |
1266 | (dct_ganging_enabled(pvt) ? "ganged" : "unganged"), | 1166 | (dct_ganging_enabled(pvt) ? "ganged" : "unganged")); |
1267 | (dct_dram_enabled(pvt) ? "yes" : "no")); | ||
1268 | 1167 | ||
1269 | if (!dct_ganging_enabled(pvt)) | 1168 | if (!dct_ganging_enabled(pvt)) |
1270 | debugf0(" Address range split per DCT: %s\n", | 1169 | debugf0(" Address range split per DCT: %s\n", |
1271 | (dct_high_range_enabled(pvt) ? "yes" : "no")); | 1170 | (dct_high_range_enabled(pvt) ? "yes" : "no")); |
1272 | 1171 | ||
1273 | debugf0(" DCT data interleave for ECC: %s, " | 1172 | debugf0(" data interleave for ECC: %s, " |
1274 | "DRAM cleared since last warm reset: %s\n", | 1173 | "DRAM cleared since last warm reset: %s\n", |
1275 | (dct_data_intlv_enabled(pvt) ? "enabled" : "disabled"), | 1174 | (dct_data_intlv_enabled(pvt) ? "enabled" : "disabled"), |
1276 | (dct_memory_cleared(pvt) ? "yes" : "no")); | 1175 | (dct_memory_cleared(pvt) ? "yes" : "no")); |
1277 | 1176 | ||
1278 | debugf0(" DCT channel interleave: %s, " | 1177 | debugf0(" channel interleave: %s, " |
1279 | "DCT interleave bits selector: 0x%x\n", | 1178 | "interleave bits selector: 0x%x\n", |
1280 | (dct_interleave_enabled(pvt) ? "enabled" : "disabled"), | 1179 | (dct_interleave_enabled(pvt) ? "enabled" : "disabled"), |
1281 | dct_sel_interleave_addr(pvt)); | 1180 | dct_sel_interleave_addr(pvt)); |
1282 | } | 1181 | } |
1283 | 1182 | ||
1284 | amd64_read_pci_cfg(pvt->F2, F10_DCTL_SEL_HIGH, | 1183 | amd64_read_dct_pci_cfg(pvt, DCT_SEL_HI, &pvt->dct_sel_hi); |
1285 | &pvt->dram_ctl_select_high); | ||
1286 | } | 1184 | } |
1287 | 1185 | ||
1288 | /* | 1186 | /* |
1289 | * determine channel based on the interleaving mode: F10h BKDG, 2.8.9 Memory | 1187 | * Determine channel (DCT) based on the interleaving mode: F10h BKDG, 2.8.9 Memory |
1290 | * Interleaving Modes. | 1188 | * Interleaving Modes. |
1291 | */ | 1189 | */ |
1292 | static u32 f10_determine_channel(struct amd64_pvt *pvt, u64 sys_addr, | 1190 | static u8 f1x_determine_channel(struct amd64_pvt *pvt, u64 sys_addr, |
1293 | int hi_range_sel, u32 intlv_en) | 1191 | bool hi_range_sel, u8 intlv_en) |
1294 | { | 1192 | { |
1295 | u32 cs, temp, dct_sel_high = (pvt->dram_ctl_select_low >> 1) & 1; | 1193 | u8 dct_sel_high = (pvt->dct_sel_lo >> 1) & 1; |
1296 | 1194 | ||
1297 | if (dct_ganging_enabled(pvt)) | 1195 | if (dct_ganging_enabled(pvt)) |
1298 | cs = 0; | 1196 | return 0; |
1299 | else if (hi_range_sel) | ||
1300 | cs = dct_sel_high; | ||
1301 | else if (dct_interleave_enabled(pvt)) { | ||
1302 | /* | ||
1303 | * see F2x110[DctSelIntLvAddr] - channel interleave mode | ||
1304 | */ | ||
1305 | if (dct_sel_interleave_addr(pvt) == 0) | ||
1306 | cs = sys_addr >> 6 & 1; | ||
1307 | else if ((dct_sel_interleave_addr(pvt) >> 1) & 1) { | ||
1308 | temp = hweight_long((u32) ((sys_addr >> 16) & 0x1F)) % 2; | ||
1309 | 1197 | ||
1310 | if (dct_sel_interleave_addr(pvt) & 1) | 1198 | if (hi_range_sel) |
1311 | cs = (sys_addr >> 9 & 1) ^ temp; | 1199 | return dct_sel_high; |
1312 | else | ||
1313 | cs = (sys_addr >> 6 & 1) ^ temp; | ||
1314 | } else if (intlv_en & 4) | ||
1315 | cs = sys_addr >> 15 & 1; | ||
1316 | else if (intlv_en & 2) | ||
1317 | cs = sys_addr >> 14 & 1; | ||
1318 | else if (intlv_en & 1) | ||
1319 | cs = sys_addr >> 13 & 1; | ||
1320 | else | ||
1321 | cs = sys_addr >> 12 & 1; | ||
1322 | } else if (dct_high_range_enabled(pvt) && !dct_ganging_enabled(pvt)) | ||
1323 | cs = ~dct_sel_high & 1; | ||
1324 | else | ||
1325 | cs = 0; | ||
1326 | 1200 | ||
1327 | return cs; | 1201 | /* |
1328 | } | 1202 | * see F2x110[DctSelIntLvAddr] - channel interleave mode |
1203 | */ | ||
1204 | if (dct_interleave_enabled(pvt)) { | ||
1205 | u8 intlv_addr = dct_sel_interleave_addr(pvt); | ||
1329 | 1206 | ||
1330 | static inline u32 f10_map_intlv_en_to_shift(u32 intlv_en) | 1207 | /* return DCT select function: 0=DCT0, 1=DCT1 */ |
1331 | { | 1208 | if (!intlv_addr) |
1332 | if (intlv_en == 1) | 1209 | return sys_addr >> 6 & 1; |
1333 | return 1; | 1210 | |
1334 | else if (intlv_en == 3) | 1211 | if (intlv_addr & 0x2) { |
1335 | return 2; | 1212 | u8 shift = intlv_addr & 0x1 ? 9 : 6; |
1336 | else if (intlv_en == 7) | 1213 | u32 temp = hweight_long((u32) ((sys_addr >> 16) & 0x1F)) % 2; |
1337 | return 3; | 1214 | |
1215 | return ((sys_addr >> shift) & 1) ^ temp; | ||
1216 | } | ||
1217 | |||
1218 | return (sys_addr >> (12 + hweight8(intlv_en))) & 1; | ||
1219 | } | ||
1220 | |||
1221 | if (dct_high_range_enabled(pvt)) | ||
1222 | return ~dct_sel_high & 1; | ||
1338 | 1223 | ||
1339 | return 0; | 1224 | return 0; |
1340 | } | 1225 | } |
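Stripped of the pvt plumbing, the selection above is a pure function of the system address and two register fields. A minimal user-space sketch (hweight_long() replaced by a portable popcount; this covers the unganged, non-high-range paths shown):

#include <stdint.h>

static unsigned popcount(uint32_t v)
{
	unsigned n = 0;

	for (; v; v &= v - 1)
		n++;
	return n;
}

/* Sketch of the DCT (channel) pick for the unganged case above. */
static uint8_t pick_dct(uint64_t sys_addr, uint8_t intlv_addr, uint8_t intlv_en)
{
	if (!intlv_addr)			/* plain A[6] select */
		return (sys_addr >> 6) & 1;

	if (intlv_addr & 0x2) {			/* hashed select: A[9] or A[6]... */
		uint8_t shift = (intlv_addr & 0x1) ? 9 : 6;
		uint32_t parity = popcount((sys_addr >> 16) & 0x1F) & 1;

		return ((sys_addr >> shift) & 1) ^ parity; /* ...XOR A[20:16] parity */
	}

	/* node interleaving consumed hweight8(intlv_en) bits above A[12] */
	return (sys_addr >> (12 + popcount(intlv_en))) & 1;
}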
1341 | 1226 | ||
1342 | /* See F10h BKDG, 2.8.10.2 DctSelBaseOffset Programming */ | 1227 | /* Convert the sys_addr to the normalized DCT address */ |
1343 | static inline u64 f10_get_base_addr_offset(u64 sys_addr, int hi_range_sel, | 1228 | static u64 f1x_get_norm_dct_addr(struct amd64_pvt *pvt, unsigned range, |
1344 | u32 dct_sel_base_addr, | 1229 | u64 sys_addr, bool hi_rng, |
1345 | u64 dct_sel_base_off, | 1230 | u32 dct_sel_base_addr) |
1346 | u32 hole_valid, u32 hole_off, | ||
1347 | u64 dram_base) | ||
1348 | { | 1231 | { |
1349 | u64 chan_off; | 1232 | u64 chan_off; |
1233 | u64 dram_base = get_dram_base(pvt, range); | ||
1234 | u64 hole_off = f10_dhar_offset(pvt); | ||
1235 | u64 dct_sel_base_off = (pvt->dct_sel_hi & 0xFFFFFC00) << 16; | ||
1350 | 1236 | ||
1351 | if (hi_range_sel) { | 1237 | if (hi_rng) { |
1352 | if (!(dct_sel_base_addr & 0xFFFF0000) && | 1238 | /* |
1353 | hole_valid && (sys_addr >= 0x100000000ULL)) | 1239 | * if |
1354 | chan_off = hole_off << 16; | 1240 | * base address of high range is below 4Gb |
1241 | * (bits [47:27] at [31:11]) | ||
1242 | * DRAM address space on this DCT is hoisted above 4Gb && | ||
1243 | * sys_addr > 4Gb | ||
1244 | * | ||
1245 | * remove hole offset from sys_addr | ||
1246 | * else | ||
1247 | * remove high range offset from sys_addr | ||
1248 | */ | ||
1249 | if ((!(dct_sel_base_addr >> 16) || | ||
1250 | dct_sel_base_addr < dhar_base(pvt)) && | ||
1251 | dhar_valid(pvt) && | ||
1252 | (sys_addr >= BIT_64(32))) | ||
1253 | chan_off = hole_off; | ||
1355 | else | 1254 | else |
1356 | chan_off = dct_sel_base_off; | 1255 | chan_off = dct_sel_base_off; |
1357 | } else { | 1256 | } else { |
1358 | if (hole_valid && (sys_addr >= 0x100000000ULL)) | 1257 | /* |
1359 | chan_off = hole_off << 16; | 1258 | * if |
1259 | * we have a valid hole && | ||
1260 | * sys_addr > 4Gb | ||
1261 | * | ||
1262 | * remove hole | ||
1263 | * else | ||
1264 | * remove dram base to normalize to DCT address | ||
1265 | */ | ||
1266 | if (dhar_valid(pvt) && (sys_addr >= BIT_64(32))) | ||
1267 | chan_off = hole_off; | ||
1360 | else | 1268 | else |
1361 | chan_off = dram_base & 0xFFFFF8000000ULL; | 1269 | chan_off = dram_base; |
1362 | } | 1270 | } |
1363 | 1271 | ||
1364 | return (sys_addr & 0x0000FFFFFFFFFFC0ULL) - | 1272 | return (sys_addr & GENMASK(6,47)) - (chan_off & GENMASK(23,47)); |
1365 | (chan_off & 0x0000FFFFFF800000ULL); | ||
1366 | } | 1273 | } |
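The closing subtraction relies on GENMASK() from amd64_edac.h (note its (lo, hi) argument order): keep bits [47:6] of the system address and subtract the chosen offset at bit-23 (8MB) granularity, which is exactly the 0x0000FFFFFFFFFFC0/0x0000FFFFFF800000 constants the old code on the left spelled out. A sketch of the whole decision, with the hole test collapsed into one flag:

#include <stdint.h>

#define GENMASK(lo, hi) (((1ULL << ((hi) - (lo) + 1)) - 1) << (lo))

/*
 * Sketch of the normalization above: pick the right offset (DRAM hole,
 * DctSelBaseOffset, or DRAM base), then subtract it at 8MB granularity
 * from the 64-byte-aligned system address. @use_hole stands in for the
 * dhar_valid()/sys_addr >= 4GB checks the driver performs.
 */
static uint64_t norm_dct_addr(uint64_t sys_addr, int hi_rng, int use_hole,
			      uint64_t hole_off, uint64_t dct_sel_base_off,
			      uint64_t dram_base)
{
	uint64_t chan_off;

	if (hi_rng)
		chan_off = use_hole ? hole_off : dct_sel_base_off;
	else
		chan_off = use_hole ? hole_off : dram_base;

	return (sys_addr & GENMASK(6, 47)) - (chan_off & GENMASK(23, 47));
}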
1367 | 1274 | ||
1368 | /* Hack for the time being - Can we get this from BIOS?? */ | ||
1369 | #define CH0SPARE_RANK 0 | ||
1370 | #define CH1SPARE_RANK 1 | ||
1371 | |||
1372 | /* | 1275 | /* |
1373 | * checks if the csrow passed in is marked as SPARED; if so, returns the new | 1276 | * checks if the csrow passed in is marked as SPARED; if so, returns the new |
1374 | * spare row | 1277 | * spare row |
1375 | */ | 1278 | */ |
1376 | static inline int f10_process_possible_spare(int csrow, | 1279 | static int f10_process_possible_spare(struct amd64_pvt *pvt, u8 dct, int csrow) |
1377 | u32 cs, struct amd64_pvt *pvt) | 1280 | { |
1378 | { | 1281 | int tmp_cs; |
1379 | u32 swap_done; | 1282 | |
1380 | u32 bad_dram_cs; | 1283 | if (online_spare_swap_done(pvt, dct) && |
1381 | 1284 | csrow == online_spare_bad_dramcs(pvt, dct)) { | |
1382 | /* Depending on channel, isolate respective SPARING info */ | 1285 | |
1383 | if (cs) { | 1286 | for_each_chip_select(tmp_cs, dct, pvt) { |
1384 | swap_done = F10_ONLINE_SPARE_SWAPDONE1(pvt->online_spare); | 1287 | if (chip_select_base(tmp_cs, dct, pvt) & 0x2) { |
1385 | bad_dram_cs = F10_ONLINE_SPARE_BADDRAM_CS1(pvt->online_spare); | 1288 | csrow = tmp_cs; |
1386 | if (swap_done && (csrow == bad_dram_cs)) | 1289 | break; |
1387 | csrow = CH1SPARE_RANK; | 1290 | } |
1388 | } else { | 1291 | } |
1389 | swap_done = F10_ONLINE_SPARE_SWAPDONE0(pvt->online_spare); | ||
1390 | bad_dram_cs = F10_ONLINE_SPARE_BADDRAM_CS0(pvt->online_spare); | ||
1391 | if (swap_done && (csrow == bad_dram_cs)) | ||
1392 | csrow = CH0SPARE_RANK; | ||
1393 | } | 1292 | } |
1394 | return csrow; | 1293 | return csrow; |
1395 | } | 1294 | } |
@@ -1402,11 +1301,11 @@ static inline int f10_process_possible_spare(int csrow, | |||
1402 | * -EINVAL: NOT FOUND | 1301 | * -EINVAL: NOT FOUND |
1403 | * 0..csrow = Chip-Select Row | 1302 | * 0..csrow = Chip-Select Row |
1404 | */ | 1303 | */ |
1405 | static int f10_lookup_addr_in_dct(u32 in_addr, u32 nid, u32 cs) | 1304 | static int f1x_lookup_addr_in_dct(u64 in_addr, u32 nid, u8 dct) |
1406 | { | 1305 | { |
1407 | struct mem_ctl_info *mci; | 1306 | struct mem_ctl_info *mci; |
1408 | struct amd64_pvt *pvt; | 1307 | struct amd64_pvt *pvt; |
1409 | u32 cs_base, cs_mask; | 1308 | u64 cs_base, cs_mask; |
1410 | int cs_found = -EINVAL; | 1309 | int cs_found = -EINVAL; |
1411 | int csrow; | 1310 | int csrow; |
1412 | 1311 | ||
@@ -1416,39 +1315,25 @@ static int f10_lookup_addr_in_dct(u32 in_addr, u32 nid, u32 cs) | |||
1416 | 1315 | ||
1417 | pvt = mci->pvt_info; | 1316 | pvt = mci->pvt_info; |
1418 | 1317 | ||
1419 | debugf1("InputAddr=0x%x channelselect=%d\n", in_addr, cs); | 1318 | debugf1("input addr: 0x%llx, DCT: %d\n", in_addr, dct); |
1420 | |||
1421 | for (csrow = 0; csrow < pvt->cs_count; csrow++) { | ||
1422 | 1319 | ||
1423 | cs_base = amd64_get_dct_base(pvt, cs, csrow); | 1320 | for_each_chip_select(csrow, dct, pvt) { |
1424 | if (!(cs_base & K8_DCSB_CS_ENABLE)) | 1321 | if (!csrow_enabled(csrow, dct, pvt)) |
1425 | continue; | 1322 | continue; |
1426 | 1323 | ||
1427 | /* | 1324 | get_cs_base_and_mask(pvt, csrow, dct, &cs_base, &cs_mask); |
1428 | * We have an ENABLED CSROW, Isolate just the MASK bits of the | ||
1429 | * target: [28:19] and [13:5], which map to [36:27] and [21:13] | ||
1430 | * of the actual address. | ||
1431 | */ | ||
1432 | cs_base &= REV_F_F1Xh_DCSB_BASE_BITS; | ||
1433 | |||
1434 | /* | ||
1435 | * Get the DCT Mask, and ENABLE the reserved bits: [18:16] and | ||
1436 | * [4:0] to become ON. Then mask off bits [28:0] ([36:8]) | ||
1437 | */ | ||
1438 | cs_mask = amd64_get_dct_mask(pvt, cs, csrow); | ||
1439 | 1325 | ||
1440 | debugf1(" CSROW=%d CSBase=0x%x RAW CSMask=0x%x\n", | 1326 | debugf1(" CSROW=%d CSBase=0x%llx CSMask=0x%llx\n", |
1441 | csrow, cs_base, cs_mask); | 1327 | csrow, cs_base, cs_mask); |
1442 | 1328 | ||
1443 | cs_mask = (cs_mask | 0x0007C01F) & 0x1FFFFFFF; | 1329 | cs_mask = ~cs_mask; |
1444 | 1330 | ||
1445 | debugf1(" Final CSMask=0x%x\n", cs_mask); | 1331 | debugf1(" (InputAddr & ~CSMask)=0x%llx " |
1446 | debugf1(" (InputAddr & ~CSMask)=0x%x " | 1332 | "(CSBase & ~CSMask)=0x%llx\n", |
1447 | "(CSBase & ~CSMask)=0x%x\n", | 1333 | (in_addr & cs_mask), (cs_base & cs_mask)); |
1448 | (in_addr & ~cs_mask), (cs_base & ~cs_mask)); | ||
1449 | 1334 | ||
1450 | if ((in_addr & ~cs_mask) == (cs_base & ~cs_mask)) { | 1335 | if ((in_addr & cs_mask) == (cs_base & cs_mask)) { |
1451 | cs_found = f10_process_possible_spare(csrow, cs, pvt); | 1336 | cs_found = f10_process_possible_spare(pvt, dct, csrow); |
1452 | 1337 | ||
1453 | debugf1(" MATCH csrow=%d\n", cs_found); | 1338 | debugf1(" MATCH csrow=%d\n", cs_found); |
1454 | break; | 1339 | break; |
@@ -1457,38 +1342,75 @@ static int f10_lookup_addr_in_dct(u32 in_addr, u32 nid, u32 cs) | |||
1457 | return cs_found; | 1342 | return cs_found; |
1458 | } | 1343 | } |
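The hit test at the heart of the loop above is just a masked compare: get_cs_base_and_mask() hands back a mask whose set bits are "don't care", so the driver inverts it and compares only the remaining bits of the input address and the chip-select base. In isolation:

#include <stdbool.h>
#include <stdint.h>

/* Sketch of the chip-select hit test in f1x_lookup_addr_in_dct() above. */
static bool cs_hit(uint64_t in_addr, uint64_t cs_base, uint64_t cs_mask)
{
	uint64_t care = ~cs_mask;	/* driver inverts the mask first */

	return (in_addr & care) == (cs_base & care);
}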
1459 | 1344 | ||
1460 | /* For a given @dram_range, check if @sys_addr falls within it. */ | 1345 | /* |
1461 | static int f10_match_to_this_node(struct amd64_pvt *pvt, int dram_range, | 1346 | * See F2x10C. Non-interleaved graphics framebuffer memory under the 16G is |
1462 | u64 sys_addr, int *nid, int *chan_sel) | 1347 | * swapped with a region located at the bottom of memory so that the GPU can use |
1348 | * the interleaved region and thus two channels. | ||
1349 | */ | ||
1350 | static u64 f1x_swap_interleaved_region(struct amd64_pvt *pvt, u64 sys_addr) | ||
1463 | { | 1351 | { |
1464 | int node_id, cs_found = -EINVAL, high_range = 0; | 1352 | u32 swap_reg, swap_base, swap_limit, rgn_size, tmp_addr; |
1465 | u32 intlv_en, intlv_sel, intlv_shift, hole_off; | ||
1466 | u32 hole_valid, tmp, dct_sel_base, channel; | ||
1467 | u64 dram_base, chan_addr, dct_sel_base_off; | ||
1468 | 1353 | ||
1469 | dram_base = pvt->dram_base[dram_range]; | 1354 | if (boot_cpu_data.x86 == 0x10) { |
1470 | intlv_en = pvt->dram_IntlvEn[dram_range]; | 1355 | /* only revC3 and revE have that feature */ |
1356 | if (boot_cpu_data.x86_model < 4 || | ||
1357 | (boot_cpu_data.x86_model < 0xa && | ||
1358 | boot_cpu_data.x86_mask < 3)) | ||
1359 | return sys_addr; | ||
1360 | } | ||
1471 | 1361 | ||
1472 | node_id = pvt->dram_DstNode[dram_range]; | 1362 | amd64_read_dct_pci_cfg(pvt, SWAP_INTLV_REG, &swap_reg); |
1473 | intlv_sel = pvt->dram_IntlvSel[dram_range]; | ||
1474 | 1363 | ||
1475 | debugf1("(dram=%d) Base=0x%llx SystemAddr= 0x%llx Limit=0x%llx\n", | 1364 | if (!(swap_reg & 0x1)) |
1476 | dram_range, dram_base, sys_addr, pvt->dram_limit[dram_range]); | 1365 | return sys_addr; |
1477 | 1366 | ||
1478 | /* | 1367 | swap_base = (swap_reg >> 3) & 0x7f; |
1479 | * This assumes that one node's DHAR is the same as all the other | 1368 | swap_limit = (swap_reg >> 11) & 0x7f; |
1480 | * nodes' DHAR. | 1369 | rgn_size = (swap_reg >> 20) & 0x7f; |
1481 | */ | 1370 | tmp_addr = sys_addr >> 27; |
1482 | hole_off = (pvt->dhar & 0x0000FF80); | ||
1483 | hole_valid = (pvt->dhar & 0x1); | ||
1484 | dct_sel_base_off = (pvt->dram_ctl_select_high & 0xFFFFFC00) << 16; | ||
1485 | 1371 | ||
1486 | debugf1(" HoleOffset=0x%x HoleValid=0x%x IntlvSel=0x%x\n", | 1372 | if (!(sys_addr >> 34) && |
1487 | hole_off, hole_valid, intlv_sel); | 1373 | (((tmp_addr >= swap_base) && |
1374 | (tmp_addr <= swap_limit)) || | ||
1375 | (tmp_addr < rgn_size))) | ||
1376 | return sys_addr ^ (u64)swap_base << 27; | ||
1377 | |||
1378 | return sys_addr; | ||
1379 | } | ||
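The swap itself costs one XOR. For an address below 16GB (sys_addr >> 34 == 0) that lies either inside the swapped window or inside the same-sized region at the bottom of memory, XORing in the swap base at 128MB (bit 27) granularity maps each region onto the other. A sketch with the fields spelled out as the code above reads them:

#include <stdint.h>

/* Sketch of the region swap above; field offsets as read from the register. */
static uint64_t swap_intlv_region(uint32_t swap_reg, uint64_t sys_addr)
{
	uint32_t swap_base  = (swap_reg >> 3)  & 0x7f;	/* in 128MB units */
	uint32_t swap_limit = (swap_reg >> 11) & 0x7f;
	uint32_t rgn_size   = (swap_reg >> 20) & 0x7f;
	uint32_t blk        = sys_addr >> 27;		/* 128MB block number */

	if (!(swap_reg & 0x1))				/* swap not enabled */
		return sys_addr;

	if (!(sys_addr >> 34) &&			/* below 16GB only */
	    ((blk >= swap_base && blk <= swap_limit) || blk < rgn_size))
		return sys_addr ^ ((uint64_t)swap_base << 27);

	return sys_addr;
}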
1380 | |||
1381 | /* For a given @dram_range, check if @sys_addr falls within it. */ | ||
1382 | static int f1x_match_to_this_node(struct amd64_pvt *pvt, unsigned range, | ||
1383 | u64 sys_addr, int *nid, int *chan_sel) | ||
1384 | { | ||
1385 | int cs_found = -EINVAL; | ||
1386 | u64 chan_addr; | ||
1387 | u32 dct_sel_base; | ||
1388 | u8 channel; | ||
1389 | bool high_range = false; | ||
1390 | |||
1391 | u8 node_id = dram_dst_node(pvt, range); | ||
1392 | u8 intlv_en = dram_intlv_en(pvt, range); | ||
1393 | u32 intlv_sel = dram_intlv_sel(pvt, range); | ||
1394 | |||
1395 | debugf1("(range %d) SystemAddr= 0x%llx Limit=0x%llx\n", | ||
1396 | range, sys_addr, get_dram_limit(pvt, range)); | ||
1397 | |||
1398 | if (dhar_valid(pvt) && | ||
1399 | dhar_base(pvt) <= sys_addr && | ||
1400 | sys_addr < BIT_64(32)) { | ||
1401 | amd64_warn("Huh? Address is in the MMIO hole: 0x%016llx\n", | ||
1402 | sys_addr); | ||
1403 | return -EINVAL; | ||
1404 | } | ||
1488 | 1405 | ||
1489 | if (intlv_en && | 1406 | if (intlv_en && |
1490 | (intlv_sel != ((sys_addr >> 12) & intlv_en))) | 1407 | (intlv_sel != ((sys_addr >> 12) & intlv_en))) { |
1408 | amd64_warn("Botched intlv bits, en: 0x%x, sel: 0x%x\n", | ||
1409 | intlv_en, intlv_sel); | ||
1491 | return -EINVAL; | 1410 | return -EINVAL; |
1411 | } | ||
1412 | |||
1413 | sys_addr = f1x_swap_interleaved_region(pvt, sys_addr); | ||
1492 | 1414 | ||
1493 | dct_sel_base = dct_sel_baseaddr(pvt); | 1415 | dct_sel_base = dct_sel_baseaddr(pvt); |
1494 | 1416 | ||
@@ -1499,38 +1421,41 @@ static int f10_match_to_this_node(struct amd64_pvt *pvt, int dram_range, | |||
1499 | if (dct_high_range_enabled(pvt) && | 1421 | if (dct_high_range_enabled(pvt) && |
1500 | !dct_ganging_enabled(pvt) && | 1422 | !dct_ganging_enabled(pvt) && |
1501 | ((sys_addr >> 27) >= (dct_sel_base >> 11))) | 1423 | ((sys_addr >> 27) >= (dct_sel_base >> 11))) |
1502 | high_range = 1; | 1424 | high_range = true; |
1503 | |||
1504 | channel = f10_determine_channel(pvt, sys_addr, high_range, intlv_en); | ||
1505 | 1425 | ||
1506 | chan_addr = f10_get_base_addr_offset(sys_addr, high_range, dct_sel_base, | 1426 | channel = f1x_determine_channel(pvt, sys_addr, high_range, intlv_en); |
1507 | dct_sel_base_off, hole_valid, | ||
1508 | hole_off, dram_base); | ||
1509 | 1427 | ||
1510 | intlv_shift = f10_map_intlv_en_to_shift(intlv_en); | 1428 | chan_addr = f1x_get_norm_dct_addr(pvt, range, sys_addr, |
1429 | high_range, dct_sel_base); | ||
1511 | 1430 | ||
1512 | /* remove Node ID (in case of memory interleaving) */ | 1431 | /* Remove node interleaving, see F1x120 */ |
1513 | tmp = chan_addr & 0xFC0; | 1432 | if (intlv_en) |
1433 | chan_addr = ((chan_addr >> (12 + hweight8(intlv_en))) << 12) | | ||
1434 | (chan_addr & 0xfff); | ||
1514 | 1435 | ||
1515 | chan_addr = ((chan_addr >> intlv_shift) & 0xFFFFFFFFF000ULL) | tmp; | 1436 | /* remove channel interleave */ |
1516 | |||
1517 | /* remove channel interleave and hash */ | ||
1518 | if (dct_interleave_enabled(pvt) && | 1437 | if (dct_interleave_enabled(pvt) && |
1519 | !dct_high_range_enabled(pvt) && | 1438 | !dct_high_range_enabled(pvt) && |
1520 | !dct_ganging_enabled(pvt)) { | 1439 | !dct_ganging_enabled(pvt)) { |
1521 | if (dct_sel_interleave_addr(pvt) != 1) | 1440 | |
1522 | chan_addr = (chan_addr >> 1) & 0xFFFFFFFFFFFFFFC0ULL; | 1441 | if (dct_sel_interleave_addr(pvt) != 1) { |
1523 | else { | 1442 | if (dct_sel_interleave_addr(pvt) == 0x3) |
1524 | tmp = chan_addr & 0xFC0; | 1443 | /* hash 9 */ |
1525 | chan_addr = ((chan_addr & 0xFFFFFFFFFFFFC000ULL) >> 1) | 1444 | chan_addr = ((chan_addr >> 10) << 9) | |
1526 | | tmp; | 1445 | (chan_addr & 0x1ff); |
1527 | } | 1446 | else |
1447 | /* A[6] or hash 6 */ | ||
1448 | chan_addr = ((chan_addr >> 7) << 6) | | ||
1449 | (chan_addr & 0x3f); | ||
1450 | } else | ||
1451 | /* A[12] */ | ||
1452 | chan_addr = ((chan_addr >> 13) << 12) | | ||
1453 | (chan_addr & 0xfff); | ||
1528 | } | 1454 | } |
1529 | 1455 | ||
1530 | debugf1(" (ChannelAddrLong=0x%llx) >> 8 becomes InputAddr=0x%x\n", | 1456 | debugf1(" Normalized DCT addr: 0x%llx\n", chan_addr); |
1531 | chan_addr, (u32)(chan_addr >> 8)); | ||
1532 | 1457 | ||
1533 | cs_found = f10_lookup_addr_in_dct(chan_addr >> 8, node_id, channel); | 1458 | cs_found = f1x_lookup_addr_in_dct(chan_addr, node_id, channel); |
1534 | 1459 | ||
1535 | if (cs_found >= 0) { | 1460 | if (cs_found >= 0) { |
1536 | *nid = node_id; | 1461 | *nid = node_id; |
@@ -1539,23 +1464,21 @@ static int f10_match_to_this_node(struct amd64_pvt *pvt, int dram_range, | |||
1539 | return cs_found; | 1464 | return cs_found; |
1540 | } | 1465 | } |
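Both de-interleaving steps above are "squeeze a select bit out of the address" operations: node interleaving (F1x120) inserted hweight8(intlv_en) bits at A[12], and channel interleaving one bit at A[6], A[9] (hash 9) or A[12]. As standalone helpers mirroring the arithmetic shown:

#include <stdint.h>

static unsigned popcount8(uint8_t v)
{
	unsigned n = 0;

	for (; v; v &= v - 1)
		n++;
	return n;
}

/* Drop the node-select bits inserted at A[12] (see F1x120). */
static uint64_t remove_node_intlv(uint64_t chan_addr, uint8_t intlv_en)
{
	if (!intlv_en)
		return chan_addr;
	return ((chan_addr >> (12 + popcount8(intlv_en))) << 12) |
	       (chan_addr & 0xfff);
}

/* Drop one channel-select bit at position @bit (6, 9 or 12 above). */
static uint64_t remove_chan_intlv(uint64_t chan_addr, unsigned bit)
{
	return ((chan_addr >> (bit + 1)) << bit) |
	       (chan_addr & ((1ULL << bit) - 1));
}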
1541 | 1466 | ||
1542 | static int f10_translate_sysaddr_to_cs(struct amd64_pvt *pvt, u64 sys_addr, | 1467 | static int f1x_translate_sysaddr_to_cs(struct amd64_pvt *pvt, u64 sys_addr, |
1543 | int *node, int *chan_sel) | 1468 | int *node, int *chan_sel) |
1544 | { | 1469 | { |
1545 | int dram_range, cs_found = -EINVAL; | 1470 | int cs_found = -EINVAL; |
1546 | u64 dram_base, dram_limit; | 1471 | unsigned range; |
1547 | 1472 | ||
1548 | for (dram_range = 0; dram_range < DRAM_REG_COUNT; dram_range++) { | 1473 | for (range = 0; range < DRAM_RANGES; range++) { |
1549 | 1474 | ||
1550 | if (!pvt->dram_rw_en[dram_range]) | 1475 | if (!dram_rw(pvt, range)) |
1551 | continue; | 1476 | continue; |
1552 | 1477 | ||
1553 | dram_base = pvt->dram_base[dram_range]; | 1478 | if ((get_dram_base(pvt, range) <= sys_addr) && |
1554 | dram_limit = pvt->dram_limit[dram_range]; | 1479 | (get_dram_limit(pvt, range) >= sys_addr)) { |
1555 | 1480 | ||
1556 | if ((dram_base <= sys_addr) && (sys_addr <= dram_limit)) { | 1481 | cs_found = f1x_match_to_this_node(pvt, range, |
1557 | |||
1558 | cs_found = f10_match_to_this_node(pvt, dram_range, | ||
1559 | sys_addr, node, | 1482 | sys_addr, node, |
1560 | chan_sel); | 1483 | chan_sel); |
1561 | if (cs_found >= 0) | 1484 | if (cs_found >= 0) |
@@ -1572,16 +1495,14 @@ static int f10_translate_sysaddr_to_cs(struct amd64_pvt *pvt, u64 sys_addr, | |||
1572 | * The @sys_addr is usually an error address received from the hardware | 1495 | * The @sys_addr is usually an error address received from the hardware |
1573 | * (MCX_ADDR). | 1496 | * (MCX_ADDR). |
1574 | */ | 1497 | */ |
1575 | static void f10_map_sysaddr_to_csrow(struct mem_ctl_info *mci, | 1498 | static void f1x_map_sysaddr_to_csrow(struct mem_ctl_info *mci, u64 sys_addr, |
1576 | struct err_regs *err_info, | 1499 | u16 syndrome) |
1577 | u64 sys_addr) | ||
1578 | { | 1500 | { |
1579 | struct amd64_pvt *pvt = mci->pvt_info; | 1501 | struct amd64_pvt *pvt = mci->pvt_info; |
1580 | u32 page, offset; | 1502 | u32 page, offset; |
1581 | int nid, csrow, chan = 0; | 1503 | int nid, csrow, chan = 0; |
1582 | u16 syndrome; | ||
1583 | 1504 | ||
1584 | csrow = f10_translate_sysaddr_to_cs(pvt, sys_addr, &nid, &chan); | 1505 | csrow = f1x_translate_sysaddr_to_cs(pvt, sys_addr, &nid, &chan); |
1585 | 1506 | ||
1586 | if (csrow < 0) { | 1507 | if (csrow < 0) { |
1587 | edac_mc_handle_ce_no_info(mci, EDAC_MOD_STR); | 1508 | edac_mc_handle_ce_no_info(mci, EDAC_MOD_STR); |
@@ -1590,14 +1511,12 @@ static void f10_map_sysaddr_to_csrow(struct mem_ctl_info *mci, | |||
1590 | 1511 | ||
1591 | error_address_to_page_and_offset(sys_addr, &page, &offset); | 1512 | error_address_to_page_and_offset(sys_addr, &page, &offset); |
1592 | 1513 | ||
1593 | syndrome = extract_syndrome(err_info); | ||
1594 | |||
1595 | /* | 1514 | /* |
1596 | * We need the syndromes for channel detection only when we're | 1515 | * We need the syndromes for channel detection only when we're |
1597 | * ganged. Otherwise @chan should already contain the channel at | 1516 | * ganged. Otherwise @chan should already contain the channel at |
1598 | * this point. | 1517 | * this point. |
1599 | */ | 1518 | */ |
1600 | if (dct_ganging_enabled(pvt) && (pvt->nbcfg & K8_NBCFG_CHIPKILL)) | 1519 | if (dct_ganging_enabled(pvt)) |
1601 | chan = get_channel_from_ecc_syndrome(mci, syndrome); | 1520 | chan = get_channel_from_ecc_syndrome(mci, syndrome); |
1602 | 1521 | ||
1603 | if (chan >= 0) | 1522 | if (chan >= 0) |
@@ -1614,16 +1533,16 @@ static void f10_map_sysaddr_to_csrow(struct mem_ctl_info *mci, | |||
1614 | 1533 | ||
1615 | /* | 1534 | /* |
1616 | * debug routine to display the memory sizes of all logical DIMMs and their | 1535 | * debug routine to display the memory sizes of all logical DIMMs and their |
1617 | * CSROWs as well | 1536 | * CSROWs |
1618 | */ | 1537 | */ |
1619 | static void amd64_debug_display_dimm_sizes(int ctrl, struct amd64_pvt *pvt) | 1538 | static void amd64_debug_display_dimm_sizes(struct amd64_pvt *pvt, u8 ctrl) |
1620 | { | 1539 | { |
1621 | int dimm, size0, size1, factor = 0; | 1540 | int dimm, size0, size1, factor = 0; |
1622 | u32 dbam; | 1541 | u32 *dcsb = ctrl ? pvt->csels[1].csbases : pvt->csels[0].csbases; |
1623 | u32 *dcsb; | 1542 | u32 dbam = ctrl ? pvt->dbam1 : pvt->dbam0; |
1624 | 1543 | ||
1625 | if (boot_cpu_data.x86 == 0xf) { | 1544 | if (boot_cpu_data.x86 == 0xf) { |
1626 | if (pvt->dclr0 & F10_WIDTH_128) | 1545 | if (pvt->dclr0 & WIDTH_128) |
1627 | factor = 1; | 1546 | factor = 1; |
1628 | 1547 | ||
1629 | /* K8 families < revF not supported yet */ | 1548 | /* K8 families < revF not supported yet */ |
@@ -1634,7 +1553,8 @@ static void amd64_debug_display_dimm_sizes(int ctrl, struct amd64_pvt *pvt) | |||
1634 | } | 1553 | } |
1635 | 1554 | ||
1636 | dbam = (ctrl && !dct_ganging_enabled(pvt)) ? pvt->dbam1 : pvt->dbam0; | 1555 | dbam = (ctrl && !dct_ganging_enabled(pvt)) ? pvt->dbam1 : pvt->dbam0; |
1637 | dcsb = (ctrl && !dct_ganging_enabled(pvt)) ? pvt->dcsb1 : pvt->dcsb0; | 1556 | dcsb = (ctrl && !dct_ganging_enabled(pvt)) ? pvt->csels[1].csbases |
1557 | : pvt->csels[0].csbases; | ||
1638 | 1558 | ||
1639 | debugf1("F2x%d80 (DRAM Bank Address Mapping): 0x%08x\n", ctrl, dbam); | 1559 | debugf1("F2x%d80 (DRAM Bank Address Mapping): 0x%08x\n", ctrl, dbam); |
1640 | 1560 | ||
@@ -1644,12 +1564,14 @@ static void amd64_debug_display_dimm_sizes(int ctrl, struct amd64_pvt *pvt) | |||
1644 | for (dimm = 0; dimm < 4; dimm++) { | 1564 | for (dimm = 0; dimm < 4; dimm++) { |
1645 | 1565 | ||
1646 | size0 = 0; | 1566 | size0 = 0; |
1647 | if (dcsb[dimm*2] & K8_DCSB_CS_ENABLE) | 1567 | if (dcsb[dimm*2] & DCSB_CS_ENABLE) |
1648 | size0 = pvt->ops->dbam_to_cs(pvt, DBAM_DIMM(dimm, dbam)); | 1568 | size0 = pvt->ops->dbam_to_cs(pvt, ctrl, |
1569 | DBAM_DIMM(dimm, dbam)); | ||
1649 | 1570 | ||
1650 | size1 = 0; | 1571 | size1 = 0; |
1651 | if (dcsb[dimm*2 + 1] & K8_DCSB_CS_ENABLE) | 1572 | if (dcsb[dimm*2 + 1] & DCSB_CS_ENABLE) |
1652 | size1 = pvt->ops->dbam_to_cs(pvt, DBAM_DIMM(dimm, dbam)); | 1573 | size1 = pvt->ops->dbam_to_cs(pvt, ctrl, |
1574 | DBAM_DIMM(dimm, dbam)); | ||
1653 | 1575 | ||
1654 | amd64_info(EDAC_MC ": %d: %5dMB %d: %5dMB\n", | 1576 | amd64_info(EDAC_MC ": %d: %5dMB %d: %5dMB\n", |
1655 | dimm * 2, size0 << factor, | 1577 | dimm * 2, size0 << factor, |
@@ -1664,10 +1586,9 @@ static struct amd64_family_type amd64_family_types[] = { | |||
1664 | .f3_id = PCI_DEVICE_ID_AMD_K8_NB_MISC, | 1586 | .f3_id = PCI_DEVICE_ID_AMD_K8_NB_MISC, |
1665 | .ops = { | 1587 | .ops = { |
1666 | .early_channel_count = k8_early_channel_count, | 1588 | .early_channel_count = k8_early_channel_count, |
1667 | .get_error_address = k8_get_error_address, | ||
1668 | .read_dram_base_limit = k8_read_dram_base_limit, | ||
1669 | .map_sysaddr_to_csrow = k8_map_sysaddr_to_csrow, | 1589 | .map_sysaddr_to_csrow = k8_map_sysaddr_to_csrow, |
1670 | .dbam_to_cs = k8_dbam_to_chip_select, | 1590 | .dbam_to_cs = k8_dbam_to_chip_select, |
1591 | .read_dct_pci_cfg = k8_read_dct_pci_cfg, | ||
1671 | } | 1592 | } |
1672 | }, | 1593 | }, |
1673 | [F10_CPUS] = { | 1594 | [F10_CPUS] = { |
@@ -1675,12 +1596,21 @@ static struct amd64_family_type amd64_family_types[] = { | |||
1675 | .f1_id = PCI_DEVICE_ID_AMD_10H_NB_MAP, | 1596 | .f1_id = PCI_DEVICE_ID_AMD_10H_NB_MAP, |
1676 | .f3_id = PCI_DEVICE_ID_AMD_10H_NB_MISC, | 1597 | .f3_id = PCI_DEVICE_ID_AMD_10H_NB_MISC, |
1677 | .ops = { | 1598 | .ops = { |
1678 | .early_channel_count = f10_early_channel_count, | 1599 | .early_channel_count = f1x_early_channel_count, |
1679 | .get_error_address = f10_get_error_address, | 1600 | .map_sysaddr_to_csrow = f1x_map_sysaddr_to_csrow, |
1680 | .read_dram_base_limit = f10_read_dram_base_limit, | ||
1681 | .read_dram_ctl_register = f10_read_dram_ctl_register, | ||
1682 | .map_sysaddr_to_csrow = f10_map_sysaddr_to_csrow, | ||
1683 | .dbam_to_cs = f10_dbam_to_chip_select, | 1601 | .dbam_to_cs = f10_dbam_to_chip_select, |
1602 | .read_dct_pci_cfg = f10_read_dct_pci_cfg, | ||
1603 | } | ||
1604 | }, | ||
1605 | [F15_CPUS] = { | ||
1606 | .ctl_name = "F15h", | ||
1607 | .f1_id = PCI_DEVICE_ID_AMD_15H_NB_F1, | ||
1608 | .f3_id = PCI_DEVICE_ID_AMD_15H_NB_F3, | ||
1609 | .ops = { | ||
1610 | .early_channel_count = f1x_early_channel_count, | ||
1611 | .map_sysaddr_to_csrow = f1x_map_sysaddr_to_csrow, | ||
1612 | .dbam_to_cs = f15_dbam_to_chip_select, | ||
1613 | .read_dct_pci_cfg = f15_read_dct_pci_cfg, | ||
1684 | } | 1614 | } |
1685 | }, | 1615 | }, |
1686 | }; | 1616 | }; |
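Everything family-specific is funneled through this ops table; the rest of the driver calls through pvt->ops and never tests the family again. A hypothetical miniature of the pattern (the names and the sizing math here are made up purely for illustration):

#include <stdio.h>

/* Hypothetical miniature of the ops-table dispatch used above. */
struct mini_ops {
	int (*dbam_to_cs)(int cs_mode);
};

static int k8_cs(int cs_mode)  { return 32 << cs_mode; }	/* made-up sizing */
static int f15_cs(int cs_mode) { return 64 << cs_mode; }	/* made-up sizing */

static const struct mini_ops fam_ops[] = {
	{ .dbam_to_cs = k8_cs },
	{ .dbam_to_cs = f15_cs },
};

int main(void)
{
	int fam = 1;	/* picked once at init, like amd64_per_family_init() */

	printf("%d MB\n", fam_ops[fam].dbam_to_cs(3));
	return 0;
}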
@@ -1770,15 +1700,15 @@ static u16 x8_vectors[] = { | |||
1770 | 0x0100, 0x0200, 0x0400, 0x0800, 0x1000, 0x2000, 0x4000, 0x8000, | 1700 | 0x0100, 0x0200, 0x0400, 0x0800, 0x1000, 0x2000, 0x4000, 0x8000, |
1771 | }; | 1701 | }; |
1772 | 1702 | ||
1773 | static int decode_syndrome(u16 syndrome, u16 *vectors, int num_vecs, | 1703 | static int decode_syndrome(u16 syndrome, u16 *vectors, unsigned num_vecs, |
1774 | int v_dim) | 1704 | unsigned v_dim) |
1775 | { | 1705 | { |
1776 | unsigned int i, err_sym; | 1706 | unsigned int i, err_sym; |
1777 | 1707 | ||
1778 | for (err_sym = 0; err_sym < num_vecs / v_dim; err_sym++) { | 1708 | for (err_sym = 0; err_sym < num_vecs / v_dim; err_sym++) { |
1779 | u16 s = syndrome; | 1709 | u16 s = syndrome; |
1780 | int v_idx = err_sym * v_dim; | 1710 | unsigned v_idx = err_sym * v_dim; |
1781 | int v_end = (err_sym + 1) * v_dim; | 1711 | unsigned v_end = (err_sym + 1) * v_dim; |
1782 | 1712 | ||
1783 | /* walk over all 16 bits of the syndrome */ | 1713 | /* walk over all 16 bits of the syndrome */ |
1784 | for (i = 1; i < (1U << 16); i <<= 1) { | 1714 | for (i = 1; i < (1U << 16); i <<= 1) { |
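The hunk cuts the loop body off here; for orientation, a user-space sketch of the full elimination as the driver performs it (walk the syndrome LSB-first, XOR out each symbol vector whose lowest pending bit matches, and succeed when the syndrome reaches zero):

#include <stdint.h>

/*
 * Sketch mirroring decode_syndrome() above: each error symbol owns v_dim
 * 16-bit vectors. A set syndrome bit that the symbol's next vector covers
 * gets XORed away; a set bit no vector can clear means "try the next
 * symbol". Returns the symbol index, or -1 if no symbol explains the
 * syndrome.
 */
static int decode_syndrome_sketch(uint16_t syndrome, const uint16_t *vectors,
				  unsigned num_vecs, unsigned v_dim)
{
	unsigned err_sym;

	for (err_sym = 0; err_sym < num_vecs / v_dim; err_sym++) {
		uint16_t s = syndrome;
		unsigned v_idx = err_sym * v_dim;
		unsigned v_end = (err_sym + 1) * v_dim;
		uint32_t i;

		/* walk over all 16 bits of the syndrome */
		for (i = 1; i < (1U << 16); i <<= 1) {
			if (v_idx < v_end && (vectors[v_idx] & i)) {
				uint16_t ev_comp = vectors[v_idx++];

				if (s & i) {
					s ^= ev_comp;	/* remove this bit */
					if (!s)
						return err_sym;
				}
			} else if (s & i) {
				break;	/* bit can't be cleared, next symbol */
			}
		}
	}
	return -1;
}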
@@ -1850,51 +1780,50 @@ static int get_channel_from_ecc_syndrome(struct mem_ctl_info *mci, u16 syndrome) | |||
1850 | struct amd64_pvt *pvt = mci->pvt_info; | 1780 | struct amd64_pvt *pvt = mci->pvt_info; |
1851 | int err_sym = -1; | 1781 | int err_sym = -1; |
1852 | 1782 | ||
1853 | if (pvt->syn_type == 8) | 1783 | if (pvt->ecc_sym_sz == 8) |
1854 | err_sym = decode_syndrome(syndrome, x8_vectors, | 1784 | err_sym = decode_syndrome(syndrome, x8_vectors, |
1855 | ARRAY_SIZE(x8_vectors), | 1785 | ARRAY_SIZE(x8_vectors), |
1856 | pvt->syn_type); | 1786 | pvt->ecc_sym_sz); |
1857 | else if (pvt->syn_type == 4) | 1787 | else if (pvt->ecc_sym_sz == 4) |
1858 | err_sym = decode_syndrome(syndrome, x4_vectors, | 1788 | err_sym = decode_syndrome(syndrome, x4_vectors, |
1859 | ARRAY_SIZE(x4_vectors), | 1789 | ARRAY_SIZE(x4_vectors), |
1860 | pvt->syn_type); | 1790 | pvt->ecc_sym_sz); |
1861 | else { | 1791 | else { |
1862 | amd64_warn("Illegal syndrome type: %u\n", pvt->syn_type); | 1792 | amd64_warn("Illegal syndrome type: %u\n", pvt->ecc_sym_sz); |
1863 | return err_sym; | 1793 | return err_sym; |
1864 | } | 1794 | } |
1865 | 1795 | ||
1866 | return map_err_sym_to_channel(err_sym, pvt->syn_type); | 1796 | return map_err_sym_to_channel(err_sym, pvt->ecc_sym_sz); |
1867 | } | 1797 | } |
1868 | 1798 | ||
1869 | /* | 1799 | /* |
1870 | * Handle any Correctable Errors (CEs) that have occurred. Check for valid ERROR | 1800 | * Handle any Correctable Errors (CEs) that have occurred. Check for valid ERROR |
1871 | * ADDRESS and process. | 1801 | * ADDRESS and process. |
1872 | */ | 1802 | */ |
1873 | static void amd64_handle_ce(struct mem_ctl_info *mci, | 1803 | static void amd64_handle_ce(struct mem_ctl_info *mci, struct mce *m) |
1874 | struct err_regs *info) | ||
1875 | { | 1804 | { |
1876 | struct amd64_pvt *pvt = mci->pvt_info; | 1805 | struct amd64_pvt *pvt = mci->pvt_info; |
1877 | u64 sys_addr; | 1806 | u64 sys_addr; |
1807 | u16 syndrome; | ||
1878 | 1808 | ||
1879 | /* Ensure that the Error Address is VALID */ | 1809 | /* Ensure that the Error Address is VALID */ |
1880 | if (!(info->nbsh & K8_NBSH_VALID_ERROR_ADDR)) { | 1810 | if (!(m->status & MCI_STATUS_ADDRV)) { |
1881 | amd64_mc_err(mci, "HW has no ERROR_ADDRESS available\n"); | 1811 | amd64_mc_err(mci, "HW has no ERROR_ADDRESS available\n"); |
1882 | edac_mc_handle_ce_no_info(mci, EDAC_MOD_STR); | 1812 | edac_mc_handle_ce_no_info(mci, EDAC_MOD_STR); |
1883 | return; | 1813 | return; |
1884 | } | 1814 | } |
1885 | 1815 | ||
1886 | sys_addr = pvt->ops->get_error_address(mci, info); | 1816 | sys_addr = get_error_address(m); |
1817 | syndrome = extract_syndrome(m->status); | ||
1887 | 1818 | ||
1888 | amd64_mc_err(mci, "CE ERROR_ADDRESS= 0x%llx\n", sys_addr); | 1819 | amd64_mc_err(mci, "CE ERROR_ADDRESS= 0x%llx\n", sys_addr); |
1889 | 1820 | ||
1890 | pvt->ops->map_sysaddr_to_csrow(mci, info, sys_addr); | 1821 | pvt->ops->map_sysaddr_to_csrow(mci, sys_addr, syndrome); |
1891 | } | 1822 | } |
1892 | 1823 | ||
1893 | /* Handle any Un-correctable Errors (UEs) */ | 1824 | /* Handle any Un-correctable Errors (UEs) */ |
1894 | static void amd64_handle_ue(struct mem_ctl_info *mci, | 1825 | static void amd64_handle_ue(struct mem_ctl_info *mci, struct mce *m) |
1895 | struct err_regs *info) | ||
1896 | { | 1826 | { |
1897 | struct amd64_pvt *pvt = mci->pvt_info; | ||
1898 | struct mem_ctl_info *log_mci, *src_mci = NULL; | 1827 | struct mem_ctl_info *log_mci, *src_mci = NULL; |
1899 | int csrow; | 1828 | int csrow; |
1900 | u64 sys_addr; | 1829 | u64 sys_addr; |
@@ -1902,13 +1831,13 @@ static void amd64_handle_ue(struct mem_ctl_info *mci, | |||
1902 | 1831 | ||
1903 | log_mci = mci; | 1832 | log_mci = mci; |
1904 | 1833 | ||
1905 | if (!(info->nbsh & K8_NBSH_VALID_ERROR_ADDR)) { | 1834 | if (!(m->status & MCI_STATUS_ADDRV)) { |
1906 | amd64_mc_err(mci, "HW has no ERROR_ADDRESS available\n"); | 1835 | amd64_mc_err(mci, "HW has no ERROR_ADDRESS available\n"); |
1907 | edac_mc_handle_ue_no_info(log_mci, EDAC_MOD_STR); | 1836 | edac_mc_handle_ue_no_info(log_mci, EDAC_MOD_STR); |
1908 | return; | 1837 | return; |
1909 | } | 1838 | } |
1910 | 1839 | ||
1911 | sys_addr = pvt->ops->get_error_address(mci, info); | 1840 | sys_addr = get_error_address(m); |
1912 | 1841 | ||
1913 | /* | 1842 | /* |
1914 | * Find out which node the error address belongs to. This may be | 1843 | * Find out which node the error address belongs to. This may be |
@@ -1936,14 +1865,14 @@ static void amd64_handle_ue(struct mem_ctl_info *mci, | |||
1936 | } | 1865 | } |
1937 | 1866 | ||
1938 | static inline void __amd64_decode_bus_error(struct mem_ctl_info *mci, | 1867 | static inline void __amd64_decode_bus_error(struct mem_ctl_info *mci, |
1939 | struct err_regs *info) | 1868 | struct mce *m) |
1940 | { | 1869 | { |
1941 | u16 ec = EC(info->nbsl); | 1870 | u16 ec = EC(m->status); |
1942 | u8 xec = XEC(info->nbsl, 0x1f); | 1871 | u8 xec = XEC(m->status, 0x1f); |
1943 | int ecc_type = (info->nbsh >> 13) & 0x3; | 1872 | u8 ecc_type = (m->status >> 45) & 0x3; |
1944 | 1873 | ||
1945 | /* Bail out early if this was an 'observed' error */ | 1874 | /* Bail out early if this was an 'observed' error */ |
1946 | if (PP(ec) == K8_NBSL_PP_OBS) | 1875 | if (PP(ec) == NBSL_PP_OBS) |
1947 | return; | 1876 | return; |
1948 | 1877 | ||
1949 | /* Do only ECC errors */ | 1878 | /* Do only ECC errors */ |
@@ -1951,34 +1880,16 @@ static inline void __amd64_decode_bus_error(struct mem_ctl_info *mci, | |||
1951 | return; | 1880 | return; |
1952 | 1881 | ||
1953 | if (ecc_type == 2) | 1882 | if (ecc_type == 2) |
1954 | amd64_handle_ce(mci, info); | 1883 | amd64_handle_ce(mci, m); |
1955 | else if (ecc_type == 1) | 1884 | else if (ecc_type == 1) |
1956 | amd64_handle_ue(mci, info); | 1885 | amd64_handle_ue(mci, m); |
1957 | } | 1886 | } |
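With struct err_regs gone, everything comes straight out of the 64-bit MCi_STATUS word. A sketch of the extraction the new code performs; the ADDRV position (bit 58) is the architectural MCA layout behind MCI_STATUS_ADDRV:

#include <stdint.h>

/* Field extraction from an MCA status value, as used above (sketch). */
struct nb_err {
	uint16_t ec;		/* [15:0]  MCA error code */
	uint8_t  xec;		/* [20:16] extended code (driver masks with 0x1f) */
	uint8_t  ecc_type;	/* [46:45] 2 = correctable, 1 = uncorrectable */
	int      addrv;		/* [58]    MCA address register valid */
};

static struct nb_err decode_status(uint64_t status)
{
	struct nb_err e = {
		.ec       = status & 0xffff,
		.xec      = (status >> 16) & 0x1f,
		.ecc_type = (status >> 45) & 0x3,
		.addrv    = (status >> 58) & 1,
	};
	return e;
}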
1958 | 1887 | ||
1959 | void amd64_decode_bus_error(int node_id, struct mce *m, u32 nbcfg) | 1888 | void amd64_decode_bus_error(int node_id, struct mce *m, u32 nbcfg) |
1960 | { | 1889 | { |
1961 | struct mem_ctl_info *mci = mcis[node_id]; | 1890 | struct mem_ctl_info *mci = mcis[node_id]; |
1962 | struct err_regs regs; | ||
1963 | |||
1964 | regs.nbsl = (u32) m->status; | ||
1965 | regs.nbsh = (u32)(m->status >> 32); | ||
1966 | regs.nbeal = (u32) m->addr; | ||
1967 | regs.nbeah = (u32)(m->addr >> 32); | ||
1968 | regs.nbcfg = nbcfg; | ||
1969 | |||
1970 | __amd64_decode_bus_error(mci, ®s); | ||
1971 | |||
1972 | /* | ||
1973 | * Check the UE bit of the NB status high register, if set generate some | ||
1974 | * logs. If NOT a GART error, then process the event as a NO-INFO event. | ||
1975 | * If it was a GART error, skip that process. | ||
1976 | * | ||
1977 | * FIXME: this should go somewhere else, if at all. | ||
1978 | */ | ||
1979 | if (regs.nbsh & K8_NBSH_UC_ERR && !report_gart_errors) | ||
1980 | edac_mc_handle_ue_no_info(mci, "UE bit is set"); | ||
1981 | 1891 | ||
1892 | __amd64_decode_bus_error(mci, m); | ||
1982 | } | 1893 | } |
1983 | 1894 | ||
1984 | /* | 1895 | /* |
@@ -2027,9 +1938,10 @@ static void free_mc_sibling_devs(struct amd64_pvt *pvt) | |||
2027 | */ | 1938 | */ |
2028 | static void read_mc_regs(struct amd64_pvt *pvt) | 1939 | static void read_mc_regs(struct amd64_pvt *pvt) |
2029 | { | 1940 | { |
1941 | struct cpuinfo_x86 *c = &boot_cpu_data; | ||
2030 | u64 msr_val; | 1942 | u64 msr_val; |
2031 | u32 tmp; | 1943 | u32 tmp; |
2032 | int dram; | 1944 | unsigned range; |
2033 | 1945 | ||
2034 | /* | 1946 | /* |
2035 | * Retrieve TOP_MEM and TOP_MEM2; no masking off of reserved bits since | 1947 | * Retrieve TOP_MEM and TOP_MEM2; no masking off of reserved bits since |
@@ -2046,75 +1958,66 @@ static void read_mc_regs(struct amd64_pvt *pvt) | |||
2046 | } else | 1958 | } else |
2047 | debugf0(" TOP_MEM2 disabled.\n"); | 1959 | debugf0(" TOP_MEM2 disabled.\n"); |
2048 | 1960 | ||
2049 | amd64_read_pci_cfg(pvt->F3, K8_NBCAP, &pvt->nbcap); | 1961 | amd64_read_pci_cfg(pvt->F3, NBCAP, &pvt->nbcap); |
2050 | 1962 | ||
2051 | if (pvt->ops->read_dram_ctl_register) | 1963 | read_dram_ctl_register(pvt); |
2052 | pvt->ops->read_dram_ctl_register(pvt); | ||
2053 | 1964 | ||
2054 | for (dram = 0; dram < DRAM_REG_COUNT; dram++) { | 1965 | for (range = 0; range < DRAM_RANGES; range++) { |
2055 | /* | 1966 | u8 rw; |
2056 | * Call CPU specific READ function to get the DRAM Base and | ||
2057 | * Limit values from the DCT. | ||
2058 | */ | ||
2059 | pvt->ops->read_dram_base_limit(pvt, dram); | ||
2060 | 1967 | ||
2061 | /* | 1968 | /* read settings for this DRAM range */ |
2062 | * Only print out debug info on rows with both R and W Enabled. | 1969 | read_dram_base_limit_regs(pvt, range); |
2063 | * Normal processing, compiler should optimize this whole 'if' | 1970 | |
2064 | * debug output block away. | 1971 | rw = dram_rw(pvt, range); |
2065 | */ | 1972 | if (!rw) |
2066 | if (pvt->dram_rw_en[dram] != 0) { | 1973 | continue; |
2067 | debugf1(" DRAM-BASE[%d]: 0x%016llx " | 1974 | |
2068 | "DRAM-LIMIT: 0x%016llx\n", | 1975 | debugf1(" DRAM range[%d], base: 0x%016llx; limit: 0x%016llx\n", |
2069 | dram, | 1976 | range, |
2070 | pvt->dram_base[dram], | 1977 | get_dram_base(pvt, range), |
2071 | pvt->dram_limit[dram]); | 1978 | get_dram_limit(pvt, range)); |
2072 | 1979 | ||
2073 | debugf1(" IntlvEn=%s %s %s " | 1980 | debugf1(" IntlvEn=%s; Range access: %s%s IntlvSel=%d DstNode=%d\n", |
2074 | "IntlvSel=%d DstNode=%d\n", | 1981 | dram_intlv_en(pvt, range) ? "Enabled" : "Disabled", |
2075 | pvt->dram_IntlvEn[dram] ? | 1982 | (rw & 0x1) ? "R" : "-", |
2076 | "Enabled" : "Disabled", | 1983 | (rw & 0x2) ? "W" : "-", |
2077 | (pvt->dram_rw_en[dram] & 0x2) ? "W" : "!W", | 1984 | dram_intlv_sel(pvt, range), |
2078 | (pvt->dram_rw_en[dram] & 0x1) ? "R" : "!R", | 1985 | dram_dst_node(pvt, range)); |
2079 | pvt->dram_IntlvSel[dram], | ||
2080 | pvt->dram_DstNode[dram]); | ||
2081 | } | ||
2082 | } | 1986 | } |
2083 | 1987 | ||
2084 | amd64_read_dct_base_mask(pvt); | 1988 | read_dct_base_mask(pvt); |
2085 | 1989 | ||
2086 | amd64_read_pci_cfg(pvt->F1, K8_DHAR, &pvt->dhar); | 1990 | amd64_read_pci_cfg(pvt->F1, DHAR, &pvt->dhar); |
2087 | amd64_read_dbam_reg(pvt); | 1991 | amd64_read_dct_pci_cfg(pvt, DBAM0, &pvt->dbam0); |
2088 | 1992 | ||
2089 | amd64_read_pci_cfg(pvt->F3, F10_ONLINE_SPARE, &pvt->online_spare); | 1993 | amd64_read_pci_cfg(pvt->F3, F10_ONLINE_SPARE, &pvt->online_spare); |
2090 | 1994 | ||
2091 | amd64_read_pci_cfg(pvt->F2, F10_DCLR_0, &pvt->dclr0); | 1995 | amd64_read_dct_pci_cfg(pvt, DCLR0, &pvt->dclr0); |
2092 | amd64_read_pci_cfg(pvt->F2, F10_DCHR_0, &pvt->dchr0); | 1996 | amd64_read_dct_pci_cfg(pvt, DCHR0, &pvt->dchr0); |
2093 | 1997 | ||
2094 | if (boot_cpu_data.x86 >= 0x10) { | 1998 | if (!dct_ganging_enabled(pvt)) { |
2095 | if (!dct_ganging_enabled(pvt)) { | 1999 | amd64_read_dct_pci_cfg(pvt, DCLR1, &pvt->dclr1); |
2096 | amd64_read_pci_cfg(pvt->F2, F10_DCLR_1, &pvt->dclr1); | 2000 | amd64_read_dct_pci_cfg(pvt, DCHR1, &pvt->dchr1); |
2097 | amd64_read_pci_cfg(pvt->F2, F10_DCHR_1, &pvt->dchr1); | ||
2098 | } | ||
2099 | amd64_read_pci_cfg(pvt->F3, EXT_NB_MCA_CFG, &tmp); | ||
2100 | } | 2001 | } |
2101 | 2002 | ||
2102 | if (boot_cpu_data.x86 == 0x10 && | 2003 | pvt->ecc_sym_sz = 4; |
2103 | boot_cpu_data.x86_model > 7 && | ||
2104 | /* F3x180[EccSymbolSize]=1 => x8 symbols */ | ||
2105 | tmp & BIT(25)) | ||
2106 | pvt->syn_type = 8; | ||
2107 | else | ||
2108 | pvt->syn_type = 4; | ||
2109 | 2004 | ||
2110 | amd64_dump_misc_regs(pvt); | 2005 | if (c->x86 >= 0x10) { |
2006 | amd64_read_pci_cfg(pvt->F3, EXT_NB_MCA_CFG, &tmp); | ||
2007 | amd64_read_dct_pci_cfg(pvt, DBAM1, &pvt->dbam1); | ||
2008 | |||
2009 | /* F10h, revD and later can do x8 ECC too */ | ||
2010 | if ((c->x86 > 0x10 || c->x86_model > 7) && tmp & BIT(25)) | ||
2011 | pvt->ecc_sym_sz = 8; | ||
2012 | } | ||
2013 | dump_misc_regs(pvt); | ||
2111 | } | 2014 | } |
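The symbol-size logic above reduces to one predicate: x4 by default, x8 when the part is F10h revD-or-later or any later family and F3x180 bit 25 (EccSymbolSize) is set. As a sketch:

#include <stdint.h>

/* Sketch of the ecc_sym_sz selection at the end of read_mc_regs() above. */
static int ecc_symbol_size(int family, int model, uint32_t ext_nb_mca_cfg)
{
	if (family >= 0x10 &&
	    (family > 0x10 || model > 7) &&	/* F10h revD+, or newer family */
	    (ext_nb_mca_cfg & (1u << 25)))	/* F3x180[EccSymbolSize] */
		return 8;
	return 4;
}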
2112 | 2015 | ||
2113 | /* | 2016 | /* |
2114 | * NOTE: CPU Revision Dependent code | 2017 | * NOTE: CPU Revision Dependent code |
2115 | * | 2018 | * |
2116 | * Input: | 2019 | * Input: |
2117 | * @csrow_nr ChipSelect Row Number (0..pvt->cs_count-1) | 2020 | * @csrow_nr ChipSelect Row Number (0..NUM_CHIPSELECTS-1) |
2118 | * k8 private pointer to --> | 2021 | * k8 private pointer to --> |
2119 | * DRAM Bank Address mapping register | 2022 | * DRAM Bank Address mapping register |
2120 | * node_id | 2023 | * node_id |
@@ -2144,7 +2047,7 @@ static void read_mc_regs(struct amd64_pvt *pvt) | |||
2144 | * encompasses | 2047 | * encompasses |
2145 | * | 2048 | * |
2146 | */ | 2049 | */ |
2147 | static u32 amd64_csrow_nr_pages(int csrow_nr, struct amd64_pvt *pvt) | 2050 | static u32 amd64_csrow_nr_pages(struct amd64_pvt *pvt, u8 dct, int csrow_nr) |
2148 | { | 2051 | { |
2149 | u32 cs_mode, nr_pages; | 2052 | u32 cs_mode, nr_pages; |
2150 | 2053 | ||
@@ -2157,7 +2060,7 @@ static u32 amd64_csrow_nr_pages(int csrow_nr, struct amd64_pvt *pvt) | |||
2157 | */ | 2060 | */ |
2158 | cs_mode = (pvt->dbam0 >> ((csrow_nr / 2) * 4)) & 0xF; | 2061 | cs_mode = (pvt->dbam0 >> ((csrow_nr / 2) * 4)) & 0xF; |
2159 | 2062 | ||
2160 | nr_pages = pvt->ops->dbam_to_cs(pvt, cs_mode) << (20 - PAGE_SHIFT); | 2063 | nr_pages = pvt->ops->dbam_to_cs(pvt, dct, cs_mode) << (20 - PAGE_SHIFT); |
2161 | 2064 | ||
2162 | /* | 2065 | /* |
2163 | * If dual channel then double the memory size of single channel. | 2066 | * If dual channel then double the memory size of single channel. |
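The cs_mode extraction in this hunk deserves a note: chip selects pair up (base/mask), and each pair shares one 4-bit DBAM nibble, so csrows 0/1 read nibble 0, csrows 2/3 nibble 1, and so on. In isolation:

#include <stdint.h>

/* Sketch of the DBAM indexing used by amd64_csrow_nr_pages() above. */
static unsigned dbam_cs_mode(uint32_t dbam, int csrow_nr)
{
	return (dbam >> ((csrow_nr / 2) * 4)) & 0xF;
}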
@@ -2180,23 +2083,22 @@ static int init_csrows(struct mem_ctl_info *mci) | |||
2180 | { | 2083 | { |
2181 | struct csrow_info *csrow; | 2084 | struct csrow_info *csrow; |
2182 | struct amd64_pvt *pvt = mci->pvt_info; | 2085 | struct amd64_pvt *pvt = mci->pvt_info; |
2183 | u64 input_addr_min, input_addr_max, sys_addr; | 2086 | u64 input_addr_min, input_addr_max, sys_addr, base, mask; |
2184 | u32 val; | 2087 | u32 val; |
2185 | int i, empty = 1; | 2088 | int i, empty = 1; |
2186 | 2089 | ||
2187 | amd64_read_pci_cfg(pvt->F3, K8_NBCFG, &val); | 2090 | amd64_read_pci_cfg(pvt->F3, NBCFG, &val); |
2188 | 2091 | ||
2189 | pvt->nbcfg = val; | 2092 | pvt->nbcfg = val; |
2190 | pvt->ctl_error_info.nbcfg = val; | ||
2191 | 2093 | ||
2192 | debugf0("node %d, NBCFG=0x%08x[ChipKillEccCap: %d|DramEccEn: %d]\n", | 2094 | debugf0("node %d, NBCFG=0x%08x[ChipKillEccCap: %d|DramEccEn: %d]\n", |
2193 | pvt->mc_node_id, val, | 2095 | pvt->mc_node_id, val, |
2194 | !!(val & K8_NBCFG_CHIPKILL), !!(val & K8_NBCFG_ECC_ENABLE)); | 2096 | !!(val & NBCFG_CHIPKILL), !!(val & NBCFG_ECC_ENABLE)); |
2195 | 2097 | ||
2196 | for (i = 0; i < pvt->cs_count; i++) { | 2098 | for_each_chip_select(i, 0, pvt) { |
2197 | csrow = &mci->csrows[i]; | 2099 | csrow = &mci->csrows[i]; |
2198 | 2100 | ||
2199 | if ((pvt->dcsb0[i] & K8_DCSB_CS_ENABLE) == 0) { | 2101 | if (!csrow_enabled(i, 0, pvt)) { |
2200 | debugf1("----CSROW %d EMPTY for node %d\n", i, | 2102 | debugf1("----CSROW %d EMPTY for node %d\n", i, |
2201 | pvt->mc_node_id); | 2103 | pvt->mc_node_id); |
2202 | continue; | 2104 | continue; |
@@ -2206,13 +2108,15 @@ static int init_csrows(struct mem_ctl_info *mci) | |||
2206 | i, pvt->mc_node_id); | 2108 | i, pvt->mc_node_id); |
2207 | 2109 | ||
2208 | empty = 0; | 2110 | empty = 0; |
2209 | csrow->nr_pages = amd64_csrow_nr_pages(i, pvt); | 2111 | csrow->nr_pages = amd64_csrow_nr_pages(pvt, 0, i); |
2210 | find_csrow_limits(mci, i, &input_addr_min, &input_addr_max); | 2112 | find_csrow_limits(mci, i, &input_addr_min, &input_addr_max); |
2211 | sys_addr = input_addr_to_sys_addr(mci, input_addr_min); | 2113 | sys_addr = input_addr_to_sys_addr(mci, input_addr_min); |
2212 | csrow->first_page = (u32) (sys_addr >> PAGE_SHIFT); | 2114 | csrow->first_page = (u32) (sys_addr >> PAGE_SHIFT); |
2213 | sys_addr = input_addr_to_sys_addr(mci, input_addr_max); | 2115 | sys_addr = input_addr_to_sys_addr(mci, input_addr_max); |
2214 | csrow->last_page = (u32) (sys_addr >> PAGE_SHIFT); | 2116 | csrow->last_page = (u32) (sys_addr >> PAGE_SHIFT); |
2215 | csrow->page_mask = ~mask_from_dct_mask(pvt, i); | 2117 | |
2118 | get_cs_base_and_mask(pvt, i, 0, &base, &mask); | ||
2119 | csrow->page_mask = ~mask; | ||
2216 | /* 8 bytes of resolution */ | 2120 | /* 8 bytes of resolution */ |
2217 | 2121 | ||
2218 | csrow->mtype = amd64_determine_memory_type(pvt, i); | 2122 | csrow->mtype = amd64_determine_memory_type(pvt, i); |
@@ -2231,9 +2135,9 @@ static int init_csrows(struct mem_ctl_info *mci) | |||
2231 | /* | 2135 | /* |
2232 | * determine whether CHIPKILL or JUST ECC or NO ECC is operating | 2136 | * determine whether CHIPKILL or JUST ECC or NO ECC is operating |
2233 | */ | 2137 | */ |
2234 | if (pvt->nbcfg & K8_NBCFG_ECC_ENABLE) | 2138 | if (pvt->nbcfg & NBCFG_ECC_ENABLE) |
2235 | csrow->edac_mode = | 2139 | csrow->edac_mode = |
2236 | (pvt->nbcfg & K8_NBCFG_CHIPKILL) ? | 2140 | (pvt->nbcfg & NBCFG_CHIPKILL) ? |
2237 | EDAC_S4ECD4ED : EDAC_SECDED; | 2141 | EDAC_S4ECD4ED : EDAC_SECDED; |
2238 | else | 2142 | else |
2239 | csrow->edac_mode = EDAC_NONE; | 2143 | csrow->edac_mode = EDAC_NONE; |
@@ -2243,7 +2147,7 @@ static int init_csrows(struct mem_ctl_info *mci) | |||
2243 | } | 2147 | } |
2244 | 2148 | ||
2245 | /* get all cores on this DCT */ | 2149 | /* get all cores on this DCT */ |
2246 | static void get_cpus_on_this_dct_cpumask(struct cpumask *mask, int nid) | 2150 | static void get_cpus_on_this_dct_cpumask(struct cpumask *mask, unsigned nid) |
2247 | { | 2151 | { |
2248 | int cpu; | 2152 | int cpu; |
2249 | 2153 | ||
@@ -2253,7 +2157,7 @@ static void get_cpus_on_this_dct_cpumask(struct cpumask *mask, int nid) | |||
2253 | } | 2157 | } |
2254 | 2158 | ||
2255 | /* check MCG_CTL on all the cpus on this node */ | 2159 | /* check MCG_CTL on all the cpus on this node */ |
2256 | static bool amd64_nb_mce_bank_enabled_on_node(int nid) | 2160 | static bool amd64_nb_mce_bank_enabled_on_node(unsigned nid) |
2257 | { | 2161 | { |
2258 | cpumask_var_t mask; | 2162 | cpumask_var_t mask; |
2259 | int cpu, nbe; | 2163 | int cpu, nbe; |
@@ -2270,7 +2174,7 @@ static bool amd64_nb_mce_bank_enabled_on_node(int nid) | |||
2270 | 2174 | ||
2271 | for_each_cpu(cpu, mask) { | 2175 | for_each_cpu(cpu, mask) { |
2272 | struct msr *reg = per_cpu_ptr(msrs, cpu); | 2176 | struct msr *reg = per_cpu_ptr(msrs, cpu); |
2273 | nbe = reg->l & K8_MSR_MCGCTL_NBE; | 2177 | nbe = reg->l & MSR_MCGCTL_NBE; |
2274 | 2178 | ||
2275 | debugf0("core: %u, MCG_CTL: 0x%llx, NB MSR is %s\n", | 2179 | debugf0("core: %u, MCG_CTL: 0x%llx, NB MSR is %s\n", |
2276 | cpu, reg->q, | 2180 | cpu, reg->q, |
@@ -2305,16 +2209,16 @@ static int toggle_ecc_err_reporting(struct ecc_settings *s, u8 nid, bool on) | |||
2305 | struct msr *reg = per_cpu_ptr(msrs, cpu); | 2209 | struct msr *reg = per_cpu_ptr(msrs, cpu); |
2306 | 2210 | ||
2307 | if (on) { | 2211 | if (on) { |
2308 | if (reg->l & K8_MSR_MCGCTL_NBE) | 2212 | if (reg->l & MSR_MCGCTL_NBE) |
2309 | s->flags.nb_mce_enable = 1; | 2213 | s->flags.nb_mce_enable = 1; |
2310 | 2214 | ||
2311 | reg->l |= K8_MSR_MCGCTL_NBE; | 2215 | reg->l |= MSR_MCGCTL_NBE; |
2312 | } else { | 2216 | } else { |
2313 | /* | 2217 | /* |
2314 | * Turn off NB MCE reporting only when it was off before | 2218 | * Turn off NB MCE reporting only when it was off before |
2315 | */ | 2219 | */ |
2316 | if (!s->flags.nb_mce_enable) | 2220 | if (!s->flags.nb_mce_enable) |
2317 | reg->l &= ~K8_MSR_MCGCTL_NBE; | 2221 | reg->l &= ~MSR_MCGCTL_NBE; |
2318 | } | 2222 | } |
2319 | } | 2223 | } |
2320 | wrmsr_on_cpus(cmask, MSR_IA32_MCG_CTL, msrs); | 2224 | wrmsr_on_cpus(cmask, MSR_IA32_MCG_CTL, msrs); |
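The point of the flags dance above is idempotence: enabling records whether NBE was already set by BIOS, and disabling only clears the bit if the driver itself set it. The same bookkeeping on a single MCG_CTL value, taking NBE as bit 4 (the NB bank enable) — an assumption here, since MSR_MCGCTL_NBE's definition is not in this hunk:

#include <stdbool.h>
#include <stdint.h>

#define MCGCTL_NBE (1ULL << 4)	/* assumed NB machine-check bank enable bit */

struct nbe_state {
	bool was_enabled;
};

static uint64_t toggle_nbe(uint64_t mcg_ctl, bool on, struct nbe_state *s)
{
	if (on) {
		s->was_enabled = mcg_ctl & MCGCTL_NBE;	/* remember BIOS setting */
		return mcg_ctl | MCGCTL_NBE;
	}
	/* only undo what we did: leave the bit alone if it was on before */
	return s->was_enabled ? mcg_ctl : (mcg_ctl & ~MCGCTL_NBE);
}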
@@ -2328,40 +2232,38 @@ static bool enable_ecc_error_reporting(struct ecc_settings *s, u8 nid, | |||
2328 | struct pci_dev *F3) | 2232 | struct pci_dev *F3) |
2329 | { | 2233 | { |
2330 | bool ret = true; | 2234 | bool ret = true; |
2331 | u32 value, mask = K8_NBCTL_CECCEn | K8_NBCTL_UECCEn; | 2235 | u32 value, mask = 0x3; /* UECC/CECC enable */ |
2332 | 2236 | ||
2333 | if (toggle_ecc_err_reporting(s, nid, ON)) { | 2237 | if (toggle_ecc_err_reporting(s, nid, ON)) { |
2334 | amd64_warn("Error enabling ECC reporting over MCGCTL!\n"); | 2238 | amd64_warn("Error enabling ECC reporting over MCGCTL!\n"); |
2335 | return false; | 2239 | return false; |
2336 | } | 2240 | } |
2337 | 2241 | ||
2338 | amd64_read_pci_cfg(F3, K8_NBCTL, &value); | 2242 | amd64_read_pci_cfg(F3, NBCTL, &value); |
2339 | 2243 | ||
2340 | /* turn on UECCEn and CECCEn bits */ | ||
2341 | s->old_nbctl = value & mask; | 2244 | s->old_nbctl = value & mask; |
2342 | s->nbctl_valid = true; | 2245 | s->nbctl_valid = true; |
2343 | 2246 | ||
2344 | value |= mask; | 2247 | value |= mask; |
2345 | pci_write_config_dword(F3, K8_NBCTL, value); | 2248 | amd64_write_pci_cfg(F3, NBCTL, value); |
2346 | 2249 | ||
2347 | amd64_read_pci_cfg(F3, K8_NBCFG, &value); | 2250 | amd64_read_pci_cfg(F3, NBCFG, &value); |
2348 | 2251 | ||
2349 | debugf0("1: node %d, NBCFG=0x%08x[ChipKillEccCap: %d|DramEccEn: %d]\n", | 2252 | debugf0("1: node %d, NBCFG=0x%08x[DramEccEn: %d]\n", |
2350 | nid, value, | 2253 | nid, value, !!(value & NBCFG_ECC_ENABLE)); |
2351 | !!(value & K8_NBCFG_CHIPKILL), !!(value & K8_NBCFG_ECC_ENABLE)); | ||
2352 | 2254 | ||
2353 | if (!(value & K8_NBCFG_ECC_ENABLE)) { | 2255 | if (!(value & NBCFG_ECC_ENABLE)) { |
2354 | amd64_warn("DRAM ECC disabled on this node, enabling...\n"); | 2256 | amd64_warn("DRAM ECC disabled on this node, enabling...\n"); |
2355 | 2257 | ||
2356 | s->flags.nb_ecc_prev = 0; | 2258 | s->flags.nb_ecc_prev = 0; |
2357 | 2259 | ||
2358 | /* Attempt to turn on DRAM ECC Enable */ | 2260 | /* Attempt to turn on DRAM ECC Enable */ |
2359 | value |= K8_NBCFG_ECC_ENABLE; | 2261 | value |= NBCFG_ECC_ENABLE; |
2360 | pci_write_config_dword(F3, K8_NBCFG, value); | 2262 | amd64_write_pci_cfg(F3, NBCFG, value); |
2361 | 2263 | ||
2362 | amd64_read_pci_cfg(F3, K8_NBCFG, &value); | 2264 | amd64_read_pci_cfg(F3, NBCFG, &value); |
2363 | 2265 | ||
2364 | if (!(value & K8_NBCFG_ECC_ENABLE)) { | 2266 | if (!(value & NBCFG_ECC_ENABLE)) { |
2365 | amd64_warn("Hardware rejected DRAM ECC enable, " | 2267 | amd64_warn("Hardware rejected DRAM ECC enable, " |
2366 | "check memory DIMM configuration.\n"); | 2268 | "check memory DIMM configuration.\n"); |
2367 | ret = false; | 2269 | ret = false; |
@@ -2372,9 +2274,8 @@ static bool enable_ecc_error_reporting(struct ecc_settings *s, u8 nid, | |||
2372 | s->flags.nb_ecc_prev = 1; | 2274 | s->flags.nb_ecc_prev = 1; |
2373 | } | 2275 | } |
2374 | 2276 | ||
2375 | debugf0("2: node %d, NBCFG=0x%08x[ChipKillEccCap: %d|DramEccEn: %d]\n", | 2277 | debugf0("2: node %d, NBCFG=0x%08x[DramEccEn: %d]\n", |
2376 | nid, value, | 2278 | nid, value, !!(value & NBCFG_ECC_ENABLE)); |
2377 | !!(value & K8_NBCFG_CHIPKILL), !!(value & K8_NBCFG_ECC_ENABLE)); | ||
2378 | 2279 | ||
2379 | return ret; | 2280 | return ret; |
2380 | } | 2281 | } |
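Worth noting in the block above: the driver writes NBCFG[DramEccEn] and then reads the register back, because the hardware or DIMM configuration can silently reject the enable. The same pattern in miniature, with the PCI config access stubbed out and the bit position (22) taken as an assumption:

#include <stdbool.h>
#include <stdint.h>

#define NBCFG_ECC_ENABLE (1u << 22)	/* assumed DramEccEn bit position */

static uint32_t fake_nbcfg;		/* stands in for F3x44 */
static uint32_t cfg_read(void)       { return fake_nbcfg; }
static void     cfg_write(uint32_t v) { fake_nbcfg = v; }

/* Write-then-verify: hardware may refuse the ECC enable. */
static bool try_enable_dram_ecc(void)
{
	uint32_t v = cfg_read();

	if (v & NBCFG_ECC_ENABLE)
		return true;			/* already on */

	cfg_write(v | NBCFG_ECC_ENABLE);
	v = cfg_read();				/* read back, don't trust the write */

	return v & NBCFG_ECC_ENABLE;
}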
@@ -2382,22 +2283,23 @@ static bool enable_ecc_error_reporting(struct ecc_settings *s, u8 nid, | |||
2382 | static void restore_ecc_error_reporting(struct ecc_settings *s, u8 nid, | 2283 | static void restore_ecc_error_reporting(struct ecc_settings *s, u8 nid, |
2383 | struct pci_dev *F3) | 2284 | struct pci_dev *F3) |
2384 | { | 2285 | { |
2385 | u32 value, mask = K8_NBCTL_CECCEn | K8_NBCTL_UECCEn; | 2286 | u32 value, mask = 0x3; /* UECC/CECC enable */ |
2287 | |||
2386 | 2288 | ||
2387 | if (!s->nbctl_valid) | 2289 | if (!s->nbctl_valid) |
2388 | return; | 2290 | return; |
2389 | 2291 | ||
2390 | amd64_read_pci_cfg(F3, K8_NBCTL, &value); | 2292 | amd64_read_pci_cfg(F3, NBCTL, &value); |
2391 | value &= ~mask; | 2293 | value &= ~mask; |
2392 | value |= s->old_nbctl; | 2294 | value |= s->old_nbctl; |
2393 | 2295 | ||
2394 | pci_write_config_dword(F3, K8_NBCTL, value); | 2296 | amd64_write_pci_cfg(F3, NBCTL, value); |
2395 | 2297 | ||
2396 | /* restore previous BIOS DRAM ECC "off" setting we force-enabled */ | 2298 | /* restore previous BIOS DRAM ECC "off" setting we force-enabled */ |
2397 | if (!s->flags.nb_ecc_prev) { | 2299 | if (!s->flags.nb_ecc_prev) { |
2398 | amd64_read_pci_cfg(F3, K8_NBCFG, &value); | 2300 | amd64_read_pci_cfg(F3, NBCFG, &value); |
2399 | value &= ~K8_NBCFG_ECC_ENABLE; | 2301 | value &= ~NBCFG_ECC_ENABLE; |
2400 | pci_write_config_dword(F3, K8_NBCFG, value); | 2302 | amd64_write_pci_cfg(F3, NBCFG, value); |
2401 | } | 2303 | } |
2402 | 2304 | ||
2403 | /* restore the NB Enable MCGCTL bit */ | 2305 | /* restore the NB Enable MCGCTL bit */ |
@@ -2423,9 +2325,9 @@ static bool ecc_enabled(struct pci_dev *F3, u8 nid) | |||
2423 | u8 ecc_en = 0; | 2325 | u8 ecc_en = 0; |
2424 | bool nb_mce_en = false; | 2326 | bool nb_mce_en = false; |
2425 | 2327 | ||
2426 | amd64_read_pci_cfg(F3, K8_NBCFG, &value); | 2328 | amd64_read_pci_cfg(F3, NBCFG, &value); |
2427 | 2329 | ||
2428 | ecc_en = !!(value & K8_NBCFG_ECC_ENABLE); | 2330 | ecc_en = !!(value & NBCFG_ECC_ENABLE); |
2429 | amd64_info("DRAM ECC %s.\n", (ecc_en ? "enabled" : "disabled")); | 2331 | amd64_info("DRAM ECC %s.\n", (ecc_en ? "enabled" : "disabled")); |
2430 | 2332 | ||
2431 | nb_mce_en = amd64_nb_mce_bank_enabled_on_node(nid); | 2333 | nb_mce_en = amd64_nb_mce_bank_enabled_on_node(nid); |
@@ -2463,23 +2365,24 @@ static void set_mc_sysfs_attrs(struct mem_ctl_info *mci) | |||
2463 | mci->mc_driver_sysfs_attributes = sysfs_attrs; | 2365 | mci->mc_driver_sysfs_attributes = sysfs_attrs; |
2464 | } | 2366 | } |
2465 | 2367 | ||
2466 | static void setup_mci_misc_attrs(struct mem_ctl_info *mci) | 2368 | static void setup_mci_misc_attrs(struct mem_ctl_info *mci, |
2369 | struct amd64_family_type *fam) | ||
2467 | { | 2370 | { |
2468 | struct amd64_pvt *pvt = mci->pvt_info; | 2371 | struct amd64_pvt *pvt = mci->pvt_info; |
2469 | 2372 | ||
2470 | mci->mtype_cap = MEM_FLAG_DDR2 | MEM_FLAG_RDDR2; | 2373 | mci->mtype_cap = MEM_FLAG_DDR2 | MEM_FLAG_RDDR2; |
2471 | mci->edac_ctl_cap = EDAC_FLAG_NONE; | 2374 | mci->edac_ctl_cap = EDAC_FLAG_NONE; |
2472 | 2375 | ||
2473 | if (pvt->nbcap & K8_NBCAP_SECDED) | 2376 | if (pvt->nbcap & NBCAP_SECDED) |
2474 | mci->edac_ctl_cap |= EDAC_FLAG_SECDED; | 2377 | mci->edac_ctl_cap |= EDAC_FLAG_SECDED; |
2475 | 2378 | ||
2476 | if (pvt->nbcap & K8_NBCAP_CHIPKILL) | 2379 | if (pvt->nbcap & NBCAP_CHIPKILL) |
2477 | mci->edac_ctl_cap |= EDAC_FLAG_S4ECD4ED; | 2380 | mci->edac_ctl_cap |= EDAC_FLAG_S4ECD4ED; |
2478 | 2381 | ||
2479 | mci->edac_cap = amd64_determine_edac_cap(pvt); | 2382 | mci->edac_cap = amd64_determine_edac_cap(pvt); |
2480 | mci->mod_name = EDAC_MOD_STR; | 2383 | mci->mod_name = EDAC_MOD_STR; |
2481 | mci->mod_ver = EDAC_AMD64_VERSION; | 2384 | mci->mod_ver = EDAC_AMD64_VERSION; |
2482 | mci->ctl_name = pvt->ctl_name; | 2385 | mci->ctl_name = fam->ctl_name; |
2483 | mci->dev_name = pci_name(pvt->F2); | 2386 | mci->dev_name = pci_name(pvt->F2); |
2484 | mci->ctl_page_to_phys = NULL; | 2387 | mci->ctl_page_to_phys = NULL; |
2485 | 2388 | ||
@@ -2500,14 +2403,16 @@ static struct amd64_family_type *amd64_per_family_init(struct amd64_pvt *pvt) | |||
2500 | case 0xf: | 2403 | case 0xf: |
2501 | fam_type = &amd64_family_types[K8_CPUS]; | 2404 | fam_type = &amd64_family_types[K8_CPUS]; |
2502 | pvt->ops = &amd64_family_types[K8_CPUS].ops; | 2405 | pvt->ops = &amd64_family_types[K8_CPUS].ops; |
2503 | pvt->ctl_name = fam_type->ctl_name; | ||
2504 | pvt->min_scrubrate = K8_MIN_SCRUB_RATE_BITS; | ||
2505 | break; | 2406 | break; |
2407 | |||
2506 | case 0x10: | 2408 | case 0x10: |
2507 | fam_type = &amd64_family_types[F10_CPUS]; | 2409 | fam_type = &amd64_family_types[F10_CPUS]; |
2508 | pvt->ops = &amd64_family_types[F10_CPUS].ops; | 2410 | pvt->ops = &amd64_family_types[F10_CPUS].ops; |
2509 | pvt->ctl_name = fam_type->ctl_name; | 2411 | break; |
2510 | pvt->min_scrubrate = F10_MIN_SCRUB_RATE_BITS; | 2412 | |
2413 | case 0x15: | ||
2414 | fam_type = &amd64_family_types[F15_CPUS]; | ||
2415 | pvt->ops = &amd64_family_types[F15_CPUS].ops; | ||
2511 | break; | 2416 | break; |
2512 | 2417 | ||
2513 | default: | 2418 | default: |
@@ -2517,7 +2422,7 @@ static struct amd64_family_type *amd64_per_family_init(struct amd64_pvt *pvt) | |||
2517 | 2422 | ||
2518 | pvt->ext_model = boot_cpu_data.x86_model >> 4; | 2423 | pvt->ext_model = boot_cpu_data.x86_model >> 4; |
2519 | 2424 | ||
2520 | amd64_info("%s %sdetected (node %d).\n", pvt->ctl_name, | 2425 | amd64_info("%s %sdetected (node %d).\n", fam_type->ctl_name, |
2521 | (fam == 0xf ? | 2426 | (fam == 0xf ? |
2522 | (pvt->ext_model >= K8_REV_F ? "revF or later " | 2427 | (pvt->ext_model >= K8_REV_F ? "revF or later " |
2523 | : "revE or earlier ") | 2428 | : "revE or earlier ") |
@@ -2564,14 +2469,14 @@ static int amd64_init_one_instance(struct pci_dev *F2) | |||
2564 | goto err_siblings; | 2469 | goto err_siblings; |
2565 | 2470 | ||
2566 | ret = -ENOMEM; | 2471 | ret = -ENOMEM; |
2567 | mci = edac_mc_alloc(0, pvt->cs_count, pvt->channel_count, nid); | 2472 | mci = edac_mc_alloc(0, pvt->csels[0].b_cnt, pvt->channel_count, nid); |
2568 | if (!mci) | 2473 | if (!mci) |
2569 | goto err_siblings; | 2474 | goto err_siblings; |
2570 | 2475 | ||
2571 | mci->pvt_info = pvt; | 2476 | mci->pvt_info = pvt; |
2572 | mci->dev = &pvt->F2->dev; | 2477 | mci->dev = &pvt->F2->dev; |
2573 | 2478 | ||
2574 | setup_mci_misc_attrs(mci); | 2479 | setup_mci_misc_attrs(mci, fam_type); |
2575 | 2480 | ||
2576 | if (init_csrows(mci)) | 2481 | if (init_csrows(mci)) |
2577 | mci->edac_cap = EDAC_FLAG_NONE; | 2482 | mci->edac_cap = EDAC_FLAG_NONE; |
@@ -2714,6 +2619,15 @@ static const struct pci_device_id amd64_pci_table[] __devinitdata = { | |||
2714 | .class = 0, | 2619 | .class = 0, |
2715 | .class_mask = 0, | 2620 | .class_mask = 0, |
2716 | }, | 2621 | }, |
2622 | { | ||
2623 | .vendor = PCI_VENDOR_ID_AMD, | ||
2624 | .device = PCI_DEVICE_ID_AMD_15H_NB_F2, | ||
2625 | .subvendor = PCI_ANY_ID, | ||
2626 | .subdevice = PCI_ANY_ID, | ||
2627 | .class = 0, | ||
2628 | .class_mask = 0, | ||
2629 | }, | ||
2630 | |||
2717 | {0, } | 2631 | {0, } |
2718 | }; | 2632 | }; |
2719 | MODULE_DEVICE_TABLE(pci, amd64_pci_table); | 2633 | MODULE_DEVICE_TABLE(pci, amd64_pci_table); |
@@ -2754,7 +2668,7 @@ static int __init amd64_edac_init(void) | |||
2754 | { | 2668 | { |
2755 | int err = -ENODEV; | 2669 | int err = -ENODEV; |
2756 | 2670 | ||
2757 | edac_printk(KERN_INFO, EDAC_MOD_STR, EDAC_AMD64_VERSION "\n"); | 2671 | printk(KERN_INFO "AMD64 EDAC driver v%s\n", EDAC_AMD64_VERSION); |
2758 | 2672 | ||
2759 | opstate_init(); | 2673 | opstate_init(); |
2760 | 2674 | ||
diff --git a/drivers/edac/amd64_edac.h b/drivers/edac/amd64_edac.h index 613ec72b0f65..11be36a311eb 100644 --- a/drivers/edac/amd64_edac.h +++ b/drivers/edac/amd64_edac.h | |||
@@ -144,7 +144,7 @@ | |||
144 | * sections 3.5.4 and 3.5.5 for more information. | 144 | * sections 3.5.4 and 3.5.5 for more information. |
145 | */ | 145 | */ |
146 | 146 | ||
147 | #define EDAC_AMD64_VERSION "v3.3.0" | 147 | #define EDAC_AMD64_VERSION "3.4.0" |
148 | #define EDAC_MOD_STR "amd64_edac" | 148 | #define EDAC_MOD_STR "amd64_edac" |
149 | 149 | ||
150 | /* Extended Model from CPUID, for CPU Revision numbers */ | 150 | /* Extended Model from CPUID, for CPU Revision numbers */ |
@@ -153,85 +153,64 @@ | |||
153 | #define K8_REV_F 4 | 153 | #define K8_REV_F 4 |
154 | 154 | ||
155 | /* Hardware limit on ChipSelect rows per MC and processors per system */ | 155 | /* Hardware limit on ChipSelect rows per MC and processors per system */ |
156 | #define MAX_CS_COUNT 8 | 156 | #define NUM_CHIPSELECTS 8 |
157 | #define DRAM_REG_COUNT 8 | 157 | #define DRAM_RANGES 8 |
158 | 158 | ||
159 | #define ON true | 159 | #define ON true |
160 | #define OFF false | 160 | #define OFF false |
161 | 161 | ||
162 | /* | 162 | /* |
163 | * Create a contiguous bitmask starting at bit position @lo and ending at | ||
164 | * position @hi. For example | ||
165 | * | ||
166 | * GENMASK(21, 39) gives us the 64-bit vector 0x000000ffffe00000. | ||
167 | */ | ||
168 | #define GENMASK(lo, hi) (((1ULL << ((hi) - (lo) + 1)) - 1) << (lo)) | ||
169 | |||
170 | /* | ||
163 | * PCI-defined configuration space registers | 171 | * PCI-defined configuration space registers |
164 | */ | 172 | */ |
173 | #define PCI_DEVICE_ID_AMD_15H_NB_F1 0x1601 | ||
174 | #define PCI_DEVICE_ID_AMD_15H_NB_F2 0x1602 | ||
165 | 175 | ||
166 | 176 | ||
167 | /* | 177 | /* |
168 | * Function 1 - Address Map | 178 | * Function 1 - Address Map |
169 | */ | 179 | */ |
170 | #define K8_DRAM_BASE_LOW 0x40 | 180 | #define DRAM_BASE_LO 0x40 |
171 | #define K8_DRAM_LIMIT_LOW 0x44 | 181 | #define DRAM_LIMIT_LO 0x44 |
172 | #define K8_DHAR 0xf0 | ||
173 | |||
174 | #define DHAR_VALID BIT(0) | ||
175 | #define F10_DRAM_MEM_HOIST_VALID BIT(1) | ||
176 | 182 | ||
177 | #define DHAR_BASE_MASK 0xff000000 | 183 | #define dram_intlv_en(pvt, i) ((u8)((pvt->ranges[i].base.lo >> 8) & 0x7)) |
178 | #define dhar_base(dhar) (dhar & DHAR_BASE_MASK) | 184 | #define dram_rw(pvt, i) ((u8)(pvt->ranges[i].base.lo & 0x3)) |
185 | #define dram_intlv_sel(pvt, i) ((u8)((pvt->ranges[i].lim.lo >> 8) & 0x7)) | ||
186 | #define dram_dst_node(pvt, i) ((u8)(pvt->ranges[i].lim.lo & 0x7)) | ||
179 | 187 | ||
180 | #define K8_DHAR_OFFSET_MASK 0x0000ff00 | 188 | #define DHAR 0xf0 |
181 | #define k8_dhar_offset(dhar) ((dhar & K8_DHAR_OFFSET_MASK) << 16) | 189 | #define dhar_valid(pvt) ((pvt)->dhar & BIT(0)) |
190 | #define dhar_mem_hoist_valid(pvt) ((pvt)->dhar & BIT(1)) | ||
191 | #define dhar_base(pvt) ((pvt)->dhar & 0xff000000) | ||
192 | #define k8_dhar_offset(pvt) (((pvt)->dhar & 0x0000ff00) << 16) | ||
182 | 193 | ||
183 | #define F10_DHAR_OFFSET_MASK 0x0000ff80 | ||
184 | /* NOTE: Extra mask bit vs K8 */ | 194 | /* NOTE: Extra mask bit vs K8 */ |
185 | #define f10_dhar_offset(dhar) ((dhar & F10_DHAR_OFFSET_MASK) << 16) | 195 | #define f10_dhar_offset(pvt) (((pvt)->dhar & 0x0000ff80) << 16) |
186 | 196 | ||
197 | #define DCT_CFG_SEL 0x10C | ||
187 | 198 | ||
188 | /* F10 High BASE/LIMIT registers */ | 199 | #define DRAM_BASE_HI 0x140 |
189 | #define F10_DRAM_BASE_HIGH 0x140 | 200 | #define DRAM_LIMIT_HI 0x144 |
190 | #define F10_DRAM_LIMIT_HIGH 0x144 | ||
191 | 201 | ||
192 | 202 | ||
193 | /* | 203 | /* |
194 | * Function 2 - DRAM controller | 204 | * Function 2 - DRAM controller |
195 | */ | 205 | */ |
196 | #define K8_DCSB0 0x40 | 206 | #define DCSB0 0x40 |
197 | #define F10_DCSB1 0x140 | 207 | #define DCSB1 0x140 |
208 | #define DCSB_CS_ENABLE BIT(0) | ||
198 | 209 | ||
199 | #define K8_DCSB_CS_ENABLE BIT(0) | 210 | #define DCSM0 0x60 |
200 | #define K8_DCSB_NPT_SPARE BIT(1) | 211 | #define DCSM1 0x160 |
201 | #define K8_DCSB_NPT_TESTFAIL BIT(2) | ||
202 | 212 | ||
203 | /* | 213 | #define csrow_enabled(i, dct, pvt) ((pvt)->csels[(dct)].csbases[(i)] & DCSB_CS_ENABLE) |
204 | * REV E: select [31:21] and [15:9] from DCSB and the shift amount to form | ||
205 | * the address | ||
206 | */ | ||
207 | #define REV_E_DCSB_BASE_BITS (0xFFE0FE00ULL) | ||
208 | #define REV_E_DCS_SHIFT 4 | ||
209 | |||
210 | #define REV_F_F1Xh_DCSB_BASE_BITS (0x1FF83FE0ULL) | ||
211 | #define REV_F_F1Xh_DCS_SHIFT 8 | ||
212 | |||
213 | /* | ||
214 | * REV F and later: selects [28:19] and [13:5] from DCSB and the shift amount | ||
215 | * to form the address | ||
216 | */ | ||
217 | #define REV_F_DCSB_BASE_BITS (0x1FF83FE0ULL) | ||
218 | #define REV_F_DCS_SHIFT 8 | ||
219 | |||
220 | /* DRAM CS Mask Registers */ | ||
221 | #define K8_DCSM0 0x60 | ||
222 | #define F10_DCSM1 0x160 | ||
223 | |||
224 | /* REV E: select [29:21] and [15:9] from DCSM */ | ||
225 | #define REV_E_DCSM_MASK_BITS 0x3FE0FE00 | ||
226 | |||
227 | /* unused bits [24:20] and [12:0] */ | ||
228 | #define REV_E_DCS_NOTUSED_BITS 0x01F01FFF | ||
229 | |||
230 | /* REV F and later: select [28:19] and [13:5] from DCSM */ | ||
231 | #define REV_F_F1Xh_DCSM_MASK_BITS 0x1FF83FE0 | ||
232 | |||
233 | /* unused bits [26:22] and [12:0] */ | ||
234 | #define REV_F_F1Xh_DCS_NOTUSED_BITS 0x07C01FFF | ||
235 | 214 | ||
236 | #define DBAM0 0x80 | 215 | #define DBAM0 0x80 |
237 | #define DBAM1 0x180 | 216 | #define DBAM1 0x180 |
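The header replaces the standalone DHAR constants with accessor macros keyed off pvt, plus a local GENMASK() taking (lo, hi) argument order. A compilable sketch exercising these helpers with an arbitrary DHAR test value:

```c
#include <stdio.h>
#include <stdint.h>

#define BIT(n)		(1U << (n))
/* Note the (lo, hi) argument order used by this header. */
#define GENMASK(lo, hi)	(((1ULL << ((hi) - (lo) + 1)) - 1) << (lo))

struct pvt { uint32_t dhar; };

#define dhar_valid(p)		((p)->dhar & BIT(0))
#define dhar_mem_hoist_valid(p)	((p)->dhar & BIT(1))
#define dhar_base(p)		((p)->dhar & 0xff000000)
#define k8_dhar_offset(p)	(((uint64_t)((p)->dhar & 0x0000ff00)) << 16)

int main(void)
{
	struct pvt p = { .dhar = 0xc0003403 };	/* arbitrary test pattern */

	printf("GENMASK(21, 39) = %#llx\n", GENMASK(21, 39));
	printf("valid=%d hoist=%d base=%#x k8_offset=%#llx\n",
	       !!dhar_valid(&p), !!dhar_mem_hoist_valid(&p),
	       dhar_base(&p), (unsigned long long)k8_dhar_offset(&p));
	return 0;
}
```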
@@ -241,148 +220,84 @@ | |||
241 | 220 | ||
242 | #define DBAM_MAX_VALUE 11 | 221 | #define DBAM_MAX_VALUE 11 |
243 | 222 | ||
244 | 223 | #define DCLR0 0x90 | |
245 | #define F10_DCLR_0 0x90 | 224 | #define DCLR1 0x190 |
246 | #define F10_DCLR_1 0x190 | ||
247 | #define REVE_WIDTH_128 BIT(16) | 225 | #define REVE_WIDTH_128 BIT(16) |
248 | #define F10_WIDTH_128 BIT(11) | 226 | #define WIDTH_128 BIT(11) |
249 | 227 | ||
228 | #define DCHR0 0x94 | ||
229 | #define DCHR1 0x194 | ||
230 | #define DDR3_MODE BIT(8) | ||
250 | 231 | ||
251 | #define F10_DCHR_0 0x94 | 232 | #define DCT_SEL_LO 0x110 |
252 | #define F10_DCHR_1 0x194 | 233 | #define dct_sel_baseaddr(pvt) ((pvt)->dct_sel_lo & 0xFFFFF800) |
234 | #define dct_sel_interleave_addr(pvt) (((pvt)->dct_sel_lo >> 6) & 0x3) | ||
235 | #define dct_high_range_enabled(pvt) ((pvt)->dct_sel_lo & BIT(0)) | ||
236 | #define dct_interleave_enabled(pvt) ((pvt)->dct_sel_lo & BIT(2)) | ||
253 | 237 | ||
254 | #define F10_DCHR_FOUR_RANK_DIMM BIT(18) | 238 | #define dct_ganging_enabled(pvt) ((boot_cpu_data.x86 == 0x10) && ((pvt)->dct_sel_lo & BIT(4))) |
255 | #define DDR3_MODE BIT(8) | ||
256 | #define F10_DCHR_MblMode BIT(6) | ||
257 | 239 | ||
240 | #define dct_data_intlv_enabled(pvt) ((pvt)->dct_sel_lo & BIT(5)) | ||
241 | #define dct_memory_cleared(pvt) ((pvt)->dct_sel_lo & BIT(10)) | ||
258 | 242 | ||
259 | #define F10_DCTL_SEL_LOW 0x110 | 243 | #define SWAP_INTLV_REG 0x10c |
260 | #define dct_sel_baseaddr(pvt) ((pvt->dram_ctl_select_low) & 0xFFFFF800) | ||
261 | #define dct_sel_interleave_addr(pvt) (((pvt->dram_ctl_select_low) >> 6) & 0x3) | ||
262 | #define dct_high_range_enabled(pvt) (pvt->dram_ctl_select_low & BIT(0)) | ||
263 | #define dct_interleave_enabled(pvt) (pvt->dram_ctl_select_low & BIT(2)) | ||
264 | #define dct_ganging_enabled(pvt) (pvt->dram_ctl_select_low & BIT(4)) | ||
265 | #define dct_data_intlv_enabled(pvt) (pvt->dram_ctl_select_low & BIT(5)) | ||
266 | #define dct_dram_enabled(pvt) (pvt->dram_ctl_select_low & BIT(8)) | ||
267 | #define dct_memory_cleared(pvt) (pvt->dram_ctl_select_low & BIT(10)) | ||
268 | 244 | ||
269 | #define F10_DCTL_SEL_HIGH 0x114 | 245 | #define DCT_SEL_HI 0x114 |
270 | 246 | ||
271 | /* | 247 | /* |
272 | * Function 3 - Misc Control | 248 | * Function 3 - Misc Control |
273 | */ | 249 | */ |
274 | #define K8_NBCTL 0x40 | 250 | #define NBCTL 0x40 |
275 | |||
276 | /* Correctable ECC error reporting enable */ | ||
277 | #define K8_NBCTL_CECCEn BIT(0) | ||
278 | |||
279 | /* UnCorrectable ECC error reporting enable */ | ||
280 | #define K8_NBCTL_UECCEn BIT(1) | ||
281 | 251 | ||
282 | #define K8_NBCFG 0x44 | 252 | #define NBCFG 0x44 |
283 | #define K8_NBCFG_CHIPKILL BIT(23) | 253 | #define NBCFG_CHIPKILL BIT(23) |
284 | #define K8_NBCFG_ECC_ENABLE BIT(22) | 254 | #define NBCFG_ECC_ENABLE BIT(22) |
285 | 255 | ||
286 | #define K8_NBSL 0x48 | 256 | /* F3x48: NBSL */ |
287 | |||
288 | |||
289 | /* Family F10h: Normalized Extended Error Codes */ | ||
290 | #define F10_NBSL_EXT_ERR_RES 0x0 | ||
291 | #define F10_NBSL_EXT_ERR_ECC 0x8 | 257 | #define F10_NBSL_EXT_ERR_ECC 0x8 |
258 | #define NBSL_PP_OBS 0x2 | ||
292 | 259 | ||
293 | /* Next two are overloaded values */ | 260 | #define SCRCTRL 0x58 |
294 | #define F10_NBSL_EXT_ERR_LINK_PROTO 0xB | ||
295 | #define F10_NBSL_EXT_ERR_L3_PROTO 0xB | ||
296 | |||
297 | #define F10_NBSL_EXT_ERR_NB_ARRAY 0xC | ||
298 | #define F10_NBSL_EXT_ERR_DRAM_PARITY 0xD | ||
299 | #define F10_NBSL_EXT_ERR_LINK_RETRY 0xE | ||
300 | |||
301 | /* Next two are overloaded values */ | ||
302 | #define F10_NBSL_EXT_ERR_GART_WALK 0xF | ||
303 | #define F10_NBSL_EXT_ERR_DEV_WALK 0xF | ||
304 | |||
305 | /* 0x10 to 0x1B: Reserved */ | ||
306 | #define F10_NBSL_EXT_ERR_L3_DATA 0x1C | ||
307 | #define F10_NBSL_EXT_ERR_L3_TAG 0x1D | ||
308 | #define F10_NBSL_EXT_ERR_L3_LRU 0x1E | ||
309 | |||
310 | /* K8: Normalized Extended Error Codes */ | ||
311 | #define K8_NBSL_EXT_ERR_ECC 0x0 | ||
312 | #define K8_NBSL_EXT_ERR_CRC 0x1 | ||
313 | #define K8_NBSL_EXT_ERR_SYNC 0x2 | ||
314 | #define K8_NBSL_EXT_ERR_MST 0x3 | ||
315 | #define K8_NBSL_EXT_ERR_TGT 0x4 | ||
316 | #define K8_NBSL_EXT_ERR_GART 0x5 | ||
317 | #define K8_NBSL_EXT_ERR_RMW 0x6 | ||
318 | #define K8_NBSL_EXT_ERR_WDT 0x7 | ||
319 | #define K8_NBSL_EXT_ERR_CHIPKILL_ECC 0x8 | ||
320 | #define K8_NBSL_EXT_ERR_DRAM_PARITY 0xD | ||
321 | |||
322 | /* | ||
323 | * The following are for BUS type errors AFTER values have been normalized by | ||
324 | * shifting right | ||
325 | */ | ||
326 | #define K8_NBSL_PP_SRC 0x0 | ||
327 | #define K8_NBSL_PP_RES 0x1 | ||
328 | #define K8_NBSL_PP_OBS 0x2 | ||
329 | #define K8_NBSL_PP_GENERIC 0x3 | ||
330 | |||
331 | #define EXTRACT_ERR_CPU_MAP(x) ((x) & 0xF) | ||
332 | |||
333 | #define K8_NBEAL 0x50 | ||
334 | #define K8_NBEAH 0x54 | ||
335 | #define K8_SCRCTRL 0x58 | ||
336 | |||
337 | #define F10_NB_CFG_LOW 0x88 | ||
338 | 261 | ||
339 | #define F10_ONLINE_SPARE 0xB0 | 262 | #define F10_ONLINE_SPARE 0xB0 |
340 | #define F10_ONLINE_SPARE_SWAPDONE0(x) ((x) & BIT(1)) | 263 | #define online_spare_swap_done(pvt, c) (((pvt)->online_spare >> (1 + 2 * (c))) & 0x1) |
341 | #define F10_ONLINE_SPARE_SWAPDONE1(x) ((x) & BIT(3)) | 264 | #define online_spare_bad_dramcs(pvt, c) (((pvt)->online_spare >> (4 + 4 * (c))) & 0x7) |
342 | #define F10_ONLINE_SPARE_BADDRAM_CS0(x) (((x) >> 4) & 0x00000007) | ||
343 | #define F10_ONLINE_SPARE_BADDRAM_CS1(x) (((x) >> 8) & 0x00000007) | ||
344 | 265 | ||
345 | #define F10_NB_ARRAY_ADDR 0xB8 | 266 | #define F10_NB_ARRAY_ADDR 0xB8 |
346 | 267 | #define F10_NB_ARRAY_DRAM_ECC BIT(31) | |
347 | #define F10_NB_ARRAY_DRAM_ECC 0x80000000 | ||
348 | 268 | ||
349 | /* Bits [2:1] are used to select 16-byte section within a 64-byte cacheline */ | 269 | /* Bits [2:1] are used to select 16-byte section within a 64-byte cacheline */ |
350 | #define SET_NB_ARRAY_ADDRESS(section) (((section) & 0x3) << 1) | 270 | #define SET_NB_ARRAY_ADDRESS(section) (((section) & 0x3) << 1) |
351 | 271 | ||
352 | #define F10_NB_ARRAY_DATA 0xBC | 272 | #define F10_NB_ARRAY_DATA 0xBC |
353 | |||
354 | #define SET_NB_DRAM_INJECTION_WRITE(word, bits) \ | 273 | #define SET_NB_DRAM_INJECTION_WRITE(word, bits) \ |
355 | (BIT(((word) & 0xF) + 20) | \ | 274 | (BIT(((word) & 0xF) + 20) | \ |
356 | BIT(17) | bits) | 275 | BIT(17) | bits) |
357 | |||
358 | #define SET_NB_DRAM_INJECTION_READ(word, bits) \ | 276 | #define SET_NB_DRAM_INJECTION_READ(word, bits) \ |
359 | (BIT(((word) & 0xF) + 20) | \ | 277 | (BIT(((word) & 0xF) + 20) | \ |
360 | BIT(16) | bits) | 278 | BIT(16) | bits) |
361 | 279 | ||
362 | #define K8_NBCAP 0xE8 | 280 | #define NBCAP 0xE8 |
363 | #define K8_NBCAP_CORES (BIT(12)|BIT(13)) | 281 | #define NBCAP_CHIPKILL BIT(4) |
364 | #define K8_NBCAP_CHIPKILL BIT(4) | 282 | #define NBCAP_SECDED BIT(3) |
365 | #define K8_NBCAP_SECDED BIT(3) | 283 | #define NBCAP_DCT_DUAL BIT(0) |
366 | #define K8_NBCAP_DCT_DUAL BIT(0) | ||
367 | 284 | ||
368 | #define EXT_NB_MCA_CFG 0x180 | 285 | #define EXT_NB_MCA_CFG 0x180 |
369 | 286 | ||
370 | /* MSRs */ | 287 | /* MSRs */ |
371 | #define K8_MSR_MCGCTL_NBE BIT(4) | 288 | #define MSR_MCGCTL_NBE BIT(4) |
372 | |||
373 | #define K8_MSR_MC4CTL 0x0410 | ||
374 | #define K8_MSR_MC4STAT 0x0411 | ||
375 | #define K8_MSR_MC4ADDR 0x0412 | ||
376 | 289 | ||
377 | /* AMD puts the first MC device at PCI device (slot) 0x18. */ | 290 | /* AMD puts the first MC device at PCI device (slot) 0x18. */ |
378 | static inline int get_node_id(struct pci_dev *pdev) | 291 | static inline u8 get_node_id(struct pci_dev *pdev) |
379 | { | 292 | { |
380 | return PCI_SLOT(pdev->devfn) - 0x18; | 293 | return PCI_SLOT(pdev->devfn) - 0x18; |
381 | } | 294 | } |
382 | 295 | ||
383 | enum amd64_chipset_families { | 296 | enum amd_families { |
384 | K8_CPUS = 0, | 297 | K8_CPUS = 0, |
385 | F10_CPUS, | 298 | F10_CPUS, |
299 | F15_CPUS, | ||
300 | NUM_FAMILIES, | ||
386 | }; | 301 | }; |
387 | 302 | ||
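The revamped online-spare helpers in the hunk above parameterize the channel instead of duplicating a macro per channel. A small standalone check of the bit arithmetic, using a fabricated F10_ONLINE_SPARE value:

```c
#include <stdio.h>
#include <stdint.h>

struct pvt { uint32_t online_spare; };

/* As in the hunk above: swap-done at bits 1/3, bad CS at [6:4]/[10:8]. */
#define online_spare_swap_done(p, c)  (((p)->online_spare >> (1 + 2 * (c))) & 0x1)
#define online_spare_bad_dramcs(p, c) (((p)->online_spare >> (4 + 4 * (c))) & 0x7)

int main(void)
{
	/* Fabricated: channel 0 swap done, bad CS 5; channel 1 bad CS 2. */
	struct pvt p = { .online_spare = (1u << 1) | (5u << 4) | (2u << 8) };
	int c;

	for (c = 0; c < 2; c++)
		printf("chan %d: swap_done=%u bad_cs=%u\n", c,
		       online_spare_swap_done(&p, c),
		       online_spare_bad_dramcs(&p, c));
	return 0;
}
```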
388 | /* Error injection control structure */ | 303 | /* Error injection control structure */ |
@@ -392,13 +307,35 @@ struct error_injection { | |||
392 | u32 bit_map; | 307 | u32 bit_map; |
393 | }; | 308 | }; |
394 | 309 | ||
310 | /* low and high part of PCI config space regs */ | ||
311 | struct reg_pair { | ||
312 | u32 lo, hi; | ||
313 | }; | ||
314 | |||
315 | /* | ||
316 | * See F1x[1, 0][7C:40] DRAM Base/Limit Registers | ||
317 | */ | ||
318 | struct dram_range { | ||
319 | struct reg_pair base; | ||
320 | struct reg_pair lim; | ||
321 | }; | ||
322 | |||
323 | /* A DCT chip selects collection */ | ||
324 | struct chip_select { | ||
325 | u32 csbases[NUM_CHIPSELECTS]; | ||
326 | u8 b_cnt; | ||
327 | |||
328 | u32 csmasks[NUM_CHIPSELECTS]; | ||
329 | u8 m_cnt; | ||
330 | }; | ||
331 | |||
395 | struct amd64_pvt { | 332 | struct amd64_pvt { |
396 | struct low_ops *ops; | 333 | struct low_ops *ops; |
397 | 334 | ||
398 | /* pci_device handles which we utilize */ | 335 | /* pci_device handles which we utilize */ |
399 | struct pci_dev *F1, *F2, *F3; | 336 | struct pci_dev *F1, *F2, *F3; |
400 | 337 | ||
401 | int mc_node_id; /* MC index of this MC node */ | 338 | unsigned mc_node_id; /* MC index of this MC node */ |
402 | int ext_model; /* extended model value of this node */ | 339 | int ext_model; /* extended model value of this node */ |
403 | int channel_count; | 340 | int channel_count; |
404 | 341 | ||
@@ -414,60 +351,50 @@ struct amd64_pvt { | |||
414 | u32 dbam0; /* DRAM Base Address Mapping reg for DCT0 */ | 351 | u32 dbam0; /* DRAM Base Address Mapping reg for DCT0 */ |
415 | u32 dbam1; /* DRAM Base Address Mapping reg for DCT1 */ | 352 | u32 dbam1; /* DRAM Base Address Mapping reg for DCT1 */ |
416 | 353 | ||
417 | /* DRAM CS Base Address Registers F2x[1,0][5C:40] */ | 354 | /* one for each DCT */ |
418 | u32 dcsb0[MAX_CS_COUNT]; | 355 | struct chip_select csels[2]; |
419 | u32 dcsb1[MAX_CS_COUNT]; | 356 | |
420 | 357 | /* DRAM base and limit pairs F1x[78,70,68,60,58,50,48,40] */ | |
421 | /* DRAM CS Mask Registers F2x[1,0][6C:60] */ | 358 | struct dram_range ranges[DRAM_RANGES]; |
422 | u32 dcsm0[MAX_CS_COUNT]; | ||
423 | u32 dcsm1[MAX_CS_COUNT]; | ||
424 | |||
425 | /* | ||
426 | * Decoded parts of DRAM BASE and LIMIT Registers | ||
427 | * F1x[78,70,68,60,58,50,48,40] | ||
428 | */ | ||
429 | u64 dram_base[DRAM_REG_COUNT]; | ||
430 | u64 dram_limit[DRAM_REG_COUNT]; | ||
431 | u8 dram_IntlvSel[DRAM_REG_COUNT]; | ||
432 | u8 dram_IntlvEn[DRAM_REG_COUNT]; | ||
433 | u8 dram_DstNode[DRAM_REG_COUNT]; | ||
434 | u8 dram_rw_en[DRAM_REG_COUNT]; | ||
435 | |||
436 | /* | ||
437 | * The following fields are set at (load) run time, after CPU revision | ||
438 | * has been determined, since the dct_base and dct_mask registers vary | ||
439 | * based on revision | ||
440 | */ | ||
441 | u32 dcsb_base; /* DCSB base bits */ | ||
442 | u32 dcsm_mask; /* DCSM mask bits */ | ||
443 | u32 cs_count; /* num chip selects (== num DCSB registers) */ | ||
444 | u32 num_dcsm; /* Number of DCSM registers */ | ||
445 | u32 dcs_mask_notused; /* DCSM notused mask bits */ | ||
446 | u32 dcs_shift; /* DCSB and DCSM shift value */ | ||
447 | 359 | ||
448 | u64 top_mem; /* top of memory below 4GB */ | 360 | u64 top_mem; /* top of memory below 4GB */ |
449 | u64 top_mem2; /* top of memory above 4GB */ | 361 | u64 top_mem2; /* top of memory above 4GB */ |
450 | 362 | ||
451 | u32 dram_ctl_select_low; /* DRAM Controller Select Low Reg */ | 363 | u32 dct_sel_lo; /* DRAM Controller Select Low */ |
452 | u32 dram_ctl_select_high; /* DRAM Controller Select High Reg */ | 364 | u32 dct_sel_hi; /* DRAM Controller Select High */ |
453 | u32 online_spare; /* On-Line spare Reg */ | 365 | u32 online_spare; /* On-Line spare Reg */ |
454 | 366 | ||
455 | /* x4 or x8 syndromes in use */ | 367 | /* x4 or x8 syndromes in use */ |
456 | u8 syn_type; | 368 | u8 ecc_sym_sz; |
457 | |||
458 | /* temp storage for when input is received from sysfs */ | ||
459 | struct err_regs ctl_error_info; | ||
460 | 369 | ||
461 | /* place to store error injection parameters prior to issue */ | 370 | /* place to store error injection parameters prior to issue */ |
462 | struct error_injection injection; | 371 | struct error_injection injection; |
372 | }; | ||
463 | 373 | ||
464 | /* DCT per-family scrubrate setting */ | 374 | static inline u64 get_dram_base(struct amd64_pvt *pvt, unsigned i) |
465 | u32 min_scrubrate; | 375 | { |
376 | u64 addr = ((u64)pvt->ranges[i].base.lo & 0xffff0000) << 8; | ||
466 | 377 | ||
467 | /* family name this instance is running on */ | 378 | if (boot_cpu_data.x86 == 0xf) |
468 | const char *ctl_name; | 379 | return addr; |
469 | 380 | ||
470 | }; | 381 | return (((u64)pvt->ranges[i].base.hi & 0x000000ff) << 40) | addr; |
382 | } | ||
383 | |||
384 | static inline u64 get_dram_limit(struct amd64_pvt *pvt, unsigned i) | ||
385 | { | ||
386 | u64 lim = (((u64)pvt->ranges[i].lim.lo & 0xffff0000) << 8) | 0x00ffffff; | ||
387 | |||
388 | if (boot_cpu_data.x86 == 0xf) | ||
389 | return lim; | ||
390 | |||
391 | return (((u64)pvt->ranges[i].lim.hi & 0x000000ff) << 40) | lim; | ||
392 | } | ||
393 | |||
394 | static inline u16 extract_syndrome(u64 status) | ||
395 | { | ||
396 | return ((status >> 47) & 0xff) | ((status >> 16) & 0xff00); | ||
397 | } | ||
471 | 398 | ||
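get_dram_base()/get_dram_limit() above splice a reg_pair into an up-to-48-bit physical address, with K8 (family 0xf) lacking the high half. A standalone version, replacing the boot_cpu_data family test with a plain flag and using a fabricated 4 GiB to 8 GiB - 1 range:

```c
#include <stdio.h>
#include <stdint.h>

struct reg_pair   { uint32_t lo, hi; };
struct dram_range { struct reg_pair base, lim; };

/* is_k8 stands in for the boot_cpu_data.x86 == 0xf test. */
static uint64_t get_dram_base(const struct dram_range *r, int is_k8)
{
	uint64_t addr = ((uint64_t)r->base.lo & 0xffff0000) << 8;

	if (is_k8)
		return addr;
	return (((uint64_t)r->base.hi & 0xff) << 40) | addr;
}

static uint64_t get_dram_limit(const struct dram_range *r, int is_k8)
{
	uint64_t lim = (((uint64_t)r->lim.lo & 0xffff0000) << 8) | 0x00ffffff;

	if (is_k8)
		return lim;
	return (((uint64_t)r->lim.hi & 0xff) << 40) | lim;
}

int main(void)
{
	/* Fabricated: base 4 GiB, limit 8 GiB - 1, high halves clear. */
	struct dram_range r = {
		.base = { .lo = 0x01000000, .hi = 0 },
		.lim  = { .lo = 0x01ff0000, .hi = 0 },
	};

	printf("base=%#llx limit=%#llx\n",
	       (unsigned long long)get_dram_base(&r, 0),
	       (unsigned long long)get_dram_limit(&r, 0));
	return 0;
}
```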
472 | /* | 399 | /* |
473 | * per-node ECC settings descriptor | 400 | * per-node ECC settings descriptor |
@@ -482,14 +409,6 @@ struct ecc_settings { | |||
482 | } flags; | 409 | } flags; |
483 | }; | 410 | }; |
484 | 411 | ||
485 | extern const char *tt_msgs[4]; | ||
486 | extern const char *ll_msgs[4]; | ||
487 | extern const char *rrrr_msgs[16]; | ||
488 | extern const char *to_msgs[2]; | ||
489 | extern const char *pp_msgs[4]; | ||
490 | extern const char *ii_msgs[4]; | ||
491 | extern const char *htlink_msgs[8]; | ||
492 | |||
493 | #ifdef CONFIG_EDAC_DEBUG | 412 | #ifdef CONFIG_EDAC_DEBUG |
494 | #define NUM_DBG_ATTRS 5 | 413 | #define NUM_DBG_ATTRS 5 |
495 | #else | 414 | #else |
@@ -511,14 +430,11 @@ extern struct mcidev_sysfs_attribute amd64_dbg_attrs[NUM_DBG_ATTRS], | |||
511 | */ | 430 | */ |
512 | struct low_ops { | 431 | struct low_ops { |
513 | int (*early_channel_count) (struct amd64_pvt *pvt); | 432 | int (*early_channel_count) (struct amd64_pvt *pvt); |
514 | 433 | void (*map_sysaddr_to_csrow) (struct mem_ctl_info *mci, u64 sys_addr, | |
515 | u64 (*get_error_address) (struct mem_ctl_info *mci, | 434 | u16 syndrome); |
516 | struct err_regs *info); | 435 | int (*dbam_to_cs) (struct amd64_pvt *pvt, u8 dct, unsigned cs_mode); |
517 | void (*read_dram_base_limit) (struct amd64_pvt *pvt, int dram); | 436 | int (*read_dct_pci_cfg) (struct amd64_pvt *pvt, int offset, |
518 | void (*read_dram_ctl_register) (struct amd64_pvt *pvt); | 437 | u32 *val, const char *func); |
519 | void (*map_sysaddr_to_csrow) (struct mem_ctl_info *mci, | ||
520 | struct err_regs *info, u64 SystemAddr); | ||
521 | int (*dbam_to_cs) (struct amd64_pvt *pvt, int cs_mode); | ||
522 | }; | 438 | }; |
523 | 439 | ||
524 | struct amd64_family_type { | 440 | struct amd64_family_type { |
@@ -527,28 +443,17 @@ struct amd64_family_type { | |||
527 | struct low_ops ops; | 443 | struct low_ops ops; |
528 | }; | 444 | }; |
529 | 445 | ||
530 | static inline int amd64_read_pci_cfg_dword(struct pci_dev *pdev, int offset, | 446 | int __amd64_write_pci_cfg_dword(struct pci_dev *pdev, int offset, |
531 | u32 *val, const char *func) | 447 | u32 val, const char *func); |
532 | { | ||
533 | int err = 0; | ||
534 | |||
535 | err = pci_read_config_dword(pdev, offset, val); | ||
536 | if (err) | ||
537 | amd64_warn("%s: error reading F%dx%x.\n", | ||
538 | func, PCI_FUNC(pdev->devfn), offset); | ||
539 | |||
540 | return err; | ||
541 | } | ||
542 | 448 | ||
543 | #define amd64_read_pci_cfg(pdev, offset, val) \ | 449 | #define amd64_read_pci_cfg(pdev, offset, val) \ |
544 | amd64_read_pci_cfg_dword(pdev, offset, val, __func__) | 450 | __amd64_read_pci_cfg_dword(pdev, offset, val, __func__) |
545 | 451 | ||
546 | /* | 452 | #define amd64_write_pci_cfg(pdev, offset, val) \ |
547 | * For future CPU versions, verify the following as new 'slow' rates appear and | 453 | __amd64_write_pci_cfg_dword(pdev, offset, val, __func__) |
548 | * modify the necessary skip values for the supported CPU. | 454 | |
549 | */ | 455 | #define amd64_read_dct_pci_cfg(pvt, offset, val) \ |
550 | #define K8_MIN_SCRUB_RATE_BITS 0x0 | 456 | pvt->ops->read_dct_pci_cfg(pvt, offset, val, __func__) |
551 | #define F10_MIN_SCRUB_RATE_BITS 0x5 | ||
552 | 457 | ||
553 | int amd64_get_dram_hole_info(struct mem_ctl_info *mci, u64 *hole_base, | 458 | int amd64_get_dram_hole_info(struct mem_ctl_info *mci, u64 *hole_base, |
554 | u64 *hole_offset, u64 *hole_size); | 459 | u64 *hole_offset, u64 *hole_size); |
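The read helper moves out of line here, and both read and write gain __func__-capturing wrapper macros so every call site reports its own name in the warning path. The same pattern outside the kernel, with a stubbed-out accessor standing in for the real pci_read_config_dword():

```c
#include <stdio.h>
#include <stdint.h>

/* Stub standing in for the real PCI accessor. */
static int pci_read_config_dword(void *pdev, int offset, uint32_t *val)
{
	(void)pdev;
	*val = 0xdeadbeef + offset;	/* fake register content */
	return 0;			/* 0 == success */
}

/* Out-of-line worker: a single copy of the error reporting. */
int __amd64_read_pci_cfg_dword(void *pdev, int offset, uint32_t *val,
			       const char *func)
{
	int err = pci_read_config_dword(pdev, offset, val);

	if (err)
		fprintf(stderr, "%s: error reading offset %#x\n", func, offset);
	return err;
}

/* Call sites stay short and still report their own function name. */
#define amd64_read_pci_cfg(pdev, offset, val) \
	__amd64_read_pci_cfg_dword(pdev, offset, val, __func__)

int main(void)
{
	uint32_t v;

	if (!amd64_read_pci_cfg(NULL, 0x44 /* NBCFG */, &v))
		printf("NBCFG = %#x\n", v);
	return 0;
}
```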
diff --git a/drivers/edac/amd64_edac_inj.c b/drivers/edac/amd64_edac_inj.c index 688478de1cbd..303f10e03dda 100644 --- a/drivers/edac/amd64_edac_inj.c +++ b/drivers/edac/amd64_edac_inj.c | |||
@@ -117,13 +117,13 @@ static ssize_t amd64_inject_read_store(struct mem_ctl_info *mci, | |||
117 | /* Form value to choose 16-byte section of cacheline */ | 117 | /* Form value to choose 16-byte section of cacheline */ |
118 | section = F10_NB_ARRAY_DRAM_ECC | | 118 | section = F10_NB_ARRAY_DRAM_ECC | |
119 | SET_NB_ARRAY_ADDRESS(pvt->injection.section); | 119 | SET_NB_ARRAY_ADDRESS(pvt->injection.section); |
120 | pci_write_config_dword(pvt->F3, F10_NB_ARRAY_ADDR, section); | 120 | amd64_write_pci_cfg(pvt->F3, F10_NB_ARRAY_ADDR, section); |
121 | 121 | ||
122 | word_bits = SET_NB_DRAM_INJECTION_READ(pvt->injection.word, | 122 | word_bits = SET_NB_DRAM_INJECTION_READ(pvt->injection.word, |
123 | pvt->injection.bit_map); | 123 | pvt->injection.bit_map); |
124 | 124 | ||
125 | /* Issue 'word' and 'bit' along with the READ request */ | 125 | /* Issue 'word' and 'bit' along with the READ request */ |
126 | pci_write_config_dword(pvt->F3, F10_NB_ARRAY_DATA, word_bits); | 126 | amd64_write_pci_cfg(pvt->F3, F10_NB_ARRAY_DATA, word_bits); |
127 | 127 | ||
128 | debugf0("section=0x%x word_bits=0x%x\n", section, word_bits); | 128 | debugf0("section=0x%x word_bits=0x%x\n", section, word_bits); |
129 | 129 | ||
@@ -150,13 +150,13 @@ static ssize_t amd64_inject_write_store(struct mem_ctl_info *mci, | |||
150 | /* Form value to choose 16-byte section of cacheline */ | 150 | /* Form value to choose 16-byte section of cacheline */ |
151 | section = F10_NB_ARRAY_DRAM_ECC | | 151 | section = F10_NB_ARRAY_DRAM_ECC | |
152 | SET_NB_ARRAY_ADDRESS(pvt->injection.section); | 152 | SET_NB_ARRAY_ADDRESS(pvt->injection.section); |
153 | pci_write_config_dword(pvt->F3, F10_NB_ARRAY_ADDR, section); | 153 | amd64_write_pci_cfg(pvt->F3, F10_NB_ARRAY_ADDR, section); |
154 | 154 | ||
155 | word_bits = SET_NB_DRAM_INJECTION_WRITE(pvt->injection.word, | 155 | word_bits = SET_NB_DRAM_INJECTION_WRITE(pvt->injection.word, |
156 | pvt->injection.bit_map); | 156 | pvt->injection.bit_map); |
157 | 157 | ||
158 | /* Issue 'word' and 'bit' along with the WRITE request */ | 158 | /* Issue 'word' and 'bit' along with the WRITE request */ |
159 | pci_write_config_dword(pvt->F3, F10_NB_ARRAY_DATA, word_bits); | 159 | amd64_write_pci_cfg(pvt->F3, F10_NB_ARRAY_DATA, word_bits); |
160 | 160 | ||
161 | debugf0("section=0x%x word_bits=0x%x\n", section, word_bits); | 161 | debugf0("section=0x%x word_bits=0x%x\n", section, word_bits); |
162 | 162 | ||
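Both stores build the injection control word the same way; only BIT(17) (trigger on write) versus BIT(16) (trigger on read) differs. A quick standalone check of the two macros with an arbitrary word index and bit map (keep word small so the shift stays within 32 bits in this sketch):

```c
#include <stdio.h>
#include <stdint.h>

#define BIT(n) (1U << (n))

/* As in amd64_edac.h above: the low 4 bits of 'word' pick a bit
 * position starting at 20; BIT(17)/BIT(16) select write/read trigger. */
#define SET_NB_DRAM_INJECTION_WRITE(word, bits) \
	(BIT(((word) & 0xF) + 20) | BIT(17) | (bits))
#define SET_NB_DRAM_INJECTION_READ(word, bits) \
	(BIT(((word) & 0xF) + 20) | BIT(16) | (bits))

int main(void)
{
	uint32_t wr = SET_NB_DRAM_INJECTION_WRITE(3, 0x5a);	/* arbitrary */
	uint32_t rd = SET_NB_DRAM_INJECTION_READ(3, 0x5a);

	printf("write word_bits=%#x read word_bits=%#x\n", wr, rd);
	return 0;
}
```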
diff --git a/drivers/edac/edac_mc_sysfs.c b/drivers/edac/edac_mc_sysfs.c index 39d97cfdf58c..73196f7b7229 100644 --- a/drivers/edac/edac_mc_sysfs.c +++ b/drivers/edac/edac_mc_sysfs.c | |||
@@ -785,10 +785,10 @@ static int edac_create_mci_instance_attributes(struct mem_ctl_info *mci, | |||
785 | { | 785 | { |
786 | int err; | 786 | int err; |
787 | 787 | ||
788 | debugf1("%s()\n", __func__); | 788 | debugf4("%s()\n", __func__); |
789 | 789 | ||
790 | while (sysfs_attrib) { | 790 | while (sysfs_attrib) { |
791 | debugf1("%s() sysfs_attrib = %p\n",__func__, sysfs_attrib); | 791 | debugf4("%s() sysfs_attrib = %p\n",__func__, sysfs_attrib); |
792 | if (sysfs_attrib->grp) { | 792 | if (sysfs_attrib->grp) { |
793 | struct mcidev_sysfs_group_kobj *grp_kobj; | 793 | struct mcidev_sysfs_group_kobj *grp_kobj; |
794 | 794 | ||
@@ -818,7 +818,7 @@ static int edac_create_mci_instance_attributes(struct mem_ctl_info *mci, | |||
818 | if (err < 0) | 818 | if (err < 0) |
819 | return err; | 819 | return err; |
820 | } else if (sysfs_attrib->attr.name) { | 820 | } else if (sysfs_attrib->attr.name) { |
821 | debugf0("%s() file %s\n", __func__, | 821 | debugf4("%s() file %s\n", __func__, |
822 | sysfs_attrib->attr.name); | 822 | sysfs_attrib->attr.name); |
823 | 823 | ||
824 | err = sysfs_create_file(kobj, &sysfs_attrib->attr); | 824 | err = sysfs_create_file(kobj, &sysfs_attrib->attr); |
@@ -853,26 +853,26 @@ static void edac_remove_mci_instance_attributes(struct mem_ctl_info *mci, | |||
853 | * First remove all the attributes | 853 | * First remove all the attributes |
854 | */ | 854 | */ |
855 | while (sysfs_attrib) { | 855 | while (sysfs_attrib) { |
856 | debugf1("%s() sysfs_attrib = %p\n",__func__, sysfs_attrib); | 856 | debugf4("%s() sysfs_attrib = %p\n",__func__, sysfs_attrib); |
857 | if (sysfs_attrib->grp) { | 857 | if (sysfs_attrib->grp) { |
858 | debugf1("%s() seeking for group %s\n", | 858 | debugf4("%s() seeking for group %s\n", |
859 | __func__, sysfs_attrib->grp->name); | 859 | __func__, sysfs_attrib->grp->name); |
860 | list_for_each_entry(grp_kobj, | 860 | list_for_each_entry(grp_kobj, |
861 | &mci->grp_kobj_list, list) { | 861 | &mci->grp_kobj_list, list) { |
862 | debugf1("%s() grp_kobj->grp = %p\n",__func__, grp_kobj->grp); | 862 | debugf4("%s() grp_kobj->grp = %p\n",__func__, grp_kobj->grp); |
863 | if (grp_kobj->grp == sysfs_attrib->grp) { | 863 | if (grp_kobj->grp == sysfs_attrib->grp) { |
864 | edac_remove_mci_instance_attributes(mci, | 864 | edac_remove_mci_instance_attributes(mci, |
865 | grp_kobj->grp->mcidev_attr, | 865 | grp_kobj->grp->mcidev_attr, |
866 | &grp_kobj->kobj, count + 1); | 866 | &grp_kobj->kobj, count + 1); |
867 | debugf0("%s() group %s\n", __func__, | 867 | debugf4("%s() group %s\n", __func__, |
868 | sysfs_attrib->grp->name); | 868 | sysfs_attrib->grp->name); |
869 | kobject_put(&grp_kobj->kobj); | 869 | kobject_put(&grp_kobj->kobj); |
870 | } | 870 | } |
871 | } | 871 | } |
872 | debugf1("%s() end of seeking for group %s\n", | 872 | debugf4("%s() end of seeking for group %s\n", |
873 | __func__, sysfs_attrib->grp->name); | 873 | __func__, sysfs_attrib->grp->name); |
874 | } else if (sysfs_attrib->attr.name) { | 874 | } else if (sysfs_attrib->attr.name) { |
875 | debugf0("%s() file %s\n", __func__, | 875 | debugf4("%s() file %s\n", __func__, |
876 | sysfs_attrib->attr.name); | 876 | sysfs_attrib->attr.name); |
877 | sysfs_remove_file(kobj, &sysfs_attrib->attr); | 877 | sysfs_remove_file(kobj, &sysfs_attrib->attr); |
878 | } else | 878 | } else |
@@ -979,7 +979,7 @@ void edac_remove_sysfs_mci_device(struct mem_ctl_info *mci) | |||
979 | debugf0("%s()\n", __func__); | 979 | debugf0("%s()\n", __func__); |
980 | 980 | ||
981 | /* remove all csrow kobjects */ | 981 | /* remove all csrow kobjects */ |
982 | debugf0("%s() unregister this mci kobj\n", __func__); | 982 | debugf4("%s() unregister this mci kobj\n", __func__); |
983 | for (i = 0; i < mci->nr_csrows; i++) { | 983 | for (i = 0; i < mci->nr_csrows; i++) { |
984 | if (mci->csrows[i].nr_pages > 0) { | 984 | if (mci->csrows[i].nr_pages > 0) { |
985 | debugf0("%s() unreg csrow-%d\n", __func__, i); | 985 | debugf0("%s() unreg csrow-%d\n", __func__, i); |
@@ -989,18 +989,18 @@ void edac_remove_sysfs_mci_device(struct mem_ctl_info *mci) | |||
989 | 989 | ||
990 | /* remove this mci instance's attributes */ | 990 | /* remove this mci instance's attributes */ |
991 | if (mci->mc_driver_sysfs_attributes) { | 991 | if (mci->mc_driver_sysfs_attributes) { |
992 | debugf0("%s() unregister mci private attributes\n", __func__); | 992 | debugf4("%s() unregister mci private attributes\n", __func__); |
993 | edac_remove_mci_instance_attributes(mci, | 993 | edac_remove_mci_instance_attributes(mci, |
994 | mci->mc_driver_sysfs_attributes, | 994 | mci->mc_driver_sysfs_attributes, |
995 | &mci->edac_mci_kobj, 0); | 995 | &mci->edac_mci_kobj, 0); |
996 | } | 996 | } |
997 | 997 | ||
998 | /* remove the symlink */ | 998 | /* remove the symlink */ |
999 | debugf0("%s() remove_link\n", __func__); | 999 | debugf4("%s() remove_link\n", __func__); |
1000 | sysfs_remove_link(&mci->edac_mci_kobj, EDAC_DEVICE_SYMLINK); | 1000 | sysfs_remove_link(&mci->edac_mci_kobj, EDAC_DEVICE_SYMLINK); |
1001 | 1001 | ||
1002 | /* unregister this instance's kobject */ | 1002 | /* unregister this instance's kobject */ |
1003 | debugf0("%s() remove_mci_instance\n", __func__); | 1003 | debugf4("%s() remove_mci_instance\n", __func__); |
1004 | kobject_put(&mci->edac_mci_kobj); | 1004 | kobject_put(&mci->edac_mci_kobj); |
1005 | } | 1005 | } |
1006 | 1006 | ||
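These hunks only demote chatty messages from debugf0/debugf1 to debugf4, i.e. they now require a higher debug verbosity to appear. A simplified sketch of the level gate behind EDAC's debugfN macros; edac_debug_level here is a stand-in for the real module parameter in the EDAC core:

```c
#include <stdio.h>

static int edac_debug_level = 2;	/* stand-in for the module param */

#define edac_debug_printk(level, fmt, ...)				\
	do {								\
		if ((level) <= edac_debug_level)			\
			printf("EDAC DEBUG: " fmt, ##__VA_ARGS__);	\
	} while (0)

#define debugf0(fmt, ...) edac_debug_printk(0, fmt, ##__VA_ARGS__)
#define debugf4(fmt, ...) edac_debug_printk(4, fmt, ##__VA_ARGS__)

int main(void)
{
	debugf0("always shown at level >= 0\n");
	debugf4("only shown when edac_debug_level >= 4\n"); /* filtered here */
	return 0;
}
```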
diff --git a/drivers/edac/mce_amd.c b/drivers/edac/mce_amd.c index f6cf73d93359..795cfbc0bf50 100644 --- a/drivers/edac/mce_amd.c +++ b/drivers/edac/mce_amd.c | |||
@@ -594,6 +594,7 @@ static bool nb_noop_mce(u16 ec, u8 xec) | |||
594 | 594 | ||
595 | void amd_decode_nb_mce(int node_id, struct mce *m, u32 nbcfg) | 595 | void amd_decode_nb_mce(int node_id, struct mce *m, u32 nbcfg) |
596 | { | 596 | { |
597 | struct cpuinfo_x86 *c = &boot_cpu_data; | ||
597 | u16 ec = EC(m->status); | 598 | u16 ec = EC(m->status); |
598 | u8 xec = XEC(m->status, 0x1f); | 599 | u8 xec = XEC(m->status, 0x1f); |
599 | u32 nbsh = (u32)(m->status >> 32); | 600 | u32 nbsh = (u32)(m->status >> 32); |
@@ -602,9 +603,8 @@ void amd_decode_nb_mce(int node_id, struct mce *m, u32 nbcfg) | |||
602 | pr_emerg(HW_ERR "Northbridge Error (node %d", node_id); | 603 | pr_emerg(HW_ERR "Northbridge Error (node %d", node_id); |
603 | 604 | ||
604 | /* F10h, revD can disable ErrCpu[3:0] through ErrCpuVal */ | 605 | /* F10h, revD can disable ErrCpu[3:0] through ErrCpuVal */ |
605 | if ((boot_cpu_data.x86 == 0x10) && | 606 | if (c->x86 == 0x10 && c->x86_model > 7) { |
606 | (boot_cpu_data.x86_model > 7)) { | 607 | if (nbsh & NBSH_ERR_CPU_VAL) |
607 | if (nbsh & K8_NBSH_ERR_CPU_VAL) | ||
608 | core = nbsh & nb_err_cpumask; | 608 | core = nbsh & nb_err_cpumask; |
609 | } else { | 609 | } else { |
610 | u8 assoc_cpus = nbsh & nb_err_cpumask; | 610 | u8 assoc_cpus = nbsh & nb_err_cpumask; |
@@ -646,7 +646,7 @@ void amd_decode_nb_mce(int node_id, struct mce *m, u32 nbcfg) | |||
646 | if (!fam_ops->nb_mce(ec, xec)) | 646 | if (!fam_ops->nb_mce(ec, xec)) |
647 | goto wrong_nb_mce; | 647 | goto wrong_nb_mce; |
648 | 648 | ||
649 | if (boot_cpu_data.x86 == 0xf || boot_cpu_data.x86 == 0x10) | 649 | if (c->x86 == 0xf || c->x86 == 0x10 || c->x86 == 0x15) |
650 | if ((xec == 0x8 || xec == 0x0) && nb_bus_decoder) | 650 | if ((xec == 0x8 || xec == 0x0) && nb_bus_decoder) |
651 | nb_bus_decoder(node_id, m, nbcfg); | 651 | nb_bus_decoder(node_id, m, nbcfg); |
652 | 652 | ||
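The decode path now caches boot_cpu_data and tests the renamed NBSH_ERR_CPU_VAL bit before pulling the reporting core out of the status high half. A minimal reconstruction of that step with an invented MCi_STATUS value (nb_err_cpumask assumed to be 0xf on these families):

```c
#include <stdio.h>
#include <stdint.h>

#define BIT(n)           (1U << (n))
#define NBSH_ERR_CPU_VAL BIT(24)

int main(void)
{
	/* Invented MCi_STATUS: high half with ErrCpuVal set and core 2. */
	uint64_t status = ((uint64_t)(NBSH_ERR_CPU_VAL | 0x2)) << 32;
	uint32_t nbsh = (uint32_t)(status >> 32);
	uint32_t nb_err_cpumask = 0xf;
	int core = -1;

	if (nbsh & NBSH_ERR_CPU_VAL)	/* F10h revD+ path from the hunk */
		core = nbsh & nb_err_cpumask;

	printf("core = %d\n", core);
	return 0;
}
```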
diff --git a/drivers/edac/mce_amd.h b/drivers/edac/mce_amd.h index 45dda47173f2..795a3206acf5 100644 --- a/drivers/edac/mce_amd.h +++ b/drivers/edac/mce_amd.h | |||
@@ -31,19 +31,10 @@ | |||
31 | #define R4(x) (((x) >> 4) & 0xf) | 31 | #define R4(x) (((x) >> 4) & 0xf) |
32 | #define R4_MSG(x) ((R4(x) < 9) ? rrrr_msgs[R4(x)] : "Wrong R4!") | 32 | #define R4_MSG(x) ((R4(x) < 9) ? rrrr_msgs[R4(x)] : "Wrong R4!") |
33 | 33 | ||
34 | #define K8_NBSH 0x4C | 34 | /* |
35 | 35 | * F3x4C bits (MCi_STATUS' high half) | |
36 | #define K8_NBSH_VALID_BIT BIT(31) | 36 | */ |
37 | #define K8_NBSH_OVERFLOW BIT(30) | 37 | #define NBSH_ERR_CPU_VAL BIT(24) |
38 | #define K8_NBSH_UC_ERR BIT(29) | ||
39 | #define K8_NBSH_ERR_EN BIT(28) | ||
40 | #define K8_NBSH_MISCV BIT(27) | ||
41 | #define K8_NBSH_VALID_ERROR_ADDR BIT(26) | ||
42 | #define K8_NBSH_PCC BIT(25) | ||
43 | #define K8_NBSH_ERR_CPU_VAL BIT(24) | ||
44 | #define K8_NBSH_CECC BIT(14) | ||
45 | #define K8_NBSH_UECC BIT(13) | ||
46 | #define K8_NBSH_ERR_SCRUBER BIT(8) | ||
47 | 38 | ||
48 | enum tt_ids { | 39 | enum tt_ids { |
49 | TT_INSTR = 0, | 40 | TT_INSTR = 0, |
@@ -86,17 +77,6 @@ extern const char *to_msgs[]; | |||
86 | extern const char *ii_msgs[]; | 77 | extern const char *ii_msgs[]; |
87 | 78 | ||
88 | /* | 79 | /* |
89 | * relevant NB regs | ||
90 | */ | ||
91 | struct err_regs { | ||
92 | u32 nbcfg; | ||
93 | u32 nbsh; | ||
94 | u32 nbsl; | ||
95 | u32 nbeah; | ||
96 | u32 nbeal; | ||
97 | }; | ||
98 | |||
99 | /* | ||
100 | * per-family decoder ops | 80 | * per-family decoder ops |
101 | */ | 81 | */ |
102 | struct amd_decoder_ops { | 82 | struct amd_decoder_ops { |