author    Glenn Elliott <gelliott@cs.unc.edu>  2012-03-04 19:47:13 -0500
committer Glenn Elliott <gelliott@cs.unc.edu>  2012-03-04 19:47:13 -0500
commit    c71c03bda1e86c9d5198c5d83f712e695c4f2a1e (patch)
tree      ecb166cb3e2b7e2adb3b5e292245fefd23381ac8 /drivers/edac/amd64_edac.c
parent    ea53c912f8a86a8567697115b6a0d8152beee5c8 (diff)
parent    6a00f206debf8a5c8899055726ad127dbeeed098 (diff)
Merge branch 'mpi-master' into wip-k-fmlp
Conflicts:
litmus/sched_cedf.c
Diffstat (limited to 'drivers/edac/amd64_edac.c')
-rw-r--r--  drivers/edac/amd64_edac.c  2196
1 file changed, 1016 insertions, 1180 deletions
diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c
index e7d5d6b5dcf6..9a8bebcf6b17 100644
--- a/drivers/edac/amd64_edac.c
+++ b/drivers/edac/amd64_edac.c
@@ -1,5 +1,5 @@ | |||
1 | #include "amd64_edac.h" | 1 | #include "amd64_edac.h" |
2 | #include <asm/k8.h> | 2 | #include <asm/amd_nb.h> |
3 | 3 | ||
4 | static struct edac_pci_ctl_info *amd64_ctl_pci; | 4 | static struct edac_pci_ctl_info *amd64_ctl_pci; |
5 | 5 | ||
@@ -15,55 +15,14 @@ module_param(ecc_enable_override, int, 0644); | |||
15 | 15 | ||
16 | static struct msr __percpu *msrs; | 16 | static struct msr __percpu *msrs; |
17 | 17 | ||
18 | /* Lookup table for all possible MC control instances */ | ||
19 | struct amd64_pvt; | ||
20 | static struct mem_ctl_info *mci_lookup[EDAC_MAX_NUMNODES]; | ||
21 | static struct amd64_pvt *pvt_lookup[EDAC_MAX_NUMNODES]; | ||
22 | |||
23 | /* | 18 | /* |
24 | * Address to DRAM bank mapping: see F2x80 for K8 and F2x[1,0]80 for Fam10 and | 19 | * count successfully initialized driver instances for setup_pci_device() |
25 | * later. | ||
26 | */ | 20 | */ |
27 | static int ddr2_dbam_revCG[] = { | 21 | static atomic_t drv_instances = ATOMIC_INIT(0); |
28 | [0] = 32, | ||
29 | [1] = 64, | ||
30 | [2] = 128, | ||
31 | [3] = 256, | ||
32 | [4] = 512, | ||
33 | [5] = 1024, | ||
34 | [6] = 2048, | ||
35 | }; | ||
36 | |||
37 | static int ddr2_dbam_revD[] = { | ||
38 | [0] = 32, | ||
39 | [1] = 64, | ||
40 | [2 ... 3] = 128, | ||
41 | [4] = 256, | ||
42 | [5] = 512, | ||
43 | [6] = 256, | ||
44 | [7] = 512, | ||
45 | [8 ... 9] = 1024, | ||
46 | [10] = 2048, | ||
47 | }; | ||
48 | 22 | ||
49 | static int ddr2_dbam[] = { [0] = 128, | 23 | /* Per-node driver instances */ |
50 | [1] = 256, | 24 | static struct mem_ctl_info **mcis; |
51 | [2 ... 4] = 512, | 25 | static struct ecc_settings **ecc_stngs; |
52 | [5 ... 6] = 1024, | ||
53 | [7 ... 8] = 2048, | ||
54 | [9 ... 10] = 4096, | ||
55 | [11] = 8192, | ||
56 | }; | ||
57 | |||
58 | static int ddr3_dbam[] = { [0] = -1, | ||
59 | [1] = 256, | ||
60 | [2] = 512, | ||
61 | [3 ... 4] = -1, | ||
62 | [5 ... 6] = 1024, | ||
63 | [7 ... 8] = 2048, | ||
64 | [9 ... 10] = 4096, | ||
65 | [11] = 8192, | ||
66 | }; | ||
67 | 26 | ||
68 | /* | 27 | /* |
69 | * Valid scrub rates for the K8 hardware memory scrubber. We map the scrubbing | 28 | * Valid scrub rates for the K8 hardware memory scrubber. We map the scrubbing |
@@ -72,8 +31,10 @@ static int ddr3_dbam[] = { [0] = -1, | |||
72 | * | 31 | * |
73 | *FIXME: Produce a better mapping/linearisation. | 32 | *FIXME: Produce a better mapping/linearisation. |
74 | */ | 33 | */ |
75 | 34 | struct scrubrate { | |
76 | struct scrubrate scrubrates[] = { | 35 | u32 scrubval; /* bit pattern for scrub rate */ |
36 | u32 bandwidth; /* bandwidth consumed (bytes/sec) */ | ||
37 | } scrubrates[] = { | ||
77 | { 0x01, 1600000000UL}, | 38 | { 0x01, 1600000000UL}, |
78 | { 0x02, 800000000UL}, | 39 | { 0x02, 800000000UL}, |
79 | { 0x03, 400000000UL}, | 40 | { 0x03, 400000000UL}, |
@@ -99,6 +60,79 @@ struct scrubrate scrubrates[] = { | |||
99 | { 0x00, 0UL}, /* scrubbing off */ | 60 | { 0x00, 0UL}, /* scrubbing off */ |
100 | }; | 61 | }; |
101 | 62 | ||
63 | static int __amd64_read_pci_cfg_dword(struct pci_dev *pdev, int offset, | ||
64 | u32 *val, const char *func) | ||
65 | { | ||
66 | int err = 0; | ||
67 | |||
68 | err = pci_read_config_dword(pdev, offset, val); | ||
69 | if (err) | ||
70 | amd64_warn("%s: error reading F%dx%03x.\n", | ||
71 | func, PCI_FUNC(pdev->devfn), offset); | ||
72 | |||
73 | return err; | ||
74 | } | ||
75 | |||
76 | int __amd64_write_pci_cfg_dword(struct pci_dev *pdev, int offset, | ||
77 | u32 val, const char *func) | ||
78 | { | ||
79 | int err = 0; | ||
80 | |||
81 | err = pci_write_config_dword(pdev, offset, val); | ||
82 | if (err) | ||
83 | amd64_warn("%s: error writing to F%dx%03x.\n", | ||
84 | func, PCI_FUNC(pdev->devfn), offset); | ||
85 | |||
86 | return err; | ||
87 | } | ||
88 | |||
89 | /* | ||
90 | * | ||
91 | * Depending on the family, F2 DCT reads need special handling: | ||
92 | * | ||
93 | * K8: has a single DCT only | ||
94 | * | ||
95 | * F10h: each DCT has its own set of regs | ||
96 | * DCT0 -> F2x040.. | ||
97 | * DCT1 -> F2x140.. | ||
98 | * | ||
99 | * F15h: we select which DCT we access using F1x10C[DctCfgSel] | ||
100 | * | ||
101 | */ | ||
102 | static int k8_read_dct_pci_cfg(struct amd64_pvt *pvt, int addr, u32 *val, | ||
103 | const char *func) | ||
104 | { | ||
105 | if (addr >= 0x100) | ||
106 | return -EINVAL; | ||
107 | |||
108 | return __amd64_read_pci_cfg_dword(pvt->F2, addr, val, func); | ||
109 | } | ||
110 | |||
111 | static int f10_read_dct_pci_cfg(struct amd64_pvt *pvt, int addr, u32 *val, | ||
112 | const char *func) | ||
113 | { | ||
114 | return __amd64_read_pci_cfg_dword(pvt->F2, addr, val, func); | ||
115 | } | ||
116 | |||
117 | static int f15_read_dct_pci_cfg(struct amd64_pvt *pvt, int addr, u32 *val, | ||
118 | const char *func) | ||
119 | { | ||
120 | u32 reg = 0; | ||
121 | u8 dct = 0; | ||
122 | |||
123 | if (addr >= 0x140 && addr <= 0x1a0) { | ||
124 | dct = 1; | ||
125 | addr -= 0x100; | ||
126 | } | ||
127 | |||
128 | amd64_read_pci_cfg(pvt->F1, DCT_CFG_SEL, ®); | ||
129 | reg &= 0xfffffffe; | ||
130 | reg |= dct; | ||
131 | amd64_write_pci_cfg(pvt->F1, DCT_CFG_SEL, reg); | ||
132 | |||
133 | return __amd64_read_pci_cfg_dword(pvt->F2, addr, val, func); | ||
134 | } | ||
135 | |||
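To make the comment above concrete, here is how one and the same DCT1 register read plays out on each family; this is an illustrative walk-through only, and 0x140 as the first DCT1 register offset is taken from the F10h layout referenced in the comment.

/*
 * Reading DCT1's first base register, nominally "F2x140":
 *
 *   K8:   k8_read_dct_pci_cfg()  - addr 0x140 >= 0x100, so the call
 *                                  returns -EINVAL; K8 has a single DCT
 *                                  and no 0x1xx register window.
 *
 *   F10h: f10_read_dct_pci_cfg() - reads F2x140 directly, since DCT1
 *                                  has its own copy of the registers.
 *
 *   F15h: f15_read_dct_pci_cfg() - 0x140 falls in [0x140, 0x1a0], so
 *                                  dct = 1 and addr becomes 0x040; the
 *                                  DctCfgSel bit in F1x10C is set to 1,
 *                                  then the read of F2x040 is routed to
 *                                  DCT1 by the hardware.
 */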
102 | /* | 136 | /* |
103 | * Memory scrubber control interface. For K8, memory scrubbing is handled by | 137 | * Memory scrubber control interface. For K8, memory scrubbing is handled by |
104 | * hardware and can involve L2 cache, dcache as well as the main memory. With | 138 | * hardware and can involve L2 cache, dcache as well as the main memory. With |
@@ -117,8 +151,7 @@ struct scrubrate scrubrates[] = { | |||
117 | * scan the scrub rate mapping table for a close or matching bandwidth value to | 151 | * scan the scrub rate mapping table for a close or matching bandwidth value to |
118 | * issue. If requested is too big, then use last maximum value found. | 152 | * issue. If requested is too big, then use last maximum value found. |
119 | */ | 153 | */ |
120 | static int amd64_search_set_scrub_rate(struct pci_dev *ctl, u32 new_bw, | 154 | static int __amd64_set_scrub_rate(struct pci_dev *ctl, u32 new_bw, u32 min_rate) |
121 | u32 min_scrubrate) | ||
122 | { | 155 | { |
123 | u32 scrubval; | 156 | u32 scrubval; |
124 | int i; | 157 | int i; |
@@ -134,7 +167,7 @@ static int amd64_search_set_scrub_rate(struct pci_dev *ctl, u32 new_bw, | |||
134 | * skip scrub rates which aren't recommended | 167 | * skip scrub rates which aren't recommended |
135 | * (see F10 BKDG, F3x58) | 168 | * (see F10 BKDG, F3x58) |
136 | */ | 169 | */ |
137 | if (scrubrates[i].scrubval < min_scrubrate) | 170 | if (scrubrates[i].scrubval < min_rate) |
138 | continue; | 171 | continue; |
139 | 172 | ||
140 | if (scrubrates[i].bandwidth <= new_bw) | 173 | if (scrubrates[i].bandwidth <= new_bw) |
@@ -148,123 +181,53 @@ static int amd64_search_set_scrub_rate(struct pci_dev *ctl, u32 new_bw, | |||
148 | } | 181 | } |
149 | 182 | ||
150 | scrubval = scrubrates[i].scrubval; | 183 | scrubval = scrubrates[i].scrubval; |
151 | if (scrubval) | ||
152 | edac_printk(KERN_DEBUG, EDAC_MC, | ||
153 | "Setting scrub rate bandwidth: %u\n", | ||
154 | scrubrates[i].bandwidth); | ||
155 | else | ||
156 | edac_printk(KERN_DEBUG, EDAC_MC, "Turning scrubbing off.\n"); | ||
157 | 184 | ||
158 | pci_write_bits32(ctl, K8_SCRCTRL, scrubval, 0x001F); | 185 | pci_write_bits32(ctl, SCRCTRL, scrubval, 0x001F); |
186 | |||
187 | if (scrubval) | ||
188 | return scrubrates[i].bandwidth; | ||
159 | 189 | ||
160 | return 0; | 190 | return 0; |
161 | } | 191 | } |
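For orientation, a minimal standalone model of the scan the comment above describes, built only from the scrubrate entries visible in this hunk; the driver's table is longer and its end-of-table handling is only sketched here.

#include <stdio.h>

struct rate { unsigned scrubval; unsigned long bandwidth; };

static const struct rate rates[] = {
        { 0x01, 1600000000UL },
        { 0x02,  800000000UL },
        { 0x03,  400000000UL },
        { 0x00,          0UL },         /* scrubbing off, terminates the scan */
};

/* walk from fastest to slowest, skip rates below min_rate, take the first
 * entry whose bandwidth fits under the request */
static unsigned pick_scrubval(unsigned long new_bw, unsigned min_rate)
{
        unsigned i;

        for (i = 0; i < sizeof(rates) / sizeof(rates[0]) - 1; i++) {
                if (rates[i].scrubval < min_rate)
                        continue;
                if (rates[i].bandwidth <= new_bw)
                        break;
        }
        return rates[i].scrubval;
}

int main(void)
{
        /* with min_rate 0 (the K8 case), 900 MB/s skips the 1.6 GB/s entry
         * and lands on scrubval 0x02 (800 MB/s) */
        printf("scrubval = 0x%02x\n", pick_scrubval(900000000UL, 0));
        return 0;
}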
162 | 192 | ||
163 | static int amd64_set_scrub_rate(struct mem_ctl_info *mci, u32 bandwidth) | 193 | static int amd64_set_scrub_rate(struct mem_ctl_info *mci, u32 bw) |
164 | { | 194 | { |
165 | struct amd64_pvt *pvt = mci->pvt_info; | 195 | struct amd64_pvt *pvt = mci->pvt_info; |
166 | u32 min_scrubrate = 0x0; | 196 | u32 min_scrubrate = 0x5; |
167 | 197 | ||
168 | switch (boot_cpu_data.x86) { | 198 | if (boot_cpu_data.x86 == 0xf) |
169 | case 0xf: | 199 | min_scrubrate = 0x0; |
170 | min_scrubrate = K8_MIN_SCRUB_RATE_BITS; | ||
171 | break; | ||
172 | case 0x10: | ||
173 | min_scrubrate = F10_MIN_SCRUB_RATE_BITS; | ||
174 | break; | ||
175 | case 0x11: | ||
176 | min_scrubrate = F11_MIN_SCRUB_RATE_BITS; | ||
177 | break; | ||
178 | 200 | ||
179 | default: | 201 | return __amd64_set_scrub_rate(pvt->F3, bw, min_scrubrate); |
180 | amd64_printk(KERN_ERR, "Unsupported family!\n"); | ||
181 | return -EINVAL; | ||
182 | } | ||
183 | return amd64_search_set_scrub_rate(pvt->misc_f3_ctl, bandwidth, | ||
184 | min_scrubrate); | ||
185 | } | 202 | } |
186 | 203 | ||
187 | static int amd64_get_scrub_rate(struct mem_ctl_info *mci, u32 *bw) | 204 | static int amd64_get_scrub_rate(struct mem_ctl_info *mci) |
188 | { | 205 | { |
189 | struct amd64_pvt *pvt = mci->pvt_info; | 206 | struct amd64_pvt *pvt = mci->pvt_info; |
190 | u32 scrubval = 0; | 207 | u32 scrubval = 0; |
191 | int status = -1, i; | 208 | int i, retval = -EINVAL; |
192 | 209 | ||
193 | amd64_read_pci_cfg(pvt->misc_f3_ctl, K8_SCRCTRL, &scrubval); | 210 | amd64_read_pci_cfg(pvt->F3, SCRCTRL, &scrubval); |
194 | 211 | ||
195 | scrubval = scrubval & 0x001F; | 212 | scrubval = scrubval & 0x001F; |
196 | 213 | ||
197 | edac_printk(KERN_DEBUG, EDAC_MC, | ||
198 | "pci-read, sdram scrub control value: %d \n", scrubval); | ||
199 | |||
200 | for (i = 0; i < ARRAY_SIZE(scrubrates); i++) { | 214 | for (i = 0; i < ARRAY_SIZE(scrubrates); i++) { |
201 | if (scrubrates[i].scrubval == scrubval) { | 215 | if (scrubrates[i].scrubval == scrubval) { |
202 | *bw = scrubrates[i].bandwidth; | 216 | retval = scrubrates[i].bandwidth; |
203 | status = 0; | ||
204 | break; | 217 | break; |
205 | } | 218 | } |
206 | } | 219 | } |
207 | 220 | return retval; | |
208 | return status; | ||
209 | } | ||
210 | |||
211 | /* Map from a CSROW entry to the mask entry that operates on it */ | ||
212 | static inline u32 amd64_map_to_dcs_mask(struct amd64_pvt *pvt, int csrow) | ||
213 | { | ||
214 | if (boot_cpu_data.x86 == 0xf && pvt->ext_model < K8_REV_F) | ||
215 | return csrow; | ||
216 | else | ||
217 | return csrow >> 1; | ||
218 | } | ||
219 | |||
220 | /* return the 'base' address the i'th CS entry of the 'dct' DRAM controller */ | ||
221 | static u32 amd64_get_dct_base(struct amd64_pvt *pvt, int dct, int csrow) | ||
222 | { | ||
223 | if (dct == 0) | ||
224 | return pvt->dcsb0[csrow]; | ||
225 | else | ||
226 | return pvt->dcsb1[csrow]; | ||
227 | } | 221 | } |
228 | 222 | ||
229 | /* | 223 | /* |
230 | * Return the 'mask' address the i'th CS entry. This function is needed because | 224 | * returns true if the SysAddr given by sys_addr matches the |
231 | * there number of DCSM registers on Rev E and prior vs Rev F and later is | 225 | * DRAM base/limit associated with node_id |
232 | * different. | ||
233 | */ | 226 | */ |
234 | static u32 amd64_get_dct_mask(struct amd64_pvt *pvt, int dct, int csrow) | 227 | static bool amd64_base_limit_match(struct amd64_pvt *pvt, u64 sys_addr, |
228 | unsigned nid) | ||
235 | { | 229 | { |
236 | if (dct == 0) | 230 | u64 addr; |
237 | return pvt->dcsm0[amd64_map_to_dcs_mask(pvt, csrow)]; | ||
238 | else | ||
239 | return pvt->dcsm1[amd64_map_to_dcs_mask(pvt, csrow)]; | ||
240 | } | ||
241 | |||
242 | |||
243 | /* | ||
244 | * In *base and *limit, pass back the full 40-bit base and limit physical | ||
245 | * addresses for the node given by node_id. This information is obtained from | ||
246 | * DRAM Base (section 3.4.4.1) and DRAM Limit (section 3.4.4.2) registers. The | ||
247 | * base and limit addresses are of type SysAddr, as defined at the start of | ||
248 | * section 3.4.4 (p. 70). They are the lowest and highest physical addresses | ||
249 | * in the address range they represent. | ||
250 | */ | ||
251 | static void amd64_get_base_and_limit(struct amd64_pvt *pvt, int node_id, | ||
252 | u64 *base, u64 *limit) | ||
253 | { | ||
254 | *base = pvt->dram_base[node_id]; | ||
255 | *limit = pvt->dram_limit[node_id]; | ||
256 | } | ||
257 | |||
258 | /* | ||
259 | * Return 1 if the SysAddr given by sys_addr matches the base/limit associated | ||
260 | * with node_id | ||
261 | */ | ||
262 | static int amd64_base_limit_match(struct amd64_pvt *pvt, | ||
263 | u64 sys_addr, int node_id) | ||
264 | { | ||
265 | u64 base, limit, addr; | ||
266 | |||
267 | amd64_get_base_and_limit(pvt, node_id, &base, &limit); | ||
268 | 231 | ||
269 | /* The K8 treats this as a 40-bit value. However, bits 63-40 will be | 232 | /* The K8 treats this as a 40-bit value. However, bits 63-40 will be |
270 | * all ones if the most significant implemented address bit is 1. | 233 | * all ones if the most significant implemented address bit is 1. |
@@ -274,7 +237,8 @@ static int amd64_base_limit_match(struct amd64_pvt *pvt, | |||
274 | */ | 237 | */ |
275 | addr = sys_addr & 0x000000ffffffffffull; | 238 | addr = sys_addr & 0x000000ffffffffffull; |
276 | 239 | ||
277 | return (addr >= base) && (addr <= limit); | 240 | return ((addr >= get_dram_base(pvt, nid)) && |
241 | (addr <= get_dram_limit(pvt, nid))); | ||
278 | } | 242 | } |
279 | 243 | ||
280 | /* | 244 | /* |
@@ -287,7 +251,7 @@ static struct mem_ctl_info *find_mc_by_sys_addr(struct mem_ctl_info *mci, | |||
287 | u64 sys_addr) | 251 | u64 sys_addr) |
288 | { | 252 | { |
289 | struct amd64_pvt *pvt; | 253 | struct amd64_pvt *pvt; |
290 | int node_id; | 254 | unsigned node_id; |
291 | u32 intlv_en, bits; | 255 | u32 intlv_en, bits; |
292 | 256 | ||
293 | /* | 257 | /* |
@@ -301,10 +265,10 @@ static struct mem_ctl_info *find_mc_by_sys_addr(struct mem_ctl_info *mci, | |||
301 | * registers. Therefore we arbitrarily choose to read it from the | 265 | * registers. Therefore we arbitrarily choose to read it from the |
302 | * register for node 0. | 266 | * register for node 0. |
303 | */ | 267 | */ |
304 | intlv_en = pvt->dram_IntlvEn[0]; | 268 | intlv_en = dram_intlv_en(pvt, 0); |
305 | 269 | ||
306 | if (intlv_en == 0) { | 270 | if (intlv_en == 0) { |
307 | for (node_id = 0; node_id < DRAM_REG_COUNT; node_id++) { | 271 | for (node_id = 0; node_id < DRAM_RANGES; node_id++) { |
308 | if (amd64_base_limit_match(pvt, sys_addr, node_id)) | 272 | if (amd64_base_limit_match(pvt, sys_addr, node_id)) |
309 | goto found; | 273 | goto found; |
310 | } | 274 | } |
@@ -314,34 +278,30 @@ static struct mem_ctl_info *find_mc_by_sys_addr(struct mem_ctl_info *mci, | |||
314 | if (unlikely((intlv_en != 0x01) && | 278 | if (unlikely((intlv_en != 0x01) && |
315 | (intlv_en != 0x03) && | 279 | (intlv_en != 0x03) && |
316 | (intlv_en != 0x07))) { | 280 | (intlv_en != 0x07))) { |
317 | amd64_printk(KERN_WARNING, "junk value of 0x%x extracted from " | 281 | amd64_warn("DRAM Base[IntlvEn] junk value: 0x%x, BIOS bug?\n", intlv_en); |
318 | "IntlvEn field of DRAM Base Register for node 0: " | ||
319 | "this probably indicates a BIOS bug.\n", intlv_en); | ||
320 | return NULL; | 282 | return NULL; |
321 | } | 283 | } |
322 | 284 | ||
323 | bits = (((u32) sys_addr) >> 12) & intlv_en; | 285 | bits = (((u32) sys_addr) >> 12) & intlv_en; |
324 | 286 | ||
325 | for (node_id = 0; ; ) { | 287 | for (node_id = 0; ; ) { |
326 | if ((pvt->dram_IntlvSel[node_id] & intlv_en) == bits) | 288 | if ((dram_intlv_sel(pvt, node_id) & intlv_en) == bits) |
327 | break; /* intlv_sel field matches */ | 289 | break; /* intlv_sel field matches */ |
328 | 290 | ||
329 | if (++node_id >= DRAM_REG_COUNT) | 291 | if (++node_id >= DRAM_RANGES) |
330 | goto err_no_match; | 292 | goto err_no_match; |
331 | } | 293 | } |
332 | 294 | ||
333 | /* sanity test for sys_addr */ | 295 | /* sanity test for sys_addr */ |
334 | if (unlikely(!amd64_base_limit_match(pvt, sys_addr, node_id))) { | 296 | if (unlikely(!amd64_base_limit_match(pvt, sys_addr, node_id))) { |
335 | amd64_printk(KERN_WARNING, | 297 | amd64_warn("%s: sys_addr 0x%llx falls outside base/limit address" |
336 | "%s(): sys_addr 0x%llx falls outside base/limit " | 298 | "range for node %d with node interleaving enabled.\n", |
337 | "address range for node %d with node interleaving " | 299 | __func__, sys_addr, node_id); |
338 | "enabled.\n", | ||
339 | __func__, sys_addr, node_id); | ||
340 | return NULL; | 300 | return NULL; |
341 | } | 301 | } |
342 | 302 | ||
343 | found: | 303 | found: |
344 | return edac_mc_find(node_id); | 304 | return edac_mc_find((int)node_id); |
345 | 305 | ||
346 | err_no_match: | 306 | err_no_match: |
347 | debugf2("sys_addr 0x%lx doesn't match any node\n", | 307 | debugf2("sys_addr 0x%lx doesn't match any node\n", |
@@ -351,37 +311,50 @@ err_no_match: | |||
351 | } | 311 | } |
352 | 312 | ||
353 | /* | 313 | /* |
354 | * Extract the DRAM CS base address from selected csrow register. | 314 | * compute the CS base address of the @csrow on the DRAM controller @dct. |
355 | */ | 315 | * For details see F2x[5C:40] in the processor's BKDG |
356 | static u64 base_from_dct_base(struct amd64_pvt *pvt, int csrow) | ||
357 | { | ||
358 | return ((u64) (amd64_get_dct_base(pvt, 0, csrow) & pvt->dcsb_base)) << | ||
359 | pvt->dcs_shift; | ||
360 | } | ||
361 | |||
362 | /* | ||
363 | * Extract the mask from the dcsb0[csrow] entry in a CPU revision-specific way. | ||
364 | */ | 316 | */ |
365 | static u64 mask_from_dct_mask(struct amd64_pvt *pvt, int csrow) | 317 | static void get_cs_base_and_mask(struct amd64_pvt *pvt, int csrow, u8 dct, |
318 | u64 *base, u64 *mask) | ||
366 | { | 319 | { |
367 | u64 dcsm_bits, other_bits; | 320 | u64 csbase, csmask, base_bits, mask_bits; |
368 | u64 mask; | 321 | u8 addr_shift; |
369 | 322 | ||
370 | /* Extract bits from DRAM CS Mask. */ | 323 | if (boot_cpu_data.x86 == 0xf && pvt->ext_model < K8_REV_F) { |
371 | dcsm_bits = amd64_get_dct_mask(pvt, 0, csrow) & pvt->dcsm_mask; | 324 | csbase = pvt->csels[dct].csbases[csrow]; |
325 | csmask = pvt->csels[dct].csmasks[csrow]; | ||
326 | base_bits = GENMASK(21, 31) | GENMASK(9, 15); | ||
327 | mask_bits = GENMASK(21, 29) | GENMASK(9, 15); | ||
328 | addr_shift = 4; | ||
329 | } else { | ||
330 | csbase = pvt->csels[dct].csbases[csrow]; | ||
331 | csmask = pvt->csels[dct].csmasks[csrow >> 1]; | ||
332 | addr_shift = 8; | ||
372 | 333 | ||
373 | other_bits = pvt->dcsm_mask; | 334 | if (boot_cpu_data.x86 == 0x15) |
374 | other_bits = ~(other_bits << pvt->dcs_shift); | 335 | base_bits = mask_bits = GENMASK(19,30) | GENMASK(5,13); |
336 | else | ||
337 | base_bits = mask_bits = GENMASK(19,28) | GENMASK(5,13); | ||
338 | } | ||
375 | 339 | ||
376 | /* | 340 | *base = (csbase & base_bits) << addr_shift; |
377 | * The extracted bits from DCSM belong in the spaces represented by | ||
378 | * the cleared bits in other_bits. | ||
379 | */ | ||
380 | mask = (dcsm_bits << pvt->dcs_shift) | other_bits; | ||
381 | 341 | ||
382 | return mask; | 342 | *mask = ~0ULL; |
343 | /* poke holes for the csmask */ | ||
344 | *mask &= ~(mask_bits << addr_shift); | ||
345 | /* OR them in */ | ||
346 | *mask |= (csmask & mask_bits) << addr_shift; | ||
383 | } | 347 | } |
384 | 348 | ||
349 | #define for_each_chip_select(i, dct, pvt) \ | ||
350 | for (i = 0; i < pvt->csels[dct].b_cnt; i++) | ||
351 | |||
352 | #define chip_select_base(i, dct, pvt) \ | ||
353 | pvt->csels[dct].csbases[i] | ||
354 | |||
355 | #define for_each_chip_select_mask(i, dct, pvt) \ | ||
356 | for (i = 0; i < pvt->csels[dct].m_cnt; i++) | ||
357 | |||
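GENMASK() in this driver takes the low bit first (a later hunk uses GENMASK(0, 39) where the old code had 0xffffffffffull), so GENMASK(19, 28) above selects bits 28..19. A small worked example of the rev-F/F10h branch of get_cs_base_and_mask(), with a made-up DCSB value and a local stand-in for the macro so it builds on its own:

#include <stdio.h>

/* stand-in for the driver's low-bit-first helper */
#define GENMASK(lo, hi) (((1ULL << ((hi) - (lo) + 1)) - 1) << (lo))

int main(void)
{
        unsigned long long base_bits = GENMASK(19, 28) | GENMASK(5, 13);
        unsigned int csbase = 0x00100001;     /* hypothetical DCSB: bit 0 = CSEnable, bit 20 set */
        unsigned long long base = (csbase & base_bits) << 8;   /* addr_shift = 8 */

        /* DCSB bits [28:19] land in base address bits [36:27]; bit 20
         * becomes bit 28, so this chip select starts at 256MB */
        printf("base = 0x%llx\n", base);      /* prints 0x10000000 */
        return 0;
}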
385 | /* | 358 | /* |
386 | * @input_addr is an InputAddr associated with the node given by mci. Return the | 359 | * @input_addr is an InputAddr associated with the node given by mci. Return the |
387 | * csrow that input_addr maps to, or -1 on failure (no csrow claims input_addr). | 360 | * csrow that input_addr maps to, or -1 on failure (no csrow claims input_addr). |
@@ -394,19 +367,13 @@ static int input_addr_to_csrow(struct mem_ctl_info *mci, u64 input_addr) | |||
394 | 367 | ||
395 | pvt = mci->pvt_info; | 368 | pvt = mci->pvt_info; |
396 | 369 | ||
397 | /* | 370 | for_each_chip_select(csrow, 0, pvt) { |
398 | * Here we use the DRAM CS Base and DRAM CS Mask registers. For each CS | 371 | if (!csrow_enabled(csrow, 0, pvt)) |
399 | * base/mask register pair, test the condition shown near the start of | ||
400 | * section 3.5.4 (p. 84, BKDG #26094, K8, revA-E). | ||
401 | */ | ||
402 | for (csrow = 0; csrow < pvt->cs_count; csrow++) { | ||
403 | |||
404 | /* This DRAM chip select is disabled on this node */ | ||
405 | if ((pvt->dcsb0[csrow] & K8_DCSB_CS_ENABLE) == 0) | ||
406 | continue; | 372 | continue; |
407 | 373 | ||
408 | base = base_from_dct_base(pvt, csrow); | 374 | get_cs_base_and_mask(pvt, csrow, 0, &base, &mask); |
409 | mask = ~mask_from_dct_mask(pvt, csrow); | 375 | |
376 | mask = ~mask; | ||
410 | 377 | ||
411 | if ((input_addr & mask) == (base & mask)) { | 378 | if ((input_addr & mask) == (base & mask)) { |
412 | debugf2("InputAddr 0x%lx matches csrow %d (node %d)\n", | 379 | debugf2("InputAddr 0x%lx matches csrow %d (node %d)\n", |
@@ -416,7 +383,6 @@ static int input_addr_to_csrow(struct mem_ctl_info *mci, u64 input_addr) | |||
416 | return csrow; | 383 | return csrow; |
417 | } | 384 | } |
418 | } | 385 | } |
419 | |||
420 | debugf2("no matching csrow for InputAddr 0x%lx (MC node %d)\n", | 386 | debugf2("no matching csrow for InputAddr 0x%lx (MC node %d)\n", |
421 | (unsigned long)input_addr, pvt->mc_node_id); | 387 | (unsigned long)input_addr, pvt->mc_node_id); |
422 | 388 | ||
@@ -424,19 +390,6 @@ static int input_addr_to_csrow(struct mem_ctl_info *mci, u64 input_addr) | |||
424 | } | 390 | } |
425 | 391 | ||
426 | /* | 392 | /* |
427 | * Return the base value defined by the DRAM Base register for the node | ||
428 | * represented by mci. This function returns the full 40-bit value despite the | ||
429 | * fact that the register only stores bits 39-24 of the value. See section | ||
430 | * 3.4.4.1 (BKDG #26094, K8, revA-E) | ||
431 | */ | ||
432 | static inline u64 get_dram_base(struct mem_ctl_info *mci) | ||
433 | { | ||
434 | struct amd64_pvt *pvt = mci->pvt_info; | ||
435 | |||
436 | return pvt->dram_base[pvt->mc_node_id]; | ||
437 | } | ||
438 | |||
439 | /* | ||
440 | * Obtain info from the DRAM Hole Address Register (section 3.4.8, pub #26094) | 393 | * Obtain info from the DRAM Hole Address Register (section 3.4.8, pub #26094) |
441 | * for the node represented by mci. Info is passed back in *hole_base, | 394 | * for the node represented by mci. Info is passed back in *hole_base, |
442 | * *hole_offset, and *hole_size. Function returns 0 if info is valid or 1 if | 395 | * *hole_offset, and *hole_size. Function returns 0 if info is valid or 1 if |
@@ -465,14 +418,13 @@ int amd64_get_dram_hole_info(struct mem_ctl_info *mci, u64 *hole_base, | |||
465 | return 1; | 418 | return 1; |
466 | } | 419 | } |
467 | 420 | ||
468 | /* only valid for Fam10h */ | 421 | /* valid for Fam10h and above */ |
469 | if (boot_cpu_data.x86 == 0x10 && | 422 | if (boot_cpu_data.x86 >= 0x10 && !dhar_mem_hoist_valid(pvt)) { |
470 | (pvt->dhar & F10_DRAM_MEM_HOIST_VALID) == 0) { | ||
471 | debugf1(" Dram Memory Hoisting is DISABLED on this system\n"); | 423 | debugf1(" Dram Memory Hoisting is DISABLED on this system\n"); |
472 | return 1; | 424 | return 1; |
473 | } | 425 | } |
474 | 426 | ||
475 | if ((pvt->dhar & DHAR_VALID) == 0) { | 427 | if (!dhar_valid(pvt)) { |
476 | debugf1(" Dram Memory Hoisting is DISABLED on this node %d\n", | 428 | debugf1(" Dram Memory Hoisting is DISABLED on this node %d\n", |
477 | pvt->mc_node_id); | 429 | pvt->mc_node_id); |
478 | return 1; | 430 | return 1; |
@@ -496,15 +448,15 @@ int amd64_get_dram_hole_info(struct mem_ctl_info *mci, u64 *hole_base, | |||
496 | * addresses in the hole so that they start at 0x100000000. | 448 | * addresses in the hole so that they start at 0x100000000. |
497 | */ | 449 | */ |
498 | 450 | ||
499 | base = dhar_base(pvt->dhar); | 451 | base = dhar_base(pvt); |
500 | 452 | ||
501 | *hole_base = base; | 453 | *hole_base = base; |
502 | *hole_size = (0x1ull << 32) - base; | 454 | *hole_size = (0x1ull << 32) - base; |
503 | 455 | ||
504 | if (boot_cpu_data.x86 > 0xf) | 456 | if (boot_cpu_data.x86 > 0xf) |
505 | *hole_offset = f10_dhar_offset(pvt->dhar); | 457 | *hole_offset = f10_dhar_offset(pvt); |
506 | else | 458 | else |
507 | *hole_offset = k8_dhar_offset(pvt->dhar); | 459 | *hole_offset = k8_dhar_offset(pvt); |
508 | 460 | ||
509 | debugf1(" DHAR info for node %d base 0x%lx offset 0x%lx size 0x%lx\n", | 461 | debugf1(" DHAR info for node %d base 0x%lx offset 0x%lx size 0x%lx\n", |
510 | pvt->mc_node_id, (unsigned long)*hole_base, | 462 | pvt->mc_node_id, (unsigned long)*hole_base, |
@@ -545,10 +497,11 @@ EXPORT_SYMBOL_GPL(amd64_get_dram_hole_info); | |||
545 | */ | 497 | */ |
546 | static u64 sys_addr_to_dram_addr(struct mem_ctl_info *mci, u64 sys_addr) | 498 | static u64 sys_addr_to_dram_addr(struct mem_ctl_info *mci, u64 sys_addr) |
547 | { | 499 | { |
500 | struct amd64_pvt *pvt = mci->pvt_info; | ||
548 | u64 dram_base, hole_base, hole_offset, hole_size, dram_addr; | 501 | u64 dram_base, hole_base, hole_offset, hole_size, dram_addr; |
549 | int ret = 0; | 502 | int ret = 0; |
550 | 503 | ||
551 | dram_base = get_dram_base(mci); | 504 | dram_base = get_dram_base(pvt, pvt->mc_node_id); |
552 | 505 | ||
553 | ret = amd64_get_dram_hole_info(mci, &hole_base, &hole_offset, | 506 | ret = amd64_get_dram_hole_info(mci, &hole_base, &hole_offset, |
554 | &hole_size); | 507 | &hole_size); |
@@ -576,7 +529,7 @@ static u64 sys_addr_to_dram_addr(struct mem_ctl_info *mci, u64 sys_addr) | |||
576 | * section 3.4.2 of AMD publication 24592: AMD x86-64 Architecture | 529 | * section 3.4.2 of AMD publication 24592: AMD x86-64 Architecture |
577 | * Programmer's Manual Volume 1 Application Programming. | 530 | * Programmer's Manual Volume 1 Application Programming. |
578 | */ | 531 | */ |
579 | dram_addr = (sys_addr & 0xffffffffffull) - dram_base; | 532 | dram_addr = (sys_addr & GENMASK(0, 39)) - dram_base; |
580 | 533 | ||
581 | debugf2("using DRAM Base register to translate SysAddr 0x%lx to " | 534 | debugf2("using DRAM Base register to translate SysAddr 0x%lx to " |
582 | "DramAddr 0x%lx\n", (unsigned long)sys_addr, | 535 | "DramAddr 0x%lx\n", (unsigned long)sys_addr, |
@@ -612,9 +565,9 @@ static u64 dram_addr_to_input_addr(struct mem_ctl_info *mci, u64 dram_addr) | |||
612 | * See the start of section 3.4.4 (p. 70, BKDG #26094, K8, revA-E) | 565 | * See the start of section 3.4.4 (p. 70, BKDG #26094, K8, revA-E) |
613 | * concerning translating a DramAddr to an InputAddr. | 566 | * concerning translating a DramAddr to an InputAddr. |
614 | */ | 567 | */ |
615 | intlv_shift = num_node_interleave_bits(pvt->dram_IntlvEn[0]); | 568 | intlv_shift = num_node_interleave_bits(dram_intlv_en(pvt, 0)); |
616 | input_addr = ((dram_addr >> intlv_shift) & 0xffffff000ull) + | 569 | input_addr = ((dram_addr >> intlv_shift) & GENMASK(12, 35)) + |
617 | (dram_addr & 0xfff); | 570 | (dram_addr & 0xfff); |
618 | 571 | ||
619 | debugf2(" Intlv Shift=%d DramAddr=0x%lx maps to InputAddr=0x%lx\n", | 572 | debugf2(" Intlv Shift=%d DramAddr=0x%lx maps to InputAddr=0x%lx\n", |
620 | intlv_shift, (unsigned long)dram_addr, | 573 | intlv_shift, (unsigned long)dram_addr, |
@@ -648,7 +601,7 @@ static u64 sys_addr_to_input_addr(struct mem_ctl_info *mci, u64 sys_addr) | |||
648 | static u64 input_addr_to_dram_addr(struct mem_ctl_info *mci, u64 input_addr) | 601 | static u64 input_addr_to_dram_addr(struct mem_ctl_info *mci, u64 input_addr) |
649 | { | 602 | { |
650 | struct amd64_pvt *pvt; | 603 | struct amd64_pvt *pvt; |
651 | int node_id, intlv_shift; | 604 | unsigned node_id, intlv_shift; |
652 | u64 bits, dram_addr; | 605 | u64 bits, dram_addr; |
653 | u32 intlv_sel; | 606 | u32 intlv_sel; |
654 | 607 | ||
@@ -662,10 +615,10 @@ static u64 input_addr_to_dram_addr(struct mem_ctl_info *mci, u64 input_addr) | |||
662 | */ | 615 | */ |
663 | pvt = mci->pvt_info; | 616 | pvt = mci->pvt_info; |
664 | node_id = pvt->mc_node_id; | 617 | node_id = pvt->mc_node_id; |
665 | BUG_ON((node_id < 0) || (node_id > 7)); | ||
666 | 618 | ||
667 | intlv_shift = num_node_interleave_bits(pvt->dram_IntlvEn[0]); | 619 | BUG_ON(node_id > 7); |
668 | 620 | ||
621 | intlv_shift = num_node_interleave_bits(dram_intlv_en(pvt, 0)); | ||
669 | if (intlv_shift == 0) { | 622 | if (intlv_shift == 0) { |
670 | debugf1(" InputAddr 0x%lx translates to DramAddr of " | 623 | debugf1(" InputAddr 0x%lx translates to DramAddr of " |
671 | "same value\n", (unsigned long)input_addr); | 624 | "same value\n", (unsigned long)input_addr); |
@@ -673,10 +626,10 @@ static u64 input_addr_to_dram_addr(struct mem_ctl_info *mci, u64 input_addr) | |||
673 | return input_addr; | 626 | return input_addr; |
674 | } | 627 | } |
675 | 628 | ||
676 | bits = ((input_addr & 0xffffff000ull) << intlv_shift) + | 629 | bits = ((input_addr & GENMASK(12, 35)) << intlv_shift) + |
677 | (input_addr & 0xfff); | 630 | (input_addr & 0xfff); |
678 | 631 | ||
679 | intlv_sel = pvt->dram_IntlvSel[node_id] & ((1 << intlv_shift) - 1); | 632 | intlv_sel = dram_intlv_sel(pvt, node_id) & ((1 << intlv_shift) - 1); |
680 | dram_addr = bits + (intlv_sel << 12); | 633 | dram_addr = bits + (intlv_sel << 12); |
681 | 634 | ||
682 | debugf1("InputAddr 0x%lx translates to DramAddr 0x%lx " | 635 | debugf1("InputAddr 0x%lx translates to DramAddr 0x%lx " |
@@ -693,7 +646,7 @@ static u64 input_addr_to_dram_addr(struct mem_ctl_info *mci, u64 input_addr) | |||
693 | static u64 dram_addr_to_sys_addr(struct mem_ctl_info *mci, u64 dram_addr) | 646 | static u64 dram_addr_to_sys_addr(struct mem_ctl_info *mci, u64 dram_addr) |
694 | { | 647 | { |
695 | struct amd64_pvt *pvt = mci->pvt_info; | 648 | struct amd64_pvt *pvt = mci->pvt_info; |
696 | u64 hole_base, hole_offset, hole_size, base, limit, sys_addr; | 649 | u64 hole_base, hole_offset, hole_size, base, sys_addr; |
697 | int ret = 0; | 650 | int ret = 0; |
698 | 651 | ||
699 | ret = amd64_get_dram_hole_info(mci, &hole_base, &hole_offset, | 652 | ret = amd64_get_dram_hole_info(mci, &hole_base, &hole_offset, |
@@ -711,7 +664,7 @@ static u64 dram_addr_to_sys_addr(struct mem_ctl_info *mci, u64 dram_addr) | |||
711 | } | 664 | } |
712 | } | 665 | } |
713 | 666 | ||
714 | amd64_get_base_and_limit(pvt, pvt->mc_node_id, &base, &limit); | 667 | base = get_dram_base(pvt, pvt->mc_node_id); |
715 | sys_addr = dram_addr + base; | 668 | sys_addr = dram_addr + base; |
716 | 669 | ||
717 | /* | 670 | /* |
@@ -756,13 +709,12 @@ static void find_csrow_limits(struct mem_ctl_info *mci, int csrow, | |||
756 | u64 base, mask; | 709 | u64 base, mask; |
757 | 710 | ||
758 | pvt = mci->pvt_info; | 711 | pvt = mci->pvt_info; |
759 | BUG_ON((csrow < 0) || (csrow >= pvt->cs_count)); | 712 | BUG_ON((csrow < 0) || (csrow >= pvt->csels[0].b_cnt)); |
760 | 713 | ||
761 | base = base_from_dct_base(pvt, csrow); | 714 | get_cs_base_and_mask(pvt, csrow, 0, &base, &mask); |
762 | mask = mask_from_dct_mask(pvt, csrow); | ||
763 | 715 | ||
764 | *input_addr_min = base & ~mask; | 716 | *input_addr_min = base & ~mask; |
765 | *input_addr_max = base | mask | pvt->dcs_mask_notused; | 717 | *input_addr_max = base | mask; |
766 | } | 718 | } |
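A short note on the simplified limits above, since the mask convention changed in this patch:

/*
 * In the mask built by get_cs_base_and_mask(), a 1 means "don't care",
 * so ~mask keeps only the bits that must match: base & ~mask is the
 * lowest InputAddr the csrow claims and base | mask (base with every
 * don't-care bit set) is the highest.  The separate
 * "| pvt->dcs_mask_notused" term is not needed any more because the gap
 * and low-order bits it used to cover are already set in the new mask.
 */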
767 | 719 | ||
768 | /* Map the Error address to a PAGE and PAGE OFFSET. */ | 720 | /* Map the Error address to a PAGE and PAGE OFFSET. */ |
@@ -788,41 +740,20 @@ static int sys_addr_to_csrow(struct mem_ctl_info *mci, u64 sys_addr) | |||
788 | csrow = input_addr_to_csrow(mci, sys_addr_to_input_addr(mci, sys_addr)); | 740 | csrow = input_addr_to_csrow(mci, sys_addr_to_input_addr(mci, sys_addr)); |
789 | 741 | ||
790 | if (csrow == -1) | 742 | if (csrow == -1) |
791 | amd64_mc_printk(mci, KERN_ERR, | 743 | amd64_mc_err(mci, "Failed to translate InputAddr to csrow for " |
792 | "Failed to translate InputAddr to csrow for " | 744 | "address 0x%lx\n", (unsigned long)sys_addr); |
793 | "address 0x%lx\n", (unsigned long)sys_addr); | ||
794 | return csrow; | 745 | return csrow; |
795 | } | 746 | } |
796 | 747 | ||
797 | static int get_channel_from_ecc_syndrome(struct mem_ctl_info *, u16); | 748 | static int get_channel_from_ecc_syndrome(struct mem_ctl_info *, u16); |
798 | 749 | ||
799 | static u16 extract_syndrome(struct err_regs *err) | ||
800 | { | ||
801 | return ((err->nbsh >> 15) & 0xff) | ((err->nbsl >> 16) & 0xff00); | ||
802 | } | ||
803 | |||
804 | static void amd64_cpu_display_info(struct amd64_pvt *pvt) | ||
805 | { | ||
806 | if (boot_cpu_data.x86 == 0x11) | ||
807 | edac_printk(KERN_DEBUG, EDAC_MC, "F11h CPU detected\n"); | ||
808 | else if (boot_cpu_data.x86 == 0x10) | ||
809 | edac_printk(KERN_DEBUG, EDAC_MC, "F10h CPU detected\n"); | ||
810 | else if (boot_cpu_data.x86 == 0xf) | ||
811 | edac_printk(KERN_DEBUG, EDAC_MC, "%s detected\n", | ||
812 | (pvt->ext_model >= K8_REV_F) ? | ||
813 | "Rev F or later" : "Rev E or earlier"); | ||
814 | else | ||
815 | /* we'll hardly ever ever get here */ | ||
816 | edac_printk(KERN_ERR, EDAC_MC, "Unknown cpu!\n"); | ||
817 | } | ||
818 | |||
819 | /* | 750 | /* |
820 | * Determine if the DIMMs have ECC enabled. ECC is enabled ONLY if all the DIMMs | 751 | * Determine if the DIMMs have ECC enabled. ECC is enabled ONLY if all the DIMMs |
821 | * are ECC capable. | 752 | * are ECC capable. |
822 | */ | 753 | */ |
823 | static enum edac_type amd64_determine_edac_cap(struct amd64_pvt *pvt) | 754 | static enum edac_type amd64_determine_edac_cap(struct amd64_pvt *pvt) |
824 | { | 755 | { |
825 | int bit; | 756 | u8 bit; |
826 | enum dev_type edac_cap = EDAC_FLAG_NONE; | 757 | enum dev_type edac_cap = EDAC_FLAG_NONE; |
827 | 758 | ||
828 | bit = (boot_cpu_data.x86 > 0xf || pvt->ext_model >= K8_REV_F) | 759 | bit = (boot_cpu_data.x86 > 0xf || pvt->ext_model >= K8_REV_F) |
@@ -835,8 +766,7 @@ static enum edac_type amd64_determine_edac_cap(struct amd64_pvt *pvt) | |||
835 | return edac_cap; | 766 | return edac_cap; |
836 | } | 767 | } |
837 | 768 | ||
838 | 769 | static void amd64_debug_display_dimm_sizes(struct amd64_pvt *, u8); | |
839 | static void amd64_debug_display_dimm_sizes(int ctrl, struct amd64_pvt *pvt); | ||
840 | 770 | ||
841 | static void amd64_dump_dramcfg_low(u32 dclr, int chan) | 771 | static void amd64_dump_dramcfg_low(u32 dclr, int chan) |
842 | { | 772 | { |
@@ -849,8 +779,9 @@ static void amd64_dump_dramcfg_low(u32 dclr, int chan) | |||
849 | debugf1(" PAR/ERR parity: %s\n", | 779 | debugf1(" PAR/ERR parity: %s\n", |
850 | (dclr & BIT(8)) ? "enabled" : "disabled"); | 780 | (dclr & BIT(8)) ? "enabled" : "disabled"); |
851 | 781 | ||
852 | debugf1(" DCT 128bit mode width: %s\n", | 782 | if (boot_cpu_data.x86 == 0x10) |
853 | (dclr & BIT(11)) ? "128b" : "64b"); | 783 | debugf1(" DCT 128bit mode width: %s\n", |
784 | (dclr & BIT(11)) ? "128b" : "64b"); | ||
854 | 785 | ||
855 | debugf1(" x4 logical DIMMs present: L0: %s L1: %s L2: %s L3: %s\n", | 786 | debugf1(" x4 logical DIMMs present: L0: %s L1: %s L2: %s L3: %s\n", |
856 | (dclr & BIT(12)) ? "yes" : "no", | 787 | (dclr & BIT(12)) ? "yes" : "no", |
@@ -860,18 +791,16 @@ static void amd64_dump_dramcfg_low(u32 dclr, int chan) | |||
860 | } | 791 | } |
861 | 792 | ||
862 | /* Display and decode various NB registers for debug purposes. */ | 793 | /* Display and decode various NB registers for debug purposes. */ |
863 | static void amd64_dump_misc_regs(struct amd64_pvt *pvt) | 794 | static void dump_misc_regs(struct amd64_pvt *pvt) |
864 | { | 795 | { |
865 | int ganged; | ||
866 | |||
867 | debugf1("F3xE8 (NB Cap): 0x%08x\n", pvt->nbcap); | 796 | debugf1("F3xE8 (NB Cap): 0x%08x\n", pvt->nbcap); |
868 | 797 | ||
869 | debugf1(" NB two channel DRAM capable: %s\n", | 798 | debugf1(" NB two channel DRAM capable: %s\n", |
870 | (pvt->nbcap & K8_NBCAP_DCT_DUAL) ? "yes" : "no"); | 799 | (pvt->nbcap & NBCAP_DCT_DUAL) ? "yes" : "no"); |
871 | 800 | ||
872 | debugf1(" ECC capable: %s, ChipKill ECC capable: %s\n", | 801 | debugf1(" ECC capable: %s, ChipKill ECC capable: %s\n", |
873 | (pvt->nbcap & K8_NBCAP_SECDED) ? "yes" : "no", | 802 | (pvt->nbcap & NBCAP_SECDED) ? "yes" : "no", |
874 | (pvt->nbcap & K8_NBCAP_CHIPKILL) ? "yes" : "no"); | 803 | (pvt->nbcap & NBCAP_CHIPKILL) ? "yes" : "no"); |
875 | 804 | ||
876 | amd64_dump_dramcfg_low(pvt->dclr0, 0); | 805 | amd64_dump_dramcfg_low(pvt->dclr0, 0); |
877 | 806 | ||
@@ -879,154 +808,95 @@ static void amd64_dump_misc_regs(struct amd64_pvt *pvt) | |||
879 | 808 | ||
880 | debugf1("F1xF0 (DRAM Hole Address): 0x%08x, base: 0x%08x, " | 809 | debugf1("F1xF0 (DRAM Hole Address): 0x%08x, base: 0x%08x, " |
881 | "offset: 0x%08x\n", | 810 | "offset: 0x%08x\n", |
882 | pvt->dhar, | 811 | pvt->dhar, dhar_base(pvt), |
883 | dhar_base(pvt->dhar), | 812 | (boot_cpu_data.x86 == 0xf) ? k8_dhar_offset(pvt) |
884 | (boot_cpu_data.x86 == 0xf) ? k8_dhar_offset(pvt->dhar) | 813 | : f10_dhar_offset(pvt)); |
885 | : f10_dhar_offset(pvt->dhar)); | 814 | |
815 | debugf1(" DramHoleValid: %s\n", dhar_valid(pvt) ? "yes" : "no"); | ||
886 | 816 | ||
887 | debugf1(" DramHoleValid: %s\n", | 817 | amd64_debug_display_dimm_sizes(pvt, 0); |
888 | (pvt->dhar & DHAR_VALID) ? "yes" : "no"); | ||
889 | 818 | ||
890 | /* everything below this point is Fam10h and above */ | 819 | /* everything below this point is Fam10h and above */ |
891 | if (boot_cpu_data.x86 == 0xf) { | 820 | if (boot_cpu_data.x86 == 0xf) |
892 | amd64_debug_display_dimm_sizes(0, pvt); | ||
893 | return; | 821 | return; |
894 | } | ||
895 | 822 | ||
896 | amd64_printk(KERN_INFO, "using %s syndromes.\n", | 823 | amd64_debug_display_dimm_sizes(pvt, 1); |
897 | ((pvt->syn_type == 8) ? "x8" : "x4")); | 824 | |
825 | amd64_info("using %s syndromes.\n", ((pvt->ecc_sym_sz == 8) ? "x8" : "x4")); | ||
898 | 826 | ||
899 | /* Only if NOT ganged does dclr1 have valid info */ | 827 | /* Only if NOT ganged does dclr1 have valid info */ |
900 | if (!dct_ganging_enabled(pvt)) | 828 | if (!dct_ganging_enabled(pvt)) |
901 | amd64_dump_dramcfg_low(pvt->dclr1, 1); | 829 | amd64_dump_dramcfg_low(pvt->dclr1, 1); |
902 | |||
903 | /* | ||
904 | * Determine if ganged and then dump memory sizes for first controller, | ||
905 | * and if NOT ganged dump info for 2nd controller. | ||
906 | */ | ||
907 | ganged = dct_ganging_enabled(pvt); | ||
908 | |||
909 | amd64_debug_display_dimm_sizes(0, pvt); | ||
910 | |||
911 | if (!ganged) | ||
912 | amd64_debug_display_dimm_sizes(1, pvt); | ||
913 | } | ||
914 | |||
915 | /* Read in both of DBAM registers */ | ||
916 | static void amd64_read_dbam_reg(struct amd64_pvt *pvt) | ||
917 | { | ||
918 | amd64_read_pci_cfg(pvt->dram_f2_ctl, DBAM0, &pvt->dbam0); | ||
919 | |||
920 | if (boot_cpu_data.x86 >= 0x10) | ||
921 | amd64_read_pci_cfg(pvt->dram_f2_ctl, DBAM1, &pvt->dbam1); | ||
922 | } | 830 | } |
923 | 831 | ||
924 | /* | 832 | /* |
925 | * NOTE: CPU Revision Dependent code: Rev E and Rev F | 833 | * see BKDG, F2x[1,0][5C:40], F2[1,0][6C:60] |
926 | * | ||
927 | * Set the DCSB and DCSM mask values depending on the CPU revision value. Also | ||
928 | * set the shift factor for the DCSB and DCSM values. | ||
929 | * | ||
930 | * ->dcs_mask_notused, RevE: | ||
931 | * | ||
932 | * To find the max InputAddr for the csrow, start with the base address and set | ||
933 | * all bits that are "don't care" bits in the test at the start of section | ||
934 | * 3.5.4 (p. 84). | ||
935 | * | ||
936 | * The "don't care" bits are all set bits in the mask and all bits in the gaps | ||
937 | * between bit ranges [35:25] and [19:13]. The value REV_E_DCS_NOTUSED_BITS | ||
938 | * represents bits [24:20] and [12:0], which are all bits in the above-mentioned | ||
939 | * gaps. | ||
940 | * | ||
941 | * ->dcs_mask_notused, RevF and later: | ||
942 | * | ||
943 | * To find the max InputAddr for the csrow, start with the base address and set | ||
944 | * all bits that are "don't care" bits in the test at the start of NPT section | ||
945 | * 4.5.4 (p. 87). | ||
946 | * | ||
947 | * The "don't care" bits are all set bits in the mask and all bits in the gaps | ||
948 | * between bit ranges [36:27] and [21:13]. | ||
949 | * | ||
950 | * The value REV_F_F1Xh_DCS_NOTUSED_BITS represents bits [26:22] and [12:0], | ||
951 | * which are all bits in the above-mentioned gaps. | ||
952 | */ | 834 | */ |
953 | static void amd64_set_dct_base_and_mask(struct amd64_pvt *pvt) | 835 | static void prep_chip_selects(struct amd64_pvt *pvt) |
954 | { | 836 | { |
955 | |||
956 | if (boot_cpu_data.x86 == 0xf && pvt->ext_model < K8_REV_F) { | 837 | if (boot_cpu_data.x86 == 0xf && pvt->ext_model < K8_REV_F) { |
957 | pvt->dcsb_base = REV_E_DCSB_BASE_BITS; | 838 | pvt->csels[0].b_cnt = pvt->csels[1].b_cnt = 8; |
958 | pvt->dcsm_mask = REV_E_DCSM_MASK_BITS; | 839 | pvt->csels[0].m_cnt = pvt->csels[1].m_cnt = 8; |
959 | pvt->dcs_mask_notused = REV_E_DCS_NOTUSED_BITS; | ||
960 | pvt->dcs_shift = REV_E_DCS_SHIFT; | ||
961 | pvt->cs_count = 8; | ||
962 | pvt->num_dcsm = 8; | ||
963 | } else { | 840 | } else { |
964 | pvt->dcsb_base = REV_F_F1Xh_DCSB_BASE_BITS; | 841 | pvt->csels[0].b_cnt = pvt->csels[1].b_cnt = 8; |
965 | pvt->dcsm_mask = REV_F_F1Xh_DCSM_MASK_BITS; | 842 | pvt->csels[0].m_cnt = pvt->csels[1].m_cnt = 4; |
966 | pvt->dcs_mask_notused = REV_F_F1Xh_DCS_NOTUSED_BITS; | ||
967 | pvt->dcs_shift = REV_F_F1Xh_DCS_SHIFT; | ||
968 | |||
969 | if (boot_cpu_data.x86 == 0x11) { | ||
970 | pvt->cs_count = 4; | ||
971 | pvt->num_dcsm = 2; | ||
972 | } else { | ||
973 | pvt->cs_count = 8; | ||
974 | pvt->num_dcsm = 4; | ||
975 | } | ||
976 | } | 843 | } |
977 | } | 844 | } |
978 | 845 | ||
979 | /* | 846 | /* |
980 | * Function 2 Offset F10_DCSB0; read in the DCS Base and DCS Mask hw registers | 847 | * Function 2 Offset F10_DCSB0; read in the DCS Base and DCS Mask registers |
981 | */ | 848 | */ |
982 | static void amd64_read_dct_base_mask(struct amd64_pvt *pvt) | 849 | static void read_dct_base_mask(struct amd64_pvt *pvt) |
983 | { | 850 | { |
984 | int cs, reg; | 851 | int cs; |
852 | |||
853 | prep_chip_selects(pvt); | ||
985 | 854 | ||
986 | amd64_set_dct_base_and_mask(pvt); | 855 | for_each_chip_select(cs, 0, pvt) { |
856 | int reg0 = DCSB0 + (cs * 4); | ||
857 | int reg1 = DCSB1 + (cs * 4); | ||
858 | u32 *base0 = &pvt->csels[0].csbases[cs]; | ||
859 | u32 *base1 = &pvt->csels[1].csbases[cs]; | ||
987 | 860 | ||
988 | for (cs = 0; cs < pvt->cs_count; cs++) { | 861 | if (!amd64_read_dct_pci_cfg(pvt, reg0, base0)) |
989 | reg = K8_DCSB0 + (cs * 4); | ||
990 | if (!amd64_read_pci_cfg(pvt->dram_f2_ctl, reg, &pvt->dcsb0[cs])) | ||
991 | debugf0(" DCSB0[%d]=0x%08x reg: F2x%x\n", | 862 | debugf0(" DCSB0[%d]=0x%08x reg: F2x%x\n", |
992 | cs, pvt->dcsb0[cs], reg); | 863 | cs, *base0, reg0); |
993 | 864 | ||
994 | /* If DCT are NOT ganged, then read in DCT1's base */ | 865 | if (boot_cpu_data.x86 == 0xf || dct_ganging_enabled(pvt)) |
995 | if (boot_cpu_data.x86 >= 0x10 && !dct_ganging_enabled(pvt)) { | 866 | continue; |
996 | reg = F10_DCSB1 + (cs * 4); | 867 | |
997 | if (!amd64_read_pci_cfg(pvt->dram_f2_ctl, reg, | 868 | if (!amd64_read_dct_pci_cfg(pvt, reg1, base1)) |
998 | &pvt->dcsb1[cs])) | 869 | debugf0(" DCSB1[%d]=0x%08x reg: F2x%x\n", |
999 | debugf0(" DCSB1[%d]=0x%08x reg: F2x%x\n", | 870 | cs, *base1, reg1); |
1000 | cs, pvt->dcsb1[cs], reg); | ||
1001 | } else { | ||
1002 | pvt->dcsb1[cs] = 0; | ||
1003 | } | ||
1004 | } | 871 | } |
1005 | 872 | ||
1006 | for (cs = 0; cs < pvt->num_dcsm; cs++) { | 873 | for_each_chip_select_mask(cs, 0, pvt) { |
1007 | reg = K8_DCSM0 + (cs * 4); | 874 | int reg0 = DCSM0 + (cs * 4); |
1008 | if (!amd64_read_pci_cfg(pvt->dram_f2_ctl, reg, &pvt->dcsm0[cs])) | 875 | int reg1 = DCSM1 + (cs * 4); |
876 | u32 *mask0 = &pvt->csels[0].csmasks[cs]; | ||
877 | u32 *mask1 = &pvt->csels[1].csmasks[cs]; | ||
878 | |||
879 | if (!amd64_read_dct_pci_cfg(pvt, reg0, mask0)) | ||
1009 | debugf0(" DCSM0[%d]=0x%08x reg: F2x%x\n", | 880 | debugf0(" DCSM0[%d]=0x%08x reg: F2x%x\n", |
1010 | cs, pvt->dcsm0[cs], reg); | 881 | cs, *mask0, reg0); |
1011 | 882 | ||
1012 | /* If DCT are NOT ganged, then read in DCT1's mask */ | 883 | if (boot_cpu_data.x86 == 0xf || dct_ganging_enabled(pvt)) |
1013 | if (boot_cpu_data.x86 >= 0x10 && !dct_ganging_enabled(pvt)) { | 884 | continue; |
1014 | reg = F10_DCSM1 + (cs * 4); | 885 | |
1015 | if (!amd64_read_pci_cfg(pvt->dram_f2_ctl, reg, | 886 | if (!amd64_read_dct_pci_cfg(pvt, reg1, mask1)) |
1016 | &pvt->dcsm1[cs])) | 887 | debugf0(" DCSM1[%d]=0x%08x reg: F2x%x\n", |
1017 | debugf0(" DCSM1[%d]=0x%08x reg: F2x%x\n", | 888 | cs, *mask1, reg1); |
1018 | cs, pvt->dcsm1[cs], reg); | ||
1019 | } else { | ||
1020 | pvt->dcsm1[cs] = 0; | ||
1021 | } | ||
1022 | } | 889 | } |
1023 | } | 890 | } |
1024 | 891 | ||
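A worked example of how the chip-select loops above index the registers; the offsets are assumed from the BKDG ranges cited before prep_chip_selects(), i.e. DCSB0/DCSM0 at F2x40/F2x60 with the DCT1 copies 0x100 higher.

/*
 * For chip select cs = 3 on a family 10h node, read_dct_base_mask() reads
 *
 *   DCSB0 + 3*4  ->  F2x04C   into csels[0].csbases[3]   (DCT0 base)
 *   DCSB1 + 3*4  ->  F2x14C   into csels[1].csbases[3]   (DCT1 base)
 *
 * and, because F10h keeps only four masks per DCT (see prep_chip_selects),
 * csrow 3 later uses csels[0].csmasks[3 >> 1], i.e. the mask read from
 * F2x064.
 */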
1025 | static enum mem_type amd64_determine_memory_type(struct amd64_pvt *pvt) | 892 | static enum mem_type amd64_determine_memory_type(struct amd64_pvt *pvt, int cs) |
1026 | { | 893 | { |
1027 | enum mem_type type; | 894 | enum mem_type type; |
1028 | 895 | ||
1029 | if (boot_cpu_data.x86 >= 0x10 || pvt->ext_model >= K8_REV_F) { | 896 | /* F15h supports only DDR3 */ |
897 | if (boot_cpu_data.x86 >= 0x15) | ||
898 | type = (pvt->dclr0 & BIT(16)) ? MEM_DDR3 : MEM_RDDR3; | ||
899 | else if (boot_cpu_data.x86 == 0x10 || pvt->ext_model >= K8_REV_F) { | ||
1030 | if (pvt->dchr0 & DDR3_MODE) | 900 | if (pvt->dchr0 & DDR3_MODE) |
1031 | type = (pvt->dclr0 & BIT(16)) ? MEM_DDR3 : MEM_RDDR3; | 901 | type = (pvt->dclr0 & BIT(16)) ? MEM_DDR3 : MEM_RDDR3; |
1032 | else | 902 | else |
@@ -1035,35 +905,22 @@ static enum mem_type amd64_determine_memory_type(struct amd64_pvt *pvt) | |||
1035 | type = (pvt->dclr0 & BIT(18)) ? MEM_DDR : MEM_RDDR; | 905 | type = (pvt->dclr0 & BIT(18)) ? MEM_DDR : MEM_RDDR; |
1036 | } | 906 | } |
1037 | 907 | ||
1038 | debugf1(" Memory type is: %s\n", edac_mem_types[type]); | 908 | amd64_info("CS%d: %s\n", cs, edac_mem_types[type]); |
1039 | 909 | ||
1040 | return type; | 910 | return type; |
1041 | } | 911 | } |
1042 | 912 | ||
1043 | /* | 913 | /* Get the number of DCT channels the memory controller is using. */ |
1044 | * Read the DRAM Configuration Low register. It differs between CG, D & E revs | ||
1045 | * and the later RevF memory controllers (DDR vs DDR2) | ||
1046 | * | ||
1047 | * Return: | ||
1048 | * number of memory channels in operation | ||
1049 | * Pass back: | ||
1050 | * contents of the DCL0_LOW register | ||
1051 | */ | ||
1052 | static int k8_early_channel_count(struct amd64_pvt *pvt) | 914 | static int k8_early_channel_count(struct amd64_pvt *pvt) |
1053 | { | 915 | { |
1054 | int flag, err = 0; | 916 | int flag; |
1055 | 917 | ||
1056 | err = amd64_read_pci_cfg(pvt->dram_f2_ctl, F10_DCLR_0, &pvt->dclr0); | 918 | if (pvt->ext_model >= K8_REV_F) |
1057 | if (err) | ||
1058 | return err; | ||
1059 | |||
1060 | if ((boot_cpu_data.x86_model >> 4) >= K8_REV_F) { | ||
1061 | /* RevF (NPT) and later */ | 919 | /* RevF (NPT) and later */ |
1062 | flag = pvt->dclr0 & F10_WIDTH_128; | 920 | flag = pvt->dclr0 & WIDTH_128; |
1063 | } else { | 921 | else |
1064 | /* RevE and earlier */ | 922 | /* RevE and earlier */ |
1065 | flag = pvt->dclr0 & REVE_WIDTH_128; | 923 | flag = pvt->dclr0 & REVE_WIDTH_128; |
1066 | } | ||
1067 | 924 | ||
1068 | /* not used */ | 925 | /* not used */ |
1069 | pvt->dclr1 = 0; | 926 | pvt->dclr1 = 0; |
@@ -1071,55 +928,121 @@ static int k8_early_channel_count(struct amd64_pvt *pvt) | |||
1071 | return (flag) ? 2 : 1; | 928 | return (flag) ? 2 : 1; |
1072 | } | 929 | } |
1073 | 930 | ||
1074 | /* extract the ERROR ADDRESS for the K8 CPUs */ | 931 | /* On F10h and later ErrAddr is MC4_ADDR[47:1] */ |
1075 | static u64 k8_get_error_address(struct mem_ctl_info *mci, | 932 | static u64 get_error_address(struct mce *m) |
1076 | struct err_regs *info) | ||
1077 | { | 933 | { |
1078 | return (((u64) (info->nbeah & 0xff)) << 32) + | 934 | struct cpuinfo_x86 *c = &boot_cpu_data; |
1079 | (info->nbeal & ~0x03); | 935 | u64 addr; |
936 | u8 start_bit = 1; | ||
937 | u8 end_bit = 47; | ||
938 | |||
939 | if (c->x86 == 0xf) { | ||
940 | start_bit = 3; | ||
941 | end_bit = 39; | ||
942 | } | ||
943 | |||
944 | addr = m->addr & GENMASK(start_bit, end_bit); | ||
945 | |||
946 | /* | ||
947 | * Erratum 637 workaround | ||
948 | */ | ||
949 | if (c->x86 == 0x15) { | ||
950 | struct amd64_pvt *pvt; | ||
951 | u64 cc6_base, tmp_addr; | ||
952 | u32 tmp; | ||
953 | u8 mce_nid, intlv_en; | ||
954 | |||
955 | if ((addr & GENMASK(24, 47)) >> 24 != 0x00fdf7) | ||
956 | return addr; | ||
957 | |||
958 | mce_nid = amd_get_nb_id(m->extcpu); | ||
959 | pvt = mcis[mce_nid]->pvt_info; | ||
960 | |||
961 | amd64_read_pci_cfg(pvt->F1, DRAM_LOCAL_NODE_LIM, &tmp); | ||
962 | intlv_en = tmp >> 21 & 0x7; | ||
963 | |||
964 | /* add [47:27] + 3 trailing bits */ | ||
965 | cc6_base = (tmp & GENMASK(0, 20)) << 3; | ||
966 | |||
967 | /* reverse and add DramIntlvEn */ | ||
968 | cc6_base |= intlv_en ^ 0x7; | ||
969 | |||
970 | /* pin at [47:24] */ | ||
971 | cc6_base <<= 24; | ||
972 | |||
973 | if (!intlv_en) | ||
974 | return cc6_base | (addr & GENMASK(0, 23)); | ||
975 | |||
976 | amd64_read_pci_cfg(pvt->F1, DRAM_LOCAL_NODE_BASE, &tmp); | ||
977 | |||
978 | /* faster log2 */ | ||
979 | tmp_addr = (addr & GENMASK(12, 23)) << __fls(intlv_en + 1); | ||
980 | |||
981 | /* OR DramIntlvSel into bits [14:12] */ | ||
982 | tmp_addr |= (tmp & GENMASK(21, 23)) >> 9; | ||
983 | |||
984 | /* add remaining [11:0] bits from original MC4_ADDR */ | ||
985 | tmp_addr |= addr & GENMASK(0, 11); | ||
986 | |||
987 | return cc6_base | tmp_addr; | ||
988 | } | ||
989 | |||
990 | return addr; | ||
1080 | } | 991 | } |
1081 | 992 | ||
1082 | /* | 993 | static void read_dram_base_limit_regs(struct amd64_pvt *pvt, unsigned range) |
1083 | * Read the Base and Limit registers for K8 based Memory controllers; extract | ||
1084 | * fields from the 'raw' reg into separate data fields | ||
1085 | * | ||
1086 | * Isolates: BASE, LIMIT, IntlvEn, IntlvSel, RW_EN | ||
1087 | */ | ||
1088 | static void k8_read_dram_base_limit(struct amd64_pvt *pvt, int dram) | ||
1089 | { | 994 | { |
1090 | u32 low; | 995 | struct cpuinfo_x86 *c = &boot_cpu_data; |
1091 | u32 off = dram << 3; /* 8 bytes between DRAM entries */ | 996 | int off = range << 3; |
1092 | 997 | ||
1093 | amd64_read_pci_cfg(pvt->addr_f1_ctl, K8_DRAM_BASE_LOW + off, &low); | 998 | amd64_read_pci_cfg(pvt->F1, DRAM_BASE_LO + off, &pvt->ranges[range].base.lo); |
999 | amd64_read_pci_cfg(pvt->F1, DRAM_LIMIT_LO + off, &pvt->ranges[range].lim.lo); | ||
1094 | 1000 | ||
1095 | /* Extract parts into separate data entries */ | 1001 | if (c->x86 == 0xf) |
1096 | pvt->dram_base[dram] = ((u64) low & 0xFFFF0000) << 8; | 1002 | return; |
1097 | pvt->dram_IntlvEn[dram] = (low >> 8) & 0x7; | ||
1098 | pvt->dram_rw_en[dram] = (low & 0x3); | ||
1099 | 1003 | ||
1100 | amd64_read_pci_cfg(pvt->addr_f1_ctl, K8_DRAM_LIMIT_LOW + off, &low); | 1004 | if (!dram_rw(pvt, range)) |
1005 | return; | ||
1101 | 1006 | ||
1102 | /* | 1007 | amd64_read_pci_cfg(pvt->F1, DRAM_BASE_HI + off, &pvt->ranges[range].base.hi); |
1103 | * Extract parts into separate data entries. Limit is the HIGHEST memory | 1008 | amd64_read_pci_cfg(pvt->F1, DRAM_LIMIT_HI + off, &pvt->ranges[range].lim.hi); |
1104 | * location of the region, so lower 24 bits need to be all ones | 1009 | |
1105 | */ | 1010 | /* Factor in CC6 save area by reading dst node's limit reg */ |
1106 | pvt->dram_limit[dram] = (((u64) low & 0xFFFF0000) << 8) | 0x00FFFFFF; | 1011 | if (c->x86 == 0x15) { |
1107 | pvt->dram_IntlvSel[dram] = (low >> 8) & 0x7; | 1012 | struct pci_dev *f1 = NULL; |
1108 | pvt->dram_DstNode[dram] = (low & 0x7); | 1013 | u8 nid = dram_dst_node(pvt, range); |
1014 | u32 llim; | ||
1015 | |||
1016 | f1 = pci_get_domain_bus_and_slot(0, 0, PCI_DEVFN(0x18 + nid, 1)); | ||
1017 | if (WARN_ON(!f1)) | ||
1018 | return; | ||
1019 | |||
1020 | amd64_read_pci_cfg(f1, DRAM_LOCAL_NODE_LIM, &llim); | ||
1021 | |||
1022 | pvt->ranges[range].lim.lo &= GENMASK(0, 15); | ||
1023 | |||
1024 | /* {[39:27],111b} */ | ||
1025 | pvt->ranges[range].lim.lo |= ((llim & 0x1fff) << 3 | 0x7) << 16; | ||
1026 | |||
1027 | pvt->ranges[range].lim.hi &= GENMASK(0, 7); | ||
1028 | |||
1029 | /* [47:40] */ | ||
1030 | pvt->ranges[range].lim.hi |= llim >> 13; | ||
1031 | |||
1032 | pci_dev_put(f1); | ||
1033 | } | ||
1109 | } | 1034 | } |
1110 | 1035 | ||
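The removed K8 helper spells out the register layout, so a worked decode may help; the register value below is hypothetical.

/*
 * DRAM Base low register = 0x00400003
 *   bits [1:0]   = 0x3     -> read and write enable
 *   bits [31:16] = 0x0040  -> base address bits [39:24]
 *   base = (0x00400003 & 0xFFFF0000) << 8 = 0x40000000   (node starts at 1GB)
 * The limit register decodes the same way, then ORs in 0x00FFFFFF so it
 * names the last byte of the range.  The new read_dram_base_limit_regs()
 * keeps the raw lo/hi pairs and leaves this decoding to the
 * get_dram_base()/get_dram_limit() accessors used elsewhere in this diff.
 */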
1111 | static void k8_map_sysaddr_to_csrow(struct mem_ctl_info *mci, | 1036 | static void k8_map_sysaddr_to_csrow(struct mem_ctl_info *mci, u64 sys_addr, |
1112 | struct err_regs *err_info, u64 sys_addr) | 1037 | u16 syndrome) |
1113 | { | 1038 | { |
1114 | struct mem_ctl_info *src_mci; | 1039 | struct mem_ctl_info *src_mci; |
1040 | struct amd64_pvt *pvt = mci->pvt_info; | ||
1115 | int channel, csrow; | 1041 | int channel, csrow; |
1116 | u32 page, offset; | 1042 | u32 page, offset; |
1117 | u16 syndrome; | ||
1118 | |||
1119 | syndrome = extract_syndrome(err_info); | ||
1120 | 1043 | ||
1121 | /* CHIPKILL enabled */ | 1044 | /* CHIPKILL enabled */ |
1122 | if (err_info->nbcfg & K8_NBCFG_CHIPKILL) { | 1045 | if (pvt->nbcfg & NBCFG_CHIPKILL) { |
1123 | channel = get_channel_from_ecc_syndrome(mci, syndrome); | 1046 | channel = get_channel_from_ecc_syndrome(mci, syndrome); |
1124 | if (channel < 0) { | 1047 | if (channel < 0) { |
1125 | /* | 1048 | /* |
@@ -1127,9 +1050,8 @@ static void k8_map_sysaddr_to_csrow(struct mem_ctl_info *mci, | |||
1127 | * 2 DIMMs is in error. So we need to ID 'both' of them | 1050 | * 2 DIMMs is in error. So we need to ID 'both' of them |
1128 | * as suspect. | 1051 | * as suspect. |
1129 | */ | 1052 | */ |
1130 | amd64_mc_printk(mci, KERN_WARNING, | 1053 | amd64_mc_warn(mci, "unknown syndrome 0x%04x - possible " |
1131 | "unknown syndrome 0x%04x - possible " | 1054 | "error reporting race\n", syndrome); |
1132 | "error reporting race\n", syndrome); | ||
1133 | edac_mc_handle_ce_no_info(mci, EDAC_MOD_STR); | 1055 | edac_mc_handle_ce_no_info(mci, EDAC_MOD_STR); |
1134 | return; | 1056 | return; |
1135 | } | 1057 | } |
@@ -1151,8 +1073,7 @@ static void k8_map_sysaddr_to_csrow(struct mem_ctl_info *mci, | |||
1151 | */ | 1073 | */ |
1152 | src_mci = find_mc_by_sys_addr(mci, sys_addr); | 1074 | src_mci = find_mc_by_sys_addr(mci, sys_addr); |
1153 | if (!src_mci) { | 1075 | if (!src_mci) { |
1154 | amd64_mc_printk(mci, KERN_ERR, | 1076 | amd64_mc_err(mci, "failed to map error addr 0x%lx to a node\n", |
1155 | "failed to map error address 0x%lx to a node\n", | ||
1156 | (unsigned long)sys_addr); | 1077 | (unsigned long)sys_addr); |
1157 | edac_mc_handle_ce_no_info(mci, EDAC_MOD_STR); | 1078 | edac_mc_handle_ce_no_info(mci, EDAC_MOD_STR); |
1158 | return; | 1079 | return; |
@@ -1170,18 +1091,41 @@ static void k8_map_sysaddr_to_csrow(struct mem_ctl_info *mci, | |||
1170 | } | 1091 | } |
1171 | } | 1092 | } |
1172 | 1093 | ||
1173 | static int k8_dbam_to_chip_select(struct amd64_pvt *pvt, int cs_mode) | 1094 | static int ddr2_cs_size(unsigned i, bool dct_width) |
1174 | { | 1095 | { |
1175 | int *dbam_map; | 1096 | unsigned shift = 0; |
1176 | 1097 | ||
1177 | if (pvt->ext_model >= K8_REV_F) | 1098 | if (i <= 2) |
1178 | dbam_map = ddr2_dbam; | 1099 | shift = i; |
1179 | else if (pvt->ext_model >= K8_REV_D) | 1100 | else if (!(i & 0x1)) |
1180 | dbam_map = ddr2_dbam_revD; | 1101 | shift = i >> 1; |
1181 | else | 1102 | else |
1182 | dbam_map = ddr2_dbam_revCG; | 1103 | shift = (i + 1) >> 1; |
1183 | 1104 | ||
1184 | return dbam_map[cs_mode]; | 1105 | return 128 << (shift + !!dct_width); |
1106 | } | ||
1107 | |||
1108 | static int k8_dbam_to_chip_select(struct amd64_pvt *pvt, u8 dct, | ||
1109 | unsigned cs_mode) | ||
1110 | { | ||
1111 | u32 dclr = dct ? pvt->dclr1 : pvt->dclr0; | ||
1112 | |||
1113 | if (pvt->ext_model >= K8_REV_F) { | ||
1114 | WARN_ON(cs_mode > 11); | ||
1115 | return ddr2_cs_size(cs_mode, dclr & WIDTH_128); | ||
1116 | } | ||
1117 | else if (pvt->ext_model >= K8_REV_D) { | ||
1118 | WARN_ON(cs_mode > 10); | ||
1119 | |||
1120 | if (cs_mode == 3 || cs_mode == 8) | ||
1121 | return 32 << (cs_mode - 1); | ||
1122 | else | ||
1123 | return 32 << cs_mode; | ||
1124 | } | ||
1125 | else { | ||
1126 | WARN_ON(cs_mode > 6); | ||
1127 | return 32 << cs_mode; | ||
1128 | } | ||
1185 | } | 1129 | } |
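The arithmetic helpers above replace the old per-revision DBAM lookup tables. A small compile-and-run sketch of the revF-and-later path, mirroring ddr2_cs_size() from this hunk; the MB unit and the 128-bit doubling follow the WIDTH_128 test in k8_dbam_to_chip_select():

#include <stdbool.h>
#include <stdio.h>

/* Mirror of ddr2_cs_size() above: i is the 4-bit DBAM cs_mode field,
 * dct_width is true when the DCT runs 128 bits wide. */
static int ddr2_cs_size(unsigned i, bool dct_width)
{
	unsigned shift;

	if (i <= 2)
		shift = i;
	else if (!(i & 0x1))
		shift = i >> 1;
	else
		shift = (i + 1) >> 1;

	return 128 << (shift + !!dct_width);	/* chip-select size in MB */
}

int main(void)
{
	/* cs_mode 5 is odd, so shift = 3: 1024 MB on a 64-bit DCT,
	 * doubled to 2048 MB when the DCT is 128 bits wide. */
	printf("%d %d\n", ddr2_cs_size(5, false), ddr2_cs_size(5, true));
	return 0;
}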
1186 | 1130 | ||
1187 | /* | 1131 | /* |
@@ -1192,17 +1136,13 @@ static int k8_dbam_to_chip_select(struct amd64_pvt *pvt, int cs_mode) | |||
1192 | * Pass back: | 1136 | * Pass back: |
1193 | * contents of the DCL0_LOW register | 1137 | * contents of the DCL0_LOW register |
1194 | */ | 1138 | */ |
1195 | static int f10_early_channel_count(struct amd64_pvt *pvt) | 1139 | static int f1x_early_channel_count(struct amd64_pvt *pvt) |
1196 | { | 1140 | { |
1197 | int dbams[] = { DBAM0, DBAM1 }; | ||
1198 | int i, j, channels = 0; | 1141 | int i, j, channels = 0; |
1199 | u32 dbam; | ||
1200 | 1142 | ||
1201 | /* If we are in 128 bit mode, then we are using 2 channels */ | 1143 | /* On F10h, if we are in 128 bit mode, then we are using 2 channels */ |
1202 | if (pvt->dclr0 & F10_WIDTH_128) { | 1144 | if (boot_cpu_data.x86 == 0x10 && (pvt->dclr0 & WIDTH_128)) |
1203 | channels = 2; | 1145 | return 2; |
1204 | return channels; | ||
1205 | } | ||
1206 | 1146 | ||
1207 | /* | 1147 | /* |
1208 | * Need to check if in unganged mode: In such, there are 2 channels, | 1148 | * Need to check if in unganged mode: In such, there are 2 channels, |
@@ -1219,9 +1159,8 @@ static int f10_early_channel_count(struct amd64_pvt *pvt) | |||
1219 | * is more than just one DIMM present in unganged mode. Need to check | 1159 | * is more than just one DIMM present in unganged mode. Need to check |
1220 | * both controllers since DIMMs can be placed in either one. | 1160 | * both controllers since DIMMs can be placed in either one. |
1221 | */ | 1161 | */ |
1222 | for (i = 0; i < ARRAY_SIZE(dbams); i++) { | 1162 | for (i = 0; i < 2; i++) { |
1223 | if (amd64_read_pci_cfg(pvt->dram_f2_ctl, dbams[i], &dbam)) | 1163 | u32 dbam = (i ? pvt->dbam1 : pvt->dbam0); |
1224 | goto err_reg; | ||
1225 | 1164 | ||
1226 | for (j = 0; j < 4; j++) { | 1165 | for (j = 0; j < 4; j++) { |
1227 | if (DBAM_DIMM(j, dbam) > 0) { | 1166 | if (DBAM_DIMM(j, dbam) > 0) { |
@@ -1234,248 +1173,194 @@ static int f10_early_channel_count(struct amd64_pvt *pvt) | |||
1234 | if (channels > 2) | 1173 | if (channels > 2) |
1235 | channels = 2; | 1174 | channels = 2; |
1236 | 1175 | ||
1237 | debugf0("MCT channel count: %d\n", channels); | 1176 | amd64_info("MCT channel count: %d\n", channels); |
1238 | 1177 | ||
1239 | return channels; | 1178 | return channels; |
1240 | |||
1241 | err_reg: | ||
1242 | return -1; | ||
1243 | |||
1244 | } | 1179 | } |
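f1x_early_channel_count() walks both DBAM registers as four 4-bit DIMM size fields each. A rough sketch of that walk follows; the DBAM_DIMM() body is not part of this hunk, so the nibble extraction below is an assumption about its shape:

#include <stdint.h>

/* Assumed shape of DBAM_DIMM(): isolate the 4-bit size code of DIMM j. */
#define DBAM_DIMM(j, dbam)	(((dbam) >> ((j) * 4)) & 0xF)

/* Count the populated DIMM slots behind one DCT from its DBAM value;
 * as in the loop above, a non-zero size code marks a slot in use. */
static int dimms_in_dbam(uint32_t dbam)
{
	int j, dimms = 0;

	for (j = 0; j < 4; j++)
		if (DBAM_DIMM(j, dbam) > 0)
			dimms++;

	return dimms;
}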
1245 | 1180 | ||
1246 | static int f10_dbam_to_chip_select(struct amd64_pvt *pvt, int cs_mode) | 1181 | static int ddr3_cs_size(unsigned i, bool dct_width) |
1247 | { | 1182 | { |
1248 | int *dbam_map; | 1183 | unsigned shift = 0; |
1184 | int cs_size = 0; | ||
1249 | 1185 | ||
1250 | if (pvt->dchr0 & DDR3_MODE || pvt->dchr1 & DDR3_MODE) | 1186 | if (i == 0 || i == 3 || i == 4) |
1251 | dbam_map = ddr3_dbam; | 1187 | cs_size = -1; |
1188 | else if (i <= 2) | ||
1189 | shift = i; | ||
1190 | else if (i == 12) | ||
1191 | shift = 7; | ||
1192 | else if (!(i & 0x1)) | ||
1193 | shift = i >> 1; | ||
1252 | else | 1194 | else |
1253 | dbam_map = ddr2_dbam; | 1195 | shift = (i + 1) >> 1; |
1254 | 1196 | ||
1255 | return dbam_map[cs_mode]; | 1197 | if (cs_size != -1) |
1256 | } | 1198 | cs_size = (128 * (1 << !!dct_width)) << shift; |
1257 | 1199 | ||
1258 | /* Enable extended configuration access via 0xCF8 feature */ | 1200 | return cs_size; |
1259 | static void amd64_setup(struct amd64_pvt *pvt) | ||
1260 | { | ||
1261 | u32 reg; | ||
1262 | |||
1263 | amd64_read_pci_cfg(pvt->misc_f3_ctl, F10_NB_CFG_HIGH, ®); | ||
1264 | |||
1265 | pvt->flags.cf8_extcfg = !!(reg & F10_NB_CFG_LOW_ENABLE_EXT_CFG); | ||
1266 | reg |= F10_NB_CFG_LOW_ENABLE_EXT_CFG; | ||
1267 | pci_write_config_dword(pvt->misc_f3_ctl, F10_NB_CFG_HIGH, reg); | ||
1268 | } | 1201 | } |
1269 | 1202 | ||
1270 | /* Restore the extended configuration access via 0xCF8 feature */ | 1203 | static int f10_dbam_to_chip_select(struct amd64_pvt *pvt, u8 dct, |
1271 | static void amd64_teardown(struct amd64_pvt *pvt) | 1204 | unsigned cs_mode) |
1272 | { | 1205 | { |
1273 | u32 reg; | 1206 | u32 dclr = dct ? pvt->dclr1 : pvt->dclr0; |
1274 | |||
1275 | amd64_read_pci_cfg(pvt->misc_f3_ctl, F10_NB_CFG_HIGH, ®); | ||
1276 | 1207 | ||
1277 | reg &= ~F10_NB_CFG_LOW_ENABLE_EXT_CFG; | 1208 | WARN_ON(cs_mode > 11); |
1278 | if (pvt->flags.cf8_extcfg) | ||
1279 | reg |= F10_NB_CFG_LOW_ENABLE_EXT_CFG; | ||
1280 | pci_write_config_dword(pvt->misc_f3_ctl, F10_NB_CFG_HIGH, reg); | ||
1281 | } | ||
1282 | 1209 | ||
1283 | static u64 f10_get_error_address(struct mem_ctl_info *mci, | 1210 | if (pvt->dchr0 & DDR3_MODE || pvt->dchr1 & DDR3_MODE) |
1284 | struct err_regs *info) | 1211 | return ddr3_cs_size(cs_mode, dclr & WIDTH_128); |
1285 | { | 1212 | else |
1286 | return (((u64) (info->nbeah & 0xffff)) << 32) + | 1213 | return ddr2_cs_size(cs_mode, dclr & WIDTH_128); |
1287 | (info->nbeal & ~0x01); | ||
1288 | } | 1214 | } |
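f10_dbam_to_chip_select() picks the DDR3 arithmetic whenever either DCT reports DDR3_MODE in its DCHR register. A quick self-check of ddr3_cs_size() as defined above, with a few hand-computed sizes:

#include <assert.h>
#include <stdbool.h>

/* Copy of ddr3_cs_size() from this hunk. */
static int ddr3_cs_size(unsigned i, bool dct_width)
{
	unsigned shift = 0;
	int cs_size = 0;

	if (i == 0 || i == 3 || i == 4)
		cs_size = -1;
	else if (i <= 2)
		shift = i;
	else if (i == 12)
		shift = 7;
	else if (!(i & 0x1))
		shift = i >> 1;
	else
		shift = (i + 1) >> 1;

	if (cs_size != -1)
		cs_size = (128 * (1 << !!dct_width)) << shift;

	return cs_size;
}

int main(void)
{
	assert(ddr3_cs_size(1, false) == 256);		/* shift 1, 64-bit DCT */
	assert(ddr3_cs_size(5, false) == 1024);		/* odd -> shift 3 */
	assert(ddr3_cs_size(11, true) == 16384);	/* shift 6, 128-bit DCT */
	assert(ddr3_cs_size(4, false) == -1);		/* reserved encoding */
	return 0;
}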
1289 | 1215 | ||
1290 | /* | 1216 | /* |
1291 | * Read the Base and Limit registers for F10 based Memory controllers. Extract | 1217 | * F15h supports only 64bit DCT interfaces |
1292 | * fields from the 'raw' reg into separate data fields. | ||
1293 | * | ||
1294 | * Isolates: BASE, LIMIT, IntlvEn, IntlvSel, RW_EN. | ||
1295 | */ | 1218 | */ |
1296 | static void f10_read_dram_base_limit(struct amd64_pvt *pvt, int dram) | 1219 | static int f15_dbam_to_chip_select(struct amd64_pvt *pvt, u8 dct, |
1220 | unsigned cs_mode) | ||
1297 | { | 1221 | { |
1298 | u32 high_offset, low_offset, high_base, low_base, high_limit, low_limit; | 1222 | WARN_ON(cs_mode > 12); |
1299 | |||
1300 | low_offset = K8_DRAM_BASE_LOW + (dram << 3); | ||
1301 | high_offset = F10_DRAM_BASE_HIGH + (dram << 3); | ||
1302 | |||
1303 | /* read the 'raw' DRAM BASE Address register */ | ||
1304 | amd64_read_pci_cfg(pvt->addr_f1_ctl, low_offset, &low_base); | ||
1305 | 1223 | ||
1306 | /* Read from the ECS data register */ | 1224 | return ddr3_cs_size(cs_mode, false); |
1307 | amd64_read_pci_cfg(pvt->addr_f1_ctl, high_offset, &high_base); | ||
1308 | |||
1309 | /* Extract parts into separate data entries */ | ||
1310 | pvt->dram_rw_en[dram] = (low_base & 0x3); | ||
1311 | |||
1312 | if (pvt->dram_rw_en[dram] == 0) | ||
1313 | return; | ||
1314 | |||
1315 | pvt->dram_IntlvEn[dram] = (low_base >> 8) & 0x7; | ||
1316 | |||
1317 | pvt->dram_base[dram] = (((u64)high_base & 0x000000FF) << 40) | | ||
1318 | (((u64)low_base & 0xFFFF0000) << 8); | ||
1319 | |||
1320 | low_offset = K8_DRAM_LIMIT_LOW + (dram << 3); | ||
1321 | high_offset = F10_DRAM_LIMIT_HIGH + (dram << 3); | ||
1322 | |||
1323 | /* read the 'raw' LIMIT registers */ | ||
1324 | amd64_read_pci_cfg(pvt->addr_f1_ctl, low_offset, &low_limit); | ||
1325 | |||
1326 | /* Read from the ECS data register for the HIGH portion */ | ||
1327 | amd64_read_pci_cfg(pvt->addr_f1_ctl, high_offset, &high_limit); | ||
1328 | |||
1329 | pvt->dram_DstNode[dram] = (low_limit & 0x7); | ||
1330 | pvt->dram_IntlvSel[dram] = (low_limit >> 8) & 0x7; | ||
1331 | |||
1332 | /* | ||
1333 | * Extract address values and form a LIMIT address. Limit is the HIGHEST | ||
1334 | * memory location of the region, so low 24 bits need to be all ones. | ||
1335 | */ | ||
1336 | pvt->dram_limit[dram] = (((u64)high_limit & 0x000000FF) << 40) | | ||
1337 | (((u64) low_limit & 0xFFFF0000) << 8) | | ||
1338 | 0x00FFFFFF; | ||
1339 | } | 1225 | } |
1340 | 1226 | ||
1341 | static void f10_read_dram_ctl_register(struct amd64_pvt *pvt) | 1227 | static void read_dram_ctl_register(struct amd64_pvt *pvt) |
1342 | { | 1228 | { |
1343 | 1229 | ||
1344 | if (!amd64_read_pci_cfg(pvt->dram_f2_ctl, F10_DCTL_SEL_LOW, | 1230 | if (boot_cpu_data.x86 == 0xf) |
1345 | &pvt->dram_ctl_select_low)) { | 1231 | return; |
1346 | debugf0("F2x110 (DCTL Sel. Low): 0x%08x, " | 1232 | |
1347 | "High range addresses at: 0x%x\n", | 1233 | if (!amd64_read_dct_pci_cfg(pvt, DCT_SEL_LO, &pvt->dct_sel_lo)) { |
1348 | pvt->dram_ctl_select_low, | 1234 | debugf0("F2x110 (DCTSelLow): 0x%08x, High range addrs at: 0x%x\n", |
1349 | dct_sel_baseaddr(pvt)); | 1235 | pvt->dct_sel_lo, dct_sel_baseaddr(pvt)); |
1350 | 1236 | ||
1351 | debugf0(" DCT mode: %s, All DCTs on: %s\n", | 1237 | debugf0(" DCTs operate in %s mode.\n", |
1352 | (dct_ganging_enabled(pvt) ? "ganged" : "unganged"), | 1238 | (dct_ganging_enabled(pvt) ? "ganged" : "unganged")); |
1353 | (dct_dram_enabled(pvt) ? "yes" : "no")); | ||
1354 | 1239 | ||
1355 | if (!dct_ganging_enabled(pvt)) | 1240 | if (!dct_ganging_enabled(pvt)) |
1356 | debugf0(" Address range split per DCT: %s\n", | 1241 | debugf0(" Address range split per DCT: %s\n", |
1357 | (dct_high_range_enabled(pvt) ? "yes" : "no")); | 1242 | (dct_high_range_enabled(pvt) ? "yes" : "no")); |
1358 | 1243 | ||
1359 | debugf0(" DCT data interleave for ECC: %s, " | 1244 | debugf0(" data interleave for ECC: %s, " |
1360 | "DRAM cleared since last warm reset: %s\n", | 1245 | "DRAM cleared since last warm reset: %s\n", |
1361 | (dct_data_intlv_enabled(pvt) ? "enabled" : "disabled"), | 1246 | (dct_data_intlv_enabled(pvt) ? "enabled" : "disabled"), |
1362 | (dct_memory_cleared(pvt) ? "yes" : "no")); | 1247 | (dct_memory_cleared(pvt) ? "yes" : "no")); |
1363 | 1248 | ||
1364 | debugf0(" DCT channel interleave: %s, " | 1249 | debugf0(" channel interleave: %s, " |
1365 | "DCT interleave bits selector: 0x%x\n", | 1250 | "interleave bits selector: 0x%x\n", |
1366 | (dct_interleave_enabled(pvt) ? "enabled" : "disabled"), | 1251 | (dct_interleave_enabled(pvt) ? "enabled" : "disabled"), |
1367 | dct_sel_interleave_addr(pvt)); | 1252 | dct_sel_interleave_addr(pvt)); |
1368 | } | 1253 | } |
1369 | 1254 | ||
1370 | amd64_read_pci_cfg(pvt->dram_f2_ctl, F10_DCTL_SEL_HIGH, | 1255 | amd64_read_dct_pci_cfg(pvt, DCT_SEL_HI, &pvt->dct_sel_hi); |
1371 | &pvt->dram_ctl_select_high); | ||
1372 | } | 1256 | } |
1373 | 1257 | ||
1374 | /* | 1258 | /* |
1375 | * determine channel based on the interleaving mode: F10h BKDG, 2.8.9 Memory | 1259 | * Determine channel (DCT) based on the interleaving mode: F10h BKDG, 2.8.9 Memory |
1376 | * Interleaving Modes. | 1260 | * Interleaving Modes. |
1377 | */ | 1261 | */ |
1378 | static u32 f10_determine_channel(struct amd64_pvt *pvt, u64 sys_addr, | 1262 | static u8 f1x_determine_channel(struct amd64_pvt *pvt, u64 sys_addr, |
1379 | int hi_range_sel, u32 intlv_en) | 1263 | bool hi_range_sel, u8 intlv_en) |
1380 | { | 1264 | { |
1381 | u32 cs, temp, dct_sel_high = (pvt->dram_ctl_select_low >> 1) & 1; | 1265 | u8 dct_sel_high = (pvt->dct_sel_lo >> 1) & 1; |
1382 | 1266 | ||
1383 | if (dct_ganging_enabled(pvt)) | 1267 | if (dct_ganging_enabled(pvt)) |
1384 | cs = 0; | 1268 | return 0; |
1385 | else if (hi_range_sel) | ||
1386 | cs = dct_sel_high; | ||
1387 | else if (dct_interleave_enabled(pvt)) { | ||
1388 | /* | ||
1389 | * see F2x110[DctSelIntLvAddr] - channel interleave mode | ||
1390 | */ | ||
1391 | if (dct_sel_interleave_addr(pvt) == 0) | ||
1392 | cs = sys_addr >> 6 & 1; | ||
1393 | else if ((dct_sel_interleave_addr(pvt) >> 1) & 1) { | ||
1394 | temp = hweight_long((u32) ((sys_addr >> 16) & 0x1F)) % 2; | ||
1395 | 1269 | ||
1396 | if (dct_sel_interleave_addr(pvt) & 1) | 1270 | if (hi_range_sel) |
1397 | cs = (sys_addr >> 9 & 1) ^ temp; | 1271 | return dct_sel_high; |
1398 | else | ||
1399 | cs = (sys_addr >> 6 & 1) ^ temp; | ||
1400 | } else if (intlv_en & 4) | ||
1401 | cs = sys_addr >> 15 & 1; | ||
1402 | else if (intlv_en & 2) | ||
1403 | cs = sys_addr >> 14 & 1; | ||
1404 | else if (intlv_en & 1) | ||
1405 | cs = sys_addr >> 13 & 1; | ||
1406 | else | ||
1407 | cs = sys_addr >> 12 & 1; | ||
1408 | } else if (dct_high_range_enabled(pvt) && !dct_ganging_enabled(pvt)) | ||
1409 | cs = ~dct_sel_high & 1; | ||
1410 | else | ||
1411 | cs = 0; | ||
1412 | 1272 | ||
1413 | return cs; | 1273 | /* |
1414 | } | 1274 | * see F2x110[DctSelIntLvAddr] - channel interleave mode |
1275 | */ | ||
1276 | if (dct_interleave_enabled(pvt)) { | ||
1277 | u8 intlv_addr = dct_sel_interleave_addr(pvt); | ||
1415 | 1278 | ||
1416 | static inline u32 f10_map_intlv_en_to_shift(u32 intlv_en) | 1279 | /* return DCT select function: 0=DCT0, 1=DCT1 */ |
1417 | { | 1280 | if (!intlv_addr) |
1418 | if (intlv_en == 1) | 1281 | return sys_addr >> 6 & 1; |
1419 | return 1; | 1282 | |
1420 | else if (intlv_en == 3) | 1283 | if (intlv_addr & 0x2) { |
1421 | return 2; | 1284 | u8 shift = intlv_addr & 0x1 ? 9 : 6; |
1422 | else if (intlv_en == 7) | 1285 | u32 temp = hweight_long((u32) ((sys_addr >> 16) & 0x1F)) % 2; |
1423 | return 3; | 1286 | |
1287 | return ((sys_addr >> shift) & 1) ^ temp; | ||
1288 | } | ||
1289 | |||
1290 | return (sys_addr >> (12 + hweight8(intlv_en))) & 1; | ||
1291 | } | ||
1292 | |||
1293 | if (dct_high_range_enabled(pvt)) | ||
1294 | return ~dct_sel_high & 1; | ||
1424 | 1295 | ||
1425 | return 0; | 1296 | return 0; |
1426 | } | 1297 | } |
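In the simplest interleaved case handled by f1x_determine_channel() (DctSelIntLvAddr == 0), address bit 6 picks the DCT, so successive 64-byte cache lines alternate between the two channels. A minimal illustration of just that case:

#include <stdint.h>
#include <stdio.h>

/* Channel select for DctSelIntLvAddr == 0, as in the hunk above:
 * bit 6 of the system address chooses DCT0 or DCT1. */
static unsigned channel_from_bit6(uint64_t sys_addr)
{
	return (sys_addr >> 6) & 1;
}

int main(void)
{
	uint64_t addr;

	/* Four consecutive 64-byte lines land on DCT0/1/0/1. */
	for (addr = 0x1000; addr < 0x1000 + 4 * 64; addr += 64)
		printf("0x%llx -> DCT%u\n",
		       (unsigned long long)addr, channel_from_bit6(addr));
	return 0;
}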
1427 | 1298 | ||
1428 | /* See F10h BKDG, 2.8.10.2 DctSelBaseOffset Programming */ | 1299 | /* Convert the sys_addr to the normalized DCT address */ |
1429 | static inline u64 f10_get_base_addr_offset(u64 sys_addr, int hi_range_sel, | 1300 | static u64 f1x_get_norm_dct_addr(struct amd64_pvt *pvt, unsigned range, |
1430 | u32 dct_sel_base_addr, | 1301 | u64 sys_addr, bool hi_rng, |
1431 | u64 dct_sel_base_off, | 1302 | u32 dct_sel_base_addr) |
1432 | u32 hole_valid, u32 hole_off, | ||
1433 | u64 dram_base) | ||
1434 | { | 1303 | { |
1435 | u64 chan_off; | 1304 | u64 chan_off; |
1305 | u64 dram_base = get_dram_base(pvt, range); | ||
1306 | u64 hole_off = f10_dhar_offset(pvt); | ||
1307 | u64 dct_sel_base_off = (pvt->dct_sel_hi & 0xFFFFFC00) << 16; | ||
1436 | 1308 | ||
1437 | if (hi_range_sel) { | 1309 | if (hi_rng) { |
1438 | if (!(dct_sel_base_addr & 0xFFFF0000) && | 1310 | /* |
1439 | hole_valid && (sys_addr >= 0x100000000ULL)) | 1311 | * if |
1440 | chan_off = hole_off << 16; | 1312 | * base address of high range is below 4Gb |
1313 | * (bits [47:27] at [31:11]) | ||
1314 | * DRAM address space on this DCT is hoisted above 4Gb && | ||
1315 | * sys_addr > 4Gb | ||
1316 | * | ||
1317 | * remove hole offset from sys_addr | ||
1318 | * else | ||
1319 | * remove high range offset from sys_addr | ||
1320 | */ | ||
1321 | if ((!(dct_sel_base_addr >> 16) || | ||
1322 | dct_sel_base_addr < dhar_base(pvt)) && | ||
1323 | dhar_valid(pvt) && | ||
1324 | (sys_addr >= BIT_64(32))) | ||
1325 | chan_off = hole_off; | ||
1441 | else | 1326 | else |
1442 | chan_off = dct_sel_base_off; | 1327 | chan_off = dct_sel_base_off; |
1443 | } else { | 1328 | } else { |
1444 | if (hole_valid && (sys_addr >= 0x100000000ULL)) | 1329 | /* |
1445 | chan_off = hole_off << 16; | 1330 | * if |
1331 | * we have a valid hole && | ||
1332 | * sys_addr > 4Gb | ||
1333 | * | ||
1334 | * remove hole | ||
1335 | * else | ||
1336 | * remove dram base to normalize to DCT address | ||
1337 | */ | ||
1338 | if (dhar_valid(pvt) && (sys_addr >= BIT_64(32))) | ||
1339 | chan_off = hole_off; | ||
1446 | else | 1340 | else |
1447 | chan_off = dram_base & 0xFFFFF8000000ULL; | 1341 | chan_off = dram_base; |
1448 | } | 1342 | } |
1449 | 1343 | ||
1450 | return (sys_addr & 0x0000FFFFFFFFFFC0ULL) - | 1344 | return (sys_addr & GENMASK(6,47)) - (chan_off & GENMASK(23,47)); |
1451 | (chan_off & 0x0000FFFFFF800000ULL); | ||
1452 | } | 1345 | } |
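The normalization step at the end of f1x_get_norm_dct_addr() keeps sys_addr bits [47:6] and subtracts the chosen offset's bits [47:23]; the bit ranges follow the 0x0000FFFFFFFFFFC0 and 0x0000FFFFFF800000 masks used by the code this hunk replaces. A worked sketch with made-up example values (a non-hoisted low range whose offset is simply the DRAM base):

#include <stdint.h>
#include <stdio.h>

/* Final subtraction of f1x_get_norm_dct_addr(), spelled out with the
 * explicit masks for bits [47:6] and [47:23]. */
static uint64_t norm_dct_addr(uint64_t sys_addr, uint64_t chan_off)
{
	return (sys_addr & 0x0000FFFFFFFFFFC0ULL) -
	       (chan_off & 0x0000FFFFFF800000ULL);
}

int main(void)
{
	uint64_t dram_base = 0x100000000ULL;	/* example: range starts at 4G */
	uint64_t sys_addr  = 0x123456780ULL;	/* example error address */

	/* prints 0x23456780 */
	printf("0x%llx\n",
	       (unsigned long long)norm_dct_addr(sys_addr, dram_base));
	return 0;
}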
1453 | 1346 | ||
1454 | /* Hack for the time being - Can we get this from BIOS?? */ | ||
1455 | #define CH0SPARE_RANK 0 | ||
1456 | #define CH1SPARE_RANK 1 | ||
1457 | |||
1458 | /* | 1347 | /* |
1459 | * checks if the csrow passed in is marked as SPARED, if so returns the new | 1348 | * checks if the csrow passed in is marked as SPARED, if so returns the new |
1460 | * spare row | 1349 | * spare row |
1461 | */ | 1350 | */ |
1462 | static inline int f10_process_possible_spare(int csrow, | 1351 | static int f10_process_possible_spare(struct amd64_pvt *pvt, u8 dct, int csrow) |
1463 | u32 cs, struct amd64_pvt *pvt) | 1352 | { |
1464 | { | 1353 | int tmp_cs; |
1465 | u32 swap_done; | 1354 | |
1466 | u32 bad_dram_cs; | 1355 | if (online_spare_swap_done(pvt, dct) && |
1467 | 1356 | csrow == online_spare_bad_dramcs(pvt, dct)) { | |
1468 | /* Depending on channel, isolate respective SPARING info */ | 1357 | |
1469 | if (cs) { | 1358 | for_each_chip_select(tmp_cs, dct, pvt) { |
1470 | swap_done = F10_ONLINE_SPARE_SWAPDONE1(pvt->online_spare); | 1359 | if (chip_select_base(tmp_cs, dct, pvt) & 0x2) { |
1471 | bad_dram_cs = F10_ONLINE_SPARE_BADDRAM_CS1(pvt->online_spare); | 1360 | csrow = tmp_cs; |
1472 | if (swap_done && (csrow == bad_dram_cs)) | 1361 | break; |
1473 | csrow = CH1SPARE_RANK; | 1362 | } |
1474 | } else { | 1363 | } |
1475 | swap_done = F10_ONLINE_SPARE_SWAPDONE0(pvt->online_spare); | ||
1476 | bad_dram_cs = F10_ONLINE_SPARE_BADDRAM_CS0(pvt->online_spare); | ||
1477 | if (swap_done && (csrow == bad_dram_cs)) | ||
1478 | csrow = CH0SPARE_RANK; | ||
1479 | } | 1364 | } |
1480 | return csrow; | 1365 | return csrow; |
1481 | } | 1366 | } |
@@ -1488,53 +1373,39 @@ static inline int f10_process_possible_spare(int csrow, | |||
1488 | * -EINVAL: NOT FOUND | 1373 | * -EINVAL: NOT FOUND |
1489 | * 0..csrow = Chip-Select Row | 1374 | * 0..csrow = Chip-Select Row |
1490 | */ | 1375 | */ |
1491 | static int f10_lookup_addr_in_dct(u32 in_addr, u32 nid, u32 cs) | 1376 | static int f1x_lookup_addr_in_dct(u64 in_addr, u32 nid, u8 dct) |
1492 | { | 1377 | { |
1493 | struct mem_ctl_info *mci; | 1378 | struct mem_ctl_info *mci; |
1494 | struct amd64_pvt *pvt; | 1379 | struct amd64_pvt *pvt; |
1495 | u32 cs_base, cs_mask; | 1380 | u64 cs_base, cs_mask; |
1496 | int cs_found = -EINVAL; | 1381 | int cs_found = -EINVAL; |
1497 | int csrow; | 1382 | int csrow; |
1498 | 1383 | ||
1499 | mci = mci_lookup[nid]; | 1384 | mci = mcis[nid]; |
1500 | if (!mci) | 1385 | if (!mci) |
1501 | return cs_found; | 1386 | return cs_found; |
1502 | 1387 | ||
1503 | pvt = mci->pvt_info; | 1388 | pvt = mci->pvt_info; |
1504 | 1389 | ||
1505 | debugf1("InputAddr=0x%x channelselect=%d\n", in_addr, cs); | 1390 | debugf1("input addr: 0x%llx, DCT: %d\n", in_addr, dct); |
1506 | |||
1507 | for (csrow = 0; csrow < pvt->cs_count; csrow++) { | ||
1508 | 1391 | ||
1509 | cs_base = amd64_get_dct_base(pvt, cs, csrow); | 1392 | for_each_chip_select(csrow, dct, pvt) { |
1510 | if (!(cs_base & K8_DCSB_CS_ENABLE)) | 1393 | if (!csrow_enabled(csrow, dct, pvt)) |
1511 | continue; | 1394 | continue; |
1512 | 1395 | ||
1513 | /* | 1396 | get_cs_base_and_mask(pvt, csrow, dct, &cs_base, &cs_mask); |
1514 | * We have an ENABLED CSROW, Isolate just the MASK bits of the | ||
1515 | * target: [28:19] and [13:5], which map to [36:27] and [21:13] | ||
1516 | * of the actual address. | ||
1517 | */ | ||
1518 | cs_base &= REV_F_F1Xh_DCSB_BASE_BITS; | ||
1519 | |||
1520 | /* | ||
1521 | * Get the DCT Mask, and ENABLE the reserved bits: [18:16] and | ||
1522 | * [4:0] to become ON. Then mask off bits [28:0] ([36:8]) | ||
1523 | */ | ||
1524 | cs_mask = amd64_get_dct_mask(pvt, cs, csrow); | ||
1525 | 1397 | ||
1526 | debugf1(" CSROW=%d CSBase=0x%x RAW CSMask=0x%x\n", | 1398 | debugf1(" CSROW=%d CSBase=0x%llx CSMask=0x%llx\n", |
1527 | csrow, cs_base, cs_mask); | 1399 | csrow, cs_base, cs_mask); |
1528 | 1400 | ||
1529 | cs_mask = (cs_mask | 0x0007C01F) & 0x1FFFFFFF; | 1401 | cs_mask = ~cs_mask; |
1530 | 1402 | ||
1531 | debugf1(" Final CSMask=0x%x\n", cs_mask); | 1403 | debugf1(" (InputAddr & ~CSMask)=0x%llx " |
1532 | debugf1(" (InputAddr & ~CSMask)=0x%x " | 1404 | "(CSBase & ~CSMask)=0x%llx\n", |
1533 | "(CSBase & ~CSMask)=0x%x\n", | 1405 | (in_addr & cs_mask), (cs_base & cs_mask)); |
1534 | (in_addr & ~cs_mask), (cs_base & ~cs_mask)); | ||
1535 | 1406 | ||
1536 | if ((in_addr & ~cs_mask) == (cs_base & ~cs_mask)) { | 1407 | if ((in_addr & cs_mask) == (cs_base & cs_mask)) { |
1537 | cs_found = f10_process_possible_spare(csrow, cs, pvt); | 1408 | cs_found = f10_process_possible_spare(pvt, dct, csrow); |
1538 | 1409 | ||
1539 | debugf1(" MATCH csrow=%d\n", cs_found); | 1410 | debugf1(" MATCH csrow=%d\n", cs_found); |
1540 | break; | 1411 | break; |
@@ -1543,38 +1414,71 @@ static int f10_lookup_addr_in_dct(u32 in_addr, u32 nid, u32 cs) | |||
1543 | return cs_found; | 1414 | return cs_found; |
1544 | } | 1415 | } |
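The match condition at the heart of f1x_lookup_addr_in_dct(), pulled out for clarity: a normalized DCT address belongs to a chip select when it agrees with the CS base on every bit the CS mask does not cover. get_cs_base_and_mask() is assumed to hand back the decoded base and mask; the negation below mirrors the cs_mask = ~cs_mask step above.

#include <stdint.h>
#include <stdbool.h>

/* Returns true when in_addr falls inside the chip select described by
 * cs_base/cs_mask, using the same compare as the loop above. */
static bool cs_matches(uint64_t in_addr, uint64_t cs_base, uint64_t cs_mask)
{
	uint64_t care = ~cs_mask;	/* bits that must match */

	return (in_addr & care) == (cs_base & care);
}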
1545 | 1416 | ||
1546 | /* For a given @dram_range, check if @sys_addr falls within it. */ | 1417 | /* |
1547 | static int f10_match_to_this_node(struct amd64_pvt *pvt, int dram_range, | 1418 | * See F2x10C. Non-interleaved graphics framebuffer memory under the 16G is |
1548 | u64 sys_addr, int *nid, int *chan_sel) | 1419 | * swapped with a region located at the bottom of memory so that the GPU can use |
1420 | * the interleaved region and thus two channels. | ||
1421 | */ | ||
1422 | static u64 f1x_swap_interleaved_region(struct amd64_pvt *pvt, u64 sys_addr) | ||
1549 | { | 1423 | { |
1550 | int node_id, cs_found = -EINVAL, high_range = 0; | 1424 | u32 swap_reg, swap_base, swap_limit, rgn_size, tmp_addr; |
1551 | u32 intlv_en, intlv_sel, intlv_shift, hole_off; | ||
1552 | u32 hole_valid, tmp, dct_sel_base, channel; | ||
1553 | u64 dram_base, chan_addr, dct_sel_base_off; | ||
1554 | 1425 | ||
1555 | dram_base = pvt->dram_base[dram_range]; | 1426 | if (boot_cpu_data.x86 == 0x10) { |
1556 | intlv_en = pvt->dram_IntlvEn[dram_range]; | 1427 | /* only revC3 and revE have that feature */ |
1428 | if (boot_cpu_data.x86_model < 4 || | ||
1429 | (boot_cpu_data.x86_model < 0xa && | ||
1430 | boot_cpu_data.x86_mask < 3)) | ||
1431 | return sys_addr; | ||
1432 | } | ||
1557 | 1433 | ||
1558 | node_id = pvt->dram_DstNode[dram_range]; | 1434 | amd64_read_dct_pci_cfg(pvt, SWAP_INTLV_REG, &swap_reg); |
1559 | intlv_sel = pvt->dram_IntlvSel[dram_range]; | ||
1560 | 1435 | ||
1561 | debugf1("(dram=%d) Base=0x%llx SystemAddr= 0x%llx Limit=0x%llx\n", | 1436 | if (!(swap_reg & 0x1)) |
1562 | dram_range, dram_base, sys_addr, pvt->dram_limit[dram_range]); | 1437 | return sys_addr; |
1563 | 1438 | ||
1564 | /* | 1439 | swap_base = (swap_reg >> 3) & 0x7f; |
1565 | * This assumes that one node's DHAR is the same as all the other | 1440 | swap_limit = (swap_reg >> 11) & 0x7f; |
1566 | * nodes' DHAR. | 1441 | rgn_size = (swap_reg >> 20) & 0x7f; |
1567 | */ | 1442 | tmp_addr = sys_addr >> 27; |
1568 | hole_off = (pvt->dhar & 0x0000FF80); | 1443 | |
1569 | hole_valid = (pvt->dhar & 0x1); | 1444 | if (!(sys_addr >> 34) && |
1570 | dct_sel_base_off = (pvt->dram_ctl_select_high & 0xFFFFFC00) << 16; | 1445 | (((tmp_addr >= swap_base) && |
1446 | (tmp_addr <= swap_limit)) || | ||
1447 | (tmp_addr < rgn_size))) | ||
1448 | return sys_addr ^ (u64)swap_base << 27; | ||
1571 | 1449 | ||
1572 | debugf1(" HoleOffset=0x%x HoleValid=0x%x IntlvSel=0x%x\n", | 1450 | return sys_addr; |
1573 | hole_off, hole_valid, intlv_sel); | 1451 | } |
1574 | 1452 | ||
1575 | if (intlv_en || | 1453 | /* For a given @dram_range, check if @sys_addr falls within it. */ |
1576 | (intlv_sel != ((sys_addr >> 12) & intlv_en))) | 1454 | static int f1x_match_to_this_node(struct amd64_pvt *pvt, unsigned range, |
1455 | u64 sys_addr, int *nid, int *chan_sel) | ||
1456 | { | ||
1457 | int cs_found = -EINVAL; | ||
1458 | u64 chan_addr; | ||
1459 | u32 dct_sel_base; | ||
1460 | u8 channel; | ||
1461 | bool high_range = false; | ||
1462 | |||
1463 | u8 node_id = dram_dst_node(pvt, range); | ||
1464 | u8 intlv_en = dram_intlv_en(pvt, range); | ||
1465 | u32 intlv_sel = dram_intlv_sel(pvt, range); | ||
1466 | |||
1467 | debugf1("(range %d) SystemAddr= 0x%llx Limit=0x%llx\n", | ||
1468 | range, sys_addr, get_dram_limit(pvt, range)); | ||
1469 | |||
1470 | if (dhar_valid(pvt) && | ||
1471 | dhar_base(pvt) <= sys_addr && | ||
1472 | sys_addr < BIT_64(32)) { | ||
1473 | amd64_warn("Huh? Address is in the MMIO hole: 0x%016llx\n", | ||
1474 | sys_addr); | ||
1577 | return -EINVAL; | 1475 | return -EINVAL; |
1476 | } | ||
1477 | |||
1478 | if (intlv_en && (intlv_sel != ((sys_addr >> 12) & intlv_en))) | ||
1479 | return -EINVAL; | ||
1480 | |||
1481 | sys_addr = f1x_swap_interleaved_region(pvt, sys_addr); | ||
1578 | 1482 | ||
1579 | dct_sel_base = dct_sel_baseaddr(pvt); | 1483 | dct_sel_base = dct_sel_baseaddr(pvt); |
1580 | 1484 | ||
@@ -1585,38 +1489,41 @@ static int f10_match_to_this_node(struct amd64_pvt *pvt, int dram_range, | |||
1585 | if (dct_high_range_enabled(pvt) && | 1489 | if (dct_high_range_enabled(pvt) && |
1586 | !dct_ganging_enabled(pvt) && | 1490 | !dct_ganging_enabled(pvt) && |
1587 | ((sys_addr >> 27) >= (dct_sel_base >> 11))) | 1491 | ((sys_addr >> 27) >= (dct_sel_base >> 11))) |
1588 | high_range = 1; | 1492 | high_range = true; |
1589 | 1493 | ||
1590 | channel = f10_determine_channel(pvt, sys_addr, high_range, intlv_en); | 1494 | channel = f1x_determine_channel(pvt, sys_addr, high_range, intlv_en); |
1591 | 1495 | ||
1592 | chan_addr = f10_get_base_addr_offset(sys_addr, high_range, dct_sel_base, | 1496 | chan_addr = f1x_get_norm_dct_addr(pvt, range, sys_addr, |
1593 | dct_sel_base_off, hole_valid, | 1497 | high_range, dct_sel_base); |
1594 | hole_off, dram_base); | ||
1595 | 1498 | ||
1596 | intlv_shift = f10_map_intlv_en_to_shift(intlv_en); | 1499 | /* Remove node interleaving, see F1x120 */ |
1500 | if (intlv_en) | ||
1501 | chan_addr = ((chan_addr >> (12 + hweight8(intlv_en))) << 12) | | ||
1502 | (chan_addr & 0xfff); | ||
1597 | 1503 | ||
1598 | /* remove Node ID (in case of memory interleaving) */ | 1504 | /* remove channel interleave */ |
1599 | tmp = chan_addr & 0xFC0; | ||
1600 | |||
1601 | chan_addr = ((chan_addr >> intlv_shift) & 0xFFFFFFFFF000ULL) | tmp; | ||
1602 | |||
1603 | /* remove channel interleave and hash */ | ||
1604 | if (dct_interleave_enabled(pvt) && | 1505 | if (dct_interleave_enabled(pvt) && |
1605 | !dct_high_range_enabled(pvt) && | 1506 | !dct_high_range_enabled(pvt) && |
1606 | !dct_ganging_enabled(pvt)) { | 1507 | !dct_ganging_enabled(pvt)) { |
1607 | if (dct_sel_interleave_addr(pvt) != 1) | 1508 | |
1608 | chan_addr = (chan_addr >> 1) & 0xFFFFFFFFFFFFFFC0ULL; | 1509 | if (dct_sel_interleave_addr(pvt) != 1) { |
1609 | else { | 1510 | if (dct_sel_interleave_addr(pvt) == 0x3) |
1610 | tmp = chan_addr & 0xFC0; | 1511 | /* hash 9 */ |
1611 | chan_addr = ((chan_addr & 0xFFFFFFFFFFFFC000ULL) >> 1) | 1512 | chan_addr = ((chan_addr >> 10) << 9) | |
1612 | | tmp; | 1513 | (chan_addr & 0x1ff); |
1613 | } | 1514 | else |
1515 | /* A[6] or hash 6 */ | ||
1516 | chan_addr = ((chan_addr >> 7) << 6) | | ||
1517 | (chan_addr & 0x3f); | ||
1518 | } else | ||
1519 | /* A[12] */ | ||
1520 | chan_addr = ((chan_addr >> 13) << 12) | | ||
1521 | (chan_addr & 0xfff); | ||
1614 | } | 1522 | } |
1615 | 1523 | ||
1616 | debugf1(" (ChannelAddrLong=0x%llx) >> 8 becomes InputAddr=0x%x\n", | 1524 | debugf1(" Normalized DCT addr: 0x%llx\n", chan_addr); |
1617 | chan_addr, (u32)(chan_addr >> 8)); | ||
1618 | 1525 | ||
1619 | cs_found = f10_lookup_addr_in_dct(chan_addr >> 8, node_id, channel); | 1526 | cs_found = f1x_lookup_addr_in_dct(chan_addr, node_id, channel); |
1620 | 1527 | ||
1621 | if (cs_found >= 0) { | 1528 | if (cs_found >= 0) { |
1622 | *nid = node_id; | 1529 | *nid = node_id; |
@@ -1625,23 +1532,21 @@ static int f10_match_to_this_node(struct amd64_pvt *pvt, int dram_range, | |||
1625 | return cs_found; | 1532 | return cs_found; |
1626 | } | 1533 | } |
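Node-interleave removal in f1x_match_to_this_node() (see F1x120): with intlv_en = 1, 3 or 7, one, two or three address bits above bit 11 select the node, and dropping them compacts the address seen by this DCT. A standalone sketch; __builtin_popcount stands in for the kernel's hweight8():

#include <stdint.h>

/* Squeeze the node-select bits out of a channel address, as done
 * right after f1x_get_norm_dct_addr() in the hunk above. */
static uint64_t drop_node_bits(uint64_t chan_addr, uint8_t intlv_en)
{
	unsigned bits = __builtin_popcount(intlv_en);

	if (!intlv_en)
		return chan_addr;

	return ((chan_addr >> (12 + bits)) << 12) | (chan_addr & 0xfff);
}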
1627 | 1534 | ||
1628 | static int f10_translate_sysaddr_to_cs(struct amd64_pvt *pvt, u64 sys_addr, | 1535 | static int f1x_translate_sysaddr_to_cs(struct amd64_pvt *pvt, u64 sys_addr, |
1629 | int *node, int *chan_sel) | 1536 | int *node, int *chan_sel) |
1630 | { | 1537 | { |
1631 | int dram_range, cs_found = -EINVAL; | 1538 | int cs_found = -EINVAL; |
1632 | u64 dram_base, dram_limit; | 1539 | unsigned range; |
1633 | 1540 | ||
1634 | for (dram_range = 0; dram_range < DRAM_REG_COUNT; dram_range++) { | 1541 | for (range = 0; range < DRAM_RANGES; range++) { |
1635 | 1542 | ||
1636 | if (!pvt->dram_rw_en[dram_range]) | 1543 | if (!dram_rw(pvt, range)) |
1637 | continue; | 1544 | continue; |
1638 | 1545 | ||
1639 | dram_base = pvt->dram_base[dram_range]; | 1546 | if ((get_dram_base(pvt, range) <= sys_addr) && |
1640 | dram_limit = pvt->dram_limit[dram_range]; | 1547 | (get_dram_limit(pvt, range) >= sys_addr)) { |
1641 | |||
1642 | if ((dram_base <= sys_addr) && (sys_addr <= dram_limit)) { | ||
1643 | 1548 | ||
1644 | cs_found = f10_match_to_this_node(pvt, dram_range, | 1549 | cs_found = f1x_match_to_this_node(pvt, range, |
1645 | sys_addr, node, | 1550 | sys_addr, node, |
1646 | chan_sel); | 1551 | chan_sel); |
1647 | if (cs_found >= 0) | 1552 | if (cs_found >= 0) |
@@ -1658,16 +1563,14 @@ static int f10_translate_sysaddr_to_cs(struct amd64_pvt *pvt, u64 sys_addr, | |||
1658 | * The @sys_addr is usually an error address received from the hardware | 1563 | * The @sys_addr is usually an error address received from the hardware |
1659 | * (MCX_ADDR). | 1564 | * (MCX_ADDR). |
1660 | */ | 1565 | */ |
1661 | static void f10_map_sysaddr_to_csrow(struct mem_ctl_info *mci, | 1566 | static void f1x_map_sysaddr_to_csrow(struct mem_ctl_info *mci, u64 sys_addr, |
1662 | struct err_regs *err_info, | 1567 | u16 syndrome) |
1663 | u64 sys_addr) | ||
1664 | { | 1568 | { |
1665 | struct amd64_pvt *pvt = mci->pvt_info; | 1569 | struct amd64_pvt *pvt = mci->pvt_info; |
1666 | u32 page, offset; | 1570 | u32 page, offset; |
1667 | int nid, csrow, chan = 0; | 1571 | int nid, csrow, chan = 0; |
1668 | u16 syndrome; | ||
1669 | 1572 | ||
1670 | csrow = f10_translate_sysaddr_to_cs(pvt, sys_addr, &nid, &chan); | 1573 | csrow = f1x_translate_sysaddr_to_cs(pvt, sys_addr, &nid, &chan); |
1671 | 1574 | ||
1672 | if (csrow < 0) { | 1575 | if (csrow < 0) { |
1673 | edac_mc_handle_ce_no_info(mci, EDAC_MOD_STR); | 1576 | edac_mc_handle_ce_no_info(mci, EDAC_MOD_STR); |
@@ -1676,14 +1579,12 @@ static void f10_map_sysaddr_to_csrow(struct mem_ctl_info *mci, | |||
1676 | 1579 | ||
1677 | error_address_to_page_and_offset(sys_addr, &page, &offset); | 1580 | error_address_to_page_and_offset(sys_addr, &page, &offset); |
1678 | 1581 | ||
1679 | syndrome = extract_syndrome(err_info); | ||
1680 | |||
1681 | /* | 1582 | /* |
1682 | * We need the syndromes for channel detection only when we're | 1583 | * We need the syndromes for channel detection only when we're |
1683 | * ganged. Otherwise @chan should already contain the channel at | 1584 | * ganged. Otherwise @chan should already contain the channel at |
1684 | * this point. | 1585 | * this point. |
1685 | */ | 1586 | */ |
1686 | if (dct_ganging_enabled(pvt) && (pvt->nbcfg & K8_NBCFG_CHIPKILL)) | 1587 | if (dct_ganging_enabled(pvt)) |
1687 | chan = get_channel_from_ecc_syndrome(mci, syndrome); | 1588 | chan = get_channel_from_ecc_syndrome(mci, syndrome); |
1688 | 1589 | ||
1689 | if (chan >= 0) | 1590 | if (chan >= 0) |
@@ -1700,16 +1601,16 @@ static void f10_map_sysaddr_to_csrow(struct mem_ctl_info *mci, | |||
1700 | 1601 | ||
1701 | /* | 1602 | /* |
1702 | * debug routine to display the memory sizes of all logical DIMMs and its | 1603 | * debug routine to display the memory sizes of all logical DIMMs and its |
1703 | * CSROWs as well | 1604 | * CSROWs |
1704 | */ | 1605 | */ |
1705 | static void amd64_debug_display_dimm_sizes(int ctrl, struct amd64_pvt *pvt) | 1606 | static void amd64_debug_display_dimm_sizes(struct amd64_pvt *pvt, u8 ctrl) |
1706 | { | 1607 | { |
1707 | int dimm, size0, size1, factor = 0; | 1608 | int dimm, size0, size1, factor = 0; |
1708 | u32 dbam; | 1609 | u32 *dcsb = ctrl ? pvt->csels[1].csbases : pvt->csels[0].csbases; |
1709 | u32 *dcsb; | 1610 | u32 dbam = ctrl ? pvt->dbam1 : pvt->dbam0; |
1710 | 1611 | ||
1711 | if (boot_cpu_data.x86 == 0xf) { | 1612 | if (boot_cpu_data.x86 == 0xf) { |
1712 | if (pvt->dclr0 & F10_WIDTH_128) | 1613 | if (pvt->dclr0 & WIDTH_128) |
1713 | factor = 1; | 1614 | factor = 1; |
1714 | 1615 | ||
1715 | /* K8 families < revF not supported yet */ | 1616 | /* K8 families < revF not supported yet */ |
@@ -1719,11 +1620,11 @@ static void amd64_debug_display_dimm_sizes(int ctrl, struct amd64_pvt *pvt) | |||
1719 | WARN_ON(ctrl != 0); | 1620 | WARN_ON(ctrl != 0); |
1720 | } | 1621 | } |
1721 | 1622 | ||
1722 | debugf1("F2x%d80 (DRAM Bank Address Mapping): 0x%08x\n", | 1623 | dbam = (ctrl && !dct_ganging_enabled(pvt)) ? pvt->dbam1 : pvt->dbam0; |
1723 | ctrl, ctrl ? pvt->dbam1 : pvt->dbam0); | 1624 | dcsb = (ctrl && !dct_ganging_enabled(pvt)) ? pvt->csels[1].csbases |
1625 | : pvt->csels[0].csbases; | ||
1724 | 1626 | ||
1725 | dbam = ctrl ? pvt->dbam1 : pvt->dbam0; | 1627 | debugf1("F2x%d80 (DRAM Bank Address Mapping): 0x%08x\n", ctrl, dbam); |
1726 | dcsb = ctrl ? pvt->dcsb1 : pvt->dcsb0; | ||
1727 | 1628 | ||
1728 | edac_printk(KERN_DEBUG, EDAC_MC, "DCT%d chip selects:\n", ctrl); | 1629 | edac_printk(KERN_DEBUG, EDAC_MC, "DCT%d chip selects:\n", ctrl); |
1729 | 1630 | ||
@@ -1731,67 +1632,53 @@ static void amd64_debug_display_dimm_sizes(int ctrl, struct amd64_pvt *pvt) | |||
1731 | for (dimm = 0; dimm < 4; dimm++) { | 1632 | for (dimm = 0; dimm < 4; dimm++) { |
1732 | 1633 | ||
1733 | size0 = 0; | 1634 | size0 = 0; |
1734 | if (dcsb[dimm*2] & K8_DCSB_CS_ENABLE) | 1635 | if (dcsb[dimm*2] & DCSB_CS_ENABLE) |
1735 | size0 = pvt->ops->dbam_to_cs(pvt, DBAM_DIMM(dimm, dbam)); | 1636 | size0 = pvt->ops->dbam_to_cs(pvt, ctrl, |
1637 | DBAM_DIMM(dimm, dbam)); | ||
1736 | 1638 | ||
1737 | size1 = 0; | 1639 | size1 = 0; |
1738 | if (dcsb[dimm*2 + 1] & K8_DCSB_CS_ENABLE) | 1640 | if (dcsb[dimm*2 + 1] & DCSB_CS_ENABLE) |
1739 | size1 = pvt->ops->dbam_to_cs(pvt, DBAM_DIMM(dimm, dbam)); | 1641 | size1 = pvt->ops->dbam_to_cs(pvt, ctrl, |
1642 | DBAM_DIMM(dimm, dbam)); | ||
1740 | 1643 | ||
1741 | edac_printk(KERN_DEBUG, EDAC_MC, " %d: %5dMB %d: %5dMB\n", | 1644 | amd64_info(EDAC_MC ": %d: %5dMB %d: %5dMB\n", |
1742 | dimm * 2, size0 << factor, | 1645 | dimm * 2, size0 << factor, |
1743 | dimm * 2 + 1, size1 << factor); | 1646 | dimm * 2 + 1, size1 << factor); |
1744 | } | 1647 | } |
1745 | } | 1648 | } |
1746 | 1649 | ||
1747 | /* | ||
1748 | * There currently are 3 types type of MC devices for AMD Athlon/Opterons | ||
1749 | * (as per PCI DEVICE_IDs): | ||
1750 | * | ||
1751 | * Family K8: That is the Athlon64 and Opteron CPUs. They all have the same PCI | ||
1752 | * DEVICE ID, even though there is differences between the different Revisions | ||
1753 | * (CG,D,E,F). | ||
1754 | * | ||
1755 | * Family F10h and F11h. | ||
1756 | * | ||
1757 | */ | ||
1758 | static struct amd64_family_type amd64_family_types[] = { | 1650 | static struct amd64_family_type amd64_family_types[] = { |
1759 | [K8_CPUS] = { | 1651 | [K8_CPUS] = { |
1760 | .ctl_name = "RevF", | 1652 | .ctl_name = "K8", |
1761 | .addr_f1_ctl = PCI_DEVICE_ID_AMD_K8_NB_ADDRMAP, | 1653 | .f1_id = PCI_DEVICE_ID_AMD_K8_NB_ADDRMAP, |
1762 | .misc_f3_ctl = PCI_DEVICE_ID_AMD_K8_NB_MISC, | 1654 | .f3_id = PCI_DEVICE_ID_AMD_K8_NB_MISC, |
1763 | .ops = { | 1655 | .ops = { |
1764 | .early_channel_count = k8_early_channel_count, | 1656 | .early_channel_count = k8_early_channel_count, |
1765 | .get_error_address = k8_get_error_address, | ||
1766 | .read_dram_base_limit = k8_read_dram_base_limit, | ||
1767 | .map_sysaddr_to_csrow = k8_map_sysaddr_to_csrow, | 1657 | .map_sysaddr_to_csrow = k8_map_sysaddr_to_csrow, |
1768 | .dbam_to_cs = k8_dbam_to_chip_select, | 1658 | .dbam_to_cs = k8_dbam_to_chip_select, |
1659 | .read_dct_pci_cfg = k8_read_dct_pci_cfg, | ||
1769 | } | 1660 | } |
1770 | }, | 1661 | }, |
1771 | [F10_CPUS] = { | 1662 | [F10_CPUS] = { |
1772 | .ctl_name = "Family 10h", | 1663 | .ctl_name = "F10h", |
1773 | .addr_f1_ctl = PCI_DEVICE_ID_AMD_10H_NB_MAP, | 1664 | .f1_id = PCI_DEVICE_ID_AMD_10H_NB_MAP, |
1774 | .misc_f3_ctl = PCI_DEVICE_ID_AMD_10H_NB_MISC, | 1665 | .f3_id = PCI_DEVICE_ID_AMD_10H_NB_MISC, |
1775 | .ops = { | 1666 | .ops = { |
1776 | .early_channel_count = f10_early_channel_count, | 1667 | .early_channel_count = f1x_early_channel_count, |
1777 | .get_error_address = f10_get_error_address, | 1668 | .map_sysaddr_to_csrow = f1x_map_sysaddr_to_csrow, |
1778 | .read_dram_base_limit = f10_read_dram_base_limit, | ||
1779 | .read_dram_ctl_register = f10_read_dram_ctl_register, | ||
1780 | .map_sysaddr_to_csrow = f10_map_sysaddr_to_csrow, | ||
1781 | .dbam_to_cs = f10_dbam_to_chip_select, | 1669 | .dbam_to_cs = f10_dbam_to_chip_select, |
1670 | .read_dct_pci_cfg = f10_read_dct_pci_cfg, | ||
1782 | } | 1671 | } |
1783 | }, | 1672 | }, |
1784 | [F11_CPUS] = { | 1673 | [F15_CPUS] = { |
1785 | .ctl_name = "Family 11h", | 1674 | .ctl_name = "F15h", |
1786 | .addr_f1_ctl = PCI_DEVICE_ID_AMD_11H_NB_MAP, | 1675 | .f1_id = PCI_DEVICE_ID_AMD_15H_NB_F1, |
1787 | .misc_f3_ctl = PCI_DEVICE_ID_AMD_11H_NB_MISC, | 1676 | .f3_id = PCI_DEVICE_ID_AMD_15H_NB_F3, |
1788 | .ops = { | 1677 | .ops = { |
1789 | .early_channel_count = f10_early_channel_count, | 1678 | .early_channel_count = f1x_early_channel_count, |
1790 | .get_error_address = f10_get_error_address, | 1679 | .map_sysaddr_to_csrow = f1x_map_sysaddr_to_csrow, |
1791 | .read_dram_base_limit = f10_read_dram_base_limit, | 1680 | .dbam_to_cs = f15_dbam_to_chip_select, |
1792 | .read_dram_ctl_register = f10_read_dram_ctl_register, | 1681 | .read_dct_pci_cfg = f15_read_dct_pci_cfg, |
1793 | .map_sysaddr_to_csrow = f10_map_sysaddr_to_csrow, | ||
1794 | .dbam_to_cs = f10_dbam_to_chip_select, | ||
1795 | } | 1682 | } |
1796 | }, | 1683 | }, |
1797 | }; | 1684 | }; |
@@ -1881,15 +1768,15 @@ static u16 x8_vectors[] = { | |||
1881 | 0x0100, 0x0200, 0x0400, 0x0800, 0x1000, 0x2000, 0x4000, 0x8000, | 1768 | 0x0100, 0x0200, 0x0400, 0x0800, 0x1000, 0x2000, 0x4000, 0x8000, |
1882 | }; | 1769 | }; |
1883 | 1770 | ||
1884 | static int decode_syndrome(u16 syndrome, u16 *vectors, int num_vecs, | 1771 | static int decode_syndrome(u16 syndrome, u16 *vectors, unsigned num_vecs, |
1885 | int v_dim) | 1772 | unsigned v_dim) |
1886 | { | 1773 | { |
1887 | unsigned int i, err_sym; | 1774 | unsigned int i, err_sym; |
1888 | 1775 | ||
1889 | for (err_sym = 0; err_sym < num_vecs / v_dim; err_sym++) { | 1776 | for (err_sym = 0; err_sym < num_vecs / v_dim; err_sym++) { |
1890 | u16 s = syndrome; | 1777 | u16 s = syndrome; |
1891 | int v_idx = err_sym * v_dim; | 1778 | unsigned v_idx = err_sym * v_dim; |
1892 | int v_end = (err_sym + 1) * v_dim; | 1779 | unsigned v_end = (err_sym + 1) * v_dim; |
1893 | 1780 | ||
1894 | /* walk over all 16 bits of the syndrome */ | 1781 | /* walk over all 16 bits of the syndrome */ |
1895 | for (i = 1; i < (1U << 16); i <<= 1) { | 1782 | for (i = 1; i < (1U << 16); i <<= 1) { |
@@ -1961,54 +1848,50 @@ static int get_channel_from_ecc_syndrome(struct mem_ctl_info *mci, u16 syndrome) | |||
1961 | struct amd64_pvt *pvt = mci->pvt_info; | 1848 | struct amd64_pvt *pvt = mci->pvt_info; |
1962 | int err_sym = -1; | 1849 | int err_sym = -1; |
1963 | 1850 | ||
1964 | if (pvt->syn_type == 8) | 1851 | if (pvt->ecc_sym_sz == 8) |
1965 | err_sym = decode_syndrome(syndrome, x8_vectors, | 1852 | err_sym = decode_syndrome(syndrome, x8_vectors, |
1966 | ARRAY_SIZE(x8_vectors), | 1853 | ARRAY_SIZE(x8_vectors), |
1967 | pvt->syn_type); | 1854 | pvt->ecc_sym_sz); |
1968 | else if (pvt->syn_type == 4) | 1855 | else if (pvt->ecc_sym_sz == 4) |
1969 | err_sym = decode_syndrome(syndrome, x4_vectors, | 1856 | err_sym = decode_syndrome(syndrome, x4_vectors, |
1970 | ARRAY_SIZE(x4_vectors), | 1857 | ARRAY_SIZE(x4_vectors), |
1971 | pvt->syn_type); | 1858 | pvt->ecc_sym_sz); |
1972 | else { | 1859 | else { |
1973 | amd64_printk(KERN_WARNING, "%s: Illegal syndrome type: %u\n", | 1860 | amd64_warn("Illegal syndrome type: %u\n", pvt->ecc_sym_sz); |
1974 | __func__, pvt->syn_type); | ||
1975 | return err_sym; | 1861 | return err_sym; |
1976 | } | 1862 | } |
1977 | 1863 | ||
1978 | return map_err_sym_to_channel(err_sym, pvt->syn_type); | 1864 | return map_err_sym_to_channel(err_sym, pvt->ecc_sym_sz); |
1979 | } | 1865 | } |
1980 | 1866 | ||
1981 | /* | 1867 | /* |
1982 | * Handle any Correctable Errors (CEs) that have occurred. Check for valid ERROR | 1868 | * Handle any Correctable Errors (CEs) that have occurred. Check for valid ERROR |
1983 | * ADDRESS and process. | 1869 | * ADDRESS and process. |
1984 | */ | 1870 | */ |
1985 | static void amd64_handle_ce(struct mem_ctl_info *mci, | 1871 | static void amd64_handle_ce(struct mem_ctl_info *mci, struct mce *m) |
1986 | struct err_regs *info) | ||
1987 | { | 1872 | { |
1988 | struct amd64_pvt *pvt = mci->pvt_info; | 1873 | struct amd64_pvt *pvt = mci->pvt_info; |
1989 | u64 sys_addr; | 1874 | u64 sys_addr; |
1875 | u16 syndrome; | ||
1990 | 1876 | ||
1991 | /* Ensure that the Error Address is VALID */ | 1877 | /* Ensure that the Error Address is VALID */ |
1992 | if ((info->nbsh & K8_NBSH_VALID_ERROR_ADDR) == 0) { | 1878 | if (!(m->status & MCI_STATUS_ADDRV)) { |
1993 | amd64_mc_printk(mci, KERN_ERR, | 1879 | amd64_mc_err(mci, "HW has no ERROR_ADDRESS available\n"); |
1994 | "HW has no ERROR_ADDRESS available\n"); | ||
1995 | edac_mc_handle_ce_no_info(mci, EDAC_MOD_STR); | 1880 | edac_mc_handle_ce_no_info(mci, EDAC_MOD_STR); |
1996 | return; | 1881 | return; |
1997 | } | 1882 | } |
1998 | 1883 | ||
1999 | sys_addr = pvt->ops->get_error_address(mci, info); | 1884 | sys_addr = get_error_address(m); |
1885 | syndrome = extract_syndrome(m->status); | ||
2000 | 1886 | ||
2001 | amd64_mc_printk(mci, KERN_ERR, | 1887 | amd64_mc_err(mci, "CE ERROR_ADDRESS= 0x%llx\n", sys_addr); |
2002 | "CE ERROR_ADDRESS= 0x%llx\n", sys_addr); | ||
2003 | 1888 | ||
2004 | pvt->ops->map_sysaddr_to_csrow(mci, info, sys_addr); | 1889 | pvt->ops->map_sysaddr_to_csrow(mci, sys_addr, syndrome); |
2005 | } | 1890 | } |
2006 | 1891 | ||
2007 | /* Handle any Un-correctable Errors (UEs) */ | 1892 | /* Handle any Un-correctable Errors (UEs) */ |
2008 | static void amd64_handle_ue(struct mem_ctl_info *mci, | 1893 | static void amd64_handle_ue(struct mem_ctl_info *mci, struct mce *m) |
2009 | struct err_regs *info) | ||
2010 | { | 1894 | { |
2011 | struct amd64_pvt *pvt = mci->pvt_info; | ||
2012 | struct mem_ctl_info *log_mci, *src_mci = NULL; | 1895 | struct mem_ctl_info *log_mci, *src_mci = NULL; |
2013 | int csrow; | 1896 | int csrow; |
2014 | u64 sys_addr; | 1897 | u64 sys_addr; |
@@ -2016,14 +1899,13 @@ static void amd64_handle_ue(struct mem_ctl_info *mci, | |||
2016 | 1899 | ||
2017 | log_mci = mci; | 1900 | log_mci = mci; |
2018 | 1901 | ||
2019 | if ((info->nbsh & K8_NBSH_VALID_ERROR_ADDR) == 0) { | 1902 | if (!(m->status & MCI_STATUS_ADDRV)) { |
2020 | amd64_mc_printk(mci, KERN_CRIT, | 1903 | amd64_mc_err(mci, "HW has no ERROR_ADDRESS available\n"); |
2021 | "HW has no ERROR_ADDRESS available\n"); | ||
2022 | edac_mc_handle_ue_no_info(log_mci, EDAC_MOD_STR); | 1904 | edac_mc_handle_ue_no_info(log_mci, EDAC_MOD_STR); |
2023 | return; | 1905 | return; |
2024 | } | 1906 | } |
2025 | 1907 | ||
2026 | sys_addr = pvt->ops->get_error_address(mci, info); | 1908 | sys_addr = get_error_address(m); |
2027 | 1909 | ||
2028 | /* | 1910 | /* |
2029 | * Find out which node the error address belongs to. This may be | 1911 | * Find out which node the error address belongs to. This may be |
@@ -2031,9 +1913,8 @@ static void amd64_handle_ue(struct mem_ctl_info *mci, | |||
2031 | */ | 1913 | */ |
2032 | src_mci = find_mc_by_sys_addr(mci, sys_addr); | 1914 | src_mci = find_mc_by_sys_addr(mci, sys_addr); |
2033 | if (!src_mci) { | 1915 | if (!src_mci) { |
2034 | amd64_mc_printk(mci, KERN_CRIT, | 1916 | amd64_mc_err(mci, "ERROR ADDRESS (0x%lx) NOT mapped to a MC\n", |
2035 | "ERROR ADDRESS (0x%lx) value NOT mapped to a MC\n", | 1917 | (unsigned long)sys_addr); |
2036 | (unsigned long)sys_addr); | ||
2037 | edac_mc_handle_ue_no_info(log_mci, EDAC_MOD_STR); | 1918 | edac_mc_handle_ue_no_info(log_mci, EDAC_MOD_STR); |
2038 | return; | 1919 | return; |
2039 | } | 1920 | } |
@@ -2042,9 +1923,8 @@ static void amd64_handle_ue(struct mem_ctl_info *mci, | |||
2042 | 1923 | ||
2043 | csrow = sys_addr_to_csrow(log_mci, sys_addr); | 1924 | csrow = sys_addr_to_csrow(log_mci, sys_addr); |
2044 | if (csrow < 0) { | 1925 | if (csrow < 0) { |
2045 | amd64_mc_printk(mci, KERN_CRIT, | 1926 | amd64_mc_err(mci, "ERROR_ADDRESS (0x%lx) NOT mapped to CS\n", |
2046 | "ERROR_ADDRESS (0x%lx) value NOT mapped to 'csrow'\n", | 1927 | (unsigned long)sys_addr); |
2047 | (unsigned long)sys_addr); | ||
2048 | edac_mc_handle_ue_no_info(log_mci, EDAC_MOD_STR); | 1928 | edac_mc_handle_ue_no_info(log_mci, EDAC_MOD_STR); |
2049 | } else { | 1929 | } else { |
2050 | error_address_to_page_and_offset(sys_addr, &page, &offset); | 1930 | error_address_to_page_and_offset(sys_addr, &page, &offset); |
@@ -2053,14 +1933,14 @@ static void amd64_handle_ue(struct mem_ctl_info *mci, | |||
2053 | } | 1933 | } |
2054 | 1934 | ||
2055 | static inline void __amd64_decode_bus_error(struct mem_ctl_info *mci, | 1935 | static inline void __amd64_decode_bus_error(struct mem_ctl_info *mci, |
2056 | struct err_regs *info) | 1936 | struct mce *m) |
2057 | { | 1937 | { |
2058 | u32 ec = ERROR_CODE(info->nbsl); | 1938 | u16 ec = EC(m->status); |
2059 | u32 xec = EXT_ERROR_CODE(info->nbsl); | 1939 | u8 xec = XEC(m->status, 0x1f); |
2060 | int ecc_type = (info->nbsh >> 13) & 0x3; | 1940 | u8 ecc_type = (m->status >> 45) & 0x3; |
2061 | 1941 | ||
2062 | /* Bail early out if this was an 'observed' error */ | 1942 | /* Bail early out if this was an 'observed' error */ |
2063 | if (PP(ec) == K8_NBSL_PP_OBS) | 1943 | if (PP(ec) == NBSL_PP_OBS) |
2064 | return; | 1944 | return; |
2065 | 1945 | ||
2066 | /* Do only ECC errors */ | 1946 | /* Do only ECC errors */ |
@@ -2068,103 +1948,68 @@ static inline void __amd64_decode_bus_error(struct mem_ctl_info *mci, | |||
2068 | return; | 1948 | return; |
2069 | 1949 | ||
2070 | if (ecc_type == 2) | 1950 | if (ecc_type == 2) |
2071 | amd64_handle_ce(mci, info); | 1951 | amd64_handle_ce(mci, m); |
2072 | else if (ecc_type == 1) | 1952 | else if (ecc_type == 1) |
2073 | amd64_handle_ue(mci, info); | 1953 | amd64_handle_ue(mci, m); |
2074 | } | 1954 | } |
2075 | 1955 | ||
2076 | void amd64_decode_bus_error(int node_id, struct err_regs *regs) | 1956 | void amd64_decode_bus_error(int node_id, struct mce *m, u32 nbcfg) |
2077 | { | 1957 | { |
2078 | struct mem_ctl_info *mci = mci_lookup[node_id]; | 1958 | struct mem_ctl_info *mci = mcis[node_id]; |
2079 | |||
2080 | __amd64_decode_bus_error(mci, regs); | ||
2081 | |||
2082 | /* | ||
2083 | * Check the UE bit of the NB status high register, if set generate some | ||
2084 | * logs. If NOT a GART error, then process the event as a NO-INFO event. | ||
2085 | * If it was a GART error, skip that process. | ||
2086 | * | ||
2087 | * FIXME: this should go somewhere else, if at all. | ||
2088 | */ | ||
2089 | if (regs->nbsh & K8_NBSH_UC_ERR && !report_gart_errors) | ||
2090 | edac_mc_handle_ue_no_info(mci, "UE bit is set"); | ||
2091 | 1959 | ||
1960 | __amd64_decode_bus_error(mci, m); | ||
2092 | } | 1961 | } |
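The CE/UE split in __amd64_decode_bus_error() keys off two bits of the machine-check status word; a one-liner showing the extraction the new code uses (2 = correctable, 1 = uncorrectable, anything else is ignored here):

#include <stdint.h>

/* Bits [46:45] of the MCA status word, as read above. */
static unsigned ecc_type_from_status(uint64_t mci_status)
{
	return (mci_status >> 45) & 0x3;
}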
2093 | 1962 | ||
2094 | /* | 1963 | /* |
2095 | * Input: | 1964 | * Use pvt->F2 which contains the F2 CPU PCI device to get the related |
2096 | * 1) struct amd64_pvt which contains pvt->dram_f2_ctl pointer | 1965 | * F1 (AddrMap) and F3 (Misc) devices. Return negative value on error. |
2097 | * 2) AMD Family index value | ||
2098 | * | ||
2099 | * Ouput: | ||
2100 | * Upon return of 0, the following filled in: | ||
2101 | * | ||
2102 | * struct pvt->addr_f1_ctl | ||
2103 | * struct pvt->misc_f3_ctl | ||
2104 | * | ||
2105 | * Filled in with related device funcitions of 'dram_f2_ctl' | ||
2106 | * These devices are "reserved" via the pci_get_device() | ||
2107 | * | ||
2108 | * Upon return of 1 (error status): | ||
2109 | * | ||
2110 | * Nothing reserved | ||
2111 | */ | 1966 | */ |
2112 | static int amd64_reserve_mc_sibling_devices(struct amd64_pvt *pvt, int mc_idx) | 1967 | static int reserve_mc_sibling_devs(struct amd64_pvt *pvt, u16 f1_id, u16 f3_id) |
2113 | { | 1968 | { |
2114 | const struct amd64_family_type *amd64_dev = &amd64_family_types[mc_idx]; | ||
2115 | |||
2116 | /* Reserve the ADDRESS MAP Device */ | 1969 | /* Reserve the ADDRESS MAP Device */ |
2117 | pvt->addr_f1_ctl = pci_get_related_function(pvt->dram_f2_ctl->vendor, | 1970 | pvt->F1 = pci_get_related_function(pvt->F2->vendor, f1_id, pvt->F2); |
2118 | amd64_dev->addr_f1_ctl, | 1971 | if (!pvt->F1) { |
2119 | pvt->dram_f2_ctl); | 1972 | amd64_err("error address map device not found: " |
2120 | 1973 | "vendor %x device 0x%x (broken BIOS?)\n", | |
2121 | if (!pvt->addr_f1_ctl) { | 1974 | PCI_VENDOR_ID_AMD, f1_id); |
2122 | amd64_printk(KERN_ERR, "error address map device not found: " | 1975 | return -ENODEV; |
2123 | "vendor %x device 0x%x (broken BIOS?)\n", | ||
2124 | PCI_VENDOR_ID_AMD, amd64_dev->addr_f1_ctl); | ||
2125 | return 1; | ||
2126 | } | 1976 | } |
2127 | 1977 | ||
2128 | /* Reserve the MISC Device */ | 1978 | /* Reserve the MISC Device */ |
2129 | pvt->misc_f3_ctl = pci_get_related_function(pvt->dram_f2_ctl->vendor, | 1979 | pvt->F3 = pci_get_related_function(pvt->F2->vendor, f3_id, pvt->F2); |
2130 | amd64_dev->misc_f3_ctl, | 1980 | if (!pvt->F3) { |
2131 | pvt->dram_f2_ctl); | 1981 | pci_dev_put(pvt->F1); |
1982 | pvt->F1 = NULL; | ||
2132 | 1983 | ||
2133 | if (!pvt->misc_f3_ctl) { | 1984 | amd64_err("error F3 device not found: " |
2134 | pci_dev_put(pvt->addr_f1_ctl); | 1985 | "vendor %x device 0x%x (broken BIOS?)\n", |
2135 | pvt->addr_f1_ctl = NULL; | 1986 | PCI_VENDOR_ID_AMD, f3_id); |
2136 | 1987 | ||
2137 | amd64_printk(KERN_ERR, "error miscellaneous device not found: " | 1988 | return -ENODEV; |
2138 | "vendor %x device 0x%x (broken BIOS?)\n", | ||
2139 | PCI_VENDOR_ID_AMD, amd64_dev->misc_f3_ctl); | ||
2140 | return 1; | ||
2141 | } | 1989 | } |
2142 | 1990 | debugf1("F1: %s\n", pci_name(pvt->F1)); | |
2143 | debugf1(" Addr Map device PCI Bus ID:\t%s\n", | 1991 | debugf1("F2: %s\n", pci_name(pvt->F2)); |
2144 | pci_name(pvt->addr_f1_ctl)); | 1992 | debugf1("F3: %s\n", pci_name(pvt->F3)); |
2145 | debugf1(" DRAM MEM-CTL PCI Bus ID:\t%s\n", | ||
2146 | pci_name(pvt->dram_f2_ctl)); | ||
2147 | debugf1(" Misc device PCI Bus ID:\t%s\n", | ||
2148 | pci_name(pvt->misc_f3_ctl)); | ||
2149 | 1993 | ||
2150 | return 0; | 1994 | return 0; |
2151 | } | 1995 | } |
2152 | 1996 | ||
2153 | static void amd64_free_mc_sibling_devices(struct amd64_pvt *pvt) | 1997 | static void free_mc_sibling_devs(struct amd64_pvt *pvt) |
2154 | { | 1998 | { |
2155 | pci_dev_put(pvt->addr_f1_ctl); | 1999 | pci_dev_put(pvt->F1); |
2156 | pci_dev_put(pvt->misc_f3_ctl); | 2000 | pci_dev_put(pvt->F3); |
2157 | } | 2001 | } |
2158 | 2002 | ||
2159 | /* | 2003 | /* |
2160 | * Retrieve the hardware registers of the memory controller (this includes the | 2004 | * Retrieve the hardware registers of the memory controller (this includes the |
2161 | * 'Address Map' and 'Misc' device regs) | 2005 | * 'Address Map' and 'Misc' device regs) |
2162 | */ | 2006 | */ |
2163 | static void amd64_read_mc_registers(struct amd64_pvt *pvt) | 2007 | static void read_mc_regs(struct amd64_pvt *pvt) |
2164 | { | 2008 | { |
2009 | struct cpuinfo_x86 *c = &boot_cpu_data; | ||
2165 | u64 msr_val; | 2010 | u64 msr_val; |
2166 | u32 tmp; | 2011 | u32 tmp; |
2167 | int dram; | 2012 | unsigned range; |
2168 | 2013 | ||
2169 | /* | 2014 | /* |
2170 | * Retrieve TOP_MEM and TOP_MEM2; no masking off of reserved bits since | 2015 | * Retrieve TOP_MEM and TOP_MEM2; no masking off of reserved bits since |
@@ -2181,78 +2026,66 @@ static void amd64_read_mc_registers(struct amd64_pvt *pvt) | |||
2181 | } else | 2026 | } else |
2182 | debugf0(" TOP_MEM2 disabled.\n"); | 2027 | debugf0(" TOP_MEM2 disabled.\n"); |
2183 | 2028 | ||
2184 | amd64_cpu_display_info(pvt); | 2029 | amd64_read_pci_cfg(pvt->F3, NBCAP, &pvt->nbcap); |
2185 | 2030 | ||
2186 | amd64_read_pci_cfg(pvt->misc_f3_ctl, K8_NBCAP, &pvt->nbcap); | 2031 | read_dram_ctl_register(pvt); |
2187 | 2032 | ||
2188 | if (pvt->ops->read_dram_ctl_register) | 2033 | for (range = 0; range < DRAM_RANGES; range++) { |
2189 | pvt->ops->read_dram_ctl_register(pvt); | 2034 | u8 rw; |
2190 | 2035 | ||
2191 | for (dram = 0; dram < DRAM_REG_COUNT; dram++) { | 2036 | /* read settings for this DRAM range */ |
2192 | /* | 2037 | read_dram_base_limit_regs(pvt, range); |
2193 | * Call CPU specific READ function to get the DRAM Base and | ||
2194 | * Limit values from the DCT. | ||
2195 | */ | ||
2196 | pvt->ops->read_dram_base_limit(pvt, dram); | ||
2197 | 2038 | ||
2198 | /* | 2039 | rw = dram_rw(pvt, range); |
2199 | * Only print out debug info on rows with both R and W Enabled. | 2040 | if (!rw) |
2200 | * Normal processing, compiler should optimize this whole 'if' | 2041 | continue; |
2201 | * debug output block away. | 2042 | |
2202 | */ | 2043 | debugf1(" DRAM range[%d], base: 0x%016llx; limit: 0x%016llx\n", |
2203 | if (pvt->dram_rw_en[dram] != 0) { | 2044 | range, |
2204 | debugf1(" DRAM-BASE[%d]: 0x%016llx " | 2045 | get_dram_base(pvt, range), |
2205 | "DRAM-LIMIT: 0x%016llx\n", | 2046 | get_dram_limit(pvt, range)); |
2206 | dram, | 2047 | |
2207 | pvt->dram_base[dram], | 2048 | debugf1(" IntlvEn=%s; Range access: %s%s IntlvSel=%d DstNode=%d\n", |
2208 | pvt->dram_limit[dram]); | 2049 | dram_intlv_en(pvt, range) ? "Enabled" : "Disabled", |
2209 | 2050 | (rw & 0x1) ? "R" : "-", | |
2210 | debugf1(" IntlvEn=%s %s %s " | 2051 | (rw & 0x2) ? "W" : "-", |
2211 | "IntlvSel=%d DstNode=%d\n", | 2052 | dram_intlv_sel(pvt, range), |
2212 | pvt->dram_IntlvEn[dram] ? | 2053 | dram_dst_node(pvt, range)); |
2213 | "Enabled" : "Disabled", | ||
2214 | (pvt->dram_rw_en[dram] & 0x2) ? "W" : "!W", | ||
2215 | (pvt->dram_rw_en[dram] & 0x1) ? "R" : "!R", | ||
2216 | pvt->dram_IntlvSel[dram], | ||
2217 | pvt->dram_DstNode[dram]); | ||
2218 | } | ||
2219 | } | 2054 | } |
2220 | 2055 | ||
2221 | amd64_read_dct_base_mask(pvt); | 2056 | read_dct_base_mask(pvt); |
2222 | 2057 | ||
2223 | amd64_read_pci_cfg(pvt->addr_f1_ctl, K8_DHAR, &pvt->dhar); | 2058 | amd64_read_pci_cfg(pvt->F1, DHAR, &pvt->dhar); |
2224 | amd64_read_dbam_reg(pvt); | 2059 | amd64_read_dct_pci_cfg(pvt, DBAM0, &pvt->dbam0); |
2225 | 2060 | ||
2226 | amd64_read_pci_cfg(pvt->misc_f3_ctl, | 2061 | amd64_read_pci_cfg(pvt->F3, F10_ONLINE_SPARE, &pvt->online_spare); |
2227 | F10_ONLINE_SPARE, &pvt->online_spare); | ||
2228 | 2062 | ||
2229 | amd64_read_pci_cfg(pvt->dram_f2_ctl, F10_DCLR_0, &pvt->dclr0); | 2063 | amd64_read_dct_pci_cfg(pvt, DCLR0, &pvt->dclr0); |
2230 | amd64_read_pci_cfg(pvt->dram_f2_ctl, F10_DCHR_0, &pvt->dchr0); | 2064 | amd64_read_dct_pci_cfg(pvt, DCHR0, &pvt->dchr0); |
2231 | 2065 | ||
2232 | if (boot_cpu_data.x86 >= 0x10) { | 2066 | if (!dct_ganging_enabled(pvt)) { |
2233 | if (!dct_ganging_enabled(pvt)) { | 2067 | amd64_read_dct_pci_cfg(pvt, DCLR1, &pvt->dclr1); |
2234 | amd64_read_pci_cfg(pvt->dram_f2_ctl, F10_DCLR_1, &pvt->dclr1); | 2068 | amd64_read_dct_pci_cfg(pvt, DCHR1, &pvt->dchr1); |
2235 | amd64_read_pci_cfg(pvt->dram_f2_ctl, F10_DCHR_1, &pvt->dchr1); | ||
2236 | } | ||
2237 | amd64_read_pci_cfg(pvt->misc_f3_ctl, EXT_NB_MCA_CFG, &tmp); | ||
2238 | } | 2069 | } |
2239 | 2070 | ||
2240 | if (boot_cpu_data.x86 == 0x10 && | 2071 | pvt->ecc_sym_sz = 4; |
2241 | boot_cpu_data.x86_model > 7 && | 2072 | |
2242 | /* F3x180[EccSymbolSize]=1 => x8 symbols */ | 2073 | if (c->x86 >= 0x10) { |
2243 | tmp & BIT(25)) | 2074 | amd64_read_pci_cfg(pvt->F3, EXT_NB_MCA_CFG, &tmp); |
2244 | pvt->syn_type = 8; | 2075 | amd64_read_dct_pci_cfg(pvt, DBAM1, &pvt->dbam1); |
2245 | else | ||
2246 | pvt->syn_type = 4; | ||
2247 | 2076 | ||
2248 | amd64_dump_misc_regs(pvt); | 2077 | /* F10h, revD and later can do x8 ECC too */ |
2078 | if ((c->x86 > 0x10 || c->x86_model > 7) && tmp & BIT(25)) | ||
2079 | pvt->ecc_sym_sz = 8; | ||
2080 | } | ||
2081 | dump_misc_regs(pvt); | ||
2249 | } | 2082 | } |
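The ECC symbol-size decision at the end of read_mc_regs() defaults to x4 symbols and switches to x8 only on F10h revD and later (or newer families) when F3x180[EccSymbolSize], bit 25, is set. A compact sketch of that decision with the register value passed in as a plain parameter:

#include <stdint.h>

/* Mirrors the ecc_sym_sz selection above: family/model are the CPU's
 * x86 family and model, ext_nb_mca_cfg the F3x180 contents. */
static unsigned ecc_sym_sz(unsigned family, unsigned model,
			   uint32_t ext_nb_mca_cfg)
{
	if (family >= 0x10 &&
	    (family > 0x10 || model > 7) &&
	    (ext_nb_mca_cfg & (1u << 25)))
		return 8;

	return 4;
}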
2250 | 2083 | ||
2251 | /* | 2084 | /* |
2252 | * NOTE: CPU Revision Dependent code | 2085 | * NOTE: CPU Revision Dependent code |
2253 | * | 2086 | * |
2254 | * Input: | 2087 | * Input: |
2255 | * @csrow_nr ChipSelect Row Number (0..pvt->cs_count-1) | 2088 | * @csrow_nr ChipSelect Row Number (0..NUM_CHIPSELECTS-1) |
2256 | * k8 private pointer to --> | 2089 | * k8 private pointer to --> |
2257 | * DRAM Bank Address mapping register | 2090 | * DRAM Bank Address mapping register |
2258 | * node_id | 2091 | * node_id |
@@ -2282,7 +2115,7 @@ static void amd64_read_mc_registers(struct amd64_pvt *pvt) | |||
2282 | * encompasses | 2115 | * encompasses |
2283 | * | 2116 | * |
2284 | */ | 2117 | */ |
2285 | static u32 amd64_csrow_nr_pages(int csrow_nr, struct amd64_pvt *pvt) | 2118 | static u32 amd64_csrow_nr_pages(struct amd64_pvt *pvt, u8 dct, int csrow_nr) |
2286 | { | 2119 | { |
2287 | u32 cs_mode, nr_pages; | 2120 | u32 cs_mode, nr_pages; |
2288 | 2121 | ||
@@ -2295,7 +2128,7 @@ static u32 amd64_csrow_nr_pages(int csrow_nr, struct amd64_pvt *pvt) | |||
2295 | */ | 2128 | */ |
2296 | cs_mode = (pvt->dbam0 >> ((csrow_nr / 2) * 4)) & 0xF; | 2129 | cs_mode = (pvt->dbam0 >> ((csrow_nr / 2) * 4)) & 0xF; |
2297 | 2130 | ||
2298 | nr_pages = pvt->ops->dbam_to_cs(pvt, cs_mode) << (20 - PAGE_SHIFT); | 2131 | nr_pages = pvt->ops->dbam_to_cs(pvt, dct, cs_mode) << (20 - PAGE_SHIFT); |
2299 | 2132 | ||
2300 | /* | 2133 | /* |
2301 | * If dual channel then double the memory size of single channel. | 2134 | * If dual channel then double the memory size of single channel. |
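For reference, the page-count computation in amd64_csrow_nr_pages() reduces to: each chip-select pair shares one 4-bit DBAM field, the nibble is decoded by the family-specific dbam_to_cs() op into a size in MB, and MB are converted to pages. A small sketch under those assumptions (dbam_to_cs_mb is a stand-in for pvt->ops->dbam_to_cs, and the dual-channel doubling mentioned in the comment above is omitted):

#include <stdint.h>

#define PAGE_SHIFT 12	/* 4 KiB pages, as on x86 */

/*
 * Sketch of the computation above, not the kernel code: pick the 4-bit DBAM
 * field for the chip-select pair, decode it to a size in MB, convert to pages.
 */
static uint32_t csrow_nr_pages_sketch(uint32_t dbam, int csrow_nr,
				      int (*dbam_to_cs_mb)(uint32_t cs_mode))
{
	uint32_t cs_mode = (dbam >> ((csrow_nr / 2) * 4)) & 0xF;

	return (uint32_t)dbam_to_cs_mb(cs_mode) << (20 - PAGE_SHIFT);
}

For a chip select decoded as 256 MB, this gives 256 << 8 = 65536 pages of 4 KiB.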
@@ -2314,26 +2147,26 @@ static u32 amd64_csrow_nr_pages(int csrow_nr, struct amd64_pvt *pvt) | |||
2314 | * Initialize the array of csrow attribute instances, based on the values | 2147 | * Initialize the array of csrow attribute instances, based on the values |
2315 | * from pci config hardware registers. | 2148 | * from pci config hardware registers. |
2316 | */ | 2149 | */ |
2317 | static int amd64_init_csrows(struct mem_ctl_info *mci) | 2150 | static int init_csrows(struct mem_ctl_info *mci) |
2318 | { | 2151 | { |
2319 | struct csrow_info *csrow; | 2152 | struct csrow_info *csrow; |
2320 | struct amd64_pvt *pvt; | 2153 | struct amd64_pvt *pvt = mci->pvt_info; |
2321 | u64 input_addr_min, input_addr_max, sys_addr; | 2154 | u64 input_addr_min, input_addr_max, sys_addr, base, mask; |
2155 | u32 val; | ||
2322 | int i, empty = 1; | 2156 | int i, empty = 1; |
2323 | 2157 | ||
2324 | pvt = mci->pvt_info; | 2158 | amd64_read_pci_cfg(pvt->F3, NBCFG, &val); |
2325 | 2159 | ||
2326 | amd64_read_pci_cfg(pvt->misc_f3_ctl, K8_NBCFG, &pvt->nbcfg); | 2160 | pvt->nbcfg = val; |
2327 | 2161 | ||
2328 | debugf0("NBCFG= 0x%x CHIPKILL= %s DRAM ECC= %s\n", pvt->nbcfg, | 2162 | debugf0("node %d, NBCFG=0x%08x[ChipKillEccCap: %d|DramEccEn: %d]\n", |
2329 | (pvt->nbcfg & K8_NBCFG_CHIPKILL) ? "Enabled" : "Disabled", | 2163 | pvt->mc_node_id, val, |
2330 | (pvt->nbcfg & K8_NBCFG_ECC_ENABLE) ? "Enabled" : "Disabled" | 2164 | !!(val & NBCFG_CHIPKILL), !!(val & NBCFG_ECC_ENABLE)); |
2331 | ); | ||
2332 | 2165 | ||
2333 | for (i = 0; i < pvt->cs_count; i++) { | 2166 | for_each_chip_select(i, 0, pvt) { |
2334 | csrow = &mci->csrows[i]; | 2167 | csrow = &mci->csrows[i]; |
2335 | 2168 | ||
2336 | if ((pvt->dcsb0[i] & K8_DCSB_CS_ENABLE) == 0) { | 2169 | if (!csrow_enabled(i, 0, pvt)) { |
2337 | debugf1("----CSROW %d EMPTY for node %d\n", i, | 2170 | debugf1("----CSROW %d EMPTY for node %d\n", i, |
2338 | pvt->mc_node_id); | 2171 | pvt->mc_node_id); |
2339 | continue; | 2172 | continue; |
@@ -2343,16 +2176,18 @@ static int amd64_init_csrows(struct mem_ctl_info *mci) | |||
2343 | i, pvt->mc_node_id); | 2176 | i, pvt->mc_node_id); |
2344 | 2177 | ||
2345 | empty = 0; | 2178 | empty = 0; |
2346 | csrow->nr_pages = amd64_csrow_nr_pages(i, pvt); | 2179 | csrow->nr_pages = amd64_csrow_nr_pages(pvt, 0, i); |
2347 | find_csrow_limits(mci, i, &input_addr_min, &input_addr_max); | 2180 | find_csrow_limits(mci, i, &input_addr_min, &input_addr_max); |
2348 | sys_addr = input_addr_to_sys_addr(mci, input_addr_min); | 2181 | sys_addr = input_addr_to_sys_addr(mci, input_addr_min); |
2349 | csrow->first_page = (u32) (sys_addr >> PAGE_SHIFT); | 2182 | csrow->first_page = (u32) (sys_addr >> PAGE_SHIFT); |
2350 | sys_addr = input_addr_to_sys_addr(mci, input_addr_max); | 2183 | sys_addr = input_addr_to_sys_addr(mci, input_addr_max); |
2351 | csrow->last_page = (u32) (sys_addr >> PAGE_SHIFT); | 2184 | csrow->last_page = (u32) (sys_addr >> PAGE_SHIFT); |
2352 | csrow->page_mask = ~mask_from_dct_mask(pvt, i); | 2185 | |
2186 | get_cs_base_and_mask(pvt, i, 0, &base, &mask); | ||
2187 | csrow->page_mask = ~mask; | ||
2353 | /* 8 bytes of resolution */ | 2188 | /* 8 bytes of resolution */ |
2354 | 2189 | ||
2355 | csrow->mtype = amd64_determine_memory_type(pvt); | 2190 | csrow->mtype = amd64_determine_memory_type(pvt, i); |
2356 | 2191 | ||
2357 | debugf1(" for MC node %d csrow %d:\n", pvt->mc_node_id, i); | 2192 | debugf1(" for MC node %d csrow %d:\n", pvt->mc_node_id, i); |
2358 | debugf1(" input_addr_min: 0x%lx input_addr_max: 0x%lx\n", | 2193 | debugf1(" input_addr_min: 0x%lx input_addr_max: 0x%lx\n", |
@@ -2368,9 +2203,9 @@ static int amd64_init_csrows(struct mem_ctl_info *mci) | |||
2368 | /* | 2203 | /* |
2369 | * determine whether CHIPKILL or JUST ECC or NO ECC is operating | 2204 | * determine whether CHIPKILL or JUST ECC or NO ECC is operating |
2370 | */ | 2205 | */ |
2371 | if (pvt->nbcfg & K8_NBCFG_ECC_ENABLE) | 2206 | if (pvt->nbcfg & NBCFG_ECC_ENABLE) |
2372 | csrow->edac_mode = | 2207 | csrow->edac_mode = |
2373 | (pvt->nbcfg & K8_NBCFG_CHIPKILL) ? | 2208 | (pvt->nbcfg & NBCFG_CHIPKILL) ? |
2374 | EDAC_S4ECD4ED : EDAC_SECDED; | 2209 | EDAC_S4ECD4ED : EDAC_SECDED; |
2375 | else | 2210 | else |
2376 | csrow->edac_mode = EDAC_NONE; | 2211 | csrow->edac_mode = EDAC_NONE; |
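The edac_mode chosen here follows from just two NBCFG bits. A self-contained sketch of that decision (the local enum values and the two bit-mask parameters are placeholders for the kernel's EDAC_* and NBCFG_ECC_ENABLE/NBCFG_CHIPKILL constants):

#include <stdint.h>

enum csrow_ecc_mode { ECC_MODE_NONE, ECC_MODE_SECDED, ECC_MODE_S4ECD4ED };

static enum csrow_ecc_mode pick_ecc_mode(uint32_t nbcfg, uint32_t ecc_en_bit,
					 uint32_t chipkill_bit)
{
	if (!(nbcfg & ecc_en_bit))
		return ECC_MODE_NONE;		/* DRAM ECC disabled */

	/* ChipKill-capable means x4-symbol S4ECD4ED, otherwise plain SECDED */
	return (nbcfg & chipkill_bit) ? ECC_MODE_S4ECD4ED : ECC_MODE_SECDED;
}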
@@ -2380,7 +2215,7 @@ static int amd64_init_csrows(struct mem_ctl_info *mci) | |||
2380 | } | 2215 | } |
2381 | 2216 | ||
2382 | /* get all cores on this DCT */ | 2217 | /* get all cores on this DCT */ |
2383 | static void get_cpus_on_this_dct_cpumask(struct cpumask *mask, int nid) | 2218 | static void get_cpus_on_this_dct_cpumask(struct cpumask *mask, unsigned nid) |
2384 | { | 2219 | { |
2385 | int cpu; | 2220 | int cpu; |
2386 | 2221 | ||
@@ -2390,15 +2225,14 @@ static void get_cpus_on_this_dct_cpumask(struct cpumask *mask, int nid) | |||
2390 | } | 2225 | } |
2391 | 2226 | ||
2392 | /* check MCG_CTL on all the cpus on this node */ | 2227 | /* check MCG_CTL on all the cpus on this node */ |
2393 | static bool amd64_nb_mce_bank_enabled_on_node(int nid) | 2228 | static bool amd64_nb_mce_bank_enabled_on_node(unsigned nid) |
2394 | { | 2229 | { |
2395 | cpumask_var_t mask; | 2230 | cpumask_var_t mask; |
2396 | int cpu, nbe; | 2231 | int cpu, nbe; |
2397 | bool ret = false; | 2232 | bool ret = false; |
2398 | 2233 | ||
2399 | if (!zalloc_cpumask_var(&mask, GFP_KERNEL)) { | 2234 | if (!zalloc_cpumask_var(&mask, GFP_KERNEL)) { |
2400 | amd64_printk(KERN_WARNING, "%s: error allocating mask\n", | 2235 | amd64_warn("%s: Error allocating mask\n", __func__); |
2401 | __func__); | ||
2402 | return false; | 2236 | return false; |
2403 | } | 2237 | } |
2404 | 2238 | ||
@@ -2408,7 +2242,7 @@ static bool amd64_nb_mce_bank_enabled_on_node(int nid) | |||
2408 | 2242 | ||
2409 | for_each_cpu(cpu, mask) { | 2243 | for_each_cpu(cpu, mask) { |
2410 | struct msr *reg = per_cpu_ptr(msrs, cpu); | 2244 | struct msr *reg = per_cpu_ptr(msrs, cpu); |
2411 | nbe = reg->l & K8_MSR_MCGCTL_NBE; | 2245 | nbe = reg->l & MSR_MCGCTL_NBE; |
2412 | 2246 | ||
2413 | debugf0("core: %u, MCG_CTL: 0x%llx, NB MSR is %s\n", | 2247 | debugf0("core: %u, MCG_CTL: 0x%llx, NB MSR is %s\n", |
2414 | cpu, reg->q, | 2248 | cpu, reg->q, |
@@ -2424,18 +2258,17 @@ out: | |||
2424 | return ret; | 2258 | return ret; |
2425 | } | 2259 | } |
2426 | 2260 | ||
2427 | static int amd64_toggle_ecc_err_reporting(struct amd64_pvt *pvt, bool on) | 2261 | static int toggle_ecc_err_reporting(struct ecc_settings *s, u8 nid, bool on) |
2428 | { | 2262 | { |
2429 | cpumask_var_t cmask; | 2263 | cpumask_var_t cmask; |
2430 | int cpu; | 2264 | int cpu; |
2431 | 2265 | ||
2432 | if (!zalloc_cpumask_var(&cmask, GFP_KERNEL)) { | 2266 | if (!zalloc_cpumask_var(&cmask, GFP_KERNEL)) { |
2433 | amd64_printk(KERN_WARNING, "%s: error allocating mask\n", | 2267 | amd64_warn("%s: error allocating mask\n", __func__); |
2434 | __func__); | ||
2435 | return false; | 2268 | return false; |
2436 | } | 2269 | } |
2437 | 2270 | ||
2438 | get_cpus_on_this_dct_cpumask(cmask, pvt->mc_node_id); | 2271 | get_cpus_on_this_dct_cpumask(cmask, nid); |
2439 | 2272 | ||
2440 | rdmsr_on_cpus(cmask, MSR_IA32_MCG_CTL, msrs); | 2273 | rdmsr_on_cpus(cmask, MSR_IA32_MCG_CTL, msrs); |
2441 | 2274 | ||
@@ -2444,16 +2277,16 @@ static int amd64_toggle_ecc_err_reporting(struct amd64_pvt *pvt, bool on) | |||
2444 | struct msr *reg = per_cpu_ptr(msrs, cpu); | 2277 | struct msr *reg = per_cpu_ptr(msrs, cpu); |
2445 | 2278 | ||
2446 | if (on) { | 2279 | if (on) { |
2447 | if (reg->l & K8_MSR_MCGCTL_NBE) | 2280 | if (reg->l & MSR_MCGCTL_NBE) |
2448 | pvt->flags.nb_mce_enable = 1; | 2281 | s->flags.nb_mce_enable = 1; |
2449 | 2282 | ||
2450 | reg->l |= K8_MSR_MCGCTL_NBE; | 2283 | reg->l |= MSR_MCGCTL_NBE; |
2451 | } else { | 2284 | } else { |
2452 | /* | 2285 | /* |
2453 | * Turn off NB MCE reporting only when it was off before | 2286 | * Turn off NB MCE reporting only when it was off before |
2454 | */ | 2287 | */ |
2455 | if (!pvt->flags.nb_mce_enable) | 2288 | if (!s->flags.nb_mce_enable) |
2456 | reg->l &= ~K8_MSR_MCGCTL_NBE; | 2289 | reg->l &= ~MSR_MCGCTL_NBE; |
2457 | } | 2290 | } |
2458 | } | 2291 | } |
2459 | wrmsr_on_cpus(cmask, MSR_IA32_MCG_CTL, msrs); | 2292 | wrmsr_on_cpus(cmask, MSR_IA32_MCG_CTL, msrs); |
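The read-modify-write above preserves firmware policy: the NB-error-enable bit in MCG_CTL is only cleared on the OFF path if it was not already set before the driver turned it on. A per-core model of that logic, using plain integers in place of rdmsr_on_cpus()/wrmsr_on_cpus() (the bit position is an assumption taken from the "set MSR 0x%08x[4]" message printed by ecc_enabled() below):

#include <stdbool.h>
#include <stdint.h>

#define MCGCTL_NBE_BIT	(1u << 4)	/* assumed: MCG_CTL[4], NB MCE enable */

/*
 * Per-core model of toggle_ecc_err_reporting(): on enable, remember whether
 * the bit was already set (like s->flags.nb_mce_enable) so that the later
 * disable only clears a bit this driver set itself.
 */
static void toggle_nbe(uint32_t *mcg_ctl_lo, bool on, bool *nbe_already_on)
{
	if (on) {
		if (*mcg_ctl_lo & MCGCTL_NBE_BIT)
			*nbe_already_on = true;
		*mcg_ctl_lo |= MCGCTL_NBE_BIT;
	} else if (!*nbe_already_on) {
		*mcg_ctl_lo &= ~MCGCTL_NBE_BIT;
	}
}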
@@ -2463,92 +2296,90 @@ static int amd64_toggle_ecc_err_reporting(struct amd64_pvt *pvt, bool on) | |||
2463 | return 0; | 2296 | return 0; |
2464 | } | 2297 | } |
2465 | 2298 | ||
2466 | static void amd64_enable_ecc_error_reporting(struct mem_ctl_info *mci) | 2299 | static bool enable_ecc_error_reporting(struct ecc_settings *s, u8 nid, |
2300 | struct pci_dev *F3) | ||
2467 | { | 2301 | { |
2468 | struct amd64_pvt *pvt = mci->pvt_info; | 2302 | bool ret = true; |
2469 | u32 value, mask = K8_NBCTL_CECCEn | K8_NBCTL_UECCEn; | 2303 | u32 value, mask = 0x3; /* UECC/CECC enable */ |
2304 | |||
2305 | if (toggle_ecc_err_reporting(s, nid, ON)) { | ||
2306 | amd64_warn("Error enabling ECC reporting over MCGCTL!\n"); | ||
2307 | return false; | ||
2308 | } | ||
2470 | 2309 | ||
2471 | amd64_read_pci_cfg(pvt->misc_f3_ctl, K8_NBCTL, &value); | 2310 | amd64_read_pci_cfg(F3, NBCTL, &value); |
2472 | 2311 | ||
2473 | /* turn on UECCn and CECCEn bits */ | 2312 | s->old_nbctl = value & mask; |
2474 | pvt->old_nbctl = value & mask; | 2313 | s->nbctl_valid = true; |
2475 | pvt->nbctl_mcgctl_saved = 1; | ||
2476 | 2314 | ||
2477 | value |= mask; | 2315 | value |= mask; |
2478 | pci_write_config_dword(pvt->misc_f3_ctl, K8_NBCTL, value); | 2316 | amd64_write_pci_cfg(F3, NBCTL, value); |
2479 | 2317 | ||
2480 | if (amd64_toggle_ecc_err_reporting(pvt, ON)) | 2318 | amd64_read_pci_cfg(F3, NBCFG, &value); |
2481 | amd64_printk(KERN_WARNING, "Error enabling ECC reporting over " | ||
2482 | "MCGCTL!\n"); | ||
2483 | 2319 | ||
2484 | amd64_read_pci_cfg(pvt->misc_f3_ctl, K8_NBCFG, &value); | 2320 | debugf0("1: node %d, NBCFG=0x%08x[DramEccEn: %d]\n", |
2321 | nid, value, !!(value & NBCFG_ECC_ENABLE)); | ||
2485 | 2322 | ||
2486 | debugf0("NBCFG(1)= 0x%x CHIPKILL= %s ECC_ENABLE= %s\n", value, | 2323 | if (!(value & NBCFG_ECC_ENABLE)) { |
2487 | (value & K8_NBCFG_CHIPKILL) ? "Enabled" : "Disabled", | 2324 | amd64_warn("DRAM ECC disabled on this node, enabling...\n"); |
2488 | (value & K8_NBCFG_ECC_ENABLE) ? "Enabled" : "Disabled"); | ||
2489 | 2325 | ||
2490 | if (!(value & K8_NBCFG_ECC_ENABLE)) { | 2326 | s->flags.nb_ecc_prev = 0; |
2491 | amd64_printk(KERN_WARNING, | ||
2492 | "This node reports that DRAM ECC is " | ||
2493 | "currently Disabled; ENABLING now\n"); | ||
2494 | |||
2495 | pvt->flags.nb_ecc_prev = 0; | ||
2496 | 2327 | ||
2497 | /* Attempt to turn on DRAM ECC Enable */ | 2328 | /* Attempt to turn on DRAM ECC Enable */ |
2498 | value |= K8_NBCFG_ECC_ENABLE; | 2329 | value |= NBCFG_ECC_ENABLE; |
2499 | pci_write_config_dword(pvt->misc_f3_ctl, K8_NBCFG, value); | 2330 | amd64_write_pci_cfg(F3, NBCFG, value); |
2500 | 2331 | ||
2501 | amd64_read_pci_cfg(pvt->misc_f3_ctl, K8_NBCFG, &value); | 2332 | amd64_read_pci_cfg(F3, NBCFG, &value); |
2502 | 2333 | ||
2503 | if (!(value & K8_NBCFG_ECC_ENABLE)) { | 2334 | if (!(value & NBCFG_ECC_ENABLE)) { |
2504 | amd64_printk(KERN_WARNING, | 2335 | amd64_warn("Hardware rejected DRAM ECC enable," |
2505 | "Hardware rejects Enabling DRAM ECC checking\n" | 2336 | "check memory DIMM configuration.\n"); |
2506 | "Check memory DIMM configuration\n"); | 2337 | ret = false; |
2507 | } else { | 2338 | } else { |
2508 | amd64_printk(KERN_DEBUG, | 2339 | amd64_info("Hardware accepted DRAM ECC Enable\n"); |
2509 | "Hardware accepted DRAM ECC Enable\n"); | ||
2510 | } | 2340 | } |
2511 | } else { | 2341 | } else { |
2512 | pvt->flags.nb_ecc_prev = 1; | 2342 | s->flags.nb_ecc_prev = 1; |
2513 | } | 2343 | } |
2514 | 2344 | ||
2515 | debugf0("NBCFG(2)= 0x%x CHIPKILL= %s ECC_ENABLE= %s\n", value, | 2345 | debugf0("2: node %d, NBCFG=0x%08x[DramEccEn: %d]\n", |
2516 | (value & K8_NBCFG_CHIPKILL) ? "Enabled" : "Disabled", | 2346 | nid, value, !!(value & NBCFG_ECC_ENABLE)); |
2517 | (value & K8_NBCFG_ECC_ENABLE) ? "Enabled" : "Disabled"); | ||
2518 | 2347 | ||
2519 | pvt->ctl_error_info.nbcfg = value; | 2348 | return ret; |
2520 | } | 2349 | } |
2521 | 2350 | ||
2522 | static void amd64_restore_ecc_error_reporting(struct amd64_pvt *pvt) | 2351 | static void restore_ecc_error_reporting(struct ecc_settings *s, u8 nid, |
2352 | struct pci_dev *F3) | ||
2523 | { | 2353 | { |
2524 | u32 value, mask = K8_NBCTL_CECCEn | K8_NBCTL_UECCEn; | 2354 | u32 value, mask = 0x3; /* UECC/CECC enable */ |
2525 | 2355 | ||
2526 | if (!pvt->nbctl_mcgctl_saved) | 2356 | |
2357 | if (!s->nbctl_valid) | ||
2527 | return; | 2358 | return; |
2528 | 2359 | ||
2529 | amd64_read_pci_cfg(pvt->misc_f3_ctl, K8_NBCTL, &value); | 2360 | amd64_read_pci_cfg(F3, NBCTL, &value); |
2530 | value &= ~mask; | 2361 | value &= ~mask; |
2531 | value |= pvt->old_nbctl; | 2362 | value |= s->old_nbctl; |
2532 | 2363 | ||
2533 | pci_write_config_dword(pvt->misc_f3_ctl, K8_NBCTL, value); | 2364 | amd64_write_pci_cfg(F3, NBCTL, value); |
2534 | 2365 | ||
2535 | /* restore previous BIOS DRAM ECC "off" setting which we force-enabled */ | 2366 | /* restore previous BIOS DRAM ECC "off" setting we force-enabled */ |
2536 | if (!pvt->flags.nb_ecc_prev) { | 2367 | if (!s->flags.nb_ecc_prev) { |
2537 | amd64_read_pci_cfg(pvt->misc_f3_ctl, K8_NBCFG, &value); | 2368 | amd64_read_pci_cfg(F3, NBCFG, &value); |
2538 | value &= ~K8_NBCFG_ECC_ENABLE; | 2369 | value &= ~NBCFG_ECC_ENABLE; |
2539 | pci_write_config_dword(pvt->misc_f3_ctl, K8_NBCFG, value); | 2370 | amd64_write_pci_cfg(F3, NBCFG, value); |
2540 | } | 2371 | } |
2541 | 2372 | ||
2542 | /* restore the NB Enable MCGCTL bit */ | 2373 | /* restore the NB Enable MCGCTL bit */ |
2543 | if (amd64_toggle_ecc_err_reporting(pvt, OFF)) | 2374 | if (toggle_ecc_err_reporting(s, nid, OFF)) |
2544 | amd64_printk(KERN_WARNING, "Error restoring NB MCGCTL settings!\n"); | 2375 | amd64_warn("Error restoring NB MCGCTL settings!\n"); |
2545 | } | 2376 | } |
2546 | 2377 | ||
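enable_ecc_error_reporting() and restore_ecc_error_reporting() form a snapshot/restore pair around two registers: the UECC/CECC enable bits in NBCTL and the DRAM ECC enable bit in NBCFG. A simplified model of the restore side, assuming a small snapshot struct shaped like the ecc_settings fields used above (register words are plain integers here instead of PCI config accesses):

#include <stdbool.h>
#include <stdint.h>

struct ecc_snapshot {
	bool valid;			/* like s->nbctl_valid */
	bool ecc_was_on;		/* like s->flags.nb_ecc_prev */
	uint32_t old_nbctl_bits;	/* like s->old_nbctl */
};

/*
 * Only act when a snapshot exists, put the saved UECC/CECC bits back into
 * NBCTL, and clear DRAM ECC in NBCFG only if the driver was the one that
 * turned it on, so BIOS settings are otherwise left alone.
 */
static void restore_ecc(uint32_t *nbctl, uint32_t *nbcfg,
			uint32_t ecc_enable_bit, const struct ecc_snapshot *snap)
{
	if (!snap->valid)
		return;

	*nbctl = (*nbctl & ~0x3u) | snap->old_nbctl_bits;

	if (!snap->ecc_was_on)
		*nbcfg &= ~ecc_enable_bit;
}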
2547 | /* | 2378 | /* |
2548 | * EDAC requires that the BIOS have ECC enabled before taking over the | 2379 | * EDAC requires that the BIOS have ECC enabled before |
2549 | * processing of ECC errors. This is because the BIOS can properly initialize | 2380 | * taking over the processing of ECC errors. A command line |
2550 | * the memory system completely. A command line option allows one to force-enable | 2381 | * option allows one to force-enable hardware ECC later in |
2551 | * hardware ECC later in amd64_enable_ecc_error_reporting(). | 2382 | * enable_ecc_error_reporting(). |
2552 | */ | 2383 | */ |
2553 | static const char *ecc_msg = | 2384 | static const char *ecc_msg = |
2554 | "ECC disabled in the BIOS or no ECC capability, module will not load.\n" | 2385 | "ECC disabled in the BIOS or no ECC capability, module will not load.\n" |
@@ -2556,38 +2387,28 @@ static const char *ecc_msg = | |||
2556 | "'ecc_enable_override'.\n" | 2387 | "'ecc_enable_override'.\n" |
2557 | " (Note that use of the override may cause unknown side effects.)\n"; | 2388 | " (Note that use of the override may cause unknown side effects.)\n"; |
2558 | 2389 | ||
2559 | static int amd64_check_ecc_enabled(struct amd64_pvt *pvt) | 2390 | static bool ecc_enabled(struct pci_dev *F3, u8 nid) |
2560 | { | 2391 | { |
2561 | u32 value; | 2392 | u32 value; |
2562 | u8 ecc_enabled = 0; | 2393 | u8 ecc_en = 0; |
2563 | bool nb_mce_en = false; | 2394 | bool nb_mce_en = false; |
2564 | 2395 | ||
2565 | amd64_read_pci_cfg(pvt->misc_f3_ctl, K8_NBCFG, &value); | 2396 | amd64_read_pci_cfg(F3, NBCFG, &value); |
2566 | 2397 | ||
2567 | ecc_enabled = !!(value & K8_NBCFG_ECC_ENABLE); | 2398 | ecc_en = !!(value & NBCFG_ECC_ENABLE); |
2568 | if (!ecc_enabled) | 2399 | amd64_info("DRAM ECC %s.\n", (ecc_en ? "enabled" : "disabled")); |
2569 | amd64_printk(KERN_NOTICE, "This node reports that Memory ECC " | ||
2570 | "is currently disabled, set F3x%x[22] (%s).\n", | ||
2571 | K8_NBCFG, pci_name(pvt->misc_f3_ctl)); | ||
2572 | else | ||
2573 | amd64_printk(KERN_INFO, "ECC is enabled by BIOS.\n"); | ||
2574 | 2400 | ||
2575 | nb_mce_en = amd64_nb_mce_bank_enabled_on_node(pvt->mc_node_id); | 2401 | nb_mce_en = amd64_nb_mce_bank_enabled_on_node(nid); |
2576 | if (!nb_mce_en) | 2402 | if (!nb_mce_en) |
2577 | amd64_printk(KERN_NOTICE, "NB MCE bank disabled, set MSR " | 2403 | amd64_notice("NB MCE bank disabled, set MSR " |
2578 | "0x%08x[4] on node %d to enable.\n", | 2404 | "0x%08x[4] on node %d to enable.\n", |
2579 | MSR_IA32_MCG_CTL, pvt->mc_node_id); | 2405 | MSR_IA32_MCG_CTL, nid); |
2580 | 2406 | ||
2581 | if (!ecc_enabled || !nb_mce_en) { | 2407 | if (!ecc_en || !nb_mce_en) { |
2582 | if (!ecc_enable_override) { | 2408 | amd64_notice("%s", ecc_msg); |
2583 | amd64_printk(KERN_NOTICE, "%s", ecc_msg); | 2409 | return false; |
2584 | return -ENODEV; | ||
2585 | } else { | ||
2586 | amd64_printk(KERN_WARNING, "Forcing ECC checking on!\n"); | ||
2587 | } | ||
2588 | } | 2410 | } |
2589 | 2411 | return true; | |
2590 | return 0; | ||
2591 | } | 2412 | } |
2592 | 2413 | ||
2593 | struct mcidev_sysfs_attribute sysfs_attrs[ARRAY_SIZE(amd64_dbg_attrs) + | 2414 | struct mcidev_sysfs_attribute sysfs_attrs[ARRAY_SIZE(amd64_dbg_attrs) + |
@@ -2596,39 +2417,41 @@ struct mcidev_sysfs_attribute sysfs_attrs[ARRAY_SIZE(amd64_dbg_attrs) + | |||
2596 | 2417 | ||
2597 | struct mcidev_sysfs_attribute terminator = { .attr = { .name = NULL } }; | 2418 | struct mcidev_sysfs_attribute terminator = { .attr = { .name = NULL } }; |
2598 | 2419 | ||
2599 | static void amd64_set_mc_sysfs_attributes(struct mem_ctl_info *mci) | 2420 | static void set_mc_sysfs_attrs(struct mem_ctl_info *mci) |
2600 | { | 2421 | { |
2601 | unsigned int i = 0, j = 0; | 2422 | unsigned int i = 0, j = 0; |
2602 | 2423 | ||
2603 | for (; i < ARRAY_SIZE(amd64_dbg_attrs); i++) | 2424 | for (; i < ARRAY_SIZE(amd64_dbg_attrs); i++) |
2604 | sysfs_attrs[i] = amd64_dbg_attrs[i]; | 2425 | sysfs_attrs[i] = amd64_dbg_attrs[i]; |
2605 | 2426 | ||
2606 | for (j = 0; j < ARRAY_SIZE(amd64_inj_attrs); j++, i++) | 2427 | if (boot_cpu_data.x86 >= 0x10) |
2607 | sysfs_attrs[i] = amd64_inj_attrs[j]; | 2428 | for (j = 0; j < ARRAY_SIZE(amd64_inj_attrs); j++, i++) |
2429 | sysfs_attrs[i] = amd64_inj_attrs[j]; | ||
2608 | 2430 | ||
2609 | sysfs_attrs[i] = terminator; | 2431 | sysfs_attrs[i] = terminator; |
2610 | 2432 | ||
2611 | mci->mc_driver_sysfs_attributes = sysfs_attrs; | 2433 | mci->mc_driver_sysfs_attributes = sysfs_attrs; |
2612 | } | 2434 | } |
2613 | 2435 | ||
2614 | static void amd64_setup_mci_misc_attributes(struct mem_ctl_info *mci) | 2436 | static void setup_mci_misc_attrs(struct mem_ctl_info *mci, |
2437 | struct amd64_family_type *fam) | ||
2615 | { | 2438 | { |
2616 | struct amd64_pvt *pvt = mci->pvt_info; | 2439 | struct amd64_pvt *pvt = mci->pvt_info; |
2617 | 2440 | ||
2618 | mci->mtype_cap = MEM_FLAG_DDR2 | MEM_FLAG_RDDR2; | 2441 | mci->mtype_cap = MEM_FLAG_DDR2 | MEM_FLAG_RDDR2; |
2619 | mci->edac_ctl_cap = EDAC_FLAG_NONE; | 2442 | mci->edac_ctl_cap = EDAC_FLAG_NONE; |
2620 | 2443 | ||
2621 | if (pvt->nbcap & K8_NBCAP_SECDED) | 2444 | if (pvt->nbcap & NBCAP_SECDED) |
2622 | mci->edac_ctl_cap |= EDAC_FLAG_SECDED; | 2445 | mci->edac_ctl_cap |= EDAC_FLAG_SECDED; |
2623 | 2446 | ||
2624 | if (pvt->nbcap & K8_NBCAP_CHIPKILL) | 2447 | if (pvt->nbcap & NBCAP_CHIPKILL) |
2625 | mci->edac_ctl_cap |= EDAC_FLAG_S4ECD4ED; | 2448 | mci->edac_ctl_cap |= EDAC_FLAG_S4ECD4ED; |
2626 | 2449 | ||
2627 | mci->edac_cap = amd64_determine_edac_cap(pvt); | 2450 | mci->edac_cap = amd64_determine_edac_cap(pvt); |
2628 | mci->mod_name = EDAC_MOD_STR; | 2451 | mci->mod_name = EDAC_MOD_STR; |
2629 | mci->mod_ver = EDAC_AMD64_VERSION; | 2452 | mci->mod_ver = EDAC_AMD64_VERSION; |
2630 | mci->ctl_name = get_amd_family_name(pvt->mc_type_index); | 2453 | mci->ctl_name = fam->ctl_name; |
2631 | mci->dev_name = pci_name(pvt->dram_f2_ctl); | 2454 | mci->dev_name = pci_name(pvt->F2); |
2632 | mci->ctl_page_to_phys = NULL; | 2455 | mci->ctl_page_to_phys = NULL; |
2633 | 2456 | ||
2634 | /* memory scrubber interface */ | 2457 | /* memory scrubber interface */ |
@@ -2637,111 +2460,96 @@ static void amd64_setup_mci_misc_attributes(struct mem_ctl_info *mci) | |||
2637 | } | 2460 | } |
2638 | 2461 | ||
2639 | /* | 2462 | /* |
2640 | * Init stuff for this DRAM Controller device. | 2463 | * returns a pointer to the family descriptor on success, NULL otherwise. |
2641 | * | ||
2642 | * Due to a hardware feature on Fam10h CPUs, the Enable Extended Configuration | ||
2643 | * Space feature MUST be enabled on ALL Processors prior to actually reading | ||
2644 | * from the ECS registers. Since the loading of the module can occur on any | ||
2645 | * 'core', and cores don't 'see' all the other processors ECS data when the | ||
2646 | * others are NOT enabled. Our solution is to first enable ECS access in this | ||
2647 | * routine on all processors, gather some data in a amd64_pvt structure and | ||
2648 | * later come back in a finish-setup function to perform that final | ||
2649 | * initialization. See also amd64_init_2nd_stage() for that. | ||
2650 | */ | 2464 | */ |
2651 | static int amd64_probe_one_instance(struct pci_dev *dram_f2_ctl, | 2465 | static struct amd64_family_type *amd64_per_family_init(struct amd64_pvt *pvt) |
2652 | int mc_type_index) | 2466 | { |
2467 | u8 fam = boot_cpu_data.x86; | ||
2468 | struct amd64_family_type *fam_type = NULL; | ||
2469 | |||
2470 | switch (fam) { | ||
2471 | case 0xf: | ||
2472 | fam_type = &amd64_family_types[K8_CPUS]; | ||
2473 | pvt->ops = &amd64_family_types[K8_CPUS].ops; | ||
2474 | break; | ||
2475 | |||
2476 | case 0x10: | ||
2477 | fam_type = &amd64_family_types[F10_CPUS]; | ||
2478 | pvt->ops = &amd64_family_types[F10_CPUS].ops; | ||
2479 | break; | ||
2480 | |||
2481 | case 0x15: | ||
2482 | fam_type = &amd64_family_types[F15_CPUS]; | ||
2483 | pvt->ops = &amd64_family_types[F15_CPUS].ops; | ||
2484 | break; | ||
2485 | |||
2486 | default: | ||
2487 | amd64_err("Unsupported family!\n"); | ||
2488 | return NULL; | ||
2489 | } | ||
2490 | |||
2491 | pvt->ext_model = boot_cpu_data.x86_model >> 4; | ||
2492 | |||
2493 | amd64_info("%s %sdetected (node %d).\n", fam_type->ctl_name, | ||
2494 | (fam == 0xf ? | ||
2495 | (pvt->ext_model >= K8_REV_F ? "revF or later " | ||
2496 | : "revE or earlier ") | ||
2497 | : ""), pvt->mc_node_id); | ||
2498 | return fam_type; | ||
2499 | } | ||
2500 | |||
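amd64_per_family_init() is a one-shot dispatch: the family descriptor and the low-level ops are chosen once, and the rest of the driver stays family-agnostic by calling through pvt->ops. A generic sketch of that pattern with illustrative names (not the driver's actual structs); adding support for a new family then means one new descriptor and one new case:

/*
 * Minimal sketch of the family-dispatch pattern above.  Struct layout and
 * names are placeholders; dbam_to_cs_mb models a family-specific op.
 */
struct family_desc {
	const char *ctl_name;
	int (*dbam_to_cs_mb)(unsigned int cs_mode);
};

static const struct family_desc *pick_family(unsigned int family)
{
	static const struct family_desc k8  = { .ctl_name = "K8"   };
	static const struct family_desc f10 = { .ctl_name = "F10h" };
	static const struct family_desc f15 = { .ctl_name = "F15h" };

	switch (family) {
	case 0x0f: return &k8;
	case 0x10: return &f10;
	case 0x15: return &f15;
	default:   return NULL;	/* unsupported family, as in the diff */
	}
}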
2501 | static int amd64_init_one_instance(struct pci_dev *F2) | ||
2653 | { | 2502 | { |
2654 | struct amd64_pvt *pvt = NULL; | 2503 | struct amd64_pvt *pvt = NULL; |
2504 | struct amd64_family_type *fam_type = NULL; | ||
2505 | struct mem_ctl_info *mci = NULL; | ||
2655 | int err = 0, ret; | 2506 | int err = 0, ret; |
2507 | u8 nid = get_node_id(F2); | ||
2656 | 2508 | ||
2657 | ret = -ENOMEM; | 2509 | ret = -ENOMEM; |
2658 | pvt = kzalloc(sizeof(struct amd64_pvt), GFP_KERNEL); | 2510 | pvt = kzalloc(sizeof(struct amd64_pvt), GFP_KERNEL); |
2659 | if (!pvt) | 2511 | if (!pvt) |
2660 | goto err_exit; | 2512 | goto err_ret; |
2661 | 2513 | ||
2662 | pvt->mc_node_id = get_node_id(dram_f2_ctl); | 2514 | pvt->mc_node_id = nid; |
2515 | pvt->F2 = F2; | ||
2663 | 2516 | ||
2664 | pvt->dram_f2_ctl = dram_f2_ctl; | 2517 | ret = -EINVAL; |
2665 | pvt->ext_model = boot_cpu_data.x86_model >> 4; | 2518 | fam_type = amd64_per_family_init(pvt); |
2666 | pvt->mc_type_index = mc_type_index; | 2519 | if (!fam_type) |
2667 | pvt->ops = family_ops(mc_type_index); | 2520 | goto err_free; |
2668 | 2521 | ||
2669 | /* | ||
2670 | * We have the dram_f2_ctl device as an argument, now go reserve its | ||
2671 | * sibling devices from the PCI system. | ||
2672 | */ | ||
2673 | ret = -ENODEV; | 2522 | ret = -ENODEV; |
2674 | err = amd64_reserve_mc_sibling_devices(pvt, mc_type_index); | 2523 | err = reserve_mc_sibling_devs(pvt, fam_type->f1_id, fam_type->f3_id); |
2675 | if (err) | 2524 | if (err) |
2676 | goto err_free; | 2525 | goto err_free; |
2677 | 2526 | ||
2678 | ret = -EINVAL; | 2527 | read_mc_regs(pvt); |
2679 | err = amd64_check_ecc_enabled(pvt); | ||
2680 | if (err) | ||
2681 | goto err_put; | ||
2682 | |||
2683 | /* | ||
2684 | * Key operation here: setup of HW prior to performing ops on it. Some | ||
2685 | * setup is required to access ECS data. After this is performed, the | ||
2686 | * 'teardown' function must be called upon error and normal exit paths. | ||
2687 | */ | ||
2688 | if (boot_cpu_data.x86 >= 0x10) | ||
2689 | amd64_setup(pvt); | ||
2690 | |||
2691 | /* | ||
2692 | * Save the pointer to the private data for use in 2nd initialization | ||
2693 | * stage | ||
2694 | */ | ||
2695 | pvt_lookup[pvt->mc_node_id] = pvt; | ||
2696 | |||
2697 | return 0; | ||
2698 | |||
2699 | err_put: | ||
2700 | amd64_free_mc_sibling_devices(pvt); | ||
2701 | |||
2702 | err_free: | ||
2703 | kfree(pvt); | ||
2704 | |||
2705 | err_exit: | ||
2706 | return ret; | ||
2707 | } | ||
2708 | |||
2709 | /* | ||
2710 | * This is the finishing stage of the init code. Needs to be performed after all | ||
2711 | * MCs' hardware have been prepped for accessing extended config space. | ||
2712 | */ | ||
2713 | static int amd64_init_2nd_stage(struct amd64_pvt *pvt) | ||
2714 | { | ||
2715 | int node_id = pvt->mc_node_id; | ||
2716 | struct mem_ctl_info *mci; | ||
2717 | int ret = -ENODEV; | ||
2718 | |||
2719 | amd64_read_mc_registers(pvt); | ||
2720 | 2528 | ||
2721 | /* | 2529 | /* |
2722 | * We need to determine how many memory channels there are. Then use | 2530 | * We need to determine how many memory channels there are. Then use |
2723 | * that information for calculating the size of the dynamic instance | 2531 | * that information for calculating the size of the dynamic instance |
2724 | * tables in the 'mci' structure | 2532 | * tables in the 'mci' structure. |
2725 | */ | 2533 | */ |
2534 | ret = -EINVAL; | ||
2726 | pvt->channel_count = pvt->ops->early_channel_count(pvt); | 2535 | pvt->channel_count = pvt->ops->early_channel_count(pvt); |
2727 | if (pvt->channel_count < 0) | 2536 | if (pvt->channel_count < 0) |
2728 | goto err_exit; | 2537 | goto err_siblings; |
2729 | 2538 | ||
2730 | ret = -ENOMEM; | 2539 | ret = -ENOMEM; |
2731 | mci = edac_mc_alloc(0, pvt->cs_count, pvt->channel_count, node_id); | 2540 | mci = edac_mc_alloc(0, pvt->csels[0].b_cnt, pvt->channel_count, nid); |
2732 | if (!mci) | 2541 | if (!mci) |
2733 | goto err_exit; | 2542 | goto err_siblings; |
2734 | 2543 | ||
2735 | mci->pvt_info = pvt; | 2544 | mci->pvt_info = pvt; |
2545 | mci->dev = &pvt->F2->dev; | ||
2736 | 2546 | ||
2737 | mci->dev = &pvt->dram_f2_ctl->dev; | 2547 | setup_mci_misc_attrs(mci, fam_type); |
2738 | amd64_setup_mci_misc_attributes(mci); | ||
2739 | 2548 | ||
2740 | if (amd64_init_csrows(mci)) | 2549 | if (init_csrows(mci)) |
2741 | mci->edac_cap = EDAC_FLAG_NONE; | 2550 | mci->edac_cap = EDAC_FLAG_NONE; |
2742 | 2551 | ||
2743 | amd64_enable_ecc_error_reporting(mci); | 2552 | set_mc_sysfs_attrs(mci); |
2744 | amd64_set_mc_sysfs_attributes(mci); | ||
2745 | 2553 | ||
2746 | ret = -ENODEV; | 2554 | ret = -ENODEV; |
2747 | if (edac_mc_add_mc(mci)) { | 2555 | if (edac_mc_add_mc(mci)) { |
@@ -2749,54 +2557,77 @@ static int amd64_init_2nd_stage(struct amd64_pvt *pvt) | |||
2749 | goto err_add_mc; | 2557 | goto err_add_mc; |
2750 | } | 2558 | } |
2751 | 2559 | ||
2752 | mci_lookup[node_id] = mci; | ||
2753 | pvt_lookup[node_id] = NULL; | ||
2754 | |||
2755 | /* register stuff with EDAC MCE */ | 2560 | /* register stuff with EDAC MCE */ |
2756 | if (report_gart_errors) | 2561 | if (report_gart_errors) |
2757 | amd_report_gart_errors(true); | 2562 | amd_report_gart_errors(true); |
2758 | 2563 | ||
2759 | amd_register_ecc_decoder(amd64_decode_bus_error); | 2564 | amd_register_ecc_decoder(amd64_decode_bus_error); |
2760 | 2565 | ||
2566 | mcis[nid] = mci; | ||
2567 | |||
2568 | atomic_inc(&drv_instances); | ||
2569 | |||
2761 | return 0; | 2570 | return 0; |
2762 | 2571 | ||
2763 | err_add_mc: | 2572 | err_add_mc: |
2764 | edac_mc_free(mci); | 2573 | edac_mc_free(mci); |
2765 | 2574 | ||
2766 | err_exit: | 2575 | err_siblings: |
2767 | debugf0("failure to init 2nd stage: ret=%d\n", ret); | 2576 | free_mc_sibling_devs(pvt); |
2768 | |||
2769 | amd64_restore_ecc_error_reporting(pvt); | ||
2770 | |||
2771 | if (boot_cpu_data.x86 > 0xf) | ||
2772 | amd64_teardown(pvt); | ||
2773 | 2577 | ||
2774 | amd64_free_mc_sibling_devices(pvt); | 2578 | err_free: |
2775 | 2579 | kfree(pvt); | |
2776 | kfree(pvt_lookup[pvt->mc_node_id]); | ||
2777 | pvt_lookup[node_id] = NULL; | ||
2778 | 2580 | ||
2581 | err_ret: | ||
2779 | return ret; | 2582 | return ret; |
2780 | } | 2583 | } |
2781 | 2584 | ||
2782 | 2585 | static int __devinit amd64_probe_one_instance(struct pci_dev *pdev, | |
2783 | static int __devinit amd64_init_one_instance(struct pci_dev *pdev, | 2586 | const struct pci_device_id *mc_type) |
2784 | const struct pci_device_id *mc_type) | ||
2785 | { | 2587 | { |
2588 | u8 nid = get_node_id(pdev); | ||
2589 | struct pci_dev *F3 = node_to_amd_nb(nid)->misc; | ||
2590 | struct ecc_settings *s; | ||
2786 | int ret = 0; | 2591 | int ret = 0; |
2787 | 2592 | ||
2788 | debugf0("(MC node=%d,mc_type='%s')\n", get_node_id(pdev), | ||
2789 | get_amd_family_name(mc_type->driver_data)); | ||
2790 | |||
2791 | ret = pci_enable_device(pdev); | 2593 | ret = pci_enable_device(pdev); |
2792 | if (ret < 0) | 2594 | if (ret < 0) { |
2793 | ret = -EIO; | ||
2794 | else | ||
2795 | ret = amd64_probe_one_instance(pdev, mc_type->driver_data); | ||
2796 | |||
2797 | if (ret < 0) | ||
2798 | debugf0("ret=%d\n", ret); | 2595 | debugf0("ret=%d\n", ret); |
2596 | return -EIO; | ||
2597 | } | ||
2598 | |||
2599 | ret = -ENOMEM; | ||
2600 | s = kzalloc(sizeof(struct ecc_settings), GFP_KERNEL); | ||
2601 | if (!s) | ||
2602 | goto err_out; | ||
2603 | |||
2604 | ecc_stngs[nid] = s; | ||
2605 | |||
2606 | if (!ecc_enabled(F3, nid)) { | ||
2607 | ret = -ENODEV; | ||
2608 | |||
2609 | if (!ecc_enable_override) | ||
2610 | goto err_enable; | ||
2611 | |||
2612 | amd64_warn("Forcing ECC on!\n"); | ||
2613 | |||
2614 | if (!enable_ecc_error_reporting(s, nid, F3)) | ||
2615 | goto err_enable; | ||
2616 | } | ||
2799 | 2617 | ||
2618 | ret = amd64_init_one_instance(pdev); | ||
2619 | if (ret < 0) { | ||
2620 | amd64_err("Error probing instance: %d\n", nid); | ||
2621 | restore_ecc_error_reporting(s, nid, F3); | ||
2622 | } | ||
2623 | |||
2624 | return ret; | ||
2625 | |||
2626 | err_enable: | ||
2627 | kfree(s); | ||
2628 | ecc_stngs[nid] = NULL; | ||
2629 | |||
2630 | err_out: | ||
2800 | return ret; | 2631 | return ret; |
2801 | } | 2632 | } |
2802 | 2633 | ||
@@ -2804,6 +2635,9 @@ static void __devexit amd64_remove_one_instance(struct pci_dev *pdev) | |||
2804 | { | 2635 | { |
2805 | struct mem_ctl_info *mci; | 2636 | struct mem_ctl_info *mci; |
2806 | struct amd64_pvt *pvt; | 2637 | struct amd64_pvt *pvt; |
2638 | u8 nid = get_node_id(pdev); | ||
2639 | struct pci_dev *F3 = node_to_amd_nb(nid)->misc; | ||
2640 | struct ecc_settings *s = ecc_stngs[nid]; | ||
2807 | 2641 | ||
2808 | /* Remove from EDAC CORE tracking list */ | 2642 | /* Remove from EDAC CORE tracking list */ |
2809 | mci = edac_mc_del_mc(&pdev->dev); | 2643 | mci = edac_mc_del_mc(&pdev->dev); |
@@ -2812,20 +2646,20 @@ static void __devexit amd64_remove_one_instance(struct pci_dev *pdev) | |||
2812 | 2646 | ||
2813 | pvt = mci->pvt_info; | 2647 | pvt = mci->pvt_info; |
2814 | 2648 | ||
2815 | amd64_restore_ecc_error_reporting(pvt); | 2649 | restore_ecc_error_reporting(s, nid, F3); |
2816 | |||
2817 | if (boot_cpu_data.x86 > 0xf) | ||
2818 | amd64_teardown(pvt); | ||
2819 | 2650 | ||
2820 | amd64_free_mc_sibling_devices(pvt); | 2651 | free_mc_sibling_devs(pvt); |
2821 | 2652 | ||
2822 | /* unregister from EDAC MCE */ | 2653 | /* unregister from EDAC MCE */ |
2823 | amd_report_gart_errors(false); | 2654 | amd_report_gart_errors(false); |
2824 | amd_unregister_ecc_decoder(amd64_decode_bus_error); | 2655 | amd_unregister_ecc_decoder(amd64_decode_bus_error); |
2825 | 2656 | ||
2657 | kfree(ecc_stngs[nid]); | ||
2658 | ecc_stngs[nid] = NULL; | ||
2659 | |||
2826 | /* Free the EDAC CORE resources */ | 2660 | /* Free the EDAC CORE resources */ |
2827 | mci->pvt_info = NULL; | 2661 | mci->pvt_info = NULL; |
2828 | mci_lookup[pvt->mc_node_id] = NULL; | 2662 | mcis[nid] = NULL; |
2829 | 2663 | ||
2830 | kfree(pvt); | 2664 | kfree(pvt); |
2831 | edac_mc_free(mci); | 2665 | edac_mc_free(mci); |
@@ -2844,7 +2678,6 @@ static const struct pci_device_id amd64_pci_table[] __devinitdata = { | |||
2844 | .subdevice = PCI_ANY_ID, | 2678 | .subdevice = PCI_ANY_ID, |
2845 | .class = 0, | 2679 | .class = 0, |
2846 | .class_mask = 0, | 2680 | .class_mask = 0, |
2847 | .driver_data = K8_CPUS | ||
2848 | }, | 2681 | }, |
2849 | { | 2682 | { |
2850 | .vendor = PCI_VENDOR_ID_AMD, | 2683 | .vendor = PCI_VENDOR_ID_AMD, |
@@ -2853,29 +2686,28 @@ static const struct pci_device_id amd64_pci_table[] __devinitdata = { | |||
2853 | .subdevice = PCI_ANY_ID, | 2686 | .subdevice = PCI_ANY_ID, |
2854 | .class = 0, | 2687 | .class = 0, |
2855 | .class_mask = 0, | 2688 | .class_mask = 0, |
2856 | .driver_data = F10_CPUS | ||
2857 | }, | 2689 | }, |
2858 | { | 2690 | { |
2859 | .vendor = PCI_VENDOR_ID_AMD, | 2691 | .vendor = PCI_VENDOR_ID_AMD, |
2860 | .device = PCI_DEVICE_ID_AMD_11H_NB_DRAM, | 2692 | .device = PCI_DEVICE_ID_AMD_15H_NB_F2, |
2861 | .subvendor = PCI_ANY_ID, | 2693 | .subvendor = PCI_ANY_ID, |
2862 | .subdevice = PCI_ANY_ID, | 2694 | .subdevice = PCI_ANY_ID, |
2863 | .class = 0, | 2695 | .class = 0, |
2864 | .class_mask = 0, | 2696 | .class_mask = 0, |
2865 | .driver_data = F11_CPUS | ||
2866 | }, | 2697 | }, |
2698 | |||
2867 | {0, } | 2699 | {0, } |
2868 | }; | 2700 | }; |
2869 | MODULE_DEVICE_TABLE(pci, amd64_pci_table); | 2701 | MODULE_DEVICE_TABLE(pci, amd64_pci_table); |
2870 | 2702 | ||
2871 | static struct pci_driver amd64_pci_driver = { | 2703 | static struct pci_driver amd64_pci_driver = { |
2872 | .name = EDAC_MOD_STR, | 2704 | .name = EDAC_MOD_STR, |
2873 | .probe = amd64_init_one_instance, | 2705 | .probe = amd64_probe_one_instance, |
2874 | .remove = __devexit_p(amd64_remove_one_instance), | 2706 | .remove = __devexit_p(amd64_remove_one_instance), |
2875 | .id_table = amd64_pci_table, | 2707 | .id_table = amd64_pci_table, |
2876 | }; | 2708 | }; |
2877 | 2709 | ||
2878 | static void amd64_setup_pci_device(void) | 2710 | static void setup_pci_device(void) |
2879 | { | 2711 | { |
2880 | struct mem_ctl_info *mci; | 2712 | struct mem_ctl_info *mci; |
2881 | struct amd64_pvt *pvt; | 2713 | struct amd64_pvt *pvt; |
@@ -2883,13 +2715,12 @@ static void amd64_setup_pci_device(void) | |||
2883 | if (amd64_ctl_pci) | 2715 | if (amd64_ctl_pci) |
2884 | return; | 2716 | return; |
2885 | 2717 | ||
2886 | mci = mci_lookup[0]; | 2718 | mci = mcis[0]; |
2887 | if (mci) { | 2719 | if (mci) { |
2888 | 2720 | ||
2889 | pvt = mci->pvt_info; | 2721 | pvt = mci->pvt_info; |
2890 | amd64_ctl_pci = | 2722 | amd64_ctl_pci = |
2891 | edac_pci_create_generic_ctl(&pvt->dram_f2_ctl->dev, | 2723 | edac_pci_create_generic_ctl(&pvt->F2->dev, EDAC_MOD_STR); |
2892 | EDAC_MOD_STR); | ||
2893 | 2724 | ||
2894 | if (!amd64_ctl_pci) { | 2725 | if (!amd64_ctl_pci) { |
2895 | pr_warning("%s(): Unable to create PCI control\n", | 2726 | pr_warning("%s(): Unable to create PCI control\n", |
@@ -2903,51 +2734,50 @@ static void amd64_setup_pci_device(void) | |||
2903 | 2734 | ||
2904 | static int __init amd64_edac_init(void) | 2735 | static int __init amd64_edac_init(void) |
2905 | { | 2736 | { |
2906 | int nb, err = -ENODEV; | 2737 | int err = -ENODEV; |
2907 | bool load_ok = false; | ||
2908 | 2738 | ||
2909 | edac_printk(KERN_INFO, EDAC_MOD_STR, EDAC_AMD64_VERSION "\n"); | 2739 | printk(KERN_INFO "AMD64 EDAC driver v%s\n", EDAC_AMD64_VERSION); |
2910 | 2740 | ||
2911 | opstate_init(); | 2741 | opstate_init(); |
2912 | 2742 | ||
2913 | if (cache_k8_northbridges() < 0) | 2743 | if (amd_cache_northbridges() < 0) |
2914 | goto err_ret; | 2744 | goto err_ret; |
2915 | 2745 | ||
2746 | err = -ENOMEM; | ||
2747 | mcis = kzalloc(amd_nb_num() * sizeof(mcis[0]), GFP_KERNEL); | ||
2748 | ecc_stngs = kzalloc(amd_nb_num() * sizeof(ecc_stngs[0]), GFP_KERNEL); | ||
2749 | if (!(mcis && ecc_stngs)) | ||
2750 | goto err_free; | ||
2751 | |||
2916 | msrs = msrs_alloc(); | 2752 | msrs = msrs_alloc(); |
2917 | if (!msrs) | 2753 | if (!msrs) |
2918 | goto err_ret; | 2754 | goto err_free; |
2919 | 2755 | ||
2920 | err = pci_register_driver(&amd64_pci_driver); | 2756 | err = pci_register_driver(&amd64_pci_driver); |
2921 | if (err) | 2757 | if (err) |
2922 | goto err_pci; | 2758 | goto err_pci; |
2923 | 2759 | ||
2924 | /* | ||
2925 | * At this point, the array 'pvt_lookup[]' contains pointers to alloc'd | ||
2926 | * amd64_pvt structs. These will be used in the 2nd stage init function | ||
2927 | * to finish initialization of the MC instances. | ||
2928 | */ | ||
2929 | err = -ENODEV; | 2760 | err = -ENODEV; |
2930 | for (nb = 0; nb < num_k8_northbridges; nb++) { | 2761 | if (!atomic_read(&drv_instances)) |
2931 | if (!pvt_lookup[nb]) | 2762 | goto err_no_instances; |
2932 | continue; | ||
2933 | |||
2934 | err = amd64_init_2nd_stage(pvt_lookup[nb]); | ||
2935 | if (err) | ||
2936 | goto err_2nd_stage; | ||
2937 | |||
2938 | load_ok = true; | ||
2939 | } | ||
2940 | 2763 | ||
2941 | if (load_ok) { | 2764 | setup_pci_device(); |
2942 | amd64_setup_pci_device(); | 2765 | return 0; |
2943 | return 0; | ||
2944 | } | ||
2945 | 2766 | ||
2946 | err_2nd_stage: | 2767 | err_no_instances: |
2947 | pci_unregister_driver(&amd64_pci_driver); | 2768 | pci_unregister_driver(&amd64_pci_driver); |
2769 | |||
2948 | err_pci: | 2770 | err_pci: |
2949 | msrs_free(msrs); | 2771 | msrs_free(msrs); |
2950 | msrs = NULL; | 2772 | msrs = NULL; |
2773 | |||
2774 | err_free: | ||
2775 | kfree(mcis); | ||
2776 | mcis = NULL; | ||
2777 | |||
2778 | kfree(ecc_stngs); | ||
2779 | ecc_stngs = NULL; | ||
2780 | |||
2951 | err_ret: | 2781 | err_ret: |
2952 | return err; | 2782 | return err; |
2953 | } | 2783 | } |
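amd64_edac_init() now sizes the per-node mcis[] and ecc_stngs[] tables from amd_nb_num() and releases partial allocations through the goto labels on any failure. A stand-alone sketch of that allocate-then-unwind shape, using user-space calloc/free in place of kzalloc/kfree and node_count in place of amd_nb_num():

#include <stdlib.h>

/*
 * Allocate both per-node tables up front; on failure free whatever was
 * allocated (free(NULL) is a no-op) and report an error, mirroring the
 * err_free label above.
 */
static int alloc_per_node_tables(size_t node_count,
				 void ***mcis_out, void ***ecc_out)
{
	void **mcis = calloc(node_count, sizeof(*mcis));
	void **ecc  = calloc(node_count, sizeof(*ecc));

	if (!mcis || !ecc)
		goto err_free;

	*mcis_out = mcis;
	*ecc_out  = ecc;
	return 0;

err_free:
	free(mcis);
	free(ecc);
	return -1;
}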
@@ -2959,6 +2789,12 @@ static void __exit amd64_edac_exit(void) | |||
2959 | 2789 | ||
2960 | pci_unregister_driver(&amd64_pci_driver); | 2790 | pci_unregister_driver(&amd64_pci_driver); |
2961 | 2791 | ||
2792 | kfree(ecc_stngs); | ||
2793 | ecc_stngs = NULL; | ||
2794 | |||
2795 | kfree(mcis); | ||
2796 | mcis = NULL; | ||
2797 | |||
2962 | msrs_free(msrs); | 2798 | msrs_free(msrs); |
2963 | msrs = NULL; | 2799 | msrs = NULL; |
2964 | } | 2800 | } |