diff options
author | Jonathan Herman <hermanjl@cs.unc.edu> | 2013-01-17 16:15:55 -0500 |
---|---|---|
committer | Jonathan Herman <hermanjl@cs.unc.edu> | 2013-01-17 16:15:55 -0500 |
commit | 8dea78da5cee153b8af9c07a2745f6c55057fe12 (patch) | |
tree | a8f4d49d63b1ecc92f2fddceba0655b2472c5bd9 /drivers/edac | |
parent | 406089d01562f1e2bf9f089fd7637009ebaad589 (diff) |
Patched in Tegra support.
Diffstat (limited to 'drivers/edac')
49 files changed, 4376 insertions, 7982 deletions
diff --git a/drivers/edac/Kconfig b/drivers/edac/Kconfig index 66719925970..af1a17d42bd 100644 --- a/drivers/edac/Kconfig +++ b/drivers/edac/Kconfig | |||
@@ -4,13 +4,10 @@ | |||
4 | # Licensed and distributed under the GPL | 4 | # Licensed and distributed under the GPL |
5 | # | 5 | # |
6 | 6 | ||
7 | config EDAC_SUPPORT | ||
8 | bool | ||
9 | |||
10 | menuconfig EDAC | 7 | menuconfig EDAC |
11 | bool "EDAC (Error Detection And Correction) reporting" | 8 | bool "EDAC (Error Detection And Correction) reporting" |
12 | depends on HAS_IOMEM | 9 | depends on HAS_IOMEM |
13 | depends on X86 || PPC || TILE || ARM || EDAC_SUPPORT | 10 | depends on X86 || PPC || TILE |
14 | help | 11 | help |
15 | EDAC is designed to report errors in the core system. | 12 | EDAC is designed to report errors in the core system. |
16 | These are low-level errors that are reported in the CPU or | 13 | These are low-level errors that are reported in the CPU or |
@@ -32,25 +29,19 @@ menuconfig EDAC | |||
32 | 29 | ||
33 | if EDAC | 30 | if EDAC |
34 | 31 | ||
35 | config EDAC_LEGACY_SYSFS | 32 | comment "Reporting subsystems" |
36 | bool "EDAC legacy sysfs" | ||
37 | default y | ||
38 | help | ||
39 | Enable the compatibility sysfs nodes. | ||
40 | Use 'Y' if your edac utilities aren't ported to work with the newer | ||
41 | structures. | ||
42 | 33 | ||
43 | config EDAC_DEBUG | 34 | config EDAC_DEBUG |
44 | bool "Debugging" | 35 | bool "Debugging" |
45 | help | 36 | help |
46 | This turns on debugging information for the entire EDAC subsystem. | 37 | This turns on debugging information for the entire EDAC |
47 | You do so by inserting edac_module with "edac_debug_level=x." Valid | 38 | sub-system. You can insert module with "debug_level=x", current |
48 | levels are 0-4 (from low to high) and by default it is set to 2. | 39 | there're four debug levels (x=0,1,2,3 from low to high). |
49 | Usually you should select 'N' here. | 40 | Usually you should select 'N'. |
50 | 41 | ||
51 | config EDAC_DECODE_MCE | 42 | config EDAC_DECODE_MCE |
52 | tristate "Decode MCEs in human-readable form (only on AMD for now)" | 43 | tristate "Decode MCEs in human-readable form (only on AMD for now)" |
53 | depends on CPU_SUP_AMD && X86_MCE_AMD | 44 | depends on CPU_SUP_AMD && X86_MCE |
54 | default y | 45 | default y |
55 | ---help--- | 46 | ---help--- |
56 | Enable this option if you want to decode Machine Check Exceptions | 47 | Enable this option if you want to decode Machine Check Exceptions |
@@ -80,6 +71,9 @@ config EDAC_MM_EDAC | |||
80 | occurred so that a particular failing memory module can be | 71 | occurred so that a particular failing memory module can be |
81 | replaced. If unsure, select 'Y'. | 72 | replaced. If unsure, select 'Y'. |
82 | 73 | ||
74 | config EDAC_MCE | ||
75 | bool | ||
76 | |||
83 | config EDAC_AMD64 | 77 | config EDAC_AMD64 |
84 | tristate "AMD64 (Opteron, Athlon64) K8, F10h" | 78 | tristate "AMD64 (Opteron, Athlon64) K8, F10h" |
85 | depends on EDAC_MM_EDAC && AMD_NB && X86_64 && EDAC_DECODE_MCE | 79 | depends on EDAC_MM_EDAC && AMD_NB && X86_64 && EDAC_DECODE_MCE |
@@ -179,7 +173,8 @@ config EDAC_I5400 | |||
179 | 173 | ||
180 | config EDAC_I7CORE | 174 | config EDAC_I7CORE |
181 | tristate "Intel i7 Core (Nehalem) processors" | 175 | tristate "Intel i7 Core (Nehalem) processors" |
182 | depends on EDAC_MM_EDAC && PCI && X86 && X86_MCE_INTEL | 176 | depends on EDAC_MM_EDAC && PCI && X86 |
177 | select EDAC_MCE | ||
183 | help | 178 | help |
184 | Support for error detection and correction the Intel | 179 | Support for error detection and correction the Intel |
185 | i7 Core (Nehalem) Integrated Memory Controller that exists on | 180 | i7 Core (Nehalem) Integrated Memory Controller that exists on |
@@ -221,14 +216,6 @@ config EDAC_I7300 | |||
221 | Support for error detection and correction the Intel | 216 | Support for error detection and correction the Intel |
222 | Clarksboro MCH (Intel 7300 chipset). | 217 | Clarksboro MCH (Intel 7300 chipset). |
223 | 218 | ||
224 | config EDAC_SBRIDGE | ||
225 | tristate "Intel Sandy-Bridge Integrated MC" | ||
226 | depends on EDAC_MM_EDAC && PCI && X86_64 && X86_MCE_INTEL | ||
227 | depends on PCI_MMCONFIG && EXPERIMENTAL | ||
228 | help | ||
229 | Support for error detection and correction the Intel | ||
230 | Sandy Bridge Integrated Memory Controller. | ||
231 | |||
232 | config EDAC_MPC85XX | 219 | config EDAC_MPC85XX |
233 | tristate "Freescale MPC83xx / MPC85xx" | 220 | tristate "Freescale MPC83xx / MPC85xx" |
234 | depends on EDAC_MM_EDAC && FSL_SOC && (PPC_83xx || PPC_85xx) | 221 | depends on EDAC_MM_EDAC && FSL_SOC && (PPC_83xx || PPC_85xx) |
@@ -303,46 +290,4 @@ config EDAC_TILE | |||
303 | Support for error detection and correction on the | 290 | Support for error detection and correction on the |
304 | Tilera memory controller. | 291 | Tilera memory controller. |
305 | 292 | ||
306 | config EDAC_HIGHBANK_MC | ||
307 | tristate "Highbank Memory Controller" | ||
308 | depends on EDAC_MM_EDAC && ARCH_HIGHBANK | ||
309 | help | ||
310 | Support for error detection and correction on the | ||
311 | Calxeda Highbank memory controller. | ||
312 | |||
313 | config EDAC_HIGHBANK_L2 | ||
314 | tristate "Highbank L2 Cache" | ||
315 | depends on EDAC_MM_EDAC && ARCH_HIGHBANK | ||
316 | help | ||
317 | Support for error detection and correction on the | ||
318 | Calxeda Highbank memory controller. | ||
319 | |||
320 | config EDAC_OCTEON_PC | ||
321 | tristate "Cavium Octeon Primary Caches" | ||
322 | depends on EDAC_MM_EDAC && CPU_CAVIUM_OCTEON | ||
323 | help | ||
324 | Support for error detection and correction on the primary caches of | ||
325 | the cnMIPS cores of Cavium Octeon family SOCs. | ||
326 | |||
327 | config EDAC_OCTEON_L2C | ||
328 | tristate "Cavium Octeon Secondary Caches (L2C)" | ||
329 | depends on EDAC_MM_EDAC && CPU_CAVIUM_OCTEON | ||
330 | help | ||
331 | Support for error detection and correction on the | ||
332 | Cavium Octeon family of SOCs. | ||
333 | |||
334 | config EDAC_OCTEON_LMC | ||
335 | tristate "Cavium Octeon DRAM Memory Controller (LMC)" | ||
336 | depends on EDAC_MM_EDAC && CPU_CAVIUM_OCTEON | ||
337 | help | ||
338 | Support for error detection and correction on the | ||
339 | Cavium Octeon family of SOCs. | ||
340 | |||
341 | config EDAC_OCTEON_PCI | ||
342 | tristate "Cavium Octeon PCI Controller" | ||
343 | depends on EDAC_MM_EDAC && PCI && CPU_CAVIUM_OCTEON | ||
344 | help | ||
345 | Support for error detection and correction on the | ||
346 | Cavium Octeon family of SOCs. | ||
347 | |||
348 | endif # EDAC | 293 | endif # EDAC |
diff --git a/drivers/edac/Makefile b/drivers/edac/Makefile index 5608a9ba61b..3e239133e29 100644 --- a/drivers/edac/Makefile +++ b/drivers/edac/Makefile | |||
@@ -8,6 +8,7 @@ | |||
8 | 8 | ||
9 | obj-$(CONFIG_EDAC) := edac_stub.o | 9 | obj-$(CONFIG_EDAC) := edac_stub.o |
10 | obj-$(CONFIG_EDAC_MM_EDAC) += edac_core.o | 10 | obj-$(CONFIG_EDAC_MM_EDAC) += edac_core.o |
11 | obj-$(CONFIG_EDAC_MCE) += edac_mce.o | ||
11 | 12 | ||
12 | edac_core-y := edac_mc.o edac_device.o edac_mc_sysfs.o edac_pci_sysfs.o | 13 | edac_core-y := edac_mc.o edac_device.o edac_mc_sysfs.o edac_pci_sysfs.o |
13 | edac_core-y += edac_module.o edac_device_sysfs.o | 14 | edac_core-y += edac_module.o edac_device_sysfs.o |
@@ -28,7 +29,6 @@ obj-$(CONFIG_EDAC_I5100) += i5100_edac.o | |||
28 | obj-$(CONFIG_EDAC_I5400) += i5400_edac.o | 29 | obj-$(CONFIG_EDAC_I5400) += i5400_edac.o |
29 | obj-$(CONFIG_EDAC_I7300) += i7300_edac.o | 30 | obj-$(CONFIG_EDAC_I7300) += i7300_edac.o |
30 | obj-$(CONFIG_EDAC_I7CORE) += i7core_edac.o | 31 | obj-$(CONFIG_EDAC_I7CORE) += i7core_edac.o |
31 | obj-$(CONFIG_EDAC_SBRIDGE) += sb_edac.o | ||
32 | obj-$(CONFIG_EDAC_E7XXX) += e7xxx_edac.o | 32 | obj-$(CONFIG_EDAC_E7XXX) += e7xxx_edac.o |
33 | obj-$(CONFIG_EDAC_E752X) += e752x_edac.o | 33 | obj-$(CONFIG_EDAC_E752X) += e752x_edac.o |
34 | obj-$(CONFIG_EDAC_I82443BXGX) += i82443bxgx_edac.o | 34 | obj-$(CONFIG_EDAC_I82443BXGX) += i82443bxgx_edac.o |
@@ -55,11 +55,3 @@ obj-$(CONFIG_EDAC_AMD8111) += amd8111_edac.o | |||
55 | obj-$(CONFIG_EDAC_AMD8131) += amd8131_edac.o | 55 | obj-$(CONFIG_EDAC_AMD8131) += amd8131_edac.o |
56 | 56 | ||
57 | obj-$(CONFIG_EDAC_TILE) += tile_edac.o | 57 | obj-$(CONFIG_EDAC_TILE) += tile_edac.o |
58 | |||
59 | obj-$(CONFIG_EDAC_HIGHBANK_MC) += highbank_mc_edac.o | ||
60 | obj-$(CONFIG_EDAC_HIGHBANK_L2) += highbank_l2_edac.o | ||
61 | |||
62 | obj-$(CONFIG_EDAC_OCTEON_PC) += octeon_edac-pc.o | ||
63 | obj-$(CONFIG_EDAC_OCTEON_L2C) += octeon_edac-l2c.o | ||
64 | obj-$(CONFIG_EDAC_OCTEON_LMC) += octeon_edac-lmc.o | ||
65 | obj-$(CONFIG_EDAC_OCTEON_PCI) += octeon_edac-pci.o | ||
diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c index ad8bf2aa629..9a8bebcf6b1 100644 --- a/drivers/edac/amd64_edac.c +++ b/drivers/edac/amd64_edac.c | |||
@@ -60,8 +60,8 @@ struct scrubrate { | |||
60 | { 0x00, 0UL}, /* scrubbing off */ | 60 | { 0x00, 0UL}, /* scrubbing off */ |
61 | }; | 61 | }; |
62 | 62 | ||
63 | int __amd64_read_pci_cfg_dword(struct pci_dev *pdev, int offset, | 63 | static int __amd64_read_pci_cfg_dword(struct pci_dev *pdev, int offset, |
64 | u32 *val, const char *func) | 64 | u32 *val, const char *func) |
65 | { | 65 | { |
66 | int err = 0; | 66 | int err = 0; |
67 | 67 | ||
@@ -114,22 +114,10 @@ static int f10_read_dct_pci_cfg(struct amd64_pvt *pvt, int addr, u32 *val, | |||
114 | return __amd64_read_pci_cfg_dword(pvt->F2, addr, val, func); | 114 | return __amd64_read_pci_cfg_dword(pvt->F2, addr, val, func); |
115 | } | 115 | } |
116 | 116 | ||
117 | /* | ||
118 | * Select DCT to which PCI cfg accesses are routed | ||
119 | */ | ||
120 | static void f15h_select_dct(struct amd64_pvt *pvt, u8 dct) | ||
121 | { | ||
122 | u32 reg = 0; | ||
123 | |||
124 | amd64_read_pci_cfg(pvt->F1, DCT_CFG_SEL, ®); | ||
125 | reg &= 0xfffffffe; | ||
126 | reg |= dct; | ||
127 | amd64_write_pci_cfg(pvt->F1, DCT_CFG_SEL, reg); | ||
128 | } | ||
129 | |||
130 | static int f15_read_dct_pci_cfg(struct amd64_pvt *pvt, int addr, u32 *val, | 117 | static int f15_read_dct_pci_cfg(struct amd64_pvt *pvt, int addr, u32 *val, |
131 | const char *func) | 118 | const char *func) |
132 | { | 119 | { |
120 | u32 reg = 0; | ||
133 | u8 dct = 0; | 121 | u8 dct = 0; |
134 | 122 | ||
135 | if (addr >= 0x140 && addr <= 0x1a0) { | 123 | if (addr >= 0x140 && addr <= 0x1a0) { |
@@ -137,7 +125,10 @@ static int f15_read_dct_pci_cfg(struct amd64_pvt *pvt, int addr, u32 *val, | |||
137 | addr -= 0x100; | 125 | addr -= 0x100; |
138 | } | 126 | } |
139 | 127 | ||
140 | f15h_select_dct(pvt, dct); | 128 | amd64_read_pci_cfg(pvt->F1, DCT_CFG_SEL, ®); |
129 | reg &= 0xfffffffe; | ||
130 | reg |= dct; | ||
131 | amd64_write_pci_cfg(pvt->F1, DCT_CFG_SEL, reg); | ||
141 | 132 | ||
142 | return __amd64_read_pci_cfg_dword(pvt->F2, addr, val, func); | 133 | return __amd64_read_pci_cfg_dword(pvt->F2, addr, val, func); |
143 | } | 134 | } |
@@ -170,11 +161,8 @@ static int __amd64_set_scrub_rate(struct pci_dev *ctl, u32 new_bw, u32 min_rate) | |||
170 | * memory controller and apply to register. Search for the first | 161 | * memory controller and apply to register. Search for the first |
171 | * bandwidth entry that is greater or equal than the setting requested | 162 | * bandwidth entry that is greater or equal than the setting requested |
172 | * and program that. If at last entry, turn off DRAM scrubbing. | 163 | * and program that. If at last entry, turn off DRAM scrubbing. |
173 | * | ||
174 | * If no suitable bandwidth is found, turn off DRAM scrubbing entirely | ||
175 | * by falling back to the last element in scrubrates[]. | ||
176 | */ | 164 | */ |
177 | for (i = 0; i < ARRAY_SIZE(scrubrates) - 1; i++) { | 165 | for (i = 0; i < ARRAY_SIZE(scrubrates); i++) { |
178 | /* | 166 | /* |
179 | * skip scrub rates which aren't recommended | 167 | * skip scrub rates which aren't recommended |
180 | * (see F10 BKDG, F3x58) | 168 | * (see F10 BKDG, F3x58) |
@@ -184,6 +172,12 @@ static int __amd64_set_scrub_rate(struct pci_dev *ctl, u32 new_bw, u32 min_rate) | |||
184 | 172 | ||
185 | if (scrubrates[i].bandwidth <= new_bw) | 173 | if (scrubrates[i].bandwidth <= new_bw) |
186 | break; | 174 | break; |
175 | |||
176 | /* | ||
177 | * if no suitable bandwidth found, turn off DRAM scrubbing | ||
178 | * entirely by falling back to the last element in the | ||
179 | * scrubrates array. | ||
180 | */ | ||
187 | } | 181 | } |
188 | 182 | ||
189 | scrubval = scrubrates[i].scrubval; | 183 | scrubval = scrubrates[i].scrubval; |
@@ -204,10 +198,6 @@ static int amd64_set_scrub_rate(struct mem_ctl_info *mci, u32 bw) | |||
204 | if (boot_cpu_data.x86 == 0xf) | 198 | if (boot_cpu_data.x86 == 0xf) |
205 | min_scrubrate = 0x0; | 199 | min_scrubrate = 0x0; |
206 | 200 | ||
207 | /* F15h Erratum #505 */ | ||
208 | if (boot_cpu_data.x86 == 0x15) | ||
209 | f15h_select_dct(pvt, 0); | ||
210 | |||
211 | return __amd64_set_scrub_rate(pvt->F3, bw, min_scrubrate); | 201 | return __amd64_set_scrub_rate(pvt->F3, bw, min_scrubrate); |
212 | } | 202 | } |
213 | 203 | ||
@@ -217,10 +207,6 @@ static int amd64_get_scrub_rate(struct mem_ctl_info *mci) | |||
217 | u32 scrubval = 0; | 207 | u32 scrubval = 0; |
218 | int i, retval = -EINVAL; | 208 | int i, retval = -EINVAL; |
219 | 209 | ||
220 | /* F15h Erratum #505 */ | ||
221 | if (boot_cpu_data.x86 == 0x15) | ||
222 | f15h_select_dct(pvt, 0); | ||
223 | |||
224 | amd64_read_pci_cfg(pvt->F3, SCRCTRL, &scrubval); | 210 | amd64_read_pci_cfg(pvt->F3, SCRCTRL, &scrubval); |
225 | 211 | ||
226 | scrubval = scrubval & 0x001F; | 212 | scrubval = scrubval & 0x001F; |
@@ -318,8 +304,8 @@ found: | |||
318 | return edac_mc_find((int)node_id); | 304 | return edac_mc_find((int)node_id); |
319 | 305 | ||
320 | err_no_match: | 306 | err_no_match: |
321 | edac_dbg(2, "sys_addr 0x%lx doesn't match any node\n", | 307 | debugf2("sys_addr 0x%lx doesn't match any node\n", |
322 | (unsigned long)sys_addr); | 308 | (unsigned long)sys_addr); |
323 | 309 | ||
324 | return NULL; | 310 | return NULL; |
325 | } | 311 | } |
@@ -390,15 +376,15 @@ static int input_addr_to_csrow(struct mem_ctl_info *mci, u64 input_addr) | |||
390 | mask = ~mask; | 376 | mask = ~mask; |
391 | 377 | ||
392 | if ((input_addr & mask) == (base & mask)) { | 378 | if ((input_addr & mask) == (base & mask)) { |
393 | edac_dbg(2, "InputAddr 0x%lx matches csrow %d (node %d)\n", | 379 | debugf2("InputAddr 0x%lx matches csrow %d (node %d)\n", |
394 | (unsigned long)input_addr, csrow, | 380 | (unsigned long)input_addr, csrow, |
395 | pvt->mc_node_id); | 381 | pvt->mc_node_id); |
396 | 382 | ||
397 | return csrow; | 383 | return csrow; |
398 | } | 384 | } |
399 | } | 385 | } |
400 | edac_dbg(2, "no matching csrow for InputAddr 0x%lx (MC node %d)\n", | 386 | debugf2("no matching csrow for InputAddr 0x%lx (MC node %d)\n", |
401 | (unsigned long)input_addr, pvt->mc_node_id); | 387 | (unsigned long)input_addr, pvt->mc_node_id); |
402 | 388 | ||
403 | return -1; | 389 | return -1; |
404 | } | 390 | } |
@@ -423,23 +409,24 @@ int amd64_get_dram_hole_info(struct mem_ctl_info *mci, u64 *hole_base, | |||
423 | u64 *hole_offset, u64 *hole_size) | 409 | u64 *hole_offset, u64 *hole_size) |
424 | { | 410 | { |
425 | struct amd64_pvt *pvt = mci->pvt_info; | 411 | struct amd64_pvt *pvt = mci->pvt_info; |
412 | u64 base; | ||
426 | 413 | ||
427 | /* only revE and later have the DRAM Hole Address Register */ | 414 | /* only revE and later have the DRAM Hole Address Register */ |
428 | if (boot_cpu_data.x86 == 0xf && pvt->ext_model < K8_REV_E) { | 415 | if (boot_cpu_data.x86 == 0xf && pvt->ext_model < K8_REV_E) { |
429 | edac_dbg(1, " revision %d for node %d does not support DHAR\n", | 416 | debugf1(" revision %d for node %d does not support DHAR\n", |
430 | pvt->ext_model, pvt->mc_node_id); | 417 | pvt->ext_model, pvt->mc_node_id); |
431 | return 1; | 418 | return 1; |
432 | } | 419 | } |
433 | 420 | ||
434 | /* valid for Fam10h and above */ | 421 | /* valid for Fam10h and above */ |
435 | if (boot_cpu_data.x86 >= 0x10 && !dhar_mem_hoist_valid(pvt)) { | 422 | if (boot_cpu_data.x86 >= 0x10 && !dhar_mem_hoist_valid(pvt)) { |
436 | edac_dbg(1, " Dram Memory Hoisting is DISABLED on this system\n"); | 423 | debugf1(" Dram Memory Hoisting is DISABLED on this system\n"); |
437 | return 1; | 424 | return 1; |
438 | } | 425 | } |
439 | 426 | ||
440 | if (!dhar_valid(pvt)) { | 427 | if (!dhar_valid(pvt)) { |
441 | edac_dbg(1, " Dram Memory Hoisting is DISABLED on this node %d\n", | 428 | debugf1(" Dram Memory Hoisting is DISABLED on this node %d\n", |
442 | pvt->mc_node_id); | 429 | pvt->mc_node_id); |
443 | return 1; | 430 | return 1; |
444 | } | 431 | } |
445 | 432 | ||
@@ -461,17 +448,19 @@ int amd64_get_dram_hole_info(struct mem_ctl_info *mci, u64 *hole_base, | |||
461 | * addresses in the hole so that they start at 0x100000000. | 448 | * addresses in the hole so that they start at 0x100000000. |
462 | */ | 449 | */ |
463 | 450 | ||
464 | *hole_base = dhar_base(pvt); | 451 | base = dhar_base(pvt); |
465 | *hole_size = (1ULL << 32) - *hole_base; | 452 | |
453 | *hole_base = base; | ||
454 | *hole_size = (0x1ull << 32) - base; | ||
466 | 455 | ||
467 | if (boot_cpu_data.x86 > 0xf) | 456 | if (boot_cpu_data.x86 > 0xf) |
468 | *hole_offset = f10_dhar_offset(pvt); | 457 | *hole_offset = f10_dhar_offset(pvt); |
469 | else | 458 | else |
470 | *hole_offset = k8_dhar_offset(pvt); | 459 | *hole_offset = k8_dhar_offset(pvt); |
471 | 460 | ||
472 | edac_dbg(1, " DHAR info for node %d base 0x%lx offset 0x%lx size 0x%lx\n", | 461 | debugf1(" DHAR info for node %d base 0x%lx offset 0x%lx size 0x%lx\n", |
473 | pvt->mc_node_id, (unsigned long)*hole_base, | 462 | pvt->mc_node_id, (unsigned long)*hole_base, |
474 | (unsigned long)*hole_offset, (unsigned long)*hole_size); | 463 | (unsigned long)*hole_offset, (unsigned long)*hole_size); |
475 | 464 | ||
476 | return 0; | 465 | return 0; |
477 | } | 466 | } |
@@ -510,21 +499,22 @@ static u64 sys_addr_to_dram_addr(struct mem_ctl_info *mci, u64 sys_addr) | |||
510 | { | 499 | { |
511 | struct amd64_pvt *pvt = mci->pvt_info; | 500 | struct amd64_pvt *pvt = mci->pvt_info; |
512 | u64 dram_base, hole_base, hole_offset, hole_size, dram_addr; | 501 | u64 dram_base, hole_base, hole_offset, hole_size, dram_addr; |
513 | int ret; | 502 | int ret = 0; |
514 | 503 | ||
515 | dram_base = get_dram_base(pvt, pvt->mc_node_id); | 504 | dram_base = get_dram_base(pvt, pvt->mc_node_id); |
516 | 505 | ||
517 | ret = amd64_get_dram_hole_info(mci, &hole_base, &hole_offset, | 506 | ret = amd64_get_dram_hole_info(mci, &hole_base, &hole_offset, |
518 | &hole_size); | 507 | &hole_size); |
519 | if (!ret) { | 508 | if (!ret) { |
520 | if ((sys_addr >= (1ULL << 32)) && | 509 | if ((sys_addr >= (1ull << 32)) && |
521 | (sys_addr < ((1ULL << 32) + hole_size))) { | 510 | (sys_addr < ((1ull << 32) + hole_size))) { |
522 | /* use DHAR to translate SysAddr to DramAddr */ | 511 | /* use DHAR to translate SysAddr to DramAddr */ |
523 | dram_addr = sys_addr - hole_offset; | 512 | dram_addr = sys_addr - hole_offset; |
524 | 513 | ||
525 | edac_dbg(2, "using DHAR to translate SysAddr 0x%lx to DramAddr 0x%lx\n", | 514 | debugf2("using DHAR to translate SysAddr 0x%lx to " |
526 | (unsigned long)sys_addr, | 515 | "DramAddr 0x%lx\n", |
527 | (unsigned long)dram_addr); | 516 | (unsigned long)sys_addr, |
517 | (unsigned long)dram_addr); | ||
528 | 518 | ||
529 | return dram_addr; | 519 | return dram_addr; |
530 | } | 520 | } |
@@ -541,8 +531,9 @@ static u64 sys_addr_to_dram_addr(struct mem_ctl_info *mci, u64 sys_addr) | |||
541 | */ | 531 | */ |
542 | dram_addr = (sys_addr & GENMASK(0, 39)) - dram_base; | 532 | dram_addr = (sys_addr & GENMASK(0, 39)) - dram_base; |
543 | 533 | ||
544 | edac_dbg(2, "using DRAM Base register to translate SysAddr 0x%lx to DramAddr 0x%lx\n", | 534 | debugf2("using DRAM Base register to translate SysAddr 0x%lx to " |
545 | (unsigned long)sys_addr, (unsigned long)dram_addr); | 535 | "DramAddr 0x%lx\n", (unsigned long)sys_addr, |
536 | (unsigned long)dram_addr); | ||
546 | return dram_addr; | 537 | return dram_addr; |
547 | } | 538 | } |
548 | 539 | ||
@@ -578,9 +569,9 @@ static u64 dram_addr_to_input_addr(struct mem_ctl_info *mci, u64 dram_addr) | |||
578 | input_addr = ((dram_addr >> intlv_shift) & GENMASK(12, 35)) + | 569 | input_addr = ((dram_addr >> intlv_shift) & GENMASK(12, 35)) + |
579 | (dram_addr & 0xfff); | 570 | (dram_addr & 0xfff); |
580 | 571 | ||
581 | edac_dbg(2, " Intlv Shift=%d DramAddr=0x%lx maps to InputAddr=0x%lx\n", | 572 | debugf2(" Intlv Shift=%d DramAddr=0x%lx maps to InputAddr=0x%lx\n", |
582 | intlv_shift, (unsigned long)dram_addr, | 573 | intlv_shift, (unsigned long)dram_addr, |
583 | (unsigned long)input_addr); | 574 | (unsigned long)input_addr); |
584 | 575 | ||
585 | return input_addr; | 576 | return input_addr; |
586 | } | 577 | } |
@@ -596,8 +587,8 @@ static u64 sys_addr_to_input_addr(struct mem_ctl_info *mci, u64 sys_addr) | |||
596 | input_addr = | 587 | input_addr = |
597 | dram_addr_to_input_addr(mci, sys_addr_to_dram_addr(mci, sys_addr)); | 588 | dram_addr_to_input_addr(mci, sys_addr_to_dram_addr(mci, sys_addr)); |
598 | 589 | ||
599 | edac_dbg(2, "SysAdddr 0x%lx translates to InputAddr 0x%lx\n", | 590 | debugf2("SysAdddr 0x%lx translates to InputAddr 0x%lx\n", |
600 | (unsigned long)sys_addr, (unsigned long)input_addr); | 591 | (unsigned long)sys_addr, (unsigned long)input_addr); |
601 | 592 | ||
602 | return input_addr; | 593 | return input_addr; |
603 | } | 594 | } |
@@ -629,8 +620,8 @@ static u64 input_addr_to_dram_addr(struct mem_ctl_info *mci, u64 input_addr) | |||
629 | 620 | ||
630 | intlv_shift = num_node_interleave_bits(dram_intlv_en(pvt, 0)); | 621 | intlv_shift = num_node_interleave_bits(dram_intlv_en(pvt, 0)); |
631 | if (intlv_shift == 0) { | 622 | if (intlv_shift == 0) { |
632 | edac_dbg(1, " InputAddr 0x%lx translates to DramAddr of same value\n", | 623 | debugf1(" InputAddr 0x%lx translates to DramAddr of " |
633 | (unsigned long)input_addr); | 624 | "same value\n", (unsigned long)input_addr); |
634 | 625 | ||
635 | return input_addr; | 626 | return input_addr; |
636 | } | 627 | } |
@@ -641,9 +632,9 @@ static u64 input_addr_to_dram_addr(struct mem_ctl_info *mci, u64 input_addr) | |||
641 | intlv_sel = dram_intlv_sel(pvt, node_id) & ((1 << intlv_shift) - 1); | 632 | intlv_sel = dram_intlv_sel(pvt, node_id) & ((1 << intlv_shift) - 1); |
642 | dram_addr = bits + (intlv_sel << 12); | 633 | dram_addr = bits + (intlv_sel << 12); |
643 | 634 | ||
644 | edac_dbg(1, "InputAddr 0x%lx translates to DramAddr 0x%lx (%d node interleave bits)\n", | 635 | debugf1("InputAddr 0x%lx translates to DramAddr 0x%lx " |
645 | (unsigned long)input_addr, | 636 | "(%d node interleave bits)\n", (unsigned long)input_addr, |
646 | (unsigned long)dram_addr, intlv_shift); | 637 | (unsigned long)dram_addr, intlv_shift); |
647 | 638 | ||
648 | return dram_addr; | 639 | return dram_addr; |
649 | } | 640 | } |
@@ -665,9 +656,9 @@ static u64 dram_addr_to_sys_addr(struct mem_ctl_info *mci, u64 dram_addr) | |||
665 | (dram_addr < (hole_base + hole_size))) { | 656 | (dram_addr < (hole_base + hole_size))) { |
666 | sys_addr = dram_addr + hole_offset; | 657 | sys_addr = dram_addr + hole_offset; |
667 | 658 | ||
668 | edac_dbg(1, "using DHAR to translate DramAddr 0x%lx to SysAddr 0x%lx\n", | 659 | debugf1("using DHAR to translate DramAddr 0x%lx to " |
669 | (unsigned long)dram_addr, | 660 | "SysAddr 0x%lx\n", (unsigned long)dram_addr, |
670 | (unsigned long)sys_addr); | 661 | (unsigned long)sys_addr); |
671 | 662 | ||
672 | return sys_addr; | 663 | return sys_addr; |
673 | } | 664 | } |
@@ -689,9 +680,9 @@ static u64 dram_addr_to_sys_addr(struct mem_ctl_info *mci, u64 dram_addr) | |||
689 | */ | 680 | */ |
690 | sys_addr |= ~((sys_addr & (1ull << 39)) - 1); | 681 | sys_addr |= ~((sys_addr & (1ull << 39)) - 1); |
691 | 682 | ||
692 | edac_dbg(1, " Node %d, DramAddr 0x%lx to SysAddr 0x%lx\n", | 683 | debugf1(" Node %d, DramAddr 0x%lx to SysAddr 0x%lx\n", |
693 | pvt->mc_node_id, (unsigned long)dram_addr, | 684 | pvt->mc_node_id, (unsigned long)dram_addr, |
694 | (unsigned long)sys_addr); | 685 | (unsigned long)sys_addr); |
695 | 686 | ||
696 | return sys_addr; | 687 | return sys_addr; |
697 | } | 688 | } |
@@ -707,12 +698,31 @@ static inline u64 input_addr_to_sys_addr(struct mem_ctl_info *mci, | |||
707 | input_addr_to_dram_addr(mci, input_addr)); | 698 | input_addr_to_dram_addr(mci, input_addr)); |
708 | } | 699 | } |
709 | 700 | ||
701 | /* | ||
702 | * Find the minimum and maximum InputAddr values that map to the given @csrow. | ||
703 | * Pass back these values in *input_addr_min and *input_addr_max. | ||
704 | */ | ||
705 | static void find_csrow_limits(struct mem_ctl_info *mci, int csrow, | ||
706 | u64 *input_addr_min, u64 *input_addr_max) | ||
707 | { | ||
708 | struct amd64_pvt *pvt; | ||
709 | u64 base, mask; | ||
710 | |||
711 | pvt = mci->pvt_info; | ||
712 | BUG_ON((csrow < 0) || (csrow >= pvt->csels[0].b_cnt)); | ||
713 | |||
714 | get_cs_base_and_mask(pvt, csrow, 0, &base, &mask); | ||
715 | |||
716 | *input_addr_min = base & ~mask; | ||
717 | *input_addr_max = base | mask; | ||
718 | } | ||
719 | |||
710 | /* Map the Error address to a PAGE and PAGE OFFSET. */ | 720 | /* Map the Error address to a PAGE and PAGE OFFSET. */ |
711 | static inline void error_address_to_page_and_offset(u64 error_address, | 721 | static inline void error_address_to_page_and_offset(u64 error_address, |
712 | struct err_info *err) | 722 | u32 *page, u32 *offset) |
713 | { | 723 | { |
714 | err->page = (u32) (error_address >> PAGE_SHIFT); | 724 | *page = (u32) (error_address >> PAGE_SHIFT); |
715 | err->offset = ((u32) error_address) & ~PAGE_MASK; | 725 | *offset = ((u32) error_address) & ~PAGE_MASK; |
716 | } | 726 | } |
717 | 727 | ||
718 | /* | 728 | /* |
@@ -741,10 +751,10 @@ static int get_channel_from_ecc_syndrome(struct mem_ctl_info *, u16); | |||
741 | * Determine if the DIMMs have ECC enabled. ECC is enabled ONLY if all the DIMMs | 751 | * Determine if the DIMMs have ECC enabled. ECC is enabled ONLY if all the DIMMs |
742 | * are ECC capable. | 752 | * are ECC capable. |
743 | */ | 753 | */ |
744 | static unsigned long amd64_determine_edac_cap(struct amd64_pvt *pvt) | 754 | static enum edac_type amd64_determine_edac_cap(struct amd64_pvt *pvt) |
745 | { | 755 | { |
746 | u8 bit; | 756 | u8 bit; |
747 | unsigned long edac_cap = EDAC_FLAG_NONE; | 757 | enum dev_type edac_cap = EDAC_FLAG_NONE; |
748 | 758 | ||
749 | bit = (boot_cpu_data.x86 > 0xf || pvt->ext_model >= K8_REV_F) | 759 | bit = (boot_cpu_data.x86 > 0xf || pvt->ext_model >= K8_REV_F) |
750 | ? 19 | 760 | ? 19 |
@@ -760,48 +770,49 @@ static void amd64_debug_display_dimm_sizes(struct amd64_pvt *, u8); | |||
760 | 770 | ||
761 | static void amd64_dump_dramcfg_low(u32 dclr, int chan) | 771 | static void amd64_dump_dramcfg_low(u32 dclr, int chan) |
762 | { | 772 | { |
763 | edac_dbg(1, "F2x%d90 (DRAM Cfg Low): 0x%08x\n", chan, dclr); | 773 | debugf1("F2x%d90 (DRAM Cfg Low): 0x%08x\n", chan, dclr); |
764 | 774 | ||
765 | edac_dbg(1, " DIMM type: %sbuffered; all DIMMs support ECC: %s\n", | 775 | debugf1(" DIMM type: %sbuffered; all DIMMs support ECC: %s\n", |
766 | (dclr & BIT(16)) ? "un" : "", | 776 | (dclr & BIT(16)) ? "un" : "", |
767 | (dclr & BIT(19)) ? "yes" : "no"); | 777 | (dclr & BIT(19)) ? "yes" : "no"); |
768 | 778 | ||
769 | edac_dbg(1, " PAR/ERR parity: %s\n", | 779 | debugf1(" PAR/ERR parity: %s\n", |
770 | (dclr & BIT(8)) ? "enabled" : "disabled"); | 780 | (dclr & BIT(8)) ? "enabled" : "disabled"); |
771 | 781 | ||
772 | if (boot_cpu_data.x86 == 0x10) | 782 | if (boot_cpu_data.x86 == 0x10) |
773 | edac_dbg(1, " DCT 128bit mode width: %s\n", | 783 | debugf1(" DCT 128bit mode width: %s\n", |
774 | (dclr & BIT(11)) ? "128b" : "64b"); | 784 | (dclr & BIT(11)) ? "128b" : "64b"); |
775 | 785 | ||
776 | edac_dbg(1, " x4 logical DIMMs present: L0: %s L1: %s L2: %s L3: %s\n", | 786 | debugf1(" x4 logical DIMMs present: L0: %s L1: %s L2: %s L3: %s\n", |
777 | (dclr & BIT(12)) ? "yes" : "no", | 787 | (dclr & BIT(12)) ? "yes" : "no", |
778 | (dclr & BIT(13)) ? "yes" : "no", | 788 | (dclr & BIT(13)) ? "yes" : "no", |
779 | (dclr & BIT(14)) ? "yes" : "no", | 789 | (dclr & BIT(14)) ? "yes" : "no", |
780 | (dclr & BIT(15)) ? "yes" : "no"); | 790 | (dclr & BIT(15)) ? "yes" : "no"); |
781 | } | 791 | } |
782 | 792 | ||
783 | /* Display and decode various NB registers for debug purposes. */ | 793 | /* Display and decode various NB registers for debug purposes. */ |
784 | static void dump_misc_regs(struct amd64_pvt *pvt) | 794 | static void dump_misc_regs(struct amd64_pvt *pvt) |
785 | { | 795 | { |
786 | edac_dbg(1, "F3xE8 (NB Cap): 0x%08x\n", pvt->nbcap); | 796 | debugf1("F3xE8 (NB Cap): 0x%08x\n", pvt->nbcap); |
787 | 797 | ||
788 | edac_dbg(1, " NB two channel DRAM capable: %s\n", | 798 | debugf1(" NB two channel DRAM capable: %s\n", |
789 | (pvt->nbcap & NBCAP_DCT_DUAL) ? "yes" : "no"); | 799 | (pvt->nbcap & NBCAP_DCT_DUAL) ? "yes" : "no"); |
790 | 800 | ||
791 | edac_dbg(1, " ECC capable: %s, ChipKill ECC capable: %s\n", | 801 | debugf1(" ECC capable: %s, ChipKill ECC capable: %s\n", |
792 | (pvt->nbcap & NBCAP_SECDED) ? "yes" : "no", | 802 | (pvt->nbcap & NBCAP_SECDED) ? "yes" : "no", |
793 | (pvt->nbcap & NBCAP_CHIPKILL) ? "yes" : "no"); | 803 | (pvt->nbcap & NBCAP_CHIPKILL) ? "yes" : "no"); |
794 | 804 | ||
795 | amd64_dump_dramcfg_low(pvt->dclr0, 0); | 805 | amd64_dump_dramcfg_low(pvt->dclr0, 0); |
796 | 806 | ||
797 | edac_dbg(1, "F3xB0 (Online Spare): 0x%08x\n", pvt->online_spare); | 807 | debugf1("F3xB0 (Online Spare): 0x%08x\n", pvt->online_spare); |
798 | 808 | ||
799 | edac_dbg(1, "F1xF0 (DRAM Hole Address): 0x%08x, base: 0x%08x, offset: 0x%08x\n", | 809 | debugf1("F1xF0 (DRAM Hole Address): 0x%08x, base: 0x%08x, " |
800 | pvt->dhar, dhar_base(pvt), | 810 | "offset: 0x%08x\n", |
801 | (boot_cpu_data.x86 == 0xf) ? k8_dhar_offset(pvt) | 811 | pvt->dhar, dhar_base(pvt), |
802 | : f10_dhar_offset(pvt)); | 812 | (boot_cpu_data.x86 == 0xf) ? k8_dhar_offset(pvt) |
813 | : f10_dhar_offset(pvt)); | ||
803 | 814 | ||
804 | edac_dbg(1, " DramHoleValid: %s\n", dhar_valid(pvt) ? "yes" : "no"); | 815 | debugf1(" DramHoleValid: %s\n", dhar_valid(pvt) ? "yes" : "no"); |
805 | 816 | ||
806 | amd64_debug_display_dimm_sizes(pvt, 0); | 817 | amd64_debug_display_dimm_sizes(pvt, 0); |
807 | 818 | ||
@@ -848,15 +859,15 @@ static void read_dct_base_mask(struct amd64_pvt *pvt) | |||
848 | u32 *base1 = &pvt->csels[1].csbases[cs]; | 859 | u32 *base1 = &pvt->csels[1].csbases[cs]; |
849 | 860 | ||
850 | if (!amd64_read_dct_pci_cfg(pvt, reg0, base0)) | 861 | if (!amd64_read_dct_pci_cfg(pvt, reg0, base0)) |
851 | edac_dbg(0, " DCSB0[%d]=0x%08x reg: F2x%x\n", | 862 | debugf0(" DCSB0[%d]=0x%08x reg: F2x%x\n", |
852 | cs, *base0, reg0); | 863 | cs, *base0, reg0); |
853 | 864 | ||
854 | if (boot_cpu_data.x86 == 0xf || dct_ganging_enabled(pvt)) | 865 | if (boot_cpu_data.x86 == 0xf || dct_ganging_enabled(pvt)) |
855 | continue; | 866 | continue; |
856 | 867 | ||
857 | if (!amd64_read_dct_pci_cfg(pvt, reg1, base1)) | 868 | if (!amd64_read_dct_pci_cfg(pvt, reg1, base1)) |
858 | edac_dbg(0, " DCSB1[%d]=0x%08x reg: F2x%x\n", | 869 | debugf0(" DCSB1[%d]=0x%08x reg: F2x%x\n", |
859 | cs, *base1, reg1); | 870 | cs, *base1, reg1); |
860 | } | 871 | } |
861 | 872 | ||
862 | for_each_chip_select_mask(cs, 0, pvt) { | 873 | for_each_chip_select_mask(cs, 0, pvt) { |
@@ -866,15 +877,15 @@ static void read_dct_base_mask(struct amd64_pvt *pvt) | |||
866 | u32 *mask1 = &pvt->csels[1].csmasks[cs]; | 877 | u32 *mask1 = &pvt->csels[1].csmasks[cs]; |
867 | 878 | ||
868 | if (!amd64_read_dct_pci_cfg(pvt, reg0, mask0)) | 879 | if (!amd64_read_dct_pci_cfg(pvt, reg0, mask0)) |
869 | edac_dbg(0, " DCSM0[%d]=0x%08x reg: F2x%x\n", | 880 | debugf0(" DCSM0[%d]=0x%08x reg: F2x%x\n", |
870 | cs, *mask0, reg0); | 881 | cs, *mask0, reg0); |
871 | 882 | ||
872 | if (boot_cpu_data.x86 == 0xf || dct_ganging_enabled(pvt)) | 883 | if (boot_cpu_data.x86 == 0xf || dct_ganging_enabled(pvt)) |
873 | continue; | 884 | continue; |
874 | 885 | ||
875 | if (!amd64_read_dct_pci_cfg(pvt, reg1, mask1)) | 886 | if (!amd64_read_dct_pci_cfg(pvt, reg1, mask1)) |
876 | edac_dbg(0, " DCSM1[%d]=0x%08x reg: F2x%x\n", | 887 | debugf0(" DCSM1[%d]=0x%08x reg: F2x%x\n", |
877 | cs, *mask1, reg1); | 888 | cs, *mask1, reg1); |
878 | } | 889 | } |
879 | } | 890 | } |
880 | 891 | ||
@@ -1023,44 +1034,25 @@ static void read_dram_base_limit_regs(struct amd64_pvt *pvt, unsigned range) | |||
1023 | } | 1034 | } |
1024 | 1035 | ||
1025 | static void k8_map_sysaddr_to_csrow(struct mem_ctl_info *mci, u64 sys_addr, | 1036 | static void k8_map_sysaddr_to_csrow(struct mem_ctl_info *mci, u64 sys_addr, |
1026 | struct err_info *err) | 1037 | u16 syndrome) |
1027 | { | 1038 | { |
1039 | struct mem_ctl_info *src_mci; | ||
1028 | struct amd64_pvt *pvt = mci->pvt_info; | 1040 | struct amd64_pvt *pvt = mci->pvt_info; |
1029 | 1041 | int channel, csrow; | |
1030 | error_address_to_page_and_offset(sys_addr, err); | 1042 | u32 page, offset; |
1031 | |||
1032 | /* | ||
1033 | * Find out which node the error address belongs to. This may be | ||
1034 | * different from the node that detected the error. | ||
1035 | */ | ||
1036 | err->src_mci = find_mc_by_sys_addr(mci, sys_addr); | ||
1037 | if (!err->src_mci) { | ||
1038 | amd64_mc_err(mci, "failed to map error addr 0x%lx to a node\n", | ||
1039 | (unsigned long)sys_addr); | ||
1040 | err->err_code = ERR_NODE; | ||
1041 | return; | ||
1042 | } | ||
1043 | |||
1044 | /* Now map the sys_addr to a CSROW */ | ||
1045 | err->csrow = sys_addr_to_csrow(err->src_mci, sys_addr); | ||
1046 | if (err->csrow < 0) { | ||
1047 | err->err_code = ERR_CSROW; | ||
1048 | return; | ||
1049 | } | ||
1050 | 1043 | ||
1051 | /* CHIPKILL enabled */ | 1044 | /* CHIPKILL enabled */ |
1052 | if (pvt->nbcfg & NBCFG_CHIPKILL) { | 1045 | if (pvt->nbcfg & NBCFG_CHIPKILL) { |
1053 | err->channel = get_channel_from_ecc_syndrome(mci, err->syndrome); | 1046 | channel = get_channel_from_ecc_syndrome(mci, syndrome); |
1054 | if (err->channel < 0) { | 1047 | if (channel < 0) { |
1055 | /* | 1048 | /* |
1056 | * Syndrome didn't map, so we don't know which of the | 1049 | * Syndrome didn't map, so we don't know which of the |
1057 | * 2 DIMMs is in error. So we need to ID 'both' of them | 1050 | * 2 DIMMs is in error. So we need to ID 'both' of them |
1058 | * as suspect. | 1051 | * as suspect. |
1059 | */ | 1052 | */ |
1060 | amd64_mc_warn(err->src_mci, "unknown syndrome 0x%04x - " | 1053 | amd64_mc_warn(mci, "unknown syndrome 0x%04x - possible " |
1061 | "possible error reporting race\n", | 1054 | "error reporting race\n", syndrome); |
1062 | err->syndrome); | 1055 | edac_mc_handle_ce_no_info(mci, EDAC_MOD_STR); |
1063 | err->err_code = ERR_CHANNEL; | ||
1064 | return; | 1056 | return; |
1065 | } | 1057 | } |
1066 | } else { | 1058 | } else { |
@@ -1072,7 +1064,30 @@ static void k8_map_sysaddr_to_csrow(struct mem_ctl_info *mci, u64 sys_addr, | |||
1072 | * was obtained from email communication with someone at AMD. | 1064 | * was obtained from email communication with someone at AMD. |
1073 | * (Wish the email was placed in this comment - norsk) | 1065 | * (Wish the email was placed in this comment - norsk) |
1074 | */ | 1066 | */ |
1075 | err->channel = ((sys_addr & BIT(3)) != 0); | 1067 | channel = ((sys_addr & BIT(3)) != 0); |
1068 | } | ||
1069 | |||
1070 | /* | ||
1071 | * Find out which node the error address belongs to. This may be | ||
1072 | * different from the node that detected the error. | ||
1073 | */ | ||
1074 | src_mci = find_mc_by_sys_addr(mci, sys_addr); | ||
1075 | if (!src_mci) { | ||
1076 | amd64_mc_err(mci, "failed to map error addr 0x%lx to a node\n", | ||
1077 | (unsigned long)sys_addr); | ||
1078 | edac_mc_handle_ce_no_info(mci, EDAC_MOD_STR); | ||
1079 | return; | ||
1080 | } | ||
1081 | |||
1082 | /* Now map the sys_addr to a CSROW */ | ||
1083 | csrow = sys_addr_to_csrow(src_mci, sys_addr); | ||
1084 | if (csrow < 0) { | ||
1085 | edac_mc_handle_ce_no_info(src_mci, EDAC_MOD_STR); | ||
1086 | } else { | ||
1087 | error_address_to_page_and_offset(sys_addr, &page, &offset); | ||
1088 | |||
1089 | edac_mc_handle_ce(src_mci, page, offset, syndrome, csrow, | ||
1090 | channel, EDAC_MOD_STR); | ||
1076 | } | 1091 | } |
1077 | } | 1092 | } |
1078 | 1093 | ||
@@ -1100,36 +1115,12 @@ static int k8_dbam_to_chip_select(struct amd64_pvt *pvt, u8 dct, | |||
1100 | return ddr2_cs_size(cs_mode, dclr & WIDTH_128); | 1115 | return ddr2_cs_size(cs_mode, dclr & WIDTH_128); |
1101 | } | 1116 | } |
1102 | else if (pvt->ext_model >= K8_REV_D) { | 1117 | else if (pvt->ext_model >= K8_REV_D) { |
1103 | unsigned diff; | ||
1104 | WARN_ON(cs_mode > 10); | 1118 | WARN_ON(cs_mode > 10); |
1105 | 1119 | ||
1106 | /* | 1120 | if (cs_mode == 3 || cs_mode == 8) |
1107 | * the below calculation, besides trying to win an obfuscated C | 1121 | return 32 << (cs_mode - 1); |
1108 | * contest, maps cs_mode values to DIMM chip select sizes. The | 1122 | else |
1109 | * mappings are: | 1123 | return 32 << cs_mode; |
1110 | * | ||
1111 | * cs_mode CS size (mb) | ||
1112 | * ======= ============ | ||
1113 | * 0 32 | ||
1114 | * 1 64 | ||
1115 | * 2 128 | ||
1116 | * 3 128 | ||
1117 | * 4 256 | ||
1118 | * 5 512 | ||
1119 | * 6 256 | ||
1120 | * 7 512 | ||
1121 | * 8 1024 | ||
1122 | * 9 1024 | ||
1123 | * 10 2048 | ||
1124 | * | ||
1125 | * Basically, it calculates a value with which to shift the | ||
1126 | * smallest CS size of 32MB. | ||
1127 | * | ||
1128 | * ddr[23]_cs_size have a similar purpose. | ||
1129 | */ | ||
1130 | diff = cs_mode/3 + (unsigned)(cs_mode > 5); | ||
1131 | |||
1132 | return 32 << (cs_mode - diff); | ||
1133 | } | 1124 | } |
1134 | else { | 1125 | else { |
1135 | WARN_ON(cs_mode > 6); | 1126 | WARN_ON(cs_mode > 6); |
@@ -1161,7 +1152,7 @@ static int f1x_early_channel_count(struct amd64_pvt *pvt) | |||
1161 | * Need to check DCT0[0] and DCT1[0] to see if only one of them has | 1152 | * Need to check DCT0[0] and DCT1[0] to see if only one of them has |
1162 | * their CSEnable bit on. If so, then SINGLE DIMM case. | 1153 | * their CSEnable bit on. If so, then SINGLE DIMM case. |
1163 | */ | 1154 | */ |
1164 | edac_dbg(0, "Data width is not 128 bits - need more decoding\n"); | 1155 | debugf0("Data width is not 128 bits - need more decoding\n"); |
1165 | 1156 | ||
1166 | /* | 1157 | /* |
1167 | * Check DRAM Bank Address Mapping values for each DIMM to see if there | 1158 | * Check DRAM Bank Address Mapping values for each DIMM to see if there |
@@ -1240,24 +1231,25 @@ static void read_dram_ctl_register(struct amd64_pvt *pvt) | |||
1240 | return; | 1231 | return; |
1241 | 1232 | ||
1242 | if (!amd64_read_dct_pci_cfg(pvt, DCT_SEL_LO, &pvt->dct_sel_lo)) { | 1233 | if (!amd64_read_dct_pci_cfg(pvt, DCT_SEL_LO, &pvt->dct_sel_lo)) { |
1243 | edac_dbg(0, "F2x110 (DCTSelLow): 0x%08x, High range addrs at: 0x%x\n", | 1234 | debugf0("F2x110 (DCTSelLow): 0x%08x, High range addrs at: 0x%x\n", |
1244 | pvt->dct_sel_lo, dct_sel_baseaddr(pvt)); | 1235 | pvt->dct_sel_lo, dct_sel_baseaddr(pvt)); |
1245 | 1236 | ||
1246 | edac_dbg(0, " DCTs operate in %s mode\n", | 1237 | debugf0(" DCTs operate in %s mode.\n", |
1247 | (dct_ganging_enabled(pvt) ? "ganged" : "unganged")); | 1238 | (dct_ganging_enabled(pvt) ? "ganged" : "unganged")); |
1248 | 1239 | ||
1249 | if (!dct_ganging_enabled(pvt)) | 1240 | if (!dct_ganging_enabled(pvt)) |
1250 | edac_dbg(0, " Address range split per DCT: %s\n", | 1241 | debugf0(" Address range split per DCT: %s\n", |
1251 | (dct_high_range_enabled(pvt) ? "yes" : "no")); | 1242 | (dct_high_range_enabled(pvt) ? "yes" : "no")); |
1252 | 1243 | ||
1253 | edac_dbg(0, " data interleave for ECC: %s, DRAM cleared since last warm reset: %s\n", | 1244 | debugf0(" data interleave for ECC: %s, " |
1254 | (dct_data_intlv_enabled(pvt) ? "enabled" : "disabled"), | 1245 | "DRAM cleared since last warm reset: %s\n", |
1255 | (dct_memory_cleared(pvt) ? "yes" : "no")); | 1246 | (dct_data_intlv_enabled(pvt) ? "enabled" : "disabled"), |
1247 | (dct_memory_cleared(pvt) ? "yes" : "no")); | ||
1256 | 1248 | ||
1257 | edac_dbg(0, " channel interleave: %s, " | 1249 | debugf0(" channel interleave: %s, " |
1258 | "interleave bits selector: 0x%x\n", | 1250 | "interleave bits selector: 0x%x\n", |
1259 | (dct_interleave_enabled(pvt) ? "enabled" : "disabled"), | 1251 | (dct_interleave_enabled(pvt) ? "enabled" : "disabled"), |
1260 | dct_sel_interleave_addr(pvt)); | 1252 | dct_sel_interleave_addr(pvt)); |
1261 | } | 1253 | } |
1262 | 1254 | ||
1263 | amd64_read_dct_pci_cfg(pvt, DCT_SEL_HI, &pvt->dct_sel_hi); | 1255 | amd64_read_dct_pci_cfg(pvt, DCT_SEL_HI, &pvt->dct_sel_hi); |
@@ -1395,7 +1387,7 @@ static int f1x_lookup_addr_in_dct(u64 in_addr, u32 nid, u8 dct) | |||
1395 | 1387 | ||
1396 | pvt = mci->pvt_info; | 1388 | pvt = mci->pvt_info; |
1397 | 1389 | ||
1398 | edac_dbg(1, "input addr: 0x%llx, DCT: %d\n", in_addr, dct); | 1390 | debugf1("input addr: 0x%llx, DCT: %d\n", in_addr, dct); |
1399 | 1391 | ||
1400 | for_each_chip_select(csrow, dct, pvt) { | 1392 | for_each_chip_select(csrow, dct, pvt) { |
1401 | if (!csrow_enabled(csrow, dct, pvt)) | 1393 | if (!csrow_enabled(csrow, dct, pvt)) |
@@ -1403,18 +1395,19 @@ static int f1x_lookup_addr_in_dct(u64 in_addr, u32 nid, u8 dct) | |||
1403 | 1395 | ||
1404 | get_cs_base_and_mask(pvt, csrow, dct, &cs_base, &cs_mask); | 1396 | get_cs_base_and_mask(pvt, csrow, dct, &cs_base, &cs_mask); |
1405 | 1397 | ||
1406 | edac_dbg(1, " CSROW=%d CSBase=0x%llx CSMask=0x%llx\n", | 1398 | debugf1(" CSROW=%d CSBase=0x%llx CSMask=0x%llx\n", |
1407 | csrow, cs_base, cs_mask); | 1399 | csrow, cs_base, cs_mask); |
1408 | 1400 | ||
1409 | cs_mask = ~cs_mask; | 1401 | cs_mask = ~cs_mask; |
1410 | 1402 | ||
1411 | edac_dbg(1, " (InputAddr & ~CSMask)=0x%llx (CSBase & ~CSMask)=0x%llx\n", | 1403 | debugf1(" (InputAddr & ~CSMask)=0x%llx " |
1412 | (in_addr & cs_mask), (cs_base & cs_mask)); | 1404 | "(CSBase & ~CSMask)=0x%llx\n", |
1405 | (in_addr & cs_mask), (cs_base & cs_mask)); | ||
1413 | 1406 | ||
1414 | if ((in_addr & cs_mask) == (cs_base & cs_mask)) { | 1407 | if ((in_addr & cs_mask) == (cs_base & cs_mask)) { |
1415 | cs_found = f10_process_possible_spare(pvt, dct, csrow); | 1408 | cs_found = f10_process_possible_spare(pvt, dct, csrow); |
1416 | 1409 | ||
1417 | edac_dbg(1, " MATCH csrow=%d\n", cs_found); | 1410 | debugf1(" MATCH csrow=%d\n", cs_found); |
1418 | break; | 1411 | break; |
1419 | } | 1412 | } |
1420 | } | 1413 | } |
@@ -1459,7 +1452,7 @@ static u64 f1x_swap_interleaved_region(struct amd64_pvt *pvt, u64 sys_addr) | |||
1459 | 1452 | ||
1460 | /* For a given @dram_range, check if @sys_addr falls within it. */ | 1453 | /* For a given @dram_range, check if @sys_addr falls within it. */ |
1461 | static int f1x_match_to_this_node(struct amd64_pvt *pvt, unsigned range, | 1454 | static int f1x_match_to_this_node(struct amd64_pvt *pvt, unsigned range, |
1462 | u64 sys_addr, int *chan_sel) | 1455 | u64 sys_addr, int *nid, int *chan_sel) |
1463 | { | 1456 | { |
1464 | int cs_found = -EINVAL; | 1457 | int cs_found = -EINVAL; |
1465 | u64 chan_addr; | 1458 | u64 chan_addr; |
@@ -1471,8 +1464,8 @@ static int f1x_match_to_this_node(struct amd64_pvt *pvt, unsigned range, | |||
1471 | u8 intlv_en = dram_intlv_en(pvt, range); | 1464 | u8 intlv_en = dram_intlv_en(pvt, range); |
1472 | u32 intlv_sel = dram_intlv_sel(pvt, range); | 1465 | u32 intlv_sel = dram_intlv_sel(pvt, range); |
1473 | 1466 | ||
1474 | edac_dbg(1, "(range %d) SystemAddr= 0x%llx Limit=0x%llx\n", | 1467 | debugf1("(range %d) SystemAddr= 0x%llx Limit=0x%llx\n", |
1475 | range, sys_addr, get_dram_limit(pvt, range)); | 1468 | range, sys_addr, get_dram_limit(pvt, range)); |
1476 | 1469 | ||
1477 | if (dhar_valid(pvt) && | 1470 | if (dhar_valid(pvt) && |
1478 | dhar_base(pvt) <= sys_addr && | 1471 | dhar_base(pvt) <= sys_addr && |
@@ -1528,18 +1521,19 @@ static int f1x_match_to_this_node(struct amd64_pvt *pvt, unsigned range, | |||
1528 | (chan_addr & 0xfff); | 1521 | (chan_addr & 0xfff); |
1529 | } | 1522 | } |
1530 | 1523 | ||
1531 | edac_dbg(1, " Normalized DCT addr: 0x%llx\n", chan_addr); | 1524 | debugf1(" Normalized DCT addr: 0x%llx\n", chan_addr); |
1532 | 1525 | ||
1533 | cs_found = f1x_lookup_addr_in_dct(chan_addr, node_id, channel); | 1526 | cs_found = f1x_lookup_addr_in_dct(chan_addr, node_id, channel); |
1534 | 1527 | ||
1535 | if (cs_found >= 0) | 1528 | if (cs_found >= 0) { |
1529 | *nid = node_id; | ||
1536 | *chan_sel = channel; | 1530 | *chan_sel = channel; |
1537 | 1531 | } | |
1538 | return cs_found; | 1532 | return cs_found; |
1539 | } | 1533 | } |
1540 | 1534 | ||
1541 | static int f1x_translate_sysaddr_to_cs(struct amd64_pvt *pvt, u64 sys_addr, | 1535 | static int f1x_translate_sysaddr_to_cs(struct amd64_pvt *pvt, u64 sys_addr, |
1542 | int *chan_sel) | 1536 | int *node, int *chan_sel) |
1543 | { | 1537 | { |
1544 | int cs_found = -EINVAL; | 1538 | int cs_found = -EINVAL; |
1545 | unsigned range; | 1539 | unsigned range; |
@@ -1553,7 +1547,8 @@ static int f1x_translate_sysaddr_to_cs(struct amd64_pvt *pvt, u64 sys_addr, | |||
1553 | (get_dram_limit(pvt, range) >= sys_addr)) { | 1547 | (get_dram_limit(pvt, range) >= sys_addr)) { |
1554 | 1548 | ||
1555 | cs_found = f1x_match_to_this_node(pvt, range, | 1549 | cs_found = f1x_match_to_this_node(pvt, range, |
1556 | sys_addr, chan_sel); | 1550 | sys_addr, node, |
1551 | chan_sel); | ||
1557 | if (cs_found >= 0) | 1552 | if (cs_found >= 0) |
1558 | break; | 1553 | break; |
1559 | } | 1554 | } |
@@ -1569,25 +1564,39 @@ static int f1x_translate_sysaddr_to_cs(struct amd64_pvt *pvt, u64 sys_addr, | |||
1569 | * (MCX_ADDR). | 1564 | * (MCX_ADDR). |
1570 | */ | 1565 | */ |
1571 | static void f1x_map_sysaddr_to_csrow(struct mem_ctl_info *mci, u64 sys_addr, | 1566 | static void f1x_map_sysaddr_to_csrow(struct mem_ctl_info *mci, u64 sys_addr, |
1572 | struct err_info *err) | 1567 | u16 syndrome) |
1573 | { | 1568 | { |
1574 | struct amd64_pvt *pvt = mci->pvt_info; | 1569 | struct amd64_pvt *pvt = mci->pvt_info; |
1570 | u32 page, offset; | ||
1571 | int nid, csrow, chan = 0; | ||
1575 | 1572 | ||
1576 | error_address_to_page_and_offset(sys_addr, err); | 1573 | csrow = f1x_translate_sysaddr_to_cs(pvt, sys_addr, &nid, &chan); |
1577 | 1574 | ||
1578 | err->csrow = f1x_translate_sysaddr_to_cs(pvt, sys_addr, &err->channel); | 1575 | if (csrow < 0) { |
1579 | if (err->csrow < 0) { | 1576 | edac_mc_handle_ce_no_info(mci, EDAC_MOD_STR); |
1580 | err->err_code = ERR_CSROW; | ||
1581 | return; | 1577 | return; |
1582 | } | 1578 | } |
1583 | 1579 | ||
1580 | error_address_to_page_and_offset(sys_addr, &page, &offset); | ||
1581 | |||
1584 | /* | 1582 | /* |
1585 | * We need the syndromes for channel detection only when we're | 1583 | * We need the syndromes for channel detection only when we're |
1586 | * ganged. Otherwise @chan should already contain the channel at | 1584 | * ganged. Otherwise @chan should already contain the channel at |
1587 | * this point. | 1585 | * this point. |
1588 | */ | 1586 | */ |
1589 | if (dct_ganging_enabled(pvt)) | 1587 | if (dct_ganging_enabled(pvt)) |
1590 | err->channel = get_channel_from_ecc_syndrome(mci, err->syndrome); | 1588 | chan = get_channel_from_ecc_syndrome(mci, syndrome); |
1589 | |||
1590 | if (chan >= 0) | ||
1591 | edac_mc_handle_ce(mci, page, offset, syndrome, csrow, chan, | ||
1592 | EDAC_MOD_STR); | ||
1593 | else | ||
1594 | /* | ||
1595 | * Channel unknown, report all channels on this CSROW as failed. | ||
1596 | */ | ||
1597 | for (chan = 0; chan < mci->csrows[csrow].nr_channels; chan++) | ||
1598 | edac_mc_handle_ce(mci, page, offset, syndrome, | ||
1599 | csrow, chan, EDAC_MOD_STR); | ||
1591 | } | 1600 | } |
1592 | 1601 | ||
1593 | /* | 1602 | /* |
@@ -1596,11 +1605,14 @@ static void f1x_map_sysaddr_to_csrow(struct mem_ctl_info *mci, u64 sys_addr, | |||
1596 | */ | 1605 | */ |
1597 | static void amd64_debug_display_dimm_sizes(struct amd64_pvt *pvt, u8 ctrl) | 1606 | static void amd64_debug_display_dimm_sizes(struct amd64_pvt *pvt, u8 ctrl) |
1598 | { | 1607 | { |
1599 | int dimm, size0, size1; | 1608 | int dimm, size0, size1, factor = 0; |
1600 | u32 *dcsb = ctrl ? pvt->csels[1].csbases : pvt->csels[0].csbases; | 1609 | u32 *dcsb = ctrl ? pvt->csels[1].csbases : pvt->csels[0].csbases; |
1601 | u32 dbam = ctrl ? pvt->dbam1 : pvt->dbam0; | 1610 | u32 dbam = ctrl ? pvt->dbam1 : pvt->dbam0; |
1602 | 1611 | ||
1603 | if (boot_cpu_data.x86 == 0xf) { | 1612 | if (boot_cpu_data.x86 == 0xf) { |
1613 | if (pvt->dclr0 & WIDTH_128) | ||
1614 | factor = 1; | ||
1615 | |||
1604 | /* K8 families < revF not supported yet */ | 1616 | /* K8 families < revF not supported yet */ |
1605 | if (pvt->ext_model < K8_REV_F) | 1617 | if (pvt->ext_model < K8_REV_F) |
1606 | return; | 1618 | return; |
@@ -1612,8 +1624,7 @@ static void amd64_debug_display_dimm_sizes(struct amd64_pvt *pvt, u8 ctrl) | |||
1612 | dcsb = (ctrl && !dct_ganging_enabled(pvt)) ? pvt->csels[1].csbases | 1624 | dcsb = (ctrl && !dct_ganging_enabled(pvt)) ? pvt->csels[1].csbases |
1613 | : pvt->csels[0].csbases; | 1625 | : pvt->csels[0].csbases; |
1614 | 1626 | ||
1615 | edac_dbg(1, "F2x%d80 (DRAM Bank Address Mapping): 0x%08x\n", | 1627 | debugf1("F2x%d80 (DRAM Bank Address Mapping): 0x%08x\n", ctrl, dbam); |
1616 | ctrl, dbam); | ||
1617 | 1628 | ||
1618 | edac_printk(KERN_DEBUG, EDAC_MC, "DCT%d chip selects:\n", ctrl); | 1629 | edac_printk(KERN_DEBUG, EDAC_MC, "DCT%d chip selects:\n", ctrl); |
1619 | 1630 | ||
@@ -1631,8 +1642,8 @@ static void amd64_debug_display_dimm_sizes(struct amd64_pvt *pvt, u8 ctrl) | |||
1631 | DBAM_DIMM(dimm, dbam)); | 1642 | DBAM_DIMM(dimm, dbam)); |
1632 | 1643 | ||
1633 | amd64_info(EDAC_MC ": %d: %5dMB %d: %5dMB\n", | 1644 | amd64_info(EDAC_MC ": %d: %5dMB %d: %5dMB\n", |
1634 | dimm * 2, size0, | 1645 | dimm * 2, size0 << factor, |
1635 | dimm * 2 + 1, size1); | 1646 | dimm * 2 + 1, size1 << factor); |
1636 | } | 1647 | } |
1637 | } | 1648 | } |
1638 | 1649 | ||
@@ -1789,7 +1800,7 @@ static int decode_syndrome(u16 syndrome, u16 *vectors, unsigned num_vecs, | |||
1789 | } | 1800 | } |
1790 | } | 1801 | } |
1791 | 1802 | ||
1792 | edac_dbg(0, "syndrome(%x) not found\n", syndrome); | 1803 | debugf0("syndrome(%x) not found\n", syndrome); |
1793 | return -1; | 1804 | return -1; |
1794 | } | 1805 | } |
1795 | 1806 | ||
@@ -1853,56 +1864,82 @@ static int get_channel_from_ecc_syndrome(struct mem_ctl_info *mci, u16 syndrome) | |||
1853 | return map_err_sym_to_channel(err_sym, pvt->ecc_sym_sz); | 1864 | return map_err_sym_to_channel(err_sym, pvt->ecc_sym_sz); |
1854 | } | 1865 | } |
1855 | 1866 | ||
1856 | static void __log_bus_error(struct mem_ctl_info *mci, struct err_info *err, | 1867 | /* |
1857 | u8 ecc_type) | 1868 | * Handle any Correctable Errors (CEs) that have occurred. Check for valid ERROR |
1869 | * ADDRESS and process. | ||
1870 | */ | ||
1871 | static void amd64_handle_ce(struct mem_ctl_info *mci, struct mce *m) | ||
1858 | { | 1872 | { |
1859 | enum hw_event_mc_err_type err_type; | 1873 | struct amd64_pvt *pvt = mci->pvt_info; |
1860 | const char *string; | 1874 | u64 sys_addr; |
1875 | u16 syndrome; | ||
1861 | 1876 | ||
1862 | if (ecc_type == 2) | 1877 | /* Ensure that the Error Address is VALID */ |
1863 | err_type = HW_EVENT_ERR_CORRECTED; | 1878 | if (!(m->status & MCI_STATUS_ADDRV)) { |
1864 | else if (ecc_type == 1) | 1879 | amd64_mc_err(mci, "HW has no ERROR_ADDRESS available\n"); |
1865 | err_type = HW_EVENT_ERR_UNCORRECTED; | 1880 | edac_mc_handle_ce_no_info(mci, EDAC_MOD_STR); |
1866 | else { | ||
1867 | WARN(1, "Something is rotten in the state of Denmark.\n"); | ||
1868 | return; | 1881 | return; |
1869 | } | 1882 | } |
1870 | 1883 | ||
1871 | switch (err->err_code) { | 1884 | sys_addr = get_error_address(m); |
1872 | case DECODE_OK: | 1885 | syndrome = extract_syndrome(m->status); |
1873 | string = ""; | 1886 | |
1874 | break; | 1887 | amd64_mc_err(mci, "CE ERROR_ADDRESS= 0x%llx\n", sys_addr); |
1875 | case ERR_NODE: | 1888 | |
1876 | string = "Failed to map error addr to a node"; | 1889 | pvt->ops->map_sysaddr_to_csrow(mci, sys_addr, syndrome); |
1877 | break; | 1890 | } |
1878 | case ERR_CSROW: | 1891 | |
1879 | string = "Failed to map error addr to a csrow"; | 1892 | /* Handle any Un-correctable Errors (UEs) */ |
1880 | break; | 1893 | static void amd64_handle_ue(struct mem_ctl_info *mci, struct mce *m) |
1881 | case ERR_CHANNEL: | 1894 | { |
1882 | string = "unknown syndrome - possible error reporting race"; | 1895 | struct mem_ctl_info *log_mci, *src_mci = NULL; |
1883 | break; | 1896 | int csrow; |
1884 | default: | 1897 | u64 sys_addr; |
1885 | string = "WTF error"; | 1898 | u32 page, offset; |
1886 | break; | 1899 | |
1900 | log_mci = mci; | ||
1901 | |||
1902 | if (!(m->status & MCI_STATUS_ADDRV)) { | ||
1903 | amd64_mc_err(mci, "HW has no ERROR_ADDRESS available\n"); | ||
1904 | edac_mc_handle_ue_no_info(log_mci, EDAC_MOD_STR); | ||
1905 | return; | ||
1887 | } | 1906 | } |
1888 | 1907 | ||
1889 | edac_mc_handle_error(err_type, mci, 1, | 1908 | sys_addr = get_error_address(m); |
1890 | err->page, err->offset, err->syndrome, | 1909 | |
1891 | err->csrow, err->channel, -1, | 1910 | /* |
1892 | string, ""); | 1911 | * Find out which node the error address belongs to. This may be |
1912 | * different from the node that detected the error. | ||
1913 | */ | ||
1914 | src_mci = find_mc_by_sys_addr(mci, sys_addr); | ||
1915 | if (!src_mci) { | ||
1916 | amd64_mc_err(mci, "ERROR ADDRESS (0x%lx) NOT mapped to a MC\n", | ||
1917 | (unsigned long)sys_addr); | ||
1918 | edac_mc_handle_ue_no_info(log_mci, EDAC_MOD_STR); | ||
1919 | return; | ||
1920 | } | ||
1921 | |||
1922 | log_mci = src_mci; | ||
1923 | |||
1924 | csrow = sys_addr_to_csrow(log_mci, sys_addr); | ||
1925 | if (csrow < 0) { | ||
1926 | amd64_mc_err(mci, "ERROR_ADDRESS (0x%lx) NOT mapped to CS\n", | ||
1927 | (unsigned long)sys_addr); | ||
1928 | edac_mc_handle_ue_no_info(log_mci, EDAC_MOD_STR); | ||
1929 | } else { | ||
1930 | error_address_to_page_and_offset(sys_addr, &page, &offset); | ||
1931 | edac_mc_handle_ue(log_mci, page, offset, csrow, EDAC_MOD_STR); | ||
1932 | } | ||
1893 | } | 1933 | } |
1894 | 1934 | ||
1895 | static inline void __amd64_decode_bus_error(struct mem_ctl_info *mci, | 1935 | static inline void __amd64_decode_bus_error(struct mem_ctl_info *mci, |
1896 | struct mce *m) | 1936 | struct mce *m) |
1897 | { | 1937 | { |
1898 | struct amd64_pvt *pvt = mci->pvt_info; | ||
1899 | u8 ecc_type = (m->status >> 45) & 0x3; | ||
1900 | u8 xec = XEC(m->status, 0x1f); | ||
1901 | u16 ec = EC(m->status); | 1938 | u16 ec = EC(m->status); |
1902 | u64 sys_addr; | 1939 | u8 xec = XEC(m->status, 0x1f); |
1903 | struct err_info err; | 1940 | u8 ecc_type = (m->status >> 45) & 0x3; |
1904 | 1941 | ||
1905 | /* Bail out early if this was an 'observed' error */ | 1942 | /* Bail early out if this was an 'observed' error */ |
1906 | if (PP(ec) == NBSL_PP_OBS) | 1943 | if (PP(ec) == NBSL_PP_OBS) |
1907 | return; | 1944 | return; |
1908 | 1945 | ||
@@ -1910,21 +1947,17 @@ static inline void __amd64_decode_bus_error(struct mem_ctl_info *mci, | |||
1910 | if (xec && xec != F10_NBSL_EXT_ERR_ECC) | 1947 | if (xec && xec != F10_NBSL_EXT_ERR_ECC) |
1911 | return; | 1948 | return; |
1912 | 1949 | ||
1913 | memset(&err, 0, sizeof(err)); | ||
1914 | |||
1915 | sys_addr = get_error_address(m); | ||
1916 | |||
1917 | if (ecc_type == 2) | 1950 | if (ecc_type == 2) |
1918 | err.syndrome = extract_syndrome(m->status); | 1951 | amd64_handle_ce(mci, m); |
1919 | 1952 | else if (ecc_type == 1) | |
1920 | pvt->ops->map_sysaddr_to_csrow(mci, sys_addr, &err); | 1953 | amd64_handle_ue(mci, m); |
1921 | |||
1922 | __log_bus_error(mci, &err, ecc_type); | ||
1923 | } | 1954 | } |
1924 | 1955 | ||
1925 | void amd64_decode_bus_error(int node_id, struct mce *m) | 1956 | void amd64_decode_bus_error(int node_id, struct mce *m, u32 nbcfg) |
1926 | { | 1957 | { |
1927 | __amd64_decode_bus_error(mcis[node_id], m); | 1958 | struct mem_ctl_info *mci = mcis[node_id]; |
1959 | |||
1960 | __amd64_decode_bus_error(mci, m); | ||
1928 | } | 1961 | } |
1929 | 1962 | ||
1930 | /* | 1963 | /* |
@@ -1954,9 +1987,9 @@ static int reserve_mc_sibling_devs(struct amd64_pvt *pvt, u16 f1_id, u16 f3_id) | |||
1954 | 1987 | ||
1955 | return -ENODEV; | 1988 | return -ENODEV; |
1956 | } | 1989 | } |
1957 | edac_dbg(1, "F1: %s\n", pci_name(pvt->F1)); | 1990 | debugf1("F1: %s\n", pci_name(pvt->F1)); |
1958 | edac_dbg(1, "F2: %s\n", pci_name(pvt->F2)); | 1991 | debugf1("F2: %s\n", pci_name(pvt->F2)); |
1959 | edac_dbg(1, "F3: %s\n", pci_name(pvt->F3)); | 1992 | debugf1("F3: %s\n", pci_name(pvt->F3)); |
1960 | 1993 | ||
1961 | return 0; | 1994 | return 0; |
1962 | } | 1995 | } |
@@ -1983,15 +2016,15 @@ static void read_mc_regs(struct amd64_pvt *pvt) | |||
1983 | * those are Read-As-Zero | 2016 | * those are Read-As-Zero |
1984 | */ | 2017 | */ |
1985 | rdmsrl(MSR_K8_TOP_MEM1, pvt->top_mem); | 2018 | rdmsrl(MSR_K8_TOP_MEM1, pvt->top_mem); |
1986 | edac_dbg(0, " TOP_MEM: 0x%016llx\n", pvt->top_mem); | 2019 | debugf0(" TOP_MEM: 0x%016llx\n", pvt->top_mem); |
1987 | 2020 | ||
1988 | /* check first whether TOP_MEM2 is enabled */ | 2021 | /* check first whether TOP_MEM2 is enabled */ |
1989 | rdmsrl(MSR_K8_SYSCFG, msr_val); | 2022 | rdmsrl(MSR_K8_SYSCFG, msr_val); |
1990 | if (msr_val & (1U << 21)) { | 2023 | if (msr_val & (1U << 21)) { |
1991 | rdmsrl(MSR_K8_TOP_MEM2, pvt->top_mem2); | 2024 | rdmsrl(MSR_K8_TOP_MEM2, pvt->top_mem2); |
1992 | edac_dbg(0, " TOP_MEM2: 0x%016llx\n", pvt->top_mem2); | 2025 | debugf0(" TOP_MEM2: 0x%016llx\n", pvt->top_mem2); |
1993 | } else | 2026 | } else |
1994 | edac_dbg(0, " TOP_MEM2 disabled\n"); | 2027 | debugf0(" TOP_MEM2 disabled.\n"); |
1995 | 2028 | ||
1996 | amd64_read_pci_cfg(pvt->F3, NBCAP, &pvt->nbcap); | 2029 | amd64_read_pci_cfg(pvt->F3, NBCAP, &pvt->nbcap); |
1997 | 2030 | ||
@@ -2007,17 +2040,17 @@ static void read_mc_regs(struct amd64_pvt *pvt) | |||
2007 | if (!rw) | 2040 | if (!rw) |
2008 | continue; | 2041 | continue; |
2009 | 2042 | ||
2010 | edac_dbg(1, " DRAM range[%d], base: 0x%016llx; limit: 0x%016llx\n", | 2043 | debugf1(" DRAM range[%d], base: 0x%016llx; limit: 0x%016llx\n", |
2011 | range, | 2044 | range, |
2012 | get_dram_base(pvt, range), | 2045 | get_dram_base(pvt, range), |
2013 | get_dram_limit(pvt, range)); | 2046 | get_dram_limit(pvt, range)); |
2014 | 2047 | ||
2015 | edac_dbg(1, " IntlvEn=%s; Range access: %s%s IntlvSel=%d DstNode=%d\n", | 2048 | debugf1(" IntlvEn=%s; Range access: %s%s IntlvSel=%d DstNode=%d\n", |
2016 | dram_intlv_en(pvt, range) ? "Enabled" : "Disabled", | 2049 | dram_intlv_en(pvt, range) ? "Enabled" : "Disabled", |
2017 | (rw & 0x1) ? "R" : "-", | 2050 | (rw & 0x1) ? "R" : "-", |
2018 | (rw & 0x2) ? "W" : "-", | 2051 | (rw & 0x2) ? "W" : "-", |
2019 | dram_intlv_sel(pvt, range), | 2052 | dram_intlv_sel(pvt, range), |
2020 | dram_dst_node(pvt, range)); | 2053 | dram_dst_node(pvt, range)); |
2021 | } | 2054 | } |
2022 | 2055 | ||
2023 | read_dct_base_mask(pvt); | 2056 | read_dct_base_mask(pvt); |
@@ -2085,8 +2118,6 @@ static void read_mc_regs(struct amd64_pvt *pvt) | |||
2085 | static u32 amd64_csrow_nr_pages(struct amd64_pvt *pvt, u8 dct, int csrow_nr) | 2118 | static u32 amd64_csrow_nr_pages(struct amd64_pvt *pvt, u8 dct, int csrow_nr) |
2086 | { | 2119 | { |
2087 | u32 cs_mode, nr_pages; | 2120 | u32 cs_mode, nr_pages; |
2088 | u32 dbam = dct ? pvt->dbam1 : pvt->dbam0; | ||
2089 | |||
2090 | 2121 | ||
2091 | /* | 2122 | /* |
2092 | * The math on this doesn't look right on the surface because x/2*4 can | 2123 | * The math on this doesn't look right on the surface because x/2*4 can |
@@ -2095,13 +2126,19 @@ static u32 amd64_csrow_nr_pages(struct amd64_pvt *pvt, u8 dct, int csrow_nr) | |||
2095 | * number of bits to shift the DBAM register to extract the proper CSROW | 2126 | * number of bits to shift the DBAM register to extract the proper CSROW |
2096 | * field. | 2127 | * field. |
2097 | */ | 2128 | */ |
2098 | cs_mode = DBAM_DIMM(csrow_nr / 2, dbam); | 2129 | cs_mode = (pvt->dbam0 >> ((csrow_nr / 2) * 4)) & 0xF; |
2099 | 2130 | ||
2100 | nr_pages = pvt->ops->dbam_to_cs(pvt, dct, cs_mode) << (20 - PAGE_SHIFT); | 2131 | nr_pages = pvt->ops->dbam_to_cs(pvt, dct, cs_mode) << (20 - PAGE_SHIFT); |
2101 | 2132 | ||
2102 | edac_dbg(0, "csrow: %d, channel: %d, DBAM idx: %d\n", | 2133 | /* |
2103 | csrow_nr, dct, cs_mode); | 2134 | * If dual channel then double the memory size of single channel. |
2104 | edac_dbg(0, "nr_pages/channel: %u\n", nr_pages); | 2135 | * Channel count is 1 or 2 |
2136 | */ | ||
2137 | nr_pages <<= (pvt->channel_count - 1); | ||
2138 | |||
2139 | debugf0(" (csrow=%d) DBAM map index= %d\n", csrow_nr, cs_mode); | ||
2140 | debugf0(" nr_pages= %u channel-count = %d\n", | ||
2141 | nr_pages, pvt->channel_count); | ||
2105 | 2142 | ||
2106 | return nr_pages; | 2143 | return nr_pages; |
2107 | } | 2144 | } |
@@ -2112,69 +2149,66 @@ static u32 amd64_csrow_nr_pages(struct amd64_pvt *pvt, u8 dct, int csrow_nr) | |||
2112 | */ | 2149 | */ |
2113 | static int init_csrows(struct mem_ctl_info *mci) | 2150 | static int init_csrows(struct mem_ctl_info *mci) |
2114 | { | 2151 | { |
2115 | struct amd64_pvt *pvt = mci->pvt_info; | ||
2116 | struct csrow_info *csrow; | 2152 | struct csrow_info *csrow; |
2117 | struct dimm_info *dimm; | 2153 | struct amd64_pvt *pvt = mci->pvt_info; |
2118 | enum edac_type edac_mode; | 2154 | u64 input_addr_min, input_addr_max, sys_addr, base, mask; |
2119 | enum mem_type mtype; | ||
2120 | int i, j, empty = 1; | ||
2121 | int nr_pages = 0; | ||
2122 | u32 val; | 2155 | u32 val; |
2156 | int i, empty = 1; | ||
2123 | 2157 | ||
2124 | amd64_read_pci_cfg(pvt->F3, NBCFG, &val); | 2158 | amd64_read_pci_cfg(pvt->F3, NBCFG, &val); |
2125 | 2159 | ||
2126 | pvt->nbcfg = val; | 2160 | pvt->nbcfg = val; |
2127 | 2161 | ||
2128 | edac_dbg(0, "node %d, NBCFG=0x%08x[ChipKillEccCap: %d|DramEccEn: %d]\n", | 2162 | debugf0("node %d, NBCFG=0x%08x[ChipKillEccCap: %d|DramEccEn: %d]\n", |
2129 | pvt->mc_node_id, val, | 2163 | pvt->mc_node_id, val, |
2130 | !!(val & NBCFG_CHIPKILL), !!(val & NBCFG_ECC_ENABLE)); | 2164 | !!(val & NBCFG_CHIPKILL), !!(val & NBCFG_ECC_ENABLE)); |
2131 | 2165 | ||
2132 | /* | ||
2133 | * We iterate over DCT0 here but we look at DCT1 in parallel, if needed. | ||
2134 | */ | ||
2135 | for_each_chip_select(i, 0, pvt) { | 2166 | for_each_chip_select(i, 0, pvt) { |
2136 | bool row_dct0 = !!csrow_enabled(i, 0, pvt); | 2167 | csrow = &mci->csrows[i]; |
2137 | bool row_dct1 = false; | ||
2138 | 2168 | ||
2139 | if (boot_cpu_data.x86 != 0xf) | 2169 | if (!csrow_enabled(i, 0, pvt)) { |
2140 | row_dct1 = !!csrow_enabled(i, 1, pvt); | 2170 | debugf1("----CSROW %d EMPTY for node %d\n", i, |
2141 | 2171 | pvt->mc_node_id); | |
2142 | if (!row_dct0 && !row_dct1) | ||
2143 | continue; | 2172 | continue; |
2173 | } | ||
2144 | 2174 | ||
2145 | csrow = mci->csrows[i]; | 2175 | debugf1("----CSROW %d VALID for MC node %d\n", |
2146 | empty = 0; | 2176 | i, pvt->mc_node_id); |
2147 | |||
2148 | edac_dbg(1, "MC node: %d, csrow: %d\n", | ||
2149 | pvt->mc_node_id, i); | ||
2150 | |||
2151 | if (row_dct0) | ||
2152 | nr_pages = amd64_csrow_nr_pages(pvt, 0, i); | ||
2153 | |||
2154 | /* K8 has only one DCT */ | ||
2155 | if (boot_cpu_data.x86 != 0xf && row_dct1) | ||
2156 | nr_pages += amd64_csrow_nr_pages(pvt, 1, i); | ||
2157 | |||
2158 | mtype = amd64_determine_memory_type(pvt, i); | ||
2159 | 2177 | ||
2160 | edac_dbg(1, "Total csrow%d pages: %u\n", i, nr_pages); | 2178 | empty = 0; |
2179 | csrow->nr_pages = amd64_csrow_nr_pages(pvt, 0, i); | ||
2180 | find_csrow_limits(mci, i, &input_addr_min, &input_addr_max); | ||
2181 | sys_addr = input_addr_to_sys_addr(mci, input_addr_min); | ||
2182 | csrow->first_page = (u32) (sys_addr >> PAGE_SHIFT); | ||
2183 | sys_addr = input_addr_to_sys_addr(mci, input_addr_max); | ||
2184 | csrow->last_page = (u32) (sys_addr >> PAGE_SHIFT); | ||
2185 | |||
2186 | get_cs_base_and_mask(pvt, i, 0, &base, &mask); | ||
2187 | csrow->page_mask = ~mask; | ||
2188 | /* 8 bytes of resolution */ | ||
2189 | |||
2190 | csrow->mtype = amd64_determine_memory_type(pvt, i); | ||
2191 | |||
2192 | debugf1(" for MC node %d csrow %d:\n", pvt->mc_node_id, i); | ||
2193 | debugf1(" input_addr_min: 0x%lx input_addr_max: 0x%lx\n", | ||
2194 | (unsigned long)input_addr_min, | ||
2195 | (unsigned long)input_addr_max); | ||
2196 | debugf1(" sys_addr: 0x%lx page_mask: 0x%lx\n", | ||
2197 | (unsigned long)sys_addr, csrow->page_mask); | ||
2198 | debugf1(" nr_pages: %u first_page: 0x%lx " | ||
2199 | "last_page: 0x%lx\n", | ||
2200 | (unsigned)csrow->nr_pages, | ||
2201 | csrow->first_page, csrow->last_page); | ||
2161 | 2202 | ||
2162 | /* | 2203 | /* |
2163 | * determine whether CHIPKILL or JUST ECC or NO ECC is operating | 2204 | * determine whether CHIPKILL or JUST ECC or NO ECC is operating |
2164 | */ | 2205 | */ |
2165 | if (pvt->nbcfg & NBCFG_ECC_ENABLE) | 2206 | if (pvt->nbcfg & NBCFG_ECC_ENABLE) |
2166 | edac_mode = (pvt->nbcfg & NBCFG_CHIPKILL) ? | 2207 | csrow->edac_mode = |
2167 | EDAC_S4ECD4ED : EDAC_SECDED; | 2208 | (pvt->nbcfg & NBCFG_CHIPKILL) ? |
2209 | EDAC_S4ECD4ED : EDAC_SECDED; | ||
2168 | else | 2210 | else |
2169 | edac_mode = EDAC_NONE; | 2211 | csrow->edac_mode = EDAC_NONE; |
2170 | |||
2171 | for (j = 0; j < pvt->channel_count; j++) { | ||
2172 | dimm = csrow->channels[j]->dimm; | ||
2173 | dimm->mtype = mtype; | ||
2174 | dimm->edac_mode = edac_mode; | ||
2175 | dimm->nr_pages = nr_pages; | ||
2176 | } | ||
2177 | csrow->nr_pages = nr_pages; | ||
2178 | } | 2212 | } |
2179 | 2213 | ||
2180 | return empty; | 2214 | return empty; |
@@ -2210,9 +2244,9 @@ static bool amd64_nb_mce_bank_enabled_on_node(unsigned nid) | |||
2210 | struct msr *reg = per_cpu_ptr(msrs, cpu); | 2244 | struct msr *reg = per_cpu_ptr(msrs, cpu); |
2211 | nbe = reg->l & MSR_MCGCTL_NBE; | 2245 | nbe = reg->l & MSR_MCGCTL_NBE; |
2212 | 2246 | ||
2213 | edac_dbg(0, "core: %u, MCG_CTL: 0x%llx, NB MSR is %s\n", | 2247 | debugf0("core: %u, MCG_CTL: 0x%llx, NB MSR is %s\n", |
2214 | cpu, reg->q, | 2248 | cpu, reg->q, |
2215 | (nbe ? "enabled" : "disabled")); | 2249 | (nbe ? "enabled" : "disabled")); |
2216 | 2250 | ||
2217 | if (!nbe) | 2251 | if (!nbe) |
2218 | goto out; | 2252 | goto out; |
@@ -2283,8 +2317,8 @@ static bool enable_ecc_error_reporting(struct ecc_settings *s, u8 nid, | |||
2283 | 2317 | ||
2284 | amd64_read_pci_cfg(F3, NBCFG, &value); | 2318 | amd64_read_pci_cfg(F3, NBCFG, &value); |
2285 | 2319 | ||
2286 | edac_dbg(0, "1: node %d, NBCFG=0x%08x[DramEccEn: %d]\n", | 2320 | debugf0("1: node %d, NBCFG=0x%08x[DramEccEn: %d]\n", |
2287 | nid, value, !!(value & NBCFG_ECC_ENABLE)); | 2321 | nid, value, !!(value & NBCFG_ECC_ENABLE)); |
2288 | 2322 | ||
2289 | if (!(value & NBCFG_ECC_ENABLE)) { | 2323 | if (!(value & NBCFG_ECC_ENABLE)) { |
2290 | amd64_warn("DRAM ECC disabled on this node, enabling...\n"); | 2324 | amd64_warn("DRAM ECC disabled on this node, enabling...\n"); |
@@ -2308,8 +2342,8 @@ static bool enable_ecc_error_reporting(struct ecc_settings *s, u8 nid, | |||
2308 | s->flags.nb_ecc_prev = 1; | 2342 | s->flags.nb_ecc_prev = 1; |
2309 | } | 2343 | } |
2310 | 2344 | ||
2311 | edac_dbg(0, "2: node %d, NBCFG=0x%08x[DramEccEn: %d]\n", | 2345 | debugf0("2: node %d, NBCFG=0x%08x[DramEccEn: %d]\n", |
2312 | nid, value, !!(value & NBCFG_ECC_ENABLE)); | 2346 | nid, value, !!(value & NBCFG_ECC_ENABLE)); |
2313 | 2347 | ||
2314 | return ret; | 2348 | return ret; |
2315 | } | 2349 | } |
@@ -2377,29 +2411,26 @@ static bool ecc_enabled(struct pci_dev *F3, u8 nid) | |||
2377 | return true; | 2411 | return true; |
2378 | } | 2412 | } |
2379 | 2413 | ||
2380 | static int set_mc_sysfs_attrs(struct mem_ctl_info *mci) | 2414 | struct mcidev_sysfs_attribute sysfs_attrs[ARRAY_SIZE(amd64_dbg_attrs) + |
2381 | { | 2415 | ARRAY_SIZE(amd64_inj_attrs) + |
2382 | int rc; | 2416 | 1]; |
2383 | 2417 | ||
2384 | rc = amd64_create_sysfs_dbg_files(mci); | 2418 | struct mcidev_sysfs_attribute terminator = { .attr = { .name = NULL } }; |
2385 | if (rc < 0) | ||
2386 | return rc; | ||
2387 | 2419 | ||
2388 | if (boot_cpu_data.x86 >= 0x10) { | 2420 | static void set_mc_sysfs_attrs(struct mem_ctl_info *mci) |
2389 | rc = amd64_create_sysfs_inject_files(mci); | ||
2390 | if (rc < 0) | ||
2391 | return rc; | ||
2392 | } | ||
2393 | |||
2394 | return 0; | ||
2395 | } | ||
2396 | |||
2397 | static void del_mc_sysfs_attrs(struct mem_ctl_info *mci) | ||
2398 | { | 2421 | { |
2399 | amd64_remove_sysfs_dbg_files(mci); | 2422 | unsigned int i = 0, j = 0; |
2423 | |||
2424 | for (; i < ARRAY_SIZE(amd64_dbg_attrs); i++) | ||
2425 | sysfs_attrs[i] = amd64_dbg_attrs[i]; | ||
2400 | 2426 | ||
2401 | if (boot_cpu_data.x86 >= 0x10) | 2427 | if (boot_cpu_data.x86 >= 0x10) |
2402 | amd64_remove_sysfs_inject_files(mci); | 2428 | for (j = 0; j < ARRAY_SIZE(amd64_inj_attrs); j++, i++) |
2429 | sysfs_attrs[i] = amd64_inj_attrs[j]; | ||
2430 | |||
2431 | sysfs_attrs[i] = terminator; | ||
2432 | |||
2433 | mci->mc_driver_sysfs_attributes = sysfs_attrs; | ||
2403 | } | 2434 | } |
2404 | 2435 | ||
2405 | static void setup_mci_misc_attrs(struct mem_ctl_info *mci, | 2436 | static void setup_mci_misc_attrs(struct mem_ctl_info *mci, |
@@ -2472,7 +2503,6 @@ static int amd64_init_one_instance(struct pci_dev *F2) | |||
2472 | struct amd64_pvt *pvt = NULL; | 2503 | struct amd64_pvt *pvt = NULL; |
2473 | struct amd64_family_type *fam_type = NULL; | 2504 | struct amd64_family_type *fam_type = NULL; |
2474 | struct mem_ctl_info *mci = NULL; | 2505 | struct mem_ctl_info *mci = NULL; |
2475 | struct edac_mc_layer layers[2]; | ||
2476 | int err = 0, ret; | 2506 | int err = 0, ret; |
2477 | u8 nid = get_node_id(F2); | 2507 | u8 nid = get_node_id(F2); |
2478 | 2508 | ||
@@ -2507,34 +2537,25 @@ static int amd64_init_one_instance(struct pci_dev *F2) | |||
2507 | goto err_siblings; | 2537 | goto err_siblings; |
2508 | 2538 | ||
2509 | ret = -ENOMEM; | 2539 | ret = -ENOMEM; |
2510 | layers[0].type = EDAC_MC_LAYER_CHIP_SELECT; | 2540 | mci = edac_mc_alloc(0, pvt->csels[0].b_cnt, pvt->channel_count, nid); |
2511 | layers[0].size = pvt->csels[0].b_cnt; | ||
2512 | layers[0].is_virt_csrow = true; | ||
2513 | layers[1].type = EDAC_MC_LAYER_CHANNEL; | ||
2514 | layers[1].size = pvt->channel_count; | ||
2515 | layers[1].is_virt_csrow = false; | ||
2516 | mci = edac_mc_alloc(nid, ARRAY_SIZE(layers), layers, 0); | ||
2517 | if (!mci) | 2541 | if (!mci) |
2518 | goto err_siblings; | 2542 | goto err_siblings; |
2519 | 2543 | ||
2520 | mci->pvt_info = pvt; | 2544 | mci->pvt_info = pvt; |
2521 | mci->pdev = &pvt->F2->dev; | 2545 | mci->dev = &pvt->F2->dev; |
2522 | mci->csbased = 1; | ||
2523 | 2546 | ||
2524 | setup_mci_misc_attrs(mci, fam_type); | 2547 | setup_mci_misc_attrs(mci, fam_type); |
2525 | 2548 | ||
2526 | if (init_csrows(mci)) | 2549 | if (init_csrows(mci)) |
2527 | mci->edac_cap = EDAC_FLAG_NONE; | 2550 | mci->edac_cap = EDAC_FLAG_NONE; |
2528 | 2551 | ||
2552 | set_mc_sysfs_attrs(mci); | ||
2553 | |||
2529 | ret = -ENODEV; | 2554 | ret = -ENODEV; |
2530 | if (edac_mc_add_mc(mci)) { | 2555 | if (edac_mc_add_mc(mci)) { |
2531 | edac_dbg(1, "failed edac_mc_add_mc()\n"); | 2556 | debugf1("failed edac_mc_add_mc()\n"); |
2532 | goto err_add_mc; | 2557 | goto err_add_mc; |
2533 | } | 2558 | } |
2534 | if (set_mc_sysfs_attrs(mci)) { | ||
2535 | edac_dbg(1, "failed edac_mc_add_mc()\n"); | ||
2536 | goto err_add_sysfs; | ||
2537 | } | ||
2538 | 2559 | ||
2539 | /* register stuff with EDAC MCE */ | 2560 | /* register stuff with EDAC MCE */ |
2540 | if (report_gart_errors) | 2561 | if (report_gart_errors) |
@@ -2548,8 +2569,6 @@ static int amd64_init_one_instance(struct pci_dev *F2) | |||
2548 | 2569 | ||
2549 | return 0; | 2570 | return 0; |
2550 | 2571 | ||
2551 | err_add_sysfs: | ||
2552 | edac_mc_del_mc(mci->pdev); | ||
2553 | err_add_mc: | 2572 | err_add_mc: |
2554 | edac_mc_free(mci); | 2573 | edac_mc_free(mci); |
2555 | 2574 | ||
@@ -2563,8 +2582,8 @@ err_ret: | |||
2563 | return ret; | 2582 | return ret; |
2564 | } | 2583 | } |
2565 | 2584 | ||
2566 | static int amd64_probe_one_instance(struct pci_dev *pdev, | 2585 | static int __devinit amd64_probe_one_instance(struct pci_dev *pdev, |
2567 | const struct pci_device_id *mc_type) | 2586 | const struct pci_device_id *mc_type) |
2568 | { | 2587 | { |
2569 | u8 nid = get_node_id(pdev); | 2588 | u8 nid = get_node_id(pdev); |
2570 | struct pci_dev *F3 = node_to_amd_nb(nid)->misc; | 2589 | struct pci_dev *F3 = node_to_amd_nb(nid)->misc; |
@@ -2573,7 +2592,7 @@ static int amd64_probe_one_instance(struct pci_dev *pdev, | |||
2573 | 2592 | ||
2574 | ret = pci_enable_device(pdev); | 2593 | ret = pci_enable_device(pdev); |
2575 | if (ret < 0) { | 2594 | if (ret < 0) { |
2576 | edac_dbg(0, "ret=%d\n", ret); | 2595 | debugf0("ret=%d\n", ret); |
2577 | return -EIO; | 2596 | return -EIO; |
2578 | } | 2597 | } |
2579 | 2598 | ||
@@ -2612,7 +2631,7 @@ err_out: | |||
2612 | return ret; | 2631 | return ret; |
2613 | } | 2632 | } |
2614 | 2633 | ||
2615 | static void amd64_remove_one_instance(struct pci_dev *pdev) | 2634 | static void __devexit amd64_remove_one_instance(struct pci_dev *pdev) |
2616 | { | 2635 | { |
2617 | struct mem_ctl_info *mci; | 2636 | struct mem_ctl_info *mci; |
2618 | struct amd64_pvt *pvt; | 2637 | struct amd64_pvt *pvt; |
@@ -2620,8 +2639,6 @@ static void amd64_remove_one_instance(struct pci_dev *pdev) | |||
2620 | struct pci_dev *F3 = node_to_amd_nb(nid)->misc; | 2639 | struct pci_dev *F3 = node_to_amd_nb(nid)->misc; |
2621 | struct ecc_settings *s = ecc_stngs[nid]; | 2640 | struct ecc_settings *s = ecc_stngs[nid]; |
2622 | 2641 | ||
2623 | mci = find_mci_by_dev(&pdev->dev); | ||
2624 | del_mc_sysfs_attrs(mci); | ||
2625 | /* Remove from EDAC CORE tracking list */ | 2642 | /* Remove from EDAC CORE tracking list */ |
2626 | mci = edac_mc_del_mc(&pdev->dev); | 2643 | mci = edac_mc_del_mc(&pdev->dev); |
2627 | if (!mci) | 2644 | if (!mci) |
@@ -2653,7 +2670,7 @@ static void amd64_remove_one_instance(struct pci_dev *pdev) | |||
2653 | * PCI core identifies what devices are on a system during boot, and then | 2670 | * PCI core identifies what devices are on a system during boot, and then |
2654 | * inquiry this table to see if this driver is for a given device found. | 2671 | * inquiry this table to see if this driver is for a given device found. |
2655 | */ | 2672 | */ |
2656 | static DEFINE_PCI_DEVICE_TABLE(amd64_pci_table) = { | 2673 | static const struct pci_device_id amd64_pci_table[] __devinitdata = { |
2657 | { | 2674 | { |
2658 | .vendor = PCI_VENDOR_ID_AMD, | 2675 | .vendor = PCI_VENDOR_ID_AMD, |
2659 | .device = PCI_DEVICE_ID_AMD_K8_NB_MEMCTL, | 2676 | .device = PCI_DEVICE_ID_AMD_K8_NB_MEMCTL, |
@@ -2686,7 +2703,7 @@ MODULE_DEVICE_TABLE(pci, amd64_pci_table); | |||
2686 | static struct pci_driver amd64_pci_driver = { | 2703 | static struct pci_driver amd64_pci_driver = { |
2687 | .name = EDAC_MOD_STR, | 2704 | .name = EDAC_MOD_STR, |
2688 | .probe = amd64_probe_one_instance, | 2705 | .probe = amd64_probe_one_instance, |
2689 | .remove = amd64_remove_one_instance, | 2706 | .remove = __devexit_p(amd64_remove_one_instance), |
2690 | .id_table = amd64_pci_table, | 2707 | .id_table = amd64_pci_table, |
2691 | }; | 2708 | }; |
2692 | 2709 | ||
diff --git a/drivers/edac/amd64_edac.h b/drivers/edac/amd64_edac.h index e864f407806..9a666cb985b 100644 --- a/drivers/edac/amd64_edac.h +++ b/drivers/edac/amd64_edac.h | |||
@@ -33,7 +33,7 @@ | |||
33 | * detection. The mods to Rev F required more family | 33 | * detection. The mods to Rev F required more family |
34 | * information detection. | 34 | * information detection. |
35 | * | 35 | * |
36 | * Changes/Fixes by Borislav Petkov <bp@alien8.de>: | 36 | * Changes/Fixes by Borislav Petkov <borislav.petkov@amd.com>: |
37 | * - misc fixes and code cleanups | 37 | * - misc fixes and code cleanups |
38 | * | 38 | * |
39 | * This module is based on the following documents | 39 | * This module is based on the following documents |
@@ -219,7 +219,7 @@ | |||
219 | #define DBAM1 0x180 | 219 | #define DBAM1 0x180 |
220 | 220 | ||
221 | /* Extract the DIMM 'type' on the i'th DIMM from the DBAM reg value passed */ | 221 | /* Extract the DIMM 'type' on the i'th DIMM from the DBAM reg value passed */ |
222 | #define DBAM_DIMM(i, reg) ((((reg) >> (4*(i)))) & 0xF) | 222 | #define DBAM_DIMM(i, reg) ((((reg) >> (4*i))) & 0xF) |
223 | 223 | ||
224 | #define DBAM_MAX_VALUE 11 | 224 | #define DBAM_MAX_VALUE 11 |
225 | 225 | ||
@@ -267,20 +267,18 @@ | |||
267 | #define online_spare_bad_dramcs(pvt, c) (((pvt)->online_spare >> (4 + 4 * (c))) & 0x7) | 267 | #define online_spare_bad_dramcs(pvt, c) (((pvt)->online_spare >> (4 + 4 * (c))) & 0x7) |
268 | 268 | ||
269 | #define F10_NB_ARRAY_ADDR 0xB8 | 269 | #define F10_NB_ARRAY_ADDR 0xB8 |
270 | #define F10_NB_ARRAY_DRAM BIT(31) | 270 | #define F10_NB_ARRAY_DRAM_ECC BIT(31) |
271 | 271 | ||
272 | /* Bits [2:1] are used to select 16-byte section within a 64-byte cacheline */ | 272 | /* Bits [2:1] are used to select 16-byte section within a 64-byte cacheline */ |
273 | #define SET_NB_ARRAY_ADDR(section) (((section) & 0x3) << 1) | 273 | #define SET_NB_ARRAY_ADDRESS(section) (((section) & 0x3) << 1) |
274 | 274 | ||
275 | #define F10_NB_ARRAY_DATA 0xBC | 275 | #define F10_NB_ARRAY_DATA 0xBC |
276 | #define F10_NB_ARR_ECC_WR_REQ BIT(17) | 276 | #define SET_NB_DRAM_INJECTION_WRITE(word, bits) \ |
277 | #define SET_NB_DRAM_INJECTION_WRITE(inj) \ | 277 | (BIT(((word) & 0xF) + 20) | \ |
278 | (BIT(((inj.word) & 0xF) + 20) | \ | 278 | BIT(17) | bits) |
279 | F10_NB_ARR_ECC_WR_REQ | inj.bit_map) | 279 | #define SET_NB_DRAM_INJECTION_READ(word, bits) \ |
280 | #define SET_NB_DRAM_INJECTION_READ(inj) \ | 280 | (BIT(((word) & 0xF) + 20) | \ |
281 | (BIT(((inj.word) & 0xF) + 20) | \ | 281 | BIT(16) | bits) |
282 | BIT(16) | inj.bit_map) | ||
283 | |||
284 | 282 | ||
285 | #define NBCAP 0xE8 | 283 | #define NBCAP 0xE8 |
286 | #define NBCAP_CHIPKILL BIT(4) | 284 | #define NBCAP_CHIPKILL BIT(4) |
@@ -307,9 +305,9 @@ enum amd_families { | |||
307 | 305 | ||
308 | /* Error injection control structure */ | 306 | /* Error injection control structure */ |
309 | struct error_injection { | 307 | struct error_injection { |
310 | u32 section; | 308 | u32 section; |
311 | u32 word; | 309 | u32 word; |
312 | u32 bit_map; | 310 | u32 bit_map; |
313 | }; | 311 | }; |
314 | 312 | ||
315 | /* low and high part of PCI config space regs */ | 313 | /* low and high part of PCI config space regs */ |
@@ -376,23 +374,6 @@ struct amd64_pvt { | |||
376 | struct error_injection injection; | 374 | struct error_injection injection; |
377 | }; | 375 | }; |
378 | 376 | ||
379 | enum err_codes { | ||
380 | DECODE_OK = 0, | ||
381 | ERR_NODE = -1, | ||
382 | ERR_CSROW = -2, | ||
383 | ERR_CHANNEL = -3, | ||
384 | }; | ||
385 | |||
386 | struct err_info { | ||
387 | int err_code; | ||
388 | struct mem_ctl_info *src_mci; | ||
389 | int csrow; | ||
390 | int channel; | ||
391 | u16 syndrome; | ||
392 | u32 page; | ||
393 | u32 offset; | ||
394 | }; | ||
395 | |||
396 | static inline u64 get_dram_base(struct amd64_pvt *pvt, unsigned i) | 377 | static inline u64 get_dram_base(struct amd64_pvt *pvt, unsigned i) |
397 | { | 378 | { |
398 | u64 addr = ((u64)pvt->ranges[i].base.lo & 0xffff0000) << 8; | 379 | u64 addr = ((u64)pvt->ranges[i].base.lo & 0xffff0000) << 8; |
@@ -432,33 +413,20 @@ struct ecc_settings { | |||
432 | }; | 413 | }; |
433 | 414 | ||
434 | #ifdef CONFIG_EDAC_DEBUG | 415 | #ifdef CONFIG_EDAC_DEBUG |
435 | int amd64_create_sysfs_dbg_files(struct mem_ctl_info *mci); | 416 | #define NUM_DBG_ATTRS 5 |
436 | void amd64_remove_sysfs_dbg_files(struct mem_ctl_info *mci); | ||
437 | |||
438 | #else | 417 | #else |
439 | static inline int amd64_create_sysfs_dbg_files(struct mem_ctl_info *mci) | 418 | #define NUM_DBG_ATTRS 0 |
440 | { | ||
441 | return 0; | ||
442 | } | ||
443 | static void inline amd64_remove_sysfs_dbg_files(struct mem_ctl_info *mci) | ||
444 | { | ||
445 | } | ||
446 | #endif | 419 | #endif |
447 | 420 | ||
448 | #ifdef CONFIG_EDAC_AMD64_ERROR_INJECTION | 421 | #ifdef CONFIG_EDAC_AMD64_ERROR_INJECTION |
449 | int amd64_create_sysfs_inject_files(struct mem_ctl_info *mci); | 422 | #define NUM_INJ_ATTRS 5 |
450 | void amd64_remove_sysfs_inject_files(struct mem_ctl_info *mci); | ||
451 | |||
452 | #else | 423 | #else |
453 | static inline int amd64_create_sysfs_inject_files(struct mem_ctl_info *mci) | 424 | #define NUM_INJ_ATTRS 0 |
454 | { | ||
455 | return 0; | ||
456 | } | ||
457 | static inline void amd64_remove_sysfs_inject_files(struct mem_ctl_info *mci) | ||
458 | { | ||
459 | } | ||
460 | #endif | 425 | #endif |
461 | 426 | ||
427 | extern struct mcidev_sysfs_attribute amd64_dbg_attrs[NUM_DBG_ATTRS], | ||
428 | amd64_inj_attrs[NUM_INJ_ATTRS]; | ||
429 | |||
462 | /* | 430 | /* |
463 | * Each of the PCI Device IDs types have their own set of hardware accessor | 431 | * Each of the PCI Device IDs types have their own set of hardware accessor |
464 | * functions and per device encoding/decoding logic. | 432 | * functions and per device encoding/decoding logic. |
@@ -466,7 +434,7 @@ static inline void amd64_remove_sysfs_inject_files(struct mem_ctl_info *mci) | |||
466 | struct low_ops { | 434 | struct low_ops { |
467 | int (*early_channel_count) (struct amd64_pvt *pvt); | 435 | int (*early_channel_count) (struct amd64_pvt *pvt); |
468 | void (*map_sysaddr_to_csrow) (struct mem_ctl_info *mci, u64 sys_addr, | 436 | void (*map_sysaddr_to_csrow) (struct mem_ctl_info *mci, u64 sys_addr, |
469 | struct err_info *); | 437 | u16 syndrome); |
470 | int (*dbam_to_cs) (struct amd64_pvt *pvt, u8 dct, unsigned cs_mode); | 438 | int (*dbam_to_cs) (struct amd64_pvt *pvt, u8 dct, unsigned cs_mode); |
471 | int (*read_dct_pci_cfg) (struct amd64_pvt *pvt, int offset, | 439 | int (*read_dct_pci_cfg) (struct amd64_pvt *pvt, int offset, |
472 | u32 *val, const char *func); | 440 | u32 *val, const char *func); |
@@ -478,8 +446,6 @@ struct amd64_family_type { | |||
478 | struct low_ops ops; | 446 | struct low_ops ops; |
479 | }; | 447 | }; |
480 | 448 | ||
481 | int __amd64_read_pci_cfg_dword(struct pci_dev *pdev, int offset, | ||
482 | u32 *val, const char *func); | ||
483 | int __amd64_write_pci_cfg_dword(struct pci_dev *pdev, int offset, | 449 | int __amd64_write_pci_cfg_dword(struct pci_dev *pdev, int offset, |
484 | u32 val, const char *func); | 450 | u32 val, const char *func); |
485 | 451 | ||
@@ -494,17 +460,3 @@ int __amd64_write_pci_cfg_dword(struct pci_dev *pdev, int offset, | |||
494 | 460 | ||
495 | int amd64_get_dram_hole_info(struct mem_ctl_info *mci, u64 *hole_base, | 461 | int amd64_get_dram_hole_info(struct mem_ctl_info *mci, u64 *hole_base, |
496 | u64 *hole_offset, u64 *hole_size); | 462 | u64 *hole_offset, u64 *hole_size); |
497 | |||
498 | #define to_mci(k) container_of(k, struct mem_ctl_info, dev) | ||
499 | |||
500 | /* Injection helpers */ | ||
501 | static inline void disable_caches(void *dummy) | ||
502 | { | ||
503 | write_cr0(read_cr0() | X86_CR0_CD); | ||
504 | wbinvd(); | ||
505 | } | ||
506 | |||
507 | static inline void enable_caches(void *dummy) | ||
508 | { | ||
509 | write_cr0(read_cr0() & ~X86_CR0_CD); | ||
510 | } | ||
diff --git a/drivers/edac/amd64_edac_dbg.c b/drivers/edac/amd64_edac_dbg.c index 2c1bbf74060..e3562288f4c 100644 --- a/drivers/edac/amd64_edac_dbg.c +++ b/drivers/edac/amd64_edac_dbg.c | |||
@@ -1,11 +1,8 @@ | |||
1 | #include "amd64_edac.h" | 1 | #include "amd64_edac.h" |
2 | 2 | ||
3 | #define EDAC_DCT_ATTR_SHOW(reg) \ | 3 | #define EDAC_DCT_ATTR_SHOW(reg) \ |
4 | static ssize_t amd64_##reg##_show(struct device *dev, \ | 4 | static ssize_t amd64_##reg##_show(struct mem_ctl_info *mci, char *data) \ |
5 | struct device_attribute *mattr, \ | ||
6 | char *data) \ | ||
7 | { \ | 5 | { \ |
8 | struct mem_ctl_info *mci = to_mci(dev); \ | ||
9 | struct amd64_pvt *pvt = mci->pvt_info; \ | 6 | struct amd64_pvt *pvt = mci->pvt_info; \ |
10 | return sprintf(data, "0x%016llx\n", (u64)pvt->reg); \ | 7 | return sprintf(data, "0x%016llx\n", (u64)pvt->reg); \ |
11 | } | 8 | } |
@@ -15,12 +12,8 @@ EDAC_DCT_ATTR_SHOW(dbam0); | |||
15 | EDAC_DCT_ATTR_SHOW(top_mem); | 12 | EDAC_DCT_ATTR_SHOW(top_mem); |
16 | EDAC_DCT_ATTR_SHOW(top_mem2); | 13 | EDAC_DCT_ATTR_SHOW(top_mem2); |
17 | 14 | ||
18 | static ssize_t amd64_hole_show(struct device *dev, | 15 | static ssize_t amd64_hole_show(struct mem_ctl_info *mci, char *data) |
19 | struct device_attribute *mattr, | ||
20 | char *data) | ||
21 | { | 16 | { |
22 | struct mem_ctl_info *mci = to_mci(dev); | ||
23 | |||
24 | u64 hole_base = 0; | 17 | u64 hole_base = 0; |
25 | u64 hole_offset = 0; | 18 | u64 hole_offset = 0; |
26 | u64 hole_size = 0; | 19 | u64 hole_size = 0; |
@@ -34,40 +27,46 @@ static ssize_t amd64_hole_show(struct device *dev, | |||
34 | /* | 27 | /* |
35 | * update NUM_DBG_ATTRS in case you add new members | 28 | * update NUM_DBG_ATTRS in case you add new members |
36 | */ | 29 | */ |
37 | static DEVICE_ATTR(dhar, S_IRUGO, amd64_dhar_show, NULL); | 30 | struct mcidev_sysfs_attribute amd64_dbg_attrs[] = { |
38 | static DEVICE_ATTR(dbam, S_IRUGO, amd64_dbam0_show, NULL); | ||
39 | static DEVICE_ATTR(topmem, S_IRUGO, amd64_top_mem_show, NULL); | ||
40 | static DEVICE_ATTR(topmem2, S_IRUGO, amd64_top_mem2_show, NULL); | ||
41 | static DEVICE_ATTR(dram_hole, S_IRUGO, amd64_hole_show, NULL); | ||
42 | |||
43 | int amd64_create_sysfs_dbg_files(struct mem_ctl_info *mci) | ||
44 | { | ||
45 | int rc; | ||
46 | |||
47 | rc = device_create_file(&mci->dev, &dev_attr_dhar); | ||
48 | if (rc < 0) | ||
49 | return rc; | ||
50 | rc = device_create_file(&mci->dev, &dev_attr_dbam); | ||
51 | if (rc < 0) | ||
52 | return rc; | ||
53 | rc = device_create_file(&mci->dev, &dev_attr_topmem); | ||
54 | if (rc < 0) | ||
55 | return rc; | ||
56 | rc = device_create_file(&mci->dev, &dev_attr_topmem2); | ||
57 | if (rc < 0) | ||
58 | return rc; | ||
59 | rc = device_create_file(&mci->dev, &dev_attr_dram_hole); | ||
60 | if (rc < 0) | ||
61 | return rc; | ||
62 | 31 | ||
63 | return 0; | 32 | { |
64 | } | 33 | .attr = { |
65 | 34 | .name = "dhar", | |
66 | void amd64_remove_sysfs_dbg_files(struct mem_ctl_info *mci) | 35 | .mode = (S_IRUGO) |
67 | { | 36 | }, |
68 | device_remove_file(&mci->dev, &dev_attr_dhar); | 37 | .show = amd64_dhar_show, |
69 | device_remove_file(&mci->dev, &dev_attr_dbam); | 38 | .store = NULL, |
70 | device_remove_file(&mci->dev, &dev_attr_topmem); | 39 | }, |
71 | device_remove_file(&mci->dev, &dev_attr_topmem2); | 40 | { |
72 | device_remove_file(&mci->dev, &dev_attr_dram_hole); | 41 | .attr = { |
73 | } | 42 | .name = "dbam", |
43 | .mode = (S_IRUGO) | ||
44 | }, | ||
45 | .show = amd64_dbam0_show, | ||
46 | .store = NULL, | ||
47 | }, | ||
48 | { | ||
49 | .attr = { | ||
50 | .name = "topmem", | ||
51 | .mode = (S_IRUGO) | ||
52 | }, | ||
53 | .show = amd64_top_mem_show, | ||
54 | .store = NULL, | ||
55 | }, | ||
56 | { | ||
57 | .attr = { | ||
58 | .name = "topmem2", | ||
59 | .mode = (S_IRUGO) | ||
60 | }, | ||
61 | .show = amd64_top_mem2_show, | ||
62 | .store = NULL, | ||
63 | }, | ||
64 | { | ||
65 | .attr = { | ||
66 | .name = "dram_hole", | ||
67 | .mode = (S_IRUGO) | ||
68 | }, | ||
69 | .show = amd64_hole_show, | ||
70 | .store = NULL, | ||
71 | }, | ||
72 | }; | ||
diff --git a/drivers/edac/amd64_edac_inj.c b/drivers/edac/amd64_edac_inj.c index 8c171fa1cb9..303f10e03dd 100644 --- a/drivers/edac/amd64_edac_inj.c +++ b/drivers/edac/amd64_edac_inj.c | |||
@@ -1,10 +1,7 @@ | |||
1 | #include "amd64_edac.h" | 1 | #include "amd64_edac.h" |
2 | 2 | ||
3 | static ssize_t amd64_inject_section_show(struct device *dev, | 3 | static ssize_t amd64_inject_section_show(struct mem_ctl_info *mci, char *buf) |
4 | struct device_attribute *mattr, | ||
5 | char *buf) | ||
6 | { | 4 | { |
7 | struct mem_ctl_info *mci = to_mci(dev); | ||
8 | struct amd64_pvt *pvt = mci->pvt_info; | 5 | struct amd64_pvt *pvt = mci->pvt_info; |
9 | return sprintf(buf, "0x%x\n", pvt->injection.section); | 6 | return sprintf(buf, "0x%x\n", pvt->injection.section); |
10 | } | 7 | } |
@@ -15,33 +12,29 @@ static ssize_t amd64_inject_section_show(struct device *dev, | |||
15 | * | 12 | * |
16 | * range: 0..3 | 13 | * range: 0..3 |
17 | */ | 14 | */ |
18 | static ssize_t amd64_inject_section_store(struct device *dev, | 15 | static ssize_t amd64_inject_section_store(struct mem_ctl_info *mci, |
19 | struct device_attribute *mattr, | ||
20 | const char *data, size_t count) | 16 | const char *data, size_t count) |
21 | { | 17 | { |
22 | struct mem_ctl_info *mci = to_mci(dev); | ||
23 | struct amd64_pvt *pvt = mci->pvt_info; | 18 | struct amd64_pvt *pvt = mci->pvt_info; |
24 | unsigned long value; | 19 | unsigned long value; |
25 | int ret; | 20 | int ret = 0; |
26 | 21 | ||
27 | ret = strict_strtoul(data, 10, &value); | 22 | ret = strict_strtoul(data, 10, &value); |
28 | if (ret < 0) | 23 | if (ret != -EINVAL) { |
29 | return ret; | ||
30 | 24 | ||
31 | if (value > 3) { | 25 | if (value > 3) { |
32 | amd64_warn("%s: invalid section 0x%lx\n", __func__, value); | 26 | amd64_warn("%s: invalid section 0x%lx\n", __func__, value); |
33 | return -EINVAL; | 27 | return -EINVAL; |
34 | } | 28 | } |
35 | 29 | ||
36 | pvt->injection.section = (u32) value; | 30 | pvt->injection.section = (u32) value; |
37 | return count; | 31 | return count; |
32 | } | ||
33 | return ret; | ||
38 | } | 34 | } |
39 | 35 | ||
40 | static ssize_t amd64_inject_word_show(struct device *dev, | 36 | static ssize_t amd64_inject_word_show(struct mem_ctl_info *mci, char *buf) |
41 | struct device_attribute *mattr, | ||
42 | char *buf) | ||
43 | { | 37 | { |
44 | struct mem_ctl_info *mci = to_mci(dev); | ||
45 | struct amd64_pvt *pvt = mci->pvt_info; | 38 | struct amd64_pvt *pvt = mci->pvt_info; |
46 | return sprintf(buf, "0x%x\n", pvt->injection.word); | 39 | return sprintf(buf, "0x%x\n", pvt->injection.word); |
47 | } | 40 | } |
@@ -52,33 +45,29 @@ static ssize_t amd64_inject_word_show(struct device *dev, | |||
52 | * | 45 | * |
53 | * range: 0..8 | 46 | * range: 0..8 |
54 | */ | 47 | */ |
55 | static ssize_t amd64_inject_word_store(struct device *dev, | 48 | static ssize_t amd64_inject_word_store(struct mem_ctl_info *mci, |
56 | struct device_attribute *mattr, | 49 | const char *data, size_t count) |
57 | const char *data, size_t count) | ||
58 | { | 50 | { |
59 | struct mem_ctl_info *mci = to_mci(dev); | ||
60 | struct amd64_pvt *pvt = mci->pvt_info; | 51 | struct amd64_pvt *pvt = mci->pvt_info; |
61 | unsigned long value; | 52 | unsigned long value; |
62 | int ret; | 53 | int ret = 0; |
63 | 54 | ||
64 | ret = strict_strtoul(data, 10, &value); | 55 | ret = strict_strtoul(data, 10, &value); |
65 | if (ret < 0) | 56 | if (ret != -EINVAL) { |
66 | return ret; | ||
67 | 57 | ||
68 | if (value > 8) { | 58 | if (value > 8) { |
69 | amd64_warn("%s: invalid word 0x%lx\n", __func__, value); | 59 | amd64_warn("%s: invalid word 0x%lx\n", __func__, value); |
70 | return -EINVAL; | 60 | return -EINVAL; |
71 | } | 61 | } |
72 | 62 | ||
73 | pvt->injection.word = (u32) value; | 63 | pvt->injection.word = (u32) value; |
74 | return count; | 64 | return count; |
65 | } | ||
66 | return ret; | ||
75 | } | 67 | } |
76 | 68 | ||
77 | static ssize_t amd64_inject_ecc_vector_show(struct device *dev, | 69 | static ssize_t amd64_inject_ecc_vector_show(struct mem_ctl_info *mci, char *buf) |
78 | struct device_attribute *mattr, | ||
79 | char *buf) | ||
80 | { | 70 | { |
81 | struct mem_ctl_info *mci = to_mci(dev); | ||
82 | struct amd64_pvt *pvt = mci->pvt_info; | 71 | struct amd64_pvt *pvt = mci->pvt_info; |
83 | return sprintf(buf, "0x%x\n", pvt->injection.bit_map); | 72 | return sprintf(buf, "0x%x\n", pvt->injection.bit_map); |
84 | } | 73 | } |
@@ -88,154 +77,137 @@ static ssize_t amd64_inject_ecc_vector_show(struct device *dev, | |||
88 | * corresponding bit within the error injection word above. When used during a | 77 | * corresponding bit within the error injection word above. When used during a |
89 | * DRAM ECC read, it holds the contents of the of the DRAM ECC bits. | 78 | * DRAM ECC read, it holds the contents of the of the DRAM ECC bits. |
90 | */ | 79 | */ |
91 | static ssize_t amd64_inject_ecc_vector_store(struct device *dev, | 80 | static ssize_t amd64_inject_ecc_vector_store(struct mem_ctl_info *mci, |
92 | struct device_attribute *mattr, | 81 | const char *data, size_t count) |
93 | const char *data, size_t count) | ||
94 | { | 82 | { |
95 | struct mem_ctl_info *mci = to_mci(dev); | ||
96 | struct amd64_pvt *pvt = mci->pvt_info; | 83 | struct amd64_pvt *pvt = mci->pvt_info; |
97 | unsigned long value; | 84 | unsigned long value; |
98 | int ret; | 85 | int ret = 0; |
99 | 86 | ||
100 | ret = strict_strtoul(data, 16, &value); | 87 | ret = strict_strtoul(data, 16, &value); |
101 | if (ret < 0) | 88 | if (ret != -EINVAL) { |
102 | return ret; | ||
103 | 89 | ||
104 | if (value & 0xFFFF0000) { | 90 | if (value & 0xFFFF0000) { |
105 | amd64_warn("%s: invalid EccVector: 0x%lx\n", __func__, value); | 91 | amd64_warn("%s: invalid EccVector: 0x%lx\n", |
106 | return -EINVAL; | 92 | __func__, value); |
107 | } | 93 | return -EINVAL; |
94 | } | ||
108 | 95 | ||
109 | pvt->injection.bit_map = (u32) value; | 96 | pvt->injection.bit_map = (u32) value; |
110 | return count; | 97 | return count; |
98 | } | ||
99 | return ret; | ||
111 | } | 100 | } |
112 | 101 | ||
113 | /* | 102 | /* |
114 | * Do a DRAM ECC read. Assemble staged values in the pvt area, format into | 103 | * Do a DRAM ECC read. Assemble staged values in the pvt area, format into |
115 | * fields needed by the injection registers and read the NB Array Data Port. | 104 | * fields needed by the injection registers and read the NB Array Data Port. |
116 | */ | 105 | */ |
117 | static ssize_t amd64_inject_read_store(struct device *dev, | 106 | static ssize_t amd64_inject_read_store(struct mem_ctl_info *mci, |
118 | struct device_attribute *mattr, | 107 | const char *data, size_t count) |
119 | const char *data, size_t count) | ||
120 | { | 108 | { |
121 | struct mem_ctl_info *mci = to_mci(dev); | ||
122 | struct amd64_pvt *pvt = mci->pvt_info; | 109 | struct amd64_pvt *pvt = mci->pvt_info; |
123 | unsigned long value; | 110 | unsigned long value; |
124 | u32 section, word_bits; | 111 | u32 section, word_bits; |
125 | int ret; | 112 | int ret = 0; |
126 | 113 | ||
127 | ret = strict_strtoul(data, 10, &value); | 114 | ret = strict_strtoul(data, 10, &value); |
128 | if (ret < 0) | 115 | if (ret != -EINVAL) { |
129 | return ret; | ||
130 | |||
131 | /* Form value to choose 16-byte section of cacheline */ | ||
132 | section = F10_NB_ARRAY_DRAM | SET_NB_ARRAY_ADDR(pvt->injection.section); | ||
133 | 116 | ||
134 | amd64_write_pci_cfg(pvt->F3, F10_NB_ARRAY_ADDR, section); | 117 | /* Form value to choose 16-byte section of cacheline */ |
118 | section = F10_NB_ARRAY_DRAM_ECC | | ||
119 | SET_NB_ARRAY_ADDRESS(pvt->injection.section); | ||
120 | amd64_write_pci_cfg(pvt->F3, F10_NB_ARRAY_ADDR, section); | ||
135 | 121 | ||
136 | word_bits = SET_NB_DRAM_INJECTION_READ(pvt->injection); | 122 | word_bits = SET_NB_DRAM_INJECTION_READ(pvt->injection.word, |
123 | pvt->injection.bit_map); | ||
137 | 124 | ||
138 | /* Issue 'word' and 'bit' along with the READ request */ | 125 | /* Issue 'word' and 'bit' along with the READ request */ |
139 | amd64_write_pci_cfg(pvt->F3, F10_NB_ARRAY_DATA, word_bits); | 126 | amd64_write_pci_cfg(pvt->F3, F10_NB_ARRAY_DATA, word_bits); |
140 | 127 | ||
141 | edac_dbg(0, "section=0x%x word_bits=0x%x\n", section, word_bits); | 128 | debugf0("section=0x%x word_bits=0x%x\n", section, word_bits); |
142 | 129 | ||
143 | return count; | 130 | return count; |
131 | } | ||
132 | return ret; | ||
144 | } | 133 | } |
145 | 134 | ||
146 | /* | 135 | /* |
147 | * Do a DRAM ECC write. Assemble staged values in the pvt area and format into | 136 | * Do a DRAM ECC write. Assemble staged values in the pvt area and format into |
148 | * fields needed by the injection registers. | 137 | * fields needed by the injection registers. |
149 | */ | 138 | */ |
150 | static ssize_t amd64_inject_write_store(struct device *dev, | 139 | static ssize_t amd64_inject_write_store(struct mem_ctl_info *mci, |
151 | struct device_attribute *mattr, | ||
152 | const char *data, size_t count) | 140 | const char *data, size_t count) |
153 | { | 141 | { |
154 | struct mem_ctl_info *mci = to_mci(dev); | ||
155 | struct amd64_pvt *pvt = mci->pvt_info; | 142 | struct amd64_pvt *pvt = mci->pvt_info; |
156 | u32 section, word_bits, tmp; | ||
157 | unsigned long value; | 143 | unsigned long value; |
158 | int ret; | 144 | u32 section, word_bits; |
145 | int ret = 0; | ||
159 | 146 | ||
160 | ret = strict_strtoul(data, 10, &value); | 147 | ret = strict_strtoul(data, 10, &value); |
161 | if (ret < 0) | 148 | if (ret != -EINVAL) { |
162 | return ret; | ||
163 | |||
164 | /* Form value to choose 16-byte section of cacheline */ | ||
165 | section = F10_NB_ARRAY_DRAM | SET_NB_ARRAY_ADDR(pvt->injection.section); | ||
166 | 149 | ||
167 | amd64_write_pci_cfg(pvt->F3, F10_NB_ARRAY_ADDR, section); | 150 | /* Form value to choose 16-byte section of cacheline */ |
151 | section = F10_NB_ARRAY_DRAM_ECC | | ||
152 | SET_NB_ARRAY_ADDRESS(pvt->injection.section); | ||
153 | amd64_write_pci_cfg(pvt->F3, F10_NB_ARRAY_ADDR, section); | ||
168 | 154 | ||
169 | word_bits = SET_NB_DRAM_INJECTION_WRITE(pvt->injection); | 155 | word_bits = SET_NB_DRAM_INJECTION_WRITE(pvt->injection.word, |
156 | pvt->injection.bit_map); | ||
170 | 157 | ||
171 | pr_notice_once("Don't forget to decrease MCE polling interval in\n" | 158 | /* Issue 'word' and 'bit' along with the READ request */ |
172 | "/sys/bus/machinecheck/devices/machinecheck<CPUNUM>/check_interval\n" | 159 | amd64_write_pci_cfg(pvt->F3, F10_NB_ARRAY_DATA, word_bits); |
173 | "so that you can get the error report faster.\n"); | ||
174 | 160 | ||
175 | on_each_cpu(disable_caches, NULL, 1); | 161 | debugf0("section=0x%x word_bits=0x%x\n", section, word_bits); |
176 | 162 | ||
177 | /* Issue 'word' and 'bit' along with the READ request */ | 163 | return count; |
178 | amd64_write_pci_cfg(pvt->F3, F10_NB_ARRAY_DATA, word_bits); | ||
179 | |||
180 | retry: | ||
181 | /* wait until injection happens */ | ||
182 | amd64_read_pci_cfg(pvt->F3, F10_NB_ARRAY_DATA, &tmp); | ||
183 | if (tmp & F10_NB_ARR_ECC_WR_REQ) { | ||
184 | cpu_relax(); | ||
185 | goto retry; | ||
186 | } | 164 | } |
187 | 165 | return ret; | |
188 | on_each_cpu(enable_caches, NULL, 1); | ||
189 | |||
190 | edac_dbg(0, "section=0x%x word_bits=0x%x\n", section, word_bits); | ||
191 | |||
192 | return count; | ||
193 | } | 166 | } |
194 | 167 | ||
195 | /* | 168 | /* |
196 | * update NUM_INJ_ATTRS in case you add new members | 169 | * update NUM_INJ_ATTRS in case you add new members |
197 | */ | 170 | */ |
198 | 171 | struct mcidev_sysfs_attribute amd64_inj_attrs[] = { | |
199 | static DEVICE_ATTR(inject_section, S_IRUGO | S_IWUSR, | 172 | |
200 | amd64_inject_section_show, amd64_inject_section_store); | 173 | { |
201 | static DEVICE_ATTR(inject_word, S_IRUGO | S_IWUSR, | 174 | .attr = { |
202 | amd64_inject_word_show, amd64_inject_word_store); | 175 | .name = "inject_section", |
203 | static DEVICE_ATTR(inject_ecc_vector, S_IRUGO | S_IWUSR, | 176 | .mode = (S_IRUGO | S_IWUSR) |
204 | amd64_inject_ecc_vector_show, amd64_inject_ecc_vector_store); | 177 | }, |
205 | static DEVICE_ATTR(inject_write, S_IRUGO | S_IWUSR, | 178 | .show = amd64_inject_section_show, |
206 | NULL, amd64_inject_write_store); | 179 | .store = amd64_inject_section_store, |
207 | static DEVICE_ATTR(inject_read, S_IRUGO | S_IWUSR, | 180 | }, |
208 | NULL, amd64_inject_read_store); | 181 | { |
209 | 182 | .attr = { | |
210 | 183 | .name = "inject_word", | |
211 | int amd64_create_sysfs_inject_files(struct mem_ctl_info *mci) | 184 | .mode = (S_IRUGO | S_IWUSR) |
212 | { | 185 | }, |
213 | int rc; | 186 | .show = amd64_inject_word_show, |
214 | 187 | .store = amd64_inject_word_store, | |
215 | rc = device_create_file(&mci->dev, &dev_attr_inject_section); | 188 | }, |
216 | if (rc < 0) | 189 | { |
217 | return rc; | 190 | .attr = { |
218 | rc = device_create_file(&mci->dev, &dev_attr_inject_word); | 191 | .name = "inject_ecc_vector", |
219 | if (rc < 0) | 192 | .mode = (S_IRUGO | S_IWUSR) |
220 | return rc; | 193 | }, |
221 | rc = device_create_file(&mci->dev, &dev_attr_inject_ecc_vector); | 194 | .show = amd64_inject_ecc_vector_show, |
222 | if (rc < 0) | 195 | .store = amd64_inject_ecc_vector_store, |
223 | return rc; | 196 | }, |
224 | rc = device_create_file(&mci->dev, &dev_attr_inject_write); | 197 | { |
225 | if (rc < 0) | 198 | .attr = { |
226 | return rc; | 199 | .name = "inject_write", |
227 | rc = device_create_file(&mci->dev, &dev_attr_inject_read); | 200 | .mode = (S_IRUGO | S_IWUSR) |
228 | if (rc < 0) | 201 | }, |
229 | return rc; | 202 | .show = NULL, |
230 | 203 | .store = amd64_inject_write_store, | |
231 | return 0; | 204 | }, |
232 | } | 205 | { |
233 | 206 | .attr = { | |
234 | void amd64_remove_sysfs_inject_files(struct mem_ctl_info *mci) | 207 | .name = "inject_read", |
235 | { | 208 | .mode = (S_IRUGO | S_IWUSR) |
236 | device_remove_file(&mci->dev, &dev_attr_inject_section); | 209 | }, |
237 | device_remove_file(&mci->dev, &dev_attr_inject_word); | 210 | .show = NULL, |
238 | device_remove_file(&mci->dev, &dev_attr_inject_ecc_vector); | 211 | .store = amd64_inject_read_store, |
239 | device_remove_file(&mci->dev, &dev_attr_inject_write); | 212 | }, |
240 | device_remove_file(&mci->dev, &dev_attr_inject_read); | 213 | }; |
241 | } | ||
diff --git a/drivers/edac/amd76x_edac.c b/drivers/edac/amd76x_edac.c index 96e3ee3460a..e47e73bbbcc 100644 --- a/drivers/edac/amd76x_edac.c +++ b/drivers/edac/amd76x_edac.c | |||
@@ -29,6 +29,7 @@ | |||
29 | edac_mc_chipset_printk(mci, level, "amd76x", fmt, ##arg) | 29 | edac_mc_chipset_printk(mci, level, "amd76x", fmt, ##arg) |
30 | 30 | ||
31 | #define AMD76X_NR_CSROWS 8 | 31 | #define AMD76X_NR_CSROWS 8 |
32 | #define AMD76X_NR_CHANS 1 | ||
32 | #define AMD76X_NR_DIMMS 4 | 33 | #define AMD76X_NR_DIMMS 4 |
33 | 34 | ||
34 | /* AMD 76x register addresses - device 0 function 0 - PCI bridge */ | 35 | /* AMD 76x register addresses - device 0 function 0 - PCI bridge */ |
@@ -105,7 +106,7 @@ static void amd76x_get_error_info(struct mem_ctl_info *mci, | |||
105 | { | 106 | { |
106 | struct pci_dev *pdev; | 107 | struct pci_dev *pdev; |
107 | 108 | ||
108 | pdev = to_pci_dev(mci->pdev); | 109 | pdev = to_pci_dev(mci->dev); |
109 | pci_read_config_dword(pdev, AMD76X_ECC_MODE_STATUS, | 110 | pci_read_config_dword(pdev, AMD76X_ECC_MODE_STATUS, |
110 | &info->ecc_mode_status); | 111 | &info->ecc_mode_status); |
111 | 112 | ||
@@ -145,10 +146,8 @@ static int amd76x_process_error_info(struct mem_ctl_info *mci, | |||
145 | 146 | ||
146 | if (handle_errors) { | 147 | if (handle_errors) { |
147 | row = (info->ecc_mode_status >> 4) & 0xf; | 148 | row = (info->ecc_mode_status >> 4) & 0xf; |
148 | edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1, | 149 | edac_mc_handle_ue(mci, mci->csrows[row].first_page, 0, |
149 | mci->csrows[row]->first_page, 0, 0, | 150 | row, mci->ctl_name); |
150 | row, 0, -1, | ||
151 | mci->ctl_name, ""); | ||
152 | } | 151 | } |
153 | } | 152 | } |
154 | 153 | ||
@@ -160,10 +159,8 @@ static int amd76x_process_error_info(struct mem_ctl_info *mci, | |||
160 | 159 | ||
161 | if (handle_errors) { | 160 | if (handle_errors) { |
162 | row = info->ecc_mode_status & 0xf; | 161 | row = info->ecc_mode_status & 0xf; |
163 | edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1, | 162 | edac_mc_handle_ce(mci, mci->csrows[row].first_page, 0, |
164 | mci->csrows[row]->first_page, 0, 0, | 163 | 0, row, 0, mci->ctl_name); |
165 | row, 0, -1, | ||
166 | mci->ctl_name, ""); | ||
167 | } | 164 | } |
168 | } | 165 | } |
169 | 166 | ||
@@ -180,7 +177,7 @@ static int amd76x_process_error_info(struct mem_ctl_info *mci, | |||
180 | static void amd76x_check(struct mem_ctl_info *mci) | 177 | static void amd76x_check(struct mem_ctl_info *mci) |
181 | { | 178 | { |
182 | struct amd76x_error_info info; | 179 | struct amd76x_error_info info; |
183 | edac_dbg(3, "\n"); | 180 | debugf3("%s()\n", __func__); |
184 | amd76x_get_error_info(mci, &info); | 181 | amd76x_get_error_info(mci, &info); |
185 | amd76x_process_error_info(mci, &info, 1); | 182 | amd76x_process_error_info(mci, &info, 1); |
186 | } | 183 | } |
@@ -189,13 +186,11 @@ static void amd76x_init_csrows(struct mem_ctl_info *mci, struct pci_dev *pdev, | |||
189 | enum edac_type edac_mode) | 186 | enum edac_type edac_mode) |
190 | { | 187 | { |
191 | struct csrow_info *csrow; | 188 | struct csrow_info *csrow; |
192 | struct dimm_info *dimm; | ||
193 | u32 mba, mba_base, mba_mask, dms; | 189 | u32 mba, mba_base, mba_mask, dms; |
194 | int index; | 190 | int index; |
195 | 191 | ||
196 | for (index = 0; index < mci->nr_csrows; index++) { | 192 | for (index = 0; index < mci->nr_csrows; index++) { |
197 | csrow = mci->csrows[index]; | 193 | csrow = &mci->csrows[index]; |
198 | dimm = csrow->channels[0]->dimm; | ||
199 | 194 | ||
200 | /* find the DRAM Chip Select Base address and mask */ | 195 | /* find the DRAM Chip Select Base address and mask */ |
201 | pci_read_config_dword(pdev, | 196 | pci_read_config_dword(pdev, |
@@ -208,13 +203,13 @@ static void amd76x_init_csrows(struct mem_ctl_info *mci, struct pci_dev *pdev, | |||
208 | mba_mask = ((mba & 0xff80) << 16) | 0x7fffffUL; | 203 | mba_mask = ((mba & 0xff80) << 16) | 0x7fffffUL; |
209 | pci_read_config_dword(pdev, AMD76X_DRAM_MODE_STATUS, &dms); | 204 | pci_read_config_dword(pdev, AMD76X_DRAM_MODE_STATUS, &dms); |
210 | csrow->first_page = mba_base >> PAGE_SHIFT; | 205 | csrow->first_page = mba_base >> PAGE_SHIFT; |
211 | dimm->nr_pages = (mba_mask + 1) >> PAGE_SHIFT; | 206 | csrow->nr_pages = (mba_mask + 1) >> PAGE_SHIFT; |
212 | csrow->last_page = csrow->first_page + dimm->nr_pages - 1; | 207 | csrow->last_page = csrow->first_page + csrow->nr_pages - 1; |
213 | csrow->page_mask = mba_mask >> PAGE_SHIFT; | 208 | csrow->page_mask = mba_mask >> PAGE_SHIFT; |
214 | dimm->grain = dimm->nr_pages << PAGE_SHIFT; | 209 | csrow->grain = csrow->nr_pages << PAGE_SHIFT; |
215 | dimm->mtype = MEM_RDDR; | 210 | csrow->mtype = MEM_RDDR; |
216 | dimm->dtype = ((dms >> index) & 0x1) ? DEV_X4 : DEV_UNKNOWN; | 211 | csrow->dtype = ((dms >> index) & 0x1) ? DEV_X4 : DEV_UNKNOWN; |
217 | dimm->edac_mode = edac_mode; | 212 | csrow->edac_mode = edac_mode; |
218 | } | 213 | } |
219 | } | 214 | } |
220 | 215 | ||
@@ -235,29 +230,22 @@ static int amd76x_probe1(struct pci_dev *pdev, int dev_idx) | |||
235 | EDAC_SECDED, | 230 | EDAC_SECDED, |
236 | EDAC_SECDED | 231 | EDAC_SECDED |
237 | }; | 232 | }; |
238 | struct mem_ctl_info *mci; | 233 | struct mem_ctl_info *mci = NULL; |
239 | struct edac_mc_layer layers[2]; | ||
240 | u32 ems; | 234 | u32 ems; |
241 | u32 ems_mode; | 235 | u32 ems_mode; |
242 | struct amd76x_error_info discard; | 236 | struct amd76x_error_info discard; |
243 | 237 | ||
244 | edac_dbg(0, "\n"); | 238 | debugf0("%s()\n", __func__); |
245 | pci_read_config_dword(pdev, AMD76X_ECC_MODE_STATUS, &ems); | 239 | pci_read_config_dword(pdev, AMD76X_ECC_MODE_STATUS, &ems); |
246 | ems_mode = (ems >> 10) & 0x3; | 240 | ems_mode = (ems >> 10) & 0x3; |
241 | mci = edac_mc_alloc(0, AMD76X_NR_CSROWS, AMD76X_NR_CHANS, 0); | ||
247 | 242 | ||
248 | layers[0].type = EDAC_MC_LAYER_CHIP_SELECT; | 243 | if (mci == NULL) { |
249 | layers[0].size = AMD76X_NR_CSROWS; | ||
250 | layers[0].is_virt_csrow = true; | ||
251 | layers[1].type = EDAC_MC_LAYER_CHANNEL; | ||
252 | layers[1].size = 1; | ||
253 | layers[1].is_virt_csrow = false; | ||
254 | mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers, 0); | ||
255 | |||
256 | if (mci == NULL) | ||
257 | return -ENOMEM; | 244 | return -ENOMEM; |
245 | } | ||
258 | 246 | ||
259 | edac_dbg(0, "mci = %p\n", mci); | 247 | debugf0("%s(): mci = %p\n", __func__, mci); |
260 | mci->pdev = &pdev->dev; | 248 | mci->dev = &pdev->dev; |
261 | mci->mtype_cap = MEM_FLAG_RDDR; | 249 | mci->mtype_cap = MEM_FLAG_RDDR; |
262 | mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_EC | EDAC_FLAG_SECDED; | 250 | mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_EC | EDAC_FLAG_SECDED; |
263 | mci->edac_cap = ems_mode ? | 251 | mci->edac_cap = ems_mode ? |
@@ -276,7 +264,7 @@ static int amd76x_probe1(struct pci_dev *pdev, int dev_idx) | |||
276 | * type of memory controller. The ID is therefore hardcoded to 0. | 264 | * type of memory controller. The ID is therefore hardcoded to 0. |
277 | */ | 265 | */ |
278 | if (edac_mc_add_mc(mci)) { | 266 | if (edac_mc_add_mc(mci)) { |
279 | edac_dbg(3, "failed edac_mc_add_mc()\n"); | 267 | debugf3("%s(): failed edac_mc_add_mc()\n", __func__); |
280 | goto fail; | 268 | goto fail; |
281 | } | 269 | } |
282 | 270 | ||
@@ -292,7 +280,7 @@ static int amd76x_probe1(struct pci_dev *pdev, int dev_idx) | |||
292 | } | 280 | } |
293 | 281 | ||
294 | /* get this far and it's successful */ | 282 | /* get this far and it's successful */ |
295 | edac_dbg(3, "success\n"); | 283 | debugf3("%s(): success\n", __func__); |
296 | return 0; | 284 | return 0; |
297 | 285 | ||
298 | fail: | 286 | fail: |
@@ -301,10 +289,10 @@ fail: | |||
301 | } | 289 | } |
302 | 290 | ||
303 | /* returns count (>= 0), or negative on error */ | 291 | /* returns count (>= 0), or negative on error */ |
304 | static int amd76x_init_one(struct pci_dev *pdev, | 292 | static int __devinit amd76x_init_one(struct pci_dev *pdev, |
305 | const struct pci_device_id *ent) | 293 | const struct pci_device_id *ent) |
306 | { | 294 | { |
307 | edac_dbg(0, "\n"); | 295 | debugf0("%s()\n", __func__); |
308 | 296 | ||
309 | /* don't need to call pci_enable_device() */ | 297 | /* don't need to call pci_enable_device() */ |
310 | return amd76x_probe1(pdev, ent->driver_data); | 298 | return amd76x_probe1(pdev, ent->driver_data); |
@@ -318,11 +306,11 @@ static int amd76x_init_one(struct pci_dev *pdev, | |||
318 | * structure for the device then delete the mci and free the | 306 | * structure for the device then delete the mci and free the |
319 | * resources. | 307 | * resources. |
320 | */ | 308 | */ |
321 | static void amd76x_remove_one(struct pci_dev *pdev) | 309 | static void __devexit amd76x_remove_one(struct pci_dev *pdev) |
322 | { | 310 | { |
323 | struct mem_ctl_info *mci; | 311 | struct mem_ctl_info *mci; |
324 | 312 | ||
325 | edac_dbg(0, "\n"); | 313 | debugf0("%s()\n", __func__); |
326 | 314 | ||
327 | if (amd76x_pci) | 315 | if (amd76x_pci) |
328 | edac_pci_release_generic_ctl(amd76x_pci); | 316 | edac_pci_release_generic_ctl(amd76x_pci); |
@@ -333,7 +321,7 @@ static void amd76x_remove_one(struct pci_dev *pdev) | |||
333 | edac_mc_free(mci); | 321 | edac_mc_free(mci); |
334 | } | 322 | } |
335 | 323 | ||
336 | static DEFINE_PCI_DEVICE_TABLE(amd76x_pci_tbl) = { | 324 | static const struct pci_device_id amd76x_pci_tbl[] __devinitdata = { |
337 | { | 325 | { |
338 | PCI_VEND_DEV(AMD, FE_GATE_700C), PCI_ANY_ID, PCI_ANY_ID, 0, 0, | 326 | PCI_VEND_DEV(AMD, FE_GATE_700C), PCI_ANY_ID, PCI_ANY_ID, 0, 0, |
339 | AMD762}, | 327 | AMD762}, |
@@ -350,7 +338,7 @@ MODULE_DEVICE_TABLE(pci, amd76x_pci_tbl); | |||
350 | static struct pci_driver amd76x_driver = { | 338 | static struct pci_driver amd76x_driver = { |
351 | .name = EDAC_MOD_STR, | 339 | .name = EDAC_MOD_STR, |
352 | .probe = amd76x_init_one, | 340 | .probe = amd76x_init_one, |
353 | .remove = amd76x_remove_one, | 341 | .remove = __devexit_p(amd76x_remove_one), |
354 | .id_table = amd76x_pci_tbl, | 342 | .id_table = amd76x_pci_tbl, |
355 | }; | 343 | }; |
356 | 344 | ||
diff --git a/drivers/edac/cell_edac.c b/drivers/edac/cell_edac.c index c2eaf334b90..9a6a274e692 100644 --- a/drivers/edac/cell_edac.c +++ b/drivers/edac/cell_edac.c | |||
@@ -33,10 +33,10 @@ struct cell_edac_priv | |||
33 | static void cell_edac_count_ce(struct mem_ctl_info *mci, int chan, u64 ar) | 33 | static void cell_edac_count_ce(struct mem_ctl_info *mci, int chan, u64 ar) |
34 | { | 34 | { |
35 | struct cell_edac_priv *priv = mci->pvt_info; | 35 | struct cell_edac_priv *priv = mci->pvt_info; |
36 | struct csrow_info *csrow = mci->csrows[0]; | 36 | struct csrow_info *csrow = &mci->csrows[0]; |
37 | unsigned long address, pfn, offset, syndrome; | 37 | unsigned long address, pfn, offset, syndrome; |
38 | 38 | ||
39 | dev_dbg(mci->pdev, "ECC CE err on node %d, channel %d, ar = 0x%016llx\n", | 39 | dev_dbg(mci->dev, "ECC CE err on node %d, channel %d, ar = 0x%016llx\n", |
40 | priv->node, chan, ar); | 40 | priv->node, chan, ar); |
41 | 41 | ||
42 | /* Address decoding is likely a bit bogus, to dbl check */ | 42 | /* Address decoding is likely a bit bogus, to dbl check */ |
@@ -48,18 +48,17 @@ static void cell_edac_count_ce(struct mem_ctl_info *mci, int chan, u64 ar) | |||
48 | syndrome = (ar & 0x000000001fe00000ul) >> 21; | 48 | syndrome = (ar & 0x000000001fe00000ul) >> 21; |
49 | 49 | ||
50 | /* TODO: Decoding of the error address */ | 50 | /* TODO: Decoding of the error address */ |
51 | edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1, | 51 | edac_mc_handle_ce(mci, csrow->first_page + pfn, offset, |
52 | csrow->first_page + pfn, offset, syndrome, | 52 | syndrome, 0, chan, ""); |
53 | 0, chan, -1, "", ""); | ||
54 | } | 53 | } |
55 | 54 | ||
56 | static void cell_edac_count_ue(struct mem_ctl_info *mci, int chan, u64 ar) | 55 | static void cell_edac_count_ue(struct mem_ctl_info *mci, int chan, u64 ar) |
57 | { | 56 | { |
58 | struct cell_edac_priv *priv = mci->pvt_info; | 57 | struct cell_edac_priv *priv = mci->pvt_info; |
59 | struct csrow_info *csrow = mci->csrows[0]; | 58 | struct csrow_info *csrow = &mci->csrows[0]; |
60 | unsigned long address, pfn, offset; | 59 | unsigned long address, pfn, offset; |
61 | 60 | ||
62 | dev_dbg(mci->pdev, "ECC UE err on node %d, channel %d, ar = 0x%016llx\n", | 61 | dev_dbg(mci->dev, "ECC UE err on node %d, channel %d, ar = 0x%016llx\n", |
63 | priv->node, chan, ar); | 62 | priv->node, chan, ar); |
64 | 63 | ||
65 | /* Address decoding is likely a bit bogus, to dbl check */ | 64 | /* Address decoding is likely a bit bogus, to dbl check */ |
@@ -70,9 +69,7 @@ static void cell_edac_count_ue(struct mem_ctl_info *mci, int chan, u64 ar) | |||
70 | offset = address & ~PAGE_MASK; | 69 | offset = address & ~PAGE_MASK; |
71 | 70 | ||
72 | /* TODO: Decoding of the error address */ | 71 | /* TODO: Decoding of the error address */ |
73 | edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1, | 72 | edac_mc_handle_ue(mci, csrow->first_page + pfn, offset, 0, ""); |
74 | csrow->first_page + pfn, offset, 0, | ||
75 | 0, chan, -1, "", ""); | ||
76 | } | 73 | } |
77 | 74 | ||
78 | static void cell_edac_check(struct mem_ctl_info *mci) | 75 | static void cell_edac_check(struct mem_ctl_info *mci) |
@@ -83,7 +80,7 @@ static void cell_edac_check(struct mem_ctl_info *mci) | |||
83 | fir = in_be64(&priv->regs->mic_fir); | 80 | fir = in_be64(&priv->regs->mic_fir); |
84 | #ifdef DEBUG | 81 | #ifdef DEBUG |
85 | if (fir != priv->prev_fir) { | 82 | if (fir != priv->prev_fir) { |
86 | dev_dbg(mci->pdev, "fir change : 0x%016lx\n", fir); | 83 | dev_dbg(mci->dev, "fir change : 0x%016lx\n", fir); |
87 | priv->prev_fir = fir; | 84 | priv->prev_fir = fir; |
88 | } | 85 | } |
89 | #endif | 86 | #endif |
@@ -119,19 +116,16 @@ static void cell_edac_check(struct mem_ctl_info *mci) | |||
119 | mb(); /* sync up */ | 116 | mb(); /* sync up */ |
120 | #ifdef DEBUG | 117 | #ifdef DEBUG |
121 | fir = in_be64(&priv->regs->mic_fir); | 118 | fir = in_be64(&priv->regs->mic_fir); |
122 | dev_dbg(mci->pdev, "fir clear : 0x%016lx\n", fir); | 119 | dev_dbg(mci->dev, "fir clear : 0x%016lx\n", fir); |
123 | #endif | 120 | #endif |
124 | } | 121 | } |
125 | } | 122 | } |
126 | 123 | ||
127 | static void cell_edac_init_csrows(struct mem_ctl_info *mci) | 124 | static void __devinit cell_edac_init_csrows(struct mem_ctl_info *mci) |
128 | { | 125 | { |
129 | struct csrow_info *csrow = mci->csrows[0]; | 126 | struct csrow_info *csrow = &mci->csrows[0]; |
130 | struct dimm_info *dimm; | ||
131 | struct cell_edac_priv *priv = mci->pvt_info; | 127 | struct cell_edac_priv *priv = mci->pvt_info; |
132 | struct device_node *np; | 128 | struct device_node *np; |
133 | int j; | ||
134 | u32 nr_pages; | ||
135 | 129 | ||
136 | for (np = NULL; | 130 | for (np = NULL; |
137 | (np = of_find_node_by_name(np, "memory")) != NULL;) { | 131 | (np = of_find_node_by_name(np, "memory")) != NULL;) { |
@@ -146,32 +140,26 @@ static void cell_edac_init_csrows(struct mem_ctl_info *mci) | |||
146 | if (of_node_to_nid(np) != priv->node) | 140 | if (of_node_to_nid(np) != priv->node) |
147 | continue; | 141 | continue; |
148 | csrow->first_page = r.start >> PAGE_SHIFT; | 142 | csrow->first_page = r.start >> PAGE_SHIFT; |
149 | nr_pages = resource_size(&r) >> PAGE_SHIFT; | 143 | csrow->nr_pages = resource_size(&r) >> PAGE_SHIFT; |
150 | csrow->last_page = csrow->first_page + nr_pages - 1; | 144 | csrow->last_page = csrow->first_page + csrow->nr_pages - 1; |
151 | 145 | csrow->mtype = MEM_XDR; | |
152 | for (j = 0; j < csrow->nr_channels; j++) { | 146 | csrow->edac_mode = EDAC_SECDED; |
153 | dimm = csrow->channels[j]->dimm; | 147 | dev_dbg(mci->dev, |
154 | dimm->mtype = MEM_XDR; | ||
155 | dimm->edac_mode = EDAC_SECDED; | ||
156 | dimm->nr_pages = nr_pages / csrow->nr_channels; | ||
157 | } | ||
158 | dev_dbg(mci->pdev, | ||
159 | "Initialized on node %d, chanmask=0x%x," | 148 | "Initialized on node %d, chanmask=0x%x," |
160 | " first_page=0x%lx, nr_pages=0x%x\n", | 149 | " first_page=0x%lx, nr_pages=0x%x\n", |
161 | priv->node, priv->chanmask, | 150 | priv->node, priv->chanmask, |
162 | csrow->first_page, nr_pages); | 151 | csrow->first_page, csrow->nr_pages); |
163 | break; | 152 | break; |
164 | } | 153 | } |
165 | } | 154 | } |
166 | 155 | ||
167 | static int cell_edac_probe(struct platform_device *pdev) | 156 | static int __devinit cell_edac_probe(struct platform_device *pdev) |
168 | { | 157 | { |
169 | struct cbe_mic_tm_regs __iomem *regs; | 158 | struct cbe_mic_tm_regs __iomem *regs; |
170 | struct mem_ctl_info *mci; | 159 | struct mem_ctl_info *mci; |
171 | struct edac_mc_layer layers[2]; | ||
172 | struct cell_edac_priv *priv; | 160 | struct cell_edac_priv *priv; |
173 | u64 reg; | 161 | u64 reg; |
174 | int rc, chanmask, num_chans; | 162 | int rc, chanmask; |
175 | 163 | ||
176 | regs = cbe_get_cpu_mic_tm_regs(cbe_node_to_cpu(pdev->id)); | 164 | regs = cbe_get_cpu_mic_tm_regs(cbe_node_to_cpu(pdev->id)); |
177 | if (regs == NULL) | 165 | if (regs == NULL) |
@@ -196,23 +184,15 @@ static int cell_edac_probe(struct platform_device *pdev) | |||
196 | in_be64(®s->mic_fir)); | 184 | in_be64(®s->mic_fir)); |
197 | 185 | ||
198 | /* Allocate & init EDAC MC data structure */ | 186 | /* Allocate & init EDAC MC data structure */ |
199 | num_chans = chanmask == 3 ? 2 : 1; | 187 | mci = edac_mc_alloc(sizeof(struct cell_edac_priv), 1, |
200 | 188 | chanmask == 3 ? 2 : 1, pdev->id); | |
201 | layers[0].type = EDAC_MC_LAYER_CHIP_SELECT; | ||
202 | layers[0].size = 1; | ||
203 | layers[0].is_virt_csrow = true; | ||
204 | layers[1].type = EDAC_MC_LAYER_CHANNEL; | ||
205 | layers[1].size = num_chans; | ||
206 | layers[1].is_virt_csrow = false; | ||
207 | mci = edac_mc_alloc(pdev->id, ARRAY_SIZE(layers), layers, | ||
208 | sizeof(struct cell_edac_priv)); | ||
209 | if (mci == NULL) | 189 | if (mci == NULL) |
210 | return -ENOMEM; | 190 | return -ENOMEM; |
211 | priv = mci->pvt_info; | 191 | priv = mci->pvt_info; |
212 | priv->regs = regs; | 192 | priv->regs = regs; |
213 | priv->node = pdev->id; | 193 | priv->node = pdev->id; |
214 | priv->chanmask = chanmask; | 194 | priv->chanmask = chanmask; |
215 | mci->pdev = &pdev->dev; | 195 | mci->dev = &pdev->dev; |
216 | mci->mtype_cap = MEM_FLAG_XDR; | 196 | mci->mtype_cap = MEM_FLAG_XDR; |
217 | mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_EC | EDAC_FLAG_SECDED; | 197 | mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_EC | EDAC_FLAG_SECDED; |
218 | mci->edac_cap = EDAC_FLAG_EC | EDAC_FLAG_SECDED; | 198 | mci->edac_cap = EDAC_FLAG_EC | EDAC_FLAG_SECDED; |
@@ -233,7 +213,7 @@ static int cell_edac_probe(struct platform_device *pdev) | |||
233 | return 0; | 213 | return 0; |
234 | } | 214 | } |
235 | 215 | ||
236 | static int cell_edac_remove(struct platform_device *pdev) | 216 | static int __devexit cell_edac_remove(struct platform_device *pdev) |
237 | { | 217 | { |
238 | struct mem_ctl_info *mci = edac_mc_del_mc(&pdev->dev); | 218 | struct mem_ctl_info *mci = edac_mc_del_mc(&pdev->dev); |
239 | if (mci) | 219 | if (mci) |
@@ -247,7 +227,7 @@ static struct platform_driver cell_edac_driver = { | |||
247 | .owner = THIS_MODULE, | 227 | .owner = THIS_MODULE, |
248 | }, | 228 | }, |
249 | .probe = cell_edac_probe, | 229 | .probe = cell_edac_probe, |
250 | .remove = cell_edac_remove, | 230 | .remove = __devexit_p(cell_edac_remove), |
251 | }; | 231 | }; |
252 | 232 | ||
253 | static int __init cell_edac_init(void) | 233 | static int __init cell_edac_init(void) |
diff --git a/drivers/edac/cpc925_edac.c b/drivers/edac/cpc925_edac.c index 7f3c57113ba..a687a0d1696 100644 --- a/drivers/edac/cpc925_edac.c +++ b/drivers/edac/cpc925_edac.c | |||
@@ -90,7 +90,6 @@ enum apimask_bits { | |||
90 | ECC_MASK_ENABLE = (APIMASK_ECC_UE_H | APIMASK_ECC_CE_H | | 90 | ECC_MASK_ENABLE = (APIMASK_ECC_UE_H | APIMASK_ECC_CE_H | |
91 | APIMASK_ECC_UE_L | APIMASK_ECC_CE_L), | 91 | APIMASK_ECC_UE_L | APIMASK_ECC_CE_L), |
92 | }; | 92 | }; |
93 | #define APIMASK_ADI(n) CPC925_BIT(((n)+1)) | ||
94 | 93 | ||
95 | /************************************************************ | 94 | /************************************************************ |
96 | * Processor Interface Exception Register (APIEXCP) | 95 | * Processor Interface Exception Register (APIEXCP) |
@@ -316,23 +315,22 @@ static void get_total_mem(struct cpc925_mc_pdata *pdata) | |||
316 | reg += aw; | 315 | reg += aw; |
317 | size = of_read_number(reg, sw); | 316 | size = of_read_number(reg, sw); |
318 | reg += sw; | 317 | reg += sw; |
319 | edac_dbg(1, "start 0x%lx, size 0x%lx\n", start, size); | 318 | debugf1("%s: start 0x%lx, size 0x%lx\n", __func__, |
319 | start, size); | ||
320 | pdata->total_mem += size; | 320 | pdata->total_mem += size; |
321 | } while (reg < reg_end); | 321 | } while (reg < reg_end); |
322 | 322 | ||
323 | of_node_put(np); | 323 | of_node_put(np); |
324 | edac_dbg(0, "total_mem 0x%lx\n", pdata->total_mem); | 324 | debugf0("%s: total_mem 0x%lx\n", __func__, pdata->total_mem); |
325 | } | 325 | } |
326 | 326 | ||
327 | static void cpc925_init_csrows(struct mem_ctl_info *mci) | 327 | static void cpc925_init_csrows(struct mem_ctl_info *mci) |
328 | { | 328 | { |
329 | struct cpc925_mc_pdata *pdata = mci->pvt_info; | 329 | struct cpc925_mc_pdata *pdata = mci->pvt_info; |
330 | struct csrow_info *csrow; | 330 | struct csrow_info *csrow; |
331 | struct dimm_info *dimm; | 331 | int index; |
332 | enum dev_type dtype; | 332 | u32 mbmr, mbbar, bba; |
333 | int index, j; | 333 | unsigned long row_size, last_nr_pages = 0; |
334 | u32 mbmr, mbbar, bba, grain; | ||
335 | unsigned long row_size, nr_pages, last_nr_pages = 0; | ||
336 | 334 | ||
337 | get_total_mem(pdata); | 335 | get_total_mem(pdata); |
338 | 336 | ||
@@ -347,44 +345,40 @@ static void cpc925_init_csrows(struct mem_ctl_info *mci) | |||
347 | if (bba == 0) | 345 | if (bba == 0) |
348 | continue; /* not populated */ | 346 | continue; /* not populated */ |
349 | 347 | ||
350 | csrow = mci->csrows[index]; | 348 | csrow = &mci->csrows[index]; |
351 | 349 | ||
352 | row_size = bba * (1UL << 28); /* 256M */ | 350 | row_size = bba * (1UL << 28); /* 256M */ |
353 | csrow->first_page = last_nr_pages; | 351 | csrow->first_page = last_nr_pages; |
354 | nr_pages = row_size >> PAGE_SHIFT; | 352 | csrow->nr_pages = row_size >> PAGE_SHIFT; |
355 | csrow->last_page = csrow->first_page + nr_pages - 1; | 353 | csrow->last_page = csrow->first_page + csrow->nr_pages - 1; |
356 | last_nr_pages = csrow->last_page + 1; | 354 | last_nr_pages = csrow->last_page + 1; |
357 | 355 | ||
356 | csrow->mtype = MEM_RDDR; | ||
357 | csrow->edac_mode = EDAC_SECDED; | ||
358 | |||
358 | switch (csrow->nr_channels) { | 359 | switch (csrow->nr_channels) { |
359 | case 1: /* Single channel */ | 360 | case 1: /* Single channel */ |
360 | grain = 32; /* four-beat burst of 32 bytes */ | 361 | csrow->grain = 32; /* four-beat burst of 32 bytes */ |
361 | break; | 362 | break; |
362 | case 2: /* Dual channel */ | 363 | case 2: /* Dual channel */ |
363 | default: | 364 | default: |
364 | grain = 64; /* four-beat burst of 64 bytes */ | 365 | csrow->grain = 64; /* four-beat burst of 64 bytes */ |
365 | break; | 366 | break; |
366 | } | 367 | } |
368 | |||
367 | switch ((mbmr & MBMR_MODE_MASK) >> MBMR_MODE_SHIFT) { | 369 | switch ((mbmr & MBMR_MODE_MASK) >> MBMR_MODE_SHIFT) { |
368 | case 6: /* 0110, no way to differentiate X8 VS X16 */ | 370 | case 6: /* 0110, no way to differentiate X8 VS X16 */ |
369 | case 5: /* 0101 */ | 371 | case 5: /* 0101 */ |
370 | case 8: /* 1000 */ | 372 | case 8: /* 1000 */ |
371 | dtype = DEV_X16; | 373 | csrow->dtype = DEV_X16; |
372 | break; | 374 | break; |
373 | case 7: /* 0111 */ | 375 | case 7: /* 0111 */ |
374 | case 9: /* 1001 */ | 376 | case 9: /* 1001 */ |
375 | dtype = DEV_X8; | 377 | csrow->dtype = DEV_X8; |
376 | break; | 378 | break; |
377 | default: | 379 | default: |
378 | dtype = DEV_UNKNOWN; | 380 | csrow->dtype = DEV_UNKNOWN; |
379 | break; | 381 | break; |
380 | } | ||
381 | for (j = 0; j < csrow->nr_channels; j++) { | ||
382 | dimm = csrow->channels[j]->dimm; | ||
383 | dimm->nr_pages = nr_pages / csrow->nr_channels; | ||
384 | dimm->mtype = MEM_RDDR; | ||
385 | dimm->edac_mode = EDAC_SECDED; | ||
386 | dimm->grain = grain; | ||
387 | dimm->dtype = dtype; | ||
388 | } | 382 | } |
389 | } | 383 | } |
390 | } | 384 | } |
@@ -462,7 +456,7 @@ static void cpc925_mc_get_pfn(struct mem_ctl_info *mci, u32 mear, | |||
462 | *csrow = rank; | 456 | *csrow = rank; |
463 | 457 | ||
464 | #ifdef CONFIG_EDAC_DEBUG | 458 | #ifdef CONFIG_EDAC_DEBUG |
465 | if (mci->csrows[rank]->first_page == 0) { | 459 | if (mci->csrows[rank].first_page == 0) { |
466 | cpc925_mc_printk(mci, KERN_ERR, "ECC occurs in a " | 460 | cpc925_mc_printk(mci, KERN_ERR, "ECC occurs in a " |
467 | "non-populated csrow, broken hardware?\n"); | 461 | "non-populated csrow, broken hardware?\n"); |
468 | return; | 462 | return; |
@@ -470,7 +464,7 @@ static void cpc925_mc_get_pfn(struct mem_ctl_info *mci, u32 mear, | |||
470 | #endif | 464 | #endif |
471 | 465 | ||
472 | /* Revert csrow number */ | 466 | /* Revert csrow number */ |
473 | pa = mci->csrows[rank]->first_page << PAGE_SHIFT; | 467 | pa = mci->csrows[rank].first_page << PAGE_SHIFT; |
474 | 468 | ||
475 | /* Revert column address */ | 469 | /* Revert column address */ |
476 | col += bcnt; | 470 | col += bcnt; |
@@ -511,7 +505,7 @@ static void cpc925_mc_get_pfn(struct mem_ctl_info *mci, u32 mear, | |||
511 | *offset = pa & (PAGE_SIZE - 1); | 505 | *offset = pa & (PAGE_SIZE - 1); |
512 | *pfn = pa >> PAGE_SHIFT; | 506 | *pfn = pa >> PAGE_SHIFT; |
513 | 507 | ||
514 | edac_dbg(0, "ECC physical address 0x%lx\n", pa); | 508 | debugf0("%s: ECC physical address 0x%lx\n", __func__, pa); |
515 | } | 509 | } |
516 | 510 | ||
517 | static int cpc925_mc_find_channel(struct mem_ctl_info *mci, u16 syndrome) | 511 | static int cpc925_mc_find_channel(struct mem_ctl_info *mci, u16 syndrome) |
@@ -554,18 +548,13 @@ static void cpc925_mc_check(struct mem_ctl_info *mci) | |||
554 | if (apiexcp & CECC_EXCP_DETECTED) { | 548 | if (apiexcp & CECC_EXCP_DETECTED) { |
555 | cpc925_mc_printk(mci, KERN_INFO, "DRAM CECC Fault\n"); | 549 | cpc925_mc_printk(mci, KERN_INFO, "DRAM CECC Fault\n"); |
556 | channel = cpc925_mc_find_channel(mci, syndrome); | 550 | channel = cpc925_mc_find_channel(mci, syndrome); |
557 | edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1, | 551 | edac_mc_handle_ce(mci, pfn, offset, syndrome, |
558 | pfn, offset, syndrome, | 552 | csrow, channel, mci->ctl_name); |
559 | csrow, channel, -1, | ||
560 | mci->ctl_name, ""); | ||
561 | } | 553 | } |
562 | 554 | ||
563 | if (apiexcp & UECC_EXCP_DETECTED) { | 555 | if (apiexcp & UECC_EXCP_DETECTED) { |
564 | cpc925_mc_printk(mci, KERN_INFO, "DRAM UECC Fault\n"); | 556 | cpc925_mc_printk(mci, KERN_INFO, "DRAM UECC Fault\n"); |
565 | edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1, | 557 | edac_mc_handle_ue(mci, pfn, offset, csrow, mci->ctl_name); |
566 | pfn, offset, 0, | ||
567 | csrow, -1, -1, | ||
568 | mci->ctl_name, ""); | ||
569 | } | 558 | } |
570 | 559 | ||
571 | cpc925_mc_printk(mci, KERN_INFO, "Dump registers:\n"); | 560 | cpc925_mc_printk(mci, KERN_INFO, "Dump registers:\n"); |
@@ -592,73 +581,16 @@ static void cpc925_mc_check(struct mem_ctl_info *mci) | |||
592 | } | 581 | } |
593 | 582 | ||
594 | /******************** CPU err device********************************/ | 583 | /******************** CPU err device********************************/ |
595 | static u32 cpc925_cpu_mask_disabled(void) | ||
596 | { | ||
597 | struct device_node *cpus; | ||
598 | struct device_node *cpunode = NULL; | ||
599 | static u32 mask = 0; | ||
600 | |||
601 | /* use cached value if available */ | ||
602 | if (mask != 0) | ||
603 | return mask; | ||
604 | |||
605 | mask = APIMASK_ADI0 | APIMASK_ADI1; | ||
606 | |||
607 | cpus = of_find_node_by_path("/cpus"); | ||
608 | if (cpus == NULL) { | ||
609 | cpc925_printk(KERN_DEBUG, "No /cpus node !\n"); | ||
610 | return 0; | ||
611 | } | ||
612 | |||
613 | while ((cpunode = of_get_next_child(cpus, cpunode)) != NULL) { | ||
614 | const u32 *reg = of_get_property(cpunode, "reg", NULL); | ||
615 | |||
616 | if (strcmp(cpunode->type, "cpu")) { | ||
617 | cpc925_printk(KERN_ERR, "Not a cpu node in /cpus: %s\n", cpunode->name); | ||
618 | continue; | ||
619 | } | ||
620 | |||
621 | if (reg == NULL || *reg > 2) { | ||
622 | cpc925_printk(KERN_ERR, "Bad reg value at %s\n", cpunode->full_name); | ||
623 | continue; | ||
624 | } | ||
625 | |||
626 | mask &= ~APIMASK_ADI(*reg); | ||
627 | } | ||
628 | |||
629 | if (mask != (APIMASK_ADI0 | APIMASK_ADI1)) { | ||
630 | /* We assume that each CPU sits on it's own PI and that | ||
631 | * for present CPUs the reg property equals to the PI | ||
632 | * interface id */ | ||
633 | cpc925_printk(KERN_WARNING, | ||
634 | "Assuming PI id is equal to CPU MPIC id!\n"); | ||
635 | } | ||
636 | |||
637 | of_node_put(cpunode); | ||
638 | of_node_put(cpus); | ||
639 | |||
640 | return mask; | ||
641 | } | ||
642 | |||
643 | /* Enable CPU Errors detection */ | 584 | /* Enable CPU Errors detection */ |
644 | static void cpc925_cpu_init(struct cpc925_dev_info *dev_info) | 585 | static void cpc925_cpu_init(struct cpc925_dev_info *dev_info) |
645 | { | 586 | { |
646 | u32 apimask; | 587 | u32 apimask; |
647 | u32 cpumask; | ||
648 | 588 | ||
649 | apimask = __raw_readl(dev_info->vbase + REG_APIMASK_OFFSET); | 589 | apimask = __raw_readl(dev_info->vbase + REG_APIMASK_OFFSET); |
650 | 590 | if ((apimask & CPU_MASK_ENABLE) == 0) { | |
651 | cpumask = cpc925_cpu_mask_disabled(); | ||
652 | if (apimask & cpumask) { | ||
653 | cpc925_printk(KERN_WARNING, "CPU(s) not present, " | ||
654 | "but enabled in APIMASK, disabling\n"); | ||
655 | apimask &= ~cpumask; | ||
656 | } | ||
657 | |||
658 | if ((apimask & CPU_MASK_ENABLE) == 0) | ||
659 | apimask |= CPU_MASK_ENABLE; | 591 | apimask |= CPU_MASK_ENABLE; |
660 | 592 | __raw_writel(apimask, dev_info->vbase + REG_APIMASK_OFFSET); | |
661 | __raw_writel(apimask, dev_info->vbase + REG_APIMASK_OFFSET); | 593 | } |
662 | } | 594 | } |
663 | 595 | ||
664 | /* Disable CPU Errors detection */ | 596 | /* Disable CPU Errors detection */ |
@@ -690,9 +622,6 @@ static void cpc925_cpu_check(struct edac_device_ctl_info *edac_dev) | |||
690 | if ((apiexcp & CPU_EXCP_DETECTED) == 0) | 622 | if ((apiexcp & CPU_EXCP_DETECTED) == 0) |
691 | return; | 623 | return; |
692 | 624 | ||
693 | if ((apiexcp & ~cpc925_cpu_mask_disabled()) == 0) | ||
694 | return; | ||
695 | |||
696 | apimask = __raw_readl(dev_info->vbase + REG_APIMASK_OFFSET); | 625 | apimask = __raw_readl(dev_info->vbase + REG_APIMASK_OFFSET); |
697 | cpc925_printk(KERN_INFO, "Processor Interface Fault\n" | 626 | cpc925_printk(KERN_INFO, "Processor Interface Fault\n" |
698 | "Processor Interface register dump:\n"); | 627 | "Processor Interface register dump:\n"); |
@@ -851,8 +780,8 @@ static void cpc925_add_edac_devices(void __iomem *vbase) | |||
851 | goto err2; | 780 | goto err2; |
852 | } | 781 | } |
853 | 782 | ||
854 | edac_dbg(0, "Successfully added edac device for %s\n", | 783 | debugf0("%s: Successfully added edac device for %s\n", |
855 | dev_info->ctl_name); | 784 | __func__, dev_info->ctl_name); |
856 | 785 | ||
857 | continue; | 786 | continue; |
858 | 787 | ||
@@ -883,8 +812,8 @@ static void cpc925_del_edac_devices(void) | |||
883 | if (dev_info->exit) | 812 | if (dev_info->exit) |
884 | dev_info->exit(dev_info); | 813 | dev_info->exit(dev_info); |
885 | 814 | ||
886 | edac_dbg(0, "Successfully deleted edac device for %s\n", | 815 | debugf0("%s: Successfully deleted edac device for %s\n", |
887 | dev_info->ctl_name); | 816 | __func__, dev_info->ctl_name); |
888 | } | 817 | } |
889 | } | 818 | } |
890 | 819 | ||
@@ -899,7 +828,7 @@ static int cpc925_get_sdram_scrub_rate(struct mem_ctl_info *mci) | |||
899 | mscr = __raw_readl(pdata->vbase + REG_MSCR_OFFSET); | 828 | mscr = __raw_readl(pdata->vbase + REG_MSCR_OFFSET); |
900 | si = (mscr & MSCR_SI_MASK) >> MSCR_SI_SHIFT; | 829 | si = (mscr & MSCR_SI_MASK) >> MSCR_SI_SHIFT; |
901 | 830 | ||
902 | edac_dbg(0, "Mem Scrub Ctrl Register 0x%x\n", mscr); | 831 | debugf0("%s, Mem Scrub Ctrl Register 0x%x\n", __func__, mscr); |
903 | 832 | ||
904 | if (((mscr & MSCR_SCRUB_MOD_MASK) != MSCR_BACKGR_SCRUB) || | 833 | if (((mscr & MSCR_SCRUB_MOD_MASK) != MSCR_BACKGR_SCRUB) || |
905 | (si == 0)) { | 834 | (si == 0)) { |
@@ -927,22 +856,22 @@ static int cpc925_mc_get_channels(void __iomem *vbase) | |||
927 | ((mbcr & MBCR_64BITBUS_MASK) == 0)) | 856 | ((mbcr & MBCR_64BITBUS_MASK) == 0)) |
928 | dual = 1; | 857 | dual = 1; |
929 | 858 | ||
930 | edac_dbg(0, "%s channel\n", (dual > 0) ? "Dual" : "Single"); | 859 | debugf0("%s: %s channel\n", __func__, |
860 | (dual > 0) ? "Dual" : "Single"); | ||
931 | 861 | ||
932 | return dual; | 862 | return dual; |
933 | } | 863 | } |
934 | 864 | ||
935 | static int cpc925_probe(struct platform_device *pdev) | 865 | static int __devinit cpc925_probe(struct platform_device *pdev) |
936 | { | 866 | { |
937 | static int edac_mc_idx; | 867 | static int edac_mc_idx; |
938 | struct mem_ctl_info *mci; | 868 | struct mem_ctl_info *mci; |
939 | struct edac_mc_layer layers[2]; | ||
940 | void __iomem *vbase; | 869 | void __iomem *vbase; |
941 | struct cpc925_mc_pdata *pdata; | 870 | struct cpc925_mc_pdata *pdata; |
942 | struct resource *r; | 871 | struct resource *r; |
943 | int res = 0, nr_channels; | 872 | int res = 0, nr_channels; |
944 | 873 | ||
945 | edac_dbg(0, "%s platform device found!\n", pdev->name); | 874 | debugf0("%s: %s platform device found!\n", __func__, pdev->name); |
946 | 875 | ||
947 | if (!devres_open_group(&pdev->dev, cpc925_probe, GFP_KERNEL)) { | 876 | if (!devres_open_group(&pdev->dev, cpc925_probe, GFP_KERNEL)) { |
948 | res = -ENOMEM; | 877 | res = -ENOMEM; |
@@ -972,16 +901,9 @@ static int cpc925_probe(struct platform_device *pdev) | |||
972 | goto err2; | 901 | goto err2; |
973 | } | 902 | } |
974 | 903 | ||
975 | nr_channels = cpc925_mc_get_channels(vbase) + 1; | 904 | nr_channels = cpc925_mc_get_channels(vbase); |
976 | 905 | mci = edac_mc_alloc(sizeof(struct cpc925_mc_pdata), | |
977 | layers[0].type = EDAC_MC_LAYER_CHIP_SELECT; | 906 | CPC925_NR_CSROWS, nr_channels + 1, edac_mc_idx); |
978 | layers[0].size = CPC925_NR_CSROWS; | ||
979 | layers[0].is_virt_csrow = true; | ||
980 | layers[1].type = EDAC_MC_LAYER_CHANNEL; | ||
981 | layers[1].size = nr_channels; | ||
982 | layers[1].is_virt_csrow = false; | ||
983 | mci = edac_mc_alloc(edac_mc_idx, ARRAY_SIZE(layers), layers, | ||
984 | sizeof(struct cpc925_mc_pdata)); | ||
985 | if (!mci) { | 907 | if (!mci) { |
986 | cpc925_printk(KERN_ERR, "No memory for mem_ctl_info\n"); | 908 | cpc925_printk(KERN_ERR, "No memory for mem_ctl_info\n"); |
987 | res = -ENOMEM; | 909 | res = -ENOMEM; |
@@ -993,7 +915,7 @@ static int cpc925_probe(struct platform_device *pdev) | |||
993 | pdata->edac_idx = edac_mc_idx++; | 915 | pdata->edac_idx = edac_mc_idx++; |
994 | pdata->name = pdev->name; | 916 | pdata->name = pdev->name; |
995 | 917 | ||
996 | mci->pdev = &pdev->dev; | 918 | mci->dev = &pdev->dev; |
997 | platform_set_drvdata(pdev, mci); | 919 | platform_set_drvdata(pdev, mci); |
998 | mci->dev_name = dev_name(&pdev->dev); | 920 | mci->dev_name = dev_name(&pdev->dev); |
999 | mci->mtype_cap = MEM_FLAG_RDDR | MEM_FLAG_DDR; | 921 | mci->mtype_cap = MEM_FLAG_RDDR | MEM_FLAG_DDR; |
@@ -1024,7 +946,7 @@ static int cpc925_probe(struct platform_device *pdev) | |||
1024 | cpc925_add_edac_devices(vbase); | 946 | cpc925_add_edac_devices(vbase); |
1025 | 947 | ||
1026 | /* get this far and it's successful */ | 948 | /* get this far and it's successful */ |
1027 | edac_dbg(0, "success\n"); | 949 | debugf0("%s: success\n", __func__); |
1028 | 950 | ||
1029 | res = 0; | 951 | res = 0; |
1030 | goto out; | 952 | goto out; |
diff --git a/drivers/edac/e752x_edac.c b/drivers/edac/e752x_edac.c index 644fec54681..1af531a11d2 100644 --- a/drivers/edac/e752x_edac.c +++ b/drivers/edac/e752x_edac.c | |||
@@ -4,11 +4,7 @@ | |||
4 | * This file may be distributed under the terms of the | 4 | * This file may be distributed under the terms of the |
5 | * GNU General Public License. | 5 | * GNU General Public License. |
6 | * | 6 | * |
7 | * Implement support for the e7520, E7525, e7320 and i3100 memory controllers. | 7 | * See "enum e752x_chips" below for supported chipsets |
8 | * | ||
9 | * Datasheets: | ||
10 | * http://www.intel.in/content/www/in/en/chipsets/e7525-memory-controller-hub-datasheet.html | ||
11 | * ftp://download.intel.com/design/intarch/datashts/31345803.pdf | ||
12 | * | 8 | * |
13 | * Written by Tom Zimmerman | 9 | * Written by Tom Zimmerman |
14 | * | 10 | * |
@@ -17,6 +13,8 @@ | |||
17 | * Wang Zhenyu at intel.com | 13 | * Wang Zhenyu at intel.com |
18 | * Dave Jiang at mvista.com | 14 | * Dave Jiang at mvista.com |
19 | * | 15 | * |
16 | * $Id: edac_e752x.c,v 1.5.2.11 2005/10/05 00:43:44 dsp_llnl Exp $ | ||
17 | * | ||
20 | */ | 18 | */ |
21 | 19 | ||
22 | #include <linux/module.h> | 20 | #include <linux/module.h> |
@@ -189,25 +187,6 @@ enum e752x_chips { | |||
189 | I3100 = 3 | 187 | I3100 = 3 |
190 | }; | 188 | }; |
191 | 189 | ||
192 | /* | ||
193 | * Those chips Support single-rank and dual-rank memories only. | ||
194 | * | ||
195 | * On e752x chips, the odd rows are present only on dual-rank memories. | ||
196 | * Dividing the rank by two will provide the dimm# | ||
197 | * | ||
198 | * i3100 MC has a different mapping: it supports only 4 ranks. | ||
199 | * | ||
200 | * The mapping is (from 1 to n): | ||
201 | * slot single-ranked double-ranked | ||
202 | * dimm #1 -> rank #4 NA | ||
203 | * dimm #2 -> rank #3 NA | ||
204 | * dimm #3 -> rank #2 Ranks 2 and 3 | ||
205 | * dimm #4 -> rank $1 Ranks 1 and 4 | ||
206 | * | ||
207 | * FIXME: The current mapping for i3100 considers that it supports up to 8 | ||
208 | * ranks/chanel, but datasheet says that the MC supports only 4 ranks. | ||
209 | */ | ||
210 | |||
211 | struct e752x_pvt { | 190 | struct e752x_pvt { |
212 | struct pci_dev *bridge_ck; | 191 | struct pci_dev *bridge_ck; |
213 | struct pci_dev *dev_d0f0; | 192 | struct pci_dev *dev_d0f0; |
@@ -309,7 +288,7 @@ static unsigned long ctl_page_to_phys(struct mem_ctl_info *mci, | |||
309 | u32 remap; | 288 | u32 remap; |
310 | struct e752x_pvt *pvt = (struct e752x_pvt *)mci->pvt_info; | 289 | struct e752x_pvt *pvt = (struct e752x_pvt *)mci->pvt_info; |
311 | 290 | ||
312 | edac_dbg(3, "\n"); | 291 | debugf3("%s()\n", __func__); |
313 | 292 | ||
314 | if (page < pvt->tolm) | 293 | if (page < pvt->tolm) |
315 | return page; | 294 | return page; |
@@ -335,7 +314,7 @@ static void do_process_ce(struct mem_ctl_info *mci, u16 error_one, | |||
335 | int i; | 314 | int i; |
336 | struct e752x_pvt *pvt = (struct e752x_pvt *)mci->pvt_info; | 315 | struct e752x_pvt *pvt = (struct e752x_pvt *)mci->pvt_info; |
337 | 316 | ||
338 | edac_dbg(3, "\n"); | 317 | debugf3("%s()\n", __func__); |
339 | 318 | ||
340 | /* convert the addr to 4k page */ | 319 | /* convert the addr to 4k page */ |
341 | page = sec1_add >> (PAGE_SHIFT - 4); | 320 | page = sec1_add >> (PAGE_SHIFT - 4); |
@@ -371,10 +350,8 @@ static void do_process_ce(struct mem_ctl_info *mci, u16 error_one, | |||
371 | channel = !(error_one & 1); | 350 | channel = !(error_one & 1); |
372 | 351 | ||
373 | /* e752x mc reads 34:6 of the DRAM linear address */ | 352 | /* e752x mc reads 34:6 of the DRAM linear address */ |
374 | edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1, | 353 | edac_mc_handle_ce(mci, page, offset_in_page(sec1_add << 4), |
375 | page, offset_in_page(sec1_add << 4), sec1_syndrome, | 354 | sec1_syndrome, row, channel, "e752x CE"); |
376 | row, channel, -1, | ||
377 | "e752x CE", ""); | ||
378 | } | 355 | } |
379 | 356 | ||
380 | static inline void process_ce(struct mem_ctl_info *mci, u16 error_one, | 357 | static inline void process_ce(struct mem_ctl_info *mci, u16 error_one, |
@@ -394,7 +371,7 @@ static void do_process_ue(struct mem_ctl_info *mci, u16 error_one, | |||
394 | int row; | 371 | int row; |
395 | struct e752x_pvt *pvt = (struct e752x_pvt *)mci->pvt_info; | 372 | struct e752x_pvt *pvt = (struct e752x_pvt *)mci->pvt_info; |
396 | 373 | ||
397 | edac_dbg(3, "\n"); | 374 | debugf3("%s()\n", __func__); |
398 | 375 | ||
399 | if (error_one & 0x0202) { | 376 | if (error_one & 0x0202) { |
400 | error_2b = ded_add; | 377 | error_2b = ded_add; |
@@ -408,12 +385,9 @@ static void do_process_ue(struct mem_ctl_info *mci, u16 error_one, | |||
408 | edac_mc_find_csrow_by_page(mci, block_page); | 385 | edac_mc_find_csrow_by_page(mci, block_page); |
409 | 386 | ||
410 | /* e752x mc reads 34:6 of the DRAM linear address */ | 387 | /* e752x mc reads 34:6 of the DRAM linear address */ |
411 | edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1, | 388 | edac_mc_handle_ue(mci, block_page, |
412 | block_page, | 389 | offset_in_page(error_2b << 4), |
413 | offset_in_page(error_2b << 4), 0, | 390 | row, "e752x UE from Read"); |
414 | row, -1, -1, | ||
415 | "e752x UE from Read", ""); | ||
416 | |||
417 | } | 391 | } |
418 | if (error_one & 0x0404) { | 392 | if (error_one & 0x0404) { |
419 | error_2b = scrb_add; | 393 | error_2b = scrb_add; |
@@ -427,11 +401,9 @@ static void do_process_ue(struct mem_ctl_info *mci, u16 error_one, | |||
427 | edac_mc_find_csrow_by_page(mci, block_page); | 401 | edac_mc_find_csrow_by_page(mci, block_page); |
428 | 402 | ||
429 | /* e752x mc reads 34:6 of the DRAM linear address */ | 403 | /* e752x mc reads 34:6 of the DRAM linear address */ |
430 | edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1, | 404 | edac_mc_handle_ue(mci, block_page, |
431 | block_page, | 405 | offset_in_page(error_2b << 4), |
432 | offset_in_page(error_2b << 4), 0, | 406 | row, "e752x UE from Scruber"); |
433 | row, -1, -1, | ||
434 | "e752x UE from Scruber", ""); | ||
435 | } | 407 | } |
436 | } | 408 | } |
437 | 409 | ||
@@ -453,10 +425,8 @@ static inline void process_ue_no_info_wr(struct mem_ctl_info *mci, | |||
453 | if (!handle_error) | 425 | if (!handle_error) |
454 | return; | 426 | return; |
455 | 427 | ||
456 | edac_dbg(3, "\n"); | 428 | debugf3("%s()\n", __func__); |
457 | edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1, 0, 0, 0, | 429 | edac_mc_handle_ue_no_info(mci, "e752x UE log memory write"); |
458 | -1, -1, -1, | ||
459 | "e752x UE log memory write", ""); | ||
460 | } | 430 | } |
461 | 431 | ||
462 | static void do_process_ded_retry(struct mem_ctl_info *mci, u16 error, | 432 | static void do_process_ded_retry(struct mem_ctl_info *mci, u16 error, |
@@ -982,7 +952,7 @@ static void e752x_check(struct mem_ctl_info *mci) | |||
982 | { | 952 | { |
983 | struct e752x_error_info info; | 953 | struct e752x_error_info info; |
984 | 954 | ||
985 | edac_dbg(3, "\n"); | 955 | debugf3("%s()\n", __func__); |
986 | e752x_get_error_info(mci, &info); | 956 | e752x_get_error_info(mci, &info); |
987 | e752x_process_error_info(mci, &info, 1); | 957 | e752x_process_error_info(mci, &info, 1); |
988 | } | 958 | } |
@@ -1069,13 +1039,12 @@ static void e752x_init_csrows(struct mem_ctl_info *mci, struct pci_dev *pdev, | |||
1069 | u16 ddrcsr) | 1039 | u16 ddrcsr) |
1070 | { | 1040 | { |
1071 | struct csrow_info *csrow; | 1041 | struct csrow_info *csrow; |
1072 | enum edac_type edac_mode; | ||
1073 | unsigned long last_cumul_size; | 1042 | unsigned long last_cumul_size; |
1074 | int index, mem_dev, drc_chan; | 1043 | int index, mem_dev, drc_chan; |
1075 | int drc_drbg; /* DRB granularity 0=64mb, 1=128mb */ | 1044 | int drc_drbg; /* DRB granularity 0=64mb, 1=128mb */ |
1076 | int drc_ddim; /* DRAM Data Integrity Mode 0=none, 2=edac */ | 1045 | int drc_ddim; /* DRAM Data Integrity Mode 0=none, 2=edac */ |
1077 | u8 value; | 1046 | u8 value; |
1078 | u32 dra, drc, cumul_size, i, nr_pages; | 1047 | u32 dra, drc, cumul_size; |
1079 | 1048 | ||
1080 | dra = 0; | 1049 | dra = 0; |
1081 | for (index = 0; index < 4; index++) { | 1050 | for (index = 0; index < 4; index++) { |
@@ -1084,7 +1053,7 @@ static void e752x_init_csrows(struct mem_ctl_info *mci, struct pci_dev *pdev, | |||
1084 | dra |= dra_reg << (index * 8); | 1053 | dra |= dra_reg << (index * 8); |
1085 | } | 1054 | } |
1086 | pci_read_config_dword(pdev, E752X_DRC, &drc); | 1055 | pci_read_config_dword(pdev, E752X_DRC, &drc); |
1087 | drc_chan = dual_channel_active(ddrcsr) ? 1 : 0; | 1056 | drc_chan = dual_channel_active(ddrcsr); |
1088 | drc_drbg = drc_chan + 1; /* 128 in dual mode, 64 in single */ | 1057 | drc_drbg = drc_chan + 1; /* 128 in dual mode, 64 in single */ |
1089 | drc_ddim = (drc >> 20) & 0x3; | 1058 | drc_ddim = (drc >> 20) & 0x3; |
1090 | 1059 | ||
@@ -1096,45 +1065,39 @@ static void e752x_init_csrows(struct mem_ctl_info *mci, struct pci_dev *pdev, | |||
1096 | for (last_cumul_size = index = 0; index < mci->nr_csrows; index++) { | 1065 | for (last_cumul_size = index = 0; index < mci->nr_csrows; index++) { |
1097 | /* mem_dev 0=x8, 1=x4 */ | 1066 | /* mem_dev 0=x8, 1=x4 */ |
1098 | mem_dev = (dra >> (index * 4 + 2)) & 0x3; | 1067 | mem_dev = (dra >> (index * 4 + 2)) & 0x3; |
1099 | csrow = mci->csrows[remap_csrow_index(mci, index)]; | 1068 | csrow = &mci->csrows[remap_csrow_index(mci, index)]; |
1100 | 1069 | ||
1101 | mem_dev = (mem_dev == 2); | 1070 | mem_dev = (mem_dev == 2); |
1102 | pci_read_config_byte(pdev, E752X_DRB + index, &value); | 1071 | pci_read_config_byte(pdev, E752X_DRB + index, &value); |
1103 | /* convert a 128 or 64 MiB DRB to a page size. */ | 1072 | /* convert a 128 or 64 MiB DRB to a page size. */ |
1104 | cumul_size = value << (25 + drc_drbg - PAGE_SHIFT); | 1073 | cumul_size = value << (25 + drc_drbg - PAGE_SHIFT); |
1105 | edac_dbg(3, "(%d) cumul_size 0x%x\n", index, cumul_size); | 1074 | debugf3("%s(): (%d) cumul_size 0x%x\n", __func__, index, |
1075 | cumul_size); | ||
1106 | if (cumul_size == last_cumul_size) | 1076 | if (cumul_size == last_cumul_size) |
1107 | continue; /* not populated */ | 1077 | continue; /* not populated */ |
1108 | 1078 | ||
1109 | csrow->first_page = last_cumul_size; | 1079 | csrow->first_page = last_cumul_size; |
1110 | csrow->last_page = cumul_size - 1; | 1080 | csrow->last_page = cumul_size - 1; |
1111 | nr_pages = cumul_size - last_cumul_size; | 1081 | csrow->nr_pages = cumul_size - last_cumul_size; |
1112 | last_cumul_size = cumul_size; | 1082 | last_cumul_size = cumul_size; |
1083 | csrow->grain = 1 << 12; /* 4KiB - resolution of CELOG */ | ||
1084 | csrow->mtype = MEM_RDDR; /* only one type supported */ | ||
1085 | csrow->dtype = mem_dev ? DEV_X4 : DEV_X8; | ||
1113 | 1086 | ||
1114 | /* | 1087 | /* |
1115 | * if single channel or x8 devices then SECDED | 1088 | * if single channel or x8 devices then SECDED |
1116 | * if dual channel and x4 then S4ECD4ED | 1089 | * if dual channel and x4 then S4ECD4ED |
1117 | */ | 1090 | */ |
1118 | if (drc_ddim) { | 1091 | if (drc_ddim) { |
1119 | if (drc_chan && mem_dev) { | 1092 | if (drc_chan && mem_dev) { |
1120 | edac_mode = EDAC_S4ECD4ED; | 1093 | csrow->edac_mode = EDAC_S4ECD4ED; |
1121 | mci->edac_cap |= EDAC_FLAG_S4ECD4ED; | 1094 | mci->edac_cap |= EDAC_FLAG_S4ECD4ED; |
1122 | } else { | 1095 | } else { |
1123 | edac_mode = EDAC_SECDED; | 1096 | csrow->edac_mode = EDAC_SECDED; |
1124 | mci->edac_cap |= EDAC_FLAG_SECDED; | 1097 | mci->edac_cap |= EDAC_FLAG_SECDED; |
1125 | } | 1098 | } |
1126 | } else | 1099 | } else |
1127 | edac_mode = EDAC_NONE; | 1100 | csrow->edac_mode = EDAC_NONE; |
1128 | for (i = 0; i < csrow->nr_channels; i++) { | ||
1129 | struct dimm_info *dimm = csrow->channels[i]->dimm; | ||
1130 | |||
1131 | edac_dbg(3, "Initializing rank at (%i,%i)\n", index, i); | ||
1132 | dimm->nr_pages = nr_pages / csrow->nr_channels; | ||
1133 | dimm->grain = 1 << 12; /* 4KiB - resolution of CELOG */ | ||
1134 | dimm->mtype = MEM_RDDR; /* only one type supported */ | ||
1135 | dimm->dtype = mem_dev ? DEV_X4 : DEV_X8; | ||
1136 | dimm->edac_mode = edac_mode; | ||
1137 | } | ||
1138 | } | 1101 | } |
1139 | } | 1102 | } |
1140 | 1103 | ||
@@ -1263,14 +1226,13 @@ static int e752x_probe1(struct pci_dev *pdev, int dev_idx) | |||
1263 | u16 pci_data; | 1226 | u16 pci_data; |
1264 | u8 stat8; | 1227 | u8 stat8; |
1265 | struct mem_ctl_info *mci; | 1228 | struct mem_ctl_info *mci; |
1266 | struct edac_mc_layer layers[2]; | ||
1267 | struct e752x_pvt *pvt; | 1229 | struct e752x_pvt *pvt; |
1268 | u16 ddrcsr; | 1230 | u16 ddrcsr; |
1269 | int drc_chan; /* Number of channels 0=1chan,1=2chan */ | 1231 | int drc_chan; /* Number of channels 0=1chan,1=2chan */ |
1270 | struct e752x_error_info discard; | 1232 | struct e752x_error_info discard; |
1271 | 1233 | ||
1272 | edac_dbg(0, "mci\n"); | 1234 | debugf0("%s(): mci\n", __func__); |
1273 | edac_dbg(0, "Starting Probe1\n"); | 1235 | debugf0("Starting Probe1\n"); |
1274 | 1236 | ||
1275 | /* check to see if device 0 function 1 is enabled; if it isn't, we | 1237 | /* check to see if device 0 function 1 is enabled; if it isn't, we |
1276 | * assume the BIOS has reserved it for a reason and is expecting | 1238 | * assume the BIOS has reserved it for a reason and is expecting |
@@ -1290,17 +1252,13 @@ static int e752x_probe1(struct pci_dev *pdev, int dev_idx) | |||
1290 | /* Dual channel = 1, Single channel = 0 */ | 1252 | /* Dual channel = 1, Single channel = 0 */ |
1291 | drc_chan = dual_channel_active(ddrcsr); | 1253 | drc_chan = dual_channel_active(ddrcsr); |
1292 | 1254 | ||
1293 | layers[0].type = EDAC_MC_LAYER_CHIP_SELECT; | 1255 | mci = edac_mc_alloc(sizeof(*pvt), E752X_NR_CSROWS, drc_chan + 1, 0); |
1294 | layers[0].size = E752X_NR_CSROWS; | 1256 | |
1295 | layers[0].is_virt_csrow = true; | 1257 | if (mci == NULL) { |
1296 | layers[1].type = EDAC_MC_LAYER_CHANNEL; | ||
1297 | layers[1].size = drc_chan + 1; | ||
1298 | layers[1].is_virt_csrow = false; | ||
1299 | mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers, sizeof(*pvt)); | ||
1300 | if (mci == NULL) | ||
1301 | return -ENOMEM; | 1258 | return -ENOMEM; |
1259 | } | ||
1302 | 1260 | ||
1303 | edac_dbg(3, "init mci\n"); | 1261 | debugf3("%s(): init mci\n", __func__); |
1304 | mci->mtype_cap = MEM_FLAG_RDDR; | 1262 | mci->mtype_cap = MEM_FLAG_RDDR; |
1305 | /* 3100 IMCH supports SECDEC only */ | 1263 | /* 3100 IMCH supports SECDEC only */ |
1306 | mci->edac_ctl_cap = (dev_idx == I3100) ? EDAC_FLAG_SECDED : | 1264 | mci->edac_ctl_cap = (dev_idx == I3100) ? EDAC_FLAG_SECDED : |
@@ -1308,9 +1266,9 @@ static int e752x_probe1(struct pci_dev *pdev, int dev_idx) | |||
1308 | /* FIXME - what if different memory types are in different csrows? */ | 1266 | /* FIXME - what if different memory types are in different csrows? */ |
1309 | mci->mod_name = EDAC_MOD_STR; | 1267 | mci->mod_name = EDAC_MOD_STR; |
1310 | mci->mod_ver = E752X_REVISION; | 1268 | mci->mod_ver = E752X_REVISION; |
1311 | mci->pdev = &pdev->dev; | 1269 | mci->dev = &pdev->dev; |
1312 | 1270 | ||
1313 | edac_dbg(3, "init pvt\n"); | 1271 | debugf3("%s(): init pvt\n", __func__); |
1314 | pvt = (struct e752x_pvt *)mci->pvt_info; | 1272 | pvt = (struct e752x_pvt *)mci->pvt_info; |
1315 | pvt->dev_info = &e752x_devs[dev_idx]; | 1273 | pvt->dev_info = &e752x_devs[dev_idx]; |
1316 | pvt->mc_symmetric = ((ddrcsr & 0x10) != 0); | 1274 | pvt->mc_symmetric = ((ddrcsr & 0x10) != 0); |
@@ -1320,7 +1278,7 @@ static int e752x_probe1(struct pci_dev *pdev, int dev_idx) | |||
1320 | return -ENODEV; | 1278 | return -ENODEV; |
1321 | } | 1279 | } |
1322 | 1280 | ||
1323 | edac_dbg(3, "more mci init\n"); | 1281 | debugf3("%s(): more mci init\n", __func__); |
1324 | mci->ctl_name = pvt->dev_info->ctl_name; | 1282 | mci->ctl_name = pvt->dev_info->ctl_name; |
1325 | mci->dev_name = pci_name(pdev); | 1283 | mci->dev_name = pci_name(pdev); |
1326 | mci->edac_check = e752x_check; | 1284 | mci->edac_check = e752x_check; |
@@ -1342,7 +1300,7 @@ static int e752x_probe1(struct pci_dev *pdev, int dev_idx) | |||
1342 | mci->edac_cap = EDAC_FLAG_SECDED; /* the only mode supported */ | 1300 | mci->edac_cap = EDAC_FLAG_SECDED; /* the only mode supported */ |
1343 | else | 1301 | else |
1344 | mci->edac_cap |= EDAC_FLAG_NONE; | 1302 | mci->edac_cap |= EDAC_FLAG_NONE; |
1345 | edac_dbg(3, "tolm, remapbase, remaplimit\n"); | 1303 | debugf3("%s(): tolm, remapbase, remaplimit\n", __func__); |
1346 | 1304 | ||
1347 | /* load the top of low memory, remap base, and remap limit vars */ | 1305 | /* load the top of low memory, remap base, and remap limit vars */ |
1348 | pci_read_config_word(pdev, E752X_TOLM, &pci_data); | 1306 | pci_read_config_word(pdev, E752X_TOLM, &pci_data); |
@@ -1359,7 +1317,7 @@ static int e752x_probe1(struct pci_dev *pdev, int dev_idx) | |||
1359 | * type of memory controller. The ID is therefore hardcoded to 0. | 1317 | * type of memory controller. The ID is therefore hardcoded to 0. |
1360 | */ | 1318 | */ |
1361 | if (edac_mc_add_mc(mci)) { | 1319 | if (edac_mc_add_mc(mci)) { |
1362 | edac_dbg(3, "failed edac_mc_add_mc()\n"); | 1320 | debugf3("%s(): failed edac_mc_add_mc()\n", __func__); |
1363 | goto fail; | 1321 | goto fail; |
1364 | } | 1322 | } |
1365 | 1323 | ||
@@ -1377,7 +1335,7 @@ static int e752x_probe1(struct pci_dev *pdev, int dev_idx) | |||
1377 | } | 1335 | } |
1378 | 1336 | ||
1379 | /* get this far and it's successful */ | 1337 | /* get this far and it's successful */ |
1380 | edac_dbg(3, "success\n"); | 1338 | debugf3("%s(): success\n", __func__); |
1381 | return 0; | 1339 | return 0; |
1382 | 1340 | ||
1383 | fail: | 1341 | fail: |
@@ -1390,9 +1348,10 @@ fail: | |||
1390 | } | 1348 | } |
1391 | 1349 | ||
1392 | /* returns count (>= 0), or negative on error */ | 1350 | /* returns count (>= 0), or negative on error */ |
1393 | static int e752x_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) | 1351 | static int __devinit e752x_init_one(struct pci_dev *pdev, |
1352 | const struct pci_device_id *ent) | ||
1394 | { | 1353 | { |
1395 | edac_dbg(0, "\n"); | 1354 | debugf0("%s()\n", __func__); |
1396 | 1355 | ||
1397 | /* wake up and enable device */ | 1356 | /* wake up and enable device */ |
1398 | if (pci_enable_device(pdev) < 0) | 1357 | if (pci_enable_device(pdev) < 0) |
@@ -1401,12 +1360,12 @@ static int e752x_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
1401 | return e752x_probe1(pdev, ent->driver_data); | 1360 | return e752x_probe1(pdev, ent->driver_data); |
1402 | } | 1361 | } |
1403 | 1362 | ||
1404 | static void e752x_remove_one(struct pci_dev *pdev) | 1363 | static void __devexit e752x_remove_one(struct pci_dev *pdev) |
1405 | { | 1364 | { |
1406 | struct mem_ctl_info *mci; | 1365 | struct mem_ctl_info *mci; |
1407 | struct e752x_pvt *pvt; | 1366 | struct e752x_pvt *pvt; |
1408 | 1367 | ||
1409 | edac_dbg(0, "\n"); | 1368 | debugf0("%s()\n", __func__); |
1410 | 1369 | ||
1411 | if (e752x_pci) | 1370 | if (e752x_pci) |
1412 | edac_pci_release_generic_ctl(e752x_pci); | 1371 | edac_pci_release_generic_ctl(e752x_pci); |
@@ -1421,7 +1380,7 @@ static void e752x_remove_one(struct pci_dev *pdev) | |||
1421 | edac_mc_free(mci); | 1380 | edac_mc_free(mci); |
1422 | } | 1381 | } |
1423 | 1382 | ||
1424 | static DEFINE_PCI_DEVICE_TABLE(e752x_pci_tbl) = { | 1383 | static const struct pci_device_id e752x_pci_tbl[] __devinitdata = { |
1425 | { | 1384 | { |
1426 | PCI_VEND_DEV(INTEL, 7520_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0, | 1385 | PCI_VEND_DEV(INTEL, 7520_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0, |
1427 | E7520}, | 1386 | E7520}, |
@@ -1444,7 +1403,7 @@ MODULE_DEVICE_TABLE(pci, e752x_pci_tbl); | |||
1444 | static struct pci_driver e752x_driver = { | 1403 | static struct pci_driver e752x_driver = { |
1445 | .name = EDAC_MOD_STR, | 1404 | .name = EDAC_MOD_STR, |
1446 | .probe = e752x_init_one, | 1405 | .probe = e752x_init_one, |
1447 | .remove = e752x_remove_one, | 1406 | .remove = __devexit_p(e752x_remove_one), |
1448 | .id_table = e752x_pci_tbl, | 1407 | .id_table = e752x_pci_tbl, |
1449 | }; | 1408 | }; |
1450 | 1409 | ||
@@ -1452,7 +1411,7 @@ static int __init e752x_init(void) | |||
1452 | { | 1411 | { |
1453 | int pci_rc; | 1412 | int pci_rc; |
1454 | 1413 | ||
1455 | edac_dbg(3, "\n"); | 1414 | debugf3("%s()\n", __func__); |
1456 | 1415 | ||
1457 | /* Ensure that the OPSTATE is set correctly for POLL or NMI */ | 1416 | /* Ensure that the OPSTATE is set correctly for POLL or NMI */ |
1458 | opstate_init(); | 1417 | opstate_init(); |
@@ -1463,7 +1422,7 @@ static int __init e752x_init(void) | |||
1463 | 1422 | ||
1464 | static void __exit e752x_exit(void) | 1423 | static void __exit e752x_exit(void) |
1465 | { | 1424 | { |
1466 | edac_dbg(3, "\n"); | 1425 | debugf3("%s()\n", __func__); |
1467 | pci_unregister_driver(&e752x_driver); | 1426 | pci_unregister_driver(&e752x_driver); |
1468 | } | 1427 | } |
1469 | 1428 | ||
diff --git a/drivers/edac/e7xxx_edac.c b/drivers/edac/e7xxx_edac.c index 1c4056a5038..6ffb6d23281 100644 --- a/drivers/edac/e7xxx_edac.c +++ b/drivers/edac/e7xxx_edac.c | |||
@@ -10,9 +10,6 @@ | |||
10 | * Based on work by Dan Hollis <goemon at anime dot net> and others. | 10 | * Based on work by Dan Hollis <goemon at anime dot net> and others. |
11 | * http://www.anime.net/~goemon/linux-ecc/ | 11 | * http://www.anime.net/~goemon/linux-ecc/ |
12 | * | 12 | * |
13 | * Datasheet: | ||
14 | * http://www.intel.com/content/www/us/en/chipsets/e7501-chipset-memory-controller-hub-datasheet.html | ||
15 | * | ||
16 | * Contributors: | 13 | * Contributors: |
17 | * Eric Biederman (Linux Networx) | 14 | * Eric Biederman (Linux Networx) |
18 | * Tom Zimmerman (Linux Networx) | 15 | * Tom Zimmerman (Linux Networx) |
@@ -74,7 +71,7 @@ | |||
74 | #endif /* PCI_DEVICE_ID_INTEL_7505_1_ERR */ | 71 | #endif /* PCI_DEVICE_ID_INTEL_7505_1_ERR */ |
75 | 72 | ||
76 | #define E7XXX_NR_CSROWS 8 /* number of csrows */ | 73 | #define E7XXX_NR_CSROWS 8 /* number of csrows */ |
77 | #define E7XXX_NR_DIMMS 8 /* 2 channels, 4 dimms/channel */ | 74 | #define E7XXX_NR_DIMMS 8 /* FIXME - is this correct? */ |
78 | 75 | ||
79 | /* E7XXX register addresses - device 0 function 0 */ | 76 | /* E7XXX register addresses - device 0 function 0 */ |
80 | #define E7XXX_DRB 0x60 /* DRAM row boundary register (8b) */ | 77 | #define E7XXX_DRB 0x60 /* DRAM row boundary register (8b) */ |
@@ -166,7 +163,7 @@ static const struct e7xxx_dev_info e7xxx_devs[] = { | |||
166 | /* FIXME - is this valid for both SECDED and S4ECD4ED? */ | 163 | /* FIXME - is this valid for both SECDED and S4ECD4ED? */ |
167 | static inline int e7xxx_find_channel(u16 syndrome) | 164 | static inline int e7xxx_find_channel(u16 syndrome) |
168 | { | 165 | { |
169 | edac_dbg(3, "\n"); | 166 | debugf3("%s()\n", __func__); |
170 | 167 | ||
171 | if ((syndrome & 0xff00) == 0) | 168 | if ((syndrome & 0xff00) == 0) |
172 | return 0; | 169 | return 0; |
@@ -186,7 +183,7 @@ static unsigned long ctl_page_to_phys(struct mem_ctl_info *mci, | |||
186 | u32 remap; | 183 | u32 remap; |
187 | struct e7xxx_pvt *pvt = (struct e7xxx_pvt *)mci->pvt_info; | 184 | struct e7xxx_pvt *pvt = (struct e7xxx_pvt *)mci->pvt_info; |
188 | 185 | ||
189 | edac_dbg(3, "\n"); | 186 | debugf3("%s()\n", __func__); |
190 | 187 | ||
191 | if ((page < pvt->tolm) || | 188 | if ((page < pvt->tolm) || |
192 | ((page >= 0x100000) && (page < pvt->remapbase))) | 189 | ((page >= 0x100000) && (page < pvt->remapbase))) |
@@ -208,7 +205,7 @@ static void process_ce(struct mem_ctl_info *mci, struct e7xxx_error_info *info) | |||
208 | int row; | 205 | int row; |
209 | int channel; | 206 | int channel; |
210 | 207 | ||
211 | edac_dbg(3, "\n"); | 208 | debugf3("%s()\n", __func__); |
212 | /* read the error address */ | 209 | /* read the error address */ |
213 | error_1b = info->dram_celog_add; | 210 | error_1b = info->dram_celog_add; |
214 | /* FIXME - should use PAGE_SHIFT */ | 211 | /* FIXME - should use PAGE_SHIFT */ |
@@ -219,15 +216,13 @@ static void process_ce(struct mem_ctl_info *mci, struct e7xxx_error_info *info) | |||
219 | row = edac_mc_find_csrow_by_page(mci, page); | 216 | row = edac_mc_find_csrow_by_page(mci, page); |
220 | /* convert syndrome to channel */ | 217 | /* convert syndrome to channel */ |
221 | channel = e7xxx_find_channel(syndrome); | 218 | channel = e7xxx_find_channel(syndrome); |
222 | edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1, page, 0, syndrome, | 219 | edac_mc_handle_ce(mci, page, 0, syndrome, row, channel, "e7xxx CE"); |
223 | row, channel, -1, "e7xxx CE", ""); | ||
224 | } | 220 | } |
225 | 221 | ||
226 | static void process_ce_no_info(struct mem_ctl_info *mci) | 222 | static void process_ce_no_info(struct mem_ctl_info *mci) |
227 | { | 223 | { |
228 | edac_dbg(3, "\n"); | 224 | debugf3("%s()\n", __func__); |
229 | edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1, 0, 0, 0, -1, -1, -1, | 225 | edac_mc_handle_ce_no_info(mci, "e7xxx CE log register overflow"); |
230 | "e7xxx CE log register overflow", ""); | ||
231 | } | 226 | } |
232 | 227 | ||
233 | static void process_ue(struct mem_ctl_info *mci, struct e7xxx_error_info *info) | 228 | static void process_ue(struct mem_ctl_info *mci, struct e7xxx_error_info *info) |
@@ -235,23 +230,19 @@ static void process_ue(struct mem_ctl_info *mci, struct e7xxx_error_info *info) | |||
235 | u32 error_2b, block_page; | 230 | u32 error_2b, block_page; |
236 | int row; | 231 | int row; |
237 | 232 | ||
238 | edac_dbg(3, "\n"); | 233 | debugf3("%s()\n", __func__); |
239 | /* read the error address */ | 234 | /* read the error address */ |
240 | error_2b = info->dram_uelog_add; | 235 | error_2b = info->dram_uelog_add; |
241 | /* FIXME - should use PAGE_SHIFT */ | 236 | /* FIXME - should use PAGE_SHIFT */ |
242 | block_page = error_2b >> 6; /* convert to 4k address */ | 237 | block_page = error_2b >> 6; /* convert to 4k address */ |
243 | row = edac_mc_find_csrow_by_page(mci, block_page); | 238 | row = edac_mc_find_csrow_by_page(mci, block_page); |
244 | 239 | edac_mc_handle_ue(mci, block_page, 0, row, "e7xxx UE"); | |
245 | edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1, block_page, 0, 0, | ||
246 | row, -1, -1, "e7xxx UE", ""); | ||
247 | } | 240 | } |
248 | 241 | ||
249 | static void process_ue_no_info(struct mem_ctl_info *mci) | 242 | static void process_ue_no_info(struct mem_ctl_info *mci) |
250 | { | 243 | { |
251 | edac_dbg(3, "\n"); | 244 | debugf3("%s()\n", __func__); |
252 | 245 | edac_mc_handle_ue_no_info(mci, "e7xxx UE log register overflow"); | |
253 | edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1, 0, 0, 0, -1, -1, -1, | ||
254 | "e7xxx UE log register overflow", ""); | ||
255 | } | 246 | } |
256 | 247 | ||
257 | static void e7xxx_get_error_info(struct mem_ctl_info *mci, | 248 | static void e7xxx_get_error_info(struct mem_ctl_info *mci, |
@@ -334,7 +325,7 @@ static void e7xxx_check(struct mem_ctl_info *mci) | |||
334 | { | 325 | { |
335 | struct e7xxx_error_info info; | 326 | struct e7xxx_error_info info; |
336 | 327 | ||
337 | edac_dbg(3, "\n"); | 328 | debugf3("%s()\n", __func__); |
338 | e7xxx_get_error_info(mci, &info); | 329 | e7xxx_get_error_info(mci, &info); |
339 | e7xxx_process_error_info(mci, &info, 1); | 330 | e7xxx_process_error_info(mci, &info, 1); |
340 | } | 331 | } |
@@ -356,13 +347,11 @@ static void e7xxx_init_csrows(struct mem_ctl_info *mci, struct pci_dev *pdev, | |||
356 | int dev_idx, u32 drc) | 347 | int dev_idx, u32 drc) |
357 | { | 348 | { |
358 | unsigned long last_cumul_size; | 349 | unsigned long last_cumul_size; |
359 | int index, j; | 350 | int index; |
360 | u8 value; | 351 | u8 value; |
361 | u32 dra, cumul_size, nr_pages; | 352 | u32 dra, cumul_size; |
362 | int drc_chan, drc_drbg, drc_ddim, mem_dev; | 353 | int drc_chan, drc_drbg, drc_ddim, mem_dev; |
363 | struct csrow_info *csrow; | 354 | struct csrow_info *csrow; |
364 | struct dimm_info *dimm; | ||
365 | enum edac_type edac_mode; | ||
366 | 355 | ||
367 | pci_read_config_dword(pdev, E7XXX_DRA, &dra); | 356 | pci_read_config_dword(pdev, E7XXX_DRA, &dra); |
368 | drc_chan = dual_channel_active(drc, dev_idx); | 357 | drc_chan = dual_channel_active(drc, dev_idx); |
@@ -378,44 +367,38 @@ static void e7xxx_init_csrows(struct mem_ctl_info *mci, struct pci_dev *pdev, | |||
378 | for (index = 0; index < mci->nr_csrows; index++) { | 367 | for (index = 0; index < mci->nr_csrows; index++) { |
379 | /* mem_dev 0=x8, 1=x4 */ | 368 | /* mem_dev 0=x8, 1=x4 */ |
380 | mem_dev = (dra >> (index * 4 + 3)) & 0x1; | 369 | mem_dev = (dra >> (index * 4 + 3)) & 0x1; |
381 | csrow = mci->csrows[index]; | 370 | csrow = &mci->csrows[index]; |
382 | 371 | ||
383 | pci_read_config_byte(pdev, E7XXX_DRB + index, &value); | 372 | pci_read_config_byte(pdev, E7XXX_DRB + index, &value); |
384 | /* convert a 64 or 32 MiB DRB to a page size. */ | 373 | /* convert a 64 or 32 MiB DRB to a page size. */ |
385 | cumul_size = value << (25 + drc_drbg - PAGE_SHIFT); | 374 | cumul_size = value << (25 + drc_drbg - PAGE_SHIFT); |
386 | edac_dbg(3, "(%d) cumul_size 0x%x\n", index, cumul_size); | 375 | debugf3("%s(): (%d) cumul_size 0x%x\n", __func__, index, |
376 | cumul_size); | ||
387 | if (cumul_size == last_cumul_size) | 377 | if (cumul_size == last_cumul_size) |
388 | continue; /* not populated */ | 378 | continue; /* not populated */ |
389 | 379 | ||
390 | csrow->first_page = last_cumul_size; | 380 | csrow->first_page = last_cumul_size; |
391 | csrow->last_page = cumul_size - 1; | 381 | csrow->last_page = cumul_size - 1; |
392 | nr_pages = cumul_size - last_cumul_size; | 382 | csrow->nr_pages = cumul_size - last_cumul_size; |
393 | last_cumul_size = cumul_size; | 383 | last_cumul_size = cumul_size; |
384 | csrow->grain = 1 << 12; /* 4KiB - resolution of CELOG */ | ||
385 | csrow->mtype = MEM_RDDR; /* only one type supported */ | ||
386 | csrow->dtype = mem_dev ? DEV_X4 : DEV_X8; | ||
394 | 387 | ||
395 | /* | 388 | /* |
396 | * if single channel or x8 devices then SECDED | 389 | * if single channel or x8 devices then SECDED |
397 | * if dual channel and x4 then S4ECD4ED | 390 | * if dual channel and x4 then S4ECD4ED |
398 | */ | 391 | */ |
399 | if (drc_ddim) { | 392 | if (drc_ddim) { |
400 | if (drc_chan && mem_dev) { | 393 | if (drc_chan && mem_dev) { |
401 | edac_mode = EDAC_S4ECD4ED; | 394 | csrow->edac_mode = EDAC_S4ECD4ED; |
402 | mci->edac_cap |= EDAC_FLAG_S4ECD4ED; | 395 | mci->edac_cap |= EDAC_FLAG_S4ECD4ED; |
403 | } else { | 396 | } else { |
404 | edac_mode = EDAC_SECDED; | 397 | csrow->edac_mode = EDAC_SECDED; |
405 | mci->edac_cap |= EDAC_FLAG_SECDED; | 398 | mci->edac_cap |= EDAC_FLAG_SECDED; |
406 | } | 399 | } |
407 | } else | 400 | } else |
408 | edac_mode = EDAC_NONE; | 401 | csrow->edac_mode = EDAC_NONE; |
409 | |||
410 | for (j = 0; j < drc_chan + 1; j++) { | ||
411 | dimm = csrow->channels[j]->dimm; | ||
412 | |||
413 | dimm->nr_pages = nr_pages / (drc_chan + 1); | ||
414 | dimm->grain = 1 << 12; /* 4KiB - resolution of CELOG */ | ||
415 | dimm->mtype = MEM_RDDR; /* only one type supported */ | ||
416 | dimm->dtype = mem_dev ? DEV_X4 : DEV_X8; | ||
417 | dimm->edac_mode = edac_mode; | ||
418 | } | ||
419 | } | 402 | } |
420 | } | 403 | } |
421 | 404 | ||
@@ -423,44 +406,30 @@ static int e7xxx_probe1(struct pci_dev *pdev, int dev_idx) | |||
423 | { | 406 | { |
424 | u16 pci_data; | 407 | u16 pci_data; |
425 | struct mem_ctl_info *mci = NULL; | 408 | struct mem_ctl_info *mci = NULL; |
426 | struct edac_mc_layer layers[2]; | ||
427 | struct e7xxx_pvt *pvt = NULL; | 409 | struct e7xxx_pvt *pvt = NULL; |
428 | u32 drc; | 410 | u32 drc; |
429 | int drc_chan; | 411 | int drc_chan; |
430 | struct e7xxx_error_info discard; | 412 | struct e7xxx_error_info discard; |
431 | 413 | ||
432 | edac_dbg(0, "mci\n"); | 414 | debugf0("%s(): mci\n", __func__); |
433 | 415 | ||
434 | pci_read_config_dword(pdev, E7XXX_DRC, &drc); | 416 | pci_read_config_dword(pdev, E7XXX_DRC, &drc); |
435 | 417 | ||
436 | drc_chan = dual_channel_active(drc, dev_idx); | 418 | drc_chan = dual_channel_active(drc, dev_idx); |
437 | /* | 419 | mci = edac_mc_alloc(sizeof(*pvt), E7XXX_NR_CSROWS, drc_chan + 1, 0); |
438 | * According with the datasheet, this device has a maximum of | 420 | |
439 | * 4 DIMMS per channel, either single-rank or dual-rank. So, the | ||
440 | * total amount of dimms is 8 (E7XXX_NR_DIMMS). | ||
441 | * That means that the DIMM is mapped as CSROWs, and the channel | ||
442 | * will map the rank. So, an error to either channel should be | ||
443 | * attributed to the same dimm. | ||
444 | */ | ||
445 | layers[0].type = EDAC_MC_LAYER_CHIP_SELECT; | ||
446 | layers[0].size = E7XXX_NR_CSROWS; | ||
447 | layers[0].is_virt_csrow = true; | ||
448 | layers[1].type = EDAC_MC_LAYER_CHANNEL; | ||
449 | layers[1].size = drc_chan + 1; | ||
450 | layers[1].is_virt_csrow = false; | ||
451 | mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers, sizeof(*pvt)); | ||
452 | if (mci == NULL) | 421 | if (mci == NULL) |
453 | return -ENOMEM; | 422 | return -ENOMEM; |
454 | 423 | ||
455 | edac_dbg(3, "init mci\n"); | 424 | debugf3("%s(): init mci\n", __func__); |
456 | mci->mtype_cap = MEM_FLAG_RDDR; | 425 | mci->mtype_cap = MEM_FLAG_RDDR; |
457 | mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED | | 426 | mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED | |
458 | EDAC_FLAG_S4ECD4ED; | 427 | EDAC_FLAG_S4ECD4ED; |
459 | /* FIXME - what if different memory types are in different csrows? */ | 428 | /* FIXME - what if different memory types are in different csrows? */ |
460 | mci->mod_name = EDAC_MOD_STR; | 429 | mci->mod_name = EDAC_MOD_STR; |
461 | mci->mod_ver = E7XXX_REVISION; | 430 | mci->mod_ver = E7XXX_REVISION; |
462 | mci->pdev = &pdev->dev; | 431 | mci->dev = &pdev->dev; |
463 | edac_dbg(3, "init pvt\n"); | 432 | debugf3("%s(): init pvt\n", __func__); |
464 | pvt = (struct e7xxx_pvt *)mci->pvt_info; | 433 | pvt = (struct e7xxx_pvt *)mci->pvt_info; |
465 | pvt->dev_info = &e7xxx_devs[dev_idx]; | 434 | pvt->dev_info = &e7xxx_devs[dev_idx]; |
466 | pvt->bridge_ck = pci_get_device(PCI_VENDOR_ID_INTEL, | 435 | pvt->bridge_ck = pci_get_device(PCI_VENDOR_ID_INTEL, |
@@ -473,14 +442,14 @@ static int e7xxx_probe1(struct pci_dev *pdev, int dev_idx) | |||
473 | goto fail0; | 442 | goto fail0; |
474 | } | 443 | } |
475 | 444 | ||
476 | edac_dbg(3, "more mci init\n"); | 445 | debugf3("%s(): more mci init\n", __func__); |
477 | mci->ctl_name = pvt->dev_info->ctl_name; | 446 | mci->ctl_name = pvt->dev_info->ctl_name; |
478 | mci->dev_name = pci_name(pdev); | 447 | mci->dev_name = pci_name(pdev); |
479 | mci->edac_check = e7xxx_check; | 448 | mci->edac_check = e7xxx_check; |
480 | mci->ctl_page_to_phys = ctl_page_to_phys; | 449 | mci->ctl_page_to_phys = ctl_page_to_phys; |
481 | e7xxx_init_csrows(mci, pdev, dev_idx, drc); | 450 | e7xxx_init_csrows(mci, pdev, dev_idx, drc); |
482 | mci->edac_cap |= EDAC_FLAG_NONE; | 451 | mci->edac_cap |= EDAC_FLAG_NONE; |
483 | edac_dbg(3, "tolm, remapbase, remaplimit\n"); | 452 | debugf3("%s(): tolm, remapbase, remaplimit\n", __func__); |
484 | /* load the top of low memory, remap base, and remap limit vars */ | 453 | /* load the top of low memory, remap base, and remap limit vars */ |
485 | pci_read_config_word(pdev, E7XXX_TOLM, &pci_data); | 454 | pci_read_config_word(pdev, E7XXX_TOLM, &pci_data); |
486 | pvt->tolm = ((u32) pci_data) << 4; | 455 | pvt->tolm = ((u32) pci_data) << 4; |
@@ -499,7 +468,7 @@ static int e7xxx_probe1(struct pci_dev *pdev, int dev_idx) | |||
499 | * type of memory controller. The ID is therefore hardcoded to 0. | 468 | * type of memory controller. The ID is therefore hardcoded to 0. |
500 | */ | 469 | */ |
501 | if (edac_mc_add_mc(mci)) { | 470 | if (edac_mc_add_mc(mci)) { |
502 | edac_dbg(3, "failed edac_mc_add_mc()\n"); | 471 | debugf3("%s(): failed edac_mc_add_mc()\n", __func__); |
503 | goto fail1; | 472 | goto fail1; |
504 | } | 473 | } |
505 | 474 | ||
@@ -515,7 +484,7 @@ static int e7xxx_probe1(struct pci_dev *pdev, int dev_idx) | |||
515 | } | 484 | } |
516 | 485 | ||
517 | /* get this far and it's successful */ | 486 | /* get this far and it's successful */ |
518 | edac_dbg(3, "success\n"); | 487 | debugf3("%s(): success\n", __func__); |
519 | return 0; | 488 | return 0; |
520 | 489 | ||
521 | fail1: | 490 | fail1: |
@@ -528,21 +497,22 @@ fail0: | |||
528 | } | 497 | } |
529 | 498 | ||
530 | /* returns count (>= 0), or negative on error */ | 499 | /* returns count (>= 0), or negative on error */ |
531 | static int e7xxx_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) | 500 | static int __devinit e7xxx_init_one(struct pci_dev *pdev, |
501 | const struct pci_device_id *ent) | ||
532 | { | 502 | { |
533 | edac_dbg(0, "\n"); | 503 | debugf0("%s()\n", __func__); |
534 | 504 | ||
535 | /* wake up and enable device */ | 505 | /* wake up and enable device */ |
536 | return pci_enable_device(pdev) ? | 506 | return pci_enable_device(pdev) ? |
537 | -EIO : e7xxx_probe1(pdev, ent->driver_data); | 507 | -EIO : e7xxx_probe1(pdev, ent->driver_data); |
538 | } | 508 | } |
539 | 509 | ||
540 | static void e7xxx_remove_one(struct pci_dev *pdev) | 510 | static void __devexit e7xxx_remove_one(struct pci_dev *pdev) |
541 | { | 511 | { |
542 | struct mem_ctl_info *mci; | 512 | struct mem_ctl_info *mci; |
543 | struct e7xxx_pvt *pvt; | 513 | struct e7xxx_pvt *pvt; |
544 | 514 | ||
545 | edac_dbg(0, "\n"); | 515 | debugf0("%s()\n", __func__); |
546 | 516 | ||
547 | if (e7xxx_pci) | 517 | if (e7xxx_pci) |
548 | edac_pci_release_generic_ctl(e7xxx_pci); | 518 | edac_pci_release_generic_ctl(e7xxx_pci); |
@@ -555,7 +525,7 @@ static void e7xxx_remove_one(struct pci_dev *pdev) | |||
555 | edac_mc_free(mci); | 525 | edac_mc_free(mci); |
556 | } | 526 | } |
557 | 527 | ||
558 | static DEFINE_PCI_DEVICE_TABLE(e7xxx_pci_tbl) = { | 528 | static const struct pci_device_id e7xxx_pci_tbl[] __devinitdata = { |
559 | { | 529 | { |
560 | PCI_VEND_DEV(INTEL, 7205_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0, | 530 | PCI_VEND_DEV(INTEL, 7205_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0, |
561 | E7205}, | 531 | E7205}, |
@@ -578,7 +548,7 @@ MODULE_DEVICE_TABLE(pci, e7xxx_pci_tbl); | |||
578 | static struct pci_driver e7xxx_driver = { | 548 | static struct pci_driver e7xxx_driver = { |
579 | .name = EDAC_MOD_STR, | 549 | .name = EDAC_MOD_STR, |
580 | .probe = e7xxx_init_one, | 550 | .probe = e7xxx_init_one, |
581 | .remove = e7xxx_remove_one, | 551 | .remove = __devexit_p(e7xxx_remove_one), |
582 | .id_table = e7xxx_pci_tbl, | 552 | .id_table = e7xxx_pci_tbl, |
583 | }; | 553 | }; |
584 | 554 | ||
diff --git a/drivers/edac/edac_core.h b/drivers/edac/edac_core.h index 23bb99fa44f..55b8278bb17 100644 --- a/drivers/edac/edac_core.h +++ b/drivers/edac/edac_core.h | |||
@@ -32,11 +32,13 @@ | |||
32 | #include <linux/completion.h> | 32 | #include <linux/completion.h> |
33 | #include <linux/kobject.h> | 33 | #include <linux/kobject.h> |
34 | #include <linux/platform_device.h> | 34 | #include <linux/platform_device.h> |
35 | #include <linux/sysdev.h> | ||
35 | #include <linux/workqueue.h> | 36 | #include <linux/workqueue.h> |
36 | #include <linux/edac.h> | ||
37 | 37 | ||
38 | #define EDAC_MC_LABEL_LEN 31 | ||
38 | #define EDAC_DEVICE_NAME_LEN 31 | 39 | #define EDAC_DEVICE_NAME_LEN 31 |
39 | #define EDAC_ATTRIB_VALUE_LEN 15 | 40 | #define EDAC_ATTRIB_VALUE_LEN 15 |
41 | #define MC_PROC_NAME_MAX_LEN 7 | ||
40 | 42 | ||
41 | #if PAGE_SHIFT < 20 | 43 | #if PAGE_SHIFT < 20 |
42 | #define PAGES_TO_MiB(pages) ((pages) >> (20 - PAGE_SHIFT)) | 44 | #define PAGES_TO_MiB(pages) ((pages) >> (20 - PAGE_SHIFT)) |
@@ -71,21 +73,26 @@ extern const char *edac_mem_types[]; | |||
71 | #ifdef CONFIG_EDAC_DEBUG | 73 | #ifdef CONFIG_EDAC_DEBUG |
72 | extern int edac_debug_level; | 74 | extern int edac_debug_level; |
73 | 75 | ||
74 | #define edac_dbg(level, fmt, ...) \ | 76 | #define edac_debug_printk(level, fmt, arg...) \ |
75 | do { \ | 77 | do { \ |
76 | if (level <= edac_debug_level) \ | 78 | if (level <= edac_debug_level) \ |
77 | edac_printk(KERN_DEBUG, EDAC_DEBUG, \ | 79 | edac_printk(KERN_DEBUG, EDAC_DEBUG, \ |
78 | "%s: " fmt, __func__, ##__VA_ARGS__); \ | 80 | "%s: " fmt, __func__, ##arg); \ |
79 | } while (0) | 81 | } while (0) |
82 | |||
83 | #define debugf0( ... ) edac_debug_printk(0, __VA_ARGS__ ) | ||
84 | #define debugf1( ... ) edac_debug_printk(1, __VA_ARGS__ ) | ||
85 | #define debugf2( ... ) edac_debug_printk(2, __VA_ARGS__ ) | ||
86 | #define debugf3( ... ) edac_debug_printk(3, __VA_ARGS__ ) | ||
87 | #define debugf4( ... ) edac_debug_printk(4, __VA_ARGS__ ) | ||
80 | 88 | ||
81 | #else /* !CONFIG_EDAC_DEBUG */ | 89 | #else /* !CONFIG_EDAC_DEBUG */ |
82 | 90 | ||
83 | #define edac_dbg(level, fmt, ...) \ | 91 | #define debugf0( ... ) |
84 | do { \ | 92 | #define debugf1( ... ) |
85 | if (0) \ | 93 | #define debugf2( ... ) |
86 | edac_printk(KERN_DEBUG, EDAC_DEBUG, \ | 94 | #define debugf3( ... ) |
87 | "%s: " fmt, __func__, ##__VA_ARGS__); \ | 95 | #define debugf4( ... ) |
88 | } while (0) | ||
89 | 96 | ||
90 | #endif /* !CONFIG_EDAC_DEBUG */ | 97 | #endif /* !CONFIG_EDAC_DEBUG */ |
91 | 98 | ||
@@ -94,6 +101,353 @@ do { \ | |||
94 | 101 | ||
95 | #define edac_dev_name(dev) (dev)->dev_name | 102 | #define edac_dev_name(dev) (dev)->dev_name |
96 | 103 | ||
104 | /* memory devices */ | ||
105 | enum dev_type { | ||
106 | DEV_UNKNOWN = 0, | ||
107 | DEV_X1, | ||
108 | DEV_X2, | ||
109 | DEV_X4, | ||
110 | DEV_X8, | ||
111 | DEV_X16, | ||
112 | DEV_X32, /* Do these parts exist? */ | ||
113 | DEV_X64 /* Do these parts exist? */ | ||
114 | }; | ||
115 | |||
116 | #define DEV_FLAG_UNKNOWN BIT(DEV_UNKNOWN) | ||
117 | #define DEV_FLAG_X1 BIT(DEV_X1) | ||
118 | #define DEV_FLAG_X2 BIT(DEV_X2) | ||
119 | #define DEV_FLAG_X4 BIT(DEV_X4) | ||
120 | #define DEV_FLAG_X8 BIT(DEV_X8) | ||
121 | #define DEV_FLAG_X16 BIT(DEV_X16) | ||
122 | #define DEV_FLAG_X32 BIT(DEV_X32) | ||
123 | #define DEV_FLAG_X64 BIT(DEV_X64) | ||
124 | |||
125 | /* memory types */ | ||
126 | enum mem_type { | ||
127 | MEM_EMPTY = 0, /* Empty csrow */ | ||
128 | MEM_RESERVED, /* Reserved csrow type */ | ||
129 | MEM_UNKNOWN, /* Unknown csrow type */ | ||
130 | MEM_FPM, /* Fast page mode */ | ||
131 | MEM_EDO, /* Extended data out */ | ||
132 | MEM_BEDO, /* Burst Extended data out */ | ||
133 | MEM_SDR, /* Single data rate SDRAM */ | ||
134 | MEM_RDR, /* Registered single data rate SDRAM */ | ||
135 | MEM_DDR, /* Double data rate SDRAM */ | ||
136 | MEM_RDDR, /* Registered Double data rate SDRAM */ | ||
137 | MEM_RMBS, /* Rambus DRAM */ | ||
138 | MEM_DDR2, /* DDR2 RAM */ | ||
139 | MEM_FB_DDR2, /* fully buffered DDR2 */ | ||
140 | MEM_RDDR2, /* Registered DDR2 RAM */ | ||
141 | MEM_XDR, /* Rambus XDR */ | ||
142 | MEM_DDR3, /* DDR3 RAM */ | ||
143 | MEM_RDDR3, /* Registered DDR3 RAM */ | ||
144 | }; | ||
145 | |||
146 | #define MEM_FLAG_EMPTY BIT(MEM_EMPTY) | ||
147 | #define MEM_FLAG_RESERVED BIT(MEM_RESERVED) | ||
148 | #define MEM_FLAG_UNKNOWN BIT(MEM_UNKNOWN) | ||
149 | #define MEM_FLAG_FPM BIT(MEM_FPM) | ||
150 | #define MEM_FLAG_EDO BIT(MEM_EDO) | ||
151 | #define MEM_FLAG_BEDO BIT(MEM_BEDO) | ||
152 | #define MEM_FLAG_SDR BIT(MEM_SDR) | ||
153 | #define MEM_FLAG_RDR BIT(MEM_RDR) | ||
154 | #define MEM_FLAG_DDR BIT(MEM_DDR) | ||
155 | #define MEM_FLAG_RDDR BIT(MEM_RDDR) | ||
156 | #define MEM_FLAG_RMBS BIT(MEM_RMBS) | ||
157 | #define MEM_FLAG_DDR2 BIT(MEM_DDR2) | ||
158 | #define MEM_FLAG_FB_DDR2 BIT(MEM_FB_DDR2) | ||
159 | #define MEM_FLAG_RDDR2 BIT(MEM_RDDR2) | ||
160 | #define MEM_FLAG_XDR BIT(MEM_XDR) | ||
161 | #define MEM_FLAG_DDR3 BIT(MEM_DDR3) | ||
162 | #define MEM_FLAG_RDDR3 BIT(MEM_RDDR3) | ||
163 | |||
164 | /* chipset Error Detection and Correction capabilities and mode */ | ||
165 | enum edac_type { | ||
166 | EDAC_UNKNOWN = 0, /* Unknown if ECC is available */ | ||
167 | EDAC_NONE, /* Doesn't support ECC */ | ||
168 | EDAC_RESERVED, /* Reserved ECC type */ | ||
169 | EDAC_PARITY, /* Detects parity errors */ | ||
170 | EDAC_EC, /* Error Checking - no correction */ | ||
171 | EDAC_SECDED, /* Single bit error correction, Double detection */ | ||
172 | EDAC_S2ECD2ED, /* Chipkill x2 devices - do these exist? */ | ||
173 | EDAC_S4ECD4ED, /* Chipkill x4 devices */ | ||
174 | EDAC_S8ECD8ED, /* Chipkill x8 devices */ | ||
175 | EDAC_S16ECD16ED, /* Chipkill x16 devices */ | ||
176 | }; | ||
177 | |||
178 | #define EDAC_FLAG_UNKNOWN BIT(EDAC_UNKNOWN) | ||
179 | #define EDAC_FLAG_NONE BIT(EDAC_NONE) | ||
180 | #define EDAC_FLAG_PARITY BIT(EDAC_PARITY) | ||
181 | #define EDAC_FLAG_EC BIT(EDAC_EC) | ||
182 | #define EDAC_FLAG_SECDED BIT(EDAC_SECDED) | ||
183 | #define EDAC_FLAG_S2ECD2ED BIT(EDAC_S2ECD2ED) | ||
184 | #define EDAC_FLAG_S4ECD4ED BIT(EDAC_S4ECD4ED) | ||
185 | #define EDAC_FLAG_S8ECD8ED BIT(EDAC_S8ECD8ED) | ||
186 | #define EDAC_FLAG_S16ECD16ED BIT(EDAC_S16ECD16ED) | ||
187 | |||
188 | /* scrubbing capabilities */ | ||
189 | enum scrub_type { | ||
190 | SCRUB_UNKNOWN = 0, /* Unknown if scrubber is available */ | ||
191 | SCRUB_NONE, /* No scrubber */ | ||
192 | SCRUB_SW_PROG, /* SW progressive (sequential) scrubbing */ | ||
193 | SCRUB_SW_SRC, /* Software scrub only errors */ | ||
194 | SCRUB_SW_PROG_SRC, /* Progressive software scrub from an error */ | ||
195 | SCRUB_SW_TUNABLE, /* Software scrub frequency is tunable */ | ||
196 | SCRUB_HW_PROG, /* HW progressive (sequential) scrubbing */ | ||
197 | SCRUB_HW_SRC, /* Hardware scrub only errors */ | ||
198 | SCRUB_HW_PROG_SRC, /* Progressive hardware scrub from an error */ | ||
199 | SCRUB_HW_TUNABLE /* Hardware scrub frequency is tunable */ | ||
200 | }; | ||
201 | |||
202 | #define SCRUB_FLAG_SW_PROG BIT(SCRUB_SW_PROG) | ||
203 | #define SCRUB_FLAG_SW_SRC BIT(SCRUB_SW_SRC) | ||
204 | #define SCRUB_FLAG_SW_PROG_SRC BIT(SCRUB_SW_PROG_SRC) | ||
205 | #define SCRUB_FLAG_SW_TUN BIT(SCRUB_SW_SCRUB_TUNABLE) | ||
206 | #define SCRUB_FLAG_HW_PROG BIT(SCRUB_HW_PROG) | ||
207 | #define SCRUB_FLAG_HW_SRC BIT(SCRUB_HW_SRC) | ||
208 | #define SCRUB_FLAG_HW_PROG_SRC BIT(SCRUB_HW_PROG_SRC) | ||
209 | #define SCRUB_FLAG_HW_TUN BIT(SCRUB_HW_TUNABLE) | ||
210 | |||
211 | /* FIXME - should have notify capabilities: NMI, LOG, PROC, etc */ | ||
212 | |||
213 | /* EDAC internal operation states */ | ||
214 | #define OP_ALLOC 0x100 | ||
215 | #define OP_RUNNING_POLL 0x201 | ||
216 | #define OP_RUNNING_INTERRUPT 0x202 | ||
217 | #define OP_RUNNING_POLL_INTR 0x203 | ||
218 | #define OP_OFFLINE 0x300 | ||
219 | |||
220 | /* | ||
221 | * There are several things to be aware of that aren't at all obvious: | ||
222 | * | ||
223 | * | ||
224 | * SOCKETS, SOCKET SETS, BANKS, ROWS, CHIP-SELECT ROWS, CHANNELS, etc.. | ||
225 | * | ||
226 | * These are some of the many terms that are thrown about that don't always | ||
227 | * mean what people think they mean (Inconceivable!). In the interest of | ||
228 | * creating a common ground for discussion, terms and their definitions | ||
229 | * will be established. | ||
230 | * | ||
231 | * Memory devices: The individual chip on a memory stick. These devices | ||
232 | * commonly output 4 and 8 bits each. Grouping several | ||
233 | * of these in parallel provides 64 bits which is common | ||
234 | * for a memory stick. | ||
235 | * | ||
236 | * Memory Stick: A printed circuit board that aggregates multiple | ||
237 | * memory devices in parallel. This is the atomic | ||
238 | * memory component that is purchaseable by Joe consumer | ||
239 | * and loaded into a memory socket. | ||
240 | * | ||
241 | * Socket: A physical connector on the motherboard that accepts | ||
242 | * a single memory stick. | ||
243 | * | ||
244 | * Channel: Set of memory devices on a memory stick that must be | ||
245 | * grouped in parallel with one or more additional | ||
246 | * channels from other memory sticks. This parallel | ||
247 | * grouping of the output from multiple channels are | ||
248 | * necessary for the smallest granularity of memory access. | ||
249 | * Some memory controllers are capable of single channel - | ||
250 | * which means that memory sticks can be loaded | ||
251 | * individually. Other memory controllers are only | ||
252 | * capable of dual channel - which means that memory | ||
253 | * sticks must be loaded as pairs (see "socket set"). | ||
254 | * | ||
255 | * Chip-select row: All of the memory devices that are selected together. | ||
256 | * for a single, minimum grain of memory access. | ||
257 | * This selects all of the parallel memory devices across | ||
258 | * all of the parallel channels. Common chip-select rows | ||
259 | * for single channel are 64 bits, for dual channel 128 | ||
260 | * bits. | ||
261 | * | ||
262 | * Single-Ranked stick: A Single-ranked stick has 1 chip-select row of memory. | ||
263 | * Motherboards commonly drive two chip-select pins to | ||
264 | * a memory stick. A single-ranked stick, will occupy | ||
265 | * only one of those rows. The other will be unused. | ||
266 | * | ||
267 | * Double-Ranked stick: A double-ranked stick has two chip-select rows which | ||
268 | * access different sets of memory devices. The two | ||
269 | * rows cannot be accessed concurrently. | ||
270 | * | ||
271 | * Double-sided stick: DEPRECATED TERM, see Double-Ranked stick. | ||
272 | * A double-sided stick has two chip-select rows which | ||
273 | * access different sets of memory devices. The two | ||
274 | * rows cannot be accessed concurrently. "Double-sided" | ||
275 | * is irrespective of the memory devices being mounted | ||
276 | * on both sides of the memory stick. | ||
277 | * | ||
278 | * Socket set: All of the memory sticks that are required for | ||
279 | * a single memory access or all of the memory sticks | ||
280 | * spanned by a chip-select row. A single socket set | ||
281 | * has two chip-select rows and if double-sided sticks | ||
282 | * are used these will occupy those chip-select rows. | ||
283 | * | ||
284 | * Bank: This term is avoided because it is unclear when | ||
285 | * needing to distinguish between chip-select rows and | ||
286 | * socket sets. | ||
287 | * | ||
288 | * Controller pages: | ||
289 | * | ||
290 | * Physical pages: | ||
291 | * | ||
292 | * Virtual pages: | ||
293 | * | ||
294 | * | ||
295 | * STRUCTURE ORGANIZATION AND CHOICES | ||
296 | * | ||
297 | * | ||
298 | * | ||
299 | * PS - I enjoyed writing all that about as much as you enjoyed reading it. | ||
300 | */ | ||
301 | |||
302 | struct channel_info { | ||
303 | int chan_idx; /* channel index */ | ||
304 | u32 ce_count; /* Correctable Errors for this CHANNEL */ | ||
305 | char label[EDAC_MC_LABEL_LEN + 1]; /* DIMM label on motherboard */ | ||
306 | struct csrow_info *csrow; /* the parent */ | ||
307 | }; | ||
308 | |||
309 | struct csrow_info { | ||
310 | unsigned long first_page; /* first page number in dimm */ | ||
311 | unsigned long last_page; /* last page number in dimm */ | ||
312 | unsigned long page_mask; /* used for interleaving - | ||
313 | * 0UL for non intlv | ||
314 | */ | ||
315 | u32 nr_pages; /* number of pages in csrow */ | ||
316 | u32 grain; /* granularity of reported error in bytes */ | ||
317 | int csrow_idx; /* the chip-select row */ | ||
318 | enum dev_type dtype; /* memory device type */ | ||
319 | u32 ue_count; /* Uncorrectable Errors for this csrow */ | ||
320 | u32 ce_count; /* Correctable Errors for this csrow */ | ||
321 | enum mem_type mtype; /* memory csrow type */ | ||
322 | enum edac_type edac_mode; /* EDAC mode for this csrow */ | ||
323 | struct mem_ctl_info *mci; /* the parent */ | ||
324 | |||
325 | struct kobject kobj; /* sysfs kobject for this csrow */ | ||
326 | |||
327 | /* channel information for this csrow */ | ||
328 | u32 nr_channels; | ||
329 | struct channel_info *channels; | ||
330 | }; | ||
331 | |||
332 | struct mcidev_sysfs_group { | ||
333 | const char *name; /* group name */ | ||
334 | const struct mcidev_sysfs_attribute *mcidev_attr; /* group attributes */ | ||
335 | }; | ||
336 | |||
337 | struct mcidev_sysfs_group_kobj { | ||
338 | struct list_head list; /* list for all instances within a mc */ | ||
339 | |||
340 | struct kobject kobj; /* kobj for the group */ | ||
341 | |||
342 | const struct mcidev_sysfs_group *grp; /* group description table */ | ||
343 | struct mem_ctl_info *mci; /* the parent */ | ||
344 | }; | ||
345 | |||
346 | /* mcidev_sysfs_attribute structure | ||
347 | * used for driver sysfs attributes and in mem_ctl_info | ||
348 | * sysfs top level entries | ||
349 | */ | ||
350 | struct mcidev_sysfs_attribute { | ||
351 | /* It should use either attr or grp */ | ||
352 | struct attribute attr; | ||
353 | const struct mcidev_sysfs_group *grp; /* Points to a group of attributes */ | ||
354 | |||
355 | /* Ops for show/store values at the attribute - not used on group */ | ||
356 | ssize_t (*show)(struct mem_ctl_info *,char *); | ||
357 | ssize_t (*store)(struct mem_ctl_info *, const char *,size_t); | ||
358 | }; | ||
359 | |||
360 | /* MEMORY controller information structure | ||
361 | */ | ||
362 | struct mem_ctl_info { | ||
363 | struct list_head link; /* for global list of mem_ctl_info structs */ | ||
364 | |||
365 | struct module *owner; /* Module owner of this control struct */ | ||
366 | |||
367 | unsigned long mtype_cap; /* memory types supported by mc */ | ||
368 | unsigned long edac_ctl_cap; /* Mem controller EDAC capabilities */ | ||
369 | unsigned long edac_cap; /* configuration capabilities - this is | ||
370 | * closely related to edac_ctl_cap. The | ||
371 | * difference is that the controller may be | ||
372 | * capable of s4ecd4ed which would be listed | ||
373 | * in edac_ctl_cap, but if channels aren't | ||
374 | * capable of s4ecd4ed then the edac_cap would | ||
375 | * not have that capability. | ||
376 | */ | ||
377 | unsigned long scrub_cap; /* chipset scrub capabilities */ | ||
378 | enum scrub_type scrub_mode; /* current scrub mode */ | ||
379 | |||
380 | /* Translates sdram memory scrub rate given in bytes/sec to the | ||
381 | internal representation and configures whatever else needs | ||
382 | to be configured. | ||
383 | */ | ||
384 | int (*set_sdram_scrub_rate) (struct mem_ctl_info * mci, u32 bw); | ||
385 | |||
386 | /* Get the current sdram memory scrub rate from the internal | ||
387 | representation and converts it to the closest matching | ||
388 | bandwidth in bytes/sec. | ||
389 | */ | ||
390 | int (*get_sdram_scrub_rate) (struct mem_ctl_info * mci); | ||
391 | |||
392 | |||
393 | /* pointer to edac checking routine */ | ||
394 | void (*edac_check) (struct mem_ctl_info * mci); | ||
395 | |||
396 | /* | ||
397 | * Remaps memory pages: controller pages to physical pages. | ||
398 | * For most MC's, this will be NULL. | ||
399 | */ | ||
400 | /* FIXME - why not send the phys page to begin with? */ | ||
401 | unsigned long (*ctl_page_to_phys) (struct mem_ctl_info * mci, | ||
402 | unsigned long page); | ||
403 | int mc_idx; | ||
404 | int nr_csrows; | ||
405 | struct csrow_info *csrows; | ||
406 | /* | ||
407 | * FIXME - what about controllers on other busses? - IDs must be | ||
408 | * unique. dev pointer should be sufficiently unique, but | ||
409 | * BUS:SLOT.FUNC numbers may not be unique. | ||
410 | */ | ||
411 | struct device *dev; | ||
412 | const char *mod_name; | ||
413 | const char *mod_ver; | ||
414 | const char *ctl_name; | ||
415 | const char *dev_name; | ||
416 | char proc_name[MC_PROC_NAME_MAX_LEN + 1]; | ||
417 | void *pvt_info; | ||
418 | u32 ue_noinfo_count; /* Uncorrectable Errors w/o info */ | ||
419 | u32 ce_noinfo_count; /* Correctable Errors w/o info */ | ||
420 | u32 ue_count; /* Total Uncorrectable Errors for this MC */ | ||
421 | u32 ce_count; /* Total Correctable Errors for this MC */ | ||
422 | unsigned long start_time; /* mci load start time (in jiffies) */ | ||
423 | |||
424 | struct completion complete; | ||
425 | |||
426 | /* edac sysfs device control */ | ||
427 | struct kobject edac_mci_kobj; | ||
428 | |||
429 | /* list for all grp instances within a mc */ | ||
430 | struct list_head grp_kobj_list; | ||
431 | |||
432 | /* Additional top controller level attributes, but specified | ||
433 | * by the low level driver. | ||
434 | * | ||
435 | * Set by the low level driver to provide attributes at the | ||
436 | * controller level, same level as 'ue_count' and 'ce_count' above. | ||
437 | * An array of structures, NULL terminated | ||
438 | * | ||
439 | * If attributes are desired, then set to array of attributes | ||
440 | * If no attributes are desired, leave NULL | ||
441 | */ | ||
442 | const struct mcidev_sysfs_attribute *mc_driver_sysfs_attributes; | ||
443 | |||
444 | /* work struct for this MC */ | ||
445 | struct delayed_work work; | ||
446 | |||
447 | /* the internal state of this controller instance */ | ||
448 | int op_state; | ||
449 | }; | ||
450 | |||
97 | /* | 451 | /* |
98 | * The following are the structures to provide for a generic | 452 | * The following are the structures to provide for a generic |
99 | * or abstract 'edac_device'. This set of structures and the | 453 | * or abstract 'edac_device'. This set of structures and the |
@@ -102,13 +456,13 @@ do { \ | |||
102 | * | 456 | * |
103 | * CPU caches (L1 and L2) | 457 | * CPU caches (L1 and L2) |
104 | * DMA engines | 458 | * DMA engines |
105 | * Core CPU switches | 459 | * Core CPU swithces |
106 | * Fabric switch units | 460 | * Fabric switch units |
107 | * PCIe interface controllers | 461 | * PCIe interface controllers |
108 | * other EDAC/ECC type devices that can be monitored for | 462 | * other EDAC/ECC type devices that can be monitored for |
109 | * errors, etc. | 463 | * errors, etc. |
110 | * | 464 | * |
111 | * It allows for a 2 level set of hierarchy. For example: | 465 | * It allows for a 2 level set of hiearchry. For example: |
112 | * | 466 | * |
113 | * cache could be composed of L1, L2 and L3 levels of cache. | 467 | * cache could be composed of L1, L2 and L3 levels of cache. |
114 | * Each CPU core would have its own L1 cache, while sharing | 468 | * Each CPU core would have its own L1 cache, while sharing |
@@ -237,8 +591,8 @@ struct edac_device_ctl_info { | |||
237 | */ | 591 | */ |
238 | struct edac_dev_sysfs_attribute *sysfs_attributes; | 592 | struct edac_dev_sysfs_attribute *sysfs_attributes; |
239 | 593 | ||
240 | /* pointer to main 'edac' subsys in sysfs */ | 594 | /* pointer to main 'edac' class in sysfs */ |
241 | struct bus_type *edac_subsys; | 595 | struct sysdev_class *edac_class; |
242 | 596 | ||
243 | /* the internal state of this controller instance */ | 597 | /* the internal state of this controller instance */ |
244 | int op_state; | 598 | int op_state; |
@@ -336,7 +690,7 @@ struct edac_pci_ctl_info { | |||
336 | 690 | ||
337 | int pci_idx; | 691 | int pci_idx; |
338 | 692 | ||
339 | struct bus_type *edac_subsys; /* pointer to subsystem */ | 693 | struct sysdev_class *edac_class; /* pointer to class */ |
340 | 694 | ||
341 | /* the internal state of this controller instance */ | 695 | /* the internal state of this controller instance */ |
342 | int op_state; | 696 | int op_state; |
@@ -442,10 +796,8 @@ static inline void pci_write_bits32(struct pci_dev *pdev, int offset, | |||
442 | 796 | ||
443 | #endif /* CONFIG_PCI */ | 797 | #endif /* CONFIG_PCI */ |
444 | 798 | ||
445 | struct mem_ctl_info *edac_mc_alloc(unsigned mc_num, | 799 | extern struct mem_ctl_info *edac_mc_alloc(unsigned sz_pvt, unsigned nr_csrows, |
446 | unsigned n_layers, | 800 | unsigned nr_chans, int edac_index); |
447 | struct edac_mc_layer *layers, | ||
448 | unsigned sz_pvt); | ||
449 | extern int edac_mc_add_mc(struct mem_ctl_info *mci); | 801 | extern int edac_mc_add_mc(struct mem_ctl_info *mci); |
450 | extern void edac_mc_free(struct mem_ctl_info *mci); | 802 | extern void edac_mc_free(struct mem_ctl_info *mci); |
451 | extern struct mem_ctl_info *edac_mc_find(int idx); | 803 | extern struct mem_ctl_info *edac_mc_find(int idx); |
@@ -453,17 +805,35 @@ extern struct mem_ctl_info *find_mci_by_dev(struct device *dev); | |||
453 | extern struct mem_ctl_info *edac_mc_del_mc(struct device *dev); | 805 | extern struct mem_ctl_info *edac_mc_del_mc(struct device *dev); |
454 | extern int edac_mc_find_csrow_by_page(struct mem_ctl_info *mci, | 806 | extern int edac_mc_find_csrow_by_page(struct mem_ctl_info *mci, |
455 | unsigned long page); | 807 | unsigned long page); |
456 | void edac_mc_handle_error(const enum hw_event_mc_err_type type, | 808 | |
457 | struct mem_ctl_info *mci, | 809 | /* |
458 | const u16 error_count, | 810 | * The no info errors are used when error overflows are reported. |
459 | const unsigned long page_frame_number, | 811 | * There are a limited number of error logging registers that can |
460 | const unsigned long offset_in_page, | 812 | * be exausted. When all registers are exhausted and an additional |
461 | const unsigned long syndrome, | 813 | * error occurs then an error overflow register records that an |
462 | const int top_layer, | 814 | * error occurred and the type of error, but doesn't have any |
463 | const int mid_layer, | 815 | * further information. The ce/ue versions make for cleaner |
464 | const int low_layer, | 816 | * reporting logic and function interface - reduces conditional |
465 | const char *msg, | 817 | * statement clutter and extra function arguments. |
466 | const char *other_detail); | 818 | */ |
819 | extern void edac_mc_handle_ce(struct mem_ctl_info *mci, | ||
820 | unsigned long page_frame_number, | ||
821 | unsigned long offset_in_page, | ||
822 | unsigned long syndrome, int row, int channel, | ||
823 | const char *msg); | ||
824 | extern void edac_mc_handle_ce_no_info(struct mem_ctl_info *mci, | ||
825 | const char *msg); | ||
826 | extern void edac_mc_handle_ue(struct mem_ctl_info *mci, | ||
827 | unsigned long page_frame_number, | ||
828 | unsigned long offset_in_page, int row, | ||
829 | const char *msg); | ||
830 | extern void edac_mc_handle_ue_no_info(struct mem_ctl_info *mci, | ||
831 | const char *msg); | ||
832 | extern void edac_mc_handle_fbd_ue(struct mem_ctl_info *mci, unsigned int csrow, | ||
833 | unsigned int channel0, unsigned int channel1, | ||
834 | char *msg); | ||
835 | extern void edac_mc_handle_fbd_ce(struct mem_ctl_info *mci, unsigned int csrow, | ||
836 | unsigned int channel, char *msg); | ||
467 | 837 | ||
468 | /* | 838 | /* |
469 | * edac_device APIs | 839 | * edac_device APIs |
@@ -475,7 +845,6 @@ extern void edac_device_handle_ue(struct edac_device_ctl_info *edac_dev, | |||
475 | extern void edac_device_handle_ce(struct edac_device_ctl_info *edac_dev, | 845 | extern void edac_device_handle_ce(struct edac_device_ctl_info *edac_dev, |
476 | int inst_nr, int block_nr, const char *msg); | 846 | int inst_nr, int block_nr, const char *msg); |
477 | extern int edac_device_alloc_index(void); | 847 | extern int edac_device_alloc_index(void); |
478 | extern const char *edac_layer_name[]; | ||
479 | 848 | ||
480 | /* | 849 | /* |
481 | * edac_pci APIs | 850 | * edac_pci APIs |
diff --git a/drivers/edac/edac_device.c b/drivers/edac/edac_device.c index 211021dfec7..c3f67437afb 100644 --- a/drivers/edac/edac_device.c +++ b/drivers/edac/edac_device.c | |||
@@ -23,6 +23,7 @@ | |||
23 | #include <linux/jiffies.h> | 23 | #include <linux/jiffies.h> |
24 | #include <linux/spinlock.h> | 24 | #include <linux/spinlock.h> |
25 | #include <linux/list.h> | 25 | #include <linux/list.h> |
26 | #include <linux/sysdev.h> | ||
26 | #include <linux/ctype.h> | 27 | #include <linux/ctype.h> |
27 | #include <linux/workqueue.h> | 28 | #include <linux/workqueue.h> |
28 | #include <asm/uaccess.h> | 29 | #include <asm/uaccess.h> |
@@ -40,13 +41,12 @@ static LIST_HEAD(edac_device_list); | |||
40 | #ifdef CONFIG_EDAC_DEBUG | 41 | #ifdef CONFIG_EDAC_DEBUG |
41 | static void edac_device_dump_device(struct edac_device_ctl_info *edac_dev) | 42 | static void edac_device_dump_device(struct edac_device_ctl_info *edac_dev) |
42 | { | 43 | { |
43 | edac_dbg(3, "\tedac_dev = %p dev_idx=%d\n", | 44 | debugf3("\tedac_dev = %p dev_idx=%d \n", edac_dev, edac_dev->dev_idx); |
44 | edac_dev, edac_dev->dev_idx); | 45 | debugf4("\tedac_dev->edac_check = %p\n", edac_dev->edac_check); |
45 | edac_dbg(4, "\tedac_dev->edac_check = %p\n", edac_dev->edac_check); | 46 | debugf3("\tdev = %p\n", edac_dev->dev); |
46 | edac_dbg(3, "\tdev = %p\n", edac_dev->dev); | 47 | debugf3("\tmod_name:ctl_name = %s:%s\n", |
47 | edac_dbg(3, "\tmod_name:ctl_name = %s:%s\n", | 48 | edac_dev->mod_name, edac_dev->ctl_name); |
48 | edac_dev->mod_name, edac_dev->ctl_name); | 49 | debugf3("\tpvt_info = %p\n\n", edac_dev->pvt_info); |
49 | edac_dbg(3, "\tpvt_info = %p\n\n", edac_dev->pvt_info); | ||
50 | } | 50 | } |
51 | #endif /* CONFIG_EDAC_DEBUG */ | 51 | #endif /* CONFIG_EDAC_DEBUG */ |
52 | 52 | ||
@@ -57,7 +57,7 @@ static void edac_device_dump_device(struct edac_device_ctl_info *edac_dev) | |||
57 | * | 57 | * |
58 | * The control structure is allocated in complete chunk | 58 | * The control structure is allocated in complete chunk |
59 | * from the OS. It is in turn sub allocated to the | 59 | * from the OS. It is in turn sub allocated to the |
60 | * various objects that compose the structure | 60 | * various objects that compose the struture |
61 | * | 61 | * |
62 | * The structure has a 'nr_instance' array within itself. | 62 | * The structure has a 'nr_instance' array within itself. |
63 | * Each instance represents a major component | 63 | * Each instance represents a major component |
@@ -80,10 +80,11 @@ struct edac_device_ctl_info *edac_device_alloc_ctl_info( | |||
80 | unsigned total_size; | 80 | unsigned total_size; |
81 | unsigned count; | 81 | unsigned count; |
82 | unsigned instance, block, attr; | 82 | unsigned instance, block, attr; |
83 | void *pvt, *p; | 83 | void *pvt; |
84 | int err; | 84 | int err; |
85 | 85 | ||
86 | edac_dbg(4, "instances=%d blocks=%d\n", nr_instances, nr_blocks); | 86 | debugf4("%s() instances=%d blocks=%d\n", |
87 | __func__, nr_instances, nr_blocks); | ||
87 | 88 | ||
88 | /* Calculate the size of memory we need to allocate AND | 89 | /* Calculate the size of memory we need to allocate AND |
89 | * determine the offsets of the various item arrays | 90 | * determine the offsets of the various item arrays |
@@ -92,30 +93,35 @@ struct edac_device_ctl_info *edac_device_alloc_ctl_info( | |||
92 | * to be at least as stringent as what the compiler would | 93 | * to be at least as stringent as what the compiler would |
93 | * provide if we could simply hardcode everything into a single struct. | 94 | * provide if we could simply hardcode everything into a single struct. |
94 | */ | 95 | */ |
95 | p = NULL; | 96 | dev_ctl = (struct edac_device_ctl_info *)NULL; |
96 | dev_ctl = edac_align_ptr(&p, sizeof(*dev_ctl), 1); | ||
97 | 97 | ||
98 | /* Calc the 'end' offset past end of ONE ctl_info structure | 98 | /* Calc the 'end' offset past end of ONE ctl_info structure |
99 | * which will become the start of the 'instance' array | 99 | * which will become the start of the 'instance' array |
100 | */ | 100 | */ |
101 | dev_inst = edac_align_ptr(&p, sizeof(*dev_inst), nr_instances); | 101 | dev_inst = edac_align_ptr(&dev_ctl[1], sizeof(*dev_inst)); |
102 | 102 | ||
103 | /* Calc the 'end' offset past the instance array within the ctl_info | 103 | /* Calc the 'end' offset past the instance array within the ctl_info |
104 | * which will become the start of the block array | 104 | * which will become the start of the block array |
105 | */ | 105 | */ |
106 | count = nr_instances * nr_blocks; | 106 | dev_blk = edac_align_ptr(&dev_inst[nr_instances], sizeof(*dev_blk)); |
107 | dev_blk = edac_align_ptr(&p, sizeof(*dev_blk), count); | ||
108 | 107 | ||
109 | /* Calc the 'end' offset past the dev_blk array | 108 | /* Calc the 'end' offset past the dev_blk array |
110 | * which will become the start of the attrib array, if any. | 109 | * which will become the start of the attrib array, if any. |
111 | */ | 110 | */ |
112 | /* calc how many nr_attrib we need */ | 111 | count = nr_instances * nr_blocks; |
113 | if (nr_attrib > 0) | 112 | dev_attrib = edac_align_ptr(&dev_blk[count], sizeof(*dev_attrib)); |
113 | |||
114 | /* Check for case of when an attribute array is specified */ | ||
115 | if (nr_attrib > 0) { | ||
116 | /* calc how many nr_attrib we need */ | ||
114 | count *= nr_attrib; | 117 | count *= nr_attrib; |
115 | dev_attrib = edac_align_ptr(&p, sizeof(*dev_attrib), count); | ||
116 | 118 | ||
117 | /* Calc the 'end' offset past the attributes array */ | 119 | /* Calc the 'end' offset past the attributes array */ |
118 | pvt = edac_align_ptr(&p, sz_private, 1); | 120 | pvt = edac_align_ptr(&dev_attrib[count], sz_private); |
121 | } else { | ||
122 | /* no attribute array specificed */ | ||
123 | pvt = edac_align_ptr(dev_attrib, sz_private); | ||
124 | } | ||
119 | 125 | ||
120 | /* 'pvt' now points to where the private data area is. | 126 | /* 'pvt' now points to where the private data area is. |
121 | * At this point 'pvt' (like dev_inst,dev_blk and dev_attrib) | 127 | * At this point 'pvt' (like dev_inst,dev_blk and dev_attrib) |
@@ -156,8 +162,8 @@ struct edac_device_ctl_info *edac_device_alloc_ctl_info( | |||
156 | /* Name of this edac device */ | 162 | /* Name of this edac device */ |
157 | snprintf(dev_ctl->name,sizeof(dev_ctl->name),"%s",edac_device_name); | 163 | snprintf(dev_ctl->name,sizeof(dev_ctl->name),"%s",edac_device_name); |
158 | 164 | ||
159 | edac_dbg(4, "edac_dev=%p next after end=%p\n", | 165 | debugf4("%s() edac_dev=%p next after end=%p\n", |
160 | dev_ctl, pvt + sz_private); | 166 | __func__, dev_ctl, pvt + sz_private ); |
161 | 167 | ||
162 | /* Initialize every Instance */ | 168 | /* Initialize every Instance */ |
163 | for (instance = 0; instance < nr_instances; instance++) { | 169 | for (instance = 0; instance < nr_instances; instance++) { |
@@ -178,8 +184,10 @@ struct edac_device_ctl_info *edac_device_alloc_ctl_info( | |||
178 | snprintf(blk->name, sizeof(blk->name), | 184 | snprintf(blk->name, sizeof(blk->name), |
179 | "%s%d", edac_block_name, block+offset_value); | 185 | "%s%d", edac_block_name, block+offset_value); |
180 | 186 | ||
181 | edac_dbg(4, "instance=%d inst_p=%p block=#%d block_p=%p name='%s'\n", | 187 | debugf4("%s() instance=%d inst_p=%p block=#%d " |
182 | instance, inst, block, blk, blk->name); | 188 | "block_p=%p name='%s'\n", |
189 | __func__, instance, inst, block, | ||
190 | blk, blk->name); | ||
183 | 191 | ||
184 | /* if there are NO attributes OR no attribute pointer | 192 | /* if there are NO attributes OR no attribute pointer |
185 | * then continue on to next block iteration | 193 | * then continue on to next block iteration |
@@ -192,8 +200,8 @@ struct edac_device_ctl_info *edac_device_alloc_ctl_info( | |||
192 | attrib_p = &dev_attrib[block*nr_instances*nr_attrib]; | 200 | attrib_p = &dev_attrib[block*nr_instances*nr_attrib]; |
193 | blk->block_attributes = attrib_p; | 201 | blk->block_attributes = attrib_p; |
194 | 202 | ||
195 | edac_dbg(4, "THIS BLOCK_ATTRIB=%p\n", | 203 | debugf4("%s() THIS BLOCK_ATTRIB=%p\n", |
196 | blk->block_attributes); | 204 | __func__, blk->block_attributes); |
197 | 205 | ||
198 | /* Initialize every user specified attribute in this | 206 | /* Initialize every user specified attribute in this |
199 | * block with the data the caller passed in | 207 | * block with the data the caller passed in |
@@ -212,10 +220,11 @@ struct edac_device_ctl_info *edac_device_alloc_ctl_info( | |||
212 | 220 | ||
213 | attrib->block = blk; /* up link */ | 221 | attrib->block = blk; /* up link */ |
214 | 222 | ||
215 | edac_dbg(4, "alloc-attrib=%p attrib_name='%s' attrib-spec=%p spec-name=%s\n", | 223 | debugf4("%s() alloc-attrib=%p attrib_name='%s' " |
216 | attrib, attrib->attr.name, | 224 | "attrib-spec=%p spec-name=%s\n", |
217 | &attrib_spec[attr], | 225 | __func__, attrib, attrib->attr.name, |
218 | attrib_spec[attr].attr.name | 226 | &attrib_spec[attr], |
227 | attrib_spec[attr].attr.name | ||
219 | ); | 228 | ); |
220 | } | 229 | } |
221 | } | 230 | } |
@@ -270,7 +279,7 @@ static struct edac_device_ctl_info *find_edac_device_by_dev(struct device *dev) | |||
270 | struct edac_device_ctl_info *edac_dev; | 279 | struct edac_device_ctl_info *edac_dev; |
271 | struct list_head *item; | 280 | struct list_head *item; |
272 | 281 | ||
273 | edac_dbg(0, "\n"); | 282 | debugf0("%s()\n", __func__); |
274 | 283 | ||
275 | list_for_each(item, &edac_device_list) { | 284 | list_for_each(item, &edac_device_list) { |
276 | edac_dev = list_entry(item, struct edac_device_ctl_info, link); | 285 | edac_dev = list_entry(item, struct edac_device_ctl_info, link); |
@@ -386,7 +395,7 @@ static void edac_device_workq_function(struct work_struct *work_req) | |||
386 | 395 | ||
387 | /* Reschedule the workq for the next time period to start again | 396 | /* Reschedule the workq for the next time period to start again |
388 | * if the number of msec is for 1 sec, then adjust to the next | 397 | * if the number of msec is for 1 sec, then adjust to the next |
389 | * whole one second to save timers firing all over the period | 398 | * whole one second to save timers fireing all over the period |
390 | * between integral seconds | 399 | * between integral seconds |
391 | */ | 400 | */ |
392 | if (edac_dev->poll_msec == 1000) | 401 | if (edac_dev->poll_msec == 1000) |
@@ -405,7 +414,7 @@ static void edac_device_workq_function(struct work_struct *work_req) | |||
405 | void edac_device_workq_setup(struct edac_device_ctl_info *edac_dev, | 414 | void edac_device_workq_setup(struct edac_device_ctl_info *edac_dev, |
406 | unsigned msec) | 415 | unsigned msec) |
407 | { | 416 | { |
408 | edac_dbg(0, "\n"); | 417 | debugf0("%s()\n", __func__); |
409 | 418 | ||
410 | /* take the arg 'msec' and set it into the control structure | 419 | /* take the arg 'msec' and set it into the control structure |
411 | * to used in the time period calculation | 420 | * to used in the time period calculation |
@@ -493,7 +502,7 @@ EXPORT_SYMBOL_GPL(edac_device_alloc_index); | |||
493 | */ | 502 | */ |
494 | int edac_device_add_device(struct edac_device_ctl_info *edac_dev) | 503 | int edac_device_add_device(struct edac_device_ctl_info *edac_dev) |
495 | { | 504 | { |
496 | edac_dbg(0, "\n"); | 505 | debugf0("%s()\n", __func__); |
497 | 506 | ||
498 | #ifdef CONFIG_EDAC_DEBUG | 507 | #ifdef CONFIG_EDAC_DEBUG |
499 | if (edac_debug_level >= 3) | 508 | if (edac_debug_level >= 3) |
@@ -555,7 +564,7 @@ EXPORT_SYMBOL_GPL(edac_device_add_device); | |||
555 | * Remove sysfs entries for specified edac_device structure and | 564 | * Remove sysfs entries for specified edac_device structure and |
556 | * then remove edac_device structure from global list | 565 | * then remove edac_device structure from global list |
557 | * | 566 | * |
558 | * @dev: | 567 | * @pdev: |
559 | * Pointer to 'struct device' representing edac_device | 568 | * Pointer to 'struct device' representing edac_device |
560 | * structure to remove. | 569 | * structure to remove. |
561 | * | 570 | * |
@@ -567,7 +576,7 @@ struct edac_device_ctl_info *edac_device_del_device(struct device *dev) | |||
567 | { | 576 | { |
568 | struct edac_device_ctl_info *edac_dev; | 577 | struct edac_device_ctl_info *edac_dev; |
569 | 578 | ||
570 | edac_dbg(0, "\n"); | 579 | debugf0("%s()\n", __func__); |
571 | 580 | ||
572 | mutex_lock(&device_ctls_mutex); | 581 | mutex_lock(&device_ctls_mutex); |
573 | 582 | ||
diff --git a/drivers/edac/edac_device_sysfs.c b/drivers/edac/edac_device_sysfs.c index fb68a06ad68..86649df0028 100644 --- a/drivers/edac/edac_device_sysfs.c +++ b/drivers/edac/edac_device_sysfs.c | |||
@@ -1,5 +1,5 @@ | |||
1 | /* | 1 | /* |
2 | * file for managing the edac_device subsystem of devices for EDAC | 2 | * file for managing the edac_device class of devices for EDAC |
3 | * | 3 | * |
4 | * (C) 2007 SoftwareBitMaker | 4 | * (C) 2007 SoftwareBitMaker |
5 | * | 5 | * |
@@ -202,7 +202,7 @@ static void edac_device_ctrl_master_release(struct kobject *kobj) | |||
202 | { | 202 | { |
203 | struct edac_device_ctl_info *edac_dev = to_edacdev(kobj); | 203 | struct edac_device_ctl_info *edac_dev = to_edacdev(kobj); |
204 | 204 | ||
205 | edac_dbg(4, "control index=%d\n", edac_dev->dev_idx); | 205 | debugf4("%s() control index=%d\n", __func__, edac_dev->dev_idx); |
206 | 206 | ||
207 | /* decrement the EDAC CORE module ref count */ | 207 | /* decrement the EDAC CORE module ref count */ |
208 | module_put(edac_dev->owner); | 208 | module_put(edac_dev->owner); |
@@ -230,21 +230,21 @@ static struct kobj_type ktype_device_ctrl = { | |||
230 | */ | 230 | */ |
231 | int edac_device_register_sysfs_main_kobj(struct edac_device_ctl_info *edac_dev) | 231 | int edac_device_register_sysfs_main_kobj(struct edac_device_ctl_info *edac_dev) |
232 | { | 232 | { |
233 | struct bus_type *edac_subsys; | 233 | struct sysdev_class *edac_class; |
234 | int err; | 234 | int err; |
235 | 235 | ||
236 | edac_dbg(1, "\n"); | 236 | debugf1("%s()\n", __func__); |
237 | 237 | ||
238 | /* get the /sys/devices/system/edac reference */ | 238 | /* get the /sys/devices/system/edac reference */ |
239 | edac_subsys = edac_get_sysfs_subsys(); | 239 | edac_class = edac_get_sysfs_class(); |
240 | if (edac_subsys == NULL) { | 240 | if (edac_class == NULL) { |
241 | edac_dbg(1, "no edac_subsys error\n"); | 241 | debugf1("%s() no edac_class error\n", __func__); |
242 | err = -ENODEV; | 242 | err = -ENODEV; |
243 | goto err_out; | 243 | goto err_out; |
244 | } | 244 | } |
245 | 245 | ||
246 | /* Point to the 'edac_subsys' this instance 'reports' to */ | 246 | /* Point to the 'edac_class' this instance 'reports' to */ |
247 | edac_dev->edac_subsys = edac_subsys; | 247 | edac_dev->edac_class = edac_class; |
248 | 248 | ||
249 | /* Init the devices's kobject */ | 249 | /* Init the devices's kobject */ |
250 | memset(&edac_dev->kobj, 0, sizeof(struct kobject)); | 250 | memset(&edac_dev->kobj, 0, sizeof(struct kobject)); |
@@ -261,11 +261,11 @@ int edac_device_register_sysfs_main_kobj(struct edac_device_ctl_info *edac_dev) | |||
261 | 261 | ||
262 | /* register */ | 262 | /* register */ |
263 | err = kobject_init_and_add(&edac_dev->kobj, &ktype_device_ctrl, | 263 | err = kobject_init_and_add(&edac_dev->kobj, &ktype_device_ctrl, |
264 | &edac_subsys->dev_root->kobj, | 264 | &edac_class->kset.kobj, |
265 | "%s", edac_dev->name); | 265 | "%s", edac_dev->name); |
266 | if (err) { | 266 | if (err) { |
267 | edac_dbg(1, "Failed to register '.../edac/%s'\n", | 267 | debugf1("%s()Failed to register '.../edac/%s'\n", |
268 | edac_dev->name); | 268 | __func__, edac_dev->name); |
269 | goto err_kobj_reg; | 269 | goto err_kobj_reg; |
270 | } | 270 | } |
271 | kobject_uevent(&edac_dev->kobj, KOBJ_ADD); | 271 | kobject_uevent(&edac_dev->kobj, KOBJ_ADD); |
@@ -274,7 +274,8 @@ int edac_device_register_sysfs_main_kobj(struct edac_device_ctl_info *edac_dev) | |||
274 | * edac_device_unregister_sysfs_main_kobj() must be used | 274 | * edac_device_unregister_sysfs_main_kobj() must be used |
275 | */ | 275 | */ |
276 | 276 | ||
277 | edac_dbg(4, "Registered '.../edac/%s' kobject\n", edac_dev->name); | 277 | debugf4("%s() Registered '.../edac/%s' kobject\n", |
278 | __func__, edac_dev->name); | ||
278 | 279 | ||
279 | return 0; | 280 | return 0; |
280 | 281 | ||
@@ -283,7 +284,7 @@ err_kobj_reg: | |||
283 | module_put(edac_dev->owner); | 284 | module_put(edac_dev->owner); |
284 | 285 | ||
285 | err_mod_get: | 286 | err_mod_get: |
286 | edac_put_sysfs_subsys(); | 287 | edac_put_sysfs_class(); |
287 | 288 | ||
288 | err_out: | 289 | err_out: |
289 | return err; | 290 | return err; |
@@ -295,8 +296,9 @@ err_out: | |||
295 | */ | 296 | */ |
296 | void edac_device_unregister_sysfs_main_kobj(struct edac_device_ctl_info *dev) | 297 | void edac_device_unregister_sysfs_main_kobj(struct edac_device_ctl_info *dev) |
297 | { | 298 | { |
298 | edac_dbg(0, "\n"); | 299 | debugf0("%s()\n", __func__); |
299 | edac_dbg(4, "name of kobject is: %s\n", kobject_name(&dev->kobj)); | 300 | debugf4("%s() name of kobject is: %s\n", |
301 | __func__, kobject_name(&dev->kobj)); | ||
300 | 302 | ||
301 | /* | 303 | /* |
302 | * Unregister the edac device's kobject and | 304 | * Unregister the edac device's kobject and |
@@ -306,7 +308,7 @@ void edac_device_unregister_sysfs_main_kobj(struct edac_device_ctl_info *dev) | |||
306 | * b) 'kfree' the memory | 308 | * b) 'kfree' the memory |
307 | */ | 309 | */ |
308 | kobject_put(&dev->kobj); | 310 | kobject_put(&dev->kobj); |
309 | edac_put_sysfs_subsys(); | 311 | edac_put_sysfs_class(); |
310 | } | 312 | } |
311 | 313 | ||
312 | /* edac_dev -> instance information */ | 314 | /* edac_dev -> instance information */ |
@@ -334,7 +336,7 @@ static void edac_device_ctrl_instance_release(struct kobject *kobj) | |||
334 | { | 336 | { |
335 | struct edac_device_instance *instance; | 337 | struct edac_device_instance *instance; |
336 | 338 | ||
337 | edac_dbg(1, "\n"); | 339 | debugf1("%s()\n", __func__); |
338 | 340 | ||
339 | /* map from this kobj to the main control struct | 341 | /* map from this kobj to the main control struct |
340 | * and then dec the main kobj count | 342 | * and then dec the main kobj count |
@@ -440,7 +442,7 @@ static void edac_device_ctrl_block_release(struct kobject *kobj) | |||
440 | { | 442 | { |
441 | struct edac_device_block *block; | 443 | struct edac_device_block *block; |
442 | 444 | ||
443 | edac_dbg(1, "\n"); | 445 | debugf1("%s()\n", __func__); |
444 | 446 | ||
445 | /* get the container of the kobj */ | 447 | /* get the container of the kobj */ |
446 | block = to_block(kobj); | 448 | block = to_block(kobj); |
@@ -522,10 +524,10 @@ static int edac_device_create_block(struct edac_device_ctl_info *edac_dev, | |||
522 | struct edac_dev_sysfs_block_attribute *sysfs_attrib; | 524 | struct edac_dev_sysfs_block_attribute *sysfs_attrib; |
523 | struct kobject *main_kobj; | 525 | struct kobject *main_kobj; |
524 | 526 | ||
525 | edac_dbg(4, "Instance '%s' inst_p=%p block '%s' block_p=%p\n", | 527 | debugf4("%s() Instance '%s' inst_p=%p block '%s' block_p=%p\n", |
526 | instance->name, instance, block->name, block); | 528 | __func__, instance->name, instance, block->name, block); |
527 | edac_dbg(4, "block kobj=%p block kobj->parent=%p\n", | 529 | debugf4("%s() block kobj=%p block kobj->parent=%p\n", |
528 | &block->kobj, &block->kobj.parent); | 530 | __func__, &block->kobj, &block->kobj.parent); |
529 | 531 | ||
530 | /* init this block's kobject */ | 532 | /* init this block's kobject */ |
531 | memset(&block->kobj, 0, sizeof(struct kobject)); | 533 | memset(&block->kobj, 0, sizeof(struct kobject)); |
@@ -544,7 +546,8 @@ static int edac_device_create_block(struct edac_device_ctl_info *edac_dev, | |||
544 | &instance->kobj, | 546 | &instance->kobj, |
545 | "%s", block->name); | 547 | "%s", block->name); |
546 | if (err) { | 548 | if (err) { |
547 | edac_dbg(1, "Failed to register instance '%s'\n", block->name); | 549 | debugf1("%s() Failed to register instance '%s'\n", |
550 | __func__, block->name); | ||
548 | kobject_put(main_kobj); | 551 | kobject_put(main_kobj); |
549 | err = -ENODEV; | 552 | err = -ENODEV; |
550 | goto err_out; | 553 | goto err_out; |
@@ -557,9 +560,11 @@ static int edac_device_create_block(struct edac_device_ctl_info *edac_dev, | |||
557 | if (sysfs_attrib && block->nr_attribs) { | 560 | if (sysfs_attrib && block->nr_attribs) { |
558 | for (i = 0; i < block->nr_attribs; i++, sysfs_attrib++) { | 561 | for (i = 0; i < block->nr_attribs; i++, sysfs_attrib++) { |
559 | 562 | ||
560 | edac_dbg(4, "creating block attrib='%s' attrib->%p to kobj=%p\n", | 563 | debugf4("%s() creating block attrib='%s' " |
561 | sysfs_attrib->attr.name, | 564 | "attrib->%p to kobj=%p\n", |
562 | sysfs_attrib, &block->kobj); | 565 | __func__, |
566 | sysfs_attrib->attr.name, | ||
567 | sysfs_attrib, &block->kobj); | ||
563 | 568 | ||
564 | /* Create each block_attribute file */ | 569 | /* Create each block_attribute file */ |
565 | err = sysfs_create_file(&block->kobj, | 570 | err = sysfs_create_file(&block->kobj, |
@@ -642,14 +647,14 @@ static int edac_device_create_instance(struct edac_device_ctl_info *edac_dev, | |||
642 | err = kobject_init_and_add(&instance->kobj, &ktype_instance_ctrl, | 647 | err = kobject_init_and_add(&instance->kobj, &ktype_instance_ctrl, |
643 | &edac_dev->kobj, "%s", instance->name); | 648 | &edac_dev->kobj, "%s", instance->name); |
644 | if (err != 0) { | 649 | if (err != 0) { |
645 | edac_dbg(2, "Failed to register instance '%s'\n", | 650 | debugf2("%s() Failed to register instance '%s'\n", |
646 | instance->name); | 651 | __func__, instance->name); |
647 | kobject_put(main_kobj); | 652 | kobject_put(main_kobj); |
648 | goto err_out; | 653 | goto err_out; |
649 | } | 654 | } |
650 | 655 | ||
651 | edac_dbg(4, "now register '%d' blocks for instance %d\n", | 656 | debugf4("%s() now register '%d' blocks for instance %d\n", |
652 | instance->nr_blocks, idx); | 657 | __func__, instance->nr_blocks, idx); |
653 | 658 | ||
654 | /* register all blocks of this instance */ | 659 | /* register all blocks of this instance */ |
655 | for (i = 0; i < instance->nr_blocks; i++) { | 660 | for (i = 0; i < instance->nr_blocks; i++) { |
@@ -665,8 +670,8 @@ static int edac_device_create_instance(struct edac_device_ctl_info *edac_dev, | |||
665 | } | 670 | } |
666 | kobject_uevent(&instance->kobj, KOBJ_ADD); | 671 | kobject_uevent(&instance->kobj, KOBJ_ADD); |
667 | 672 | ||
668 | edac_dbg(4, "Registered instance %d '%s' kobject\n", | 673 | debugf4("%s() Registered instance %d '%s' kobject\n", |
669 | idx, instance->name); | 674 | __func__, idx, instance->name); |
670 | 675 | ||
671 | return 0; | 676 | return 0; |
672 | 677 | ||
@@ -710,7 +715,7 @@ static int edac_device_create_instances(struct edac_device_ctl_info *edac_dev) | |||
710 | int i, j; | 715 | int i, j; |
711 | int err; | 716 | int err; |
712 | 717 | ||
713 | edac_dbg(0, "\n"); | 718 | debugf0("%s()\n", __func__); |
714 | 719 | ||
715 | /* iterate over creation of the instances */ | 720 | /* iterate over creation of the instances */ |
716 | for (i = 0; i < edac_dev->nr_instances; i++) { | 721 | for (i = 0; i < edac_dev->nr_instances; i++) { |
@@ -812,12 +817,12 @@ int edac_device_create_sysfs(struct edac_device_ctl_info *edac_dev) | |||
812 | int err; | 817 | int err; |
813 | struct kobject *edac_kobj = &edac_dev->kobj; | 818 | struct kobject *edac_kobj = &edac_dev->kobj; |
814 | 819 | ||
815 | edac_dbg(0, "idx=%d\n", edac_dev->dev_idx); | 820 | debugf0("%s() idx=%d\n", __func__, edac_dev->dev_idx); |
816 | 821 | ||
817 | /* go create any main attributes callers wants */ | 822 | /* go create any main attributes callers wants */ |
818 | err = edac_device_add_main_sysfs_attributes(edac_dev); | 823 | err = edac_device_add_main_sysfs_attributes(edac_dev); |
819 | if (err) { | 824 | if (err) { |
820 | edac_dbg(0, "failed to add sysfs attribs\n"); | 825 | debugf0("%s() failed to add sysfs attribs\n", __func__); |
821 | goto err_out; | 826 | goto err_out; |
822 | } | 827 | } |
823 | 828 | ||
@@ -827,7 +832,8 @@ int edac_device_create_sysfs(struct edac_device_ctl_info *edac_dev) | |||
827 | err = sysfs_create_link(edac_kobj, | 832 | err = sysfs_create_link(edac_kobj, |
828 | &edac_dev->dev->kobj, EDAC_DEVICE_SYMLINK); | 833 | &edac_dev->dev->kobj, EDAC_DEVICE_SYMLINK); |
829 | if (err) { | 834 | if (err) { |
830 | edac_dbg(0, "sysfs_create_link() returned err= %d\n", err); | 835 | debugf0("%s() sysfs_create_link() returned err= %d\n", |
836 | __func__, err); | ||
831 | goto err_remove_main_attribs; | 837 | goto err_remove_main_attribs; |
832 | } | 838 | } |
833 | 839 | ||
@@ -837,13 +843,14 @@ int edac_device_create_sysfs(struct edac_device_ctl_info *edac_dev) | |||
837 | */ | 843 | */ |
838 | err = edac_device_create_instances(edac_dev); | 844 | err = edac_device_create_instances(edac_dev); |
839 | if (err) { | 845 | if (err) { |
840 | edac_dbg(0, "edac_device_create_instances() returned err= %d\n", | 846 | debugf0("%s() edac_device_create_instances() " |
841 | err); | 847 | "returned err= %d\n", __func__, err); |
842 | goto err_remove_link; | 848 | goto err_remove_link; |
843 | } | 849 | } |
844 | 850 | ||
845 | 851 | ||
846 | edac_dbg(4, "create-instances done, idx=%d\n", edac_dev->dev_idx); | 852 | debugf4("%s() create-instances done, idx=%d\n", |
853 | __func__, edac_dev->dev_idx); | ||
847 | 854 | ||
848 | return 0; | 855 | return 0; |
849 | 856 | ||
@@ -866,7 +873,7 @@ err_out: | |||
866 | */ | 873 | */ |
867 | void edac_device_remove_sysfs(struct edac_device_ctl_info *edac_dev) | 874 | void edac_device_remove_sysfs(struct edac_device_ctl_info *edac_dev) |
868 | { | 875 | { |
869 | edac_dbg(0, "\n"); | 876 | debugf0("%s()\n", __func__); |
870 | 877 | ||
871 | /* remove any main attributes for this device */ | 878 | /* remove any main attributes for this device */ |
872 | edac_device_remove_main_sysfs_attributes(edac_dev); | 879 | edac_device_remove_main_sysfs_attributes(edac_dev); |
diff --git a/drivers/edac/edac_mc.c b/drivers/edac/edac_mc.c index 281f566a551..d69144a0904 100644 --- a/drivers/edac/edac_mc.c +++ b/drivers/edac/edac_mc.c | |||
@@ -25,97 +25,55 @@ | |||
25 | #include <linux/jiffies.h> | 25 | #include <linux/jiffies.h> |
26 | #include <linux/spinlock.h> | 26 | #include <linux/spinlock.h> |
27 | #include <linux/list.h> | 27 | #include <linux/list.h> |
28 | #include <linux/sysdev.h> | ||
28 | #include <linux/ctype.h> | 29 | #include <linux/ctype.h> |
29 | #include <linux/edac.h> | 30 | #include <linux/edac.h> |
30 | #include <linux/bitops.h> | ||
31 | #include <asm/uaccess.h> | 31 | #include <asm/uaccess.h> |
32 | #include <asm/page.h> | 32 | #include <asm/page.h> |
33 | #include <asm/edac.h> | 33 | #include <asm/edac.h> |
34 | #include "edac_core.h" | 34 | #include "edac_core.h" |
35 | #include "edac_module.h" | 35 | #include "edac_module.h" |
36 | 36 | ||
37 | #define CREATE_TRACE_POINTS | ||
38 | #define TRACE_INCLUDE_PATH ../../include/ras | ||
39 | #include <ras/ras_event.h> | ||
40 | |||
41 | /* lock to memory controller's control array */ | 37 | /* lock to memory controller's control array */ |
42 | static DEFINE_MUTEX(mem_ctls_mutex); | 38 | static DEFINE_MUTEX(mem_ctls_mutex); |
43 | static LIST_HEAD(mc_devices); | 39 | static LIST_HEAD(mc_devices); |
44 | 40 | ||
45 | unsigned edac_dimm_info_location(struct dimm_info *dimm, char *buf, | ||
46 | unsigned len) | ||
47 | { | ||
48 | struct mem_ctl_info *mci = dimm->mci; | ||
49 | int i, n, count = 0; | ||
50 | char *p = buf; | ||
51 | |||
52 | for (i = 0; i < mci->n_layers; i++) { | ||
53 | n = snprintf(p, len, "%s %d ", | ||
54 | edac_layer_name[mci->layers[i].type], | ||
55 | dimm->location[i]); | ||
56 | p += n; | ||
57 | len -= n; | ||
58 | count += n; | ||
59 | if (!len) | ||
60 | break; | ||
61 | } | ||
62 | |||
63 | return count; | ||
64 | } | ||
65 | |||
66 | #ifdef CONFIG_EDAC_DEBUG | 41 | #ifdef CONFIG_EDAC_DEBUG |
67 | 42 | ||
68 | static void edac_mc_dump_channel(struct rank_info *chan) | 43 | static void edac_mc_dump_channel(struct channel_info *chan) |
69 | { | ||
70 | edac_dbg(4, " channel->chan_idx = %d\n", chan->chan_idx); | ||
71 | edac_dbg(4, " channel = %p\n", chan); | ||
72 | edac_dbg(4, " channel->csrow = %p\n", chan->csrow); | ||
73 | edac_dbg(4, " channel->dimm = %p\n", chan->dimm); | ||
74 | } | ||
75 | |||
76 | static void edac_mc_dump_dimm(struct dimm_info *dimm, int number) | ||
77 | { | 44 | { |
78 | char location[80]; | 45 | debugf4("\tchannel = %p\n", chan); |
79 | 46 | debugf4("\tchannel->chan_idx = %d\n", chan->chan_idx); | |
80 | edac_dimm_info_location(dimm, location, sizeof(location)); | 47 | debugf4("\tchannel->ce_count = %d\n", chan->ce_count); |
81 | 48 | debugf4("\tchannel->label = '%s'\n", chan->label); | |
82 | edac_dbg(4, "%s%i: %smapped as virtual row %d, chan %d\n", | 49 | debugf4("\tchannel->csrow = %p\n\n", chan->csrow); |
83 | dimm->mci->mem_is_per_rank ? "rank" : "dimm", | ||
84 | number, location, dimm->csrow, dimm->cschannel); | ||
85 | edac_dbg(4, " dimm = %p\n", dimm); | ||
86 | edac_dbg(4, " dimm->label = '%s'\n", dimm->label); | ||
87 | edac_dbg(4, " dimm->nr_pages = 0x%x\n", dimm->nr_pages); | ||
88 | edac_dbg(4, " dimm->grain = %d\n", dimm->grain); | ||
89 | edac_dbg(4, " dimm->nr_pages = 0x%x\n", dimm->nr_pages); | ||
90 | } | 50 | } |
91 | 51 | ||
92 | static void edac_mc_dump_csrow(struct csrow_info *csrow) | 52 | static void edac_mc_dump_csrow(struct csrow_info *csrow) |
93 | { | 53 | { |
94 | edac_dbg(4, "csrow->csrow_idx = %d\n", csrow->csrow_idx); | 54 | debugf4("\tcsrow = %p\n", csrow); |
95 | edac_dbg(4, " csrow = %p\n", csrow); | 55 | debugf4("\tcsrow->csrow_idx = %d\n", csrow->csrow_idx); |
96 | edac_dbg(4, " csrow->first_page = 0x%lx\n", csrow->first_page); | 56 | debugf4("\tcsrow->first_page = 0x%lx\n", csrow->first_page); |
97 | edac_dbg(4, " csrow->last_page = 0x%lx\n", csrow->last_page); | 57 | debugf4("\tcsrow->last_page = 0x%lx\n", csrow->last_page); |
98 | edac_dbg(4, " csrow->page_mask = 0x%lx\n", csrow->page_mask); | 58 | debugf4("\tcsrow->page_mask = 0x%lx\n", csrow->page_mask); |
99 | edac_dbg(4, " csrow->nr_channels = %d\n", csrow->nr_channels); | 59 | debugf4("\tcsrow->nr_pages = 0x%x\n", csrow->nr_pages); |
100 | edac_dbg(4, " csrow->channels = %p\n", csrow->channels); | 60 | debugf4("\tcsrow->nr_channels = %d\n", csrow->nr_channels); |
101 | edac_dbg(4, " csrow->mci = %p\n", csrow->mci); | 61 | debugf4("\tcsrow->channels = %p\n", csrow->channels); |
62 | debugf4("\tcsrow->mci = %p\n\n", csrow->mci); | ||
102 | } | 63 | } |
103 | 64 | ||
104 | static void edac_mc_dump_mci(struct mem_ctl_info *mci) | 65 | static void edac_mc_dump_mci(struct mem_ctl_info *mci) |
105 | { | 66 | { |
106 | edac_dbg(3, "\tmci = %p\n", mci); | 67 | debugf3("\tmci = %p\n", mci); |
107 | edac_dbg(3, "\tmci->mtype_cap = %lx\n", mci->mtype_cap); | 68 | debugf3("\tmci->mtype_cap = %lx\n", mci->mtype_cap); |
108 | edac_dbg(3, "\tmci->edac_ctl_cap = %lx\n", mci->edac_ctl_cap); | 69 | debugf3("\tmci->edac_ctl_cap = %lx\n", mci->edac_ctl_cap); |
109 | edac_dbg(3, "\tmci->edac_cap = %lx\n", mci->edac_cap); | 70 | debugf3("\tmci->edac_cap = %lx\n", mci->edac_cap); |
110 | edac_dbg(4, "\tmci->edac_check = %p\n", mci->edac_check); | 71 | debugf4("\tmci->edac_check = %p\n", mci->edac_check); |
111 | edac_dbg(3, "\tmci->nr_csrows = %d, csrows = %p\n", | 72 | debugf3("\tmci->nr_csrows = %d, csrows = %p\n", |
112 | mci->nr_csrows, mci->csrows); | 73 | mci->nr_csrows, mci->csrows); |
113 | edac_dbg(3, "\tmci->nr_dimms = %d, dimms = %p\n", | 74 | debugf3("\tdev = %p\n", mci->dev); |
114 | mci->tot_dimms, mci->dimms); | 75 | debugf3("\tmod_name:ctl_name = %s:%s\n", mci->mod_name, mci->ctl_name); |
115 | edac_dbg(3, "\tdev = %p\n", mci->pdev); | 76 | debugf3("\tpvt_info = %p\n\n", mci->pvt_info); |
116 | edac_dbg(3, "\tmod_name:ctl_name = %s:%s\n", | ||
117 | mci->mod_name, mci->ctl_name); | ||
118 | edac_dbg(3, "\tpvt_info = %p\n\n", mci->pvt_info); | ||
119 | } | 77 | } |
120 | 78 | ||
121 | #endif /* CONFIG_EDAC_DEBUG */ | 79 | #endif /* CONFIG_EDAC_DEBUG */ |
@@ -144,37 +102,18 @@ const char *edac_mem_types[] = { | |||
144 | }; | 102 | }; |
145 | EXPORT_SYMBOL_GPL(edac_mem_types); | 103 | EXPORT_SYMBOL_GPL(edac_mem_types); |
146 | 104 | ||
147 | /** | 105 | /* 'ptr' points to a possibly unaligned item X such that sizeof(X) is 'size'. |
148 | * edac_align_ptr - Prepares the pointer offsets for a single-shot allocation | 106 | * Adjust 'ptr' so that its alignment is at least as stringent as what the |
149 | * @p: pointer to a pointer with the memory offset to be used. At | 107 | * compiler would provide for X and return the aligned result. |
150 | * return, this will be incremented to point to the next offset | ||
151 | * @size: Size of the data structure to be reserved | ||
152 | * @n_elems: Number of elements that should be reserved | ||
153 | * | 108 | * |
154 | * If 'size' is a constant, the compiler will optimize this whole function | 109 | * If 'size' is a constant, the compiler will optimize this whole function |
155 | * down to either a no-op or the addition of a constant to the value of '*p'. | 110 | * down to either a no-op or the addition of a constant to the value of 'ptr'. |
156 | * | ||
157 | * The 'p' pointer is absolutely needed to keep the proper advancing | ||
158 | * further in memory to the proper offsets when allocating the struct along | ||
159 | * with its embedded structs, as edac_device_alloc_ctl_info() does it | ||
160 | * above, for example. | ||
161 | * | ||
162 | * At return, the pointer 'p' will be incremented to be used on a next call | ||
163 | * to this function. | ||
164 | */ | 111 | */ |
165 | void *edac_align_ptr(void **p, unsigned size, int n_elems) | 112 | void *edac_align_ptr(void *ptr, unsigned size) |
166 | { | 113 | { |
167 | unsigned align, r; | 114 | unsigned align, r; |
168 | void *ptr = *p; | ||
169 | 115 | ||
170 | *p += size * n_elems; | 116 | /* Here we assume that the alignment of a "long long" is the most |
171 | |||
172 | /* | ||
173 | * 'p' can possibly be an unaligned item X such that sizeof(X) is | ||
174 | * 'size'. Adjust 'p' so that its alignment is at least as | ||
175 | * stringent as what the compiler would provide for X and return | ||
176 | * the aligned result. | ||
177 | * Here we assume that the alignment of a "long long" is the most | ||
178 | * stringent alignment that the compiler will ever provide by default. | 117 | * stringent alignment that the compiler will ever provide by default. |
179 | * As far as I know, this is a reasonable assumption. | 118 | * As far as I know, this is a reasonable assumption. |
180 | */ | 119 | */ |
@@ -189,53 +128,19 @@ void *edac_align_ptr(void **p, unsigned size, int n_elems) | |||
189 | else | 128 | else |
190 | return (char *)ptr; | 129 | return (char *)ptr; |
191 | 130 | ||
192 | r = (unsigned long)p % align; | 131 | r = size % align; |
193 | 132 | ||
194 | if (r == 0) | 133 | if (r == 0) |
195 | return (char *)ptr; | 134 | return (char *)ptr; |
196 | 135 | ||
197 | *p += align - r; | ||
198 | |||
199 | return (void *)(((unsigned long)ptr) + align - r); | 136 | return (void *)(((unsigned long)ptr) + align - r); |
200 | } | 137 | } |
201 | 138 | ||
202 | static void _edac_mc_free(struct mem_ctl_info *mci) | ||
203 | { | ||
204 | int i, chn, row; | ||
205 | struct csrow_info *csr; | ||
206 | const unsigned int tot_dimms = mci->tot_dimms; | ||
207 | const unsigned int tot_channels = mci->num_cschannel; | ||
208 | const unsigned int tot_csrows = mci->nr_csrows; | ||
209 | |||
210 | if (mci->dimms) { | ||
211 | for (i = 0; i < tot_dimms; i++) | ||
212 | kfree(mci->dimms[i]); | ||
213 | kfree(mci->dimms); | ||
214 | } | ||
215 | if (mci->csrows) { | ||
216 | for (row = 0; row < tot_csrows; row++) { | ||
217 | csr = mci->csrows[row]; | ||
218 | if (csr) { | ||
219 | if (csr->channels) { | ||
220 | for (chn = 0; chn < tot_channels; chn++) | ||
221 | kfree(csr->channels[chn]); | ||
222 | kfree(csr->channels); | ||
223 | } | ||
224 | kfree(csr); | ||
225 | } | ||
226 | } | ||
227 | kfree(mci->csrows); | ||
228 | } | ||
229 | kfree(mci); | ||
230 | } | ||
231 | |||
232 | /** | 139 | /** |
233 | * edac_mc_alloc: Allocate and partially fill a struct mem_ctl_info structure | 140 | * edac_mc_alloc: Allocate a struct mem_ctl_info structure |
234 | * @mc_num: Memory controller number | 141 | * @size_pvt: size of private storage needed |
235 | * @n_layers: Number of MC hierarchy layers | 142 | * @nr_csrows: Number of CWROWS needed for this MC |
236 | * layers: Describes each layer as seen by the Memory Controller | 143 | * @nr_chans: Number of channels for the MC |
237 | * @size_pvt: size of private storage needed | ||
238 | * | ||
239 | * | 144 | * |
240 | * Everything is kmalloc'ed as one big chunk - more efficient. | 145 | * Everything is kmalloc'ed as one big chunk - more efficient. |
241 | * Only can be used if all structures have the same lifetime - otherwise | 146 | * Only can be used if all structures have the same lifetime - otherwise |
@@ -243,75 +148,32 @@ static void _edac_mc_free(struct mem_ctl_info *mci) | |||
243 | * | 148 | * |
244 | * Use edac_mc_free() to free mc structures allocated by this function. | 149 | * Use edac_mc_free() to free mc structures allocated by this function. |
245 | * | 150 | * |
246 | * NOTE: drivers handle multi-rank memories in different ways: in some | ||
247 | * drivers, one multi-rank memory stick is mapped as one entry, while, in | ||
248 | * others, a single multi-rank memory stick would be mapped into several | ||
249 | * entries. Currently, this function will allocate multiple struct dimm_info | ||
250 | * on such scenarios, as grouping the multiple ranks require drivers change. | ||
251 | * | ||
252 | * Returns: | 151 | * Returns: |
253 | * On failure: NULL | 152 | * NULL allocation failed |
254 | * On success: struct mem_ctl_info pointer | 153 | * struct mem_ctl_info pointer |
255 | */ | 154 | */ |
256 | struct mem_ctl_info *edac_mc_alloc(unsigned mc_num, | 155 | struct mem_ctl_info *edac_mc_alloc(unsigned sz_pvt, unsigned nr_csrows, |
257 | unsigned n_layers, | 156 | unsigned nr_chans, int edac_index) |
258 | struct edac_mc_layer *layers, | ||
259 | unsigned sz_pvt) | ||
260 | { | 157 | { |
261 | struct mem_ctl_info *mci; | 158 | struct mem_ctl_info *mci; |
262 | struct edac_mc_layer *layer; | 159 | struct csrow_info *csi, *csrow; |
263 | struct csrow_info *csr; | 160 | struct channel_info *chi, *chp, *chan; |
264 | struct rank_info *chan; | 161 | void *pvt; |
265 | struct dimm_info *dimm; | 162 | unsigned size; |
266 | u32 *ce_per_layer[EDAC_MAX_LAYERS], *ue_per_layer[EDAC_MAX_LAYERS]; | 163 | int row, chn; |
267 | unsigned pos[EDAC_MAX_LAYERS]; | 164 | int err; |
268 | unsigned size, tot_dimms = 1, count = 1; | ||
269 | unsigned tot_csrows = 1, tot_channels = 1, tot_errcount = 0; | ||
270 | void *pvt, *p, *ptr = NULL; | ||
271 | int i, j, row, chn, n, len, off; | ||
272 | bool per_rank = false; | ||
273 | |||
274 | BUG_ON(n_layers > EDAC_MAX_LAYERS || n_layers == 0); | ||
275 | /* | ||
276 | * Calculate the total amount of dimms and csrows/cschannels while | ||
277 | * in the old API emulation mode | ||
278 | */ | ||
279 | for (i = 0; i < n_layers; i++) { | ||
280 | tot_dimms *= layers[i].size; | ||
281 | if (layers[i].is_virt_csrow) | ||
282 | tot_csrows *= layers[i].size; | ||
283 | else | ||
284 | tot_channels *= layers[i].size; | ||
285 | |||
286 | if (layers[i].type == EDAC_MC_LAYER_CHIP_SELECT) | ||
287 | per_rank = true; | ||
288 | } | ||
289 | 165 | ||
290 | /* Figure out the offsets of the various items from the start of an mc | 166 | /* Figure out the offsets of the various items from the start of an mc |
291 | * structure. We want the alignment of each item to be at least as | 167 | * structure. We want the alignment of each item to be at least as |
292 | * stringent as what the compiler would provide if we could simply | 168 | * stringent as what the compiler would provide if we could simply |
293 | * hardcode everything into a single struct. | 169 | * hardcode everything into a single struct. |
294 | */ | 170 | */ |
295 | mci = edac_align_ptr(&ptr, sizeof(*mci), 1); | 171 | mci = (struct mem_ctl_info *)0; |
296 | layer = edac_align_ptr(&ptr, sizeof(*layer), n_layers); | 172 | csi = edac_align_ptr(&mci[1], sizeof(*csi)); |
297 | for (i = 0; i < n_layers; i++) { | 173 | chi = edac_align_ptr(&csi[nr_csrows], sizeof(*chi)); |
298 | count *= layers[i].size; | 174 | pvt = edac_align_ptr(&chi[nr_chans * nr_csrows], sz_pvt); |
299 | edac_dbg(4, "errcount layer %d size %d\n", i, count); | ||
300 | ce_per_layer[i] = edac_align_ptr(&ptr, sizeof(u32), count); | ||
301 | ue_per_layer[i] = edac_align_ptr(&ptr, sizeof(u32), count); | ||
302 | tot_errcount += 2 * count; | ||
303 | } | ||
304 | |||
305 | edac_dbg(4, "allocating %d error counters\n", tot_errcount); | ||
306 | pvt = edac_align_ptr(&ptr, sz_pvt, 1); | ||
307 | size = ((unsigned long)pvt) + sz_pvt; | 175 | size = ((unsigned long)pvt) + sz_pvt; |
308 | 176 | ||
309 | edac_dbg(1, "allocating %u bytes for mci data (%d %s, %d csrows/channels)\n", | ||
310 | size, | ||
311 | tot_dimms, | ||
312 | per_rank ? "ranks" : "dimms", | ||
313 | tot_csrows * tot_channels); | ||
314 | |||
315 | mci = kzalloc(size, GFP_KERNEL); | 177 | mci = kzalloc(size, GFP_KERNEL); |
316 | if (mci == NULL) | 178 | if (mci == NULL) |
317 | return NULL; | 179 | return NULL; |
@@ -319,141 +181,50 @@ struct mem_ctl_info *edac_mc_alloc(unsigned mc_num, | |||
319 | /* Adjust pointers so they point within the memory we just allocated | 181 | /* Adjust pointers so they point within the memory we just allocated |
320 | * rather than an imaginary chunk of memory located at address 0. | 182 | * rather than an imaginary chunk of memory located at address 0. |
321 | */ | 183 | */ |
322 | layer = (struct edac_mc_layer *)(((char *)mci) + ((unsigned long)layer)); | 184 | csi = (struct csrow_info *)(((char *)mci) + ((unsigned long)csi)); |
323 | for (i = 0; i < n_layers; i++) { | 185 | chi = (struct channel_info *)(((char *)mci) + ((unsigned long)chi)); |
324 | mci->ce_per_layer[i] = (u32 *)((char *)mci + ((unsigned long)ce_per_layer[i])); | ||
325 | mci->ue_per_layer[i] = (u32 *)((char *)mci + ((unsigned long)ue_per_layer[i])); | ||
326 | } | ||
327 | pvt = sz_pvt ? (((char *)mci) + ((unsigned long)pvt)) : NULL; | 186 | pvt = sz_pvt ? (((char *)mci) + ((unsigned long)pvt)) : NULL; |
328 | 187 | ||
329 | /* setup index and various internal pointers */ | 188 | /* setup index and various internal pointers */ |
330 | mci->mc_idx = mc_num; | 189 | mci->mc_idx = edac_index; |
331 | mci->tot_dimms = tot_dimms; | 190 | mci->csrows = csi; |
332 | mci->pvt_info = pvt; | 191 | mci->pvt_info = pvt; |
333 | mci->n_layers = n_layers; | 192 | mci->nr_csrows = nr_csrows; |
334 | mci->layers = layer; | 193 | |
335 | memcpy(mci->layers, layers, sizeof(*layer) * n_layers); | 194 | for (row = 0; row < nr_csrows; row++) { |
336 | mci->nr_csrows = tot_csrows; | 195 | csrow = &csi[row]; |
337 | mci->num_cschannel = tot_channels; | 196 | csrow->csrow_idx = row; |
338 | mci->mem_is_per_rank = per_rank; | 197 | csrow->mci = mci; |
339 | 198 | csrow->nr_channels = nr_chans; | |
340 | /* | 199 | chp = &chi[row * nr_chans]; |
341 | * Alocate and fill the csrow/channels structs | 200 | csrow->channels = chp; |
342 | */ | 201 | |
343 | mci->csrows = kcalloc(sizeof(*mci->csrows), tot_csrows, GFP_KERNEL); | 202 | for (chn = 0; chn < nr_chans; chn++) { |
344 | if (!mci->csrows) | 203 | chan = &chp[chn]; |
345 | goto error; | ||
346 | for (row = 0; row < tot_csrows; row++) { | ||
347 | csr = kzalloc(sizeof(**mci->csrows), GFP_KERNEL); | ||
348 | if (!csr) | ||
349 | goto error; | ||
350 | mci->csrows[row] = csr; | ||
351 | csr->csrow_idx = row; | ||
352 | csr->mci = mci; | ||
353 | csr->nr_channels = tot_channels; | ||
354 | csr->channels = kcalloc(sizeof(*csr->channels), tot_channels, | ||
355 | GFP_KERNEL); | ||
356 | if (!csr->channels) | ||
357 | goto error; | ||
358 | |||
359 | for (chn = 0; chn < tot_channels; chn++) { | ||
360 | chan = kzalloc(sizeof(**csr->channels), GFP_KERNEL); | ||
361 | if (!chan) | ||
362 | goto error; | ||
363 | csr->channels[chn] = chan; | ||
364 | chan->chan_idx = chn; | 204 | chan->chan_idx = chn; |
365 | chan->csrow = csr; | 205 | chan->csrow = csrow; |
366 | } | 206 | } |
367 | } | 207 | } |
368 | 208 | ||
209 | mci->op_state = OP_ALLOC; | ||
210 | INIT_LIST_HEAD(&mci->grp_kobj_list); | ||
211 | |||
369 | /* | 212 | /* |
370 | * Allocate and fill the dimm structs | 213 | * Initialize the 'root' kobj for the edac_mc controller |
371 | */ | 214 | */ |
372 | mci->dimms = kcalloc(sizeof(*mci->dimms), tot_dimms, GFP_KERNEL); | 215 | err = edac_mc_register_sysfs_main_kobj(mci); |
373 | if (!mci->dimms) | 216 | if (err) { |
374 | goto error; | 217 | kfree(mci); |
375 | 218 | return NULL; | |
376 | memset(&pos, 0, sizeof(pos)); | ||
377 | row = 0; | ||
378 | chn = 0; | ||
379 | for (i = 0; i < tot_dimms; i++) { | ||
380 | chan = mci->csrows[row]->channels[chn]; | ||
381 | off = EDAC_DIMM_OFF(layer, n_layers, pos[0], pos[1], pos[2]); | ||
382 | if (off < 0 || off >= tot_dimms) { | ||
383 | edac_mc_printk(mci, KERN_ERR, "EDAC core bug: EDAC_DIMM_OFF is trying to do an illegal data access\n"); | ||
384 | goto error; | ||
385 | } | ||
386 | |||
387 | dimm = kzalloc(sizeof(**mci->dimms), GFP_KERNEL); | ||
388 | if (!dimm) | ||
389 | goto error; | ||
390 | mci->dimms[off] = dimm; | ||
391 | dimm->mci = mci; | ||
392 | |||
393 | /* | ||
394 | * Copy DIMM location and initialize it. | ||
395 | */ | ||
396 | len = sizeof(dimm->label); | ||
397 | p = dimm->label; | ||
398 | n = snprintf(p, len, "mc#%u", mc_num); | ||
399 | p += n; | ||
400 | len -= n; | ||
401 | for (j = 0; j < n_layers; j++) { | ||
402 | n = snprintf(p, len, "%s#%u", | ||
403 | edac_layer_name[layers[j].type], | ||
404 | pos[j]); | ||
405 | p += n; | ||
406 | len -= n; | ||
407 | dimm->location[j] = pos[j]; | ||
408 | |||
409 | if (len <= 0) | ||
410 | break; | ||
411 | } | ||
412 | |||
413 | /* Link it to the csrows old API data */ | ||
414 | chan->dimm = dimm; | ||
415 | dimm->csrow = row; | ||
416 | dimm->cschannel = chn; | ||
417 | |||
418 | /* Increment csrow location */ | ||
419 | if (layers[0].is_virt_csrow) { | ||
420 | chn++; | ||
421 | if (chn == tot_channels) { | ||
422 | chn = 0; | ||
423 | row++; | ||
424 | } | ||
425 | } else { | ||
426 | row++; | ||
427 | if (row == tot_csrows) { | ||
428 | row = 0; | ||
429 | chn++; | ||
430 | } | ||
431 | } | ||
432 | |||
433 | /* Increment dimm location */ | ||
434 | for (j = n_layers - 1; j >= 0; j--) { | ||
435 | pos[j]++; | ||
436 | if (pos[j] < layers[j].size) | ||
437 | break; | ||
438 | pos[j] = 0; | ||
439 | } | ||
440 | } | 219 | } |
441 | 220 | ||
442 | mci->op_state = OP_ALLOC; | ||
443 | |||
444 | /* at this point, the root kobj is valid, and in order to | 221 | /* at this point, the root kobj is valid, and in order to |
445 | * 'free' the object, then the function: | 222 | * 'free' the object, then the function: |
446 | * edac_mc_unregister_sysfs_main_kobj() must be called | 223 | * edac_mc_unregister_sysfs_main_kobj() must be called |
447 | * which will perform kobj unregistration and the actual free | 224 | * which will perform kobj unregistration and the actual free |
448 | * will occur during the kobject callback operation | 225 | * will occur during the kobject callback operation |
449 | */ | 226 | */ |
450 | |||
451 | return mci; | 227 | return mci; |
452 | |||
453 | error: | ||
454 | _edac_mc_free(mci); | ||
455 | |||
456 | return NULL; | ||
457 | } | 228 | } |
458 | EXPORT_SYMBOL_GPL(edac_mc_alloc); | 229 | EXPORT_SYMBOL_GPL(edac_mc_alloc); |
459 | 230 | ||
@@ -464,18 +235,12 @@ EXPORT_SYMBOL_GPL(edac_mc_alloc); | |||
464 | */ | 235 | */ |
465 | void edac_mc_free(struct mem_ctl_info *mci) | 236 | void edac_mc_free(struct mem_ctl_info *mci) |
466 | { | 237 | { |
467 | edac_dbg(1, "\n"); | 238 | debugf1("%s()\n", __func__); |
468 | 239 | ||
469 | /* If we're not yet registered with sysfs free only what was allocated | 240 | edac_mc_unregister_sysfs_main_kobj(mci); |
470 | * in edac_mc_alloc(). | ||
471 | */ | ||
472 | if (!device_is_registered(&mci->dev)) { | ||
473 | _edac_mc_free(mci); | ||
474 | return; | ||
475 | } | ||
476 | 241 | ||
477 | /* the mci instance is freed here, when the sysfs object is dropped */ | 242 | /* free the mci instance memory here */ |
478 | edac_unregister_sysfs(mci); | 243 | kfree(mci); |
479 | } | 244 | } |
480 | EXPORT_SYMBOL_GPL(edac_mc_free); | 245 | EXPORT_SYMBOL_GPL(edac_mc_free); |
481 | 246 | ||
@@ -492,12 +257,12 @@ struct mem_ctl_info *find_mci_by_dev(struct device *dev) | |||
492 | struct mem_ctl_info *mci; | 257 | struct mem_ctl_info *mci; |
493 | struct list_head *item; | 258 | struct list_head *item; |
494 | 259 | ||
495 | edac_dbg(3, "\n"); | 260 | debugf3("%s()\n", __func__); |
496 | 261 | ||
497 | list_for_each(item, &mc_devices) { | 262 | list_for_each(item, &mc_devices) { |
498 | mci = list_entry(item, struct mem_ctl_info, link); | 263 | mci = list_entry(item, struct mem_ctl_info, link); |
499 | 264 | ||
500 | if (mci->pdev == dev) | 265 | if (mci->dev == dev) |
501 | return mci; | 266 | return mci; |
502 | } | 267 | } |
503 | 268 | ||
@@ -560,14 +325,14 @@ static void edac_mc_workq_function(struct work_struct *work_req) | |||
560 | */ | 325 | */ |
561 | static void edac_mc_workq_setup(struct mem_ctl_info *mci, unsigned msec) | 326 | static void edac_mc_workq_setup(struct mem_ctl_info *mci, unsigned msec) |
562 | { | 327 | { |
563 | edac_dbg(0, "\n"); | 328 | debugf0("%s()\n", __func__); |
564 | 329 | ||
565 | /* if this instance is not in the POLL state, then simply return */ | 330 | /* if this instance is not in the POLL state, then simply return */ |
566 | if (mci->op_state != OP_RUNNING_POLL) | 331 | if (mci->op_state != OP_RUNNING_POLL) |
567 | return; | 332 | return; |
568 | 333 | ||
569 | INIT_DELAYED_WORK(&mci->work, edac_mc_workq_function); | 334 | INIT_DELAYED_WORK(&mci->work, edac_mc_workq_function); |
570 | mod_delayed_work(edac_workqueue, &mci->work, msecs_to_jiffies(msec)); | 335 | queue_delayed_work(edac_workqueue, &mci->work, msecs_to_jiffies(msec)); |
571 | } | 336 | } |
572 | 337 | ||
573 | /* | 338 | /* |
@@ -587,7 +352,8 @@ static void edac_mc_workq_teardown(struct mem_ctl_info *mci) | |||
587 | 352 | ||
588 | status = cancel_delayed_work(&mci->work); | 353 | status = cancel_delayed_work(&mci->work); |
589 | if (status == 0) { | 354 | if (status == 0) { |
590 | edac_dbg(0, "not canceled, flush the queue\n"); | 355 | debugf0("%s() not canceled, flush the queue\n", |
356 | __func__); | ||
591 | 357 | ||
592 | /* workq instance might be running, wait for it */ | 358 | /* workq instance might be running, wait for it */ |
593 | flush_workqueue(edac_workqueue); | 359 | flush_workqueue(edac_workqueue); |
@@ -607,6 +373,21 @@ void edac_mc_reset_delay_period(int value) | |||
607 | 373 | ||
608 | mutex_lock(&mem_ctls_mutex); | 374 | mutex_lock(&mem_ctls_mutex); |
609 | 375 | ||
376 | /* scan the list and turn off all workq timers, doing so under lock | ||
377 | */ | ||
378 | list_for_each(item, &mc_devices) { | ||
379 | mci = list_entry(item, struct mem_ctl_info, link); | ||
380 | |||
381 | if (mci->op_state == OP_RUNNING_POLL) | ||
382 | cancel_delayed_work(&mci->work); | ||
383 | } | ||
384 | |||
385 | mutex_unlock(&mem_ctls_mutex); | ||
386 | |||
387 | |||
388 | /* re-walk the list, and reset the poll delay */ | ||
389 | mutex_lock(&mem_ctls_mutex); | ||
390 | |||
610 | list_for_each(item, &mc_devices) { | 391 | list_for_each(item, &mc_devices) { |
611 | mci = list_entry(item, struct mem_ctl_info, link); | 392 | mci = list_entry(item, struct mem_ctl_info, link); |
612 | 393 | ||
@@ -633,7 +414,7 @@ static int add_mc_to_global_list(struct mem_ctl_info *mci) | |||
633 | 414 | ||
634 | insert_before = &mc_devices; | 415 | insert_before = &mc_devices; |
635 | 416 | ||
636 | p = find_mci_by_dev(mci->pdev); | 417 | p = find_mci_by_dev(mci->dev); |
637 | if (unlikely(p != NULL)) | 418 | if (unlikely(p != NULL)) |
638 | goto fail0; | 419 | goto fail0; |
639 | 420 | ||
@@ -655,7 +436,7 @@ static int add_mc_to_global_list(struct mem_ctl_info *mci) | |||
655 | 436 | ||
656 | fail0: | 437 | fail0: |
657 | edac_printk(KERN_WARNING, EDAC_MC, | 438 | edac_printk(KERN_WARNING, EDAC_MC, |
658 | "%s (%s) %s %s already assigned %d\n", dev_name(p->pdev), | 439 | "%s (%s) %s %s already assigned %d\n", dev_name(p->dev), |
659 | edac_dev_name(mci), p->mod_name, p->ctl_name, p->mc_idx); | 440 | edac_dev_name(mci), p->mod_name, p->ctl_name, p->mc_idx); |
660 | return 1; | 441 | return 1; |
661 | 442 | ||
@@ -710,6 +491,7 @@ EXPORT_SYMBOL(edac_mc_find); | |||
710 | * edac_mc_add_mc: Insert the 'mci' structure into the mci global list and | 491 | * edac_mc_add_mc: Insert the 'mci' structure into the mci global list and |
711 | * create sysfs entries associated with mci structure | 492 | * create sysfs entries associated with mci structure |
712 | * @mci: pointer to the mci structure to be added to the list | 493 | * @mci: pointer to the mci structure to be added to the list |
494 | * @mc_idx: A unique numeric identifier to be assigned to the 'mci' structure. | ||
713 | * | 495 | * |
714 | * Return: | 496 | * Return: |
715 | * 0 Success | 497 | * 0 Success |
@@ -719,7 +501,7 @@ EXPORT_SYMBOL(edac_mc_find); | |||
719 | /* FIXME - should a warning be printed if no error detection? correction? */ | 501 | /* FIXME - should a warning be printed if no error detection? correction? */ |
720 | int edac_mc_add_mc(struct mem_ctl_info *mci) | 502 | int edac_mc_add_mc(struct mem_ctl_info *mci) |
721 | { | 503 | { |
722 | edac_dbg(0, "\n"); | 504 | debugf0("%s()\n", __func__); |
723 | 505 | ||
724 | #ifdef CONFIG_EDAC_DEBUG | 506 | #ifdef CONFIG_EDAC_DEBUG |
725 | if (edac_debug_level >= 3) | 507 | if (edac_debug_level >= 3) |
@@ -729,22 +511,13 @@ int edac_mc_add_mc(struct mem_ctl_info *mci) | |||
729 | int i; | 511 | int i; |
730 | 512 | ||
731 | for (i = 0; i < mci->nr_csrows; i++) { | 513 | for (i = 0; i < mci->nr_csrows; i++) { |
732 | struct csrow_info *csrow = mci->csrows[i]; | ||
733 | u32 nr_pages = 0; | ||
734 | int j; | 514 | int j; |
735 | 515 | ||
736 | for (j = 0; j < csrow->nr_channels; j++) | 516 | edac_mc_dump_csrow(&mci->csrows[i]); |
737 | nr_pages += csrow->channels[j]->dimm->nr_pages; | 517 | for (j = 0; j < mci->csrows[i].nr_channels; j++) |
738 | if (!nr_pages) | 518 | edac_mc_dump_channel(&mci->csrows[i]. |
739 | continue; | 519 | channels[j]); |
740 | edac_mc_dump_csrow(csrow); | ||
741 | for (j = 0; j < csrow->nr_channels; j++) | ||
742 | if (csrow->channels[j]->dimm->nr_pages) | ||
743 | edac_mc_dump_channel(csrow->channels[j]); | ||
744 | } | 520 | } |
745 | for (i = 0; i < mci->tot_dimms; i++) | ||
746 | if (mci->dimms[i]->nr_pages) | ||
747 | edac_mc_dump_dimm(mci->dimms[i], i); | ||
748 | } | 521 | } |
749 | #endif | 522 | #endif |
750 | mutex_lock(&mem_ctls_mutex); | 523 | mutex_lock(&mem_ctls_mutex); |
@@ -798,7 +571,7 @@ struct mem_ctl_info *edac_mc_del_mc(struct device *dev) | |||
798 | { | 571 | { |
799 | struct mem_ctl_info *mci; | 572 | struct mem_ctl_info *mci; |
800 | 573 | ||
801 | edac_dbg(0, "\n"); | 574 | debugf0("%s()\n", __func__); |
802 | 575 | ||
803 | mutex_lock(&mem_ctls_mutex); | 576 | mutex_lock(&mem_ctls_mutex); |
804 | 577 | ||
@@ -836,7 +609,7 @@ static void edac_mc_scrub_block(unsigned long page, unsigned long offset, | |||
836 | void *virt_addr; | 609 | void *virt_addr; |
837 | unsigned long flags = 0; | 610 | unsigned long flags = 0; |
838 | 611 | ||
839 | edac_dbg(3, "\n"); | 612 | debugf3("%s()\n", __func__); |
840 | 613 | ||
841 | /* ECC error page was not in our memory. Ignore it. */ | 614 | /* ECC error page was not in our memory. Ignore it. */ |
842 | if (!pfn_valid(page)) | 615 | if (!pfn_valid(page)) |
@@ -848,13 +621,13 @@ static void edac_mc_scrub_block(unsigned long page, unsigned long offset, | |||
848 | if (PageHighMem(pg)) | 621 | if (PageHighMem(pg)) |
849 | local_irq_save(flags); | 622 | local_irq_save(flags); |
850 | 623 | ||
851 | virt_addr = kmap_atomic(pg); | 624 | virt_addr = kmap_atomic(pg, KM_BOUNCE_READ); |
852 | 625 | ||
853 | /* Perform architecture specific atomic scrub operation */ | 626 | /* Perform architecture specific atomic scrub operation */ |
854 | atomic_scrub(virt_addr + offset, size); | 627 | atomic_scrub(virt_addr + offset, size); |
855 | 628 | ||
856 | /* Unmap and complete */ | 629 | /* Unmap and complete */ |
857 | kunmap_atomic(virt_addr); | 630 | kunmap_atomic(virt_addr, KM_BOUNCE_READ); |
858 | 631 | ||
859 | if (PageHighMem(pg)) | 632 | if (PageHighMem(pg)) |
860 | local_irq_restore(flags); | 633 | local_irq_restore(flags); |
@@ -863,26 +636,22 @@ static void edac_mc_scrub_block(unsigned long page, unsigned long offset, | |||
863 | /* FIXME - should return -1 */ | 636 | /* FIXME - should return -1 */ |
864 | int edac_mc_find_csrow_by_page(struct mem_ctl_info *mci, unsigned long page) | 637 | int edac_mc_find_csrow_by_page(struct mem_ctl_info *mci, unsigned long page) |
865 | { | 638 | { |
866 | struct csrow_info **csrows = mci->csrows; | 639 | struct csrow_info *csrows = mci->csrows; |
867 | int row, i, j, n; | 640 | int row, i; |
868 | 641 | ||
869 | edac_dbg(1, "MC%d: 0x%lx\n", mci->mc_idx, page); | 642 | debugf1("MC%d: %s(): 0x%lx\n", mci->mc_idx, __func__, page); |
870 | row = -1; | 643 | row = -1; |
871 | 644 | ||
872 | for (i = 0; i < mci->nr_csrows; i++) { | 645 | for (i = 0; i < mci->nr_csrows; i++) { |
873 | struct csrow_info *csrow = csrows[i]; | 646 | struct csrow_info *csrow = &csrows[i]; |
874 | n = 0; | 647 | |
875 | for (j = 0; j < csrow->nr_channels; j++) { | 648 | if (csrow->nr_pages == 0) |
876 | struct dimm_info *dimm = csrow->channels[j]->dimm; | ||
877 | n += dimm->nr_pages; | ||
878 | } | ||
879 | if (n == 0) | ||
880 | continue; | 649 | continue; |
881 | 650 | ||
882 | edac_dbg(3, "MC%d: first(0x%lx) page(0x%lx) last(0x%lx) mask(0x%lx)\n", | 651 | debugf3("MC%d: %s(): first(0x%lx) page(0x%lx) last(0x%lx) " |
883 | mci->mc_idx, | 652 | "mask(0x%lx)\n", mci->mc_idx, __func__, |
884 | csrow->first_page, page, csrow->last_page, | 653 | csrow->first_page, page, csrow->last_page, |
885 | csrow->page_mask); | 654 | csrow->page_mask); |
886 | 655 | ||
887 | if ((page >= csrow->first_page) && | 656 | if ((page >= csrow->first_page) && |
888 | (page <= csrow->last_page) && | 657 | (page <= csrow->last_page) && |
@@ -902,345 +671,249 @@ int edac_mc_find_csrow_by_page(struct mem_ctl_info *mci, unsigned long page) | |||
902 | } | 671 | } |
903 | EXPORT_SYMBOL_GPL(edac_mc_find_csrow_by_page); | 672 | EXPORT_SYMBOL_GPL(edac_mc_find_csrow_by_page); |
904 | 673 | ||
905 | const char *edac_layer_name[] = { | 674 | /* FIXME - setable log (warning/emerg) levels */ |
906 | [EDAC_MC_LAYER_BRANCH] = "branch", | 675 | /* FIXME - integrate with evlog: http://evlog.sourceforge.net/ */ |
907 | [EDAC_MC_LAYER_CHANNEL] = "channel", | 676 | void edac_mc_handle_ce(struct mem_ctl_info *mci, |
908 | [EDAC_MC_LAYER_SLOT] = "slot", | 677 | unsigned long page_frame_number, |
909 | [EDAC_MC_LAYER_CHIP_SELECT] = "csrow", | 678 | unsigned long offset_in_page, unsigned long syndrome, |
910 | }; | 679 | int row, int channel, const char *msg) |
911 | EXPORT_SYMBOL_GPL(edac_layer_name); | ||
912 | |||
913 | static void edac_inc_ce_error(struct mem_ctl_info *mci, | ||
914 | bool enable_per_layer_report, | ||
915 | const int pos[EDAC_MAX_LAYERS], | ||
916 | const u16 count) | ||
917 | { | 680 | { |
918 | int i, index = 0; | 681 | unsigned long remapped_page; |
919 | 682 | ||
920 | mci->ce_mc += count; | 683 | debugf3("MC%d: %s()\n", mci->mc_idx, __func__); |
921 | 684 | ||
922 | if (!enable_per_layer_report) { | 685 | /* FIXME - maybe make panic on INTERNAL ERROR an option */ |
923 | mci->ce_noinfo_count += count; | 686 | if (row >= mci->nr_csrows || row < 0) { |
687 | /* something is wrong */ | ||
688 | edac_mc_printk(mci, KERN_ERR, | ||
689 | "INTERNAL ERROR: row out of range " | ||
690 | "(%d >= %d)\n", row, mci->nr_csrows); | ||
691 | edac_mc_handle_ce_no_info(mci, "INTERNAL ERROR"); | ||
924 | return; | 692 | return; |
925 | } | 693 | } |
926 | 694 | ||
927 | for (i = 0; i < mci->n_layers; i++) { | 695 | if (channel >= mci->csrows[row].nr_channels || channel < 0) { |
928 | if (pos[i] < 0) | 696 | /* something is wrong */ |
929 | break; | 697 | edac_mc_printk(mci, KERN_ERR, |
930 | index += pos[i]; | 698 | "INTERNAL ERROR: channel out of range " |
931 | mci->ce_per_layer[i][index] += count; | 699 | "(%d >= %d)\n", channel, |
932 | 700 | mci->csrows[row].nr_channels); | |
933 | if (i < mci->n_layers - 1) | 701 | edac_mc_handle_ce_no_info(mci, "INTERNAL ERROR"); |
934 | index *= mci->layers[i + 1].size; | ||
935 | } | ||
936 | } | ||
937 | |||
938 | static void edac_inc_ue_error(struct mem_ctl_info *mci, | ||
939 | bool enable_per_layer_report, | ||
940 | const int pos[EDAC_MAX_LAYERS], | ||
941 | const u16 count) | ||
942 | { | ||
943 | int i, index = 0; | ||
944 | |||
945 | mci->ue_mc += count; | ||
946 | |||
947 | if (!enable_per_layer_report) { | ||
948 | mci->ce_noinfo_count += count; | ||
949 | return; | 702 | return; |
950 | } | 703 | } |
951 | 704 | ||
952 | for (i = 0; i < mci->n_layers; i++) { | 705 | if (edac_mc_get_log_ce()) |
953 | if (pos[i] < 0) | 706 | /* FIXME - put in DIMM location */ |
954 | break; | 707 | edac_mc_printk(mci, KERN_WARNING, |
955 | index += pos[i]; | 708 | "CE page 0x%lx, offset 0x%lx, grain %d, syndrome " |
956 | mci->ue_per_layer[i][index] += count; | 709 | "0x%lx, row %d, channel %d, label \"%s\": %s\n", |
710 | page_frame_number, offset_in_page, | ||
711 | mci->csrows[row].grain, syndrome, row, channel, | ||
712 | mci->csrows[row].channels[channel].label, msg); | ||
957 | 713 | ||
958 | if (i < mci->n_layers - 1) | 714 | mci->ce_count++; |
959 | index *= mci->layers[i + 1].size; | 715 | mci->csrows[row].ce_count++; |
960 | } | 716 | mci->csrows[row].channels[channel].ce_count++; |
961 | } | ||
962 | |||
963 | static void edac_ce_error(struct mem_ctl_info *mci, | ||
964 | const u16 error_count, | ||
965 | const int pos[EDAC_MAX_LAYERS], | ||
966 | const char *msg, | ||
967 | const char *location, | ||
968 | const char *label, | ||
969 | const char *detail, | ||
970 | const char *other_detail, | ||
971 | const bool enable_per_layer_report, | ||
972 | const unsigned long page_frame_number, | ||
973 | const unsigned long offset_in_page, | ||
974 | long grain) | ||
975 | { | ||
976 | unsigned long remapped_page; | ||
977 | char *msg_aux = ""; | ||
978 | |||
979 | if (*msg) | ||
980 | msg_aux = " "; | ||
981 | |||
982 | if (edac_mc_get_log_ce()) { | ||
983 | if (other_detail && *other_detail) | ||
984 | edac_mc_printk(mci, KERN_WARNING, | ||
985 | "%d CE %s%son %s (%s %s - %s)\n", | ||
986 | error_count, msg, msg_aux, label, | ||
987 | location, detail, other_detail); | ||
988 | else | ||
989 | edac_mc_printk(mci, KERN_WARNING, | ||
990 | "%d CE %s%son %s (%s %s)\n", | ||
991 | error_count, msg, msg_aux, label, | ||
992 | location, detail); | ||
993 | } | ||
994 | edac_inc_ce_error(mci, enable_per_layer_report, pos, error_count); | ||
995 | 717 | ||
996 | if (mci->scrub_mode & SCRUB_SW_SRC) { | 718 | if (mci->scrub_mode & SCRUB_SW_SRC) { |
997 | /* | 719 | /* |
998 | * Some memory controllers (called MCs below) can remap | 720 | * Some MC's can remap memory so that it is still available |
999 | * memory so that it is still available at a different | 721 | * at a different address when PCI devices map into memory. |
1000 | * address when PCI devices map into memory. | 722 | * MC's that can't do this lose the memory where PCI devices |
1001 | * MC's that can't do this, lose the memory where PCI | 723 | * are mapped. This mapping is MC dependent and so we call |
1002 | * devices are mapped. This mapping is MC-dependent | 724 | * back into the MC driver for it to map the MC page to |
1003 | * and so we call back into the MC driver for it to | 725 | * a physical (CPU) page which can then be mapped to a virtual |
1004 | * map the MC page to a physical (CPU) page which can | 726 | * page - which can then be scrubbed. |
1005 | * then be mapped to a virtual page - which can then | 727 | */ |
1006 | * be scrubbed. | ||
1007 | */ | ||
1008 | remapped_page = mci->ctl_page_to_phys ? | 728 | remapped_page = mci->ctl_page_to_phys ? |
1009 | mci->ctl_page_to_phys(mci, page_frame_number) : | 729 | mci->ctl_page_to_phys(mci, page_frame_number) : |
1010 | page_frame_number; | 730 | page_frame_number; |
1011 | 731 | ||
1012 | edac_mc_scrub_block(remapped_page, | 732 | edac_mc_scrub_block(remapped_page, offset_in_page, |
1013 | offset_in_page, grain); | 733 | mci->csrows[row].grain); |
1014 | } | 734 | } |
1015 | } | 735 | } |
736 | EXPORT_SYMBOL_GPL(edac_mc_handle_ce); | ||
1016 | 737 | ||
1017 | static void edac_ue_error(struct mem_ctl_info *mci, | 738 | void edac_mc_handle_ce_no_info(struct mem_ctl_info *mci, const char *msg) |
1018 | const u16 error_count, | ||
1019 | const int pos[EDAC_MAX_LAYERS], | ||
1020 | const char *msg, | ||
1021 | const char *location, | ||
1022 | const char *label, | ||
1023 | const char *detail, | ||
1024 | const char *other_detail, | ||
1025 | const bool enable_per_layer_report) | ||
1026 | { | 739 | { |
1027 | char *msg_aux = ""; | 740 | if (edac_mc_get_log_ce()) |
1028 | 741 | edac_mc_printk(mci, KERN_WARNING, | |
1029 | if (*msg) | 742 | "CE - no information available: %s\n", msg); |
1030 | msg_aux = " "; | 743 | |
1031 | 744 | mci->ce_noinfo_count++; | |
1032 | if (edac_mc_get_log_ue()) { | 745 | mci->ce_count++; |
1033 | if (other_detail && *other_detail) | 746 | } |
1034 | edac_mc_printk(mci, KERN_WARNING, | 747 | EXPORT_SYMBOL_GPL(edac_mc_handle_ce_no_info); |
1035 | "%d UE %s%son %s (%s %s - %s)\n", | 748 | |
1036 | error_count, msg, msg_aux, label, | 749 | void edac_mc_handle_ue(struct mem_ctl_info *mci, |
1037 | location, detail, other_detail); | 750 | unsigned long page_frame_number, |
1038 | else | 751 | unsigned long offset_in_page, int row, const char *msg) |
1039 | edac_mc_printk(mci, KERN_WARNING, | 752 | { |
1040 | "%d UE %s%son %s (%s %s)\n", | 753 | int len = EDAC_MC_LABEL_LEN * 4; |
1041 | error_count, msg, msg_aux, label, | 754 | char labels[len + 1]; |
1042 | location, detail); | 755 | char *pos = labels; |
756 | int chan; | ||
757 | int chars; | ||
758 | |||
759 | debugf3("MC%d: %s()\n", mci->mc_idx, __func__); | ||
760 | |||
761 | /* FIXME - maybe make panic on INTERNAL ERROR an option */ | ||
762 | if (row >= mci->nr_csrows || row < 0) { | ||
763 | /* something is wrong */ | ||
764 | edac_mc_printk(mci, KERN_ERR, | ||
765 | "INTERNAL ERROR: row out of range " | ||
766 | "(%d >= %d)\n", row, mci->nr_csrows); | ||
767 | edac_mc_handle_ue_no_info(mci, "INTERNAL ERROR"); | ||
768 | return; | ||
1043 | } | 769 | } |
1044 | 770 | ||
1045 | if (edac_mc_get_panic_on_ue()) { | 771 | chars = snprintf(pos, len + 1, "%s", |
1046 | if (other_detail && *other_detail) | 772 | mci->csrows[row].channels[0].label); |
1047 | panic("UE %s%son %s (%s%s - %s)\n", | 773 | len -= chars; |
1048 | msg, msg_aux, label, location, detail, other_detail); | 774 | pos += chars; |
1049 | else | 775 | |
1050 | panic("UE %s%son %s (%s%s)\n", | 776 | for (chan = 1; (chan < mci->csrows[row].nr_channels) && (len > 0); |
1051 | msg, msg_aux, label, location, detail); | 777 | chan++) { |
778 | chars = snprintf(pos, len + 1, ":%s", | ||
779 | mci->csrows[row].channels[chan].label); | ||
780 | len -= chars; | ||
781 | pos += chars; | ||
1052 | } | 782 | } |
1053 | 783 | ||
1054 | edac_inc_ue_error(mci, enable_per_layer_report, pos, error_count); | 784 | if (edac_mc_get_log_ue()) |
785 | edac_mc_printk(mci, KERN_EMERG, | ||
786 | "UE page 0x%lx, offset 0x%lx, grain %d, row %d, " | ||
787 | "labels \"%s\": %s\n", page_frame_number, | ||
788 | offset_in_page, mci->csrows[row].grain, row, | ||
789 | labels, msg); | ||
790 | |||
791 | if (edac_mc_get_panic_on_ue()) | ||
792 | panic("EDAC MC%d: UE page 0x%lx, offset 0x%lx, grain %d, " | ||
793 | "row %d, labels \"%s\": %s\n", mci->mc_idx, | ||
794 | page_frame_number, offset_in_page, | ||
795 | mci->csrows[row].grain, row, labels, msg); | ||
796 | |||
797 | mci->ue_count++; | ||
798 | mci->csrows[row].ue_count++; | ||
1055 | } | 799 | } |
800 | EXPORT_SYMBOL_GPL(edac_mc_handle_ue); | ||
1056 | 801 | ||
1057 | #define OTHER_LABEL " or " | 802 | void edac_mc_handle_ue_no_info(struct mem_ctl_info *mci, const char *msg) |
803 | { | ||
804 | if (edac_mc_get_panic_on_ue()) | ||
805 | panic("EDAC MC%d: Uncorrected Error", mci->mc_idx); | ||
1058 | 806 | ||
1059 | /** | 807 | if (edac_mc_get_log_ue()) |
1060 | * edac_mc_handle_error - reports a memory event to userspace | 808 | edac_mc_printk(mci, KERN_WARNING, |
1061 | * | 809 | "UE - no information available: %s\n", msg); |
1062 | * @type: severity of the error (CE/UE/Fatal) | 810 | mci->ue_noinfo_count++; |
1063 | * @mci: a struct mem_ctl_info pointer | 811 | mci->ue_count++; |
1064 | * @error_count: Number of errors of the same type | 812 | } |
1065 | * @page_frame_number: mem page where the error occurred | 813 | EXPORT_SYMBOL_GPL(edac_mc_handle_ue_no_info); |
1066 | * @offset_in_page: offset of the error inside the page | 814 | |
1067 | * @syndrome: ECC syndrome | 815 | /************************************************************* |
1068 | * @top_layer: Memory layer[0] position | 816 | * On Fully Buffered DIMM modules, this help function is |
1069 | * @mid_layer: Memory layer[1] position | 817 | * called to process UE events |
1070 | * @low_layer: Memory layer[2] position | ||
1071 | * @msg: Message meaningful to the end users that | ||
1072 | * explains the event | ||
1073 | * @other_detail: Technical details about the event that | ||
1074 | * may help hardware manufacturers and | ||
1075 | * EDAC developers to analyse the event | ||
1076 | */ | 818 | */ |
1077 | void edac_mc_handle_error(const enum hw_event_mc_err_type type, | 819 | void edac_mc_handle_fbd_ue(struct mem_ctl_info *mci, |
1078 | struct mem_ctl_info *mci, | 820 | unsigned int csrow, |
1079 | const u16 error_count, | 821 | unsigned int channela, |
1080 | const unsigned long page_frame_number, | 822 | unsigned int channelb, char *msg) |
1081 | const unsigned long offset_in_page, | ||
1082 | const unsigned long syndrome, | ||
1083 | const int top_layer, | ||
1084 | const int mid_layer, | ||
1085 | const int low_layer, | ||
1086 | const char *msg, | ||
1087 | const char *other_detail) | ||
1088 | { | 823 | { |
1089 | /* FIXME: too much for stack: move it to some pre-alocated area */ | 824 | int len = EDAC_MC_LABEL_LEN * 4; |
1090 | char detail[80], location[80]; | 825 | char labels[len + 1]; |
1091 | char label[(EDAC_MC_LABEL_LEN + 1 + sizeof(OTHER_LABEL)) * mci->tot_dimms]; | 826 | char *pos = labels; |
1092 | char *p; | 827 | int chars; |
1093 | int row = -1, chan = -1; | ||
1094 | int pos[EDAC_MAX_LAYERS] = { top_layer, mid_layer, low_layer }; | ||
1095 | int i; | ||
1096 | long grain; | ||
1097 | bool enable_per_layer_report = false; | ||
1098 | u8 grain_bits; | ||
1099 | |||
1100 | edac_dbg(3, "MC%d\n", mci->mc_idx); | ||
1101 | 828 | ||
1102 | /* | 829 | if (csrow >= mci->nr_csrows) { |
1103 | * Check if the event report is consistent and if the memory | 830 | /* something is wrong */ |
1104 | * location is known. If it is known, enable_per_layer_report will be | 831 | edac_mc_printk(mci, KERN_ERR, |
1105 | * true, the DIMM(s) label info will be filled and the per-layer | 832 | "INTERNAL ERROR: row out of range (%d >= %d)\n", |
1106 | * error counters will be incremented. | 833 | csrow, mci->nr_csrows); |
1107 | */ | 834 | edac_mc_handle_ue_no_info(mci, "INTERNAL ERROR"); |
1108 | for (i = 0; i < mci->n_layers; i++) { | 835 | return; |
1109 | if (pos[i] >= (int)mci->layers[i].size) { | ||
1110 | |||
1111 | edac_mc_printk(mci, KERN_ERR, | ||
1112 | "INTERNAL ERROR: %s value is out of range (%d >= %d)\n", | ||
1113 | edac_layer_name[mci->layers[i].type], | ||
1114 | pos[i], mci->layers[i].size); | ||
1115 | /* | ||
1116 | * Instead of just returning it, let's use what's | ||
1117 | * known about the error. The increment routines and | ||
1118 | * the DIMM filter logic will do the right thing by | ||
1119 | * pointing the likely damaged DIMMs. | ||
1120 | */ | ||
1121 | pos[i] = -1; | ||
1122 | } | ||
1123 | if (pos[i] >= 0) | ||
1124 | enable_per_layer_report = true; | ||
1125 | } | 836 | } |
1126 | 837 | ||
1127 | /* | 838 | if (channela >= mci->csrows[csrow].nr_channels) { |
1128 | * Get the dimm label/grain that applies to the match criteria. | 839 | /* something is wrong */ |
1129 | * As the error algorithm may not be able to point to just one memory | 840 | edac_mc_printk(mci, KERN_ERR, |
1130 | * stick, the logic here will get all possible labels that could | 841 | "INTERNAL ERROR: channel-a out of range " |
1131 | * pottentially be affected by the error. | 842 | "(%d >= %d)\n", |
1132 | * On FB-DIMM memory controllers, for uncorrected errors, it is common | 843 | channela, mci->csrows[csrow].nr_channels); |
1133 | * to have only the MC channel and the MC dimm (also called "branch") | 844 | edac_mc_handle_ue_no_info(mci, "INTERNAL ERROR"); |
1134 | * but the channel is not known, as the memory is arranged in pairs, | 845 | return; |
1135 | * where each memory belongs to a separate channel within the same | 846 | } |
1136 | * branch. | ||
1137 | */ | ||
1138 | grain = 0; | ||
1139 | p = label; | ||
1140 | *p = '\0'; | ||
1141 | 847 | ||
1142 | for (i = 0; i < mci->tot_dimms; i++) { | 848 | if (channelb >= mci->csrows[csrow].nr_channels) { |
1143 | struct dimm_info *dimm = mci->dimms[i]; | 849 | /* something is wrong */ |
850 | edac_mc_printk(mci, KERN_ERR, | ||
851 | "INTERNAL ERROR: channel-b out of range " | ||
852 | "(%d >= %d)\n", | ||
853 | channelb, mci->csrows[csrow].nr_channels); | ||
854 | edac_mc_handle_ue_no_info(mci, "INTERNAL ERROR"); | ||
855 | return; | ||
856 | } | ||
1144 | 857 | ||
1145 | if (top_layer >= 0 && top_layer != dimm->location[0]) | 858 | mci->ue_count++; |
1146 | continue; | 859 | mci->csrows[csrow].ue_count++; |
1147 | if (mid_layer >= 0 && mid_layer != dimm->location[1]) | ||
1148 | continue; | ||
1149 | if (low_layer >= 0 && low_layer != dimm->location[2]) | ||
1150 | continue; | ||
1151 | 860 | ||
1152 | /* get the max grain, over the error match range */ | 861 | /* Generate the DIMM labels from the specified channels */ |
1153 | if (dimm->grain > grain) | 862 | chars = snprintf(pos, len + 1, "%s", |
1154 | grain = dimm->grain; | 863 | mci->csrows[csrow].channels[channela].label); |
864 | len -= chars; | ||
865 | pos += chars; | ||
866 | chars = snprintf(pos, len + 1, "-%s", | ||
867 | mci->csrows[csrow].channels[channelb].label); | ||
1155 | 868 | ||
1156 | /* | 869 | if (edac_mc_get_log_ue()) |
1157 | * If the error is memory-controller wide, there's no need to | 870 | edac_mc_printk(mci, KERN_EMERG, |
1158 | * seek for the affected DIMMs because the whole | 871 | "UE row %d, channel-a= %d channel-b= %d " |
1159 | * channel/memory controller/... may be affected. | 872 | "labels \"%s\": %s\n", csrow, channela, channelb, |
1160 | * Also, don't show errors for empty DIMM slots. | 873 | labels, msg); |
1161 | */ | ||
1162 | if (enable_per_layer_report && dimm->nr_pages) { | ||
1163 | if (p != label) { | ||
1164 | strcpy(p, OTHER_LABEL); | ||
1165 | p += strlen(OTHER_LABEL); | ||
1166 | } | ||
1167 | strcpy(p, dimm->label); | ||
1168 | p += strlen(p); | ||
1169 | *p = '\0'; | ||
1170 | |||
1171 | /* | ||
1172 | * get csrow/channel of the DIMM, in order to allow | ||
1173 | * incrementing the compat API counters | ||
1174 | */ | ||
1175 | edac_dbg(4, "%s csrows map: (%d,%d)\n", | ||
1176 | mci->mem_is_per_rank ? "rank" : "dimm", | ||
1177 | dimm->csrow, dimm->cschannel); | ||
1178 | if (row == -1) | ||
1179 | row = dimm->csrow; | ||
1180 | else if (row >= 0 && row != dimm->csrow) | ||
1181 | row = -2; | ||
1182 | |||
1183 | if (chan == -1) | ||
1184 | chan = dimm->cschannel; | ||
1185 | else if (chan >= 0 && chan != dimm->cschannel) | ||
1186 | chan = -2; | ||
1187 | } | ||
1188 | } | ||
1189 | 874 | ||
1190 | if (!enable_per_layer_report) { | 875 | if (edac_mc_get_panic_on_ue()) |
1191 | strcpy(label, "any memory"); | 876 | panic("UE row %d, channel-a= %d channel-b= %d " |
1192 | } else { | 877 | "labels \"%s\": %s\n", csrow, channela, |
1193 | edac_dbg(4, "csrow/channel to increment: (%d,%d)\n", row, chan); | 878 | channelb, labels, msg); |
1194 | if (p == label) | 879 | } |
1195 | strcpy(label, "unknown memory"); | 880 | EXPORT_SYMBOL(edac_mc_handle_fbd_ue); |
1196 | if (type == HW_EVENT_ERR_CORRECTED) { | ||
1197 | if (row >= 0) { | ||
1198 | mci->csrows[row]->ce_count += error_count; | ||
1199 | if (chan >= 0) | ||
1200 | mci->csrows[row]->channels[chan]->ce_count += error_count; | ||
1201 | } | ||
1202 | } else | ||
1203 | if (row >= 0) | ||
1204 | mci->csrows[row]->ue_count += error_count; | ||
1205 | } | ||
1206 | |||
1207 | /* Fill the RAM location data */ | ||
1208 | p = location; | ||
1209 | 881 | ||
1210 | for (i = 0; i < mci->n_layers; i++) { | 882 | /************************************************************* |
1211 | if (pos[i] < 0) | 883 | * On Fully Buffered DIMM modules, this help function is |
1212 | continue; | 884 | * called to process CE events |
885 | */ | ||
886 | void edac_mc_handle_fbd_ce(struct mem_ctl_info *mci, | ||
887 | unsigned int csrow, unsigned int channel, char *msg) | ||
888 | { | ||
1213 | 889 | ||
1214 | p += sprintf(p, "%s:%d ", | 890 | /* Ensure boundary values */ |
1215 | edac_layer_name[mci->layers[i].type], | 891 | if (csrow >= mci->nr_csrows) { |
1216 | pos[i]); | 892 | /* something is wrong */ |
893 | edac_mc_printk(mci, KERN_ERR, | ||
894 | "INTERNAL ERROR: row out of range (%d >= %d)\n", | ||
895 | csrow, mci->nr_csrows); | ||
896 | edac_mc_handle_ce_no_info(mci, "INTERNAL ERROR"); | ||
897 | return; | ||
1217 | } | 898 | } |
1218 | if (p > location) | 899 | if (channel >= mci->csrows[csrow].nr_channels) { |
1219 | *(p - 1) = '\0'; | 900 | /* something is wrong */ |
1220 | 901 | edac_mc_printk(mci, KERN_ERR, | |
1221 | /* Report the error via the trace interface */ | 902 | "INTERNAL ERROR: channel out of range (%d >= %d)\n", |
1222 | grain_bits = fls_long(grain) + 1; | 903 | channel, mci->csrows[csrow].nr_channels); |
1223 | trace_mc_event(type, msg, label, error_count, | 904 | edac_mc_handle_ce_no_info(mci, "INTERNAL ERROR"); |
1224 | mci->mc_idx, top_layer, mid_layer, low_layer, | 905 | return; |
1225 | PAGES_TO_MiB(page_frame_number) | offset_in_page, | ||
1226 | grain_bits, syndrome, other_detail); | ||
1227 | |||
1228 | /* Memory type dependent details about the error */ | ||
1229 | if (type == HW_EVENT_ERR_CORRECTED) { | ||
1230 | snprintf(detail, sizeof(detail), | ||
1231 | "page:0x%lx offset:0x%lx grain:%ld syndrome:0x%lx", | ||
1232 | page_frame_number, offset_in_page, | ||
1233 | grain, syndrome); | ||
1234 | edac_ce_error(mci, error_count, pos, msg, location, label, | ||
1235 | detail, other_detail, enable_per_layer_report, | ||
1236 | page_frame_number, offset_in_page, grain); | ||
1237 | } else { | ||
1238 | snprintf(detail, sizeof(detail), | ||
1239 | "page:0x%lx offset:0x%lx grain:%ld", | ||
1240 | page_frame_number, offset_in_page, grain); | ||
1241 | |||
1242 | edac_ue_error(mci, error_count, pos, msg, location, label, | ||
1243 | detail, other_detail, enable_per_layer_report); | ||
1244 | } | 906 | } |
907 | |||
908 | if (edac_mc_get_log_ce()) | ||
909 | /* FIXME - put in DIMM location */ | ||
910 | edac_mc_printk(mci, KERN_WARNING, | ||
911 | "CE row %d, channel %d, label \"%s\": %s\n", | ||
912 | csrow, channel, | ||
913 | mci->csrows[csrow].channels[channel].label, msg); | ||
914 | |||
915 | mci->ce_count++; | ||
916 | mci->csrows[csrow].ce_count++; | ||
917 | mci->csrows[csrow].channels[channel].ce_count++; | ||
1245 | } | 918 | } |
1246 | EXPORT_SYMBOL_GPL(edac_mc_handle_error); | 919 | EXPORT_SYMBOL(edac_mc_handle_fbd_ce); |
diff --git a/drivers/edac/edac_mc_sysfs.c b/drivers/edac/edac_mc_sysfs.c index 0ca1ca71157..29ffa350bfb 100644 --- a/drivers/edac/edac_mc_sysfs.c +++ b/drivers/edac/edac_mc_sysfs.c | |||
@@ -7,21 +7,17 @@ | |||
7 | * | 7 | * |
8 | * Written Doug Thompson <norsk5@xmission.com> www.softwarebitmaker.com | 8 | * Written Doug Thompson <norsk5@xmission.com> www.softwarebitmaker.com |
9 | * | 9 | * |
10 | * (c) 2012 - Mauro Carvalho Chehab <mchehab@redhat.com> | ||
11 | * The entire API were re-written, and ported to use struct device | ||
12 | * | ||
13 | */ | 10 | */ |
14 | 11 | ||
15 | #include <linux/ctype.h> | 12 | #include <linux/ctype.h> |
16 | #include <linux/slab.h> | 13 | #include <linux/slab.h> |
17 | #include <linux/edac.h> | 14 | #include <linux/edac.h> |
18 | #include <linux/bug.h> | 15 | #include <linux/bug.h> |
19 | #include <linux/pm_runtime.h> | ||
20 | #include <linux/uaccess.h> | ||
21 | 16 | ||
22 | #include "edac_core.h" | 17 | #include "edac_core.h" |
23 | #include "edac_module.h" | 18 | #include "edac_module.h" |
24 | 19 | ||
20 | |||
25 | /* MC EDAC Controls, setable by module parameter, and sysfs */ | 21 | /* MC EDAC Controls, setable by module parameter, and sysfs */ |
26 | static int edac_mc_log_ue = 1; | 22 | static int edac_mc_log_ue = 1; |
27 | static int edac_mc_log_ce = 1; | 23 | static int edac_mc_log_ce = 1; |
@@ -82,8 +78,6 @@ module_param_call(edac_mc_poll_msec, edac_set_poll_msec, param_get_int, | |||
82 | &edac_mc_poll_msec, 0644); | 78 | &edac_mc_poll_msec, 0644); |
83 | MODULE_PARM_DESC(edac_mc_poll_msec, "Polling period in milliseconds"); | 79 | MODULE_PARM_DESC(edac_mc_poll_msec, "Polling period in milliseconds"); |
84 | 80 | ||
85 | static struct device *mci_pdev; | ||
86 | |||
87 | /* | 81 | /* |
88 | * various constants for Memory Controllers | 82 | * various constants for Memory Controllers |
89 | */ | 83 | */ |
@@ -131,529 +125,311 @@ static const char *edac_caps[] = { | |||
131 | [EDAC_S16ECD16ED] = "S16ECD16ED" | 125 | [EDAC_S16ECD16ED] = "S16ECD16ED" |
132 | }; | 126 | }; |
133 | 127 | ||
134 | #ifdef CONFIG_EDAC_LEGACY_SYSFS | 128 | /* EDAC sysfs CSROW data structures and methods |
135 | /* | ||
136 | * EDAC sysfs CSROW data structures and methods | ||
137 | */ | ||
138 | |||
139 | #define to_csrow(k) container_of(k, struct csrow_info, dev) | ||
140 | |||
141 | /* | ||
142 | * We need it to avoid namespace conflicts between the legacy API | ||
143 | * and the per-dimm/per-rank one | ||
144 | */ | 129 | */ |
145 | #define DEVICE_ATTR_LEGACY(_name, _mode, _show, _store) \ | ||
146 | struct device_attribute dev_attr_legacy_##_name = __ATTR(_name, _mode, _show, _store) | ||
147 | |||
148 | struct dev_ch_attribute { | ||
149 | struct device_attribute attr; | ||
150 | int channel; | ||
151 | }; | ||
152 | |||
153 | #define DEVICE_CHANNEL(_name, _mode, _show, _store, _var) \ | ||
154 | struct dev_ch_attribute dev_attr_legacy_##_name = \ | ||
155 | { __ATTR(_name, _mode, _show, _store), (_var) } | ||
156 | |||
157 | #define to_channel(k) (container_of(k, struct dev_ch_attribute, attr)->channel) | ||
158 | 130 | ||
159 | /* Set of more default csrow<id> attribute show/store functions */ | 131 | /* Set of more default csrow<id> attribute show/store functions */ |
160 | static ssize_t csrow_ue_count_show(struct device *dev, | 132 | static ssize_t csrow_ue_count_show(struct csrow_info *csrow, char *data, |
161 | struct device_attribute *mattr, char *data) | 133 | int private) |
162 | { | 134 | { |
163 | struct csrow_info *csrow = to_csrow(dev); | ||
164 | |||
165 | return sprintf(data, "%u\n", csrow->ue_count); | 135 | return sprintf(data, "%u\n", csrow->ue_count); |
166 | } | 136 | } |
167 | 137 | ||
168 | static ssize_t csrow_ce_count_show(struct device *dev, | 138 | static ssize_t csrow_ce_count_show(struct csrow_info *csrow, char *data, |
169 | struct device_attribute *mattr, char *data) | 139 | int private) |
170 | { | 140 | { |
171 | struct csrow_info *csrow = to_csrow(dev); | ||
172 | |||
173 | return sprintf(data, "%u\n", csrow->ce_count); | 141 | return sprintf(data, "%u\n", csrow->ce_count); |
174 | } | 142 | } |
175 | 143 | ||
176 | static ssize_t csrow_size_show(struct device *dev, | 144 | static ssize_t csrow_size_show(struct csrow_info *csrow, char *data, |
177 | struct device_attribute *mattr, char *data) | 145 | int private) |
178 | { | 146 | { |
179 | struct csrow_info *csrow = to_csrow(dev); | 147 | return sprintf(data, "%u\n", PAGES_TO_MiB(csrow->nr_pages)); |
180 | int i; | ||
181 | u32 nr_pages = 0; | ||
182 | |||
183 | if (csrow->mci->csbased) | ||
184 | return sprintf(data, "%u\n", PAGES_TO_MiB(csrow->nr_pages)); | ||
185 | |||
186 | for (i = 0; i < csrow->nr_channels; i++) | ||
187 | nr_pages += csrow->channels[i]->dimm->nr_pages; | ||
188 | return sprintf(data, "%u\n", PAGES_TO_MiB(nr_pages)); | ||
189 | } | 148 | } |
190 | 149 | ||
191 | static ssize_t csrow_mem_type_show(struct device *dev, | 150 | static ssize_t csrow_mem_type_show(struct csrow_info *csrow, char *data, |
192 | struct device_attribute *mattr, char *data) | 151 | int private) |
193 | { | 152 | { |
194 | struct csrow_info *csrow = to_csrow(dev); | 153 | return sprintf(data, "%s\n", mem_types[csrow->mtype]); |
195 | |||
196 | return sprintf(data, "%s\n", mem_types[csrow->channels[0]->dimm->mtype]); | ||
197 | } | 154 | } |
198 | 155 | ||
199 | static ssize_t csrow_dev_type_show(struct device *dev, | 156 | static ssize_t csrow_dev_type_show(struct csrow_info *csrow, char *data, |
200 | struct device_attribute *mattr, char *data) | 157 | int private) |
201 | { | 158 | { |
202 | struct csrow_info *csrow = to_csrow(dev); | 159 | return sprintf(data, "%s\n", dev_types[csrow->dtype]); |
203 | |||
204 | return sprintf(data, "%s\n", dev_types[csrow->channels[0]->dimm->dtype]); | ||
205 | } | 160 | } |
206 | 161 | ||
207 | static ssize_t csrow_edac_mode_show(struct device *dev, | 162 | static ssize_t csrow_edac_mode_show(struct csrow_info *csrow, char *data, |
208 | struct device_attribute *mattr, | 163 | int private) |
209 | char *data) | ||
210 | { | 164 | { |
211 | struct csrow_info *csrow = to_csrow(dev); | 165 | return sprintf(data, "%s\n", edac_caps[csrow->edac_mode]); |
212 | |||
213 | return sprintf(data, "%s\n", edac_caps[csrow->channels[0]->dimm->edac_mode]); | ||
214 | } | 166 | } |
215 | 167 | ||
216 | /* show/store functions for DIMM Label attributes */ | 168 | /* show/store functions for DIMM Label attributes */ |
217 | static ssize_t channel_dimm_label_show(struct device *dev, | 169 | static ssize_t channel_dimm_label_show(struct csrow_info *csrow, |
218 | struct device_attribute *mattr, | 170 | char *data, int channel) |
219 | char *data) | ||
220 | { | 171 | { |
221 | struct csrow_info *csrow = to_csrow(dev); | ||
222 | unsigned chan = to_channel(mattr); | ||
223 | struct rank_info *rank = csrow->channels[chan]; | ||
224 | |||
225 | /* if field has not been initialized, there is nothing to send */ | 172 | /* if field has not been initialized, there is nothing to send */ |
226 | if (!rank->dimm->label[0]) | 173 | if (!csrow->channels[channel].label[0]) |
227 | return 0; | 174 | return 0; |
228 | 175 | ||
229 | return snprintf(data, EDAC_MC_LABEL_LEN, "%s\n", | 176 | return snprintf(data, EDAC_MC_LABEL_LEN, "%s\n", |
230 | rank->dimm->label); | 177 | csrow->channels[channel].label); |
231 | } | 178 | } |
232 | 179 | ||
233 | static ssize_t channel_dimm_label_store(struct device *dev, | 180 | static ssize_t channel_dimm_label_store(struct csrow_info *csrow, |
234 | struct device_attribute *mattr, | 181 | const char *data, |
235 | const char *data, size_t count) | 182 | size_t count, int channel) |
236 | { | 183 | { |
237 | struct csrow_info *csrow = to_csrow(dev); | ||
238 | unsigned chan = to_channel(mattr); | ||
239 | struct rank_info *rank = csrow->channels[chan]; | ||
240 | |||
241 | ssize_t max_size = 0; | 184 | ssize_t max_size = 0; |
242 | 185 | ||
243 | max_size = min((ssize_t) count, (ssize_t) EDAC_MC_LABEL_LEN - 1); | 186 | max_size = min((ssize_t) count, (ssize_t) EDAC_MC_LABEL_LEN - 1); |
244 | strncpy(rank->dimm->label, data, max_size); | 187 | strncpy(csrow->channels[channel].label, data, max_size); |
245 | rank->dimm->label[max_size] = '\0'; | 188 | csrow->channels[channel].label[max_size] = '\0'; |
246 | 189 | ||
247 | return max_size; | 190 | return max_size; |
248 | } | 191 | } |
249 | 192 | ||
250 | /* show function for dynamic chX_ce_count attribute */ | 193 | /* show function for dynamic chX_ce_count attribute */ |
251 | static ssize_t channel_ce_count_show(struct device *dev, | 194 | static ssize_t channel_ce_count_show(struct csrow_info *csrow, |
252 | struct device_attribute *mattr, char *data) | 195 | char *data, int channel) |
253 | { | 196 | { |
254 | struct csrow_info *csrow = to_csrow(dev); | 197 | return sprintf(data, "%u\n", csrow->channels[channel].ce_count); |
255 | unsigned chan = to_channel(mattr); | ||
256 | struct rank_info *rank = csrow->channels[chan]; | ||
257 | |||
258 | return sprintf(data, "%u\n", rank->ce_count); | ||
259 | } | 198 | } |
260 | 199 | ||
261 | /* cwrow<id>/attribute files */ | 200 | /* csrow specific attribute structure */ |
262 | DEVICE_ATTR_LEGACY(size_mb, S_IRUGO, csrow_size_show, NULL); | 201 | struct csrowdev_attribute { |
263 | DEVICE_ATTR_LEGACY(dev_type, S_IRUGO, csrow_dev_type_show, NULL); | 202 | struct attribute attr; |
264 | DEVICE_ATTR_LEGACY(mem_type, S_IRUGO, csrow_mem_type_show, NULL); | 203 | ssize_t(*show) (struct csrow_info *, char *, int); |
265 | DEVICE_ATTR_LEGACY(edac_mode, S_IRUGO, csrow_edac_mode_show, NULL); | 204 | ssize_t(*store) (struct csrow_info *, const char *, size_t, int); |
266 | DEVICE_ATTR_LEGACY(ue_count, S_IRUGO, csrow_ue_count_show, NULL); | 205 | int private; |
267 | DEVICE_ATTR_LEGACY(ce_count, S_IRUGO, csrow_ce_count_show, NULL); | ||
268 | |||
269 | /* default attributes of the CSROW<id> object */ | ||
270 | static struct attribute *csrow_attrs[] = { | ||
271 | &dev_attr_legacy_dev_type.attr, | ||
272 | &dev_attr_legacy_mem_type.attr, | ||
273 | &dev_attr_legacy_edac_mode.attr, | ||
274 | &dev_attr_legacy_size_mb.attr, | ||
275 | &dev_attr_legacy_ue_count.attr, | ||
276 | &dev_attr_legacy_ce_count.attr, | ||
277 | NULL, | ||
278 | }; | 206 | }; |
279 | 207 | ||
280 | static struct attribute_group csrow_attr_grp = { | 208 | #define to_csrow(k) container_of(k, struct csrow_info, kobj) |
281 | .attrs = csrow_attrs, | 209 | #define to_csrowdev_attr(a) container_of(a, struct csrowdev_attribute, attr) |
282 | }; | ||
283 | 210 | ||
284 | static const struct attribute_group *csrow_attr_groups[] = { | 211 | /* Set of show/store higher level functions for default csrow attributes */ |
285 | &csrow_attr_grp, | 212 | static ssize_t csrowdev_show(struct kobject *kobj, |
286 | NULL | 213 | struct attribute *attr, char *buffer) |
287 | }; | ||
288 | |||
289 | static void csrow_attr_release(struct device *dev) | ||
290 | { | 214 | { |
291 | struct csrow_info *csrow = container_of(dev, struct csrow_info, dev); | 215 | struct csrow_info *csrow = to_csrow(kobj); |
216 | struct csrowdev_attribute *csrowdev_attr = to_csrowdev_attr(attr); | ||
292 | 217 | ||
293 | edac_dbg(1, "Releasing csrow device %s\n", dev_name(dev)); | 218 | if (csrowdev_attr->show) |
294 | kfree(csrow); | 219 | return csrowdev_attr->show(csrow, |
220 | buffer, csrowdev_attr->private); | ||
221 | return -EIO; | ||
222 | } | ||
223 | |||
224 | static ssize_t csrowdev_store(struct kobject *kobj, struct attribute *attr, | ||
225 | const char *buffer, size_t count) | ||
226 | { | ||
227 | struct csrow_info *csrow = to_csrow(kobj); | ||
228 | struct csrowdev_attribute *csrowdev_attr = to_csrowdev_attr(attr); | ||
229 | |||
230 | if (csrowdev_attr->store) | ||
231 | return csrowdev_attr->store(csrow, | ||
232 | buffer, | ||
233 | count, csrowdev_attr->private); | ||
234 | return -EIO; | ||
295 | } | 235 | } |
296 | 236 | ||
297 | static struct device_type csrow_attr_type = { | 237 | static const struct sysfs_ops csrowfs_ops = { |
298 | .groups = csrow_attr_groups, | 238 | .show = csrowdev_show, |
299 | .release = csrow_attr_release, | 239 | .store = csrowdev_store |
300 | }; | 240 | }; |
301 | 241 | ||
302 | /* | 242 | #define CSROWDEV_ATTR(_name,_mode,_show,_store,_private) \ |
303 | * possible dynamic channel DIMM Label attribute files | 243 | static struct csrowdev_attribute attr_##_name = { \ |
304 | * | 244 | .attr = {.name = __stringify(_name), .mode = _mode }, \ |
305 | */ | 245 | .show = _show, \ |
246 | .store = _store, \ | ||
247 | .private = _private, \ | ||
248 | }; | ||
306 | 249 | ||
307 | #define EDAC_NR_CHANNELS 6 | 250 | /* default cwrow<id>/attribute files */ |
251 | CSROWDEV_ATTR(size_mb, S_IRUGO, csrow_size_show, NULL, 0); | ||
252 | CSROWDEV_ATTR(dev_type, S_IRUGO, csrow_dev_type_show, NULL, 0); | ||
253 | CSROWDEV_ATTR(mem_type, S_IRUGO, csrow_mem_type_show, NULL, 0); | ||
254 | CSROWDEV_ATTR(edac_mode, S_IRUGO, csrow_edac_mode_show, NULL, 0); | ||
255 | CSROWDEV_ATTR(ue_count, S_IRUGO, csrow_ue_count_show, NULL, 0); | ||
256 | CSROWDEV_ATTR(ce_count, S_IRUGO, csrow_ce_count_show, NULL, 0); | ||
257 | |||
258 | /* default attributes of the CSROW<id> object */ | ||
259 | static struct csrowdev_attribute *default_csrow_attr[] = { | ||
260 | &attr_dev_type, | ||
261 | &attr_mem_type, | ||
262 | &attr_edac_mode, | ||
263 | &attr_size_mb, | ||
264 | &attr_ue_count, | ||
265 | &attr_ce_count, | ||
266 | NULL, | ||
267 | }; | ||
308 | 268 | ||
309 | DEVICE_CHANNEL(ch0_dimm_label, S_IRUGO | S_IWUSR, | 269 | /* possible dynamic channel DIMM Label attribute files */ |
270 | CSROWDEV_ATTR(ch0_dimm_label, S_IRUGO | S_IWUSR, | ||
310 | channel_dimm_label_show, channel_dimm_label_store, 0); | 271 | channel_dimm_label_show, channel_dimm_label_store, 0); |
311 | DEVICE_CHANNEL(ch1_dimm_label, S_IRUGO | S_IWUSR, | 272 | CSROWDEV_ATTR(ch1_dimm_label, S_IRUGO | S_IWUSR, |
312 | channel_dimm_label_show, channel_dimm_label_store, 1); | 273 | channel_dimm_label_show, channel_dimm_label_store, 1); |
313 | DEVICE_CHANNEL(ch2_dimm_label, S_IRUGO | S_IWUSR, | 274 | CSROWDEV_ATTR(ch2_dimm_label, S_IRUGO | S_IWUSR, |
314 | channel_dimm_label_show, channel_dimm_label_store, 2); | 275 | channel_dimm_label_show, channel_dimm_label_store, 2); |
315 | DEVICE_CHANNEL(ch3_dimm_label, S_IRUGO | S_IWUSR, | 276 | CSROWDEV_ATTR(ch3_dimm_label, S_IRUGO | S_IWUSR, |
316 | channel_dimm_label_show, channel_dimm_label_store, 3); | 277 | channel_dimm_label_show, channel_dimm_label_store, 3); |
317 | DEVICE_CHANNEL(ch4_dimm_label, S_IRUGO | S_IWUSR, | 278 | CSROWDEV_ATTR(ch4_dimm_label, S_IRUGO | S_IWUSR, |
318 | channel_dimm_label_show, channel_dimm_label_store, 4); | 279 | channel_dimm_label_show, channel_dimm_label_store, 4); |
319 | DEVICE_CHANNEL(ch5_dimm_label, S_IRUGO | S_IWUSR, | 280 | CSROWDEV_ATTR(ch5_dimm_label, S_IRUGO | S_IWUSR, |
320 | channel_dimm_label_show, channel_dimm_label_store, 5); | 281 | channel_dimm_label_show, channel_dimm_label_store, 5); |
321 | 282 | ||
322 | /* Total possible dynamic DIMM Label attribute file table */ | 283 | /* Total possible dynamic DIMM Label attribute file table */ |
323 | static struct device_attribute *dynamic_csrow_dimm_attr[] = { | 284 | static struct csrowdev_attribute *dynamic_csrow_dimm_attr[] = { |
324 | &dev_attr_legacy_ch0_dimm_label.attr, | 285 | &attr_ch0_dimm_label, |
325 | &dev_attr_legacy_ch1_dimm_label.attr, | 286 | &attr_ch1_dimm_label, |
326 | &dev_attr_legacy_ch2_dimm_label.attr, | 287 | &attr_ch2_dimm_label, |
327 | &dev_attr_legacy_ch3_dimm_label.attr, | 288 | &attr_ch3_dimm_label, |
328 | &dev_attr_legacy_ch4_dimm_label.attr, | 289 | &attr_ch4_dimm_label, |
329 | &dev_attr_legacy_ch5_dimm_label.attr | 290 | &attr_ch5_dimm_label |
330 | }; | 291 | }; |
331 | 292 | ||
332 | /* possible dynamic channel ce_count attribute files */ | 293 | /* possible dynamic channel ce_count attribute files */ |
333 | DEVICE_CHANNEL(ch0_ce_count, S_IRUGO | S_IWUSR, | 294 | CSROWDEV_ATTR(ch0_ce_count, S_IRUGO | S_IWUSR, channel_ce_count_show, NULL, 0); |
334 | channel_ce_count_show, NULL, 0); | 295 | CSROWDEV_ATTR(ch1_ce_count, S_IRUGO | S_IWUSR, channel_ce_count_show, NULL, 1); |
335 | DEVICE_CHANNEL(ch1_ce_count, S_IRUGO | S_IWUSR, | 296 | CSROWDEV_ATTR(ch2_ce_count, S_IRUGO | S_IWUSR, channel_ce_count_show, NULL, 2); |
336 | channel_ce_count_show, NULL, 1); | 297 | CSROWDEV_ATTR(ch3_ce_count, S_IRUGO | S_IWUSR, channel_ce_count_show, NULL, 3); |
337 | DEVICE_CHANNEL(ch2_ce_count, S_IRUGO | S_IWUSR, | 298 | CSROWDEV_ATTR(ch4_ce_count, S_IRUGO | S_IWUSR, channel_ce_count_show, NULL, 4); |
338 | channel_ce_count_show, NULL, 2); | 299 | CSROWDEV_ATTR(ch5_ce_count, S_IRUGO | S_IWUSR, channel_ce_count_show, NULL, 5); |
339 | DEVICE_CHANNEL(ch3_ce_count, S_IRUGO | S_IWUSR, | ||
340 | channel_ce_count_show, NULL, 3); | ||
341 | DEVICE_CHANNEL(ch4_ce_count, S_IRUGO | S_IWUSR, | ||
342 | channel_ce_count_show, NULL, 4); | ||
343 | DEVICE_CHANNEL(ch5_ce_count, S_IRUGO | S_IWUSR, | ||
344 | channel_ce_count_show, NULL, 5); | ||
345 | 300 | ||
346 | /* Total possible dynamic ce_count attribute file table */ | 301 | /* Total possible dynamic ce_count attribute file table */ |
347 | static struct device_attribute *dynamic_csrow_ce_count_attr[] = { | 302 | static struct csrowdev_attribute *dynamic_csrow_ce_count_attr[] = { |
348 | &dev_attr_legacy_ch0_ce_count.attr, | 303 | &attr_ch0_ce_count, |
349 | &dev_attr_legacy_ch1_ce_count.attr, | 304 | &attr_ch1_ce_count, |
350 | &dev_attr_legacy_ch2_ce_count.attr, | 305 | &attr_ch2_ce_count, |
351 | &dev_attr_legacy_ch3_ce_count.attr, | 306 | &attr_ch3_ce_count, |
352 | &dev_attr_legacy_ch4_ce_count.attr, | 307 | &attr_ch4_ce_count, |
353 | &dev_attr_legacy_ch5_ce_count.attr | 308 | &attr_ch5_ce_count |
354 | }; | 309 | }; |
355 | 310 | ||
356 | static inline int nr_pages_per_csrow(struct csrow_info *csrow) | 311 | #define EDAC_NR_CHANNELS 6 |
357 | { | ||
358 | int chan, nr_pages = 0; | ||
359 | |||
360 | for (chan = 0; chan < csrow->nr_channels; chan++) | ||
361 | nr_pages += csrow->channels[chan]->dimm->nr_pages; | ||
362 | |||
363 | return nr_pages; | ||
364 | } | ||
365 | 312 | ||
366 | /* Create a CSROW object under specifed edac_mc_device */ | 313 | /* Create dynamic CHANNEL files, indexed by 'chan', under specifed CSROW */ |
367 | static int edac_create_csrow_object(struct mem_ctl_info *mci, | 314 | static int edac_create_channel_files(struct kobject *kobj, int chan) |
368 | struct csrow_info *csrow, int index) | ||
369 | { | 315 | { |
370 | int err, chan; | 316 | int err = -ENODEV; |
371 | |||
372 | if (csrow->nr_channels >= EDAC_NR_CHANNELS) | ||
373 | return -ENODEV; | ||
374 | |||
375 | csrow->dev.type = &csrow_attr_type; | ||
376 | csrow->dev.bus = &mci->bus; | ||
377 | device_initialize(&csrow->dev); | ||
378 | csrow->dev.parent = &mci->dev; | ||
379 | csrow->mci = mci; | ||
380 | dev_set_name(&csrow->dev, "csrow%d", index); | ||
381 | dev_set_drvdata(&csrow->dev, csrow); | ||
382 | |||
383 | edac_dbg(0, "creating (virtual) csrow node %s\n", | ||
384 | dev_name(&csrow->dev)); | ||
385 | 317 | ||
386 | err = device_add(&csrow->dev); | 318 | if (chan >= EDAC_NR_CHANNELS) |
387 | if (err < 0) | ||
388 | return err; | 319 | return err; |
389 | 320 | ||
390 | for (chan = 0; chan < csrow->nr_channels; chan++) { | 321 | /* create the DIMM label attribute file */ |
391 | /* Only expose populated DIMMs */ | 322 | err = sysfs_create_file(kobj, |
392 | if (!csrow->channels[chan]->dimm->nr_pages) | 323 | (struct attribute *) |
393 | continue; | 324 | dynamic_csrow_dimm_attr[chan]); |
394 | err = device_create_file(&csrow->dev, | 325 | |
395 | dynamic_csrow_dimm_attr[chan]); | 326 | if (!err) { |
396 | if (err < 0) | 327 | /* create the CE Count attribute file */ |
397 | goto error; | 328 | err = sysfs_create_file(kobj, |
398 | err = device_create_file(&csrow->dev, | 329 | (struct attribute *) |
399 | dynamic_csrow_ce_count_attr[chan]); | 330 | dynamic_csrow_ce_count_attr[chan]); |
400 | if (err < 0) { | 331 | } else { |
401 | device_remove_file(&csrow->dev, | 332 | debugf1("%s() dimm labels and ce_count files created", |
402 | dynamic_csrow_dimm_attr[chan]); | 333 | __func__); |
403 | goto error; | ||
404 | } | ||
405 | } | ||
406 | |||
407 | return 0; | ||
408 | |||
409 | error: | ||
410 | for (--chan; chan >= 0; chan--) { | ||
411 | device_remove_file(&csrow->dev, | ||
412 | dynamic_csrow_dimm_attr[chan]); | ||
413 | device_remove_file(&csrow->dev, | ||
414 | dynamic_csrow_ce_count_attr[chan]); | ||
415 | } | ||
416 | put_device(&csrow->dev); | ||
417 | |||
418 | return err; | ||
419 | } | ||
420 | |||
421 | /* Create a CSROW object under specifed edac_mc_device */ | ||
422 | static int edac_create_csrow_objects(struct mem_ctl_info *mci) | ||
423 | { | ||
424 | int err, i, chan; | ||
425 | struct csrow_info *csrow; | ||
426 | |||
427 | for (i = 0; i < mci->nr_csrows; i++) { | ||
428 | csrow = mci->csrows[i]; | ||
429 | if (!nr_pages_per_csrow(csrow)) | ||
430 | continue; | ||
431 | err = edac_create_csrow_object(mci, mci->csrows[i], i); | ||
432 | if (err < 0) | ||
433 | goto error; | ||
434 | } | ||
435 | return 0; | ||
436 | |||
437 | error: | ||
438 | for (--i; i >= 0; i--) { | ||
439 | csrow = mci->csrows[i]; | ||
440 | if (!nr_pages_per_csrow(csrow)) | ||
441 | continue; | ||
442 | for (chan = csrow->nr_channels - 1; chan >= 0; chan--) { | ||
443 | if (!csrow->channels[chan]->dimm->nr_pages) | ||
444 | continue; | ||
445 | device_remove_file(&csrow->dev, | ||
446 | dynamic_csrow_dimm_attr[chan]); | ||
447 | device_remove_file(&csrow->dev, | ||
448 | dynamic_csrow_ce_count_attr[chan]); | ||
449 | } | ||
450 | put_device(&mci->csrows[i]->dev); | ||
451 | } | 334 | } |
452 | 335 | ||
453 | return err; | 336 | return err; |
454 | } | 337 | } |
455 | 338 | ||
456 | static void edac_delete_csrow_objects(struct mem_ctl_info *mci) | 339 | /* No memory to release for this kobj */ |
457 | { | 340 | static void edac_csrow_instance_release(struct kobject *kobj) |
458 | int i, chan; | ||
459 | struct csrow_info *csrow; | ||
460 | |||
461 | for (i = mci->nr_csrows - 1; i >= 0; i--) { | ||
462 | csrow = mci->csrows[i]; | ||
463 | if (!nr_pages_per_csrow(csrow)) | ||
464 | continue; | ||
465 | for (chan = csrow->nr_channels - 1; chan >= 0; chan--) { | ||
466 | if (!csrow->channels[chan]->dimm->nr_pages) | ||
467 | continue; | ||
468 | edac_dbg(1, "Removing csrow %d channel %d sysfs nodes\n", | ||
469 | i, chan); | ||
470 | device_remove_file(&csrow->dev, | ||
471 | dynamic_csrow_dimm_attr[chan]); | ||
472 | device_remove_file(&csrow->dev, | ||
473 | dynamic_csrow_ce_count_attr[chan]); | ||
474 | } | ||
475 | device_unregister(&mci->csrows[i]->dev); | ||
476 | } | ||
477 | } | ||
478 | #endif | ||
479 | |||
480 | /* | ||
481 | * Per-dimm (or per-rank) devices | ||
482 | */ | ||
483 | |||
484 | #define to_dimm(k) container_of(k, struct dimm_info, dev) | ||
485 | |||
486 | /* show/store functions for DIMM Label attributes */ | ||
487 | static ssize_t dimmdev_location_show(struct device *dev, | ||
488 | struct device_attribute *mattr, char *data) | ||
489 | { | ||
490 | struct dimm_info *dimm = to_dimm(dev); | ||
491 | |||
492 | return edac_dimm_info_location(dimm, data, PAGE_SIZE); | ||
493 | } | ||
494 | |||
495 | static ssize_t dimmdev_label_show(struct device *dev, | ||
496 | struct device_attribute *mattr, char *data) | ||
497 | { | ||
498 | struct dimm_info *dimm = to_dimm(dev); | ||
499 | |||
500 | /* if field has not been initialized, there is nothing to send */ | ||
501 | if (!dimm->label[0]) | ||
502 | return 0; | ||
503 | |||
504 | return snprintf(data, EDAC_MC_LABEL_LEN, "%s\n", dimm->label); | ||
505 | } | ||
506 | |||
507 | static ssize_t dimmdev_label_store(struct device *dev, | ||
508 | struct device_attribute *mattr, | ||
509 | const char *data, | ||
510 | size_t count) | ||
511 | { | 341 | { |
512 | struct dimm_info *dimm = to_dimm(dev); | 342 | struct mem_ctl_info *mci; |
343 | struct csrow_info *cs; | ||
513 | 344 | ||
514 | ssize_t max_size = 0; | 345 | debugf1("%s()\n", __func__); |
515 | 346 | ||
516 | max_size = min((ssize_t) count, (ssize_t) EDAC_MC_LABEL_LEN - 1); | 347 | cs = container_of(kobj, struct csrow_info, kobj); |
517 | strncpy(dimm->label, data, max_size); | 348 | mci = cs->mci; |
518 | dimm->label[max_size] = '\0'; | ||
519 | 349 | ||
520 | return max_size; | 350 | kobject_put(&mci->edac_mci_kobj); |
521 | } | 351 | } |
522 | 352 | ||
523 | static ssize_t dimmdev_size_show(struct device *dev, | 353 | /* the kobj_type instance for a CSROW */ |
524 | struct device_attribute *mattr, char *data) | 354 | static struct kobj_type ktype_csrow = { |
525 | { | 355 | .release = edac_csrow_instance_release, |
526 | struct dimm_info *dimm = to_dimm(dev); | 356 | .sysfs_ops = &csrowfs_ops, |
527 | 357 | .default_attrs = (struct attribute **)default_csrow_attr, | |
528 | return sprintf(data, "%u\n", PAGES_TO_MiB(dimm->nr_pages)); | ||
529 | } | ||
530 | |||
531 | static ssize_t dimmdev_mem_type_show(struct device *dev, | ||
532 | struct device_attribute *mattr, char *data) | ||
533 | { | ||
534 | struct dimm_info *dimm = to_dimm(dev); | ||
535 | |||
536 | return sprintf(data, "%s\n", mem_types[dimm->mtype]); | ||
537 | } | ||
538 | |||
539 | static ssize_t dimmdev_dev_type_show(struct device *dev, | ||
540 | struct device_attribute *mattr, char *data) | ||
541 | { | ||
542 | struct dimm_info *dimm = to_dimm(dev); | ||
543 | |||
544 | return sprintf(data, "%s\n", dev_types[dimm->dtype]); | ||
545 | } | ||
546 | |||
547 | static ssize_t dimmdev_edac_mode_show(struct device *dev, | ||
548 | struct device_attribute *mattr, | ||
549 | char *data) | ||
550 | { | ||
551 | struct dimm_info *dimm = to_dimm(dev); | ||
552 | |||
553 | return sprintf(data, "%s\n", edac_caps[dimm->edac_mode]); | ||
554 | } | ||
555 | |||
556 | /* dimm/rank attribute files */ | ||
557 | static DEVICE_ATTR(dimm_label, S_IRUGO | S_IWUSR, | ||
558 | dimmdev_label_show, dimmdev_label_store); | ||
559 | static DEVICE_ATTR(dimm_location, S_IRUGO, dimmdev_location_show, NULL); | ||
560 | static DEVICE_ATTR(size, S_IRUGO, dimmdev_size_show, NULL); | ||
561 | static DEVICE_ATTR(dimm_mem_type, S_IRUGO, dimmdev_mem_type_show, NULL); | ||
562 | static DEVICE_ATTR(dimm_dev_type, S_IRUGO, dimmdev_dev_type_show, NULL); | ||
563 | static DEVICE_ATTR(dimm_edac_mode, S_IRUGO, dimmdev_edac_mode_show, NULL); | ||
564 | |||
565 | /* attributes of the dimm<id>/rank<id> object */ | ||
566 | static struct attribute *dimm_attrs[] = { | ||
567 | &dev_attr_dimm_label.attr, | ||
568 | &dev_attr_dimm_location.attr, | ||
569 | &dev_attr_size.attr, | ||
570 | &dev_attr_dimm_mem_type.attr, | ||
571 | &dev_attr_dimm_dev_type.attr, | ||
572 | &dev_attr_dimm_edac_mode.attr, | ||
573 | NULL, | ||
574 | }; | 358 | }; |
575 | 359 | ||
576 | static struct attribute_group dimm_attr_grp = { | 360 | /* Create a CSROW object under specifed edac_mc_device */ |
577 | .attrs = dimm_attrs, | 361 | static int edac_create_csrow_object(struct mem_ctl_info *mci, |
578 | }; | 362 | struct csrow_info *csrow, int index) |
579 | |||
580 | static const struct attribute_group *dimm_attr_groups[] = { | ||
581 | &dimm_attr_grp, | ||
582 | NULL | ||
583 | }; | ||
584 | |||
585 | static void dimm_attr_release(struct device *dev) | ||
586 | { | 363 | { |
587 | struct dimm_info *dimm = container_of(dev, struct dimm_info, dev); | 364 | struct kobject *kobj_mci = &mci->edac_mci_kobj; |
588 | 365 | struct kobject *kobj; | |
589 | edac_dbg(1, "Releasing dimm device %s\n", dev_name(dev)); | 366 | int chan; |
590 | kfree(dimm); | 367 | int err; |
591 | } | ||
592 | 368 | ||
593 | static struct device_type dimm_attr_type = { | 369 | /* generate ..../edac/mc/mc<id>/csrow<index> */ |
594 | .groups = dimm_attr_groups, | 370 | memset(&csrow->kobj, 0, sizeof(csrow->kobj)); |
595 | .release = dimm_attr_release, | 371 | csrow->mci = mci; /* include container up link */ |
596 | }; | ||
597 | 372 | ||
598 | /* Create a DIMM object under specifed memory controller device */ | 373 | /* bump the mci instance's kobject's ref count */ |
599 | static int edac_create_dimm_object(struct mem_ctl_info *mci, | 374 | kobj = kobject_get(&mci->edac_mci_kobj); |
600 | struct dimm_info *dimm, | 375 | if (!kobj) { |
601 | int index) | 376 | err = -ENODEV; |
602 | { | 377 | goto err_out; |
603 | int err; | 378 | } |
604 | dimm->mci = mci; | ||
605 | 379 | ||
606 | dimm->dev.type = &dimm_attr_type; | 380 | /* Instanstiate the csrow object */ |
607 | dimm->dev.bus = &mci->bus; | 381 | err = kobject_init_and_add(&csrow->kobj, &ktype_csrow, kobj_mci, |
608 | device_initialize(&dimm->dev); | 382 | "csrow%d", index); |
383 | if (err) | ||
384 | goto err_release_top_kobj; | ||
609 | 385 | ||
610 | dimm->dev.parent = &mci->dev; | 386 | /* At this point, to release a csrow kobj, one must |
611 | if (mci->mem_is_per_rank) | 387 | * call the kobject_put and allow that tear down |
612 | dev_set_name(&dimm->dev, "rank%d", index); | 388 | * to work the releasing |
613 | else | 389 | */ |
614 | dev_set_name(&dimm->dev, "dimm%d", index); | ||
615 | dev_set_drvdata(&dimm->dev, dimm); | ||
616 | pm_runtime_forbid(&mci->dev); | ||
617 | 390 | ||
618 | err = device_add(&dimm->dev); | 391 | /* Create the dyanmic attribute files on this csrow, |
392 | * namely, the DIMM labels and the channel ce_count | ||
393 | */ | ||
394 | for (chan = 0; chan < csrow->nr_channels; chan++) { | ||
395 | err = edac_create_channel_files(&csrow->kobj, chan); | ||
396 | if (err) { | ||
397 | /* special case the unregister here */ | ||
398 | kobject_put(&csrow->kobj); | ||
399 | goto err_out; | ||
400 | } | ||
401 | } | ||
402 | kobject_uevent(&csrow->kobj, KOBJ_ADD); | ||
403 | return 0; | ||
619 | 404 | ||
620 | edac_dbg(0, "creating rank/dimm device %s\n", dev_name(&dimm->dev)); | 405 | /* error unwind stack */ |
406 | err_release_top_kobj: | ||
407 | kobject_put(&mci->edac_mci_kobj); | ||
621 | 408 | ||
409 | err_out: | ||
622 | return err; | 410 | return err; |
623 | } | 411 | } |
624 | 412 | ||
625 | /* | 413 | /* default sysfs methods and data structures for the main MCI kobject */ |
626 | * Memory controller device | ||
627 | */ | ||
628 | |||
629 | #define to_mci(k) container_of(k, struct mem_ctl_info, dev) | ||
630 | 414 | ||
631 | static ssize_t mci_reset_counters_store(struct device *dev, | 415 | static ssize_t mci_reset_counters_store(struct mem_ctl_info *mci, |
632 | struct device_attribute *mattr, | ||
633 | const char *data, size_t count) | 416 | const char *data, size_t count) |
634 | { | 417 | { |
635 | struct mem_ctl_info *mci = to_mci(dev); | 418 | int row, chan; |
636 | int cnt, row, chan, i; | 419 | |
637 | mci->ue_mc = 0; | ||
638 | mci->ce_mc = 0; | ||
639 | mci->ue_noinfo_count = 0; | 420 | mci->ue_noinfo_count = 0; |
640 | mci->ce_noinfo_count = 0; | 421 | mci->ce_noinfo_count = 0; |
422 | mci->ue_count = 0; | ||
423 | mci->ce_count = 0; | ||
641 | 424 | ||
642 | for (row = 0; row < mci->nr_csrows; row++) { | 425 | for (row = 0; row < mci->nr_csrows; row++) { |
643 | struct csrow_info *ri = mci->csrows[row]; | 426 | struct csrow_info *ri = &mci->csrows[row]; |
644 | 427 | ||
645 | ri->ue_count = 0; | 428 | ri->ue_count = 0; |
646 | ri->ce_count = 0; | 429 | ri->ce_count = 0; |
647 | 430 | ||
648 | for (chan = 0; chan < ri->nr_channels; chan++) | 431 | for (chan = 0; chan < ri->nr_channels; chan++) |
649 | ri->channels[chan]->ce_count = 0; | 432 | ri->channels[chan].ce_count = 0; |
650 | } | ||
651 | |||
652 | cnt = 1; | ||
653 | for (i = 0; i < mci->n_layers; i++) { | ||
654 | cnt *= mci->layers[i].size; | ||
655 | memset(mci->ce_per_layer[i], 0, cnt * sizeof(u32)); | ||
656 | memset(mci->ue_per_layer[i], 0, cnt * sizeof(u32)); | ||
657 | } | 433 | } |
658 | 434 | ||
659 | mci->start_time = jiffies; | 435 | mci->start_time = jiffies; |
@@ -669,16 +445,14 @@ static ssize_t mci_reset_counters_store(struct device *dev, | |||
669 | * Negative value still means that an error has occurred while setting | 445 | * Negative value still means that an error has occurred while setting |
670 | * the scrub rate. | 446 | * the scrub rate. |
671 | */ | 447 | */ |
672 | static ssize_t mci_sdram_scrub_rate_store(struct device *dev, | 448 | static ssize_t mci_sdram_scrub_rate_store(struct mem_ctl_info *mci, |
673 | struct device_attribute *mattr, | ||
674 | const char *data, size_t count) | 449 | const char *data, size_t count) |
675 | { | 450 | { |
676 | struct mem_ctl_info *mci = to_mci(dev); | ||
677 | unsigned long bandwidth = 0; | 451 | unsigned long bandwidth = 0; |
678 | int new_bw = 0; | 452 | int new_bw = 0; |
679 | 453 | ||
680 | if (!mci->set_sdram_scrub_rate) | 454 | if (!mci->set_sdram_scrub_rate) |
681 | return -ENODEV; | 455 | return -EINVAL; |
682 | 456 | ||
683 | if (strict_strtoul(data, 10, &bandwidth) < 0) | 457 | if (strict_strtoul(data, 10, &bandwidth) < 0) |
684 | return -EINVAL; | 458 | return -EINVAL; |
@@ -696,15 +470,12 @@ static ssize_t mci_sdram_scrub_rate_store(struct device *dev, | |||
696 | /* | 470 | /* |
697 | * ->get_sdram_scrub_rate() return value semantics same as above. | 471 | * ->get_sdram_scrub_rate() return value semantics same as above. |
698 | */ | 472 | */ |
699 | static ssize_t mci_sdram_scrub_rate_show(struct device *dev, | 473 | static ssize_t mci_sdram_scrub_rate_show(struct mem_ctl_info *mci, char *data) |
700 | struct device_attribute *mattr, | ||
701 | char *data) | ||
702 | { | 474 | { |
703 | struct mem_ctl_info *mci = to_mci(dev); | ||
704 | int bandwidth = 0; | 475 | int bandwidth = 0; |
705 | 476 | ||
706 | if (!mci->get_sdram_scrub_rate) | 477 | if (!mci->get_sdram_scrub_rate) |
707 | return -ENODEV; | 478 | return -EINVAL; |
708 | 479 | ||
709 | bandwidth = mci->get_sdram_scrub_rate(mci); | 480 | bandwidth = mci->get_sdram_scrub_rate(mci); |
710 | if (bandwidth < 0) { | 481 | if (bandwidth < 0) { |
@@ -716,259 +487,408 @@ static ssize_t mci_sdram_scrub_rate_show(struct device *dev, | |||
716 | } | 487 | } |
717 | 488 | ||
718 | /* default attribute files for the MCI object */ | 489 | /* default attribute files for the MCI object */ |
719 | static ssize_t mci_ue_count_show(struct device *dev, | 490 | static ssize_t mci_ue_count_show(struct mem_ctl_info *mci, char *data) |
720 | struct device_attribute *mattr, | ||
721 | char *data) | ||
722 | { | 491 | { |
723 | struct mem_ctl_info *mci = to_mci(dev); | 492 | return sprintf(data, "%d\n", mci->ue_count); |
724 | |||
725 | return sprintf(data, "%d\n", mci->ue_mc); | ||
726 | } | 493 | } |
727 | 494 | ||
728 | static ssize_t mci_ce_count_show(struct device *dev, | 495 | static ssize_t mci_ce_count_show(struct mem_ctl_info *mci, char *data) |
729 | struct device_attribute *mattr, | ||
730 | char *data) | ||
731 | { | 496 | { |
732 | struct mem_ctl_info *mci = to_mci(dev); | 497 | return sprintf(data, "%d\n", mci->ce_count); |
733 | |||
734 | return sprintf(data, "%d\n", mci->ce_mc); | ||
735 | } | 498 | } |
736 | 499 | ||
737 | static ssize_t mci_ce_noinfo_show(struct device *dev, | 500 | static ssize_t mci_ce_noinfo_show(struct mem_ctl_info *mci, char *data) |
738 | struct device_attribute *mattr, | ||
739 | char *data) | ||
740 | { | 501 | { |
741 | struct mem_ctl_info *mci = to_mci(dev); | ||
742 | |||
743 | return sprintf(data, "%d\n", mci->ce_noinfo_count); | 502 | return sprintf(data, "%d\n", mci->ce_noinfo_count); |
744 | } | 503 | } |
745 | 504 | ||
746 | static ssize_t mci_ue_noinfo_show(struct device *dev, | 505 | static ssize_t mci_ue_noinfo_show(struct mem_ctl_info *mci, char *data) |
747 | struct device_attribute *mattr, | ||
748 | char *data) | ||
749 | { | 506 | { |
750 | struct mem_ctl_info *mci = to_mci(dev); | ||
751 | |||
752 | return sprintf(data, "%d\n", mci->ue_noinfo_count); | 507 | return sprintf(data, "%d\n", mci->ue_noinfo_count); |
753 | } | 508 | } |
754 | 509 | ||
755 | static ssize_t mci_seconds_show(struct device *dev, | 510 | static ssize_t mci_seconds_show(struct mem_ctl_info *mci, char *data) |
756 | struct device_attribute *mattr, | ||
757 | char *data) | ||
758 | { | 511 | { |
759 | struct mem_ctl_info *mci = to_mci(dev); | ||
760 | |||
761 | return sprintf(data, "%ld\n", (jiffies - mci->start_time) / HZ); | 512 | return sprintf(data, "%ld\n", (jiffies - mci->start_time) / HZ); |
762 | } | 513 | } |
763 | 514 | ||
764 | static ssize_t mci_ctl_name_show(struct device *dev, | 515 | static ssize_t mci_ctl_name_show(struct mem_ctl_info *mci, char *data) |
765 | struct device_attribute *mattr, | ||
766 | char *data) | ||
767 | { | 516 | { |
768 | struct mem_ctl_info *mci = to_mci(dev); | ||
769 | |||
770 | return sprintf(data, "%s\n", mci->ctl_name); | 517 | return sprintf(data, "%s\n", mci->ctl_name); |
771 | } | 518 | } |
772 | 519 | ||
773 | static ssize_t mci_size_mb_show(struct device *dev, | 520 | static ssize_t mci_size_mb_show(struct mem_ctl_info *mci, char *data) |
774 | struct device_attribute *mattr, | ||
775 | char *data) | ||
776 | { | 521 | { |
777 | struct mem_ctl_info *mci = to_mci(dev); | 522 | int total_pages, csrow_idx; |
778 | int total_pages = 0, csrow_idx, j; | ||
779 | 523 | ||
780 | for (csrow_idx = 0; csrow_idx < mci->nr_csrows; csrow_idx++) { | 524 | for (total_pages = csrow_idx = 0; csrow_idx < mci->nr_csrows; |
781 | struct csrow_info *csrow = mci->csrows[csrow_idx]; | 525 | csrow_idx++) { |
526 | struct csrow_info *csrow = &mci->csrows[csrow_idx]; | ||
782 | 527 | ||
783 | if (csrow->mci->csbased) { | 528 | if (!csrow->nr_pages) |
784 | total_pages += csrow->nr_pages; | 529 | continue; |
785 | } else { | ||
786 | for (j = 0; j < csrow->nr_channels; j++) { | ||
787 | struct dimm_info *dimm = csrow->channels[j]->dimm; | ||
788 | 530 | ||
789 | total_pages += dimm->nr_pages; | 531 | total_pages += csrow->nr_pages; |
790 | } | ||
791 | } | ||
792 | } | 532 | } |
793 | 533 | ||
794 | return sprintf(data, "%u\n", PAGES_TO_MiB(total_pages)); | 534 | return sprintf(data, "%u\n", PAGES_TO_MiB(total_pages)); |
795 | } | 535 | } |
796 | 536 | ||
797 | static ssize_t mci_max_location_show(struct device *dev, | 537 | #define to_mci(k) container_of(k, struct mem_ctl_info, edac_mci_kobj) |
798 | struct device_attribute *mattr, | 538 | #define to_mcidev_attr(a) container_of(a,struct mcidev_sysfs_attribute,attr) |
799 | char *data) | 539 | |
540 | /* MCI show/store functions for top most object */ | ||
541 | static ssize_t mcidev_show(struct kobject *kobj, struct attribute *attr, | ||
542 | char *buffer) | ||
800 | { | 543 | { |
801 | struct mem_ctl_info *mci = to_mci(dev); | 544 | struct mem_ctl_info *mem_ctl_info = to_mci(kobj); |
802 | int i; | 545 | struct mcidev_sysfs_attribute *mcidev_attr = to_mcidev_attr(attr); |
803 | char *p = data; | ||
804 | 546 | ||
805 | for (i = 0; i < mci->n_layers; i++) { | 547 | debugf1("%s() mem_ctl_info %p\n", __func__, mem_ctl_info); |
806 | p += sprintf(p, "%s %d ", | 548 | |
807 | edac_layer_name[mci->layers[i].type], | 549 | if (mcidev_attr->show) |
808 | mci->layers[i].size - 1); | 550 | return mcidev_attr->show(mem_ctl_info, buffer); |
809 | } | ||
810 | 551 | ||
811 | return p - data; | 552 | return -EIO; |
812 | } | 553 | } |
813 | 554 | ||
814 | #ifdef CONFIG_EDAC_DEBUG | 555 | static ssize_t mcidev_store(struct kobject *kobj, struct attribute *attr, |
815 | static ssize_t edac_fake_inject_write(struct file *file, | 556 | const char *buffer, size_t count) |
816 | const char __user *data, | ||
817 | size_t count, loff_t *ppos) | ||
818 | { | 557 | { |
819 | struct device *dev = file->private_data; | 558 | struct mem_ctl_info *mem_ctl_info = to_mci(kobj); |
820 | struct mem_ctl_info *mci = to_mci(dev); | 559 | struct mcidev_sysfs_attribute *mcidev_attr = to_mcidev_attr(attr); |
821 | static enum hw_event_mc_err_type type; | ||
822 | u16 errcount = mci->fake_inject_count; | ||
823 | |||
824 | if (!errcount) | ||
825 | errcount = 1; | ||
826 | |||
827 | type = mci->fake_inject_ue ? HW_EVENT_ERR_UNCORRECTED | ||
828 | : HW_EVENT_ERR_CORRECTED; | ||
829 | |||
830 | printk(KERN_DEBUG | ||
831 | "Generating %d %s fake error%s to %d.%d.%d to test core handling. NOTE: this won't test the driver-specific decoding logic.\n", | ||
832 | errcount, | ||
833 | (type == HW_EVENT_ERR_UNCORRECTED) ? "UE" : "CE", | ||
834 | errcount > 1 ? "s" : "", | ||
835 | mci->fake_inject_layer[0], | ||
836 | mci->fake_inject_layer[1], | ||
837 | mci->fake_inject_layer[2] | ||
838 | ); | ||
839 | edac_mc_handle_error(type, mci, errcount, 0, 0, 0, | ||
840 | mci->fake_inject_layer[0], | ||
841 | mci->fake_inject_layer[1], | ||
842 | mci->fake_inject_layer[2], | ||
843 | "FAKE ERROR", "for EDAC testing only"); | ||
844 | 560 | ||
845 | return count; | 561 | debugf1("%s() mem_ctl_info %p\n", __func__, mem_ctl_info); |
562 | |||
563 | if (mcidev_attr->store) | ||
564 | return mcidev_attr->store(mem_ctl_info, buffer, count); | ||
565 | |||
566 | return -EIO; | ||
846 | } | 567 | } |
847 | 568 | ||
848 | static const struct file_operations debug_fake_inject_fops = { | 569 | /* Intermediate show/store table */ |
849 | .open = simple_open, | 570 | static const struct sysfs_ops mci_ops = { |
850 | .write = edac_fake_inject_write, | 571 | .show = mcidev_show, |
851 | .llseek = generic_file_llseek, | 572 | .store = mcidev_store |
573 | }; | ||
574 | |||
575 | #define MCIDEV_ATTR(_name,_mode,_show,_store) \ | ||
576 | static struct mcidev_sysfs_attribute mci_attr_##_name = { \ | ||
577 | .attr = {.name = __stringify(_name), .mode = _mode }, \ | ||
578 | .show = _show, \ | ||
579 | .store = _store, \ | ||
852 | }; | 580 | }; |
853 | #endif | ||
854 | 581 | ||
855 | /* default Control file */ | 582 | /* default Control file */ |
856 | DEVICE_ATTR(reset_counters, S_IWUSR, NULL, mci_reset_counters_store); | 583 | MCIDEV_ATTR(reset_counters, S_IWUSR, NULL, mci_reset_counters_store); |
857 | 584 | ||
858 | /* default Attribute files */ | 585 | /* default Attribute files */ |
859 | DEVICE_ATTR(mc_name, S_IRUGO, mci_ctl_name_show, NULL); | 586 | MCIDEV_ATTR(mc_name, S_IRUGO, mci_ctl_name_show, NULL); |
860 | DEVICE_ATTR(size_mb, S_IRUGO, mci_size_mb_show, NULL); | 587 | MCIDEV_ATTR(size_mb, S_IRUGO, mci_size_mb_show, NULL); |
861 | DEVICE_ATTR(seconds_since_reset, S_IRUGO, mci_seconds_show, NULL); | 588 | MCIDEV_ATTR(seconds_since_reset, S_IRUGO, mci_seconds_show, NULL); |
862 | DEVICE_ATTR(ue_noinfo_count, S_IRUGO, mci_ue_noinfo_show, NULL); | 589 | MCIDEV_ATTR(ue_noinfo_count, S_IRUGO, mci_ue_noinfo_show, NULL); |
863 | DEVICE_ATTR(ce_noinfo_count, S_IRUGO, mci_ce_noinfo_show, NULL); | 590 | MCIDEV_ATTR(ce_noinfo_count, S_IRUGO, mci_ce_noinfo_show, NULL); |
864 | DEVICE_ATTR(ue_count, S_IRUGO, mci_ue_count_show, NULL); | 591 | MCIDEV_ATTR(ue_count, S_IRUGO, mci_ue_count_show, NULL); |
865 | DEVICE_ATTR(ce_count, S_IRUGO, mci_ce_count_show, NULL); | 592 | MCIDEV_ATTR(ce_count, S_IRUGO, mci_ce_count_show, NULL); |
866 | DEVICE_ATTR(max_location, S_IRUGO, mci_max_location_show, NULL); | ||
867 | 593 | ||
868 | /* memory scrubber attribute file */ | 594 | /* memory scrubber attribute file */ |
869 | DEVICE_ATTR(sdram_scrub_rate, S_IRUGO | S_IWUSR, mci_sdram_scrub_rate_show, | 595 | MCIDEV_ATTR(sdram_scrub_rate, S_IRUGO | S_IWUSR, mci_sdram_scrub_rate_show, |
870 | mci_sdram_scrub_rate_store); | 596 | mci_sdram_scrub_rate_store); |
871 | 597 | ||
872 | static struct attribute *mci_attrs[] = { | 598 | static struct mcidev_sysfs_attribute *mci_attr[] = { |
873 | &dev_attr_reset_counters.attr, | 599 | &mci_attr_reset_counters, |
874 | &dev_attr_mc_name.attr, | 600 | &mci_attr_mc_name, |
875 | &dev_attr_size_mb.attr, | 601 | &mci_attr_size_mb, |
876 | &dev_attr_seconds_since_reset.attr, | 602 | &mci_attr_seconds_since_reset, |
877 | &dev_attr_ue_noinfo_count.attr, | 603 | &mci_attr_ue_noinfo_count, |
878 | &dev_attr_ce_noinfo_count.attr, | 604 | &mci_attr_ce_noinfo_count, |
879 | &dev_attr_ue_count.attr, | 605 | &mci_attr_ue_count, |
880 | &dev_attr_ce_count.attr, | 606 | &mci_attr_ce_count, |
881 | &dev_attr_sdram_scrub_rate.attr, | 607 | &mci_attr_sdram_scrub_rate, |
882 | &dev_attr_max_location.attr, | ||
883 | NULL | 608 | NULL |
884 | }; | 609 | }; |
885 | 610 | ||
886 | static struct attribute_group mci_attr_grp = { | ||
887 | .attrs = mci_attrs, | ||
888 | }; | ||
889 | |||
890 | static const struct attribute_group *mci_attr_groups[] = { | ||
891 | &mci_attr_grp, | ||
892 | NULL | ||
893 | }; | ||
894 | 611 | ||
895 | static void mci_attr_release(struct device *dev) | 612 | /* |
613 | * Release of a MC controlling instance | ||
614 | * | ||
615 | * each MC control instance has the following resources upon entry: | ||
616 | * a) a ref count on the top memctl kobj | ||
617 | * b) a ref count on this module | ||
618 | * | ||
619 | * this function must decrement those ref counts and then | ||
620 | * issue a free on the instance's memory | ||
621 | */ | ||
622 | static void edac_mci_control_release(struct kobject *kobj) | ||
896 | { | 623 | { |
897 | struct mem_ctl_info *mci = container_of(dev, struct mem_ctl_info, dev); | 624 | struct mem_ctl_info *mci; |
625 | |||
626 | mci = to_mci(kobj); | ||
627 | |||
628 | debugf0("%s() mci instance idx=%d releasing\n", __func__, mci->mc_idx); | ||
898 | 629 | ||
899 | edac_dbg(1, "Releasing csrow device %s\n", dev_name(dev)); | 630 | /* decrement the module ref count */ |
900 | kfree(mci); | 631 | module_put(mci->owner); |
901 | } | 632 | } |
902 | 633 | ||
903 | static struct device_type mci_attr_type = { | 634 | static struct kobj_type ktype_mci = { |
904 | .groups = mci_attr_groups, | 635 | .release = edac_mci_control_release, |
905 | .release = mci_attr_release, | 636 | .sysfs_ops = &mci_ops, |
637 | .default_attrs = (struct attribute **)mci_attr, | ||
906 | }; | 638 | }; |
907 | 639 | ||
908 | #ifdef CONFIG_EDAC_DEBUG | 640 | /* EDAC memory controller sysfs kset: |
909 | static struct dentry *edac_debugfs; | 641 | * /sys/devices/system/edac/mc |
642 | */ | ||
643 | static struct kset *mc_kset; | ||
910 | 644 | ||
911 | int __init edac_debugfs_init(void) | 645 | /* |
646 | * edac_mc_register_sysfs_main_kobj | ||
647 | * | ||
648 | * setups and registers the main kobject for each mci | ||
649 | */ | ||
650 | int edac_mc_register_sysfs_main_kobj(struct mem_ctl_info *mci) | ||
912 | { | 651 | { |
913 | edac_debugfs = debugfs_create_dir("edac", NULL); | 652 | struct kobject *kobj_mci; |
914 | if (IS_ERR(edac_debugfs)) { | 653 | int err; |
915 | edac_debugfs = NULL; | 654 | |
916 | return -ENOMEM; | 655 | debugf1("%s()\n", __func__); |
656 | |||
657 | kobj_mci = &mci->edac_mci_kobj; | ||
658 | |||
659 | /* Init the mci's kobject */ | ||
660 | memset(kobj_mci, 0, sizeof(*kobj_mci)); | ||
661 | |||
662 | /* Record which module 'owns' this control structure | ||
663 | * and bump the ref count of the module | ||
664 | */ | ||
665 | mci->owner = THIS_MODULE; | ||
666 | |||
667 | /* bump ref count on this module */ | ||
668 | if (!try_module_get(mci->owner)) { | ||
669 | err = -ENODEV; | ||
670 | goto fail_out; | ||
917 | } | 671 | } |
672 | |||
673 | /* this instance become part of the mc_kset */ | ||
674 | kobj_mci->kset = mc_kset; | ||
675 | |||
676 | /* register the mc<id> kobject to the mc_kset */ | ||
677 | err = kobject_init_and_add(kobj_mci, &ktype_mci, NULL, | ||
678 | "mc%d", mci->mc_idx); | ||
679 | if (err) { | ||
680 | debugf1("%s()Failed to register '.../edac/mc%d'\n", | ||
681 | __func__, mci->mc_idx); | ||
682 | goto kobj_reg_fail; | ||
683 | } | ||
684 | kobject_uevent(kobj_mci, KOBJ_ADD); | ||
685 | |||
686 | /* At this point, to 'free' the control struct, | ||
687 | * edac_mc_unregister_sysfs_main_kobj() must be used | ||
688 | */ | ||
689 | |||
690 | debugf1("%s() Registered '.../edac/mc%d' kobject\n", | ||
691 | __func__, mci->mc_idx); | ||
692 | |||
918 | return 0; | 693 | return 0; |
694 | |||
695 | /* Error exit stack */ | ||
696 | |||
697 | kobj_reg_fail: | ||
698 | module_put(mci->owner); | ||
699 | |||
700 | fail_out: | ||
701 | return err; | ||
919 | } | 702 | } |
920 | 703 | ||
921 | void __exit edac_debugfs_exit(void) | 704 | /* |
705 | * edac_mc_register_sysfs_main_kobj | ||
706 | * | ||
707 | * tears down and the main mci kobject from the mc_kset | ||
708 | */ | ||
709 | void edac_mc_unregister_sysfs_main_kobj(struct mem_ctl_info *mci) | ||
922 | { | 710 | { |
923 | debugfs_remove(edac_debugfs); | 711 | debugf1("%s()\n", __func__); |
712 | |||
713 | /* delete the kobj from the mc_kset */ | ||
714 | kobject_put(&mci->edac_mci_kobj); | ||
924 | } | 715 | } |
925 | 716 | ||
926 | int edac_create_debug_nodes(struct mem_ctl_info *mci) | 717 | #define EDAC_DEVICE_SYMLINK "device" |
718 | |||
719 | #define grp_to_mci(k) (container_of(k, struct mcidev_sysfs_group_kobj, kobj)->mci) | ||
720 | |||
721 | /* MCI show/store functions for top most object */ | ||
722 | static ssize_t inst_grp_show(struct kobject *kobj, struct attribute *attr, | ||
723 | char *buffer) | ||
927 | { | 724 | { |
928 | struct dentry *d, *parent; | 725 | struct mem_ctl_info *mem_ctl_info = grp_to_mci(kobj); |
929 | char name[80]; | 726 | struct mcidev_sysfs_attribute *mcidev_attr = to_mcidev_attr(attr); |
930 | int i; | ||
931 | 727 | ||
932 | if (!edac_debugfs) | 728 | debugf1("%s() mem_ctl_info %p\n", __func__, mem_ctl_info); |
933 | return -ENODEV; | ||
934 | |||
935 | d = debugfs_create_dir(mci->dev.kobj.name, edac_debugfs); | ||
936 | if (!d) | ||
937 | return -ENOMEM; | ||
938 | parent = d; | ||
939 | |||
940 | for (i = 0; i < mci->n_layers; i++) { | ||
941 | sprintf(name, "fake_inject_%s", | ||
942 | edac_layer_name[mci->layers[i].type]); | ||
943 | d = debugfs_create_u8(name, S_IRUGO | S_IWUSR, parent, | ||
944 | &mci->fake_inject_layer[i]); | ||
945 | if (!d) | ||
946 | goto nomem; | ||
947 | } | ||
948 | 729 | ||
949 | d = debugfs_create_bool("fake_inject_ue", S_IRUGO | S_IWUSR, parent, | 730 | if (mcidev_attr->show) |
950 | &mci->fake_inject_ue); | 731 | return mcidev_attr->show(mem_ctl_info, buffer); |
951 | if (!d) | ||
952 | goto nomem; | ||
953 | 732 | ||
954 | d = debugfs_create_u16("fake_inject_count", S_IRUGO | S_IWUSR, parent, | 733 | return -EIO; |
955 | &mci->fake_inject_count); | 734 | } |
956 | if (!d) | 735 | |
957 | goto nomem; | 736 | static ssize_t inst_grp_store(struct kobject *kobj, struct attribute *attr, |
737 | const char *buffer, size_t count) | ||
738 | { | ||
739 | struct mem_ctl_info *mem_ctl_info = grp_to_mci(kobj); | ||
740 | struct mcidev_sysfs_attribute *mcidev_attr = to_mcidev_attr(attr); | ||
741 | |||
742 | debugf1("%s() mem_ctl_info %p\n", __func__, mem_ctl_info); | ||
743 | |||
744 | if (mcidev_attr->store) | ||
745 | return mcidev_attr->store(mem_ctl_info, buffer, count); | ||
746 | |||
747 | return -EIO; | ||
748 | } | ||
749 | |||
750 | /* No memory to release for this kobj */ | ||
751 | static void edac_inst_grp_release(struct kobject *kobj) | ||
752 | { | ||
753 | struct mcidev_sysfs_group_kobj *grp; | ||
754 | struct mem_ctl_info *mci; | ||
958 | 755 | ||
959 | d = debugfs_create_file("fake_inject", S_IWUSR, parent, | 756 | debugf1("%s()\n", __func__); |
960 | &mci->dev, | 757 | |
961 | &debug_fake_inject_fops); | 758 | grp = container_of(kobj, struct mcidev_sysfs_group_kobj, kobj); |
962 | if (!d) | 759 | mci = grp->mci; |
963 | goto nomem; | 760 | } |
761 | |||
762 | /* Intermediate show/store table */ | ||
763 | static struct sysfs_ops inst_grp_ops = { | ||
764 | .show = inst_grp_show, | ||
765 | .store = inst_grp_store | ||
766 | }; | ||
767 | |||
768 | /* the kobj_type instance for a instance group */ | ||
769 | static struct kobj_type ktype_inst_grp = { | ||
770 | .release = edac_inst_grp_release, | ||
771 | .sysfs_ops = &inst_grp_ops, | ||
772 | }; | ||
773 | |||
774 | |||
775 | /* | ||
776 | * edac_create_mci_instance_attributes | ||
777 | * create MC driver specific attributes bellow an specified kobj | ||
778 | * This routine calls itself recursively, in order to create an entire | ||
779 | * object tree. | ||
780 | */ | ||
781 | static int edac_create_mci_instance_attributes(struct mem_ctl_info *mci, | ||
782 | const struct mcidev_sysfs_attribute *sysfs_attrib, | ||
783 | struct kobject *kobj) | ||
784 | { | ||
785 | int err; | ||
786 | |||
787 | debugf4("%s()\n", __func__); | ||
788 | |||
789 | while (sysfs_attrib) { | ||
790 | debugf4("%s() sysfs_attrib = %p\n",__func__, sysfs_attrib); | ||
791 | if (sysfs_attrib->grp) { | ||
792 | struct mcidev_sysfs_group_kobj *grp_kobj; | ||
793 | |||
794 | grp_kobj = kzalloc(sizeof(*grp_kobj), GFP_KERNEL); | ||
795 | if (!grp_kobj) | ||
796 | return -ENOMEM; | ||
797 | |||
798 | grp_kobj->grp = sysfs_attrib->grp; | ||
799 | grp_kobj->mci = mci; | ||
800 | list_add_tail(&grp_kobj->list, &mci->grp_kobj_list); | ||
801 | |||
802 | debugf0("%s() grp %s, mci %p\n", __func__, | ||
803 | sysfs_attrib->grp->name, mci); | ||
804 | |||
805 | err = kobject_init_and_add(&grp_kobj->kobj, | ||
806 | &ktype_inst_grp, | ||
807 | &mci->edac_mci_kobj, | ||
808 | sysfs_attrib->grp->name); | ||
809 | if (err < 0) { | ||
810 | printk(KERN_ERR "kobject_init_and_add failed: %d\n", err); | ||
811 | return err; | ||
812 | } | ||
813 | err = edac_create_mci_instance_attributes(mci, | ||
814 | grp_kobj->grp->mcidev_attr, | ||
815 | &grp_kobj->kobj); | ||
816 | |||
817 | if (err < 0) | ||
818 | return err; | ||
819 | } else if (sysfs_attrib->attr.name) { | ||
820 | debugf4("%s() file %s\n", __func__, | ||
821 | sysfs_attrib->attr.name); | ||
822 | |||
823 | err = sysfs_create_file(kobj, &sysfs_attrib->attr); | ||
824 | if (err < 0) { | ||
825 | printk(KERN_ERR "sysfs_create_file failed: %d\n", err); | ||
826 | return err; | ||
827 | } | ||
828 | } else | ||
829 | break; | ||
830 | |||
831 | sysfs_attrib++; | ||
832 | } | ||
964 | 833 | ||
965 | mci->debugfs = parent; | ||
966 | return 0; | 834 | return 0; |
967 | nomem: | ||
968 | debugfs_remove(mci->debugfs); | ||
969 | return -ENOMEM; | ||
970 | } | 835 | } |
971 | #endif | 836 | |
837 | /* | ||
838 | * edac_remove_mci_instance_attributes | ||
839 | * remove MC driver specific attributes at the topmost level | ||
840 | * directory of this mci instance. | ||
841 | */ | ||
842 | static void edac_remove_mci_instance_attributes(struct mem_ctl_info *mci, | ||
843 | const struct mcidev_sysfs_attribute *sysfs_attrib, | ||
844 | struct kobject *kobj, int count) | ||
845 | { | ||
846 | struct mcidev_sysfs_group_kobj *grp_kobj, *tmp; | ||
847 | |||
848 | debugf1("%s()\n", __func__); | ||
849 | |||
850 | /* | ||
851 | * loop if there are attributes and until we hit a NULL entry | ||
852 | * Remove first all the attributes | ||
853 | */ | ||
854 | while (sysfs_attrib) { | ||
855 | debugf4("%s() sysfs_attrib = %p\n",__func__, sysfs_attrib); | ||
856 | if (sysfs_attrib->grp) { | ||
857 | debugf4("%s() seeking for group %s\n", | ||
858 | __func__, sysfs_attrib->grp->name); | ||
859 | list_for_each_entry(grp_kobj, | ||
860 | &mci->grp_kobj_list, list) { | ||
861 | debugf4("%s() grp_kobj->grp = %p\n",__func__, grp_kobj->grp); | ||
862 | if (grp_kobj->grp == sysfs_attrib->grp) { | ||
863 | edac_remove_mci_instance_attributes(mci, | ||
864 | grp_kobj->grp->mcidev_attr, | ||
865 | &grp_kobj->kobj, count + 1); | ||
866 | debugf4("%s() group %s\n", __func__, | ||
867 | sysfs_attrib->grp->name); | ||
868 | kobject_put(&grp_kobj->kobj); | ||
869 | } | ||
870 | } | ||
871 | debugf4("%s() end of seeking for group %s\n", | ||
872 | __func__, sysfs_attrib->grp->name); | ||
873 | } else if (sysfs_attrib->attr.name) { | ||
874 | debugf4("%s() file %s\n", __func__, | ||
875 | sysfs_attrib->attr.name); | ||
876 | sysfs_remove_file(kobj, &sysfs_attrib->attr); | ||
877 | } else | ||
878 | break; | ||
879 | sysfs_attrib++; | ||
880 | } | ||
881 | |||
882 | /* Remove the group objects */ | ||
883 | if (count) | ||
884 | return; | ||
885 | list_for_each_entry_safe(grp_kobj, tmp, | ||
886 | &mci->grp_kobj_list, list) { | ||
887 | list_del(&grp_kobj->list); | ||
888 | kfree(grp_kobj); | ||
889 | } | ||
890 | } | ||
891 | |||
972 | 892 | ||
973 | /* | 893 | /* |
974 | * Create a new Memory Controller kobject instance, | 894 | * Create a new Memory Controller kobject instance, |
@@ -980,85 +900,71 @@ nomem: | |||
980 | */ | 900 | */ |
981 | int edac_create_sysfs_mci_device(struct mem_ctl_info *mci) | 901 | int edac_create_sysfs_mci_device(struct mem_ctl_info *mci) |
982 | { | 902 | { |
983 | int i, err; | 903 | int i; |
904 | int err; | ||
905 | struct csrow_info *csrow; | ||
906 | struct kobject *kobj_mci = &mci->edac_mci_kobj; | ||
984 | 907 | ||
985 | /* | 908 | debugf0("%s() idx=%d\n", __func__, mci->mc_idx); |
986 | * The memory controller needs its own bus, in order to avoid | ||
987 | * namespace conflicts at /sys/bus/edac. | ||
988 | */ | ||
989 | mci->bus.name = kasprintf(GFP_KERNEL, "mc%d", mci->mc_idx); | ||
990 | if (!mci->bus.name) | ||
991 | return -ENOMEM; | ||
992 | edac_dbg(0, "creating bus %s\n", mci->bus.name); | ||
993 | err = bus_register(&mci->bus); | ||
994 | if (err < 0) | ||
995 | return err; | ||
996 | 909 | ||
997 | /* get the /sys/devices/system/edac subsys reference */ | 910 | INIT_LIST_HEAD(&mci->grp_kobj_list); |
998 | mci->dev.type = &mci_attr_type; | 911 | |
999 | device_initialize(&mci->dev); | 912 | /* create a symlink for the device */ |
1000 | 913 | err = sysfs_create_link(kobj_mci, &mci->dev->kobj, | |
1001 | mci->dev.parent = mci_pdev; | 914 | EDAC_DEVICE_SYMLINK); |
1002 | mci->dev.bus = &mci->bus; | 915 | if (err) { |
1003 | dev_set_name(&mci->dev, "mc%d", mci->mc_idx); | 916 | debugf1("%s() failure to create symlink\n", __func__); |
1004 | dev_set_drvdata(&mci->dev, mci); | 917 | goto fail0; |
1005 | pm_runtime_forbid(&mci->dev); | ||
1006 | |||
1007 | edac_dbg(0, "creating device %s\n", dev_name(&mci->dev)); | ||
1008 | err = device_add(&mci->dev); | ||
1009 | if (err < 0) { | ||
1010 | bus_unregister(&mci->bus); | ||
1011 | kfree(mci->bus.name); | ||
1012 | return err; | ||
1013 | } | 918 | } |
1014 | 919 | ||
1015 | /* | 920 | /* If the low level driver desires some attributes, |
1016 | * Create the dimm/rank devices | 921 | * then create them now for the driver. |
1017 | */ | 922 | */ |
1018 | for (i = 0; i < mci->tot_dimms; i++) { | 923 | if (mci->mc_driver_sysfs_attributes) { |
1019 | struct dimm_info *dimm = mci->dimms[i]; | 924 | err = edac_create_mci_instance_attributes(mci, |
1020 | /* Only expose populated DIMMs */ | 925 | mci->mc_driver_sysfs_attributes, |
1021 | if (dimm->nr_pages == 0) | 926 | &mci->edac_mci_kobj); |
1022 | continue; | ||
1023 | #ifdef CONFIG_EDAC_DEBUG | ||
1024 | edac_dbg(1, "creating dimm%d, located at ", i); | ||
1025 | if (edac_debug_level >= 1) { | ||
1026 | int lay; | ||
1027 | for (lay = 0; lay < mci->n_layers; lay++) | ||
1028 | printk(KERN_CONT "%s %d ", | ||
1029 | edac_layer_name[mci->layers[lay].type], | ||
1030 | dimm->location[lay]); | ||
1031 | printk(KERN_CONT "\n"); | ||
1032 | } | ||
1033 | #endif | ||
1034 | err = edac_create_dimm_object(mci, dimm, i); | ||
1035 | if (err) { | 927 | if (err) { |
1036 | edac_dbg(1, "failure: create dimm %d obj\n", i); | 928 | debugf1("%s() failure to create mci attributes\n", |
1037 | goto fail; | 929 | __func__); |
930 | goto fail0; | ||
1038 | } | 931 | } |
1039 | } | 932 | } |
1040 | 933 | ||
1041 | #ifdef CONFIG_EDAC_LEGACY_SYSFS | 934 | /* Make directories for each CSROW object under the mc<id> kobject |
1042 | err = edac_create_csrow_objects(mci); | 935 | */ |
1043 | if (err < 0) | 936 | for (i = 0; i < mci->nr_csrows; i++) { |
1044 | goto fail; | 937 | csrow = &mci->csrows[i]; |
1045 | #endif | 938 | |
939 | /* Only expose populated CSROWs */ | ||
940 | if (csrow->nr_pages > 0) { | ||
941 | err = edac_create_csrow_object(mci, csrow, i); | ||
942 | if (err) { | ||
943 | debugf1("%s() failure: create csrow %d obj\n", | ||
944 | __func__, i); | ||
945 | goto fail1; | ||
946 | } | ||
947 | } | ||
948 | } | ||
1046 | 949 | ||
1047 | #ifdef CONFIG_EDAC_DEBUG | ||
1048 | edac_create_debug_nodes(mci); | ||
1049 | #endif | ||
1050 | return 0; | 950 | return 0; |
1051 | 951 | ||
1052 | fail: | 952 | /* CSROW error: backout what has already been registered, */ |
953 | fail1: | ||
1053 | for (i--; i >= 0; i--) { | 954 | for (i--; i >= 0; i--) { |
1054 | struct dimm_info *dimm = mci->dimms[i]; | 955 | if (csrow->nr_pages > 0) { |
1055 | if (dimm->nr_pages == 0) | 956 | kobject_put(&mci->csrows[i].kobj); |
1056 | continue; | 957 | } |
1057 | device_unregister(&dimm->dev); | ||
1058 | } | 958 | } |
1059 | device_unregister(&mci->dev); | 959 | |
1060 | bus_unregister(&mci->bus); | 960 | /* remove the mci instance's attributes, if any */ |
1061 | kfree(mci->bus.name); | 961 | edac_remove_mci_instance_attributes(mci, |
962 | mci->mc_driver_sysfs_attributes, &mci->edac_mci_kobj, 0); | ||
963 | |||
964 | /* remove the symlink */ | ||
965 | sysfs_remove_link(kobj_mci, EDAC_DEVICE_SYMLINK); | ||
966 | |||
967 | fail0: | ||
1062 | return err; | 968 | return err; |
1063 | } | 969 | } |
1064 | 970 | ||
@@ -1069,91 +975,90 @@ void edac_remove_sysfs_mci_device(struct mem_ctl_info *mci) | |||
1069 | { | 975 | { |
1070 | int i; | 976 | int i; |
1071 | 977 | ||
1072 | edac_dbg(0, "\n"); | 978 | debugf0("%s()\n", __func__); |
1073 | 979 | ||
1074 | #ifdef CONFIG_EDAC_DEBUG | 980 | /* remove all csrow kobjects */ |
1075 | debugfs_remove(mci->debugfs); | 981 | debugf4("%s() unregister this mci kobj\n", __func__); |
1076 | #endif | 982 | for (i = 0; i < mci->nr_csrows; i++) { |
1077 | #ifdef CONFIG_EDAC_LEGACY_SYSFS | 983 | if (mci->csrows[i].nr_pages > 0) { |
1078 | edac_delete_csrow_objects(mci); | 984 | debugf0("%s() unreg csrow-%d\n", __func__, i); |
1079 | #endif | 985 | kobject_put(&mci->csrows[i].kobj); |
986 | } | ||
987 | } | ||
1080 | 988 | ||
1081 | for (i = 0; i < mci->tot_dimms; i++) { | 989 | /* remove this mci instance's attribtes */ |
1082 | struct dimm_info *dimm = mci->dimms[i]; | 990 | if (mci->mc_driver_sysfs_attributes) { |
1083 | if (dimm->nr_pages == 0) | 991 | debugf4("%s() unregister mci private attributes\n", __func__); |
1084 | continue; | 992 | edac_remove_mci_instance_attributes(mci, |
1085 | edac_dbg(0, "removing device %s\n", dev_name(&dimm->dev)); | 993 | mci->mc_driver_sysfs_attributes, |
1086 | device_unregister(&dimm->dev); | 994 | &mci->edac_mci_kobj, 0); |
1087 | } | 995 | } |
1088 | } | ||
1089 | 996 | ||
1090 | void edac_unregister_sysfs(struct mem_ctl_info *mci) | 997 | /* remove the symlink */ |
1091 | { | 998 | debugf4("%s() remove_link\n", __func__); |
1092 | edac_dbg(1, "Unregistering device %s\n", dev_name(&mci->dev)); | 999 | sysfs_remove_link(&mci->edac_mci_kobj, EDAC_DEVICE_SYMLINK); |
1093 | device_unregister(&mci->dev); | ||
1094 | bus_unregister(&mci->bus); | ||
1095 | kfree(mci->bus.name); | ||
1096 | } | ||
1097 | 1000 | ||
1098 | static void mc_attr_release(struct device *dev) | 1001 | /* unregister this instance's kobject */ |
1099 | { | 1002 | debugf4("%s() remove_mci_instance\n", __func__); |
1100 | /* | 1003 | kobject_put(&mci->edac_mci_kobj); |
1101 | * There's no container structure here, as this is just the mci | ||
1102 | * parent device, used to create the /sys/devices/mc sysfs node. | ||
1103 | * So, there are no attributes on it. | ||
1104 | */ | ||
1105 | edac_dbg(1, "Releasing device %s\n", dev_name(dev)); | ||
1106 | kfree(dev); | ||
1107 | } | 1004 | } |
1108 | 1005 | ||
1109 | static struct device_type mc_attr_type = { | 1006 | |
1110 | .release = mc_attr_release, | 1007 | |
1111 | }; | 1008 | |
1112 | /* | 1009 | /* |
1113 | * Init/exit code for the module. Basically, creates/removes /sys/class/rc | 1010 | * edac_setup_sysfs_mc_kset(void) |
1011 | * | ||
1012 | * Initialize the mc_kset for the 'mc' entry | ||
1013 | * This requires creating the top 'mc' directory with a kset | ||
1014 | * and its controls/attributes. | ||
1015 | * | ||
1016 | * To this 'mc' kset, instance 'mci' will be grouped as children. | ||
1017 | * | ||
1018 | * Return: 0 SUCCESS | ||
1019 | * !0 FAILURE error code | ||
1114 | */ | 1020 | */ |
1115 | int __init edac_mc_sysfs_init(void) | 1021 | int edac_sysfs_setup_mc_kset(void) |
1116 | { | 1022 | { |
1117 | struct bus_type *edac_subsys; | 1023 | int err = -EINVAL; |
1118 | int err; | 1024 | struct sysdev_class *edac_class; |
1025 | |||
1026 | debugf1("%s()\n", __func__); | ||
1119 | 1027 | ||
1120 | /* get the /sys/devices/system/edac subsys reference */ | 1028 | /* get the /sys/devices/system/edac class reference */ |
1121 | edac_subsys = edac_get_sysfs_subsys(); | 1029 | edac_class = edac_get_sysfs_class(); |
1122 | if (edac_subsys == NULL) { | 1030 | if (edac_class == NULL) { |
1123 | edac_dbg(1, "no edac_subsys\n"); | 1031 | debugf1("%s() no edac_class error=%d\n", __func__, err); |
1124 | err = -EINVAL; | 1032 | goto fail_out; |
1125 | goto out; | ||
1126 | } | 1033 | } |
1127 | 1034 | ||
1128 | mci_pdev = kzalloc(sizeof(*mci_pdev), GFP_KERNEL); | 1035 | /* Init the MC's kobject */ |
1129 | if (!mci_pdev) { | 1036 | mc_kset = kset_create_and_add("mc", NULL, &edac_class->kset.kobj); |
1037 | if (!mc_kset) { | ||
1130 | err = -ENOMEM; | 1038 | err = -ENOMEM; |
1131 | goto out_put_sysfs; | 1039 | debugf1("%s() Failed to register '.../edac/mc'\n", __func__); |
1040 | goto fail_kset; | ||
1132 | } | 1041 | } |
1133 | 1042 | ||
1134 | mci_pdev->bus = edac_subsys; | 1043 | debugf1("%s() Registered '.../edac/mc' kobject\n", __func__); |
1135 | mci_pdev->type = &mc_attr_type; | ||
1136 | device_initialize(mci_pdev); | ||
1137 | dev_set_name(mci_pdev, "mc"); | ||
1138 | |||
1139 | err = device_add(mci_pdev); | ||
1140 | if (err < 0) | ||
1141 | goto out_dev_free; | ||
1142 | |||
1143 | edac_dbg(0, "device %s created\n", dev_name(mci_pdev)); | ||
1144 | 1044 | ||
1145 | return 0; | 1045 | return 0; |
1146 | 1046 | ||
1147 | out_dev_free: | 1047 | fail_kset: |
1148 | kfree(mci_pdev); | 1048 | edac_put_sysfs_class(); |
1149 | out_put_sysfs: | 1049 | |
1150 | edac_put_sysfs_subsys(); | 1050 | fail_out: |
1151 | out: | ||
1152 | return err; | 1051 | return err; |
1153 | } | 1052 | } |
1154 | 1053 | ||
1155 | void __exit edac_mc_sysfs_exit(void) | 1054 | /* |
1055 | * edac_sysfs_teardown_mc_kset | ||
1056 | * | ||
1057 | * deconstruct the mc_ket for memory controllers | ||
1058 | */ | ||
1059 | void edac_sysfs_teardown_mc_kset(void) | ||
1156 | { | 1060 | { |
1157 | device_unregister(mci_pdev); | 1061 | kset_unregister(mc_kset); |
1158 | edac_put_sysfs_subsys(); | 1062 | edac_put_sysfs_class(); |
1159 | } | 1063 | } |
1064 | |||
diff --git a/drivers/edac/edac_module.c b/drivers/edac/edac_module.c index 12c951a2c33..5ddaa86d6a6 100644 --- a/drivers/edac/edac_module.c +++ b/drivers/edac/edac_module.c | |||
@@ -15,32 +15,12 @@ | |||
15 | #include "edac_core.h" | 15 | #include "edac_core.h" |
16 | #include "edac_module.h" | 16 | #include "edac_module.h" |
17 | 17 | ||
18 | #define EDAC_VERSION "Ver: 3.0.0" | 18 | #define EDAC_VERSION "Ver: 2.1.0" |
19 | 19 | ||
20 | #ifdef CONFIG_EDAC_DEBUG | 20 | #ifdef CONFIG_EDAC_DEBUG |
21 | |||
22 | static int edac_set_debug_level(const char *buf, struct kernel_param *kp) | ||
23 | { | ||
24 | unsigned long val; | ||
25 | int ret; | ||
26 | |||
27 | ret = kstrtoul(buf, 0, &val); | ||
28 | if (ret) | ||
29 | return ret; | ||
30 | |||
31 | if (val < 0 || val > 4) | ||
32 | return -EINVAL; | ||
33 | |||
34 | return param_set_int(buf, kp); | ||
35 | } | ||
36 | |||
37 | /* Values of 0 to 4 will generate output */ | 21 | /* Values of 0 to 4 will generate output */ |
38 | int edac_debug_level = 2; | 22 | int edac_debug_level = 2; |
39 | EXPORT_SYMBOL_GPL(edac_debug_level); | 23 | EXPORT_SYMBOL_GPL(edac_debug_level); |
40 | |||
41 | module_param_call(edac_debug_level, edac_set_debug_level, param_get_int, | ||
42 | &edac_debug_level, 0644); | ||
43 | MODULE_PARM_DESC(edac_debug_level, "EDAC debug level: [0-4], default: 2"); | ||
44 | #endif | 24 | #endif |
45 | 25 | ||
46 | /* scope is to module level only */ | 26 | /* scope is to module level only */ |
@@ -110,21 +90,26 @@ static int __init edac_init(void) | |||
110 | */ | 90 | */ |
111 | edac_pci_clear_parity_errors(); | 91 | edac_pci_clear_parity_errors(); |
112 | 92 | ||
113 | err = edac_mc_sysfs_init(); | 93 | /* |
94 | * now set up the mc_kset under the edac class object | ||
95 | */ | ||
96 | err = edac_sysfs_setup_mc_kset(); | ||
114 | if (err) | 97 | if (err) |
115 | goto error; | 98 | goto error; |
116 | 99 | ||
117 | edac_debugfs_init(); | ||
118 | |||
119 | /* Setup/Initialize the workq for this core */ | 100 | /* Setup/Initialize the workq for this core */ |
120 | err = edac_workqueue_setup(); | 101 | err = edac_workqueue_setup(); |
121 | if (err) { | 102 | if (err) { |
122 | edac_printk(KERN_ERR, EDAC_MC, "init WorkQueue failure\n"); | 103 | edac_printk(KERN_ERR, EDAC_MC, "init WorkQueue failure\n"); |
123 | goto error; | 104 | goto workq_fail; |
124 | } | 105 | } |
125 | 106 | ||
126 | return 0; | 107 | return 0; |
127 | 108 | ||
109 | /* Error teardown stack */ | ||
110 | workq_fail: | ||
111 | edac_sysfs_teardown_mc_kset(); | ||
112 | |||
128 | error: | 113 | error: |
129 | return err; | 114 | return err; |
130 | } | 115 | } |
@@ -135,12 +120,11 @@ error: | |||
135 | */ | 120 | */ |
136 | static void __exit edac_exit(void) | 121 | static void __exit edac_exit(void) |
137 | { | 122 | { |
138 | edac_dbg(0, "\n"); | 123 | debugf0("%s()\n", __func__); |
139 | 124 | ||
140 | /* tear down the various subsystems */ | 125 | /* tear down the various subsystems */ |
141 | edac_workqueue_teardown(); | 126 | edac_workqueue_teardown(); |
142 | edac_mc_sysfs_exit(); | 127 | edac_sysfs_teardown_mc_kset(); |
143 | edac_debugfs_exit(); | ||
144 | } | 128 | } |
145 | 129 | ||
146 | /* | 130 | /* |
@@ -152,3 +136,10 @@ module_exit(edac_exit); | |||
152 | MODULE_LICENSE("GPL"); | 136 | MODULE_LICENSE("GPL"); |
153 | MODULE_AUTHOR("Doug Thompson www.softwarebitmaker.com, et al"); | 137 | MODULE_AUTHOR("Doug Thompson www.softwarebitmaker.com, et al"); |
154 | MODULE_DESCRIPTION("Core library routines for EDAC reporting"); | 138 | MODULE_DESCRIPTION("Core library routines for EDAC reporting"); |
139 | |||
140 | /* refer to *_sysfs.c files for parameters that are exported via sysfs */ | ||
141 | |||
142 | #ifdef CONFIG_EDAC_DEBUG | ||
143 | module_param(edac_debug_level, int, 0644); | ||
144 | MODULE_PARM_DESC(edac_debug_level, "Debug level"); | ||
145 | #endif | ||
diff --git a/drivers/edac/edac_module.h b/drivers/edac/edac_module.h index 3d139c6e7fe..17aabb7b90e 100644 --- a/drivers/edac/edac_module.h +++ b/drivers/edac/edac_module.h | |||
@@ -10,6 +10,8 @@ | |||
10 | #ifndef __EDAC_MODULE_H__ | 10 | #ifndef __EDAC_MODULE_H__ |
11 | #define __EDAC_MODULE_H__ | 11 | #define __EDAC_MODULE_H__ |
12 | 12 | ||
13 | #include <linux/sysdev.h> | ||
14 | |||
13 | #include "edac_core.h" | 15 | #include "edac_core.h" |
14 | 16 | ||
15 | /* | 17 | /* |
@@ -19,12 +21,12 @@ | |||
19 | * | 21 | * |
20 | * edac_mc objects | 22 | * edac_mc objects |
21 | */ | 23 | */ |
22 | /* on edac_mc_sysfs.c */ | 24 | extern int edac_sysfs_setup_mc_kset(void); |
23 | int edac_mc_sysfs_init(void); | 25 | extern void edac_sysfs_teardown_mc_kset(void); |
24 | void edac_mc_sysfs_exit(void); | 26 | extern int edac_mc_register_sysfs_main_kobj(struct mem_ctl_info *mci); |
27 | extern void edac_mc_unregister_sysfs_main_kobj(struct mem_ctl_info *mci); | ||
25 | extern int edac_create_sysfs_mci_device(struct mem_ctl_info *mci); | 28 | extern int edac_create_sysfs_mci_device(struct mem_ctl_info *mci); |
26 | extern void edac_remove_sysfs_mci_device(struct mem_ctl_info *mci); | 29 | extern void edac_remove_sysfs_mci_device(struct mem_ctl_info *mci); |
27 | void edac_unregister_sysfs(struct mem_ctl_info *mci); | ||
28 | extern int edac_get_log_ue(void); | 30 | extern int edac_get_log_ue(void); |
29 | extern int edac_get_log_ce(void); | 31 | extern int edac_get_log_ce(void); |
30 | extern int edac_get_panic_on_ue(void); | 32 | extern int edac_get_panic_on_ue(void); |
@@ -34,10 +36,6 @@ extern int edac_mc_get_panic_on_ue(void); | |||
34 | extern int edac_get_poll_msec(void); | 36 | extern int edac_get_poll_msec(void); |
35 | extern int edac_mc_get_poll_msec(void); | 37 | extern int edac_mc_get_poll_msec(void); |
36 | 38 | ||
37 | unsigned edac_dimm_info_location(struct dimm_info *dimm, char *buf, | ||
38 | unsigned len); | ||
39 | |||
40 | /* on edac_device.c */ | ||
41 | extern int edac_device_register_sysfs_main_kobj( | 39 | extern int edac_device_register_sysfs_main_kobj( |
42 | struct edac_device_ctl_info *edac_dev); | 40 | struct edac_device_ctl_info *edac_dev); |
43 | extern void edac_device_unregister_sysfs_main_kobj( | 41 | extern void edac_device_unregister_sysfs_main_kobj( |
@@ -54,21 +52,7 @@ extern void edac_device_reset_delay_period(struct edac_device_ctl_info | |||
54 | *edac_dev, unsigned long value); | 52 | *edac_dev, unsigned long value); |
55 | extern void edac_mc_reset_delay_period(int value); | 53 | extern void edac_mc_reset_delay_period(int value); |
56 | 54 | ||
57 | extern void *edac_align_ptr(void **p, unsigned size, int n_elems); | 55 | extern void *edac_align_ptr(void *ptr, unsigned size); |
58 | |||
59 | /* | ||
60 | * EDAC debugfs functions | ||
61 | */ | ||
62 | #ifdef CONFIG_EDAC_DEBUG | ||
63 | int edac_debugfs_init(void); | ||
64 | void edac_debugfs_exit(void); | ||
65 | #else | ||
66 | static inline int edac_debugfs_init(void) | ||
67 | { | ||
68 | return -ENODEV; | ||
69 | } | ||
70 | static inline void edac_debugfs_exit(void) {} | ||
71 | #endif | ||
72 | 56 | ||
73 | /* | 57 | /* |
74 | * EDAC PCI functions | 58 | * EDAC PCI functions |
diff --git a/drivers/edac/edac_pci.c b/drivers/edac/edac_pci.c index dd370f92ace..2b378207d57 100644 --- a/drivers/edac/edac_pci.c +++ b/drivers/edac/edac_pci.c | |||
@@ -19,6 +19,7 @@ | |||
19 | #include <linux/slab.h> | 19 | #include <linux/slab.h> |
20 | #include <linux/spinlock.h> | 20 | #include <linux/spinlock.h> |
21 | #include <linux/list.h> | 21 | #include <linux/list.h> |
22 | #include <linux/sysdev.h> | ||
22 | #include <linux/ctype.h> | 23 | #include <linux/ctype.h> |
23 | #include <linux/workqueue.h> | 24 | #include <linux/workqueue.h> |
24 | #include <asm/uaccess.h> | 25 | #include <asm/uaccess.h> |
@@ -42,13 +43,13 @@ struct edac_pci_ctl_info *edac_pci_alloc_ctl_info(unsigned int sz_pvt, | |||
42 | const char *edac_pci_name) | 43 | const char *edac_pci_name) |
43 | { | 44 | { |
44 | struct edac_pci_ctl_info *pci; | 45 | struct edac_pci_ctl_info *pci; |
45 | void *p = NULL, *pvt; | 46 | void *pvt; |
46 | unsigned int size; | 47 | unsigned int size; |
47 | 48 | ||
48 | edac_dbg(1, "\n"); | 49 | debugf1("%s()\n", __func__); |
49 | 50 | ||
50 | pci = edac_align_ptr(&p, sizeof(*pci), 1); | 51 | pci = (struct edac_pci_ctl_info *)0; |
51 | pvt = edac_align_ptr(&p, 1, sz_pvt); | 52 | pvt = edac_align_ptr(&pci[1], sz_pvt); |
52 | size = ((unsigned long)pvt) + sz_pvt; | 53 | size = ((unsigned long)pvt) + sz_pvt; |
53 | 54 | ||
54 | /* Alloc the needed control struct memory */ | 55 | /* Alloc the needed control struct memory */ |
@@ -80,7 +81,7 @@ EXPORT_SYMBOL_GPL(edac_pci_alloc_ctl_info); | |||
80 | */ | 81 | */ |
81 | void edac_pci_free_ctl_info(struct edac_pci_ctl_info *pci) | 82 | void edac_pci_free_ctl_info(struct edac_pci_ctl_info *pci) |
82 | { | 83 | { |
83 | edac_dbg(1, "\n"); | 84 | debugf1("%s()\n", __func__); |
84 | 85 | ||
85 | edac_pci_remove_sysfs(pci); | 86 | edac_pci_remove_sysfs(pci); |
86 | } | 87 | } |
@@ -97,7 +98,7 @@ static struct edac_pci_ctl_info *find_edac_pci_by_dev(struct device *dev) | |||
97 | struct edac_pci_ctl_info *pci; | 98 | struct edac_pci_ctl_info *pci; |
98 | struct list_head *item; | 99 | struct list_head *item; |
99 | 100 | ||
100 | edac_dbg(1, "\n"); | 101 | debugf1("%s()\n", __func__); |
101 | 102 | ||
102 | list_for_each(item, &edac_pci_list) { | 103 | list_for_each(item, &edac_pci_list) { |
103 | pci = list_entry(item, struct edac_pci_ctl_info, link); | 104 | pci = list_entry(item, struct edac_pci_ctl_info, link); |
@@ -122,7 +123,7 @@ static int add_edac_pci_to_global_list(struct edac_pci_ctl_info *pci) | |||
122 | struct list_head *item, *insert_before; | 123 | struct list_head *item, *insert_before; |
123 | struct edac_pci_ctl_info *rover; | 124 | struct edac_pci_ctl_info *rover; |
124 | 125 | ||
125 | edac_dbg(1, "\n"); | 126 | debugf1("%s()\n", __func__); |
126 | 127 | ||
127 | insert_before = &edac_pci_list; | 128 | insert_before = &edac_pci_list; |
128 | 129 | ||
@@ -226,7 +227,7 @@ static void edac_pci_workq_function(struct work_struct *work_req) | |||
226 | int msec; | 227 | int msec; |
227 | unsigned long delay; | 228 | unsigned long delay; |
228 | 229 | ||
229 | edac_dbg(3, "checking\n"); | 230 | debugf3("%s() checking\n", __func__); |
230 | 231 | ||
231 | mutex_lock(&edac_pci_ctls_mutex); | 232 | mutex_lock(&edac_pci_ctls_mutex); |
232 | 233 | ||
@@ -261,7 +262,7 @@ static void edac_pci_workq_function(struct work_struct *work_req) | |||
261 | static void edac_pci_workq_setup(struct edac_pci_ctl_info *pci, | 262 | static void edac_pci_workq_setup(struct edac_pci_ctl_info *pci, |
262 | unsigned int msec) | 263 | unsigned int msec) |
263 | { | 264 | { |
264 | edac_dbg(0, "\n"); | 265 | debugf0("%s()\n", __func__); |
265 | 266 | ||
266 | INIT_DELAYED_WORK(&pci->work, edac_pci_workq_function); | 267 | INIT_DELAYED_WORK(&pci->work, edac_pci_workq_function); |
267 | queue_delayed_work(edac_workqueue, &pci->work, | 268 | queue_delayed_work(edac_workqueue, &pci->work, |
@@ -276,7 +277,7 @@ static void edac_pci_workq_teardown(struct edac_pci_ctl_info *pci) | |||
276 | { | 277 | { |
277 | int status; | 278 | int status; |
278 | 279 | ||
279 | edac_dbg(0, "\n"); | 280 | debugf0("%s()\n", __func__); |
280 | 281 | ||
281 | status = cancel_delayed_work(&pci->work); | 282 | status = cancel_delayed_work(&pci->work); |
282 | if (status == 0) | 283 | if (status == 0) |
@@ -293,7 +294,7 @@ static void edac_pci_workq_teardown(struct edac_pci_ctl_info *pci) | |||
293 | void edac_pci_reset_delay_period(struct edac_pci_ctl_info *pci, | 294 | void edac_pci_reset_delay_period(struct edac_pci_ctl_info *pci, |
294 | unsigned long value) | 295 | unsigned long value) |
295 | { | 296 | { |
296 | edac_dbg(0, "\n"); | 297 | debugf0("%s()\n", __func__); |
297 | 298 | ||
298 | edac_pci_workq_teardown(pci); | 299 | edac_pci_workq_teardown(pci); |
299 | 300 | ||
@@ -333,7 +334,7 @@ EXPORT_SYMBOL_GPL(edac_pci_alloc_index); | |||
333 | */ | 334 | */ |
334 | int edac_pci_add_device(struct edac_pci_ctl_info *pci, int edac_idx) | 335 | int edac_pci_add_device(struct edac_pci_ctl_info *pci, int edac_idx) |
335 | { | 336 | { |
336 | edac_dbg(0, "\n"); | 337 | debugf0("%s()\n", __func__); |
337 | 338 | ||
338 | pci->pci_idx = edac_idx; | 339 | pci->pci_idx = edac_idx; |
339 | pci->start_time = jiffies; | 340 | pci->start_time = jiffies; |
@@ -393,7 +394,7 @@ struct edac_pci_ctl_info *edac_pci_del_device(struct device *dev) | |||
393 | { | 394 | { |
394 | struct edac_pci_ctl_info *pci; | 395 | struct edac_pci_ctl_info *pci; |
395 | 396 | ||
396 | edac_dbg(0, "\n"); | 397 | debugf0("%s()\n", __func__); |
397 | 398 | ||
398 | mutex_lock(&edac_pci_ctls_mutex); | 399 | mutex_lock(&edac_pci_ctls_mutex); |
399 | 400 | ||
@@ -430,7 +431,7 @@ EXPORT_SYMBOL_GPL(edac_pci_del_device); | |||
430 | */ | 431 | */ |
431 | static void edac_pci_generic_check(struct edac_pci_ctl_info *pci) | 432 | static void edac_pci_generic_check(struct edac_pci_ctl_info *pci) |
432 | { | 433 | { |
433 | edac_dbg(4, "\n"); | 434 | debugf4("%s()\n", __func__); |
434 | edac_pci_do_parity_check(); | 435 | edac_pci_do_parity_check(); |
435 | } | 436 | } |
436 | 437 | ||
@@ -470,13 +471,12 @@ struct edac_pci_ctl_info *edac_pci_create_generic_ctl(struct device *dev, | |||
470 | 471 | ||
471 | pci->mod_name = mod_name; | 472 | pci->mod_name = mod_name; |
472 | pci->ctl_name = EDAC_PCI_GENCTL_NAME; | 473 | pci->ctl_name = EDAC_PCI_GENCTL_NAME; |
473 | if (edac_op_state == EDAC_OPSTATE_POLL) | 474 | pci->edac_check = edac_pci_generic_check; |
474 | pci->edac_check = edac_pci_generic_check; | ||
475 | 475 | ||
476 | pdata->edac_idx = edac_pci_idx++; | 476 | pdata->edac_idx = edac_pci_idx++; |
477 | 477 | ||
478 | if (edac_pci_add_device(pci, pdata->edac_idx) > 0) { | 478 | if (edac_pci_add_device(pci, pdata->edac_idx) > 0) { |
479 | edac_dbg(3, "failed edac_pci_add_device()\n"); | 479 | debugf3("%s(): failed edac_pci_add_device()\n", __func__); |
480 | edac_pci_free_ctl_info(pci); | 480 | edac_pci_free_ctl_info(pci); |
481 | return NULL; | 481 | return NULL; |
482 | } | 482 | } |
@@ -492,7 +492,7 @@ EXPORT_SYMBOL_GPL(edac_pci_create_generic_ctl); | |||
492 | */ | 492 | */ |
493 | void edac_pci_release_generic_ctl(struct edac_pci_ctl_info *pci) | 493 | void edac_pci_release_generic_ctl(struct edac_pci_ctl_info *pci) |
494 | { | 494 | { |
495 | edac_dbg(0, "pci mod=%s\n", pci->mod_name); | 495 | debugf0("%s() pci mod=%s\n", __func__, pci->mod_name); |
496 | 496 | ||
497 | edac_pci_del_device(pci->dev); | 497 | edac_pci_del_device(pci->dev); |
498 | edac_pci_free_ctl_info(pci); | 498 | edac_pci_free_ctl_info(pci); |
diff --git a/drivers/edac/edac_pci_sysfs.c b/drivers/edac/edac_pci_sysfs.c index dc6e905ee1a..495198ad059 100644 --- a/drivers/edac/edac_pci_sysfs.c +++ b/drivers/edac/edac_pci_sysfs.c | |||
@@ -78,7 +78,7 @@ static void edac_pci_instance_release(struct kobject *kobj) | |||
78 | { | 78 | { |
79 | struct edac_pci_ctl_info *pci; | 79 | struct edac_pci_ctl_info *pci; |
80 | 80 | ||
81 | edac_dbg(0, "\n"); | 81 | debugf0("%s()\n", __func__); |
82 | 82 | ||
83 | /* Form pointer to containing struct, the pci control struct */ | 83 | /* Form pointer to containing struct, the pci control struct */ |
84 | pci = to_instance(kobj); | 84 | pci = to_instance(kobj); |
@@ -161,7 +161,7 @@ static int edac_pci_create_instance_kobj(struct edac_pci_ctl_info *pci, int idx) | |||
161 | struct kobject *main_kobj; | 161 | struct kobject *main_kobj; |
162 | int err; | 162 | int err; |
163 | 163 | ||
164 | edac_dbg(0, "\n"); | 164 | debugf0("%s()\n", __func__); |
165 | 165 | ||
166 | /* First bump the ref count on the top main kobj, which will | 166 | /* First bump the ref count on the top main kobj, which will |
167 | * track the number of PCI instances we have, and thus nest | 167 | * track the number of PCI instances we have, and thus nest |
@@ -177,13 +177,14 @@ static int edac_pci_create_instance_kobj(struct edac_pci_ctl_info *pci, int idx) | |||
177 | err = kobject_init_and_add(&pci->kobj, &ktype_pci_instance, | 177 | err = kobject_init_and_add(&pci->kobj, &ktype_pci_instance, |
178 | edac_pci_top_main_kobj, "pci%d", idx); | 178 | edac_pci_top_main_kobj, "pci%d", idx); |
179 | if (err != 0) { | 179 | if (err != 0) { |
180 | edac_dbg(2, "failed to register instance pci%d\n", idx); | 180 | debugf2("%s() failed to register instance pci%d\n", |
181 | __func__, idx); | ||
181 | kobject_put(edac_pci_top_main_kobj); | 182 | kobject_put(edac_pci_top_main_kobj); |
182 | goto error_out; | 183 | goto error_out; |
183 | } | 184 | } |
184 | 185 | ||
185 | kobject_uevent(&pci->kobj, KOBJ_ADD); | 186 | kobject_uevent(&pci->kobj, KOBJ_ADD); |
186 | edac_dbg(1, "Register instance 'pci%d' kobject\n", idx); | 187 | debugf1("%s() Register instance 'pci%d' kobject\n", __func__, idx); |
187 | 188 | ||
188 | return 0; | 189 | return 0; |
189 | 190 | ||
@@ -200,7 +201,7 @@ error_out: | |||
200 | static void edac_pci_unregister_sysfs_instance_kobj( | 201 | static void edac_pci_unregister_sysfs_instance_kobj( |
201 | struct edac_pci_ctl_info *pci) | 202 | struct edac_pci_ctl_info *pci) |
202 | { | 203 | { |
203 | edac_dbg(0, "\n"); | 204 | debugf0("%s()\n", __func__); |
204 | 205 | ||
205 | /* Unregister the instance kobject and allow its release | 206 | /* Unregister the instance kobject and allow its release |
206 | * function release the main reference count and then | 207 | * function release the main reference count and then |
@@ -316,7 +317,7 @@ static struct edac_pci_dev_attribute *edac_pci_attr[] = { | |||
316 | */ | 317 | */ |
317 | static void edac_pci_release_main_kobj(struct kobject *kobj) | 318 | static void edac_pci_release_main_kobj(struct kobject *kobj) |
318 | { | 319 | { |
319 | edac_dbg(0, "here to module_put(THIS_MODULE)\n"); | 320 | debugf0("%s() here to module_put(THIS_MODULE)\n", __func__); |
320 | 321 | ||
321 | kfree(kobj); | 322 | kfree(kobj); |
322 | 323 | ||
@@ -337,14 +338,14 @@ static struct kobj_type ktype_edac_pci_main_kobj = { | |||
337 | * edac_pci_main_kobj_setup() | 338 | * edac_pci_main_kobj_setup() |
338 | * | 339 | * |
339 | * setup the sysfs for EDAC PCI attributes | 340 | * setup the sysfs for EDAC PCI attributes |
340 | * assumes edac_subsys has already been initialized | 341 | * assumes edac_class has already been initialized |
341 | */ | 342 | */ |
342 | static int edac_pci_main_kobj_setup(void) | 343 | static int edac_pci_main_kobj_setup(void) |
343 | { | 344 | { |
344 | int err; | 345 | int err; |
345 | struct bus_type *edac_subsys; | 346 | struct sysdev_class *edac_class; |
346 | 347 | ||
347 | edac_dbg(0, "\n"); | 348 | debugf0("%s()\n", __func__); |
348 | 349 | ||
349 | /* check and count if we have already created the main kobject */ | 350 | /* check and count if we have already created the main kobject */ |
350 | if (atomic_inc_return(&edac_pci_sysfs_refcount) != 1) | 351 | if (atomic_inc_return(&edac_pci_sysfs_refcount) != 1) |
@@ -353,9 +354,9 @@ static int edac_pci_main_kobj_setup(void) | |||
353 | /* First time, so create the main kobject and its | 354 | /* First time, so create the main kobject and its |
354 | * controls and attributes | 355 | * controls and attributes |
355 | */ | 356 | */ |
356 | edac_subsys = edac_get_sysfs_subsys(); | 357 | edac_class = edac_get_sysfs_class(); |
357 | if (edac_subsys == NULL) { | 358 | if (edac_class == NULL) { |
358 | edac_dbg(1, "no edac_subsys\n"); | 359 | debugf1("%s() no edac_class\n", __func__); |
359 | err = -ENODEV; | 360 | err = -ENODEV; |
360 | goto decrement_count_fail; | 361 | goto decrement_count_fail; |
361 | } | 362 | } |
@@ -365,14 +366,14 @@ static int edac_pci_main_kobj_setup(void) | |||
365 | * level main kobj for EDAC PCI | 366 | * level main kobj for EDAC PCI |
366 | */ | 367 | */ |
367 | if (!try_module_get(THIS_MODULE)) { | 368 | if (!try_module_get(THIS_MODULE)) { |
368 | edac_dbg(1, "try_module_get() failed\n"); | 369 | debugf1("%s() try_module_get() failed\n", __func__); |
369 | err = -ENODEV; | 370 | err = -ENODEV; |
370 | goto mod_get_fail; | 371 | goto mod_get_fail; |
371 | } | 372 | } |
372 | 373 | ||
373 | edac_pci_top_main_kobj = kzalloc(sizeof(struct kobject), GFP_KERNEL); | 374 | edac_pci_top_main_kobj = kzalloc(sizeof(struct kobject), GFP_KERNEL); |
374 | if (!edac_pci_top_main_kobj) { | 375 | if (!edac_pci_top_main_kobj) { |
375 | edac_dbg(1, "Failed to allocate\n"); | 376 | debugf1("Failed to allocate\n"); |
376 | err = -ENOMEM; | 377 | err = -ENOMEM; |
377 | goto kzalloc_fail; | 378 | goto kzalloc_fail; |
378 | } | 379 | } |
@@ -380,9 +381,9 @@ static int edac_pci_main_kobj_setup(void) | |||
380 | /* Instanstiate the pci object */ | 381 | /* Instanstiate the pci object */ |
381 | err = kobject_init_and_add(edac_pci_top_main_kobj, | 382 | err = kobject_init_and_add(edac_pci_top_main_kobj, |
382 | &ktype_edac_pci_main_kobj, | 383 | &ktype_edac_pci_main_kobj, |
383 | &edac_subsys->dev_root->kobj, "pci"); | 384 | &edac_class->kset.kobj, "pci"); |
384 | if (err) { | 385 | if (err) { |
385 | edac_dbg(1, "Failed to register '.../edac/pci'\n"); | 386 | debugf1("Failed to register '.../edac/pci'\n"); |
386 | goto kobject_init_and_add_fail; | 387 | goto kobject_init_and_add_fail; |
387 | } | 388 | } |
388 | 389 | ||
@@ -391,7 +392,7 @@ static int edac_pci_main_kobj_setup(void) | |||
391 | * must be used, for resources to be cleaned up properly | 392 | * must be used, for resources to be cleaned up properly |
392 | */ | 393 | */ |
393 | kobject_uevent(edac_pci_top_main_kobj, KOBJ_ADD); | 394 | kobject_uevent(edac_pci_top_main_kobj, KOBJ_ADD); |
394 | edac_dbg(1, "Registered '.../edac/pci' kobject\n"); | 395 | debugf1("Registered '.../edac/pci' kobject\n"); |
395 | 396 | ||
396 | return 0; | 397 | return 0; |
397 | 398 | ||
@@ -403,7 +404,7 @@ kzalloc_fail: | |||
403 | module_put(THIS_MODULE); | 404 | module_put(THIS_MODULE); |
404 | 405 | ||
405 | mod_get_fail: | 406 | mod_get_fail: |
406 | edac_put_sysfs_subsys(); | 407 | edac_put_sysfs_class(); |
407 | 408 | ||
408 | decrement_count_fail: | 409 | decrement_count_fail: |
409 | /* if are on this error exit, nothing to tear down */ | 410 | /* if are on this error exit, nothing to tear down */ |
@@ -420,17 +421,18 @@ decrement_count_fail: | |||
420 | */ | 421 | */ |
421 | static void edac_pci_main_kobj_teardown(void) | 422 | static void edac_pci_main_kobj_teardown(void) |
422 | { | 423 | { |
423 | edac_dbg(0, "\n"); | 424 | debugf0("%s()\n", __func__); |
424 | 425 | ||
425 | /* Decrement the count and only if no more controller instances | 426 | /* Decrement the count and only if no more controller instances |
426 | * are connected perform the unregisteration of the top level | 427 | * are connected perform the unregisteration of the top level |
427 | * main kobj | 428 | * main kobj |
428 | */ | 429 | */ |
429 | if (atomic_dec_return(&edac_pci_sysfs_refcount) == 0) { | 430 | if (atomic_dec_return(&edac_pci_sysfs_refcount) == 0) { |
430 | edac_dbg(0, "called kobject_put on main kobj\n"); | 431 | debugf0("%s() called kobject_put on main kobj\n", |
432 | __func__); | ||
431 | kobject_put(edac_pci_top_main_kobj); | 433 | kobject_put(edac_pci_top_main_kobj); |
432 | } | 434 | } |
433 | edac_put_sysfs_subsys(); | 435 | edac_put_sysfs_class(); |
434 | } | 436 | } |
435 | 437 | ||
436 | /* | 438 | /* |
@@ -444,7 +446,7 @@ int edac_pci_create_sysfs(struct edac_pci_ctl_info *pci) | |||
444 | int err; | 446 | int err; |
445 | struct kobject *edac_kobj = &pci->kobj; | 447 | struct kobject *edac_kobj = &pci->kobj; |
446 | 448 | ||
447 | edac_dbg(0, "idx=%d\n", pci->pci_idx); | 449 | debugf0("%s() idx=%d\n", __func__, pci->pci_idx); |
448 | 450 | ||
449 | /* create the top main EDAC PCI kobject, IF needed */ | 451 | /* create the top main EDAC PCI kobject, IF needed */ |
450 | err = edac_pci_main_kobj_setup(); | 452 | err = edac_pci_main_kobj_setup(); |
@@ -458,7 +460,8 @@ int edac_pci_create_sysfs(struct edac_pci_ctl_info *pci) | |||
458 | 460 | ||
459 | err = sysfs_create_link(edac_kobj, &pci->dev->kobj, EDAC_PCI_SYMLINK); | 461 | err = sysfs_create_link(edac_kobj, &pci->dev->kobj, EDAC_PCI_SYMLINK); |
460 | if (err) { | 462 | if (err) { |
461 | edac_dbg(0, "sysfs_create_link() returned err= %d\n", err); | 463 | debugf0("%s() sysfs_create_link() returned err= %d\n", |
464 | __func__, err); | ||
462 | goto symlink_fail; | 465 | goto symlink_fail; |
463 | } | 466 | } |
464 | 467 | ||
@@ -481,7 +484,7 @@ unregister_cleanup: | |||
481 | */ | 484 | */ |
482 | void edac_pci_remove_sysfs(struct edac_pci_ctl_info *pci) | 485 | void edac_pci_remove_sysfs(struct edac_pci_ctl_info *pci) |
483 | { | 486 | { |
484 | edac_dbg(0, "index=%d\n", pci->pci_idx); | 487 | debugf0("%s() index=%d\n", __func__, pci->pci_idx); |
485 | 488 | ||
486 | /* Remove the symlink */ | 489 | /* Remove the symlink */ |
487 | sysfs_remove_link(&pci->kobj, EDAC_PCI_SYMLINK); | 490 | sysfs_remove_link(&pci->kobj, EDAC_PCI_SYMLINK); |
@@ -493,7 +496,7 @@ void edac_pci_remove_sysfs(struct edac_pci_ctl_info *pci) | |||
493 | * if this 'pci' is the last instance. | 496 | * if this 'pci' is the last instance. |
494 | * If it is, the main kobject will be unregistered as a result | 497 | * If it is, the main kobject will be unregistered as a result |
495 | */ | 498 | */ |
496 | edac_dbg(0, "calling edac_pci_main_kobj_teardown()\n"); | 499 | debugf0("%s() calling edac_pci_main_kobj_teardown()\n", __func__); |
497 | edac_pci_main_kobj_teardown(); | 500 | edac_pci_main_kobj_teardown(); |
498 | } | 501 | } |
499 | 502 | ||
@@ -569,7 +572,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev) | |||
569 | 572 | ||
570 | local_irq_restore(flags); | 573 | local_irq_restore(flags); |
571 | 574 | ||
572 | edac_dbg(4, "PCI STATUS= 0x%04x %s\n", status, dev_name(&dev->dev)); | 575 | debugf4("PCI STATUS= 0x%04x %s\n", status, dev_name(&dev->dev)); |
573 | 576 | ||
574 | /* check the status reg for errors on boards NOT marked as broken | 577 | /* check the status reg for errors on boards NOT marked as broken |
575 | * if broken, we cannot trust any of the status bits | 578 | * if broken, we cannot trust any of the status bits |
@@ -600,15 +603,13 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev) | |||
600 | } | 603 | } |
601 | 604 | ||
602 | 605 | ||
603 | edac_dbg(4, "PCI HEADER TYPE= 0x%02x %s\n", | 606 | debugf4("PCI HEADER TYPE= 0x%02x %s\n", header_type, dev_name(&dev->dev)); |
604 | header_type, dev_name(&dev->dev)); | ||
605 | 607 | ||
606 | if ((header_type & 0x7F) == PCI_HEADER_TYPE_BRIDGE) { | 608 | if ((header_type & 0x7F) == PCI_HEADER_TYPE_BRIDGE) { |
607 | /* On bridges, need to examine secondary status register */ | 609 | /* On bridges, need to examine secondary status register */ |
608 | status = get_pci_parity_status(dev, 1); | 610 | status = get_pci_parity_status(dev, 1); |
609 | 611 | ||
610 | edac_dbg(4, "PCI SEC_STATUS= 0x%04x %s\n", | 612 | debugf4("PCI SEC_STATUS= 0x%04x %s\n", status, dev_name(&dev->dev)); |
611 | status, dev_name(&dev->dev)); | ||
612 | 613 | ||
613 | /* check the secondary status reg for errors, | 614 | /* check the secondary status reg for errors, |
614 | * on NOT broken boards | 615 | * on NOT broken boards |
@@ -645,16 +646,20 @@ typedef void (*pci_parity_check_fn_t) (struct pci_dev *dev); | |||
645 | 646 | ||
646 | /* | 647 | /* |
647 | * pci_dev parity list iterator | 648 | * pci_dev parity list iterator |
648 | * | 649 | * Scan the PCI device list for one pass, looking for SERRORs |
649 | * Scan the PCI device list looking for SERRORs, Master Parity ERRORS or | 650 | * Master Parity ERRORS or Parity ERRORs on primary or secondary devices |
650 | * Parity ERRORs on primary or secondary devices. | ||
651 | */ | 651 | */ |
652 | static inline void edac_pci_dev_parity_iterator(pci_parity_check_fn_t fn) | 652 | static inline void edac_pci_dev_parity_iterator(pci_parity_check_fn_t fn) |
653 | { | 653 | { |
654 | struct pci_dev *dev = NULL; | 654 | struct pci_dev *dev = NULL; |
655 | 655 | ||
656 | for_each_pci_dev(dev) | 656 | /* request for kernel access to the next PCI device, if any, |
657 | * and while we are looking at it have its reference count | ||
658 | * bumped until we are done with it | ||
659 | */ | ||
660 | while ((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) { | ||
657 | fn(dev); | 661 | fn(dev); |
662 | } | ||
658 | } | 663 | } |
659 | 664 | ||
660 | /* | 665 | /* |
@@ -666,7 +671,7 @@ void edac_pci_do_parity_check(void) | |||
666 | { | 671 | { |
667 | int before_count; | 672 | int before_count; |
668 | 673 | ||
669 | edac_dbg(3, "\n"); | 674 | debugf3("%s()\n", __func__); |
670 | 675 | ||
671 | /* if policy has PCI check off, leave now */ | 676 | /* if policy has PCI check off, leave now */ |
672 | if (!check_pci_errors) | 677 | if (!check_pci_errors) |
diff --git a/drivers/edac/edac_stub.c b/drivers/edac/edac_stub.c index 351945fa2ec..86ad2eee120 100644 --- a/drivers/edac/edac_stub.c +++ b/drivers/edac/edac_stub.c | |||
@@ -5,7 +5,7 @@ | |||
5 | * | 5 | * |
6 | * 2007 (c) MontaVista Software, Inc. | 6 | * 2007 (c) MontaVista Software, Inc. |
7 | * 2010 (c) Advanced Micro Devices Inc. | 7 | * 2010 (c) Advanced Micro Devices Inc. |
8 | * Borislav Petkov <bp@alien8.de> | 8 | * Borislav Petkov <borislav.petkov@amd.com> |
9 | * | 9 | * |
10 | * This file is licensed under the terms of the GNU General Public | 10 | * This file is licensed under the terms of the GNU General Public |
11 | * License version 2. This program is licensed "as is" without any | 11 | * License version 2. This program is licensed "as is" without any |
@@ -15,7 +15,6 @@ | |||
15 | #include <linux/module.h> | 15 | #include <linux/module.h> |
16 | #include <linux/edac.h> | 16 | #include <linux/edac.h> |
17 | #include <linux/atomic.h> | 17 | #include <linux/atomic.h> |
18 | #include <linux/device.h> | ||
19 | #include <asm/edac.h> | 18 | #include <asm/edac.h> |
20 | 19 | ||
21 | int edac_op_state = EDAC_OPSTATE_INVAL; | 20 | int edac_op_state = EDAC_OPSTATE_INVAL; |
@@ -27,7 +26,7 @@ EXPORT_SYMBOL_GPL(edac_handlers); | |||
27 | int edac_err_assert = 0; | 26 | int edac_err_assert = 0; |
28 | EXPORT_SYMBOL_GPL(edac_err_assert); | 27 | EXPORT_SYMBOL_GPL(edac_err_assert); |
29 | 28 | ||
30 | static atomic_t edac_subsys_valid = ATOMIC_INIT(0); | 29 | static atomic_t edac_class_valid = ATOMIC_INIT(0); |
31 | 30 | ||
32 | /* | 31 | /* |
33 | * called to determine if there is an EDAC driver interested in | 32 | * called to determine if there is an EDAC driver interested in |
@@ -55,37 +54,36 @@ EXPORT_SYMBOL_GPL(edac_atomic_assert_error); | |||
55 | * sysfs object: /sys/devices/system/edac | 54 | * sysfs object: /sys/devices/system/edac |
56 | * need to export to other files | 55 | * need to export to other files |
57 | */ | 56 | */ |
58 | struct bus_type edac_subsys = { | 57 | struct sysdev_class edac_class = { |
59 | .name = "edac", | 58 | .name = "edac", |
60 | .dev_name = "edac", | ||
61 | }; | 59 | }; |
62 | EXPORT_SYMBOL_GPL(edac_subsys); | 60 | EXPORT_SYMBOL_GPL(edac_class); |
63 | 61 | ||
64 | /* return pointer to the 'edac' node in sysfs */ | 62 | /* return pointer to the 'edac' node in sysfs */ |
65 | struct bus_type *edac_get_sysfs_subsys(void) | 63 | struct sysdev_class *edac_get_sysfs_class(void) |
66 | { | 64 | { |
67 | int err = 0; | 65 | int err = 0; |
68 | 66 | ||
69 | if (atomic_read(&edac_subsys_valid)) | 67 | if (atomic_read(&edac_class_valid)) |
70 | goto out; | 68 | goto out; |
71 | 69 | ||
72 | /* create the /sys/devices/system/edac directory */ | 70 | /* create the /sys/devices/system/edac directory */ |
73 | err = subsys_system_register(&edac_subsys, NULL); | 71 | err = sysdev_class_register(&edac_class); |
74 | if (err) { | 72 | if (err) { |
75 | printk(KERN_ERR "Error registering toplevel EDAC sysfs dir\n"); | 73 | printk(KERN_ERR "Error registering toplevel EDAC sysfs dir\n"); |
76 | return NULL; | 74 | return NULL; |
77 | } | 75 | } |
78 | 76 | ||
79 | out: | 77 | out: |
80 | atomic_inc(&edac_subsys_valid); | 78 | atomic_inc(&edac_class_valid); |
81 | return &edac_subsys; | 79 | return &edac_class; |
82 | } | 80 | } |
83 | EXPORT_SYMBOL_GPL(edac_get_sysfs_subsys); | 81 | EXPORT_SYMBOL_GPL(edac_get_sysfs_class); |
84 | 82 | ||
85 | void edac_put_sysfs_subsys(void) | 83 | void edac_put_sysfs_class(void) |
86 | { | 84 | { |
87 | /* last user unregisters it */ | 85 | /* last user unregisters it */ |
88 | if (atomic_dec_and_test(&edac_subsys_valid)) | 86 | if (atomic_dec_and_test(&edac_class_valid)) |
89 | bus_unregister(&edac_subsys); | 87 | sysdev_class_unregister(&edac_class); |
90 | } | 88 | } |
91 | EXPORT_SYMBOL_GPL(edac_put_sysfs_subsys); | 89 | EXPORT_SYMBOL_GPL(edac_put_sysfs_class); |
diff --git a/drivers/edac/highbank_l2_edac.c b/drivers/edac/highbank_l2_edac.c deleted file mode 100644 index c2bd8c6a434..00000000000 --- a/drivers/edac/highbank_l2_edac.c +++ /dev/null | |||
@@ -1,149 +0,0 @@ | |||
1 | /* | ||
2 | * Copyright 2011-2012 Calxeda, Inc. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or modify it | ||
5 | * under the terms and conditions of the GNU General Public License, | ||
6 | * version 2, as published by the Free Software Foundation. | ||
7 | * | ||
8 | * This program is distributed in the hope it will be useful, but WITHOUT | ||
9 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
10 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
11 | * more details. | ||
12 | * | ||
13 | * You should have received a copy of the GNU General Public License along with | ||
14 | * this program. If not, see <http://www.gnu.org/licenses/>. | ||
15 | */ | ||
16 | #include <linux/types.h> | ||
17 | #include <linux/kernel.h> | ||
18 | #include <linux/ctype.h> | ||
19 | #include <linux/edac.h> | ||
20 | #include <linux/interrupt.h> | ||
21 | #include <linux/platform_device.h> | ||
22 | #include <linux/of_platform.h> | ||
23 | |||
24 | #include "edac_core.h" | ||
25 | #include "edac_module.h" | ||
26 | |||
27 | #define SR_CLR_SB_ECC_INTR 0x0 | ||
28 | #define SR_CLR_DB_ECC_INTR 0x4 | ||
29 | |||
30 | struct hb_l2_drvdata { | ||
31 | void __iomem *base; | ||
32 | int sb_irq; | ||
33 | int db_irq; | ||
34 | }; | ||
35 | |||
36 | static irqreturn_t highbank_l2_err_handler(int irq, void *dev_id) | ||
37 | { | ||
38 | struct edac_device_ctl_info *dci = dev_id; | ||
39 | struct hb_l2_drvdata *drvdata = dci->pvt_info; | ||
40 | |||
41 | if (irq == drvdata->sb_irq) { | ||
42 | writel(1, drvdata->base + SR_CLR_SB_ECC_INTR); | ||
43 | edac_device_handle_ce(dci, 0, 0, dci->ctl_name); | ||
44 | } | ||
45 | if (irq == drvdata->db_irq) { | ||
46 | writel(1, drvdata->base + SR_CLR_DB_ECC_INTR); | ||
47 | edac_device_handle_ue(dci, 0, 0, dci->ctl_name); | ||
48 | } | ||
49 | |||
50 | return IRQ_HANDLED; | ||
51 | } | ||
52 | |||
53 | static int highbank_l2_err_probe(struct platform_device *pdev) | ||
54 | { | ||
55 | struct edac_device_ctl_info *dci; | ||
56 | struct hb_l2_drvdata *drvdata; | ||
57 | struct resource *r; | ||
58 | int res = 0; | ||
59 | |||
60 | dci = edac_device_alloc_ctl_info(sizeof(*drvdata), "cpu", | ||
61 | 1, "L", 1, 2, NULL, 0, 0); | ||
62 | if (!dci) | ||
63 | return -ENOMEM; | ||
64 | |||
65 | drvdata = dci->pvt_info; | ||
66 | dci->dev = &pdev->dev; | ||
67 | platform_set_drvdata(pdev, dci); | ||
68 | |||
69 | if (!devres_open_group(&pdev->dev, NULL, GFP_KERNEL)) | ||
70 | return -ENOMEM; | ||
71 | |||
72 | r = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
73 | if (!r) { | ||
74 | dev_err(&pdev->dev, "Unable to get mem resource\n"); | ||
75 | res = -ENODEV; | ||
76 | goto err; | ||
77 | } | ||
78 | |||
79 | if (!devm_request_mem_region(&pdev->dev, r->start, | ||
80 | resource_size(r), dev_name(&pdev->dev))) { | ||
81 | dev_err(&pdev->dev, "Error while requesting mem region\n"); | ||
82 | res = -EBUSY; | ||
83 | goto err; | ||
84 | } | ||
85 | |||
86 | drvdata->base = devm_ioremap(&pdev->dev, r->start, resource_size(r)); | ||
87 | if (!drvdata->base) { | ||
88 | dev_err(&pdev->dev, "Unable to map regs\n"); | ||
89 | res = -ENOMEM; | ||
90 | goto err; | ||
91 | } | ||
92 | |||
93 | drvdata->db_irq = platform_get_irq(pdev, 0); | ||
94 | res = devm_request_irq(&pdev->dev, drvdata->db_irq, | ||
95 | highbank_l2_err_handler, | ||
96 | 0, dev_name(&pdev->dev), dci); | ||
97 | if (res < 0) | ||
98 | goto err; | ||
99 | |||
100 | drvdata->sb_irq = platform_get_irq(pdev, 1); | ||
101 | res = devm_request_irq(&pdev->dev, drvdata->sb_irq, | ||
102 | highbank_l2_err_handler, | ||
103 | 0, dev_name(&pdev->dev), dci); | ||
104 | if (res < 0) | ||
105 | goto err; | ||
106 | |||
107 | dci->mod_name = dev_name(&pdev->dev); | ||
108 | dci->dev_name = dev_name(&pdev->dev); | ||
109 | |||
110 | if (edac_device_add_device(dci)) | ||
111 | goto err; | ||
112 | |||
113 | devres_close_group(&pdev->dev, NULL); | ||
114 | return 0; | ||
115 | err: | ||
116 | devres_release_group(&pdev->dev, NULL); | ||
117 | edac_device_free_ctl_info(dci); | ||
118 | return res; | ||
119 | } | ||
120 | |||
121 | static int highbank_l2_err_remove(struct platform_device *pdev) | ||
122 | { | ||
123 | struct edac_device_ctl_info *dci = platform_get_drvdata(pdev); | ||
124 | |||
125 | edac_device_del_device(&pdev->dev); | ||
126 | edac_device_free_ctl_info(dci); | ||
127 | return 0; | ||
128 | } | ||
129 | |||
130 | static const struct of_device_id hb_l2_err_of_match[] = { | ||
131 | { .compatible = "calxeda,hb-sregs-l2-ecc", }, | ||
132 | {}, | ||
133 | }; | ||
134 | MODULE_DEVICE_TABLE(of, hb_l2_err_of_match); | ||
135 | |||
136 | static struct platform_driver highbank_l2_edac_driver = { | ||
137 | .probe = highbank_l2_err_probe, | ||
138 | .remove = highbank_l2_err_remove, | ||
139 | .driver = { | ||
140 | .name = "hb_l2_edac", | ||
141 | .of_match_table = hb_l2_err_of_match, | ||
142 | }, | ||
143 | }; | ||
144 | |||
145 | module_platform_driver(highbank_l2_edac_driver); | ||
146 | |||
147 | MODULE_LICENSE("GPL v2"); | ||
148 | MODULE_AUTHOR("Calxeda, Inc."); | ||
149 | MODULE_DESCRIPTION("EDAC Driver for Calxeda Highbank L2 Cache"); | ||
diff --git a/drivers/edac/highbank_mc_edac.c b/drivers/edac/highbank_mc_edac.c deleted file mode 100644 index 4695dd2d71f..00000000000 --- a/drivers/edac/highbank_mc_edac.c +++ /dev/null | |||
@@ -1,258 +0,0 @@ | |||
1 | /* | ||
2 | * Copyright 2011-2012 Calxeda, Inc. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or modify it | ||
5 | * under the terms and conditions of the GNU General Public License, | ||
6 | * version 2, as published by the Free Software Foundation. | ||
7 | * | ||
8 | * This program is distributed in the hope it will be useful, but WITHOUT | ||
9 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
10 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
11 | * more details. | ||
12 | * | ||
13 | * You should have received a copy of the GNU General Public License along with | ||
14 | * this program. If not, see <http://www.gnu.org/licenses/>. | ||
15 | */ | ||
16 | #include <linux/types.h> | ||
17 | #include <linux/kernel.h> | ||
18 | #include <linux/ctype.h> | ||
19 | #include <linux/edac.h> | ||
20 | #include <linux/interrupt.h> | ||
21 | #include <linux/platform_device.h> | ||
22 | #include <linux/of_platform.h> | ||
23 | #include <linux/uaccess.h> | ||
24 | |||
25 | #include "edac_core.h" | ||
26 | #include "edac_module.h" | ||
27 | |||
28 | /* DDR Ctrlr Error Registers */ | ||
29 | #define HB_DDR_ECC_OPT 0x128 | ||
30 | #define HB_DDR_ECC_U_ERR_ADDR 0x130 | ||
31 | #define HB_DDR_ECC_U_ERR_STAT 0x134 | ||
32 | #define HB_DDR_ECC_U_ERR_DATAL 0x138 | ||
33 | #define HB_DDR_ECC_U_ERR_DATAH 0x13c | ||
34 | #define HB_DDR_ECC_C_ERR_ADDR 0x140 | ||
35 | #define HB_DDR_ECC_C_ERR_STAT 0x144 | ||
36 | #define HB_DDR_ECC_C_ERR_DATAL 0x148 | ||
37 | #define HB_DDR_ECC_C_ERR_DATAH 0x14c | ||
38 | #define HB_DDR_ECC_INT_STATUS 0x180 | ||
39 | #define HB_DDR_ECC_INT_ACK 0x184 | ||
40 | #define HB_DDR_ECC_U_ERR_ID 0x424 | ||
41 | #define HB_DDR_ECC_C_ERR_ID 0x428 | ||
42 | |||
43 | #define HB_DDR_ECC_INT_STAT_CE 0x8 | ||
44 | #define HB_DDR_ECC_INT_STAT_DOUBLE_CE 0x10 | ||
45 | #define HB_DDR_ECC_INT_STAT_UE 0x20 | ||
46 | #define HB_DDR_ECC_INT_STAT_DOUBLE_UE 0x40 | ||
47 | |||
48 | #define HB_DDR_ECC_OPT_MODE_MASK 0x3 | ||
49 | #define HB_DDR_ECC_OPT_FWC 0x100 | ||
50 | #define HB_DDR_ECC_OPT_XOR_SHIFT 16 | ||
51 | |||
52 | struct hb_mc_drvdata { | ||
53 | void __iomem *mc_vbase; | ||
54 | }; | ||
55 | |||
56 | static irqreturn_t highbank_mc_err_handler(int irq, void *dev_id) | ||
57 | { | ||
58 | struct mem_ctl_info *mci = dev_id; | ||
59 | struct hb_mc_drvdata *drvdata = mci->pvt_info; | ||
60 | u32 status, err_addr; | ||
61 | |||
62 | /* Read the interrupt status register */ | ||
63 | status = readl(drvdata->mc_vbase + HB_DDR_ECC_INT_STATUS); | ||
64 | |||
65 | if (status & HB_DDR_ECC_INT_STAT_UE) { | ||
66 | err_addr = readl(drvdata->mc_vbase + HB_DDR_ECC_U_ERR_ADDR); | ||
67 | edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1, | ||
68 | err_addr >> PAGE_SHIFT, | ||
69 | err_addr & ~PAGE_MASK, 0, | ||
70 | 0, 0, -1, | ||
71 | mci->ctl_name, ""); | ||
72 | } | ||
73 | if (status & HB_DDR_ECC_INT_STAT_CE) { | ||
74 | u32 syndrome = readl(drvdata->mc_vbase + HB_DDR_ECC_C_ERR_STAT); | ||
75 | syndrome = (syndrome >> 8) & 0xff; | ||
76 | err_addr = readl(drvdata->mc_vbase + HB_DDR_ECC_C_ERR_ADDR); | ||
77 | edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1, | ||
78 | err_addr >> PAGE_SHIFT, | ||
79 | err_addr & ~PAGE_MASK, syndrome, | ||
80 | 0, 0, -1, | ||
81 | mci->ctl_name, ""); | ||
82 | } | ||
83 | |||
84 | /* clear the error, clears the interrupt */ | ||
85 | writel(status, drvdata->mc_vbase + HB_DDR_ECC_INT_ACK); | ||
86 | return IRQ_HANDLED; | ||
87 | } | ||
88 | |||
89 | #ifdef CONFIG_EDAC_DEBUG | ||
90 | static ssize_t highbank_mc_err_inject_write(struct file *file, | ||
91 | const char __user *data, | ||
92 | size_t count, loff_t *ppos) | ||
93 | { | ||
94 | struct mem_ctl_info *mci = file->private_data; | ||
95 | struct hb_mc_drvdata *pdata = mci->pvt_info; | ||
96 | char buf[32]; | ||
97 | size_t buf_size; | ||
98 | u32 reg; | ||
99 | u8 synd; | ||
100 | |||
101 | buf_size = min(count, (sizeof(buf)-1)); | ||
102 | if (copy_from_user(buf, data, buf_size)) | ||
103 | return -EFAULT; | ||
104 | buf[buf_size] = 0; | ||
105 | |||
106 | if (!kstrtou8(buf, 16, &synd)) { | ||
107 | reg = readl(pdata->mc_vbase + HB_DDR_ECC_OPT); | ||
108 | reg &= HB_DDR_ECC_OPT_MODE_MASK; | ||
109 | reg |= (synd << HB_DDR_ECC_OPT_XOR_SHIFT) | HB_DDR_ECC_OPT_FWC; | ||
110 | writel(reg, pdata->mc_vbase + HB_DDR_ECC_OPT); | ||
111 | } | ||
112 | |||
113 | return count; | ||
114 | } | ||
115 | |||
116 | static const struct file_operations highbank_mc_debug_inject_fops = { | ||
117 | .open = simple_open, | ||
118 | .write = highbank_mc_err_inject_write, | ||
119 | .llseek = generic_file_llseek, | ||
120 | }; | ||
121 | |||
122 | static void highbank_mc_create_debugfs_nodes(struct mem_ctl_info *mci) | ||
123 | { | ||
124 | if (mci->debugfs) | ||
125 | debugfs_create_file("inject_ctrl", S_IWUSR, mci->debugfs, mci, | ||
126 | &highbank_mc_debug_inject_fops); | ||
127 | ; | ||
128 | } | ||
129 | #else | ||
130 | static void highbank_mc_create_debugfs_nodes(struct mem_ctl_info *mci) | ||
131 | {} | ||
132 | #endif | ||
133 | |||
134 | static int highbank_mc_probe(struct platform_device *pdev) | ||
135 | { | ||
136 | struct edac_mc_layer layers[2]; | ||
137 | struct mem_ctl_info *mci; | ||
138 | struct hb_mc_drvdata *drvdata; | ||
139 | struct dimm_info *dimm; | ||
140 | struct resource *r; | ||
141 | u32 control; | ||
142 | int irq; | ||
143 | int res = 0; | ||
144 | |||
145 | layers[0].type = EDAC_MC_LAYER_CHIP_SELECT; | ||
146 | layers[0].size = 1; | ||
147 | layers[0].is_virt_csrow = true; | ||
148 | layers[1].type = EDAC_MC_LAYER_CHANNEL; | ||
149 | layers[1].size = 1; | ||
150 | layers[1].is_virt_csrow = false; | ||
151 | mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers, | ||
152 | sizeof(struct hb_mc_drvdata)); | ||
153 | if (!mci) | ||
154 | return -ENOMEM; | ||
155 | |||
156 | mci->pdev = &pdev->dev; | ||
157 | drvdata = mci->pvt_info; | ||
158 | platform_set_drvdata(pdev, mci); | ||
159 | |||
160 | if (!devres_open_group(&pdev->dev, NULL, GFP_KERNEL)) | ||
161 | return -ENOMEM; | ||
162 | |||
163 | r = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
164 | if (!r) { | ||
165 | dev_err(&pdev->dev, "Unable to get mem resource\n"); | ||
166 | res = -ENODEV; | ||
167 | goto err; | ||
168 | } | ||
169 | |||
170 | if (!devm_request_mem_region(&pdev->dev, r->start, | ||
171 | resource_size(r), dev_name(&pdev->dev))) { | ||
172 | dev_err(&pdev->dev, "Error while requesting mem region\n"); | ||
173 | res = -EBUSY; | ||
174 | goto err; | ||
175 | } | ||
176 | |||
177 | drvdata->mc_vbase = devm_ioremap(&pdev->dev, | ||
178 | r->start, resource_size(r)); | ||
179 | if (!drvdata->mc_vbase) { | ||
180 | dev_err(&pdev->dev, "Unable to map regs\n"); | ||
181 | res = -ENOMEM; | ||
182 | goto err; | ||
183 | } | ||
184 | |||
185 | control = readl(drvdata->mc_vbase + HB_DDR_ECC_OPT) & 0x3; | ||
186 | if (!control || (control == 0x2)) { | ||
187 | dev_err(&pdev->dev, "No ECC present, or ECC disabled\n"); | ||
188 | res = -ENODEV; | ||
189 | goto err; | ||
190 | } | ||
191 | |||
192 | irq = platform_get_irq(pdev, 0); | ||
193 | res = devm_request_irq(&pdev->dev, irq, highbank_mc_err_handler, | ||
194 | 0, dev_name(&pdev->dev), mci); | ||
195 | if (res < 0) { | ||
196 | dev_err(&pdev->dev, "Unable to request irq %d\n", irq); | ||
197 | goto err; | ||
198 | } | ||
199 | |||
200 | mci->mtype_cap = MEM_FLAG_DDR3; | ||
201 | mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED; | ||
202 | mci->edac_cap = EDAC_FLAG_SECDED; | ||
203 | mci->mod_name = dev_name(&pdev->dev); | ||
204 | mci->mod_ver = "1"; | ||
205 | mci->ctl_name = dev_name(&pdev->dev); | ||
206 | mci->scrub_mode = SCRUB_SW_SRC; | ||
207 | |||
208 | /* Only a single 4GB DIMM is supported */ | ||
209 | dimm = *mci->dimms; | ||
210 | dimm->nr_pages = (~0UL >> PAGE_SHIFT) + 1; | ||
211 | dimm->grain = 8; | ||
212 | dimm->dtype = DEV_X8; | ||
213 | dimm->mtype = MEM_DDR3; | ||
214 | dimm->edac_mode = EDAC_SECDED; | ||
215 | |||
216 | res = edac_mc_add_mc(mci); | ||
217 | if (res < 0) | ||
218 | goto err; | ||
219 | |||
220 | highbank_mc_create_debugfs_nodes(mci); | ||
221 | |||
222 | devres_close_group(&pdev->dev, NULL); | ||
223 | return 0; | ||
224 | err: | ||
225 | devres_release_group(&pdev->dev, NULL); | ||
226 | edac_mc_free(mci); | ||
227 | return res; | ||
228 | } | ||
229 | |||
230 | static int highbank_mc_remove(struct platform_device *pdev) | ||
231 | { | ||
232 | struct mem_ctl_info *mci = platform_get_drvdata(pdev); | ||
233 | |||
234 | edac_mc_del_mc(&pdev->dev); | ||
235 | edac_mc_free(mci); | ||
236 | return 0; | ||
237 | } | ||
238 | |||
239 | static const struct of_device_id hb_ddr_ctrl_of_match[] = { | ||
240 | { .compatible = "calxeda,hb-ddr-ctrl", }, | ||
241 | {}, | ||
242 | }; | ||
243 | MODULE_DEVICE_TABLE(of, hb_ddr_ctrl_of_match); | ||
244 | |||
245 | static struct platform_driver highbank_mc_edac_driver = { | ||
246 | .probe = highbank_mc_probe, | ||
247 | .remove = highbank_mc_remove, | ||
248 | .driver = { | ||
249 | .name = "hb_mc_edac", | ||
250 | .of_match_table = hb_ddr_ctrl_of_match, | ||
251 | }, | ||
252 | }; | ||
253 | |||
254 | module_platform_driver(highbank_mc_edac_driver); | ||
255 | |||
256 | MODULE_LICENSE("GPL v2"); | ||
257 | MODULE_AUTHOR("Calxeda, Inc."); | ||
258 | MODULE_DESCRIPTION("EDAC Driver for Calxeda Highbank"); | ||
diff --git a/drivers/edac/i3000_edac.c b/drivers/edac/i3000_edac.c index 694efcbf19c..c0510b3d703 100644 --- a/drivers/edac/i3000_edac.c +++ b/drivers/edac/i3000_edac.c | |||
@@ -194,7 +194,7 @@ static void i3000_get_error_info(struct mem_ctl_info *mci, | |||
194 | { | 194 | { |
195 | struct pci_dev *pdev; | 195 | struct pci_dev *pdev; |
196 | 196 | ||
197 | pdev = to_pci_dev(mci->pdev); | 197 | pdev = to_pci_dev(mci->dev); |
198 | 198 | ||
199 | /* | 199 | /* |
200 | * This is a mess because there is no atomic way to read all the | 200 | * This is a mess because there is no atomic way to read all the |
@@ -236,7 +236,7 @@ static int i3000_process_error_info(struct mem_ctl_info *mci, | |||
236 | int row, multi_chan, channel; | 236 | int row, multi_chan, channel; |
237 | unsigned long pfn, offset; | 237 | unsigned long pfn, offset; |
238 | 238 | ||
239 | multi_chan = mci->csrows[0]->nr_channels - 1; | 239 | multi_chan = mci->csrows[0].nr_channels - 1; |
240 | 240 | ||
241 | if (!(info->errsts & I3000_ERRSTS_BITS)) | 241 | if (!(info->errsts & I3000_ERRSTS_BITS)) |
242 | return 0; | 242 | return 0; |
@@ -245,9 +245,7 @@ static int i3000_process_error_info(struct mem_ctl_info *mci, | |||
245 | return 1; | 245 | return 1; |
246 | 246 | ||
247 | if ((info->errsts ^ info->errsts2) & I3000_ERRSTS_BITS) { | 247 | if ((info->errsts ^ info->errsts2) & I3000_ERRSTS_BITS) { |
248 | edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1, 0, 0, 0, | 248 | edac_mc_handle_ce_no_info(mci, "UE overwrote CE"); |
249 | -1, -1, -1, | ||
250 | "UE overwrote CE", ""); | ||
251 | info->errsts = info->errsts2; | 249 | info->errsts = info->errsts2; |
252 | } | 250 | } |
253 | 251 | ||
@@ -258,15 +256,10 @@ static int i3000_process_error_info(struct mem_ctl_info *mci, | |||
258 | row = edac_mc_find_csrow_by_page(mci, pfn); | 256 | row = edac_mc_find_csrow_by_page(mci, pfn); |
259 | 257 | ||
260 | if (info->errsts & I3000_ERRSTS_UE) | 258 | if (info->errsts & I3000_ERRSTS_UE) |
261 | edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1, | 259 | edac_mc_handle_ue(mci, pfn, offset, row, "i3000 UE"); |
262 | pfn, offset, 0, | ||
263 | row, -1, -1, | ||
264 | "i3000 UE", ""); | ||
265 | else | 260 | else |
266 | edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1, | 261 | edac_mc_handle_ce(mci, pfn, offset, info->derrsyn, row, |
267 | pfn, offset, info->derrsyn, | 262 | multi_chan ? channel : 0, "i3000 CE"); |
268 | row, multi_chan ? channel : 0, -1, | ||
269 | "i3000 CE", ""); | ||
270 | 263 | ||
271 | return 1; | 264 | return 1; |
272 | } | 265 | } |
@@ -275,7 +268,7 @@ static void i3000_check(struct mem_ctl_info *mci) | |||
275 | { | 268 | { |
276 | struct i3000_error_info info; | 269 | struct i3000_error_info info; |
277 | 270 | ||
278 | edac_dbg(1, "MC%d\n", mci->mc_idx); | 271 | debugf1("MC%d: %s()\n", mci->mc_idx, __func__); |
279 | i3000_get_error_info(mci, &info); | 272 | i3000_get_error_info(mci, &info); |
280 | i3000_process_error_info(mci, &info, 1); | 273 | i3000_process_error_info(mci, &info, 1); |
281 | } | 274 | } |
@@ -311,10 +304,9 @@ static int i3000_is_interleaved(const unsigned char *c0dra, | |||
311 | static int i3000_probe1(struct pci_dev *pdev, int dev_idx) | 304 | static int i3000_probe1(struct pci_dev *pdev, int dev_idx) |
312 | { | 305 | { |
313 | int rc; | 306 | int rc; |
314 | int i, j; | 307 | int i; |
315 | struct mem_ctl_info *mci = NULL; | 308 | struct mem_ctl_info *mci = NULL; |
316 | struct edac_mc_layer layers[2]; | 309 | unsigned long last_cumul_size; |
317 | unsigned long last_cumul_size, nr_pages; | ||
318 | int interleaved, nr_channels; | 310 | int interleaved, nr_channels; |
319 | unsigned char dra[I3000_RANKS / 2], drb[I3000_RANKS]; | 311 | unsigned char dra[I3000_RANKS / 2], drb[I3000_RANKS]; |
320 | unsigned char *c0dra = dra, *c1dra = &dra[I3000_RANKS_PER_CHANNEL / 2]; | 312 | unsigned char *c0dra = dra, *c1dra = &dra[I3000_RANKS_PER_CHANNEL / 2]; |
@@ -322,7 +314,7 @@ static int i3000_probe1(struct pci_dev *pdev, int dev_idx) | |||
322 | unsigned long mchbar; | 314 | unsigned long mchbar; |
323 | void __iomem *window; | 315 | void __iomem *window; |
324 | 316 | ||
325 | edac_dbg(0, "MC:\n"); | 317 | debugf0("MC: %s()\n", __func__); |
326 | 318 | ||
327 | pci_read_config_dword(pdev, I3000_MCHBAR, (u32 *) & mchbar); | 319 | pci_read_config_dword(pdev, I3000_MCHBAR, (u32 *) & mchbar); |
328 | mchbar &= I3000_MCHBAR_MASK; | 320 | mchbar &= I3000_MCHBAR_MASK; |
@@ -355,20 +347,13 @@ static int i3000_probe1(struct pci_dev *pdev, int dev_idx) | |||
355 | */ | 347 | */ |
356 | interleaved = i3000_is_interleaved(c0dra, c1dra, c0drb, c1drb); | 348 | interleaved = i3000_is_interleaved(c0dra, c1dra, c0drb, c1drb); |
357 | nr_channels = interleaved ? 2 : 1; | 349 | nr_channels = interleaved ? 2 : 1; |
358 | 350 | mci = edac_mc_alloc(0, I3000_RANKS / nr_channels, nr_channels, 0); | |
359 | layers[0].type = EDAC_MC_LAYER_CHIP_SELECT; | ||
360 | layers[0].size = I3000_RANKS / nr_channels; | ||
361 | layers[0].is_virt_csrow = true; | ||
362 | layers[1].type = EDAC_MC_LAYER_CHANNEL; | ||
363 | layers[1].size = nr_channels; | ||
364 | layers[1].is_virt_csrow = false; | ||
365 | mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers, 0); | ||
366 | if (!mci) | 351 | if (!mci) |
367 | return -ENOMEM; | 352 | return -ENOMEM; |
368 | 353 | ||
369 | edac_dbg(3, "MC: init mci\n"); | 354 | debugf3("MC: %s(): init mci\n", __func__); |
370 | 355 | ||
371 | mci->pdev = &pdev->dev; | 356 | mci->dev = &pdev->dev; |
372 | mci->mtype_cap = MEM_FLAG_DDR2; | 357 | mci->mtype_cap = MEM_FLAG_DDR2; |
373 | 358 | ||
374 | mci->edac_ctl_cap = EDAC_FLAG_SECDED; | 359 | mci->edac_ctl_cap = EDAC_FLAG_SECDED; |
@@ -393,30 +378,27 @@ static int i3000_probe1(struct pci_dev *pdev, int dev_idx) | |||
393 | for (last_cumul_size = i = 0; i < mci->nr_csrows; i++) { | 378 | for (last_cumul_size = i = 0; i < mci->nr_csrows; i++) { |
394 | u8 value; | 379 | u8 value; |
395 | u32 cumul_size; | 380 | u32 cumul_size; |
396 | struct csrow_info *csrow = mci->csrows[i]; | 381 | struct csrow_info *csrow = &mci->csrows[i]; |
397 | 382 | ||
398 | value = drb[i]; | 383 | value = drb[i]; |
399 | cumul_size = value << (I3000_DRB_SHIFT - PAGE_SHIFT); | 384 | cumul_size = value << (I3000_DRB_SHIFT - PAGE_SHIFT); |
400 | if (interleaved) | 385 | if (interleaved) |
401 | cumul_size <<= 1; | 386 | cumul_size <<= 1; |
402 | edac_dbg(3, "MC: (%d) cumul_size 0x%x\n", i, cumul_size); | 387 | debugf3("MC: %s(): (%d) cumul_size 0x%x\n", |
403 | if (cumul_size == last_cumul_size) | 388 | __func__, i, cumul_size); |
389 | if (cumul_size == last_cumul_size) { | ||
390 | csrow->mtype = MEM_EMPTY; | ||
404 | continue; | 391 | continue; |
392 | } | ||
405 | 393 | ||
406 | csrow->first_page = last_cumul_size; | 394 | csrow->first_page = last_cumul_size; |
407 | csrow->last_page = cumul_size - 1; | 395 | csrow->last_page = cumul_size - 1; |
408 | nr_pages = cumul_size - last_cumul_size; | 396 | csrow->nr_pages = cumul_size - last_cumul_size; |
409 | last_cumul_size = cumul_size; | 397 | last_cumul_size = cumul_size; |
410 | 398 | csrow->grain = I3000_DEAP_GRAIN; | |
411 | for (j = 0; j < nr_channels; j++) { | 399 | csrow->mtype = MEM_DDR2; |
412 | struct dimm_info *dimm = csrow->channels[j]->dimm; | 400 | csrow->dtype = DEV_UNKNOWN; |
413 | 401 | csrow->edac_mode = EDAC_UNKNOWN; | |
414 | dimm->nr_pages = nr_pages / nr_channels; | ||
415 | dimm->grain = I3000_DEAP_GRAIN; | ||
416 | dimm->mtype = MEM_DDR2; | ||
417 | dimm->dtype = DEV_UNKNOWN; | ||
418 | dimm->edac_mode = EDAC_UNKNOWN; | ||
419 | } | ||
420 | } | 402 | } |
421 | 403 | ||
422 | /* | 404 | /* |
@@ -428,7 +410,7 @@ static int i3000_probe1(struct pci_dev *pdev, int dev_idx) | |||
428 | 410 | ||
429 | rc = -ENODEV; | 411 | rc = -ENODEV; |
430 | if (edac_mc_add_mc(mci)) { | 412 | if (edac_mc_add_mc(mci)) { |
431 | edac_dbg(3, "MC: failed edac_mc_add_mc()\n"); | 413 | debugf3("MC: %s(): failed edac_mc_add_mc()\n", __func__); |
432 | goto fail; | 414 | goto fail; |
433 | } | 415 | } |
434 | 416 | ||
@@ -444,7 +426,7 @@ static int i3000_probe1(struct pci_dev *pdev, int dev_idx) | |||
444 | } | 426 | } |
445 | 427 | ||
446 | /* get this far and it's successful */ | 428 | /* get this far and it's successful */ |
447 | edac_dbg(3, "MC: success\n"); | 429 | debugf3("MC: %s(): success\n", __func__); |
448 | return 0; | 430 | return 0; |
449 | 431 | ||
450 | fail: | 432 | fail: |
@@ -455,11 +437,12 @@ fail: | |||
455 | } | 437 | } |
456 | 438 | ||
457 | /* returns count (>= 0), or negative on error */ | 439 | /* returns count (>= 0), or negative on error */ |
458 | static int i3000_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) | 440 | static int __devinit i3000_init_one(struct pci_dev *pdev, |
441 | const struct pci_device_id *ent) | ||
459 | { | 442 | { |
460 | int rc; | 443 | int rc; |
461 | 444 | ||
462 | edac_dbg(0, "MC:\n"); | 445 | debugf0("MC: %s()\n", __func__); |
463 | 446 | ||
464 | if (pci_enable_device(pdev) < 0) | 447 | if (pci_enable_device(pdev) < 0) |
465 | return -EIO; | 448 | return -EIO; |
@@ -471,11 +454,11 @@ static int i3000_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
471 | return rc; | 454 | return rc; |
472 | } | 455 | } |
473 | 456 | ||
474 | static void i3000_remove_one(struct pci_dev *pdev) | 457 | static void __devexit i3000_remove_one(struct pci_dev *pdev) |
475 | { | 458 | { |
476 | struct mem_ctl_info *mci; | 459 | struct mem_ctl_info *mci; |
477 | 460 | ||
478 | edac_dbg(0, "\n"); | 461 | debugf0("%s()\n", __func__); |
479 | 462 | ||
480 | if (i3000_pci) | 463 | if (i3000_pci) |
481 | edac_pci_release_generic_ctl(i3000_pci); | 464 | edac_pci_release_generic_ctl(i3000_pci); |
@@ -487,7 +470,7 @@ static void i3000_remove_one(struct pci_dev *pdev) | |||
487 | edac_mc_free(mci); | 470 | edac_mc_free(mci); |
488 | } | 471 | } |
489 | 472 | ||
490 | static DEFINE_PCI_DEVICE_TABLE(i3000_pci_tbl) = { | 473 | static const struct pci_device_id i3000_pci_tbl[] __devinitdata = { |
491 | { | 474 | { |
492 | PCI_VEND_DEV(INTEL, 3000_HB), PCI_ANY_ID, PCI_ANY_ID, 0, 0, | 475 | PCI_VEND_DEV(INTEL, 3000_HB), PCI_ANY_ID, PCI_ANY_ID, 0, 0, |
493 | I3000}, | 476 | I3000}, |
@@ -501,7 +484,7 @@ MODULE_DEVICE_TABLE(pci, i3000_pci_tbl); | |||
501 | static struct pci_driver i3000_driver = { | 484 | static struct pci_driver i3000_driver = { |
502 | .name = EDAC_MOD_STR, | 485 | .name = EDAC_MOD_STR, |
503 | .probe = i3000_init_one, | 486 | .probe = i3000_init_one, |
504 | .remove = i3000_remove_one, | 487 | .remove = __devexit_p(i3000_remove_one), |
505 | .id_table = i3000_pci_tbl, | 488 | .id_table = i3000_pci_tbl, |
506 | }; | 489 | }; |
507 | 490 | ||
@@ -509,7 +492,7 @@ static int __init i3000_init(void) | |||
509 | { | 492 | { |
510 | int pci_rc; | 493 | int pci_rc; |
511 | 494 | ||
512 | edac_dbg(3, "MC:\n"); | 495 | debugf3("MC: %s()\n", __func__); |
513 | 496 | ||
514 | /* Ensure that the OPSTATE is set correctly for POLL or NMI */ | 497 | /* Ensure that the OPSTATE is set correctly for POLL or NMI */ |
515 | opstate_init(); | 498 | opstate_init(); |
@@ -523,14 +506,14 @@ static int __init i3000_init(void) | |||
523 | mci_pdev = pci_get_device(PCI_VENDOR_ID_INTEL, | 506 | mci_pdev = pci_get_device(PCI_VENDOR_ID_INTEL, |
524 | PCI_DEVICE_ID_INTEL_3000_HB, NULL); | 507 | PCI_DEVICE_ID_INTEL_3000_HB, NULL); |
525 | if (!mci_pdev) { | 508 | if (!mci_pdev) { |
526 | edac_dbg(0, "i3000 pci_get_device fail\n"); | 509 | debugf0("i3000 pci_get_device fail\n"); |
527 | pci_rc = -ENODEV; | 510 | pci_rc = -ENODEV; |
528 | goto fail1; | 511 | goto fail1; |
529 | } | 512 | } |
530 | 513 | ||
531 | pci_rc = i3000_init_one(mci_pdev, i3000_pci_tbl); | 514 | pci_rc = i3000_init_one(mci_pdev, i3000_pci_tbl); |
532 | if (pci_rc < 0) { | 515 | if (pci_rc < 0) { |
533 | edac_dbg(0, "i3000 init fail\n"); | 516 | debugf0("i3000 init fail\n"); |
534 | pci_rc = -ENODEV; | 517 | pci_rc = -ENODEV; |
535 | goto fail1; | 518 | goto fail1; |
536 | } | 519 | } |
@@ -550,7 +533,7 @@ fail0: | |||
550 | 533 | ||
551 | static void __exit i3000_exit(void) | 534 | static void __exit i3000_exit(void) |
552 | { | 535 | { |
553 | edac_dbg(3, "MC:\n"); | 536 | debugf3("MC: %s()\n", __func__); |
554 | 537 | ||
555 | pci_unregister_driver(&i3000_driver); | 538 | pci_unregister_driver(&i3000_driver); |
556 | if (!i3000_registered) { | 539 | if (!i3000_registered) { |
diff --git a/drivers/edac/i3200_edac.c b/drivers/edac/i3200_edac.c index 4e8337602e7..aa08497a075 100644 --- a/drivers/edac/i3200_edac.c +++ b/drivers/edac/i3200_edac.c | |||
@@ -15,15 +15,12 @@ | |||
15 | #include <linux/io.h> | 15 | #include <linux/io.h> |
16 | #include "edac_core.h" | 16 | #include "edac_core.h" |
17 | 17 | ||
18 | #include <asm-generic/io-64-nonatomic-lo-hi.h> | ||
19 | |||
20 | #define I3200_REVISION "1.1" | 18 | #define I3200_REVISION "1.1" |
21 | 19 | ||
22 | #define EDAC_MOD_STR "i3200_edac" | 20 | #define EDAC_MOD_STR "i3200_edac" |
23 | 21 | ||
24 | #define PCI_DEVICE_ID_INTEL_3200_HB 0x29f0 | 22 | #define PCI_DEVICE_ID_INTEL_3200_HB 0x29f0 |
25 | 23 | ||
26 | #define I3200_DIMMS 4 | ||
27 | #define I3200_RANKS 8 | 24 | #define I3200_RANKS 8 |
28 | #define I3200_RANKS_PER_CHANNEL 4 | 25 | #define I3200_RANKS_PER_CHANNEL 4 |
29 | #define I3200_CHANNELS 2 | 26 | #define I3200_CHANNELS 2 |
@@ -104,16 +101,29 @@ struct i3200_priv { | |||
104 | 101 | ||
105 | static int nr_channels; | 102 | static int nr_channels; |
106 | 103 | ||
104 | #ifndef readq | ||
105 | static inline __u64 readq(const volatile void __iomem *addr) | ||
106 | { | ||
107 | const volatile u32 __iomem *p = addr; | ||
108 | u32 low, high; | ||
109 | |||
110 | low = readl(p); | ||
111 | high = readl(p + 1); | ||
112 | |||
113 | return low + ((u64)high << 32); | ||
114 | } | ||
115 | #endif | ||
116 | |||
107 | static int how_many_channels(struct pci_dev *pdev) | 117 | static int how_many_channels(struct pci_dev *pdev) |
108 | { | 118 | { |
109 | unsigned char capid0_8b; /* 8th byte of CAPID0 */ | 119 | unsigned char capid0_8b; /* 8th byte of CAPID0 */ |
110 | 120 | ||
111 | pci_read_config_byte(pdev, I3200_CAPID0 + 8, &capid0_8b); | 121 | pci_read_config_byte(pdev, I3200_CAPID0 + 8, &capid0_8b); |
112 | if (capid0_8b & 0x20) { /* check DCD: Dual Channel Disable */ | 122 | if (capid0_8b & 0x20) { /* check DCD: Dual Channel Disable */ |
113 | edac_dbg(0, "In single channel mode\n"); | 123 | debugf0("In single channel mode.\n"); |
114 | return 1; | 124 | return 1; |
115 | } else { | 125 | } else { |
116 | edac_dbg(0, "In dual channel mode\n"); | 126 | debugf0("In dual channel mode.\n"); |
117 | return 2; | 127 | return 2; |
118 | } | 128 | } |
119 | } | 129 | } |
@@ -159,7 +169,7 @@ static void i3200_clear_error_info(struct mem_ctl_info *mci) | |||
159 | { | 169 | { |
160 | struct pci_dev *pdev; | 170 | struct pci_dev *pdev; |
161 | 171 | ||
162 | pdev = to_pci_dev(mci->pdev); | 172 | pdev = to_pci_dev(mci->dev); |
163 | 173 | ||
164 | /* | 174 | /* |
165 | * Clear any error bits. | 175 | * Clear any error bits. |
@@ -176,7 +186,7 @@ static void i3200_get_and_clear_error_info(struct mem_ctl_info *mci, | |||
176 | struct i3200_priv *priv = mci->pvt_info; | 186 | struct i3200_priv *priv = mci->pvt_info; |
177 | void __iomem *window = priv->window; | 187 | void __iomem *window = priv->window; |
178 | 188 | ||
179 | pdev = to_pci_dev(mci->pdev); | 189 | pdev = to_pci_dev(mci->dev); |
180 | 190 | ||
181 | /* | 191 | /* |
182 | * This is a mess because there is no atomic way to read all the | 192 | * This is a mess because there is no atomic way to read all the |
@@ -218,25 +228,21 @@ static void i3200_process_error_info(struct mem_ctl_info *mci, | |||
218 | return; | 228 | return; |
219 | 229 | ||
220 | if ((info->errsts ^ info->errsts2) & I3200_ERRSTS_BITS) { | 230 | if ((info->errsts ^ info->errsts2) & I3200_ERRSTS_BITS) { |
221 | edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1, 0, 0, 0, | 231 | edac_mc_handle_ce_no_info(mci, "UE overwrote CE"); |
222 | -1, -1, -1, "UE overwrote CE", ""); | ||
223 | info->errsts = info->errsts2; | 232 | info->errsts = info->errsts2; |
224 | } | 233 | } |
225 | 234 | ||
226 | for (channel = 0; channel < nr_channels; channel++) { | 235 | for (channel = 0; channel < nr_channels; channel++) { |
227 | log = info->eccerrlog[channel]; | 236 | log = info->eccerrlog[channel]; |
228 | if (log & I3200_ECCERRLOG_UE) { | 237 | if (log & I3200_ECCERRLOG_UE) { |
229 | edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1, | 238 | edac_mc_handle_ue(mci, 0, 0, |
230 | 0, 0, 0, | 239 | eccerrlog_row(channel, log), |
231 | eccerrlog_row(channel, log), | 240 | "i3200 UE"); |
232 | -1, -1, | ||
233 | "i3000 UE", ""); | ||
234 | } else if (log & I3200_ECCERRLOG_CE) { | 241 | } else if (log & I3200_ECCERRLOG_CE) { |
235 | edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1, | 242 | edac_mc_handle_ce(mci, 0, 0, |
236 | 0, 0, eccerrlog_syndrome(log), | 243 | eccerrlog_syndrome(log), |
237 | eccerrlog_row(channel, log), | 244 | eccerrlog_row(channel, log), 0, |
238 | -1, -1, | 245 | "i3200 CE"); |
239 | "i3000 UE", ""); | ||
240 | } | 246 | } |
241 | } | 247 | } |
242 | } | 248 | } |
@@ -245,7 +251,7 @@ static void i3200_check(struct mem_ctl_info *mci) | |||
245 | { | 251 | { |
246 | struct i3200_error_info info; | 252 | struct i3200_error_info info; |
247 | 253 | ||
248 | edac_dbg(1, "MC%d\n", mci->mc_idx); | 254 | debugf1("MC%d: %s()\n", mci->mc_idx, __func__); |
249 | i3200_get_and_clear_error_info(mci, &info); | 255 | i3200_get_and_clear_error_info(mci, &info); |
250 | i3200_process_error_info(mci, &info); | 256 | i3200_process_error_info(mci, &info); |
251 | } | 257 | } |
@@ -324,15 +330,15 @@ static unsigned long drb_to_nr_pages( | |||
324 | static int i3200_probe1(struct pci_dev *pdev, int dev_idx) | 330 | static int i3200_probe1(struct pci_dev *pdev, int dev_idx) |
325 | { | 331 | { |
326 | int rc; | 332 | int rc; |
327 | int i, j; | 333 | int i; |
328 | struct mem_ctl_info *mci = NULL; | 334 | struct mem_ctl_info *mci = NULL; |
329 | struct edac_mc_layer layers[2]; | 335 | unsigned long last_page; |
330 | u16 drbs[I3200_CHANNELS][I3200_RANKS_PER_CHANNEL]; | 336 | u16 drbs[I3200_CHANNELS][I3200_RANKS_PER_CHANNEL]; |
331 | bool stacked; | 337 | bool stacked; |
332 | void __iomem *window; | 338 | void __iomem *window; |
333 | struct i3200_priv *priv; | 339 | struct i3200_priv *priv; |
334 | 340 | ||
335 | edac_dbg(0, "MC:\n"); | 341 | debugf0("MC: %s()\n", __func__); |
336 | 342 | ||
337 | window = i3200_map_mchbar(pdev); | 343 | window = i3200_map_mchbar(pdev); |
338 | if (!window) | 344 | if (!window) |
@@ -341,20 +347,14 @@ static int i3200_probe1(struct pci_dev *pdev, int dev_idx) | |||
341 | i3200_get_drbs(window, drbs); | 347 | i3200_get_drbs(window, drbs); |
342 | nr_channels = how_many_channels(pdev); | 348 | nr_channels = how_many_channels(pdev); |
343 | 349 | ||
344 | layers[0].type = EDAC_MC_LAYER_CHIP_SELECT; | 350 | mci = edac_mc_alloc(sizeof(struct i3200_priv), I3200_RANKS, |
345 | layers[0].size = I3200_DIMMS; | 351 | nr_channels, 0); |
346 | layers[0].is_virt_csrow = true; | ||
347 | layers[1].type = EDAC_MC_LAYER_CHANNEL; | ||
348 | layers[1].size = nr_channels; | ||
349 | layers[1].is_virt_csrow = false; | ||
350 | mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers, | ||
351 | sizeof(struct i3200_priv)); | ||
352 | if (!mci) | 352 | if (!mci) |
353 | return -ENOMEM; | 353 | return -ENOMEM; |
354 | 354 | ||
355 | edac_dbg(3, "MC: init mci\n"); | 355 | debugf3("MC: %s(): init mci\n", __func__); |
356 | 356 | ||
357 | mci->pdev = &pdev->dev; | 357 | mci->dev = &pdev->dev; |
358 | mci->mtype_cap = MEM_FLAG_DDR2; | 358 | mci->mtype_cap = MEM_FLAG_DDR2; |
359 | 359 | ||
360 | mci->edac_ctl_cap = EDAC_FLAG_SECDED; | 360 | mci->edac_ctl_cap = EDAC_FLAG_SECDED; |
@@ -377,38 +377,41 @@ static int i3200_probe1(struct pci_dev *pdev, int dev_idx) | |||
377 | * cumulative; the last one will contain the total memory | 377 | * cumulative; the last one will contain the total memory |
378 | * contained in all ranks. | 378 | * contained in all ranks. |
379 | */ | 379 | */ |
380 | last_page = -1UL; | ||
380 | for (i = 0; i < mci->nr_csrows; i++) { | 381 | for (i = 0; i < mci->nr_csrows; i++) { |
381 | unsigned long nr_pages; | 382 | unsigned long nr_pages; |
382 | struct csrow_info *csrow = mci->csrows[i]; | 383 | struct csrow_info *csrow = &mci->csrows[i]; |
383 | 384 | ||
384 | nr_pages = drb_to_nr_pages(drbs, stacked, | 385 | nr_pages = drb_to_nr_pages(drbs, stacked, |
385 | i / I3200_RANKS_PER_CHANNEL, | 386 | i / I3200_RANKS_PER_CHANNEL, |
386 | i % I3200_RANKS_PER_CHANNEL); | 387 | i % I3200_RANKS_PER_CHANNEL); |
387 | 388 | ||
388 | if (nr_pages == 0) | 389 | if (nr_pages == 0) { |
390 | csrow->mtype = MEM_EMPTY; | ||
389 | continue; | 391 | continue; |
392 | } | ||
390 | 393 | ||
391 | for (j = 0; j < nr_channels; j++) { | 394 | csrow->first_page = last_page + 1; |
392 | struct dimm_info *dimm = csrow->channels[j]->dimm; | 395 | last_page += nr_pages; |
396 | csrow->last_page = last_page; | ||
397 | csrow->nr_pages = nr_pages; | ||
393 | 398 | ||
394 | dimm->nr_pages = nr_pages; | 399 | csrow->grain = nr_pages << PAGE_SHIFT; |
395 | dimm->grain = nr_pages << PAGE_SHIFT; | 400 | csrow->mtype = MEM_DDR2; |
396 | dimm->mtype = MEM_DDR2; | 401 | csrow->dtype = DEV_UNKNOWN; |
397 | dimm->dtype = DEV_UNKNOWN; | 402 | csrow->edac_mode = EDAC_UNKNOWN; |
398 | dimm->edac_mode = EDAC_UNKNOWN; | ||
399 | } | ||
400 | } | 403 | } |
401 | 404 | ||
402 | i3200_clear_error_info(mci); | 405 | i3200_clear_error_info(mci); |
403 | 406 | ||
404 | rc = -ENODEV; | 407 | rc = -ENODEV; |
405 | if (edac_mc_add_mc(mci)) { | 408 | if (edac_mc_add_mc(mci)) { |
406 | edac_dbg(3, "MC: failed edac_mc_add_mc()\n"); | 409 | debugf3("MC: %s(): failed edac_mc_add_mc()\n", __func__); |
407 | goto fail; | 410 | goto fail; |
408 | } | 411 | } |
409 | 412 | ||
410 | /* get this far and it's successful */ | 413 | /* get this far and it's successful */ |
411 | edac_dbg(3, "MC: success\n"); | 414 | debugf3("MC: %s(): success\n", __func__); |
412 | return 0; | 415 | return 0; |
413 | 416 | ||
414 | fail: | 417 | fail: |
@@ -419,11 +422,12 @@ fail: | |||
419 | return rc; | 422 | return rc; |
420 | } | 423 | } |
421 | 424 | ||
422 | static int i3200_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) | 425 | static int __devinit i3200_init_one(struct pci_dev *pdev, |
426 | const struct pci_device_id *ent) | ||
423 | { | 427 | { |
424 | int rc; | 428 | int rc; |
425 | 429 | ||
426 | edac_dbg(0, "MC:\n"); | 430 | debugf0("MC: %s()\n", __func__); |
427 | 431 | ||
428 | if (pci_enable_device(pdev) < 0) | 432 | if (pci_enable_device(pdev) < 0) |
429 | return -EIO; | 433 | return -EIO; |
@@ -435,12 +439,12 @@ static int i3200_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
435 | return rc; | 439 | return rc; |
436 | } | 440 | } |
437 | 441 | ||
438 | static void i3200_remove_one(struct pci_dev *pdev) | 442 | static void __devexit i3200_remove_one(struct pci_dev *pdev) |
439 | { | 443 | { |
440 | struct mem_ctl_info *mci; | 444 | struct mem_ctl_info *mci; |
441 | struct i3200_priv *priv; | 445 | struct i3200_priv *priv; |
442 | 446 | ||
443 | edac_dbg(0, "\n"); | 447 | debugf0("%s()\n", __func__); |
444 | 448 | ||
445 | mci = edac_mc_del_mc(&pdev->dev); | 449 | mci = edac_mc_del_mc(&pdev->dev); |
446 | if (!mci) | 450 | if (!mci) |
@@ -452,7 +456,7 @@ static void i3200_remove_one(struct pci_dev *pdev) | |||
452 | edac_mc_free(mci); | 456 | edac_mc_free(mci); |
453 | } | 457 | } |
454 | 458 | ||
455 | static DEFINE_PCI_DEVICE_TABLE(i3200_pci_tbl) = { | 459 | static const struct pci_device_id i3200_pci_tbl[] __devinitdata = { |
456 | { | 460 | { |
457 | PCI_VEND_DEV(INTEL, 3200_HB), PCI_ANY_ID, PCI_ANY_ID, 0, 0, | 461 | PCI_VEND_DEV(INTEL, 3200_HB), PCI_ANY_ID, PCI_ANY_ID, 0, 0, |
458 | I3200}, | 462 | I3200}, |
@@ -466,7 +470,7 @@ MODULE_DEVICE_TABLE(pci, i3200_pci_tbl); | |||
466 | static struct pci_driver i3200_driver = { | 470 | static struct pci_driver i3200_driver = { |
467 | .name = EDAC_MOD_STR, | 471 | .name = EDAC_MOD_STR, |
468 | .probe = i3200_init_one, | 472 | .probe = i3200_init_one, |
469 | .remove = i3200_remove_one, | 473 | .remove = __devexit_p(i3200_remove_one), |
470 | .id_table = i3200_pci_tbl, | 474 | .id_table = i3200_pci_tbl, |
471 | }; | 475 | }; |
472 | 476 | ||
@@ -474,7 +478,7 @@ static int __init i3200_init(void) | |||
474 | { | 478 | { |
475 | int pci_rc; | 479 | int pci_rc; |
476 | 480 | ||
477 | edac_dbg(3, "MC:\n"); | 481 | debugf3("MC: %s()\n", __func__); |
478 | 482 | ||
479 | /* Ensure that the OPSTATE is set correctly for POLL or NMI */ | 483 | /* Ensure that the OPSTATE is set correctly for POLL or NMI */ |
480 | opstate_init(); | 484 | opstate_init(); |
@@ -488,14 +492,14 @@ static int __init i3200_init(void) | |||
488 | mci_pdev = pci_get_device(PCI_VENDOR_ID_INTEL, | 492 | mci_pdev = pci_get_device(PCI_VENDOR_ID_INTEL, |
489 | PCI_DEVICE_ID_INTEL_3200_HB, NULL); | 493 | PCI_DEVICE_ID_INTEL_3200_HB, NULL); |
490 | if (!mci_pdev) { | 494 | if (!mci_pdev) { |
491 | edac_dbg(0, "i3200 pci_get_device fail\n"); | 495 | debugf0("i3200 pci_get_device fail\n"); |
492 | pci_rc = -ENODEV; | 496 | pci_rc = -ENODEV; |
493 | goto fail1; | 497 | goto fail1; |
494 | } | 498 | } |
495 | 499 | ||
496 | pci_rc = i3200_init_one(mci_pdev, i3200_pci_tbl); | 500 | pci_rc = i3200_init_one(mci_pdev, i3200_pci_tbl); |
497 | if (pci_rc < 0) { | 501 | if (pci_rc < 0) { |
498 | edac_dbg(0, "i3200 init fail\n"); | 502 | debugf0("i3200 init fail\n"); |
499 | pci_rc = -ENODEV; | 503 | pci_rc = -ENODEV; |
500 | goto fail1; | 504 | goto fail1; |
501 | } | 505 | } |
@@ -515,7 +519,7 @@ fail0: | |||
515 | 519 | ||
516 | static void __exit i3200_exit(void) | 520 | static void __exit i3200_exit(void) |
517 | { | 521 | { |
518 | edac_dbg(3, "MC:\n"); | 522 | debugf3("MC: %s()\n", __func__); |
519 | 523 | ||
520 | pci_unregister_driver(&i3200_driver); | 524 | pci_unregister_driver(&i3200_driver); |
521 | if (!i3200_registered) { | 525 | if (!i3200_registered) { |
diff --git a/drivers/edac/i5000_edac.c b/drivers/edac/i5000_edac.c index 63b2194e8c2..4dc3ac25a42 100644 --- a/drivers/edac/i5000_edac.c +++ b/drivers/edac/i5000_edac.c | |||
@@ -270,10 +270,9 @@ | |||
270 | #define MTR3 0x8C | 270 | #define MTR3 0x8C |
271 | 271 | ||
272 | #define NUM_MTRS 4 | 272 | #define NUM_MTRS 4 |
273 | #define CHANNELS_PER_BRANCH 2 | 273 | #define CHANNELS_PER_BRANCH (2) |
274 | #define MAX_BRANCHES 2 | ||
275 | 274 | ||
276 | /* Defines to extract the various fields from the | 275 | /* Defines to extract the vaious fields from the |
277 | * MTRx - Memory Technology Registers | 276 | * MTRx - Memory Technology Registers |
278 | */ | 277 | */ |
279 | #define MTR_DIMMS_PRESENT(mtr) ((mtr) & (0x1 << 8)) | 278 | #define MTR_DIMMS_PRESENT(mtr) ((mtr) & (0x1 << 8)) |
@@ -287,6 +286,22 @@ | |||
287 | #define MTR_DIMM_COLS(mtr) ((mtr) & 0x3) | 286 | #define MTR_DIMM_COLS(mtr) ((mtr) & 0x3) |
288 | #define MTR_DIMM_COLS_ADDR_BITS(mtr) (MTR_DIMM_COLS(mtr) + 10) | 287 | #define MTR_DIMM_COLS_ADDR_BITS(mtr) (MTR_DIMM_COLS(mtr) + 10) |
289 | 288 | ||
289 | #ifdef CONFIG_EDAC_DEBUG | ||
290 | static char *numrow_toString[] = { | ||
291 | "8,192 - 13 rows", | ||
292 | "16,384 - 14 rows", | ||
293 | "32,768 - 15 rows", | ||
294 | "reserved" | ||
295 | }; | ||
296 | |||
297 | static char *numcol_toString[] = { | ||
298 | "1,024 - 10 columns", | ||
299 | "2,048 - 11 columns", | ||
300 | "4,096 - 12 columns", | ||
301 | "reserved" | ||
302 | }; | ||
303 | #endif | ||
304 | |||
290 | /* enables the report of miscellaneous messages as CE errors - default off */ | 305 | /* enables the report of miscellaneous messages as CE errors - default off */ |
291 | static int misc_messages; | 306 | static int misc_messages; |
292 | 307 | ||
@@ -328,13 +343,7 @@ struct i5000_pvt { | |||
328 | struct pci_dev *branch_1; /* 22.0 */ | 343 | struct pci_dev *branch_1; /* 22.0 */ |
329 | 344 | ||
330 | u16 tolm; /* top of low memory */ | 345 | u16 tolm; /* top of low memory */ |
331 | union { | 346 | u64 ambase; /* AMB BAR */ |
332 | u64 ambase; /* AMB BAR */ | ||
333 | struct { | ||
334 | u32 ambase_bottom; | ||
335 | u32 ambase_top; | ||
336 | } u __packed; | ||
337 | }; | ||
338 | 347 | ||
339 | u16 mir0, mir1, mir2; | 348 | u16 mir0, mir1, mir2; |
340 | 349 | ||
@@ -464,6 +473,7 @@ static void i5000_process_fatal_error_info(struct mem_ctl_info *mci, | |||
464 | char msg[EDAC_MC_LABEL_LEN + 1 + 160]; | 473 | char msg[EDAC_MC_LABEL_LEN + 1 + 160]; |
465 | char *specific = NULL; | 474 | char *specific = NULL; |
466 | u32 allErrors; | 475 | u32 allErrors; |
476 | int branch; | ||
467 | int channel; | 477 | int channel; |
468 | int bank; | 478 | int bank; |
469 | int rank; | 479 | int rank; |
@@ -475,7 +485,8 @@ static void i5000_process_fatal_error_info(struct mem_ctl_info *mci, | |||
475 | if (!allErrors) | 485 | if (!allErrors) |
476 | return; /* if no error, return now */ | 486 | return; /* if no error, return now */ |
477 | 487 | ||
478 | channel = EXTRACT_FBDCHAN_INDX(info->ferr_fat_fbd); | 488 | branch = EXTRACT_FBDCHAN_INDX(info->ferr_fat_fbd); |
489 | channel = branch; | ||
479 | 490 | ||
480 | /* Use the NON-Recoverable macros to extract data */ | 491 | /* Use the NON-Recoverable macros to extract data */ |
481 | bank = NREC_BANK(info->nrecmema); | 492 | bank = NREC_BANK(info->nrecmema); |
@@ -484,9 +495,10 @@ static void i5000_process_fatal_error_info(struct mem_ctl_info *mci, | |||
484 | ras = NREC_RAS(info->nrecmemb); | 495 | ras = NREC_RAS(info->nrecmemb); |
485 | cas = NREC_CAS(info->nrecmemb); | 496 | cas = NREC_CAS(info->nrecmemb); |
486 | 497 | ||
487 | edac_dbg(0, "\t\tCSROW= %d Channel= %d (DRAM Bank= %d rdwr= %s ras= %d cas= %d)\n", | 498 | debugf0("\t\tCSROW= %d Channels= %d,%d (Branch= %d " |
488 | rank, channel, bank, | 499 | "DRAM Bank= %d rdwr= %s ras= %d cas= %d)\n", |
489 | rdwr ? "Write" : "Read", ras, cas); | 500 | rank, channel, channel + 1, branch >> 1, bank, |
501 | rdwr ? "Write" : "Read", ras, cas); | ||
490 | 502 | ||
491 | /* Only 1 bit will be on */ | 503 | /* Only 1 bit will be on */ |
492 | switch (allErrors) { | 504 | switch (allErrors) { |
@@ -521,14 +533,13 @@ static void i5000_process_fatal_error_info(struct mem_ctl_info *mci, | |||
521 | 533 | ||
522 | /* Form out message */ | 534 | /* Form out message */ |
523 | snprintf(msg, sizeof(msg), | 535 | snprintf(msg, sizeof(msg), |
524 | "Bank=%d RAS=%d CAS=%d FATAL Err=0x%x (%s)", | 536 | "(Branch=%d DRAM-Bank=%d RDWR=%s RAS=%d CAS=%d " |
525 | bank, ras, cas, allErrors, specific); | 537 | "FATAL Err=0x%x (%s))", |
538 | branch >> 1, bank, rdwr ? "Write" : "Read", ras, cas, | ||
539 | allErrors, specific); | ||
526 | 540 | ||
527 | /* Call the helper to output message */ | 541 | /* Call the helper to output message */ |
528 | edac_mc_handle_error(HW_EVENT_ERR_FATAL, mci, 1, 0, 0, 0, | 542 | edac_mc_handle_fbd_ue(mci, rank, channel, channel + 1, msg); |
529 | channel >> 1, channel & 1, rank, | ||
530 | rdwr ? "Write error" : "Read error", | ||
531 | msg); | ||
532 | } | 543 | } |
533 | 544 | ||
534 | /* | 545 | /* |
@@ -563,7 +574,7 @@ static void i5000_process_nonfatal_error_info(struct mem_ctl_info *mci, | |||
563 | /* ONLY ONE of the possible error bits will be set, as per the docs */ | 574 | /* ONLY ONE of the possible error bits will be set, as per the docs */ |
564 | ue_errors = allErrors & FERR_NF_UNCORRECTABLE; | 575 | ue_errors = allErrors & FERR_NF_UNCORRECTABLE; |
565 | if (ue_errors) { | 576 | if (ue_errors) { |
566 | edac_dbg(0, "\tUncorrected bits= 0x%x\n", ue_errors); | 577 | debugf0("\tUncorrected bits= 0x%x\n", ue_errors); |
567 | 578 | ||
568 | branch = EXTRACT_FBDCHAN_INDX(info->ferr_nf_fbd); | 579 | branch = EXTRACT_FBDCHAN_INDX(info->ferr_nf_fbd); |
569 | 580 | ||
@@ -579,9 +590,11 @@ static void i5000_process_nonfatal_error_info(struct mem_ctl_info *mci, | |||
579 | ras = NREC_RAS(info->nrecmemb); | 590 | ras = NREC_RAS(info->nrecmemb); |
580 | cas = NREC_CAS(info->nrecmemb); | 591 | cas = NREC_CAS(info->nrecmemb); |
581 | 592 | ||
582 | edac_dbg(0, "\t\tCSROW= %d Channels= %d,%d (Branch= %d DRAM Bank= %d rdwr= %s ras= %d cas= %d)\n", | 593 | debugf0 |
583 | rank, channel, channel + 1, branch >> 1, bank, | 594 | ("\t\tCSROW= %d Channels= %d,%d (Branch= %d " |
584 | rdwr ? "Write" : "Read", ras, cas); | 595 | "DRAM Bank= %d rdwr= %s ras= %d cas= %d)\n", |
596 | rank, channel, channel + 1, branch >> 1, bank, | ||
597 | rdwr ? "Write" : "Read", ras, cas); | ||
585 | 598 | ||
586 | switch (ue_errors) { | 599 | switch (ue_errors) { |
587 | case FERR_NF_M12ERR: | 600 | case FERR_NF_M12ERR: |
@@ -620,20 +633,19 @@ static void i5000_process_nonfatal_error_info(struct mem_ctl_info *mci, | |||
620 | 633 | ||
621 | /* Form out message */ | 634 | /* Form out message */ |
622 | snprintf(msg, sizeof(msg), | 635 | snprintf(msg, sizeof(msg), |
623 | "Rank=%d Bank=%d RAS=%d CAS=%d, UE Err=0x%x (%s)", | 636 | "(Branch=%d DRAM-Bank=%d RDWR=%s RAS=%d " |
624 | rank, bank, ras, cas, ue_errors, specific); | 637 | "CAS=%d, UE Err=0x%x (%s))", |
638 | branch >> 1, bank, rdwr ? "Write" : "Read", ras, cas, | ||
639 | ue_errors, specific); | ||
625 | 640 | ||
626 | /* Call the helper to output message */ | 641 | /* Call the helper to output message */ |
627 | edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1, 0, 0, 0, | 642 | edac_mc_handle_fbd_ue(mci, rank, channel, channel + 1, msg); |
628 | channel >> 1, -1, rank, | ||
629 | rdwr ? "Write error" : "Read error", | ||
630 | msg); | ||
631 | } | 643 | } |
632 | 644 | ||
633 | /* Check correctable errors */ | 645 | /* Check correctable errors */ |
634 | ce_errors = allErrors & FERR_NF_CORRECTABLE; | 646 | ce_errors = allErrors & FERR_NF_CORRECTABLE; |
635 | if (ce_errors) { | 647 | if (ce_errors) { |
636 | edac_dbg(0, "\tCorrected bits= 0x%x\n", ce_errors); | 648 | debugf0("\tCorrected bits= 0x%x\n", ce_errors); |
637 | 649 | ||
638 | branch = EXTRACT_FBDCHAN_INDX(info->ferr_nf_fbd); | 650 | branch = EXTRACT_FBDCHAN_INDX(info->ferr_nf_fbd); |
639 | 651 | ||
@@ -651,9 +663,10 @@ static void i5000_process_nonfatal_error_info(struct mem_ctl_info *mci, | |||
651 | ras = REC_RAS(info->recmemb); | 663 | ras = REC_RAS(info->recmemb); |
652 | cas = REC_CAS(info->recmemb); | 664 | cas = REC_CAS(info->recmemb); |
653 | 665 | ||
654 | edac_dbg(0, "\t\tCSROW= %d Channel= %d (Branch %d DRAM Bank= %d rdwr= %s ras= %d cas= %d)\n", | 666 | debugf0("\t\tCSROW= %d Channel= %d (Branch %d " |
655 | rank, channel, branch >> 1, bank, | 667 | "DRAM Bank= %d rdwr= %s ras= %d cas= %d)\n", |
656 | rdwr ? "Write" : "Read", ras, cas); | 668 | rank, channel, branch >> 1, bank, |
669 | rdwr ? "Write" : "Read", ras, cas); | ||
657 | 670 | ||
658 | switch (ce_errors) { | 671 | switch (ce_errors) { |
659 | case FERR_NF_M17ERR: | 672 | case FERR_NF_M17ERR: |
@@ -672,16 +685,13 @@ static void i5000_process_nonfatal_error_info(struct mem_ctl_info *mci, | |||
672 | 685 | ||
673 | /* Form out message */ | 686 | /* Form out message */ |
674 | snprintf(msg, sizeof(msg), | 687 | snprintf(msg, sizeof(msg), |
675 | "Rank=%d Bank=%d RDWR=%s RAS=%d " | 688 | "(Branch=%d DRAM-Bank=%d RDWR=%s RAS=%d " |
676 | "CAS=%d, CE Err=0x%x (%s))", branch >> 1, bank, | 689 | "CAS=%d, CE Err=0x%x (%s))", branch >> 1, bank, |
677 | rdwr ? "Write" : "Read", ras, cas, ce_errors, | 690 | rdwr ? "Write" : "Read", ras, cas, ce_errors, |
678 | specific); | 691 | specific); |
679 | 692 | ||
680 | /* Call the helper to output message */ | 693 | /* Call the helper to output message */ |
681 | edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1, 0, 0, 0, | 694 | edac_mc_handle_fbd_ce(mci, rank, channel, msg); |
682 | channel >> 1, channel % 2, rank, | ||
683 | rdwr ? "Write error" : "Read error", | ||
684 | msg); | ||
685 | } | 695 | } |
686 | 696 | ||
687 | if (!misc_messages) | 697 | if (!misc_messages) |
@@ -721,12 +731,11 @@ static void i5000_process_nonfatal_error_info(struct mem_ctl_info *mci, | |||
721 | 731 | ||
722 | /* Form out message */ | 732 | /* Form out message */ |
723 | snprintf(msg, sizeof(msg), | 733 | snprintf(msg, sizeof(msg), |
724 | "Err=%#x (%s)", misc_errors, specific); | 734 | "(Branch=%d Err=%#x (%s))", branch >> 1, |
735 | misc_errors, specific); | ||
725 | 736 | ||
726 | /* Call the helper to output message */ | 737 | /* Call the helper to output message */ |
727 | edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1, 0, 0, 0, | 738 | edac_mc_handle_fbd_ce(mci, 0, 0, msg); |
728 | branch >> 1, -1, -1, | ||
729 | "Misc error", msg); | ||
730 | } | 739 | } |
731 | } | 740 | } |
732 | 741 | ||
@@ -765,7 +774,7 @@ static void i5000_clear_error(struct mem_ctl_info *mci) | |||
765 | static void i5000_check_error(struct mem_ctl_info *mci) | 774 | static void i5000_check_error(struct mem_ctl_info *mci) |
766 | { | 775 | { |
767 | struct i5000_error_info info; | 776 | struct i5000_error_info info; |
768 | edac_dbg(4, "MC%d\n", mci->mc_idx); | 777 | debugf4("MC%d: %s: %s()\n", mci->mc_idx, __FILE__, __func__); |
769 | i5000_get_error_info(mci, &info); | 778 | i5000_get_error_info(mci, &info); |
770 | i5000_process_error_info(mci, &info, 1); | 779 | i5000_process_error_info(mci, &info, 1); |
771 | } | 780 | } |
@@ -836,16 +845,15 @@ static int i5000_get_devices(struct mem_ctl_info *mci, int dev_idx) | |||
836 | 845 | ||
837 | pvt->fsb_error_regs = pdev; | 846 | pvt->fsb_error_regs = pdev; |
838 | 847 | ||
839 | edac_dbg(1, "System Address, processor bus- PCI Bus ID: %s %x:%x\n", | 848 | debugf1("System Address, processor bus- PCI Bus ID: %s %x:%x\n", |
840 | pci_name(pvt->system_address), | 849 | pci_name(pvt->system_address), |
841 | pvt->system_address->vendor, pvt->system_address->device); | 850 | pvt->system_address->vendor, pvt->system_address->device); |
842 | edac_dbg(1, "Branchmap, control and errors - PCI Bus ID: %s %x:%x\n", | 851 | debugf1("Branchmap, control and errors - PCI Bus ID: %s %x:%x\n", |
843 | pci_name(pvt->branchmap_werrors), | 852 | pci_name(pvt->branchmap_werrors), |
844 | pvt->branchmap_werrors->vendor, | 853 | pvt->branchmap_werrors->vendor, pvt->branchmap_werrors->device); |
845 | pvt->branchmap_werrors->device); | 854 | debugf1("FSB Error Regs - PCI Bus ID: %s %x:%x\n", |
846 | edac_dbg(1, "FSB Error Regs - PCI Bus ID: %s %x:%x\n", | 855 | pci_name(pvt->fsb_error_regs), |
847 | pci_name(pvt->fsb_error_regs), | 856 | pvt->fsb_error_regs->vendor, pvt->fsb_error_regs->device); |
848 | pvt->fsb_error_regs->vendor, pvt->fsb_error_regs->device); | ||
849 | 857 | ||
850 | pdev = NULL; | 858 | pdev = NULL; |
851 | pdev = pci_get_device(PCI_VENDOR_ID_INTEL, | 859 | pdev = pci_get_device(PCI_VENDOR_ID_INTEL, |
@@ -948,14 +956,14 @@ static int determine_amb_present_reg(struct i5000_pvt *pvt, int channel) | |||
948 | * | 956 | * |
949 | * return the proper MTR register as determine by the csrow and channel desired | 957 | * return the proper MTR register as determine by the csrow and channel desired |
950 | */ | 958 | */ |
951 | static int determine_mtr(struct i5000_pvt *pvt, int slot, int channel) | 959 | static int determine_mtr(struct i5000_pvt *pvt, int csrow, int channel) |
952 | { | 960 | { |
953 | int mtr; | 961 | int mtr; |
954 | 962 | ||
955 | if (channel < CHANNELS_PER_BRANCH) | 963 | if (channel < CHANNELS_PER_BRANCH) |
956 | mtr = pvt->b0_mtr[slot]; | 964 | mtr = pvt->b0_mtr[csrow >> 1]; |
957 | else | 965 | else |
958 | mtr = pvt->b1_mtr[slot]; | 966 | mtr = pvt->b1_mtr[csrow >> 1]; |
959 | 967 | ||
960 | return mtr; | 968 | return mtr; |
961 | } | 969 | } |
@@ -968,59 +976,49 @@ static void decode_mtr(int slot_row, u16 mtr) | |||
968 | 976 | ||
969 | ans = MTR_DIMMS_PRESENT(mtr); | 977 | ans = MTR_DIMMS_PRESENT(mtr); |
970 | 978 | ||
971 | edac_dbg(2, "\tMTR%d=0x%x: DIMMs are %sPresent\n", | 979 | debugf2("\tMTR%d=0x%x: DIMMs are %s\n", slot_row, mtr, |
972 | slot_row, mtr, ans ? "" : "NOT "); | 980 | ans ? "Present" : "NOT Present"); |
973 | if (!ans) | 981 | if (!ans) |
974 | return; | 982 | return; |
975 | 983 | ||
976 | edac_dbg(2, "\t\tWIDTH: x%d\n", MTR_DRAM_WIDTH(mtr)); | 984 | debugf2("\t\tWIDTH: x%d\n", MTR_DRAM_WIDTH(mtr)); |
977 | edac_dbg(2, "\t\tNUMBANK: %d bank(s)\n", MTR_DRAM_BANKS(mtr)); | 985 | debugf2("\t\tNUMBANK: %d bank(s)\n", MTR_DRAM_BANKS(mtr)); |
978 | edac_dbg(2, "\t\tNUMRANK: %s\n", | 986 | debugf2("\t\tNUMRANK: %s\n", MTR_DIMM_RANK(mtr) ? "double" : "single"); |
979 | MTR_DIMM_RANK(mtr) ? "double" : "single"); | 987 | debugf2("\t\tNUMROW: %s\n", numrow_toString[MTR_DIMM_ROWS(mtr)]); |
980 | edac_dbg(2, "\t\tNUMROW: %s\n", | 988 | debugf2("\t\tNUMCOL: %s\n", numcol_toString[MTR_DIMM_COLS(mtr)]); |
981 | MTR_DIMM_ROWS(mtr) == 0 ? "8,192 - 13 rows" : | ||
982 | MTR_DIMM_ROWS(mtr) == 1 ? "16,384 - 14 rows" : | ||
983 | MTR_DIMM_ROWS(mtr) == 2 ? "32,768 - 15 rows" : | ||
984 | "reserved"); | ||
985 | edac_dbg(2, "\t\tNUMCOL: %s\n", | ||
986 | MTR_DIMM_COLS(mtr) == 0 ? "1,024 - 10 columns" : | ||
987 | MTR_DIMM_COLS(mtr) == 1 ? "2,048 - 11 columns" : | ||
988 | MTR_DIMM_COLS(mtr) == 2 ? "4,096 - 12 columns" : | ||
989 | "reserved"); | ||
990 | } | 989 | } |
991 | 990 | ||
992 | static void handle_channel(struct i5000_pvt *pvt, int slot, int channel, | 991 | static void handle_channel(struct i5000_pvt *pvt, int csrow, int channel, |
993 | struct i5000_dimm_info *dinfo) | 992 | struct i5000_dimm_info *dinfo) |
994 | { | 993 | { |
995 | int mtr; | 994 | int mtr; |
996 | int amb_present_reg; | 995 | int amb_present_reg; |
997 | int addrBits; | 996 | int addrBits; |
998 | 997 | ||
999 | mtr = determine_mtr(pvt, slot, channel); | 998 | mtr = determine_mtr(pvt, csrow, channel); |
1000 | if (MTR_DIMMS_PRESENT(mtr)) { | 999 | if (MTR_DIMMS_PRESENT(mtr)) { |
1001 | amb_present_reg = determine_amb_present_reg(pvt, channel); | 1000 | amb_present_reg = determine_amb_present_reg(pvt, channel); |
1002 | 1001 | ||
1003 | /* Determine if there is a DIMM present in this DIMM slot */ | 1002 | /* Determine if there is a DIMM present in this DIMM slot */ |
1004 | if (amb_present_reg) { | 1003 | if (amb_present_reg & (1 << (csrow >> 1))) { |
1005 | dinfo->dual_rank = MTR_DIMM_RANK(mtr); | 1004 | dinfo->dual_rank = MTR_DIMM_RANK(mtr); |
1006 | 1005 | ||
1007 | /* Start with the number of bits for a Bank | 1006 | if (!((dinfo->dual_rank == 0) && |
1008 | * on the DRAM */ | 1007 | ((csrow & 0x1) == 0x1))) { |
1009 | addrBits = MTR_DRAM_BANKS_ADDR_BITS(mtr); | 1008 | /* Start with the number of bits for a Bank |
1010 | /* Add the number of ROW bits */ | 1009 | * on the DRAM */ |
1011 | addrBits += MTR_DIMM_ROWS_ADDR_BITS(mtr); | 1010 | addrBits = MTR_DRAM_BANKS_ADDR_BITS(mtr); |
1012 | /* add the number of COLUMN bits */ | 1011 | /* Add thenumber of ROW bits */ |
1013 | addrBits += MTR_DIMM_COLS_ADDR_BITS(mtr); | 1012 | addrBits += MTR_DIMM_ROWS_ADDR_BITS(mtr); |
1014 | 1013 | /* add the number of COLUMN bits */ | |
1015 | /* Dual-rank memories have twice the size */ | 1014 | addrBits += MTR_DIMM_COLS_ADDR_BITS(mtr); |
1016 | if (dinfo->dual_rank) | 1015 | |
1017 | addrBits++; | 1016 | addrBits += 6; /* add 64 bits per DIMM */ |
1018 | 1017 | addrBits -= 20; /* divide by 2^^20 */ | |
1019 | addrBits += 6; /* add 64 bits per DIMM */ | 1018 | addrBits -= 3; /* 8 bits per bytes */ |
1020 | addrBits -= 20; /* divide by 2^^20 */ | 1019 | |
1021 | addrBits -= 3; /* 8 bits per bytes */ | 1020 | dinfo->megabytes = 1 << addrBits; |
1022 | 1021 | } | |
1023 | dinfo->megabytes = 1 << addrBits; | ||
1024 | } | 1022 | } |
1025 | } | 1023 | } |
1026 | } | 1024 | } |
@@ -1034,9 +1032,10 @@ static void handle_channel(struct i5000_pvt *pvt, int slot, int channel, | |||
1034 | static void calculate_dimm_size(struct i5000_pvt *pvt) | 1032 | static void calculate_dimm_size(struct i5000_pvt *pvt) |
1035 | { | 1033 | { |
1036 | struct i5000_dimm_info *dinfo; | 1034 | struct i5000_dimm_info *dinfo; |
1037 | int slot, channel, branch; | 1035 | int csrow, max_csrows; |
1038 | char *p, *mem_buffer; | 1036 | char *p, *mem_buffer; |
1039 | int space, n; | 1037 | int space, n; |
1038 | int channel; | ||
1040 | 1039 | ||
1041 | /* ================= Generate some debug output ================= */ | 1040 | /* ================= Generate some debug output ================= */ |
1042 | space = PAGE_SIZE; | 1041 | space = PAGE_SIZE; |
@@ -1047,57 +1046,53 @@ static void calculate_dimm_size(struct i5000_pvt *pvt) | |||
1047 | return; | 1046 | return; |
1048 | } | 1047 | } |
1049 | 1048 | ||
1050 | /* Scan all the actual slots | 1049 | n = snprintf(p, space, "\n"); |
1050 | p += n; | ||
1051 | space -= n; | ||
1052 | |||
1053 | /* Scan all the actual CSROWS (which is # of DIMMS * 2) | ||
1051 | * and calculate the information for each DIMM | 1054 | * and calculate the information for each DIMM |
1052 | * Start with the highest slot first, to display it first | 1055 | * Start with the highest csrow first, to display it first |
1053 | * and work toward the 0th slot | 1056 | * and work toward the 0th csrow |
1054 | */ | 1057 | */ |
1055 | for (slot = pvt->maxdimmperch - 1; slot >= 0; slot--) { | 1058 | max_csrows = pvt->maxdimmperch * 2; |
1059 | for (csrow = max_csrows - 1; csrow >= 0; csrow--) { | ||
1056 | 1060 | ||
1057 | /* on an odd slot, first output a 'boundary' marker, | 1061 | /* on an odd csrow, first output a 'boundary' marker, |
1058 | * then reset the message buffer */ | 1062 | * then reset the message buffer */ |
1059 | if (slot & 0x1) { | 1063 | if (csrow & 0x1) { |
1060 | n = snprintf(p, space, "--------------------------" | 1064 | n = snprintf(p, space, "---------------------------" |
1061 | "--------------------------------"); | 1065 | "--------------------------------"); |
1062 | p += n; | 1066 | p += n; |
1063 | space -= n; | 1067 | space -= n; |
1064 | edac_dbg(2, "%s\n", mem_buffer); | 1068 | debugf2("%s\n", mem_buffer); |
1065 | p = mem_buffer; | 1069 | p = mem_buffer; |
1066 | space = PAGE_SIZE; | 1070 | space = PAGE_SIZE; |
1067 | } | 1071 | } |
1068 | n = snprintf(p, space, "slot %2d ", slot); | 1072 | n = snprintf(p, space, "csrow %2d ", csrow); |
1069 | p += n; | 1073 | p += n; |
1070 | space -= n; | 1074 | space -= n; |
1071 | 1075 | ||
1072 | for (channel = 0; channel < pvt->maxch; channel++) { | 1076 | for (channel = 0; channel < pvt->maxch; channel++) { |
1073 | dinfo = &pvt->dimm_info[slot][channel]; | 1077 | dinfo = &pvt->dimm_info[csrow][channel]; |
1074 | handle_channel(pvt, slot, channel, dinfo); | 1078 | handle_channel(pvt, csrow, channel, dinfo); |
1075 | if (dinfo->megabytes) | 1079 | n = snprintf(p, space, "%4d MB | ", dinfo->megabytes); |
1076 | n = snprintf(p, space, "%4d MB %dR| ", | ||
1077 | dinfo->megabytes, dinfo->dual_rank + 1); | ||
1078 | else | ||
1079 | n = snprintf(p, space, "%4d MB | ", 0); | ||
1080 | p += n; | 1080 | p += n; |
1081 | space -= n; | 1081 | space -= n; |
1082 | } | 1082 | } |
1083 | n = snprintf(p, space, "\n"); | ||
1083 | p += n; | 1084 | p += n; |
1084 | space -= n; | 1085 | space -= n; |
1085 | edac_dbg(2, "%s\n", mem_buffer); | ||
1086 | p = mem_buffer; | ||
1087 | space = PAGE_SIZE; | ||
1088 | } | 1086 | } |
1089 | 1087 | ||
1090 | /* Output the last bottom 'boundary' marker */ | 1088 | /* Output the last bottom 'boundary' marker */ |
1091 | n = snprintf(p, space, "--------------------------" | 1089 | n = snprintf(p, space, "---------------------------" |
1092 | "--------------------------------"); | 1090 | "--------------------------------\n"); |
1093 | p += n; | 1091 | p += n; |
1094 | space -= n; | 1092 | space -= n; |
1095 | edac_dbg(2, "%s\n", mem_buffer); | ||
1096 | p = mem_buffer; | ||
1097 | space = PAGE_SIZE; | ||
1098 | 1093 | ||
1099 | /* now output the 'channel' labels */ | 1094 | /* now output the 'channel' labels */ |
1100 | n = snprintf(p, space, " "); | 1095 | n = snprintf(p, space, " "); |
1101 | p += n; | 1096 | p += n; |
1102 | space -= n; | 1097 | space -= n; |
1103 | for (channel = 0; channel < pvt->maxch; channel++) { | 1098 | for (channel = 0; channel < pvt->maxch; channel++) { |
@@ -1105,20 +1100,12 @@ static void calculate_dimm_size(struct i5000_pvt *pvt) | |||
1105 | p += n; | 1100 | p += n; |
1106 | space -= n; | 1101 | space -= n; |
1107 | } | 1102 | } |
1108 | edac_dbg(2, "%s\n", mem_buffer); | 1103 | n = snprintf(p, space, "\n"); |
1109 | p = mem_buffer; | ||
1110 | space = PAGE_SIZE; | ||
1111 | |||
1112 | n = snprintf(p, space, " "); | ||
1113 | p += n; | 1104 | p += n; |
1114 | for (branch = 0; branch < MAX_BRANCHES; branch++) { | 1105 | space -= n; |
1115 | n = snprintf(p, space, " branch %d | ", branch); | ||
1116 | p += n; | ||
1117 | space -= n; | ||
1118 | } | ||
1119 | 1106 | ||
1120 | /* output the last message and free buffer */ | 1107 | /* output the last message and free buffer */ |
1121 | edac_dbg(2, "%s\n", mem_buffer); | 1108 | debugf2("%s\n", mem_buffer); |
1122 | kfree(mem_buffer); | 1109 | kfree(mem_buffer); |
1123 | } | 1110 | } |
1124 | 1111 | ||
@@ -1141,25 +1128,24 @@ static void i5000_get_mc_regs(struct mem_ctl_info *mci) | |||
1141 | pvt = mci->pvt_info; | 1128 | pvt = mci->pvt_info; |
1142 | 1129 | ||
1143 | pci_read_config_dword(pvt->system_address, AMBASE, | 1130 | pci_read_config_dword(pvt->system_address, AMBASE, |
1144 | &pvt->u.ambase_bottom); | 1131 | (u32 *) & pvt->ambase); |
1145 | pci_read_config_dword(pvt->system_address, AMBASE + sizeof(u32), | 1132 | pci_read_config_dword(pvt->system_address, AMBASE + sizeof(u32), |
1146 | &pvt->u.ambase_top); | 1133 | ((u32 *) & pvt->ambase) + sizeof(u32)); |
1147 | 1134 | ||
1148 | maxdimmperch = pvt->maxdimmperch; | 1135 | maxdimmperch = pvt->maxdimmperch; |
1149 | maxch = pvt->maxch; | 1136 | maxch = pvt->maxch; |
1150 | 1137 | ||
1151 | edac_dbg(2, "AMBASE= 0x%lx MAXCH= %d MAX-DIMM-Per-CH= %d\n", | 1138 | debugf2("AMBASE= 0x%lx MAXCH= %d MAX-DIMM-Per-CH= %d\n", |
1152 | (long unsigned int)pvt->ambase, pvt->maxch, pvt->maxdimmperch); | 1139 | (long unsigned int)pvt->ambase, pvt->maxch, pvt->maxdimmperch); |
1153 | 1140 | ||
1154 | /* Get the Branch Map regs */ | 1141 | /* Get the Branch Map regs */ |
1155 | pci_read_config_word(pvt->branchmap_werrors, TOLM, &pvt->tolm); | 1142 | pci_read_config_word(pvt->branchmap_werrors, TOLM, &pvt->tolm); |
1156 | pvt->tolm >>= 12; | 1143 | pvt->tolm >>= 12; |
1157 | edac_dbg(2, "TOLM (number of 256M regions) =%u (0x%x)\n", | 1144 | debugf2("\nTOLM (number of 256M regions) =%u (0x%x)\n", pvt->tolm, |
1158 | pvt->tolm, pvt->tolm); | 1145 | pvt->tolm); |
1159 | 1146 | ||
1160 | actual_tolm = pvt->tolm << 28; | 1147 | actual_tolm = pvt->tolm << 28; |
1161 | edac_dbg(2, "Actual TOLM byte addr=%u (0x%x)\n", | 1148 | debugf2("Actual TOLM byte addr=%u (0x%x)\n", actual_tolm, actual_tolm); |
1162 | actual_tolm, actual_tolm); | ||
1163 | 1149 | ||
1164 | pci_read_config_word(pvt->branchmap_werrors, MIR0, &pvt->mir0); | 1150 | pci_read_config_word(pvt->branchmap_werrors, MIR0, &pvt->mir0); |
1165 | pci_read_config_word(pvt->branchmap_werrors, MIR1, &pvt->mir1); | 1151 | pci_read_config_word(pvt->branchmap_werrors, MIR1, &pvt->mir1); |
@@ -1169,18 +1155,15 @@ static void i5000_get_mc_regs(struct mem_ctl_info *mci) | |||
1169 | limit = (pvt->mir0 >> 4) & 0x0FFF; | 1155 | limit = (pvt->mir0 >> 4) & 0x0FFF; |
1170 | way0 = pvt->mir0 & 0x1; | 1156 | way0 = pvt->mir0 & 0x1; |
1171 | way1 = pvt->mir0 & 0x2; | 1157 | way1 = pvt->mir0 & 0x2; |
1172 | edac_dbg(2, "MIR0: limit= 0x%x WAY1= %u WAY0= %x\n", | 1158 | debugf2("MIR0: limit= 0x%x WAY1= %u WAY0= %x\n", limit, way1, way0); |
1173 | limit, way1, way0); | ||
1174 | limit = (pvt->mir1 >> 4) & 0x0FFF; | 1159 | limit = (pvt->mir1 >> 4) & 0x0FFF; |
1175 | way0 = pvt->mir1 & 0x1; | 1160 | way0 = pvt->mir1 & 0x1; |
1176 | way1 = pvt->mir1 & 0x2; | 1161 | way1 = pvt->mir1 & 0x2; |
1177 | edac_dbg(2, "MIR1: limit= 0x%x WAY1= %u WAY0= %x\n", | 1162 | debugf2("MIR1: limit= 0x%x WAY1= %u WAY0= %x\n", limit, way1, way0); |
1178 | limit, way1, way0); | ||
1179 | limit = (pvt->mir2 >> 4) & 0x0FFF; | 1163 | limit = (pvt->mir2 >> 4) & 0x0FFF; |
1180 | way0 = pvt->mir2 & 0x1; | 1164 | way0 = pvt->mir2 & 0x1; |
1181 | way1 = pvt->mir2 & 0x2; | 1165 | way1 = pvt->mir2 & 0x2; |
1182 | edac_dbg(2, "MIR2: limit= 0x%x WAY1= %u WAY0= %x\n", | 1166 | debugf2("MIR2: limit= 0x%x WAY1= %u WAY0= %x\n", limit, way1, way0); |
1183 | limit, way1, way0); | ||
1184 | 1167 | ||
1185 | /* Get the MTR[0-3] regs */ | 1168 | /* Get the MTR[0-3] regs */ |
1186 | for (slot_row = 0; slot_row < NUM_MTRS; slot_row++) { | 1169 | for (slot_row = 0; slot_row < NUM_MTRS; slot_row++) { |
@@ -1189,31 +1172,31 @@ static void i5000_get_mc_regs(struct mem_ctl_info *mci) | |||
1189 | pci_read_config_word(pvt->branch_0, where, | 1172 | pci_read_config_word(pvt->branch_0, where, |
1190 | &pvt->b0_mtr[slot_row]); | 1173 | &pvt->b0_mtr[slot_row]); |
1191 | 1174 | ||
1192 | edac_dbg(2, "MTR%d where=0x%x B0 value=0x%x\n", | 1175 | debugf2("MTR%d where=0x%x B0 value=0x%x\n", slot_row, where, |
1193 | slot_row, where, pvt->b0_mtr[slot_row]); | 1176 | pvt->b0_mtr[slot_row]); |
1194 | 1177 | ||
1195 | if (pvt->maxch >= CHANNELS_PER_BRANCH) { | 1178 | if (pvt->maxch >= CHANNELS_PER_BRANCH) { |
1196 | pci_read_config_word(pvt->branch_1, where, | 1179 | pci_read_config_word(pvt->branch_1, where, |
1197 | &pvt->b1_mtr[slot_row]); | 1180 | &pvt->b1_mtr[slot_row]); |
1198 | edac_dbg(2, "MTR%d where=0x%x B1 value=0x%x\n", | 1181 | debugf2("MTR%d where=0x%x B1 value=0x%x\n", slot_row, |
1199 | slot_row, where, pvt->b1_mtr[slot_row]); | 1182 | where, pvt->b1_mtr[slot_row]); |
1200 | } else { | 1183 | } else { |
1201 | pvt->b1_mtr[slot_row] = 0; | 1184 | pvt->b1_mtr[slot_row] = 0; |
1202 | } | 1185 | } |
1203 | } | 1186 | } |
1204 | 1187 | ||
1205 | /* Read and dump branch 0's MTRs */ | 1188 | /* Read and dump branch 0's MTRs */ |
1206 | edac_dbg(2, "Memory Technology Registers:\n"); | 1189 | debugf2("\nMemory Technology Registers:\n"); |
1207 | edac_dbg(2, " Branch 0:\n"); | 1190 | debugf2(" Branch 0:\n"); |
1208 | for (slot_row = 0; slot_row < NUM_MTRS; slot_row++) { | 1191 | for (slot_row = 0; slot_row < NUM_MTRS; slot_row++) { |
1209 | decode_mtr(slot_row, pvt->b0_mtr[slot_row]); | 1192 | decode_mtr(slot_row, pvt->b0_mtr[slot_row]); |
1210 | } | 1193 | } |
1211 | pci_read_config_word(pvt->branch_0, AMB_PRESENT_0, | 1194 | pci_read_config_word(pvt->branch_0, AMB_PRESENT_0, |
1212 | &pvt->b0_ambpresent0); | 1195 | &pvt->b0_ambpresent0); |
1213 | edac_dbg(2, "\t\tAMB-Branch 0-present0 0x%x:\n", pvt->b0_ambpresent0); | 1196 | debugf2("\t\tAMB-Branch 0-present0 0x%x:\n", pvt->b0_ambpresent0); |
1214 | pci_read_config_word(pvt->branch_0, AMB_PRESENT_1, | 1197 | pci_read_config_word(pvt->branch_0, AMB_PRESENT_1, |
1215 | &pvt->b0_ambpresent1); | 1198 | &pvt->b0_ambpresent1); |
1216 | edac_dbg(2, "\t\tAMB-Branch 0-present1 0x%x:\n", pvt->b0_ambpresent1); | 1199 | debugf2("\t\tAMB-Branch 0-present1 0x%x:\n", pvt->b0_ambpresent1); |
1217 | 1200 | ||
1218 | /* Only if we have 2 branchs (4 channels) */ | 1201 | /* Only if we have 2 branchs (4 channels) */ |
1219 | if (pvt->maxch < CHANNELS_PER_BRANCH) { | 1202 | if (pvt->maxch < CHANNELS_PER_BRANCH) { |
@@ -1221,18 +1204,18 @@ static void i5000_get_mc_regs(struct mem_ctl_info *mci) | |||
1221 | pvt->b1_ambpresent1 = 0; | 1204 | pvt->b1_ambpresent1 = 0; |
1222 | } else { | 1205 | } else { |
1223 | /* Read and dump branch 1's MTRs */ | 1206 | /* Read and dump branch 1's MTRs */ |
1224 | edac_dbg(2, " Branch 1:\n"); | 1207 | debugf2(" Branch 1:\n"); |
1225 | for (slot_row = 0; slot_row < NUM_MTRS; slot_row++) { | 1208 | for (slot_row = 0; slot_row < NUM_MTRS; slot_row++) { |
1226 | decode_mtr(slot_row, pvt->b1_mtr[slot_row]); | 1209 | decode_mtr(slot_row, pvt->b1_mtr[slot_row]); |
1227 | } | 1210 | } |
1228 | pci_read_config_word(pvt->branch_1, AMB_PRESENT_0, | 1211 | pci_read_config_word(pvt->branch_1, AMB_PRESENT_0, |
1229 | &pvt->b1_ambpresent0); | 1212 | &pvt->b1_ambpresent0); |
1230 | edac_dbg(2, "\t\tAMB-Branch 1-present0 0x%x:\n", | 1213 | debugf2("\t\tAMB-Branch 1-present0 0x%x:\n", |
1231 | pvt->b1_ambpresent0); | 1214 | pvt->b1_ambpresent0); |
1232 | pci_read_config_word(pvt->branch_1, AMB_PRESENT_1, | 1215 | pci_read_config_word(pvt->branch_1, AMB_PRESENT_1, |
1233 | &pvt->b1_ambpresent1); | 1216 | &pvt->b1_ambpresent1); |
1234 | edac_dbg(2, "\t\tAMB-Branch 1-present1 0x%x:\n", | 1217 | debugf2("\t\tAMB-Branch 1-present1 0x%x:\n", |
1235 | pvt->b1_ambpresent1); | 1218 | pvt->b1_ambpresent1); |
1236 | } | 1219 | } |
1237 | 1220 | ||
1238 | /* Go and determine the size of each DIMM and place in an | 1221 | /* Go and determine the size of each DIMM and place in an |
@@ -1252,13 +1235,13 @@ static void i5000_get_mc_regs(struct mem_ctl_info *mci) | |||
1252 | static int i5000_init_csrows(struct mem_ctl_info *mci) | 1235 | static int i5000_init_csrows(struct mem_ctl_info *mci) |
1253 | { | 1236 | { |
1254 | struct i5000_pvt *pvt; | 1237 | struct i5000_pvt *pvt; |
1255 | struct dimm_info *dimm; | 1238 | struct csrow_info *p_csrow; |
1256 | int empty, channel_count; | 1239 | int empty, channel_count; |
1257 | int max_csrows; | 1240 | int max_csrows; |
1258 | int mtr; | 1241 | int mtr, mtr1; |
1259 | int csrow_megs; | 1242 | int csrow_megs; |
1260 | int channel; | 1243 | int channel; |
1261 | int slot; | 1244 | int csrow; |
1262 | 1245 | ||
1263 | pvt = mci->pvt_info; | 1246 | pvt = mci->pvt_info; |
1264 | 1247 | ||
@@ -1267,41 +1250,44 @@ static int i5000_init_csrows(struct mem_ctl_info *mci) | |||
1267 | 1250 | ||
1268 | empty = 1; /* Assume NO memory */ | 1251 | empty = 1; /* Assume NO memory */ |
1269 | 1252 | ||
1270 | /* | 1253 | for (csrow = 0; csrow < max_csrows; csrow++) { |
1271 | * FIXME: The memory layout used to map slot/channel into the | 1254 | p_csrow = &mci->csrows[csrow]; |
1272 | * real memory architecture is weird: branch+slot are "csrows" | ||
1273 | * and channel is channel. That required an extra array (dimm_info) | ||
1274 | * to map the dimms. A good cleanup would be to remove this array, | ||
1275 | * and do a loop here with branch, channel, slot | ||
1276 | */ | ||
1277 | for (slot = 0; slot < max_csrows; slot++) { | ||
1278 | for (channel = 0; channel < pvt->maxch; channel++) { | ||
1279 | |||
1280 | mtr = determine_mtr(pvt, slot, channel); | ||
1281 | 1255 | ||
1282 | if (!MTR_DIMMS_PRESENT(mtr)) | 1256 | p_csrow->csrow_idx = csrow; |
1283 | continue; | ||
1284 | 1257 | ||
1285 | dimm = EDAC_DIMM_PTR(mci->layers, mci->dimms, mci->n_layers, | 1258 | /* use branch 0 for the basis */ |
1286 | channel / MAX_BRANCHES, | 1259 | mtr = pvt->b0_mtr[csrow >> 1]; |
1287 | channel % MAX_BRANCHES, slot); | 1260 | mtr1 = pvt->b1_mtr[csrow >> 1]; |
1288 | 1261 | ||
1289 | csrow_megs = pvt->dimm_info[slot][channel].megabytes; | 1262 | /* if no DIMMS on this row, continue */ |
1290 | dimm->grain = 8; | 1263 | if (!MTR_DIMMS_PRESENT(mtr) && !MTR_DIMMS_PRESENT(mtr1)) |
1264 | continue; | ||
1291 | 1265 | ||
1292 | /* Assume DDR2 for now */ | 1266 | /* FAKE OUT VALUES, FIXME */ |
1293 | dimm->mtype = MEM_FB_DDR2; | 1267 | p_csrow->first_page = 0 + csrow * 20; |
1268 | p_csrow->last_page = 9 + csrow * 20; | ||
1269 | p_csrow->page_mask = 0xFFF; | ||
1294 | 1270 | ||
1295 | /* ask what device type on this row */ | 1271 | p_csrow->grain = 8; |
1296 | if (MTR_DRAM_WIDTH(mtr)) | ||
1297 | dimm->dtype = DEV_X8; | ||
1298 | else | ||
1299 | dimm->dtype = DEV_X4; | ||
1300 | 1272 | ||
1301 | dimm->edac_mode = EDAC_S8ECD8ED; | 1273 | csrow_megs = 0; |
1302 | dimm->nr_pages = csrow_megs << 8; | 1274 | for (channel = 0; channel < pvt->maxch; channel++) { |
1275 | csrow_megs += pvt->dimm_info[csrow][channel].megabytes; | ||
1303 | } | 1276 | } |
1304 | 1277 | ||
1278 | p_csrow->nr_pages = csrow_megs << 8; | ||
1279 | |||
1280 | /* Assume DDR2 for now */ | ||
1281 | p_csrow->mtype = MEM_FB_DDR2; | ||
1282 | |||
1283 | /* ask what device type on this row */ | ||
1284 | if (MTR_DRAM_WIDTH(mtr)) | ||
1285 | p_csrow->dtype = DEV_X8; | ||
1286 | else | ||
1287 | p_csrow->dtype = DEV_X4; | ||
1288 | |||
1289 | p_csrow->edac_mode = EDAC_S8ECD8ED; | ||
1290 | |||
1305 | empty = 0; | 1291 | empty = 0; |
1306 | } | 1292 | } |
1307 | 1293 | ||
@@ -1331,7 +1317,7 @@ static void i5000_enable_error_reporting(struct mem_ctl_info *mci) | |||
1331 | } | 1317 | } |
1332 | 1318 | ||
1333 | /* | 1319 | /* |
1334 | * i5000_get_dimm_and_channel_counts(pdev, &nr_csrows, &num_channels) | 1320 | * i5000_get_dimm_and_channel_counts(pdev, &num_csrows, &num_channels) |
1335 | * | 1321 | * |
1336 | * ask the device how many channels are present and how many CSROWS | 1322 | * ask the device how many channels are present and how many CSROWS |
1337 | * as well | 1323 | * as well |
@@ -1346,7 +1332,7 @@ static void i5000_get_dimm_and_channel_counts(struct pci_dev *pdev, | |||
1346 | * supported on this memory controller | 1332 | * supported on this memory controller |
1347 | */ | 1333 | */ |
1348 | pci_read_config_byte(pdev, MAXDIMMPERCH, &value); | 1334 | pci_read_config_byte(pdev, MAXDIMMPERCH, &value); |
1349 | *num_dimms_per_channel = (int)value; | 1335 | *num_dimms_per_channel = (int)value *2; |
1350 | 1336 | ||
1351 | pci_read_config_byte(pdev, MAXCH, &value); | 1337 | pci_read_config_byte(pdev, MAXCH, &value); |
1352 | *num_channels = (int)value; | 1338 | *num_channels = (int)value; |
@@ -1362,14 +1348,15 @@ static void i5000_get_dimm_and_channel_counts(struct pci_dev *pdev, | |||
1362 | static int i5000_probe1(struct pci_dev *pdev, int dev_idx) | 1348 | static int i5000_probe1(struct pci_dev *pdev, int dev_idx) |
1363 | { | 1349 | { |
1364 | struct mem_ctl_info *mci; | 1350 | struct mem_ctl_info *mci; |
1365 | struct edac_mc_layer layers[3]; | ||
1366 | struct i5000_pvt *pvt; | 1351 | struct i5000_pvt *pvt; |
1367 | int num_channels; | 1352 | int num_channels; |
1368 | int num_dimms_per_channel; | 1353 | int num_dimms_per_channel; |
1354 | int num_csrows; | ||
1369 | 1355 | ||
1370 | edac_dbg(0, "MC: pdev bus %u dev=0x%x fn=0x%x\n", | 1356 | debugf0("MC: %s: %s(), pdev bus %u dev=0x%x fn=0x%x\n", |
1371 | pdev->bus->number, | 1357 | __FILE__, __func__, |
1372 | PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn)); | 1358 | pdev->bus->number, |
1359 | PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn)); | ||
1373 | 1360 | ||
1374 | /* We only are looking for func 0 of the set */ | 1361 | /* We only are looking for func 0 of the set */ |
1375 | if (PCI_FUNC(pdev->devfn) != 0) | 1362 | if (PCI_FUNC(pdev->devfn) != 0) |
@@ -1390,28 +1377,21 @@ static int i5000_probe1(struct pci_dev *pdev, int dev_idx) | |||
1390 | */ | 1377 | */ |
1391 | i5000_get_dimm_and_channel_counts(pdev, &num_dimms_per_channel, | 1378 | i5000_get_dimm_and_channel_counts(pdev, &num_dimms_per_channel, |
1392 | &num_channels); | 1379 | &num_channels); |
1380 | num_csrows = num_dimms_per_channel * 2; | ||
1393 | 1381 | ||
1394 | edac_dbg(0, "MC: Number of Branches=2 Channels= %d DIMMS= %d\n", | 1382 | debugf0("MC: %s(): Number of - Channels= %d DIMMS= %d CSROWS= %d\n", |
1395 | num_channels, num_dimms_per_channel); | 1383 | __func__, num_channels, num_dimms_per_channel, num_csrows); |
1396 | 1384 | ||
1397 | /* allocate a new MC control structure */ | 1385 | /* allocate a new MC control structure */ |
1386 | mci = edac_mc_alloc(sizeof(*pvt), num_csrows, num_channels, 0); | ||
1398 | 1387 | ||
1399 | layers[0].type = EDAC_MC_LAYER_BRANCH; | ||
1400 | layers[0].size = MAX_BRANCHES; | ||
1401 | layers[0].is_virt_csrow = false; | ||
1402 | layers[1].type = EDAC_MC_LAYER_CHANNEL; | ||
1403 | layers[1].size = num_channels / MAX_BRANCHES; | ||
1404 | layers[1].is_virt_csrow = false; | ||
1405 | layers[2].type = EDAC_MC_LAYER_SLOT; | ||
1406 | layers[2].size = num_dimms_per_channel; | ||
1407 | layers[2].is_virt_csrow = true; | ||
1408 | mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers, sizeof(*pvt)); | ||
1409 | if (mci == NULL) | 1388 | if (mci == NULL) |
1410 | return -ENOMEM; | 1389 | return -ENOMEM; |
1411 | 1390 | ||
1412 | edac_dbg(0, "MC: mci = %p\n", mci); | 1391 | kobject_get(&mci->edac_mci_kobj); |
1392 | debugf0("MC: %s: %s(): mci = %p\n", __FILE__, __func__, mci); | ||
1413 | 1393 | ||
1414 | mci->pdev = &pdev->dev; /* record ptr to the generic device */ | 1394 | mci->dev = &pdev->dev; /* record ptr to the generic device */ |
1415 | 1395 | ||
1416 | pvt = mci->pvt_info; | 1396 | pvt = mci->pvt_info; |
1417 | pvt->system_address = pdev; /* Record this device in our private */ | 1397 | pvt->system_address = pdev; /* Record this device in our private */ |
@@ -1441,16 +1421,19 @@ static int i5000_probe1(struct pci_dev *pdev, int dev_idx) | |||
1441 | /* initialize the MC control structure 'csrows' table | 1421 | /* initialize the MC control structure 'csrows' table |
1442 | * with the mapping and control information */ | 1422 | * with the mapping and control information */ |
1443 | if (i5000_init_csrows(mci)) { | 1423 | if (i5000_init_csrows(mci)) { |
1444 | edac_dbg(0, "MC: Setting mci->edac_cap to EDAC_FLAG_NONE because i5000_init_csrows() returned nonzero value\n"); | 1424 | debugf0("MC: Setting mci->edac_cap to EDAC_FLAG_NONE\n" |
1425 | " because i5000_init_csrows() returned nonzero " | ||
1426 | "value\n"); | ||
1445 | mci->edac_cap = EDAC_FLAG_NONE; /* no csrows found */ | 1427 | mci->edac_cap = EDAC_FLAG_NONE; /* no csrows found */ |
1446 | } else { | 1428 | } else { |
1447 | edac_dbg(1, "MC: Enable error reporting now\n"); | 1429 | debugf1("MC: Enable error reporting now\n"); |
1448 | i5000_enable_error_reporting(mci); | 1430 | i5000_enable_error_reporting(mci); |
1449 | } | 1431 | } |
1450 | 1432 | ||
1451 | /* add this new MC control structure to EDAC's list of MCs */ | 1433 | /* add this new MC control structure to EDAC's list of MCs */ |
1452 | if (edac_mc_add_mc(mci)) { | 1434 | if (edac_mc_add_mc(mci)) { |
1453 | edac_dbg(0, "MC: failed edac_mc_add_mc()\n"); | 1435 | debugf0("MC: %s: %s(): failed edac_mc_add_mc()\n", |
1436 | __FILE__, __func__); | ||
1454 | /* FIXME: perhaps some code should go here that disables error | 1437 | /* FIXME: perhaps some code should go here that disables error |
1455 | * reporting if we just enabled it | 1438 | * reporting if we just enabled it |
1456 | */ | 1439 | */ |
@@ -1478,6 +1461,7 @@ fail1: | |||
1478 | i5000_put_devices(mci); | 1461 | i5000_put_devices(mci); |
1479 | 1462 | ||
1480 | fail0: | 1463 | fail0: |
1464 | kobject_put(&mci->edac_mci_kobj); | ||
1481 | edac_mc_free(mci); | 1465 | edac_mc_free(mci); |
1482 | return -ENODEV; | 1466 | return -ENODEV; |
1483 | } | 1467 | } |
@@ -1489,11 +1473,12 @@ fail0: | |||
1489 | * negative on error | 1473 | * negative on error |
1490 | * count (>= 0) | 1474 | * count (>= 0) |
1491 | */ | 1475 | */ |
1492 | static int i5000_init_one(struct pci_dev *pdev, const struct pci_device_id *id) | 1476 | static int __devinit i5000_init_one(struct pci_dev *pdev, |
1477 | const struct pci_device_id *id) | ||
1493 | { | 1478 | { |
1494 | int rc; | 1479 | int rc; |
1495 | 1480 | ||
1496 | edac_dbg(0, "MC:\n"); | 1481 | debugf0("MC: %s: %s()\n", __FILE__, __func__); |
1497 | 1482 | ||
1498 | /* wake up device */ | 1483 | /* wake up device */ |
1499 | rc = pci_enable_device(pdev); | 1484 | rc = pci_enable_device(pdev); |
@@ -1508,11 +1493,11 @@ static int i5000_init_one(struct pci_dev *pdev, const struct pci_device_id *id) | |||
1508 | * i5000_remove_one destructor for one instance of device | 1493 | * i5000_remove_one destructor for one instance of device |
1509 | * | 1494 | * |
1510 | */ | 1495 | */ |
1511 | static void i5000_remove_one(struct pci_dev *pdev) | 1496 | static void __devexit i5000_remove_one(struct pci_dev *pdev) |
1512 | { | 1497 | { |
1513 | struct mem_ctl_info *mci; | 1498 | struct mem_ctl_info *mci; |
1514 | 1499 | ||
1515 | edac_dbg(0, "\n"); | 1500 | debugf0("%s: %s()\n", __FILE__, __func__); |
1516 | 1501 | ||
1517 | if (i5000_pci) | 1502 | if (i5000_pci) |
1518 | edac_pci_release_generic_ctl(i5000_pci); | 1503 | edac_pci_release_generic_ctl(i5000_pci); |
@@ -1522,6 +1507,7 @@ static void i5000_remove_one(struct pci_dev *pdev) | |||
1522 | 1507 | ||
1523 | /* retrieve references to resources, and free those resources */ | 1508 | /* retrieve references to resources, and free those resources */ |
1524 | i5000_put_devices(mci); | 1509 | i5000_put_devices(mci); |
1510 | kobject_put(&mci->edac_mci_kobj); | ||
1525 | edac_mc_free(mci); | 1511 | edac_mc_free(mci); |
1526 | } | 1512 | } |
1527 | 1513 | ||
@@ -1530,7 +1516,7 @@ static void i5000_remove_one(struct pci_dev *pdev) | |||
1530 | * | 1516 | * |
1531 | * The "E500P" device is the first device supported. | 1517 | * The "E500P" device is the first device supported. |
1532 | */ | 1518 | */ |
1533 | static DEFINE_PCI_DEVICE_TABLE(i5000_pci_tbl) = { | 1519 | static const struct pci_device_id i5000_pci_tbl[] __devinitdata = { |
1534 | {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_I5000_DEV16), | 1520 | {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_I5000_DEV16), |
1535 | .driver_data = I5000P}, | 1521 | .driver_data = I5000P}, |
1536 | 1522 | ||
@@ -1546,7 +1532,7 @@ MODULE_DEVICE_TABLE(pci, i5000_pci_tbl); | |||
1546 | static struct pci_driver i5000_driver = { | 1532 | static struct pci_driver i5000_driver = { |
1547 | .name = KBUILD_BASENAME, | 1533 | .name = KBUILD_BASENAME, |
1548 | .probe = i5000_init_one, | 1534 | .probe = i5000_init_one, |
1549 | .remove = i5000_remove_one, | 1535 | .remove = __devexit_p(i5000_remove_one), |
1550 | .id_table = i5000_pci_tbl, | 1536 | .id_table = i5000_pci_tbl, |
1551 | }; | 1537 | }; |
1552 | 1538 | ||
@@ -1558,7 +1544,7 @@ static int __init i5000_init(void) | |||
1558 | { | 1544 | { |
1559 | int pci_rc; | 1545 | int pci_rc; |
1560 | 1546 | ||
1561 | edac_dbg(2, "MC:\n"); | 1547 | debugf2("MC: %s: %s()\n", __FILE__, __func__); |
1562 | 1548 | ||
1563 | /* Ensure that the OPSTATE is set correctly for POLL or NMI */ | 1549 | /* Ensure that the OPSTATE is set correctly for POLL or NMI */ |
1564 | opstate_init(); | 1550 | opstate_init(); |
@@ -1574,7 +1560,7 @@ static int __init i5000_init(void) | |||
1574 | */ | 1560 | */ |
1575 | static void __exit i5000_exit(void) | 1561 | static void __exit i5000_exit(void) |
1576 | { | 1562 | { |
1577 | edac_dbg(2, "MC:\n"); | 1563 | debugf2("MC: %s: %s()\n", __FILE__, __func__); |
1578 | pci_unregister_driver(&i5000_driver); | 1564 | pci_unregister_driver(&i5000_driver); |
1579 | } | 1565 | } |
1580 | 1566 | ||
diff --git a/drivers/edac/i5100_edac.c b/drivers/edac/i5100_edac.c index d6955b2cc99..bcbdeeca48b 100644 --- a/drivers/edac/i5100_edac.c +++ b/drivers/edac/i5100_edac.c | |||
@@ -14,11 +14,6 @@ | |||
14 | * rows for each respective channel are laid out one after another, | 14 | * rows for each respective channel are laid out one after another, |
15 | * the first half belonging to channel 0, the second half belonging | 15 | * the first half belonging to channel 0, the second half belonging |
16 | * to channel 1. | 16 | * to channel 1. |
17 | * | ||
18 | * This driver is for DDR2 DIMMs, and it uses chip select to select among the | ||
19 | * several ranks. However, instead of showing memories as ranks, it outputs | ||
20 | * them as DIMM's. An internal table creates the association between ranks | ||
21 | * and DIMM's. | ||
22 | */ | 17 | */ |
23 | #include <linux/module.h> | 18 | #include <linux/module.h> |
24 | #include <linux/init.h> | 19 | #include <linux/init.h> |
@@ -54,7 +49,7 @@ | |||
54 | #define I5100_FERR_NF_MEM_M6ERR_MASK (1 << 6) | 49 | #define I5100_FERR_NF_MEM_M6ERR_MASK (1 << 6) |
55 | #define I5100_FERR_NF_MEM_M5ERR_MASK (1 << 5) | 50 | #define I5100_FERR_NF_MEM_M5ERR_MASK (1 << 5) |
56 | #define I5100_FERR_NF_MEM_M4ERR_MASK (1 << 4) | 51 | #define I5100_FERR_NF_MEM_M4ERR_MASK (1 << 4) |
57 | #define I5100_FERR_NF_MEM_M1ERR_MASK (1 << 1) | 52 | #define I5100_FERR_NF_MEM_M1ERR_MASK 1 |
58 | #define I5100_FERR_NF_MEM_ANY_MASK \ | 53 | #define I5100_FERR_NF_MEM_ANY_MASK \ |
59 | (I5100_FERR_NF_MEM_M16ERR_MASK | \ | 54 | (I5100_FERR_NF_MEM_M16ERR_MASK | \ |
60 | I5100_FERR_NF_MEM_M15ERR_MASK | \ | 55 | I5100_FERR_NF_MEM_M15ERR_MASK | \ |
@@ -415,6 +410,14 @@ static int i5100_csrow_to_chan(const struct mem_ctl_info *mci, int csrow) | |||
415 | return csrow / priv->ranksperchan; | 410 | return csrow / priv->ranksperchan; |
416 | } | 411 | } |
417 | 412 | ||
413 | static unsigned i5100_rank_to_csrow(const struct mem_ctl_info *mci, | ||
414 | int chan, int rank) | ||
415 | { | ||
416 | const struct i5100_priv *priv = mci->pvt_info; | ||
417 | |||
418 | return chan * priv->ranksperchan + rank; | ||
419 | } | ||
420 | |||
418 | static void i5100_handle_ce(struct mem_ctl_info *mci, | 421 | static void i5100_handle_ce(struct mem_ctl_info *mci, |
419 | int chan, | 422 | int chan, |
420 | unsigned bank, | 423 | unsigned bank, |
@@ -424,17 +427,17 @@ static void i5100_handle_ce(struct mem_ctl_info *mci, | |||
424 | unsigned ras, | 427 | unsigned ras, |
425 | const char *msg) | 428 | const char *msg) |
426 | { | 429 | { |
427 | char detail[80]; | 430 | const int csrow = i5100_rank_to_csrow(mci, chan, rank); |
428 | 431 | ||
429 | /* Form out message */ | 432 | printk(KERN_ERR |
430 | snprintf(detail, sizeof(detail), | 433 | "CE chan %d, bank %u, rank %u, syndrome 0x%lx, " |
431 | "bank %u, cas %u, ras %u\n", | 434 | "cas %u, ras %u, csrow %u, label \"%s\": %s\n", |
432 | bank, cas, ras); | 435 | chan, bank, rank, syndrome, cas, ras, |
436 | csrow, mci->csrows[csrow].channels[0].label, msg); | ||
433 | 437 | ||
434 | edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1, | 438 | mci->ce_count++; |
435 | 0, 0, syndrome, | 439 | mci->csrows[csrow].ce_count++; |
436 | chan, rank, -1, | 440 | mci->csrows[csrow].channels[0].ce_count++; |
437 | msg, detail); | ||
438 | } | 441 | } |
439 | 442 | ||
440 | static void i5100_handle_ue(struct mem_ctl_info *mci, | 443 | static void i5100_handle_ue(struct mem_ctl_info *mci, |
@@ -446,17 +449,16 @@ static void i5100_handle_ue(struct mem_ctl_info *mci, | |||
446 | unsigned ras, | 449 | unsigned ras, |
447 | const char *msg) | 450 | const char *msg) |
448 | { | 451 | { |
449 | char detail[80]; | 452 | const int csrow = i5100_rank_to_csrow(mci, chan, rank); |
450 | 453 | ||
451 | /* Form out message */ | 454 | printk(KERN_ERR |
452 | snprintf(detail, sizeof(detail), | 455 | "UE chan %d, bank %u, rank %u, syndrome 0x%lx, " |
453 | "bank %u, cas %u, ras %u\n", | 456 | "cas %u, ras %u, csrow %u, label \"%s\": %s\n", |
454 | bank, cas, ras); | 457 | chan, bank, rank, syndrome, cas, ras, |
458 | csrow, mci->csrows[csrow].channels[0].label, msg); | ||
455 | 459 | ||
456 | edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1, | 460 | mci->ue_count++; |
457 | 0, 0, syndrome, | 461 | mci->csrows[csrow].ue_count++; |
458 | chan, rank, -1, | ||
459 | msg, detail); | ||
460 | } | 462 | } |
461 | 463 | ||
462 | static void i5100_read_log(struct mem_ctl_info *mci, int chan, | 464 | static void i5100_read_log(struct mem_ctl_info *mci, int chan, |
@@ -533,20 +535,23 @@ static void i5100_read_log(struct mem_ctl_info *mci, int chan, | |||
533 | static void i5100_check_error(struct mem_ctl_info *mci) | 535 | static void i5100_check_error(struct mem_ctl_info *mci) |
534 | { | 536 | { |
535 | struct i5100_priv *priv = mci->pvt_info; | 537 | struct i5100_priv *priv = mci->pvt_info; |
536 | u32 dw, dw2; | 538 | u32 dw; |
539 | |||
537 | 540 | ||
538 | pci_read_config_dword(priv->mc, I5100_FERR_NF_MEM, &dw); | 541 | pci_read_config_dword(priv->mc, I5100_FERR_NF_MEM, &dw); |
539 | if (i5100_ferr_nf_mem_any(dw)) { | 542 | if (i5100_ferr_nf_mem_any(dw)) { |
543 | u32 dw2; | ||
540 | 544 | ||
541 | pci_read_config_dword(priv->mc, I5100_NERR_NF_MEM, &dw2); | 545 | pci_read_config_dword(priv->mc, I5100_NERR_NF_MEM, &dw2); |
546 | if (dw2) | ||
547 | pci_write_config_dword(priv->mc, I5100_NERR_NF_MEM, | ||
548 | dw2); | ||
549 | pci_write_config_dword(priv->mc, I5100_FERR_NF_MEM, dw); | ||
542 | 550 | ||
543 | i5100_read_log(mci, i5100_ferr_nf_mem_chan_indx(dw), | 551 | i5100_read_log(mci, i5100_ferr_nf_mem_chan_indx(dw), |
544 | i5100_ferr_nf_mem_any(dw), | 552 | i5100_ferr_nf_mem_any(dw), |
545 | i5100_nerr_nf_mem_any(dw2)); | 553 | i5100_nerr_nf_mem_any(dw2)); |
546 | |||
547 | pci_write_config_dword(priv->mc, I5100_NERR_NF_MEM, dw2); | ||
548 | } | 554 | } |
549 | pci_write_config_dword(priv->mc, I5100_FERR_NF_MEM, dw); | ||
550 | } | 555 | } |
551 | 556 | ||
552 | /* The i5100 chipset will scrub the entire memory once, then | 557 | /* The i5100 chipset will scrub the entire memory once, then |
@@ -638,7 +643,8 @@ static struct pci_dev *pci_get_device_func(unsigned vendor, | |||
638 | return ret; | 643 | return ret; |
639 | } | 644 | } |
640 | 645 | ||
641 | static unsigned long i5100_npages(struct mem_ctl_info *mci, int csrow) | 646 | static unsigned long __devinit i5100_npages(struct mem_ctl_info *mci, |
647 | int csrow) | ||
642 | { | 648 | { |
643 | struct i5100_priv *priv = mci->pvt_info; | 649 | struct i5100_priv *priv = mci->pvt_info; |
644 | const unsigned chan_rank = i5100_csrow_to_rank(mci, csrow); | 650 | const unsigned chan_rank = i5100_csrow_to_rank(mci, csrow); |
@@ -659,7 +665,7 @@ static unsigned long i5100_npages(struct mem_ctl_info *mci, int csrow) | |||
659 | ((unsigned long long) (1ULL << addr_lines) / PAGE_SIZE); | 665 | ((unsigned long long) (1ULL << addr_lines) / PAGE_SIZE); |
660 | } | 666 | } |
661 | 667 | ||
662 | static void i5100_init_mtr(struct mem_ctl_info *mci) | 668 | static void __devinit i5100_init_mtr(struct mem_ctl_info *mci) |
663 | { | 669 | { |
664 | struct i5100_priv *priv = mci->pvt_info; | 670 | struct i5100_priv *priv = mci->pvt_info; |
665 | struct pci_dev *mms[2] = { priv->ch0mm, priv->ch1mm }; | 671 | struct pci_dev *mms[2] = { priv->ch0mm, priv->ch1mm }; |
@@ -731,7 +737,7 @@ static int i5100_read_spd_byte(const struct mem_ctl_info *mci, | |||
731 | * o not the only way to may chip selects to dimm slots | 737 | * o not the only way to may chip selects to dimm slots |
732 | * o investigate if there is some way to obtain this map from the bios | 738 | * o investigate if there is some way to obtain this map from the bios |
733 | */ | 739 | */ |
734 | static void i5100_init_dimm_csmap(struct mem_ctl_info *mci) | 740 | static void __devinit i5100_init_dimm_csmap(struct mem_ctl_info *mci) |
735 | { | 741 | { |
736 | struct i5100_priv *priv = mci->pvt_info; | 742 | struct i5100_priv *priv = mci->pvt_info; |
737 | int i; | 743 | int i; |
@@ -761,8 +767,8 @@ static void i5100_init_dimm_csmap(struct mem_ctl_info *mci) | |||
761 | } | 767 | } |
762 | } | 768 | } |
763 | 769 | ||
764 | static void i5100_init_dimm_layout(struct pci_dev *pdev, | 770 | static void __devinit i5100_init_dimm_layout(struct pci_dev *pdev, |
765 | struct mem_ctl_info *mci) | 771 | struct mem_ctl_info *mci) |
766 | { | 772 | { |
767 | struct i5100_priv *priv = mci->pvt_info; | 773 | struct i5100_priv *priv = mci->pvt_info; |
768 | int i; | 774 | int i; |
@@ -783,8 +789,8 @@ static void i5100_init_dimm_layout(struct pci_dev *pdev, | |||
783 | i5100_init_dimm_csmap(mci); | 789 | i5100_init_dimm_csmap(mci); |
784 | } | 790 | } |
785 | 791 | ||
786 | static void i5100_init_interleaving(struct pci_dev *pdev, | 792 | static void __devinit i5100_init_interleaving(struct pci_dev *pdev, |
787 | struct mem_ctl_info *mci) | 793 | struct mem_ctl_info *mci) |
788 | { | 794 | { |
789 | u16 w; | 795 | u16 w; |
790 | u32 dw; | 796 | u32 dw; |
@@ -829,13 +835,13 @@ static void i5100_init_interleaving(struct pci_dev *pdev, | |||
829 | i5100_init_mtr(mci); | 835 | i5100_init_mtr(mci); |
830 | } | 836 | } |
831 | 837 | ||
832 | static void i5100_init_csrows(struct mem_ctl_info *mci) | 838 | static void __devinit i5100_init_csrows(struct mem_ctl_info *mci) |
833 | { | 839 | { |
834 | int i; | 840 | int i; |
841 | unsigned long total_pages = 0UL; | ||
835 | struct i5100_priv *priv = mci->pvt_info; | 842 | struct i5100_priv *priv = mci->pvt_info; |
836 | 843 | ||
837 | for (i = 0; i < mci->tot_dimms; i++) { | 844 | for (i = 0; i < mci->nr_csrows; i++) { |
838 | struct dimm_info *dimm; | ||
839 | const unsigned long npages = i5100_npages(mci, i); | 845 | const unsigned long npages = i5100_npages(mci, i); |
840 | const unsigned chan = i5100_csrow_to_chan(mci, i); | 846 | const unsigned chan = i5100_csrow_to_chan(mci, i); |
841 | const unsigned rank = i5100_csrow_to_rank(mci, i); | 847 | const unsigned rank = i5100_csrow_to_rank(mci, i); |
@@ -843,31 +849,41 @@ static void i5100_init_csrows(struct mem_ctl_info *mci) | |||
843 | if (!npages) | 849 | if (!npages) |
844 | continue; | 850 | continue; |
845 | 851 | ||
846 | dimm = EDAC_DIMM_PTR(mci->layers, mci->dimms, mci->n_layers, | 852 | /* |
847 | chan, rank, 0); | 853 | * FIXME: these two are totally bogus -- I don't see how to |
848 | 854 | * map them correctly to this structure... | |
849 | dimm->nr_pages = npages; | 855 | */ |
850 | if (npages) { | 856 | mci->csrows[i].first_page = total_pages; |
851 | dimm->grain = 32; | 857 | mci->csrows[i].last_page = total_pages + npages - 1; |
852 | dimm->dtype = (priv->mtr[chan][rank].width == 4) ? | 858 | mci->csrows[i].page_mask = 0UL; |
853 | DEV_X4 : DEV_X8; | 859 | |
854 | dimm->mtype = MEM_RDDR2; | 860 | mci->csrows[i].nr_pages = npages; |
855 | dimm->edac_mode = EDAC_SECDED; | 861 | mci->csrows[i].grain = 32; |
856 | snprintf(dimm->label, sizeof(dimm->label), | 862 | mci->csrows[i].csrow_idx = i; |
857 | "DIMM%u", | 863 | mci->csrows[i].dtype = |
858 | i5100_rank_to_slot(mci, chan, rank)); | 864 | (priv->mtr[chan][rank].width == 4) ? DEV_X4 : DEV_X8; |
859 | } | 865 | mci->csrows[i].ue_count = 0; |
860 | 866 | mci->csrows[i].ce_count = 0; | |
861 | edac_dbg(2, "dimm channel %d, rank %d, size %ld\n", | 867 | mci->csrows[i].mtype = MEM_RDDR2; |
862 | chan, rank, (long)PAGES_TO_MiB(npages)); | 868 | mci->csrows[i].edac_mode = EDAC_SECDED; |
869 | mci->csrows[i].mci = mci; | ||
870 | mci->csrows[i].nr_channels = 1; | ||
871 | mci->csrows[i].channels[0].chan_idx = 0; | ||
872 | mci->csrows[i].channels[0].ce_count = 0; | ||
873 | mci->csrows[i].channels[0].csrow = mci->csrows + i; | ||
874 | snprintf(mci->csrows[i].channels[0].label, | ||
875 | sizeof(mci->csrows[i].channels[0].label), | ||
876 | "DIMM%u", i5100_rank_to_slot(mci, chan, rank)); | ||
877 | |||
878 | total_pages += npages; | ||
863 | } | 879 | } |
864 | } | 880 | } |
865 | 881 | ||
866 | static int i5100_init_one(struct pci_dev *pdev, const struct pci_device_id *id) | 882 | static int __devinit i5100_init_one(struct pci_dev *pdev, |
883 | const struct pci_device_id *id) | ||
867 | { | 884 | { |
868 | int rc; | 885 | int rc; |
869 | struct mem_ctl_info *mci; | 886 | struct mem_ctl_info *mci; |
870 | struct edac_mc_layer layers[2]; | ||
871 | struct i5100_priv *priv; | 887 | struct i5100_priv *priv; |
872 | struct pci_dev *ch0mm, *ch1mm; | 888 | struct pci_dev *ch0mm, *ch1mm; |
873 | int ret = 0; | 889 | int ret = 0; |
@@ -928,20 +944,13 @@ static int i5100_init_one(struct pci_dev *pdev, const struct pci_device_id *id) | |||
928 | goto bail_ch1; | 944 | goto bail_ch1; |
929 | } | 945 | } |
930 | 946 | ||
931 | layers[0].type = EDAC_MC_LAYER_CHANNEL; | 947 | mci = edac_mc_alloc(sizeof(*priv), ranksperch * 2, 1, 0); |
932 | layers[0].size = 2; | ||
933 | layers[0].is_virt_csrow = false; | ||
934 | layers[1].type = EDAC_MC_LAYER_SLOT; | ||
935 | layers[1].size = ranksperch; | ||
936 | layers[1].is_virt_csrow = true; | ||
937 | mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers, | ||
938 | sizeof(*priv)); | ||
939 | if (!mci) { | 948 | if (!mci) { |
940 | ret = -ENOMEM; | 949 | ret = -ENOMEM; |
941 | goto bail_disable_ch1; | 950 | goto bail_disable_ch1; |
942 | } | 951 | } |
943 | 952 | ||
944 | mci->pdev = &pdev->dev; | 953 | mci->dev = &pdev->dev; |
945 | 954 | ||
946 | priv = mci->pvt_info; | 955 | priv = mci->pvt_info; |
947 | priv->ranksperchan = ranksperch; | 956 | priv->ranksperchan = ranksperch; |
@@ -1018,7 +1027,7 @@ bail: | |||
1018 | return ret; | 1027 | return ret; |
1019 | } | 1028 | } |
1020 | 1029 | ||
1021 | static void i5100_remove_one(struct pci_dev *pdev) | 1030 | static void __devexit i5100_remove_one(struct pci_dev *pdev) |
1022 | { | 1031 | { |
1023 | struct mem_ctl_info *mci; | 1032 | struct mem_ctl_info *mci; |
1024 | struct i5100_priv *priv; | 1033 | struct i5100_priv *priv; |
@@ -1042,7 +1051,7 @@ static void i5100_remove_one(struct pci_dev *pdev) | |||
1042 | edac_mc_free(mci); | 1051 | edac_mc_free(mci); |
1043 | } | 1052 | } |
1044 | 1053 | ||
1045 | static DEFINE_PCI_DEVICE_TABLE(i5100_pci_tbl) = { | 1054 | static const struct pci_device_id i5100_pci_tbl[] __devinitdata = { |
1046 | /* Device 16, Function 0, Channel 0 Memory Map, Error Flag/Mask, ... */ | 1055 | /* Device 16, Function 0, Channel 0 Memory Map, Error Flag/Mask, ... */ |
1047 | { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_5100_16) }, | 1056 | { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_5100_16) }, |
1048 | { 0, } | 1057 | { 0, } |
@@ -1052,7 +1061,7 @@ MODULE_DEVICE_TABLE(pci, i5100_pci_tbl); | |||
1052 | static struct pci_driver i5100_driver = { | 1061 | static struct pci_driver i5100_driver = { |
1053 | .name = KBUILD_BASENAME, | 1062 | .name = KBUILD_BASENAME, |
1054 | .probe = i5100_init_one, | 1063 | .probe = i5100_init_one, |
1055 | .remove = i5100_remove_one, | 1064 | .remove = __devexit_p(i5100_remove_one), |
1056 | .id_table = i5100_pci_tbl, | 1065 | .id_table = i5100_pci_tbl, |
1057 | }; | 1066 | }; |
1058 | 1067 | ||
diff --git a/drivers/edac/i5400_edac.c b/drivers/edac/i5400_edac.c index 0a05bbceb08..74d6ec342af 100644 --- a/drivers/edac/i5400_edac.c +++ b/drivers/edac/i5400_edac.c | |||
@@ -18,10 +18,6 @@ | |||
18 | * Intel 5400 Chipset Memory Controller Hub (MCH) - Datasheet | 18 | * Intel 5400 Chipset Memory Controller Hub (MCH) - Datasheet |
19 | * http://developer.intel.com/design/chipsets/datashts/313070.htm | 19 | * http://developer.intel.com/design/chipsets/datashts/313070.htm |
20 | * | 20 | * |
21 | * This Memory Controller manages DDR2 FB-DIMMs. It has 2 branches, each with | ||
22 | * 2 channels operating in lockstep no-mirror mode. Each channel can have up to | ||
23 | * 4 dimm's, each with up to 8GB. | ||
24 | * | ||
25 | */ | 21 | */ |
26 | 22 | ||
27 | #include <linux/module.h> | 23 | #include <linux/module.h> |
@@ -48,10 +44,12 @@ | |||
48 | edac_mc_chipset_printk(mci, level, "i5400", fmt, ##arg) | 44 | edac_mc_chipset_printk(mci, level, "i5400", fmt, ##arg) |
49 | 45 | ||
50 | /* Limits for i5400 */ | 46 | /* Limits for i5400 */ |
51 | #define MAX_BRANCHES 2 | 47 | #define NUM_MTRS_PER_BRANCH 4 |
52 | #define CHANNELS_PER_BRANCH 2 | 48 | #define CHANNELS_PER_BRANCH 2 |
53 | #define DIMMS_PER_CHANNEL 4 | 49 | #define MAX_DIMMS_PER_CHANNEL NUM_MTRS_PER_BRANCH |
54 | #define MAX_CHANNELS (MAX_BRANCHES * CHANNELS_PER_BRANCH) | 50 | #define MAX_CHANNELS 4 |
51 | /* max possible csrows per channel */ | ||
52 | #define MAX_CSROWS (MAX_DIMMS_PER_CHANNEL) | ||
55 | 53 | ||
56 | /* Device 16, | 54 | /* Device 16, |
57 | * Function 0: System Address | 55 | * Function 0: System Address |
@@ -300,6 +298,24 @@ static inline int extract_fbdchan_indx(u32 x) | |||
300 | return (x>>28) & 0x3; | 298 | return (x>>28) & 0x3; |
301 | } | 299 | } |
302 | 300 | ||
301 | #ifdef CONFIG_EDAC_DEBUG | ||
302 | /* MTR NUMROW */ | ||
303 | static const char *numrow_toString[] = { | ||
304 | "8,192 - 13 rows", | ||
305 | "16,384 - 14 rows", | ||
306 | "32,768 - 15 rows", | ||
307 | "65,536 - 16 rows" | ||
308 | }; | ||
309 | |||
310 | /* MTR NUMCOL */ | ||
311 | static const char *numcol_toString[] = { | ||
312 | "1,024 - 10 columns", | ||
313 | "2,048 - 11 columns", | ||
314 | "4,096 - 12 columns", | ||
315 | "reserved" | ||
316 | }; | ||
317 | #endif | ||
318 | |||
303 | /* Device name and register DID (Device ID) */ | 319 | /* Device name and register DID (Device ID) */ |
304 | struct i5400_dev_info { | 320 | struct i5400_dev_info { |
305 | const char *ctl_name; /* name for this device */ | 321 | const char *ctl_name; /* name for this device */ |
@@ -327,26 +343,20 @@ struct i5400_pvt { | |||
327 | struct pci_dev *branch_1; /* 22.0 */ | 343 | struct pci_dev *branch_1; /* 22.0 */ |
328 | 344 | ||
329 | u16 tolm; /* top of low memory */ | 345 | u16 tolm; /* top of low memory */ |
330 | union { | 346 | u64 ambase; /* AMB BAR */ |
331 | u64 ambase; /* AMB BAR */ | ||
332 | struct { | ||
333 | u32 ambase_bottom; | ||
334 | u32 ambase_top; | ||
335 | } u __packed; | ||
336 | }; | ||
337 | 347 | ||
338 | u16 mir0, mir1; | 348 | u16 mir0, mir1; |
339 | 349 | ||
340 | u16 b0_mtr[DIMMS_PER_CHANNEL]; /* Memory Technlogy Reg */ | 350 | u16 b0_mtr[NUM_MTRS_PER_BRANCH]; /* Memory Technlogy Reg */ |
341 | u16 b0_ambpresent0; /* Branch 0, Channel 0 */ | 351 | u16 b0_ambpresent0; /* Branch 0, Channel 0 */ |
342 | u16 b0_ambpresent1; /* Brnach 0, Channel 1 */ | 352 | u16 b0_ambpresent1; /* Brnach 0, Channel 1 */ |
343 | 353 | ||
344 | u16 b1_mtr[DIMMS_PER_CHANNEL]; /* Memory Technlogy Reg */ | 354 | u16 b1_mtr[NUM_MTRS_PER_BRANCH]; /* Memory Technlogy Reg */ |
345 | u16 b1_ambpresent0; /* Branch 1, Channel 8 */ | 355 | u16 b1_ambpresent0; /* Branch 1, Channel 8 */ |
346 | u16 b1_ambpresent1; /* Branch 1, Channel 1 */ | 356 | u16 b1_ambpresent1; /* Branch 1, Channel 1 */ |
347 | 357 | ||
348 | /* DIMM information matrix, allocating architecture maximums */ | 358 | /* DIMM information matrix, allocating architecture maximums */ |
349 | struct i5400_dimm_info dimm_info[DIMMS_PER_CHANNEL][MAX_CHANNELS]; | 359 | struct i5400_dimm_info dimm_info[MAX_CSROWS][MAX_CHANNELS]; |
350 | 360 | ||
351 | /* Actual values for this controller */ | 361 | /* Actual values for this controller */ |
352 | int maxch; /* Max channels */ | 362 | int maxch; /* Max channels */ |
@@ -522,15 +532,13 @@ static void i5400_proccess_non_recoverable_info(struct mem_ctl_info *mci, | |||
522 | int ras, cas; | 532 | int ras, cas; |
523 | int errnum; | 533 | int errnum; |
524 | char *type = NULL; | 534 | char *type = NULL; |
525 | enum hw_event_mc_err_type tp_event = HW_EVENT_ERR_UNCORRECTED; | ||
526 | 535 | ||
527 | if (!allErrors) | 536 | if (!allErrors) |
528 | return; /* if no error, return now */ | 537 | return; /* if no error, return now */ |
529 | 538 | ||
530 | if (allErrors & ERROR_FAT_MASK) { | 539 | if (allErrors & ERROR_FAT_MASK) |
531 | type = "FATAL"; | 540 | type = "FATAL"; |
532 | tp_event = HW_EVENT_ERR_FATAL; | 541 | else if (allErrors & FERR_NF_UNCORRECTABLE) |
533 | } else if (allErrors & FERR_NF_UNCORRECTABLE) | ||
534 | type = "NON-FATAL uncorrected"; | 542 | type = "NON-FATAL uncorrected"; |
535 | else | 543 | else |
536 | type = "NON-FATAL recoverable"; | 544 | type = "NON-FATAL recoverable"; |
@@ -548,22 +556,23 @@ static void i5400_proccess_non_recoverable_info(struct mem_ctl_info *mci, | |||
548 | ras = nrec_ras(info); | 556 | ras = nrec_ras(info); |
549 | cas = nrec_cas(info); | 557 | cas = nrec_cas(info); |
550 | 558 | ||
551 | edac_dbg(0, "\t\tDIMM= %d Channels= %d,%d (Branch= %d DRAM Bank= %d Buffer ID = %d rdwr= %s ras= %d cas= %d)\n", | 559 | debugf0("\t\tCSROW= %d Channels= %d,%d (Branch= %d " |
552 | rank, channel, channel + 1, branch >> 1, bank, | 560 | "DRAM Bank= %d Buffer ID = %d rdwr= %s ras= %d cas= %d)\n", |
553 | buf_id, rdwr_str(rdwr), ras, cas); | 561 | rank, channel, channel + 1, branch >> 1, bank, |
562 | buf_id, rdwr_str(rdwr), ras, cas); | ||
554 | 563 | ||
555 | /* Only 1 bit will be on */ | 564 | /* Only 1 bit will be on */ |
556 | errnum = find_first_bit(&allErrors, ARRAY_SIZE(error_name)); | 565 | errnum = find_first_bit(&allErrors, ARRAY_SIZE(error_name)); |
557 | 566 | ||
558 | /* Form out message */ | 567 | /* Form out message */ |
559 | snprintf(msg, sizeof(msg), | 568 | snprintf(msg, sizeof(msg), |
560 | "Bank=%d Buffer ID = %d RAS=%d CAS=%d Err=0x%lx (%s)", | 569 | "%s (Branch=%d DRAM-Bank=%d Buffer ID = %d RDWR=%s " |
561 | bank, buf_id, ras, cas, allErrors, error_name[errnum]); | 570 | "RAS=%d CAS=%d %s Err=0x%lx (%s))", |
571 | type, branch >> 1, bank, buf_id, rdwr_str(rdwr), ras, cas, | ||
572 | type, allErrors, error_name[errnum]); | ||
562 | 573 | ||
563 | edac_mc_handle_error(tp_event, mci, 1, 0, 0, 0, | 574 | /* Call the helper to output message */ |
564 | branch >> 1, -1, rank, | 575 | edac_mc_handle_fbd_ue(mci, rank, channel, channel + 1, msg); |
565 | rdwr ? "Write error" : "Read error", | ||
566 | msg); | ||
567 | } | 576 | } |
568 | 577 | ||
569 | /* | 578 | /* |
@@ -600,7 +609,7 @@ static void i5400_process_nonfatal_error_info(struct mem_ctl_info *mci, | |||
600 | 609 | ||
601 | /* Correctable errors */ | 610 | /* Correctable errors */ |
602 | if (allErrors & ERROR_NF_CORRECTABLE) { | 611 | if (allErrors & ERROR_NF_CORRECTABLE) { |
603 | edac_dbg(0, "\tCorrected bits= 0x%lx\n", allErrors); | 612 | debugf0("\tCorrected bits= 0x%lx\n", allErrors); |
604 | 613 | ||
605 | branch = extract_fbdchan_indx(info->ferr_nf_fbd); | 614 | branch = extract_fbdchan_indx(info->ferr_nf_fbd); |
606 | 615 | ||
@@ -621,9 +630,10 @@ static void i5400_process_nonfatal_error_info(struct mem_ctl_info *mci, | |||
621 | /* Only 1 bit will be on */ | 630 | /* Only 1 bit will be on */ |
622 | errnum = find_first_bit(&allErrors, ARRAY_SIZE(error_name)); | 631 | errnum = find_first_bit(&allErrors, ARRAY_SIZE(error_name)); |
623 | 632 | ||
624 | edac_dbg(0, "\t\tDIMM= %d Channel= %d (Branch %d DRAM Bank= %d rdwr= %s ras= %d cas= %d)\n", | 633 | debugf0("\t\tCSROW= %d Channel= %d (Branch %d " |
625 | rank, channel, branch >> 1, bank, | 634 | "DRAM Bank= %d rdwr= %s ras= %d cas= %d)\n", |
626 | rdwr_str(rdwr), ras, cas); | 635 | rank, channel, branch >> 1, bank, |
636 | rdwr_str(rdwr), ras, cas); | ||
627 | 637 | ||
628 | /* Form out message */ | 638 | /* Form out message */ |
629 | snprintf(msg, sizeof(msg), | 639 | snprintf(msg, sizeof(msg), |
@@ -632,10 +642,8 @@ static void i5400_process_nonfatal_error_info(struct mem_ctl_info *mci, | |||
632 | branch >> 1, bank, rdwr_str(rdwr), ras, cas, | 642 | branch >> 1, bank, rdwr_str(rdwr), ras, cas, |
633 | allErrors, error_name[errnum]); | 643 | allErrors, error_name[errnum]); |
634 | 644 | ||
635 | edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1, 0, 0, 0, | 645 | /* Call the helper to output message */ |
636 | branch >> 1, channel % 2, rank, | 646 | edac_mc_handle_fbd_ce(mci, rank, channel, msg); |
637 | rdwr ? "Write error" : "Read error", | ||
638 | msg); | ||
639 | 647 | ||
640 | return; | 648 | return; |
641 | } | 649 | } |
@@ -686,7 +694,7 @@ static void i5400_clear_error(struct mem_ctl_info *mci) | |||
686 | static void i5400_check_error(struct mem_ctl_info *mci) | 694 | static void i5400_check_error(struct mem_ctl_info *mci) |
687 | { | 695 | { |
688 | struct i5400_error_info info; | 696 | struct i5400_error_info info; |
689 | edac_dbg(4, "MC%d\n", mci->mc_idx); | 697 | debugf4("MC%d: %s: %s()\n", mci->mc_idx, __FILE__, __func__); |
690 | i5400_get_error_info(mci, &info); | 698 | i5400_get_error_info(mci, &info); |
691 | i5400_process_error_info(mci, &info); | 699 | i5400_process_error_info(mci, &info); |
692 | } | 700 | } |
@@ -727,7 +735,7 @@ static int i5400_get_devices(struct mem_ctl_info *mci, int dev_idx) | |||
727 | 735 | ||
728 | /* Attempt to 'get' the MCH register we want */ | 736 | /* Attempt to 'get' the MCH register we want */ |
729 | pdev = NULL; | 737 | pdev = NULL; |
730 | while (1) { | 738 | while (!pvt->branchmap_werrors || !pvt->fsb_error_regs) { |
731 | pdev = pci_get_device(PCI_VENDOR_ID_INTEL, | 739 | pdev = pci_get_device(PCI_VENDOR_ID_INTEL, |
732 | PCI_DEVICE_ID_INTEL_5400_ERR, pdev); | 740 | PCI_DEVICE_ID_INTEL_5400_ERR, pdev); |
733 | if (!pdev) { | 741 | if (!pdev) { |
@@ -735,53 +743,33 @@ static int i5400_get_devices(struct mem_ctl_info *mci, int dev_idx) | |||
735 | i5400_printk(KERN_ERR, | 743 | i5400_printk(KERN_ERR, |
736 | "'system address,Process Bus' " | 744 | "'system address,Process Bus' " |
737 | "device not found:" | 745 | "device not found:" |
738 | "vendor 0x%x device 0x%x ERR func 1 " | 746 | "vendor 0x%x device 0x%x ERR funcs " |
739 | "(broken BIOS?)\n", | 747 | "(broken BIOS?)\n", |
740 | PCI_VENDOR_ID_INTEL, | 748 | PCI_VENDOR_ID_INTEL, |
741 | PCI_DEVICE_ID_INTEL_5400_ERR); | 749 | PCI_DEVICE_ID_INTEL_5400_ERR); |
742 | return -ENODEV; | 750 | goto error; |
743 | } | 751 | } |
744 | 752 | ||
745 | /* Store device 16 func 1 */ | 753 | /* Store device 16 funcs 1 and 2 */ |
746 | if (PCI_FUNC(pdev->devfn) == 1) | 754 | switch (PCI_FUNC(pdev->devfn)) { |
755 | case 1: | ||
756 | pvt->branchmap_werrors = pdev; | ||
747 | break; | 757 | break; |
748 | } | 758 | case 2: |
749 | pvt->branchmap_werrors = pdev; | 759 | pvt->fsb_error_regs = pdev; |
750 | |||
751 | pdev = NULL; | ||
752 | while (1) { | ||
753 | pdev = pci_get_device(PCI_VENDOR_ID_INTEL, | ||
754 | PCI_DEVICE_ID_INTEL_5400_ERR, pdev); | ||
755 | if (!pdev) { | ||
756 | /* End of list, leave */ | ||
757 | i5400_printk(KERN_ERR, | ||
758 | "'system address,Process Bus' " | ||
759 | "device not found:" | ||
760 | "vendor 0x%x device 0x%x ERR func 2 " | ||
761 | "(broken BIOS?)\n", | ||
762 | PCI_VENDOR_ID_INTEL, | ||
763 | PCI_DEVICE_ID_INTEL_5400_ERR); | ||
764 | |||
765 | pci_dev_put(pvt->branchmap_werrors); | ||
766 | return -ENODEV; | ||
767 | } | ||
768 | |||
769 | /* Store device 16 func 2 */ | ||
770 | if (PCI_FUNC(pdev->devfn) == 2) | ||
771 | break; | 760 | break; |
761 | } | ||
772 | } | 762 | } |
773 | pvt->fsb_error_regs = pdev; | 763 | |
774 | 764 | debugf1("System Address, processor bus- PCI Bus ID: %s %x:%x\n", | |
775 | edac_dbg(1, "System Address, processor bus- PCI Bus ID: %s %x:%x\n", | 765 | pci_name(pvt->system_address), |
776 | pci_name(pvt->system_address), | 766 | pvt->system_address->vendor, pvt->system_address->device); |
777 | pvt->system_address->vendor, pvt->system_address->device); | 767 | debugf1("Branchmap, control and errors - PCI Bus ID: %s %x:%x\n", |
778 | edac_dbg(1, "Branchmap, control and errors - PCI Bus ID: %s %x:%x\n", | 768 | pci_name(pvt->branchmap_werrors), |
779 | pci_name(pvt->branchmap_werrors), | 769 | pvt->branchmap_werrors->vendor, pvt->branchmap_werrors->device); |
780 | pvt->branchmap_werrors->vendor, | 770 | debugf1("FSB Error Regs - PCI Bus ID: %s %x:%x\n", |
781 | pvt->branchmap_werrors->device); | 771 | pci_name(pvt->fsb_error_regs), |
782 | edac_dbg(1, "FSB Error Regs - PCI Bus ID: %s %x:%x\n", | 772 | pvt->fsb_error_regs->vendor, pvt->fsb_error_regs->device); |
783 | pci_name(pvt->fsb_error_regs), | ||
784 | pvt->fsb_error_regs->vendor, pvt->fsb_error_regs->device); | ||
785 | 773 | ||
786 | pvt->branch_0 = pci_get_device(PCI_VENDOR_ID_INTEL, | 774 | pvt->branch_0 = pci_get_device(PCI_VENDOR_ID_INTEL, |
787 | PCI_DEVICE_ID_INTEL_5400_FBD0, NULL); | 775 | PCI_DEVICE_ID_INTEL_5400_FBD0, NULL); |
@@ -790,10 +778,7 @@ static int i5400_get_devices(struct mem_ctl_info *mci, int dev_idx) | |||
790 | "MC: 'BRANCH 0' device not found:" | 778 | "MC: 'BRANCH 0' device not found:" |
791 | "vendor 0x%x device 0x%x Func 0 (broken BIOS?)\n", | 779 | "vendor 0x%x device 0x%x Func 0 (broken BIOS?)\n", |
792 | PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_5400_FBD0); | 780 | PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_5400_FBD0); |
793 | 781 | goto error; | |
794 | pci_dev_put(pvt->fsb_error_regs); | ||
795 | pci_dev_put(pvt->branchmap_werrors); | ||
796 | return -ENODEV; | ||
797 | } | 782 | } |
798 | 783 | ||
799 | /* If this device claims to have more than 2 channels then | 784 | /* If this device claims to have more than 2 channels then |
@@ -811,21 +796,21 @@ static int i5400_get_devices(struct mem_ctl_info *mci, int dev_idx) | |||
811 | "(broken BIOS?)\n", | 796 | "(broken BIOS?)\n", |
812 | PCI_VENDOR_ID_INTEL, | 797 | PCI_VENDOR_ID_INTEL, |
813 | PCI_DEVICE_ID_INTEL_5400_FBD1); | 798 | PCI_DEVICE_ID_INTEL_5400_FBD1); |
814 | 799 | goto error; | |
815 | pci_dev_put(pvt->branch_0); | ||
816 | pci_dev_put(pvt->fsb_error_regs); | ||
817 | pci_dev_put(pvt->branchmap_werrors); | ||
818 | return -ENODEV; | ||
819 | } | 800 | } |
820 | 801 | ||
821 | return 0; | 802 | return 0; |
803 | |||
804 | error: | ||
805 | i5400_put_devices(mci); | ||
806 | return -ENODEV; | ||
822 | } | 807 | } |
823 | 808 | ||
824 | /* | 809 | /* |
825 | * determine_amb_present | 810 | * determine_amb_present |
826 | * | 811 | * |
827 | * the information is contained in DIMMS_PER_CHANNEL different | 812 | * the information is contained in NUM_MTRS_PER_BRANCH different |
828 | * registers determining which of the DIMMS_PER_CHANNEL requires | 813 | * registers determining which of the NUM_MTRS_PER_BRANCH requires |
829 | * knowing which channel is in question | 814 | * knowing which channel is in question |
830 | * | 815 | * |
831 | * 2 branches, each with 2 channels | 816 | * 2 branches, each with 2 channels |
@@ -854,11 +839,11 @@ static int determine_amb_present_reg(struct i5400_pvt *pvt, int channel) | |||
854 | } | 839 | } |
855 | 840 | ||
856 | /* | 841 | /* |
857 | * determine_mtr(pvt, dimm, channel) | 842 | * determine_mtr(pvt, csrow, channel) |
858 | * | 843 | * |
859 | * return the proper MTR register as determine by the dimm and desired channel | 844 | * return the proper MTR register as determine by the csrow and desired channel |
860 | */ | 845 | */ |
861 | static int determine_mtr(struct i5400_pvt *pvt, int dimm, int channel) | 846 | static int determine_mtr(struct i5400_pvt *pvt, int csrow, int channel) |
862 | { | 847 | { |
863 | int mtr; | 848 | int mtr; |
864 | int n; | 849 | int n; |
@@ -866,11 +851,11 @@ static int determine_mtr(struct i5400_pvt *pvt, int dimm, int channel) | |||
866 | /* There is one MTR for each slot pair of FB-DIMMs, | 851 | /* There is one MTR for each slot pair of FB-DIMMs, |
867 | Each slot pair may be at branch 0 or branch 1. | 852 | Each slot pair may be at branch 0 or branch 1. |
868 | */ | 853 | */ |
869 | n = dimm; | 854 | n = csrow; |
870 | 855 | ||
871 | if (n >= DIMMS_PER_CHANNEL) { | 856 | if (n >= NUM_MTRS_PER_BRANCH) { |
872 | edac_dbg(0, "ERROR: trying to access an invalid dimm: %d\n", | 857 | debugf0("ERROR: trying to access an invalid csrow: %d\n", |
873 | dimm); | 858 | csrow); |
874 | return 0; | 859 | return 0; |
875 | } | 860 | } |
876 | 861 | ||
@@ -890,44 +875,35 @@ static void decode_mtr(int slot_row, u16 mtr) | |||
890 | 875 | ||
891 | ans = MTR_DIMMS_PRESENT(mtr); | 876 | ans = MTR_DIMMS_PRESENT(mtr); |
892 | 877 | ||
893 | edac_dbg(2, "\tMTR%d=0x%x: DIMMs are %sPresent\n", | 878 | debugf2("\tMTR%d=0x%x: DIMMs are %s\n", slot_row, mtr, |
894 | slot_row, mtr, ans ? "" : "NOT "); | 879 | ans ? "Present" : "NOT Present"); |
895 | if (!ans) | 880 | if (!ans) |
896 | return; | 881 | return; |
897 | 882 | ||
898 | edac_dbg(2, "\t\tWIDTH: x%d\n", MTR_DRAM_WIDTH(mtr)); | 883 | debugf2("\t\tWIDTH: x%d\n", MTR_DRAM_WIDTH(mtr)); |
899 | 884 | ||
900 | edac_dbg(2, "\t\tELECTRICAL THROTTLING is %s\n", | 885 | debugf2("\t\tELECTRICAL THROTTLING is %s\n", |
901 | MTR_DIMMS_ETHROTTLE(mtr) ? "enabled" : "disabled"); | 886 | MTR_DIMMS_ETHROTTLE(mtr) ? "enabled" : "disabled"); |
902 | 887 | ||
903 | edac_dbg(2, "\t\tNUMBANK: %d bank(s)\n", MTR_DRAM_BANKS(mtr)); | 888 | debugf2("\t\tNUMBANK: %d bank(s)\n", MTR_DRAM_BANKS(mtr)); |
904 | edac_dbg(2, "\t\tNUMRANK: %s\n", | 889 | debugf2("\t\tNUMRANK: %s\n", MTR_DIMM_RANK(mtr) ? "double" : "single"); |
905 | MTR_DIMM_RANK(mtr) ? "double" : "single"); | 890 | debugf2("\t\tNUMROW: %s\n", numrow_toString[MTR_DIMM_ROWS(mtr)]); |
906 | edac_dbg(2, "\t\tNUMROW: %s\n", | 891 | debugf2("\t\tNUMCOL: %s\n", numcol_toString[MTR_DIMM_COLS(mtr)]); |
907 | MTR_DIMM_ROWS(mtr) == 0 ? "8,192 - 13 rows" : | ||
908 | MTR_DIMM_ROWS(mtr) == 1 ? "16,384 - 14 rows" : | ||
909 | MTR_DIMM_ROWS(mtr) == 2 ? "32,768 - 15 rows" : | ||
910 | "65,536 - 16 rows"); | ||
911 | edac_dbg(2, "\t\tNUMCOL: %s\n", | ||
912 | MTR_DIMM_COLS(mtr) == 0 ? "1,024 - 10 columns" : | ||
913 | MTR_DIMM_COLS(mtr) == 1 ? "2,048 - 11 columns" : | ||
914 | MTR_DIMM_COLS(mtr) == 2 ? "4,096 - 12 columns" : | ||
915 | "reserved"); | ||
916 | } | 892 | } |
917 | 893 | ||
918 | static void handle_channel(struct i5400_pvt *pvt, int dimm, int channel, | 894 | static void handle_channel(struct i5400_pvt *pvt, int csrow, int channel, |
919 | struct i5400_dimm_info *dinfo) | 895 | struct i5400_dimm_info *dinfo) |
920 | { | 896 | { |
921 | int mtr; | 897 | int mtr; |
922 | int amb_present_reg; | 898 | int amb_present_reg; |
923 | int addrBits; | 899 | int addrBits; |
924 | 900 | ||
925 | mtr = determine_mtr(pvt, dimm, channel); | 901 | mtr = determine_mtr(pvt, csrow, channel); |
926 | if (MTR_DIMMS_PRESENT(mtr)) { | 902 | if (MTR_DIMMS_PRESENT(mtr)) { |
927 | amb_present_reg = determine_amb_present_reg(pvt, channel); | 903 | amb_present_reg = determine_amb_present_reg(pvt, channel); |
928 | 904 | ||
929 | /* Determine if there is a DIMM present in this DIMM slot */ | 905 | /* Determine if there is a DIMM present in this DIMM slot */ |
930 | if (amb_present_reg & (1 << dimm)) { | 906 | if (amb_present_reg & (1 << csrow)) { |
931 | /* Start with the number of bits for a Bank | 907 | /* Start with the number of bits for a Bank |
932 | * on the DRAM */ | 908 | * on the DRAM */ |
933 | addrBits = MTR_DRAM_BANKS_ADDR_BITS(mtr); | 909 | addrBits = MTR_DRAM_BANKS_ADDR_BITS(mtr); |
@@ -956,10 +932,10 @@ static void handle_channel(struct i5400_pvt *pvt, int dimm, int channel, | |||
956 | static void calculate_dimm_size(struct i5400_pvt *pvt) | 932 | static void calculate_dimm_size(struct i5400_pvt *pvt) |
957 | { | 933 | { |
958 | struct i5400_dimm_info *dinfo; | 934 | struct i5400_dimm_info *dinfo; |
959 | int dimm, max_dimms; | 935 | int csrow, max_csrows; |
960 | char *p, *mem_buffer; | 936 | char *p, *mem_buffer; |
961 | int space, n; | 937 | int space, n; |
962 | int channel, branch; | 938 | int channel; |
963 | 939 | ||
964 | /* ================= Generate some debug output ================= */ | 940 | /* ================= Generate some debug output ================= */ |
965 | space = PAGE_SIZE; | 941 | space = PAGE_SIZE; |
@@ -970,52 +946,52 @@ static void calculate_dimm_size(struct i5400_pvt *pvt) | |||
970 | return; | 946 | return; |
971 | } | 947 | } |
972 | 948 | ||
973 | /* Scan all the actual DIMMS | 949 | /* Scan all the actual CSROWS |
974 | * and calculate the information for each DIMM | 950 | * and calculate the information for each DIMM |
975 | * Start with the highest dimm first, to display it first | 951 | * Start with the highest csrow first, to display it first |
976 | * and work toward the 0th dimm | 952 | * and work toward the 0th csrow |
977 | */ | 953 | */ |
978 | max_dimms = pvt->maxdimmperch; | 954 | max_csrows = pvt->maxdimmperch; |
979 | for (dimm = max_dimms - 1; dimm >= 0; dimm--) { | 955 | for (csrow = max_csrows - 1; csrow >= 0; csrow--) { |
980 | 956 | ||
981 | /* on an odd dimm, first output a 'boundary' marker, | 957 | /* on an odd csrow, first output a 'boundary' marker, |
982 | * then reset the message buffer */ | 958 | * then reset the message buffer */ |
983 | if (dimm & 0x1) { | 959 | if (csrow & 0x1) { |
984 | n = snprintf(p, space, "---------------------------" | 960 | n = snprintf(p, space, "---------------------------" |
985 | "-------------------------------"); | 961 | "--------------------------------"); |
986 | p += n; | 962 | p += n; |
987 | space -= n; | 963 | space -= n; |
988 | edac_dbg(2, "%s\n", mem_buffer); | 964 | debugf2("%s\n", mem_buffer); |
989 | p = mem_buffer; | 965 | p = mem_buffer; |
990 | space = PAGE_SIZE; | 966 | space = PAGE_SIZE; |
991 | } | 967 | } |
992 | n = snprintf(p, space, "dimm %2d ", dimm); | 968 | n = snprintf(p, space, "csrow %2d ", csrow); |
993 | p += n; | 969 | p += n; |
994 | space -= n; | 970 | space -= n; |
995 | 971 | ||
996 | for (channel = 0; channel < pvt->maxch; channel++) { | 972 | for (channel = 0; channel < pvt->maxch; channel++) { |
997 | dinfo = &pvt->dimm_info[dimm][channel]; | 973 | dinfo = &pvt->dimm_info[csrow][channel]; |
998 | handle_channel(pvt, dimm, channel, dinfo); | 974 | handle_channel(pvt, csrow, channel, dinfo); |
999 | n = snprintf(p, space, "%4d MB | ", dinfo->megabytes); | 975 | n = snprintf(p, space, "%4d MB | ", dinfo->megabytes); |
1000 | p += n; | 976 | p += n; |
1001 | space -= n; | 977 | space -= n; |
1002 | } | 978 | } |
1003 | edac_dbg(2, "%s\n", mem_buffer); | 979 | debugf2("%s\n", mem_buffer); |
1004 | p = mem_buffer; | 980 | p = mem_buffer; |
1005 | space = PAGE_SIZE; | 981 | space = PAGE_SIZE; |
1006 | } | 982 | } |
1007 | 983 | ||
1008 | /* Output the last bottom 'boundary' marker */ | 984 | /* Output the last bottom 'boundary' marker */ |
1009 | n = snprintf(p, space, "---------------------------" | 985 | n = snprintf(p, space, "---------------------------" |
1010 | "-------------------------------"); | 986 | "--------------------------------"); |
1011 | p += n; | 987 | p += n; |
1012 | space -= n; | 988 | space -= n; |
1013 | edac_dbg(2, "%s\n", mem_buffer); | 989 | debugf2("%s\n", mem_buffer); |
1014 | p = mem_buffer; | 990 | p = mem_buffer; |
1015 | space = PAGE_SIZE; | 991 | space = PAGE_SIZE; |
1016 | 992 | ||
1017 | /* now output the 'channel' labels */ | 993 | /* now output the 'channel' labels */ |
1018 | n = snprintf(p, space, " "); | 994 | n = snprintf(p, space, " "); |
1019 | p += n; | 995 | p += n; |
1020 | space -= n; | 996 | space -= n; |
1021 | for (channel = 0; channel < pvt->maxch; channel++) { | 997 | for (channel = 0; channel < pvt->maxch; channel++) { |
@@ -1024,21 +1000,8 @@ static void calculate_dimm_size(struct i5400_pvt *pvt) | |||
1024 | space -= n; | 1000 | space -= n; |
1025 | } | 1001 | } |
1026 | 1002 | ||
1027 | space -= n; | ||
1028 | edac_dbg(2, "%s\n", mem_buffer); | ||
1029 | p = mem_buffer; | ||
1030 | space = PAGE_SIZE; | ||
1031 | |||
1032 | n = snprintf(p, space, " "); | ||
1033 | p += n; | ||
1034 | for (branch = 0; branch < MAX_BRANCHES; branch++) { | ||
1035 | n = snprintf(p, space, " branch %d | ", branch); | ||
1036 | p += n; | ||
1037 | space -= n; | ||
1038 | } | ||
1039 | |||
1040 | /* output the last message and free buffer */ | 1003 | /* output the last message and free buffer */ |
1041 | edac_dbg(2, "%s\n", mem_buffer); | 1004 | debugf2("%s\n", mem_buffer); |
1042 | kfree(mem_buffer); | 1005 | kfree(mem_buffer); |
1043 | } | 1006 | } |
1044 | 1007 | ||
@@ -1061,25 +1024,25 @@ static void i5400_get_mc_regs(struct mem_ctl_info *mci) | |||
1061 | pvt = mci->pvt_info; | 1024 | pvt = mci->pvt_info; |
1062 | 1025 | ||
1063 | pci_read_config_dword(pvt->system_address, AMBASE, | 1026 | pci_read_config_dword(pvt->system_address, AMBASE, |
1064 | &pvt->u.ambase_bottom); | 1027 | (u32 *) &pvt->ambase); |
1065 | pci_read_config_dword(pvt->system_address, AMBASE + sizeof(u32), | 1028 | pci_read_config_dword(pvt->system_address, AMBASE + sizeof(u32), |
1066 | &pvt->u.ambase_top); | 1029 | ((u32 *) &pvt->ambase) + sizeof(u32)); |
1067 | 1030 | ||
1068 | maxdimmperch = pvt->maxdimmperch; | 1031 | maxdimmperch = pvt->maxdimmperch; |
1069 | maxch = pvt->maxch; | 1032 | maxch = pvt->maxch; |
1070 | 1033 | ||
1071 | edac_dbg(2, "AMBASE= 0x%lx MAXCH= %d MAX-DIMM-Per-CH= %d\n", | 1034 | debugf2("AMBASE= 0x%lx MAXCH= %d MAX-DIMM-Per-CH= %d\n", |
1072 | (long unsigned int)pvt->ambase, pvt->maxch, pvt->maxdimmperch); | 1035 | (long unsigned int)pvt->ambase, pvt->maxch, pvt->maxdimmperch); |
1073 | 1036 | ||
1074 | /* Get the Branch Map regs */ | 1037 | /* Get the Branch Map regs */ |
1075 | pci_read_config_word(pvt->branchmap_werrors, TOLM, &pvt->tolm); | 1038 | pci_read_config_word(pvt->branchmap_werrors, TOLM, &pvt->tolm); |
1076 | pvt->tolm >>= 12; | 1039 | pvt->tolm >>= 12; |
1077 | edac_dbg(2, "\nTOLM (number of 256M regions) =%u (0x%x)\n", | 1040 | debugf2("\nTOLM (number of 256M regions) =%u (0x%x)\n", pvt->tolm, |
1078 | pvt->tolm, pvt->tolm); | 1041 | pvt->tolm); |
1079 | 1042 | ||
1080 | actual_tolm = (u32) ((1000l * pvt->tolm) >> (30 - 28)); | 1043 | actual_tolm = (u32) ((1000l * pvt->tolm) >> (30 - 28)); |
1081 | edac_dbg(2, "Actual TOLM byte addr=%u.%03u GB (0x%x)\n", | 1044 | debugf2("Actual TOLM byte addr=%u.%03u GB (0x%x)\n", |
1082 | actual_tolm/1000, actual_tolm % 1000, pvt->tolm << 28); | 1045 | actual_tolm/1000, actual_tolm % 1000, pvt->tolm << 28); |
1083 | 1046 | ||
1084 | pci_read_config_word(pvt->branchmap_werrors, MIR0, &pvt->mir0); | 1047 | pci_read_config_word(pvt->branchmap_werrors, MIR0, &pvt->mir0); |
1085 | pci_read_config_word(pvt->branchmap_werrors, MIR1, &pvt->mir1); | 1048 | pci_read_config_word(pvt->branchmap_werrors, MIR1, &pvt->mir1); |
@@ -1088,24 +1051,22 @@ static void i5400_get_mc_regs(struct mem_ctl_info *mci) | |||
1088 | limit = (pvt->mir0 >> 4) & 0x0fff; | 1051 | limit = (pvt->mir0 >> 4) & 0x0fff; |
1089 | way0 = pvt->mir0 & 0x1; | 1052 | way0 = pvt->mir0 & 0x1; |
1090 | way1 = pvt->mir0 & 0x2; | 1053 | way1 = pvt->mir0 & 0x2; |
1091 | edac_dbg(2, "MIR0: limit= 0x%x WAY1= %u WAY0= %x\n", | 1054 | debugf2("MIR0: limit= 0x%x WAY1= %u WAY0= %x\n", limit, way1, way0); |
1092 | limit, way1, way0); | ||
1093 | limit = (pvt->mir1 >> 4) & 0xfff; | 1055 | limit = (pvt->mir1 >> 4) & 0xfff; |
1094 | way0 = pvt->mir1 & 0x1; | 1056 | way0 = pvt->mir1 & 0x1; |
1095 | way1 = pvt->mir1 & 0x2; | 1057 | way1 = pvt->mir1 & 0x2; |
1096 | edac_dbg(2, "MIR1: limit= 0x%x WAY1= %u WAY0= %x\n", | 1058 | debugf2("MIR1: limit= 0x%x WAY1= %u WAY0= %x\n", limit, way1, way0); |
1097 | limit, way1, way0); | ||
1098 | 1059 | ||
1099 | /* Get the set of MTR[0-3] regs by each branch */ | 1060 | /* Get the set of MTR[0-3] regs by each branch */ |
1100 | for (slot_row = 0; slot_row < DIMMS_PER_CHANNEL; slot_row++) { | 1061 | for (slot_row = 0; slot_row < NUM_MTRS_PER_BRANCH; slot_row++) { |
1101 | int where = MTR0 + (slot_row * sizeof(u16)); | 1062 | int where = MTR0 + (slot_row * sizeof(u16)); |
1102 | 1063 | ||
1103 | /* Branch 0 set of MTR registers */ | 1064 | /* Branch 0 set of MTR registers */ |
1104 | pci_read_config_word(pvt->branch_0, where, | 1065 | pci_read_config_word(pvt->branch_0, where, |
1105 | &pvt->b0_mtr[slot_row]); | 1066 | &pvt->b0_mtr[slot_row]); |
1106 | 1067 | ||
1107 | edac_dbg(2, "MTR%d where=0x%x B0 value=0x%x\n", | 1068 | debugf2("MTR%d where=0x%x B0 value=0x%x\n", slot_row, where, |
1108 | slot_row, where, pvt->b0_mtr[slot_row]); | 1069 | pvt->b0_mtr[slot_row]); |
1109 | 1070 | ||
1110 | if (pvt->maxch < CHANNELS_PER_BRANCH) { | 1071 | if (pvt->maxch < CHANNELS_PER_BRANCH) { |
1111 | pvt->b1_mtr[slot_row] = 0; | 1072 | pvt->b1_mtr[slot_row] = 0; |
@@ -1115,22 +1076,22 @@ static void i5400_get_mc_regs(struct mem_ctl_info *mci) | |||
1115 | /* Branch 1 set of MTR registers */ | 1076 | /* Branch 1 set of MTR registers */ |
1116 | pci_read_config_word(pvt->branch_1, where, | 1077 | pci_read_config_word(pvt->branch_1, where, |
1117 | &pvt->b1_mtr[slot_row]); | 1078 | &pvt->b1_mtr[slot_row]); |
1118 | edac_dbg(2, "MTR%d where=0x%x B1 value=0x%x\n", | 1079 | debugf2("MTR%d where=0x%x B1 value=0x%x\n", slot_row, where, |
1119 | slot_row, where, pvt->b1_mtr[slot_row]); | 1080 | pvt->b1_mtr[slot_row]); |
1120 | } | 1081 | } |
1121 | 1082 | ||
1122 | /* Read and dump branch 0's MTRs */ | 1083 | /* Read and dump branch 0's MTRs */ |
1123 | edac_dbg(2, "Memory Technology Registers:\n"); | 1084 | debugf2("\nMemory Technology Registers:\n"); |
1124 | edac_dbg(2, " Branch 0:\n"); | 1085 | debugf2(" Branch 0:\n"); |
1125 | for (slot_row = 0; slot_row < DIMMS_PER_CHANNEL; slot_row++) | 1086 | for (slot_row = 0; slot_row < NUM_MTRS_PER_BRANCH; slot_row++) |
1126 | decode_mtr(slot_row, pvt->b0_mtr[slot_row]); | 1087 | decode_mtr(slot_row, pvt->b0_mtr[slot_row]); |
1127 | 1088 | ||
1128 | pci_read_config_word(pvt->branch_0, AMBPRESENT_0, | 1089 | pci_read_config_word(pvt->branch_0, AMBPRESENT_0, |
1129 | &pvt->b0_ambpresent0); | 1090 | &pvt->b0_ambpresent0); |
1130 | edac_dbg(2, "\t\tAMB-Branch 0-present0 0x%x:\n", pvt->b0_ambpresent0); | 1091 | debugf2("\t\tAMB-Branch 0-present0 0x%x:\n", pvt->b0_ambpresent0); |
1131 | pci_read_config_word(pvt->branch_0, AMBPRESENT_1, | 1092 | pci_read_config_word(pvt->branch_0, AMBPRESENT_1, |
1132 | &pvt->b0_ambpresent1); | 1093 | &pvt->b0_ambpresent1); |
1133 | edac_dbg(2, "\t\tAMB-Branch 0-present1 0x%x:\n", pvt->b0_ambpresent1); | 1094 | debugf2("\t\tAMB-Branch 0-present1 0x%x:\n", pvt->b0_ambpresent1); |
1134 | 1095 | ||
1135 | /* Only if we have 2 branchs (4 channels) */ | 1096 | /* Only if we have 2 branchs (4 channels) */ |
1136 | if (pvt->maxch < CHANNELS_PER_BRANCH) { | 1097 | if (pvt->maxch < CHANNELS_PER_BRANCH) { |
@@ -1138,18 +1099,18 @@ static void i5400_get_mc_regs(struct mem_ctl_info *mci) | |||
1138 | pvt->b1_ambpresent1 = 0; | 1099 | pvt->b1_ambpresent1 = 0; |
1139 | } else { | 1100 | } else { |
1140 | /* Read and dump branch 1's MTRs */ | 1101 | /* Read and dump branch 1's MTRs */ |
1141 | edac_dbg(2, " Branch 1:\n"); | 1102 | debugf2(" Branch 1:\n"); |
1142 | for (slot_row = 0; slot_row < DIMMS_PER_CHANNEL; slot_row++) | 1103 | for (slot_row = 0; slot_row < NUM_MTRS_PER_BRANCH; slot_row++) |
1143 | decode_mtr(slot_row, pvt->b1_mtr[slot_row]); | 1104 | decode_mtr(slot_row, pvt->b1_mtr[slot_row]); |
1144 | 1105 | ||
1145 | pci_read_config_word(pvt->branch_1, AMBPRESENT_0, | 1106 | pci_read_config_word(pvt->branch_1, AMBPRESENT_0, |
1146 | &pvt->b1_ambpresent0); | 1107 | &pvt->b1_ambpresent0); |
1147 | edac_dbg(2, "\t\tAMB-Branch 1-present0 0x%x:\n", | 1108 | debugf2("\t\tAMB-Branch 1-present0 0x%x:\n", |
1148 | pvt->b1_ambpresent0); | 1109 | pvt->b1_ambpresent0); |
1149 | pci_read_config_word(pvt->branch_1, AMBPRESENT_1, | 1110 | pci_read_config_word(pvt->branch_1, AMBPRESENT_1, |
1150 | &pvt->b1_ambpresent1); | 1111 | &pvt->b1_ambpresent1); |
1151 | edac_dbg(2, "\t\tAMB-Branch 1-present1 0x%x:\n", | 1112 | debugf2("\t\tAMB-Branch 1-present1 0x%x:\n", |
1152 | pvt->b1_ambpresent1); | 1113 | pvt->b1_ambpresent1); |
1153 | } | 1114 | } |
1154 | 1115 | ||
1155 | /* Go and determine the size of each DIMM and place in an | 1116 | /* Go and determine the size of each DIMM and place in an |
@@ -1158,7 +1119,7 @@ static void i5400_get_mc_regs(struct mem_ctl_info *mci) | |||
1158 | } | 1119 | } |
1159 | 1120 | ||
1160 | /* | 1121 | /* |
1161 | * i5400_init_dimms Initialize the 'dimms' table within | 1122 | * i5400_init_csrows Initialize the 'csrows' table within |
1162 | * the mci control structure with the | 1123 | * the mci control structure with the |
1163 | * addressing of memory. | 1124 | * addressing of memory. |
1164 | * | 1125 | * |
@@ -1166,67 +1127,64 @@ static void i5400_get_mc_regs(struct mem_ctl_info *mci) | |||
1166 | * 0 success | 1127 | * 0 success |
1167 | * 1 no actual memory found on this MC | 1128 | * 1 no actual memory found on this MC |
1168 | */ | 1129 | */ |
1169 | static int i5400_init_dimms(struct mem_ctl_info *mci) | 1130 | static int i5400_init_csrows(struct mem_ctl_info *mci) |
1170 | { | 1131 | { |
1171 | struct i5400_pvt *pvt; | 1132 | struct i5400_pvt *pvt; |
1172 | struct dimm_info *dimm; | 1133 | struct csrow_info *p_csrow; |
1173 | int ndimms, channel_count; | 1134 | int empty, channel_count; |
1174 | int max_dimms; | 1135 | int max_csrows; |
1175 | int mtr; | 1136 | int mtr; |
1176 | int size_mb; | 1137 | int csrow_megs; |
1177 | int channel, slot; | 1138 | int channel; |
1139 | int csrow; | ||
1178 | 1140 | ||
1179 | pvt = mci->pvt_info; | 1141 | pvt = mci->pvt_info; |
1180 | 1142 | ||
1181 | channel_count = pvt->maxch; | 1143 | channel_count = pvt->maxch; |
1182 | max_dimms = pvt->maxdimmperch; | 1144 | max_csrows = pvt->maxdimmperch; |
1183 | 1145 | ||
1184 | ndimms = 0; | 1146 | empty = 1; /* Assume NO memory */ |
1185 | 1147 | ||
1186 | /* | 1148 | for (csrow = 0; csrow < max_csrows; csrow++) { |
1187 | * FIXME: remove pvt->dimm_info[slot][channel] and use the 3 | 1149 | p_csrow = &mci->csrows[csrow]; |
1188 | * layers here. | ||
1189 | */ | ||
1190 | for (channel = 0; channel < mci->layers[0].size * mci->layers[1].size; | ||
1191 | channel++) { | ||
1192 | for (slot = 0; slot < mci->layers[2].size; slot++) { | ||
1193 | mtr = determine_mtr(pvt, slot, channel); | ||
1194 | |||
1195 | /* if no DIMMS on this slot, continue */ | ||
1196 | if (!MTR_DIMMS_PRESENT(mtr)) | ||
1197 | continue; | ||
1198 | |||
1199 | dimm = EDAC_DIMM_PTR(mci->layers, mci->dimms, mci->n_layers, | ||
1200 | channel / 2, channel % 2, slot); | ||
1201 | |||
1202 | size_mb = pvt->dimm_info[slot][channel].megabytes; | ||
1203 | |||
1204 | edac_dbg(2, "dimm (branch %d channel %d slot %d): %d.%03d GB\n", | ||
1205 | channel / 2, channel % 2, slot, | ||
1206 | size_mb / 1000, size_mb % 1000); | ||
1207 | |||
1208 | dimm->nr_pages = size_mb << 8; | ||
1209 | dimm->grain = 8; | ||
1210 | dimm->dtype = MTR_DRAM_WIDTH(mtr) ? DEV_X8 : DEV_X4; | ||
1211 | dimm->mtype = MEM_FB_DDR2; | ||
1212 | /* | ||
1213 | * The eccc mechanism is SDDC (aka SECC), with | ||
1214 | * is similar to Chipkill. | ||
1215 | */ | ||
1216 | dimm->edac_mode = MTR_DRAM_WIDTH(mtr) ? | ||
1217 | EDAC_S8ECD8ED : EDAC_S4ECD4ED; | ||
1218 | ndimms++; | ||
1219 | } | ||
1220 | } | ||
1221 | 1150 | ||
1222 | /* | 1151 | p_csrow->csrow_idx = csrow; |
1223 | * When just one memory is provided, it should be at location (0,0,0). | 1152 | |
1224 | * With such single-DIMM mode, the SDCC algorithm degrades to SECDEC+. | 1153 | /* use branch 0 for the basis */ |
1225 | */ | 1154 | mtr = determine_mtr(pvt, csrow, 0); |
1226 | if (ndimms == 1) | ||
1227 | mci->dimms[0]->edac_mode = EDAC_SECDED; | ||
1228 | 1155 | ||
1229 | return (ndimms == 0); | 1156 | /* if no DIMMS on this row, continue */ |
1157 | if (!MTR_DIMMS_PRESENT(mtr)) | ||
1158 | continue; | ||
1159 | |||
1160 | /* FAKE OUT VALUES, FIXME */ | ||
1161 | p_csrow->first_page = 0 + csrow * 20; | ||
1162 | p_csrow->last_page = 9 + csrow * 20; | ||
1163 | p_csrow->page_mask = 0xFFF; | ||
1164 | |||
1165 | p_csrow->grain = 8; | ||
1166 | |||
1167 | csrow_megs = 0; | ||
1168 | for (channel = 0; channel < pvt->maxch; channel++) | ||
1169 | csrow_megs += pvt->dimm_info[csrow][channel].megabytes; | ||
1170 | |||
1171 | p_csrow->nr_pages = csrow_megs << 8; | ||
1172 | |||
1173 | /* Assume DDR2 for now */ | ||
1174 | p_csrow->mtype = MEM_FB_DDR2; | ||
1175 | |||
1176 | /* ask what device type on this row */ | ||
1177 | if (MTR_DRAM_WIDTH(mtr)) | ||
1178 | p_csrow->dtype = DEV_X8; | ||
1179 | else | ||
1180 | p_csrow->dtype = DEV_X4; | ||
1181 | |||
1182 | p_csrow->edac_mode = EDAC_S8ECD8ED; | ||
1183 | |||
1184 | empty = 0; | ||
1185 | } | ||
1186 | |||
1187 | return empty; | ||
1230 | } | 1188 | } |
1231 | 1189 | ||
1232 | /* | 1190 | /* |
@@ -1262,45 +1220,50 @@ static int i5400_probe1(struct pci_dev *pdev, int dev_idx) | |||
1262 | { | 1220 | { |
1263 | struct mem_ctl_info *mci; | 1221 | struct mem_ctl_info *mci; |
1264 | struct i5400_pvt *pvt; | 1222 | struct i5400_pvt *pvt; |
1265 | struct edac_mc_layer layers[3]; | 1223 | int num_channels; |
1224 | int num_dimms_per_channel; | ||
1225 | int num_csrows; | ||
1266 | 1226 | ||
1267 | if (dev_idx >= ARRAY_SIZE(i5400_devs)) | 1227 | if (dev_idx >= ARRAY_SIZE(i5400_devs)) |
1268 | return -EINVAL; | 1228 | return -EINVAL; |
1269 | 1229 | ||
1270 | edac_dbg(0, "MC: pdev bus %u dev=0x%x fn=0x%x\n", | 1230 | debugf0("MC: %s: %s(), pdev bus %u dev=0x%x fn=0x%x\n", |
1271 | pdev->bus->number, | 1231 | __FILE__, __func__, |
1272 | PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn)); | 1232 | pdev->bus->number, |
1233 | PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn)); | ||
1273 | 1234 | ||
1274 | /* We only are looking for func 0 of the set */ | 1235 | /* We only are looking for func 0 of the set */ |
1275 | if (PCI_FUNC(pdev->devfn) != 0) | 1236 | if (PCI_FUNC(pdev->devfn) != 0) |
1276 | return -ENODEV; | 1237 | return -ENODEV; |
1277 | 1238 | ||
1278 | /* | 1239 | /* As we don't have a motherboard identification routine to determine |
1279 | * allocate a new MC control structure | 1240 | * actual number of slots/dimms per channel, we thus utilize the |
1280 | * | 1241 | * resource as specified by the chipset. Thus, we might have |
1281 | * This drivers uses the DIMM slot as "csrow" and the rest as "channel". | 1242 | * have more DIMMs per channel than actually on the mobo, but this |
1243 | * allows the driver to support up to the chipset max, without | ||
1244 | * some fancy mobo determination. | ||
1282 | */ | 1245 | */ |
1283 | layers[0].type = EDAC_MC_LAYER_BRANCH; | 1246 | num_dimms_per_channel = MAX_DIMMS_PER_CHANNEL; |
1284 | layers[0].size = MAX_BRANCHES; | 1247 | num_channels = MAX_CHANNELS; |
1285 | layers[0].is_virt_csrow = false; | 1248 | num_csrows = num_dimms_per_channel; |
1286 | layers[1].type = EDAC_MC_LAYER_CHANNEL; | 1249 | |
1287 | layers[1].size = CHANNELS_PER_BRANCH; | 1250 | debugf0("MC: %s(): Number of - Channels= %d DIMMS= %d CSROWS= %d\n", |
1288 | layers[1].is_virt_csrow = false; | 1251 | __func__, num_channels, num_dimms_per_channel, num_csrows); |
1289 | layers[2].type = EDAC_MC_LAYER_SLOT; | 1252 | |
1290 | layers[2].size = DIMMS_PER_CHANNEL; | 1253 | /* allocate a new MC control structure */ |
1291 | layers[2].is_virt_csrow = true; | 1254 | mci = edac_mc_alloc(sizeof(*pvt), num_csrows, num_channels, 0); |
1292 | mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers, sizeof(*pvt)); | 1255 | |
1293 | if (mci == NULL) | 1256 | if (mci == NULL) |
1294 | return -ENOMEM; | 1257 | return -ENOMEM; |
1295 | 1258 | ||
1296 | edac_dbg(0, "MC: mci = %p\n", mci); | 1259 | debugf0("MC: %s: %s(): mci = %p\n", __FILE__, __func__, mci); |
1297 | 1260 | ||
1298 | mci->pdev = &pdev->dev; /* record ptr to the generic device */ | 1261 | mci->dev = &pdev->dev; /* record ptr to the generic device */ |
1299 | 1262 | ||
1300 | pvt = mci->pvt_info; | 1263 | pvt = mci->pvt_info; |
1301 | pvt->system_address = pdev; /* Record this device in our private */ | 1264 | pvt->system_address = pdev; /* Record this device in our private */ |
1302 | pvt->maxch = MAX_CHANNELS; | 1265 | pvt->maxch = num_channels; |
1303 | pvt->maxdimmperch = DIMMS_PER_CHANNEL; | 1266 | pvt->maxdimmperch = num_dimms_per_channel; |
1304 | 1267 | ||
1305 | /* 'get' the pci devices we want to reserve for our use */ | 1268 | /* 'get' the pci devices we want to reserve for our use */ |
1306 | if (i5400_get_devices(mci, dev_idx)) | 1269 | if (i5400_get_devices(mci, dev_idx)) |
@@ -1322,19 +1285,22 @@ static int i5400_probe1(struct pci_dev *pdev, int dev_idx) | |||
1322 | /* Set the function pointer to an actual operation function */ | 1285 | /* Set the function pointer to an actual operation function */ |
1323 | mci->edac_check = i5400_check_error; | 1286 | mci->edac_check = i5400_check_error; |
1324 | 1287 | ||
1325 | /* initialize the MC control structure 'dimms' table | 1288 | /* initialize the MC control structure 'csrows' table |
1326 | * with the mapping and control information */ | 1289 | * with the mapping and control information */ |
1327 | if (i5400_init_dimms(mci)) { | 1290 | if (i5400_init_csrows(mci)) { |
1328 | edac_dbg(0, "MC: Setting mci->edac_cap to EDAC_FLAG_NONE because i5400_init_dimms() returned nonzero value\n"); | 1291 | debugf0("MC: Setting mci->edac_cap to EDAC_FLAG_NONE\n" |
1329 | mci->edac_cap = EDAC_FLAG_NONE; /* no dimms found */ | 1292 | " because i5400_init_csrows() returned nonzero " |
1293 | "value\n"); | ||
1294 | mci->edac_cap = EDAC_FLAG_NONE; /* no csrows found */ | ||
1330 | } else { | 1295 | } else { |
1331 | edac_dbg(1, "MC: Enable error reporting now\n"); | 1296 | debugf1("MC: Enable error reporting now\n"); |
1332 | i5400_enable_error_reporting(mci); | 1297 | i5400_enable_error_reporting(mci); |
1333 | } | 1298 | } |
1334 | 1299 | ||
1335 | /* add this new MC control structure to EDAC's list of MCs */ | 1300 | /* add this new MC control structure to EDAC's list of MCs */ |
1336 | if (edac_mc_add_mc(mci)) { | 1301 | if (edac_mc_add_mc(mci)) { |
1337 | edac_dbg(0, "MC: failed edac_mc_add_mc()\n"); | 1302 | debugf0("MC: %s: %s(): failed edac_mc_add_mc()\n", |
1303 | __FILE__, __func__); | ||
1338 | /* FIXME: perhaps some code should go here that disables error | 1304 | /* FIXME: perhaps some code should go here that disables error |
1339 | * reporting if we just enabled it | 1305 | * reporting if we just enabled it |
1340 | */ | 1306 | */ |
@@ -1373,11 +1339,12 @@ fail0: | |||
1373 | * negative on error | 1339 | * negative on error |
1374 | * count (>= 0) | 1340 | * count (>= 0) |
1375 | */ | 1341 | */ |
1376 | static int i5400_init_one(struct pci_dev *pdev, const struct pci_device_id *id) | 1342 | static int __devinit i5400_init_one(struct pci_dev *pdev, |
1343 | const struct pci_device_id *id) | ||
1377 | { | 1344 | { |
1378 | int rc; | 1345 | int rc; |
1379 | 1346 | ||
1380 | edac_dbg(0, "MC:\n"); | 1347 | debugf0("MC: %s: %s()\n", __FILE__, __func__); |
1381 | 1348 | ||
1382 | /* wake up device */ | 1349 | /* wake up device */ |
1383 | rc = pci_enable_device(pdev); | 1350 | rc = pci_enable_device(pdev); |
@@ -1392,11 +1359,11 @@ static int i5400_init_one(struct pci_dev *pdev, const struct pci_device_id *id) | |||
1392 | * i5400_remove_one destructor for one instance of device | 1359 | * i5400_remove_one destructor for one instance of device |
1393 | * | 1360 | * |
1394 | */ | 1361 | */ |
1395 | static void i5400_remove_one(struct pci_dev *pdev) | 1362 | static void __devexit i5400_remove_one(struct pci_dev *pdev) |
1396 | { | 1363 | { |
1397 | struct mem_ctl_info *mci; | 1364 | struct mem_ctl_info *mci; |
1398 | 1365 | ||
1399 | edac_dbg(0, "\n"); | 1366 | debugf0("%s: %s()\n", __FILE__, __func__); |
1400 | 1367 | ||
1401 | if (i5400_pci) | 1368 | if (i5400_pci) |
1402 | edac_pci_release_generic_ctl(i5400_pci); | 1369 | edac_pci_release_generic_ctl(i5400_pci); |
@@ -1416,7 +1383,7 @@ static void i5400_remove_one(struct pci_dev *pdev) | |||
1416 | * | 1383 | * |
1417 | * The "E500P" device is the first device supported. | 1384 | * The "E500P" device is the first device supported. |
1418 | */ | 1385 | */ |
1419 | static DEFINE_PCI_DEVICE_TABLE(i5400_pci_tbl) = { | 1386 | static const struct pci_device_id i5400_pci_tbl[] __devinitdata = { |
1420 | {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_5400_ERR)}, | 1387 | {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_5400_ERR)}, |
1421 | {0,} /* 0 terminated list. */ | 1388 | {0,} /* 0 terminated list. */ |
1422 | }; | 1389 | }; |
@@ -1430,7 +1397,7 @@ MODULE_DEVICE_TABLE(pci, i5400_pci_tbl); | |||
1430 | static struct pci_driver i5400_driver = { | 1397 | static struct pci_driver i5400_driver = { |
1431 | .name = "i5400_edac", | 1398 | .name = "i5400_edac", |
1432 | .probe = i5400_init_one, | 1399 | .probe = i5400_init_one, |
1433 | .remove = i5400_remove_one, | 1400 | .remove = __devexit_p(i5400_remove_one), |
1434 | .id_table = i5400_pci_tbl, | 1401 | .id_table = i5400_pci_tbl, |
1435 | }; | 1402 | }; |
1436 | 1403 | ||
@@ -1442,7 +1409,7 @@ static int __init i5400_init(void) | |||
1442 | { | 1409 | { |
1443 | int pci_rc; | 1410 | int pci_rc; |
1444 | 1411 | ||
1445 | edac_dbg(2, "MC:\n"); | 1412 | debugf2("MC: %s: %s()\n", __FILE__, __func__); |
1446 | 1413 | ||
1447 | /* Ensure that the OPSTATE is set correctly for POLL or NMI */ | 1414 | /* Ensure that the OPSTATE is set correctly for POLL or NMI */ |
1448 | opstate_init(); | 1415 | opstate_init(); |
@@ -1458,7 +1425,7 @@ static int __init i5400_init(void) | |||
1458 | */ | 1425 | */ |
1459 | static void __exit i5400_exit(void) | 1426 | static void __exit i5400_exit(void) |
1460 | { | 1427 | { |
1461 | edac_dbg(2, "MC:\n"); | 1428 | debugf2("MC: %s: %s()\n", __FILE__, __func__); |
1462 | pci_unregister_driver(&i5400_driver); | 1429 | pci_unregister_driver(&i5400_driver); |
1463 | } | 1430 | } |
1464 | 1431 | ||
diff --git a/drivers/edac/i7300_edac.c b/drivers/edac/i7300_edac.c index 087c27bc5d4..a76fe8366b6 100644 --- a/drivers/edac/i7300_edac.c +++ b/drivers/edac/i7300_edac.c | |||
@@ -182,6 +182,24 @@ static const u16 mtr_regs[MAX_SLOTS] = { | |||
182 | #define MTR_DIMM_COLS(mtr) ((mtr) & 0x3) | 182 | #define MTR_DIMM_COLS(mtr) ((mtr) & 0x3) |
183 | #define MTR_DIMM_COLS_ADDR_BITS(mtr) (MTR_DIMM_COLS(mtr) + 10) | 183 | #define MTR_DIMM_COLS_ADDR_BITS(mtr) (MTR_DIMM_COLS(mtr) + 10) |
184 | 184 | ||
185 | #ifdef CONFIG_EDAC_DEBUG | ||
186 | /* MTR NUMROW */ | ||
187 | static const char *numrow_toString[] = { | ||
188 | "8,192 - 13 rows", | ||
189 | "16,384 - 14 rows", | ||
190 | "32,768 - 15 rows", | ||
191 | "65,536 - 16 rows" | ||
192 | }; | ||
193 | |||
194 | /* MTR NUMCOL */ | ||
195 | static const char *numcol_toString[] = { | ||
196 | "1,024 - 10 columns", | ||
197 | "2,048 - 11 columns", | ||
198 | "4,096 - 12 columns", | ||
199 | "reserved" | ||
200 | }; | ||
201 | #endif | ||
202 | |||
185 | /************************************************ | 203 | /************************************************ |
186 | * i7300 Register definitions for error detection | 204 | * i7300 Register definitions for error detection |
187 | ************************************************/ | 205 | ************************************************/ |
@@ -197,8 +215,8 @@ static const char *ferr_fat_fbd_name[] = { | |||
197 | [0] = "Memory Write error on non-redundant retry or " | 215 | [0] = "Memory Write error on non-redundant retry or " |
198 | "FBD configuration Write error on retry", | 216 | "FBD configuration Write error on retry", |
199 | }; | 217 | }; |
200 | #define GET_FBD_FAT_IDX(fbderr) (((fbderr) >> 28) & 3) | 218 | #define GET_FBD_FAT_IDX(fbderr) (fbderr & (3 << 28)) |
201 | #define FERR_FAT_FBD_ERR_MASK ((1 << 0) | (1 << 1) | (1 << 2) | (1 << 22)) | 219 | #define FERR_FAT_FBD_ERR_MASK ((1 << 0) | (1 << 1) | (1 << 2) | (1 << 3)) |
202 | 220 | ||
203 | #define FERR_NF_FBD 0xa0 | 221 | #define FERR_NF_FBD 0xa0 |
204 | static const char *ferr_nf_fbd_name[] = { | 222 | static const char *ferr_nf_fbd_name[] = { |
@@ -225,7 +243,7 @@ static const char *ferr_nf_fbd_name[] = { | |||
225 | [1] = "Aliased Uncorrectable Non-Mirrored Demand Data ECC", | 243 | [1] = "Aliased Uncorrectable Non-Mirrored Demand Data ECC", |
226 | [0] = "Uncorrectable Data ECC on Replay", | 244 | [0] = "Uncorrectable Data ECC on Replay", |
227 | }; | 245 | }; |
228 | #define GET_FBD_NF_IDX(fbderr) (((fbderr) >> 28) & 3) | 246 | #define GET_FBD_NF_IDX(fbderr) (fbderr & (3 << 28)) |
229 | #define FERR_NF_FBD_ERR_MASK ((1 << 24) | (1 << 23) | (1 << 22) | (1 << 21) |\ | 247 | #define FERR_NF_FBD_ERR_MASK ((1 << 24) | (1 << 23) | (1 << 22) | (1 << 21) |\ |
230 | (1 << 18) | (1 << 17) | (1 << 16) | (1 << 15) |\ | 248 | (1 << 18) | (1 << 17) | (1 << 16) | (1 << 15) |\ |
231 | (1 << 14) | (1 << 13) | (1 << 11) | (1 << 10) |\ | 249 | (1 << 14) | (1 << 13) | (1 << 11) | (1 << 10) |\ |
@@ -354,7 +372,7 @@ static const char *get_err_from_table(const char *table[], int size, int pos) | |||
354 | static void i7300_process_error_global(struct mem_ctl_info *mci) | 372 | static void i7300_process_error_global(struct mem_ctl_info *mci) |
355 | { | 373 | { |
356 | struct i7300_pvt *pvt; | 374 | struct i7300_pvt *pvt; |
357 | u32 errnum, error_reg; | 375 | u32 errnum, value; |
358 | unsigned long errors; | 376 | unsigned long errors; |
359 | const char *specific; | 377 | const char *specific; |
360 | bool is_fatal; | 378 | bool is_fatal; |
@@ -363,9 +381,9 @@ static void i7300_process_error_global(struct mem_ctl_info *mci) | |||
363 | 381 | ||
364 | /* read in the 1st FATAL error register */ | 382 | /* read in the 1st FATAL error register */ |
365 | pci_read_config_dword(pvt->pci_dev_16_2_fsb_err_regs, | 383 | pci_read_config_dword(pvt->pci_dev_16_2_fsb_err_regs, |
366 | FERR_GLOBAL_HI, &error_reg); | 384 | FERR_GLOBAL_HI, &value); |
367 | if (unlikely(error_reg)) { | 385 | if (unlikely(value)) { |
368 | errors = error_reg; | 386 | errors = value; |
369 | errnum = find_first_bit(&errors, | 387 | errnum = find_first_bit(&errors, |
370 | ARRAY_SIZE(ferr_global_hi_name)); | 388 | ARRAY_SIZE(ferr_global_hi_name)); |
371 | specific = GET_ERR_FROM_TABLE(ferr_global_hi_name, errnum); | 389 | specific = GET_ERR_FROM_TABLE(ferr_global_hi_name, errnum); |
@@ -373,15 +391,15 @@ static void i7300_process_error_global(struct mem_ctl_info *mci) | |||
373 | 391 | ||
374 | /* Clear the error bit */ | 392 | /* Clear the error bit */ |
375 | pci_write_config_dword(pvt->pci_dev_16_2_fsb_err_regs, | 393 | pci_write_config_dword(pvt->pci_dev_16_2_fsb_err_regs, |
376 | FERR_GLOBAL_HI, error_reg); | 394 | FERR_GLOBAL_HI, value); |
377 | 395 | ||
378 | goto error_global; | 396 | goto error_global; |
379 | } | 397 | } |
380 | 398 | ||
381 | pci_read_config_dword(pvt->pci_dev_16_2_fsb_err_regs, | 399 | pci_read_config_dword(pvt->pci_dev_16_2_fsb_err_regs, |
382 | FERR_GLOBAL_LO, &error_reg); | 400 | FERR_GLOBAL_LO, &value); |
383 | if (unlikely(error_reg)) { | 401 | if (unlikely(value)) { |
384 | errors = error_reg; | 402 | errors = value; |
385 | errnum = find_first_bit(&errors, | 403 | errnum = find_first_bit(&errors, |
386 | ARRAY_SIZE(ferr_global_lo_name)); | 404 | ARRAY_SIZE(ferr_global_lo_name)); |
387 | specific = GET_ERR_FROM_TABLE(ferr_global_lo_name, errnum); | 405 | specific = GET_ERR_FROM_TABLE(ferr_global_lo_name, errnum); |
@@ -389,7 +407,7 @@ static void i7300_process_error_global(struct mem_ctl_info *mci) | |||
389 | 407 | ||
390 | /* Clear the error bit */ | 408 | /* Clear the error bit */ |
391 | pci_write_config_dword(pvt->pci_dev_16_2_fsb_err_regs, | 409 | pci_write_config_dword(pvt->pci_dev_16_2_fsb_err_regs, |
392 | FERR_GLOBAL_LO, error_reg); | 410 | FERR_GLOBAL_LO, value); |
393 | 411 | ||
394 | goto error_global; | 412 | goto error_global; |
395 | } | 413 | } |
@@ -409,7 +427,7 @@ error_global: | |||
409 | static void i7300_process_fbd_error(struct mem_ctl_info *mci) | 427 | static void i7300_process_fbd_error(struct mem_ctl_info *mci) |
410 | { | 428 | { |
411 | struct i7300_pvt *pvt; | 429 | struct i7300_pvt *pvt; |
412 | u32 errnum, value, error_reg; | 430 | u32 errnum, value; |
413 | u16 val16; | 431 | u16 val16; |
414 | unsigned branch, channel, bank, rank, cas, ras; | 432 | unsigned branch, channel, bank, rank, cas, ras; |
415 | u32 syndrome; | 433 | u32 syndrome; |
@@ -422,14 +440,14 @@ static void i7300_process_fbd_error(struct mem_ctl_info *mci) | |||
422 | 440 | ||
423 | /* read in the 1st FATAL error register */ | 441 | /* read in the 1st FATAL error register */ |
424 | pci_read_config_dword(pvt->pci_dev_16_1_fsb_addr_map, | 442 | pci_read_config_dword(pvt->pci_dev_16_1_fsb_addr_map, |
425 | FERR_FAT_FBD, &error_reg); | 443 | FERR_FAT_FBD, &value); |
426 | if (unlikely(error_reg & FERR_FAT_FBD_ERR_MASK)) { | 444 | if (unlikely(value & FERR_FAT_FBD_ERR_MASK)) { |
427 | errors = error_reg & FERR_FAT_FBD_ERR_MASK ; | 445 | errors = value & FERR_FAT_FBD_ERR_MASK ; |
428 | errnum = find_first_bit(&errors, | 446 | errnum = find_first_bit(&errors, |
429 | ARRAY_SIZE(ferr_fat_fbd_name)); | 447 | ARRAY_SIZE(ferr_fat_fbd_name)); |
430 | specific = GET_ERR_FROM_TABLE(ferr_fat_fbd_name, errnum); | 448 | specific = GET_ERR_FROM_TABLE(ferr_fat_fbd_name, errnum); |
431 | branch = (GET_FBD_FAT_IDX(error_reg) == 2) ? 1 : 0; | ||
432 | 449 | ||
450 | branch = (GET_FBD_FAT_IDX(value) == 2) ? 1 : 0; | ||
433 | pci_read_config_word(pvt->pci_dev_16_1_fsb_addr_map, | 451 | pci_read_config_word(pvt->pci_dev_16_1_fsb_addr_map, |
434 | NRECMEMA, &val16); | 452 | NRECMEMA, &val16); |
435 | bank = NRECMEMA_BANK(val16); | 453 | bank = NRECMEMA_BANK(val16); |
@@ -437,38 +455,42 @@ static void i7300_process_fbd_error(struct mem_ctl_info *mci) | |||
437 | 455 | ||
438 | pci_read_config_dword(pvt->pci_dev_16_1_fsb_addr_map, | 456 | pci_read_config_dword(pvt->pci_dev_16_1_fsb_addr_map, |
439 | NRECMEMB, &value); | 457 | NRECMEMB, &value); |
458 | |||
440 | is_wr = NRECMEMB_IS_WR(value); | 459 | is_wr = NRECMEMB_IS_WR(value); |
441 | cas = NRECMEMB_CAS(value); | 460 | cas = NRECMEMB_CAS(value); |
442 | ras = NRECMEMB_RAS(value); | 461 | ras = NRECMEMB_RAS(value); |
443 | 462 | ||
444 | /* Clean the error register */ | ||
445 | pci_write_config_dword(pvt->pci_dev_16_1_fsb_addr_map, | ||
446 | FERR_FAT_FBD, error_reg); | ||
447 | |||
448 | snprintf(pvt->tmp_prt_buffer, PAGE_SIZE, | 463 | snprintf(pvt->tmp_prt_buffer, PAGE_SIZE, |
449 | "Bank=%d RAS=%d CAS=%d Err=0x%lx (%s))", | 464 | "FATAL (Branch=%d DRAM-Bank=%d %s " |
450 | bank, ras, cas, errors, specific); | 465 | "RAS=%d CAS=%d Err=0x%lx (%s))", |
451 | 466 | branch, bank, | |
452 | edac_mc_handle_error(HW_EVENT_ERR_FATAL, mci, 1, 0, 0, 0, | 467 | is_wr ? "RDWR" : "RD", |
453 | branch, -1, rank, | 468 | ras, cas, |
454 | is_wr ? "Write error" : "Read error", | 469 | errors, specific); |
455 | pvt->tmp_prt_buffer); | 470 | |
456 | 471 | /* Call the helper to output message */ | |
472 | edac_mc_handle_fbd_ue(mci, rank, branch << 1, | ||
473 | (branch << 1) + 1, | ||
474 | pvt->tmp_prt_buffer); | ||
457 | } | 475 | } |
458 | 476 | ||
459 | /* read in the 1st NON-FATAL error register */ | 477 | /* read in the 1st NON-FATAL error register */ |
460 | pci_read_config_dword(pvt->pci_dev_16_1_fsb_addr_map, | 478 | pci_read_config_dword(pvt->pci_dev_16_1_fsb_addr_map, |
461 | FERR_NF_FBD, &error_reg); | 479 | FERR_NF_FBD, &value); |
462 | if (unlikely(error_reg & FERR_NF_FBD_ERR_MASK)) { | 480 | if (unlikely(value & FERR_NF_FBD_ERR_MASK)) { |
463 | errors = error_reg & FERR_NF_FBD_ERR_MASK; | 481 | errors = value & FERR_NF_FBD_ERR_MASK; |
464 | errnum = find_first_bit(&errors, | 482 | errnum = find_first_bit(&errors, |
465 | ARRAY_SIZE(ferr_nf_fbd_name)); | 483 | ARRAY_SIZE(ferr_nf_fbd_name)); |
466 | specific = GET_ERR_FROM_TABLE(ferr_nf_fbd_name, errnum); | 484 | specific = GET_ERR_FROM_TABLE(ferr_nf_fbd_name, errnum); |
467 | branch = (GET_FBD_NF_IDX(error_reg) == 2) ? 1 : 0; | 485 | |
486 | /* Clear the error bit */ | ||
487 | pci_write_config_dword(pvt->pci_dev_16_2_fsb_err_regs, | ||
488 | FERR_GLOBAL_LO, value); | ||
468 | 489 | ||
469 | pci_read_config_dword(pvt->pci_dev_16_1_fsb_addr_map, | 490 | pci_read_config_dword(pvt->pci_dev_16_1_fsb_addr_map, |
470 | REDMEMA, &syndrome); | 491 | REDMEMA, &syndrome); |
471 | 492 | ||
493 | branch = (GET_FBD_FAT_IDX(value) == 2) ? 1 : 0; | ||
472 | pci_read_config_word(pvt->pci_dev_16_1_fsb_addr_map, | 494 | pci_read_config_word(pvt->pci_dev_16_1_fsb_addr_map, |
473 | RECMEMA, &val16); | 495 | RECMEMA, &val16); |
474 | bank = RECMEMA_BANK(val16); | 496 | bank = RECMEMA_BANK(val16); |
@@ -476,30 +498,37 @@ static void i7300_process_fbd_error(struct mem_ctl_info *mci) | |||
476 | 498 | ||
477 | pci_read_config_dword(pvt->pci_dev_16_1_fsb_addr_map, | 499 | pci_read_config_dword(pvt->pci_dev_16_1_fsb_addr_map, |
478 | RECMEMB, &value); | 500 | RECMEMB, &value); |
501 | |||
479 | is_wr = RECMEMB_IS_WR(value); | 502 | is_wr = RECMEMB_IS_WR(value); |
480 | cas = RECMEMB_CAS(value); | 503 | cas = RECMEMB_CAS(value); |
481 | ras = RECMEMB_RAS(value); | 504 | ras = RECMEMB_RAS(value); |
482 | 505 | ||
483 | pci_read_config_dword(pvt->pci_dev_16_1_fsb_addr_map, | 506 | pci_read_config_dword(pvt->pci_dev_16_1_fsb_addr_map, |
484 | REDMEMB, &value); | 507 | REDMEMB, &value); |
508 | |||
485 | channel = (branch << 1); | 509 | channel = (branch << 1); |
486 | if (IS_SECOND_CH(value)) | 510 | if (IS_SECOND_CH(value)) |
487 | channel++; | 511 | channel++; |
488 | 512 | ||
489 | /* Clear the error bit */ | ||
490 | pci_write_config_dword(pvt->pci_dev_16_1_fsb_addr_map, | ||
491 | FERR_NF_FBD, error_reg); | ||
492 | |||
493 | /* Form out message */ | 513 | /* Form out message */ |
494 | snprintf(pvt->tmp_prt_buffer, PAGE_SIZE, | 514 | snprintf(pvt->tmp_prt_buffer, PAGE_SIZE, |
495 | "DRAM-Bank=%d RAS=%d CAS=%d, Err=0x%lx (%s))", | 515 | "Corrected error (Branch=%d, Channel %d), " |
496 | bank, ras, cas, errors, specific); | 516 | " DRAM-Bank=%d %s " |
497 | 517 | "RAS=%d CAS=%d, CE Err=0x%lx, Syndrome=0x%08x(%s))", | |
498 | edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1, 0, 0, | 518 | branch, channel, |
499 | syndrome, | 519 | bank, |
500 | branch >> 1, channel % 2, rank, | 520 | is_wr ? "RDWR" : "RD", |
501 | is_wr ? "Write error" : "Read error", | 521 | ras, cas, |
502 | pvt->tmp_prt_buffer); | 522 | errors, syndrome, specific); |
523 | |||
524 | /* | ||
525 | * Call the helper to output message | ||
526 | * NOTE: Errors are reported per-branch, and not per-channel | ||
527 | * Currently, we don't know how to identify the right | ||
528 | * channel. | ||
529 | */ | ||
530 | edac_mc_handle_fbd_ce(mci, rank, channel, | ||
531 | pvt->tmp_prt_buffer); | ||
503 | } | 532 | } |
504 | return; | 533 | return; |
505 | } | 534 | } |
@@ -587,7 +616,8 @@ static void i7300_enable_error_reporting(struct mem_ctl_info *mci) | |||
587 | static int decode_mtr(struct i7300_pvt *pvt, | 616 | static int decode_mtr(struct i7300_pvt *pvt, |
588 | int slot, int ch, int branch, | 617 | int slot, int ch, int branch, |
589 | struct i7300_dimm_info *dinfo, | 618 | struct i7300_dimm_info *dinfo, |
590 | struct dimm_info *dimm) | 619 | struct csrow_info *p_csrow, |
620 | u32 *nr_pages) | ||
591 | { | 621 | { |
592 | int mtr, ans, addrBits, channel; | 622 | int mtr, ans, addrBits, channel; |
593 | 623 | ||
@@ -596,8 +626,9 @@ static int decode_mtr(struct i7300_pvt *pvt, | |||
596 | mtr = pvt->mtr[slot][branch]; | 626 | mtr = pvt->mtr[slot][branch]; |
597 | ans = MTR_DIMMS_PRESENT(mtr) ? 1 : 0; | 627 | ans = MTR_DIMMS_PRESENT(mtr) ? 1 : 0; |
598 | 628 | ||
599 | edac_dbg(2, "\tMTR%d CH%d: DIMMs are %sPresent (mtr)\n", | 629 | debugf2("\tMTR%d CH%d: DIMMs are %s (mtr)\n", |
600 | slot, channel, ans ? "" : "NOT "); | 630 | slot, channel, |
631 | ans ? "Present" : "NOT Present"); | ||
601 | 632 | ||
602 | /* Determine if there is a DIMM present in this DIMM slot */ | 633 | /* Determine if there is a DIMM present in this DIMM slot */ |
603 | if (!ans) | 634 | if (!ans) |
@@ -618,26 +649,23 @@ static int decode_mtr(struct i7300_pvt *pvt, | |||
618 | addrBits -= 3; /* 8 bits per bytes */ | 649 | addrBits -= 3; /* 8 bits per bytes */ |
619 | 650 | ||
620 | dinfo->megabytes = 1 << addrBits; | 651 | dinfo->megabytes = 1 << addrBits; |
652 | *nr_pages = dinfo->megabytes << 8; | ||
653 | |||
654 | debugf2("\t\tWIDTH: x%d\n", MTR_DRAM_WIDTH(mtr)); | ||
655 | |||
656 | debugf2("\t\tELECTRICAL THROTTLING is %s\n", | ||
657 | MTR_DIMMS_ETHROTTLE(mtr) ? "enabled" : "disabled"); | ||
658 | |||
659 | debugf2("\t\tNUMBANK: %d bank(s)\n", MTR_DRAM_BANKS(mtr)); | ||
660 | debugf2("\t\tNUMRANK: %s\n", MTR_DIMM_RANKS(mtr) ? "double" : "single"); | ||
661 | debugf2("\t\tNUMROW: %s\n", numrow_toString[MTR_DIMM_ROWS(mtr)]); | ||
662 | debugf2("\t\tNUMCOL: %s\n", numcol_toString[MTR_DIMM_COLS(mtr)]); | ||
663 | debugf2("\t\tSIZE: %d MB\n", dinfo->megabytes); | ||
621 | 664 | ||
622 | edac_dbg(2, "\t\tWIDTH: x%d\n", MTR_DRAM_WIDTH(mtr)); | 665 | p_csrow->grain = 8; |
623 | 666 | p_csrow->mtype = MEM_FB_DDR2; | |
624 | edac_dbg(2, "\t\tELECTRICAL THROTTLING is %s\n", | 667 | p_csrow->csrow_idx = slot; |
625 | MTR_DIMMS_ETHROTTLE(mtr) ? "enabled" : "disabled"); | 668 | p_csrow->page_mask = 0; |
626 | |||
627 | edac_dbg(2, "\t\tNUMBANK: %d bank(s)\n", MTR_DRAM_BANKS(mtr)); | ||
628 | edac_dbg(2, "\t\tNUMRANK: %s\n", | ||
629 | MTR_DIMM_RANKS(mtr) ? "double" : "single"); | ||
630 | edac_dbg(2, "\t\tNUMROW: %s\n", | ||
631 | MTR_DIMM_ROWS(mtr) == 0 ? "8,192 - 13 rows" : | ||
632 | MTR_DIMM_ROWS(mtr) == 1 ? "16,384 - 14 rows" : | ||
633 | MTR_DIMM_ROWS(mtr) == 2 ? "32,768 - 15 rows" : | ||
634 | "65,536 - 16 rows"); | ||
635 | edac_dbg(2, "\t\tNUMCOL: %s\n", | ||
636 | MTR_DIMM_COLS(mtr) == 0 ? "1,024 - 10 columns" : | ||
637 | MTR_DIMM_COLS(mtr) == 1 ? "2,048 - 11 columns" : | ||
638 | MTR_DIMM_COLS(mtr) == 2 ? "4,096 - 12 columns" : | ||
639 | "reserved"); | ||
640 | edac_dbg(2, "\t\tSIZE: %d MB\n", dinfo->megabytes); | ||
641 | 669 | ||
642 | /* | 670 | /* |
643 | * The type of error detection actually depends of the | 671 | * The type of error detection actually depends of the |
@@ -648,29 +676,26 @@ static int decode_mtr(struct i7300_pvt *pvt, | |||
648 | * See datasheet Sections 7.3.6 to 7.3.8 | 676 | * See datasheet Sections 7.3.6 to 7.3.8 |
649 | */ | 677 | */ |
650 | 678 | ||
651 | dimm->nr_pages = MiB_TO_PAGES(dinfo->megabytes); | ||
652 | dimm->grain = 8; | ||
653 | dimm->mtype = MEM_FB_DDR2; | ||
654 | if (IS_SINGLE_MODE(pvt->mc_settings_a)) { | 679 | if (IS_SINGLE_MODE(pvt->mc_settings_a)) { |
655 | dimm->edac_mode = EDAC_SECDED; | 680 | p_csrow->edac_mode = EDAC_SECDED; |
656 | edac_dbg(2, "\t\tECC code is 8-byte-over-32-byte SECDED+ code\n"); | 681 | debugf2("\t\tECC code is 8-byte-over-32-byte SECDED+ code\n"); |
657 | } else { | 682 | } else { |
658 | edac_dbg(2, "\t\tECC code is on Lockstep mode\n"); | 683 | debugf2("\t\tECC code is on Lockstep mode\n"); |
659 | if (MTR_DRAM_WIDTH(mtr) == 8) | 684 | if (MTR_DRAM_WIDTH(mtr) == 8) |
660 | dimm->edac_mode = EDAC_S8ECD8ED; | 685 | p_csrow->edac_mode = EDAC_S8ECD8ED; |
661 | else | 686 | else |
662 | dimm->edac_mode = EDAC_S4ECD4ED; | 687 | p_csrow->edac_mode = EDAC_S4ECD4ED; |
663 | } | 688 | } |
664 | 689 | ||
665 | /* ask what device type on this row */ | 690 | /* ask what device type on this row */ |
666 | if (MTR_DRAM_WIDTH(mtr) == 8) { | 691 | if (MTR_DRAM_WIDTH(mtr) == 8) { |
667 | edac_dbg(2, "\t\tScrub algorithm for x8 is on %s mode\n", | 692 | debugf2("\t\tScrub algorithm for x8 is on %s mode\n", |
668 | IS_SCRBALGO_ENHANCED(pvt->mc_settings) ? | 693 | IS_SCRBALGO_ENHANCED(pvt->mc_settings) ? |
669 | "enhanced" : "normal"); | 694 | "enhanced" : "normal"); |
670 | 695 | ||
671 | dimm->dtype = DEV_X8; | 696 | p_csrow->dtype = DEV_X8; |
672 | } else | 697 | } else |
673 | dimm->dtype = DEV_X4; | 698 | p_csrow->dtype = DEV_X4; |
674 | 699 | ||
675 | return mtr; | 700 | return mtr; |
676 | } | 701 | } |
@@ -700,14 +725,14 @@ static void print_dimm_size(struct i7300_pvt *pvt) | |||
700 | p += n; | 725 | p += n; |
701 | space -= n; | 726 | space -= n; |
702 | } | 727 | } |
703 | edac_dbg(2, "%s\n", pvt->tmp_prt_buffer); | 728 | debugf2("%s\n", pvt->tmp_prt_buffer); |
704 | p = pvt->tmp_prt_buffer; | 729 | p = pvt->tmp_prt_buffer; |
705 | space = PAGE_SIZE; | 730 | space = PAGE_SIZE; |
706 | n = snprintf(p, space, "-------------------------------" | 731 | n = snprintf(p, space, "-------------------------------" |
707 | "------------------------------"); | 732 | "------------------------------"); |
708 | p += n; | 733 | p += n; |
709 | space -= n; | 734 | space -= n; |
710 | edac_dbg(2, "%s\n", pvt->tmp_prt_buffer); | 735 | debugf2("%s\n", pvt->tmp_prt_buffer); |
711 | p = pvt->tmp_prt_buffer; | 736 | p = pvt->tmp_prt_buffer; |
712 | space = PAGE_SIZE; | 737 | space = PAGE_SIZE; |
713 | 738 | ||
@@ -723,7 +748,7 @@ static void print_dimm_size(struct i7300_pvt *pvt) | |||
723 | space -= n; | 748 | space -= n; |
724 | } | 749 | } |
725 | 750 | ||
726 | edac_dbg(2, "%s\n", pvt->tmp_prt_buffer); | 751 | debugf2("%s\n", pvt->tmp_prt_buffer); |
727 | p = pvt->tmp_prt_buffer; | 752 | p = pvt->tmp_prt_buffer; |
728 | space = PAGE_SIZE; | 753 | space = PAGE_SIZE; |
729 | } | 754 | } |
@@ -732,7 +757,7 @@ static void print_dimm_size(struct i7300_pvt *pvt) | |||
732 | "------------------------------"); | 757 | "------------------------------"); |
733 | p += n; | 758 | p += n; |
734 | space -= n; | 759 | space -= n; |
735 | edac_dbg(2, "%s\n", pvt->tmp_prt_buffer); | 760 | debugf2("%s\n", pvt->tmp_prt_buffer); |
736 | p = pvt->tmp_prt_buffer; | 761 | p = pvt->tmp_prt_buffer; |
737 | space = PAGE_SIZE; | 762 | space = PAGE_SIZE; |
738 | #endif | 763 | #endif |
@@ -748,14 +773,15 @@ static int i7300_init_csrows(struct mem_ctl_info *mci) | |||
748 | { | 773 | { |
749 | struct i7300_pvt *pvt; | 774 | struct i7300_pvt *pvt; |
750 | struct i7300_dimm_info *dinfo; | 775 | struct i7300_dimm_info *dinfo; |
776 | struct csrow_info *p_csrow; | ||
751 | int rc = -ENODEV; | 777 | int rc = -ENODEV; |
752 | int mtr; | 778 | int mtr; |
753 | int ch, branch, slot, channel; | 779 | int ch, branch, slot, channel; |
754 | struct dimm_info *dimm; | 780 | u32 last_page = 0, nr_pages; |
755 | 781 | ||
756 | pvt = mci->pvt_info; | 782 | pvt = mci->pvt_info; |
757 | 783 | ||
758 | edac_dbg(2, "Memory Technology Registers:\n"); | 784 | debugf2("Memory Technology Registers:\n"); |
759 | 785 | ||
760 | /* Get the AMB present registers for the four channels */ | 786 | /* Get the AMB present registers for the four channels */ |
761 | for (branch = 0; branch < MAX_BRANCHES; branch++) { | 787 | for (branch = 0; branch < MAX_BRANCHES; branch++) { |
@@ -764,15 +790,15 @@ static int i7300_init_csrows(struct mem_ctl_info *mci) | |||
764 | pci_read_config_word(pvt->pci_dev_2x_0_fbd_branch[branch], | 790 | pci_read_config_word(pvt->pci_dev_2x_0_fbd_branch[branch], |
765 | AMBPRESENT_0, | 791 | AMBPRESENT_0, |
766 | &pvt->ambpresent[channel]); | 792 | &pvt->ambpresent[channel]); |
767 | edac_dbg(2, "\t\tAMB-present CH%d = 0x%x:\n", | 793 | debugf2("\t\tAMB-present CH%d = 0x%x:\n", |
768 | channel, pvt->ambpresent[channel]); | 794 | channel, pvt->ambpresent[channel]); |
769 | 795 | ||
770 | channel = to_channel(1, branch); | 796 | channel = to_channel(1, branch); |
771 | pci_read_config_word(pvt->pci_dev_2x_0_fbd_branch[branch], | 797 | pci_read_config_word(pvt->pci_dev_2x_0_fbd_branch[branch], |
772 | AMBPRESENT_1, | 798 | AMBPRESENT_1, |
773 | &pvt->ambpresent[channel]); | 799 | &pvt->ambpresent[channel]); |
774 | edac_dbg(2, "\t\tAMB-present CH%d = 0x%x:\n", | 800 | debugf2("\t\tAMB-present CH%d = 0x%x:\n", |
775 | channel, pvt->ambpresent[channel]); | 801 | channel, pvt->ambpresent[channel]); |
776 | } | 802 | } |
777 | 803 | ||
778 | /* Get the set of MTR[0-7] regs by each branch */ | 804 | /* Get the set of MTR[0-7] regs by each branch */ |
@@ -782,23 +808,25 @@ static int i7300_init_csrows(struct mem_ctl_info *mci) | |||
782 | pci_read_config_word(pvt->pci_dev_2x_0_fbd_branch[branch], | 808 | pci_read_config_word(pvt->pci_dev_2x_0_fbd_branch[branch], |
783 | where, | 809 | where, |
784 | &pvt->mtr[slot][branch]); | 810 | &pvt->mtr[slot][branch]); |
785 | for (ch = 0; ch < MAX_CH_PER_BRANCH; ch++) { | 811 | for (ch = 0; ch < MAX_BRANCHES; ch++) { |
786 | int channel = to_channel(ch, branch); | 812 | int channel = to_channel(ch, branch); |
787 | 813 | ||
788 | dimm = EDAC_DIMM_PTR(mci->layers, mci->dimms, | ||
789 | mci->n_layers, branch, ch, slot); | ||
790 | |||
791 | dinfo = &pvt->dimm_info[slot][channel]; | 814 | dinfo = &pvt->dimm_info[slot][channel]; |
815 | p_csrow = &mci->csrows[slot]; | ||
792 | 816 | ||
793 | mtr = decode_mtr(pvt, slot, ch, branch, | 817 | mtr = decode_mtr(pvt, slot, ch, branch, |
794 | dinfo, dimm); | 818 | dinfo, p_csrow, &nr_pages); |
795 | |||
796 | /* if no DIMMS on this row, continue */ | 819 | /* if no DIMMS on this row, continue */ |
797 | if (!MTR_DIMMS_PRESENT(mtr)) | 820 | if (!MTR_DIMMS_PRESENT(mtr)) |
798 | continue; | 821 | continue; |
799 | 822 | ||
800 | rc = 0; | 823 | /* Update per_csrow memory count */ |
824 | p_csrow->nr_pages += nr_pages; | ||
825 | p_csrow->first_page = last_page; | ||
826 | last_page += nr_pages; | ||
827 | p_csrow->last_page = last_page; | ||
801 | 828 | ||
829 | rc = 0; | ||
802 | } | 830 | } |
803 | } | 831 | } |
804 | } | 832 | } |
@@ -814,11 +842,12 @@ static int i7300_init_csrows(struct mem_ctl_info *mci) | |||
814 | static void decode_mir(int mir_no, u16 mir[MAX_MIR]) | 842 | static void decode_mir(int mir_no, u16 mir[MAX_MIR]) |
815 | { | 843 | { |
816 | if (mir[mir_no] & 3) | 844 | if (mir[mir_no] & 3) |
817 | edac_dbg(2, "MIR%d: limit= 0x%x Branch(es) that participate: %s %s\n", | 845 | debugf2("MIR%d: limit= 0x%x Branch(es) that participate:" |
818 | mir_no, | 846 | " %s %s\n", |
819 | (mir[mir_no] >> 4) & 0xfff, | 847 | mir_no, |
820 | (mir[mir_no] & 1) ? "B0" : "", | 848 | (mir[mir_no] >> 4) & 0xfff, |
821 | (mir[mir_no] & 2) ? "B1" : ""); | 849 | (mir[mir_no] & 1) ? "B0" : "", |
850 | (mir[mir_no] & 2) ? "B1" : ""); | ||
822 | } | 851 | } |
823 | 852 | ||
824 | /** | 853 | /** |
@@ -838,17 +867,17 @@ static int i7300_get_mc_regs(struct mem_ctl_info *mci) | |||
838 | pci_read_config_dword(pvt->pci_dev_16_0_fsb_ctlr, AMBASE, | 867 | pci_read_config_dword(pvt->pci_dev_16_0_fsb_ctlr, AMBASE, |
839 | (u32 *) &pvt->ambase); | 868 | (u32 *) &pvt->ambase); |
840 | 869 | ||
841 | edac_dbg(2, "AMBASE= 0x%lx\n", (long unsigned int)pvt->ambase); | 870 | debugf2("AMBASE= 0x%lx\n", (long unsigned int)pvt->ambase); |
842 | 871 | ||
843 | /* Get the Branch Map regs */ | 872 | /* Get the Branch Map regs */ |
844 | pci_read_config_word(pvt->pci_dev_16_1_fsb_addr_map, TOLM, &pvt->tolm); | 873 | pci_read_config_word(pvt->pci_dev_16_1_fsb_addr_map, TOLM, &pvt->tolm); |
845 | pvt->tolm >>= 12; | 874 | pvt->tolm >>= 12; |
846 | edac_dbg(2, "TOLM (number of 256M regions) =%u (0x%x)\n", | 875 | debugf2("TOLM (number of 256M regions) =%u (0x%x)\n", pvt->tolm, |
847 | pvt->tolm, pvt->tolm); | 876 | pvt->tolm); |
848 | 877 | ||
849 | actual_tolm = (u32) ((1000l * pvt->tolm) >> (30 - 28)); | 878 | actual_tolm = (u32) ((1000l * pvt->tolm) >> (30 - 28)); |
850 | edac_dbg(2, "Actual TOLM byte addr=%u.%03u GB (0x%x)\n", | 879 | debugf2("Actual TOLM byte addr=%u.%03u GB (0x%x)\n", |
851 | actual_tolm/1000, actual_tolm % 1000, pvt->tolm << 28); | 880 | actual_tolm/1000, actual_tolm % 1000, pvt->tolm << 28); |
852 | 881 | ||
853 | /* Get memory controller settings */ | 882 | /* Get memory controller settings */ |
854 | pci_read_config_dword(pvt->pci_dev_16_1_fsb_addr_map, MC_SETTINGS, | 883 | pci_read_config_dword(pvt->pci_dev_16_1_fsb_addr_map, MC_SETTINGS, |
@@ -857,15 +886,15 @@ static int i7300_get_mc_regs(struct mem_ctl_info *mci) | |||
857 | &pvt->mc_settings_a); | 886 | &pvt->mc_settings_a); |
858 | 887 | ||
859 | if (IS_SINGLE_MODE(pvt->mc_settings_a)) | 888 | if (IS_SINGLE_MODE(pvt->mc_settings_a)) |
860 | edac_dbg(0, "Memory controller operating on single mode\n"); | 889 | debugf0("Memory controller operating on single mode\n"); |
861 | else | 890 | else |
862 | edac_dbg(0, "Memory controller operating on %smirrored mode\n", | 891 | debugf0("Memory controller operating on %s mode\n", |
863 | IS_MIRRORED(pvt->mc_settings) ? "" : "non-"); | 892 | IS_MIRRORED(pvt->mc_settings) ? "mirrored" : "non-mirrored"); |
864 | 893 | ||
865 | edac_dbg(0, "Error detection is %s\n", | 894 | debugf0("Error detection is %s\n", |
866 | IS_ECC_ENABLED(pvt->mc_settings) ? "enabled" : "disabled"); | 895 | IS_ECC_ENABLED(pvt->mc_settings) ? "enabled" : "disabled"); |
867 | edac_dbg(0, "Retry is %s\n", | 896 | debugf0("Retry is %s\n", |
868 | IS_RETRY_ENABLED(pvt->mc_settings) ? "enabled" : "disabled"); | 897 | IS_RETRY_ENABLED(pvt->mc_settings) ? "enabled" : "disabled"); |
869 | 898 | ||
870 | /* Get Memory Interleave Range registers */ | 899 | /* Get Memory Interleave Range registers */ |
871 | pci_read_config_word(pvt->pci_dev_16_1_fsb_addr_map, MIR0, | 900 | pci_read_config_word(pvt->pci_dev_16_1_fsb_addr_map, MIR0, |
@@ -923,7 +952,7 @@ static void i7300_put_devices(struct mem_ctl_info *mci) | |||
923 | * Device 21 function 0: PCI_DEVICE_ID_INTEL_I7300_MCH_FB0 | 952 | * Device 21 function 0: PCI_DEVICE_ID_INTEL_I7300_MCH_FB0 |
924 | * Device 22 function 0: PCI_DEVICE_ID_INTEL_I7300_MCH_FB1 | 953 | * Device 22 function 0: PCI_DEVICE_ID_INTEL_I7300_MCH_FB1 |
925 | */ | 954 | */ |
926 | static int i7300_get_devices(struct mem_ctl_info *mci) | 955 | static int __devinit i7300_get_devices(struct mem_ctl_info *mci) |
927 | { | 956 | { |
928 | struct i7300_pvt *pvt; | 957 | struct i7300_pvt *pvt; |
929 | struct pci_dev *pdev; | 958 | struct pci_dev *pdev; |
@@ -959,18 +988,18 @@ static int i7300_get_devices(struct mem_ctl_info *mci) | |||
959 | } | 988 | } |
960 | } | 989 | } |
961 | 990 | ||
962 | edac_dbg(1, "System Address, processor bus- PCI Bus ID: %s %x:%x\n", | 991 | debugf1("System Address, processor bus- PCI Bus ID: %s %x:%x\n", |
963 | pci_name(pvt->pci_dev_16_0_fsb_ctlr), | 992 | pci_name(pvt->pci_dev_16_0_fsb_ctlr), |
964 | pvt->pci_dev_16_0_fsb_ctlr->vendor, | 993 | pvt->pci_dev_16_0_fsb_ctlr->vendor, |
965 | pvt->pci_dev_16_0_fsb_ctlr->device); | 994 | pvt->pci_dev_16_0_fsb_ctlr->device); |
966 | edac_dbg(1, "Branchmap, control and errors - PCI Bus ID: %s %x:%x\n", | 995 | debugf1("Branchmap, control and errors - PCI Bus ID: %s %x:%x\n", |
967 | pci_name(pvt->pci_dev_16_1_fsb_addr_map), | 996 | pci_name(pvt->pci_dev_16_1_fsb_addr_map), |
968 | pvt->pci_dev_16_1_fsb_addr_map->vendor, | 997 | pvt->pci_dev_16_1_fsb_addr_map->vendor, |
969 | pvt->pci_dev_16_1_fsb_addr_map->device); | 998 | pvt->pci_dev_16_1_fsb_addr_map->device); |
970 | edac_dbg(1, "FSB Error Regs - PCI Bus ID: %s %x:%x\n", | 999 | debugf1("FSB Error Regs - PCI Bus ID: %s %x:%x\n", |
971 | pci_name(pvt->pci_dev_16_2_fsb_err_regs), | 1000 | pci_name(pvt->pci_dev_16_2_fsb_err_regs), |
972 | pvt->pci_dev_16_2_fsb_err_regs->vendor, | 1001 | pvt->pci_dev_16_2_fsb_err_regs->vendor, |
973 | pvt->pci_dev_16_2_fsb_err_regs->device); | 1002 | pvt->pci_dev_16_2_fsb_err_regs->device); |
974 | 1003 | ||
975 | pvt->pci_dev_2x_0_fbd_branch[0] = pci_get_device(PCI_VENDOR_ID_INTEL, | 1004 | pvt->pci_dev_2x_0_fbd_branch[0] = pci_get_device(PCI_VENDOR_ID_INTEL, |
976 | PCI_DEVICE_ID_INTEL_I7300_MCH_FB0, | 1005 | PCI_DEVICE_ID_INTEL_I7300_MCH_FB0, |
@@ -1008,11 +1037,14 @@ error: | |||
1008 | * @pdev: struct pci_dev pointer | 1037 | * @pdev: struct pci_dev pointer |
1009 | * @id: struct pci_device_id pointer - currently unused | 1038 | * @id: struct pci_device_id pointer - currently unused |
1010 | */ | 1039 | */ |
1011 | static int i7300_init_one(struct pci_dev *pdev, const struct pci_device_id *id) | 1040 | static int __devinit i7300_init_one(struct pci_dev *pdev, |
1041 | const struct pci_device_id *id) | ||
1012 | { | 1042 | { |
1013 | struct mem_ctl_info *mci; | 1043 | struct mem_ctl_info *mci; |
1014 | struct edac_mc_layer layers[3]; | ||
1015 | struct i7300_pvt *pvt; | 1044 | struct i7300_pvt *pvt; |
1045 | int num_channels; | ||
1046 | int num_dimms_per_channel; | ||
1047 | int num_csrows; | ||
1016 | int rc; | 1048 | int rc; |
1017 | 1049 | ||
1018 | /* wake up device */ | 1050 | /* wake up device */ |
@@ -1020,31 +1052,38 @@ static int i7300_init_one(struct pci_dev *pdev, const struct pci_device_id *id) | |||
1020 | if (rc == -EIO) | 1052 | if (rc == -EIO) |
1021 | return rc; | 1053 | return rc; |
1022 | 1054 | ||
1023 | edac_dbg(0, "MC: pdev bus %u dev=0x%x fn=0x%x\n", | 1055 | debugf0("MC: " __FILE__ ": %s(), pdev bus %u dev=0x%x fn=0x%x\n", |
1024 | pdev->bus->number, | 1056 | __func__, |
1025 | PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn)); | 1057 | pdev->bus->number, |
1058 | PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn)); | ||
1026 | 1059 | ||
1027 | /* We only are looking for func 0 of the set */ | 1060 | /* We only are looking for func 0 of the set */ |
1028 | if (PCI_FUNC(pdev->devfn) != 0) | 1061 | if (PCI_FUNC(pdev->devfn) != 0) |
1029 | return -ENODEV; | 1062 | return -ENODEV; |
1030 | 1063 | ||
1064 | /* As we don't have a motherboard identification routine to determine | ||
1065 | * actual number of slots/dimms per channel, we thus utilize the | ||
1066 | * resource as specified by the chipset. Thus, we might have | ||
1067 | * have more DIMMs per channel than actually on the mobo, but this | ||
1068 | * allows the driver to support up to the chipset max, without | ||
1069 | * some fancy mobo determination. | ||
1070 | */ | ||
1071 | num_dimms_per_channel = MAX_SLOTS; | ||
1072 | num_channels = MAX_CHANNELS; | ||
1073 | num_csrows = MAX_SLOTS * MAX_CHANNELS; | ||
1074 | |||
1075 | debugf0("MC: %s(): Number of - Channels= %d DIMMS= %d CSROWS= %d\n", | ||
1076 | __func__, num_channels, num_dimms_per_channel, num_csrows); | ||
1077 | |||
1031 | /* allocate a new MC control structure */ | 1078 | /* allocate a new MC control structure */ |
1032 | layers[0].type = EDAC_MC_LAYER_BRANCH; | 1079 | mci = edac_mc_alloc(sizeof(*pvt), num_csrows, num_channels, 0); |
1033 | layers[0].size = MAX_BRANCHES; | 1080 | |
1034 | layers[0].is_virt_csrow = false; | ||
1035 | layers[1].type = EDAC_MC_LAYER_CHANNEL; | ||
1036 | layers[1].size = MAX_CH_PER_BRANCH; | ||
1037 | layers[1].is_virt_csrow = true; | ||
1038 | layers[2].type = EDAC_MC_LAYER_SLOT; | ||
1039 | layers[2].size = MAX_SLOTS; | ||
1040 | layers[2].is_virt_csrow = true; | ||
1041 | mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers, sizeof(*pvt)); | ||
1042 | if (mci == NULL) | 1081 | if (mci == NULL) |
1043 | return -ENOMEM; | 1082 | return -ENOMEM; |
1044 | 1083 | ||
1045 | edac_dbg(0, "MC: mci = %p\n", mci); | 1084 | debugf0("MC: " __FILE__ ": %s(): mci = %p\n", __func__, mci); |
1046 | 1085 | ||
1047 | mci->pdev = &pdev->dev; /* record ptr to the generic device */ | 1086 | mci->dev = &pdev->dev; /* record ptr to the generic device */ |
1048 | 1087 | ||
1049 | pvt = mci->pvt_info; | 1088 | pvt = mci->pvt_info; |
1050 | pvt->pci_dev_16_0_fsb_ctlr = pdev; /* Record this device in our private */ | 1089 | pvt->pci_dev_16_0_fsb_ctlr = pdev; /* Record this device in our private */ |
@@ -1075,16 +1114,19 @@ static int i7300_init_one(struct pci_dev *pdev, const struct pci_device_id *id) | |||
1075 | /* initialize the MC control structure 'csrows' table | 1114 | /* initialize the MC control structure 'csrows' table |
1076 | * with the mapping and control information */ | 1115 | * with the mapping and control information */ |
1077 | if (i7300_get_mc_regs(mci)) { | 1116 | if (i7300_get_mc_regs(mci)) { |
1078 | edac_dbg(0, "MC: Setting mci->edac_cap to EDAC_FLAG_NONE because i7300_init_csrows() returned nonzero value\n"); | 1117 | debugf0("MC: Setting mci->edac_cap to EDAC_FLAG_NONE\n" |
1118 | " because i7300_init_csrows() returned nonzero " | ||
1119 | "value\n"); | ||
1079 | mci->edac_cap = EDAC_FLAG_NONE; /* no csrows found */ | 1120 | mci->edac_cap = EDAC_FLAG_NONE; /* no csrows found */ |
1080 | } else { | 1121 | } else { |
1081 | edac_dbg(1, "MC: Enable error reporting now\n"); | 1122 | debugf1("MC: Enable error reporting now\n"); |
1082 | i7300_enable_error_reporting(mci); | 1123 | i7300_enable_error_reporting(mci); |
1083 | } | 1124 | } |
1084 | 1125 | ||
1085 | /* add this new MC control structure to EDAC's list of MCs */ | 1126 | /* add this new MC control structure to EDAC's list of MCs */ |
1086 | if (edac_mc_add_mc(mci)) { | 1127 | if (edac_mc_add_mc(mci)) { |
1087 | edac_dbg(0, "MC: failed edac_mc_add_mc()\n"); | 1128 | debugf0("MC: " __FILE__ |
1129 | ": %s(): failed edac_mc_add_mc()\n", __func__); | ||
1088 | /* FIXME: perhaps some code should go here that disables error | 1130 | /* FIXME: perhaps some code should go here that disables error |
1089 | * reporting if we just enabled it | 1131 | * reporting if we just enabled it |
1090 | */ | 1132 | */ |
@@ -1121,12 +1163,12 @@ fail0: | |||
1121 | * i7300_remove_one() - Remove the driver | 1163 | * i7300_remove_one() - Remove the driver |
1122 | * @pdev: struct pci_dev pointer | 1164 | * @pdev: struct pci_dev pointer |
1123 | */ | 1165 | */ |
1124 | static void i7300_remove_one(struct pci_dev *pdev) | 1166 | static void __devexit i7300_remove_one(struct pci_dev *pdev) |
1125 | { | 1167 | { |
1126 | struct mem_ctl_info *mci; | 1168 | struct mem_ctl_info *mci; |
1127 | char *tmp; | 1169 | char *tmp; |
1128 | 1170 | ||
1129 | edac_dbg(0, "\n"); | 1171 | debugf0(__FILE__ ": %s()\n", __func__); |
1130 | 1172 | ||
1131 | if (i7300_pci) | 1173 | if (i7300_pci) |
1132 | edac_pci_release_generic_ctl(i7300_pci); | 1174 | edac_pci_release_generic_ctl(i7300_pci); |
@@ -1149,7 +1191,7 @@ static void i7300_remove_one(struct pci_dev *pdev) | |||
1149 | * | 1191 | * |
1150 | * Has only 8086:360c PCI ID | 1192 | * Has only 8086:360c PCI ID |
1151 | */ | 1193 | */ |
1152 | static DEFINE_PCI_DEVICE_TABLE(i7300_pci_tbl) = { | 1194 | static const struct pci_device_id i7300_pci_tbl[] __devinitdata = { |
1153 | {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_I7300_MCH_ERR)}, | 1195 | {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_I7300_MCH_ERR)}, |
1154 | {0,} /* 0 terminated list. */ | 1196 | {0,} /* 0 terminated list. */ |
1155 | }; | 1197 | }; |
@@ -1162,7 +1204,7 @@ MODULE_DEVICE_TABLE(pci, i7300_pci_tbl); | |||
1162 | static struct pci_driver i7300_driver = { | 1204 | static struct pci_driver i7300_driver = { |
1163 | .name = "i7300_edac", | 1205 | .name = "i7300_edac", |
1164 | .probe = i7300_init_one, | 1206 | .probe = i7300_init_one, |
1165 | .remove = i7300_remove_one, | 1207 | .remove = __devexit_p(i7300_remove_one), |
1166 | .id_table = i7300_pci_tbl, | 1208 | .id_table = i7300_pci_tbl, |
1167 | }; | 1209 | }; |
1168 | 1210 | ||
@@ -1173,7 +1215,7 @@ static int __init i7300_init(void) | |||
1173 | { | 1215 | { |
1174 | int pci_rc; | 1216 | int pci_rc; |
1175 | 1217 | ||
1176 | edac_dbg(2, "\n"); | 1218 | debugf2("MC: " __FILE__ ": %s()\n", __func__); |
1177 | 1219 | ||
1178 | /* Ensure that the OPSTATE is set correctly for POLL or NMI */ | 1220 | /* Ensure that the OPSTATE is set correctly for POLL or NMI */ |
1179 | opstate_init(); | 1221 | opstate_init(); |
@@ -1188,7 +1230,7 @@ static int __init i7300_init(void) | |||
1188 | */ | 1230 | */ |
1189 | static void __exit i7300_exit(void) | 1231 | static void __exit i7300_exit(void) |
1190 | { | 1232 | { |
1191 | edac_dbg(2, "\n"); | 1233 | debugf2("MC: " __FILE__ ": %s()\n", __func__); |
1192 | pci_unregister_driver(&i7300_driver); | 1234 | pci_unregister_driver(&i7300_driver); |
1193 | } | 1235 | } |
1194 | 1236 | ||
diff --git a/drivers/edac/i7core_edac.c b/drivers/edac/i7core_edac.c index e213d030b0d..f6cf448d69b 100644 --- a/drivers/edac/i7core_edac.c +++ b/drivers/edac/i7core_edac.c | |||
@@ -31,13 +31,11 @@ | |||
31 | #include <linux/pci_ids.h> | 31 | #include <linux/pci_ids.h> |
32 | #include <linux/slab.h> | 32 | #include <linux/slab.h> |
33 | #include <linux/delay.h> | 33 | #include <linux/delay.h> |
34 | #include <linux/dmi.h> | ||
35 | #include <linux/edac.h> | 34 | #include <linux/edac.h> |
36 | #include <linux/mmzone.h> | 35 | #include <linux/mmzone.h> |
36 | #include <linux/edac_mce.h> | ||
37 | #include <linux/smp.h> | 37 | #include <linux/smp.h> |
38 | #include <asm/mce.h> | ||
39 | #include <asm/processor.h> | 38 | #include <asm/processor.h> |
40 | #include <asm/div64.h> | ||
41 | 39 | ||
42 | #include "edac_core.h" | 40 | #include "edac_core.h" |
43 | 41 | ||
@@ -80,8 +78,6 @@ MODULE_PARM_DESC(use_pci_fixup, "Enable PCI fixup to seek for hidden devices"); | |||
80 | /* OFFSETS for Device 0 Function 0 */ | 78 | /* OFFSETS for Device 0 Function 0 */ |
81 | 79 | ||
82 | #define MC_CFG_CONTROL 0x90 | 80 | #define MC_CFG_CONTROL 0x90 |
83 | #define MC_CFG_UNLOCK 0x02 | ||
84 | #define MC_CFG_LOCK 0x00 | ||
85 | 81 | ||
86 | /* OFFSETS for Device 3 Function 0 */ | 82 | /* OFFSETS for Device 3 Function 0 */ |
87 | 83 | ||
@@ -90,7 +86,7 @@ MODULE_PARM_DESC(use_pci_fixup, "Enable PCI fixup to seek for hidden devices"); | |||
90 | #define MC_MAX_DOD 0x64 | 86 | #define MC_MAX_DOD 0x64 |
91 | 87 | ||
92 | /* | 88 | /* |
93 | * OFFSETS for Device 3 Function 4, as indicated on Xeon 5500 datasheet: | 89 | * OFFSETS for Device 3 Function 4, as inicated on Xeon 5500 datasheet: |
94 | * http://www.arrownac.com/manufacturers/intel/s/nehalem/5500-datasheet-v2.pdf | 90 | * http://www.arrownac.com/manufacturers/intel/s/nehalem/5500-datasheet-v2.pdf |
95 | */ | 91 | */ |
96 | 92 | ||
@@ -101,16 +97,7 @@ MODULE_PARM_DESC(use_pci_fixup, "Enable PCI fixup to seek for hidden devices"); | |||
101 | #define DIMM1_COR_ERR(r) (((r) >> 16) & 0x7fff) | 97 | #define DIMM1_COR_ERR(r) (((r) >> 16) & 0x7fff) |
102 | #define DIMM0_COR_ERR(r) ((r) & 0x7fff) | 98 | #define DIMM0_COR_ERR(r) ((r) & 0x7fff) |
103 | 99 | ||
104 | /* OFFSETS for Device 3 Function 2, as indicated on Xeon 5500 datasheet */ | 100 | /* OFFSETS for Device 3 Function 2, as inicated on Xeon 5500 datasheet */ |
105 | #define MC_SSRCONTROL 0x48 | ||
106 | #define SSR_MODE_DISABLE 0x00 | ||
107 | #define SSR_MODE_ENABLE 0x01 | ||
108 | #define SSR_MODE_MASK 0x03 | ||
109 | |||
110 | #define MC_SCRUB_CONTROL 0x4c | ||
111 | #define STARTSCRUB (1 << 24) | ||
112 | #define SCRUBINTERVAL_MASK 0xffffff | ||
113 | |||
114 | #define MC_COR_ECC_CNT_0 0x80 | 101 | #define MC_COR_ECC_CNT_0 0x80 |
115 | #define MC_COR_ECC_CNT_1 0x84 | 102 | #define MC_COR_ECC_CNT_1 0x84 |
116 | #define MC_COR_ECC_CNT_2 0x88 | 103 | #define MC_COR_ECC_CNT_2 0x88 |
@@ -221,9 +208,7 @@ struct i7core_inject { | |||
221 | }; | 208 | }; |
222 | 209 | ||
223 | struct i7core_channel { | 210 | struct i7core_channel { |
224 | bool is_3dimms_present; | 211 | u32 ranks; |
225 | bool is_single_4rank; | ||
226 | bool has_4rank; | ||
227 | u32 dimms; | 212 | u32 dimms; |
228 | }; | 213 | }; |
229 | 214 | ||
@@ -248,8 +233,6 @@ struct i7core_dev { | |||
248 | }; | 233 | }; |
249 | 234 | ||
250 | struct i7core_pvt { | 235 | struct i7core_pvt { |
251 | struct device *addrmatch_dev, *chancounts_dev; | ||
252 | |||
253 | struct pci_dev *pci_noncore; | 236 | struct pci_dev *pci_noncore; |
254 | struct pci_dev *pci_mcr[MAX_MCR_FUNC + 1]; | 237 | struct pci_dev *pci_mcr[MAX_MCR_FUNC + 1]; |
255 | struct pci_dev *pci_ch[NUM_CHANS][MAX_CHAN_FUNC + 1]; | 238 | struct pci_dev *pci_ch[NUM_CHANS][MAX_CHAN_FUNC + 1]; |
@@ -261,6 +244,7 @@ struct i7core_pvt { | |||
261 | struct i7core_channel channel[NUM_CHANS]; | 244 | struct i7core_channel channel[NUM_CHANS]; |
262 | 245 | ||
263 | int ce_count_available; | 246 | int ce_count_available; |
247 | int csrow_map[NUM_CHANS][MAX_DIMMS]; | ||
264 | 248 | ||
265 | /* ECC corrected errors counts per udimm */ | 249 | /* ECC corrected errors counts per udimm */ |
266 | unsigned long udimm_ce_count[MAX_DIMMS]; | 250 | unsigned long udimm_ce_count[MAX_DIMMS]; |
@@ -269,7 +253,10 @@ struct i7core_pvt { | |||
269 | unsigned long rdimm_ce_count[NUM_CHANS][MAX_DIMMS]; | 253 | unsigned long rdimm_ce_count[NUM_CHANS][MAX_DIMMS]; |
270 | int rdimm_last_ce_count[NUM_CHANS][MAX_DIMMS]; | 254 | int rdimm_last_ce_count[NUM_CHANS][MAX_DIMMS]; |
271 | 255 | ||
272 | bool is_registered, enable_scrub; | 256 | unsigned int is_registered; |
257 | |||
258 | /* mcelog glue */ | ||
259 | struct edac_mce edac_mce; | ||
273 | 260 | ||
274 | /* Fifo double buffers */ | 261 | /* Fifo double buffers */ |
275 | struct mce mce_entry[MCE_LOG_LEN]; | 262 | struct mce mce_entry[MCE_LOG_LEN]; |
@@ -281,9 +268,6 @@ struct i7core_pvt { | |||
281 | /* Count indicator to show errors not got */ | 268 | /* Count indicator to show errors not got */ |
282 | unsigned mce_overrun; | 269 | unsigned mce_overrun; |
283 | 270 | ||
284 | /* DCLK Frequency used for computing scrub rate */ | ||
285 | int dclk_freq; | ||
286 | |||
287 | /* Struct to control EDAC polling */ | 271 | /* Struct to control EDAC polling */ |
288 | struct edac_pci_ctl_info *i7core_pci; | 272 | struct edac_pci_ctl_info *i7core_pci; |
289 | }; | 273 | }; |
@@ -297,7 +281,8 @@ static const struct pci_id_descr pci_dev_descr_i7core_nehalem[] = { | |||
297 | /* Memory controller */ | 281 | /* Memory controller */ |
298 | { PCI_DESCR(3, 0, PCI_DEVICE_ID_INTEL_I7_MCR) }, | 282 | { PCI_DESCR(3, 0, PCI_DEVICE_ID_INTEL_I7_MCR) }, |
299 | { PCI_DESCR(3, 1, PCI_DEVICE_ID_INTEL_I7_MC_TAD) }, | 283 | { PCI_DESCR(3, 1, PCI_DEVICE_ID_INTEL_I7_MC_TAD) }, |
300 | /* Exists only for RDIMM */ | 284 | |
285 | /* Exists only for RDIMM */ | ||
301 | { PCI_DESCR(3, 2, PCI_DEVICE_ID_INTEL_I7_MC_RAS), .optional = 1 }, | 286 | { PCI_DESCR(3, 2, PCI_DEVICE_ID_INTEL_I7_MC_RAS), .optional = 1 }, |
302 | { PCI_DESCR(3, 4, PCI_DEVICE_ID_INTEL_I7_MC_TEST) }, | 287 | { PCI_DESCR(3, 4, PCI_DEVICE_ID_INTEL_I7_MC_TEST) }, |
303 | 288 | ||
@@ -318,16 +303,6 @@ static const struct pci_id_descr pci_dev_descr_i7core_nehalem[] = { | |||
318 | { PCI_DESCR(6, 1, PCI_DEVICE_ID_INTEL_I7_MC_CH2_ADDR) }, | 303 | { PCI_DESCR(6, 1, PCI_DEVICE_ID_INTEL_I7_MC_CH2_ADDR) }, |
319 | { PCI_DESCR(6, 2, PCI_DEVICE_ID_INTEL_I7_MC_CH2_RANK) }, | 304 | { PCI_DESCR(6, 2, PCI_DEVICE_ID_INTEL_I7_MC_CH2_RANK) }, |
320 | { PCI_DESCR(6, 3, PCI_DEVICE_ID_INTEL_I7_MC_CH2_TC) }, | 305 | { PCI_DESCR(6, 3, PCI_DEVICE_ID_INTEL_I7_MC_CH2_TC) }, |
321 | |||
322 | /* Generic Non-core registers */ | ||
323 | /* | ||
324 | * This is the PCI device on i7core and on Xeon 35xx (8086:2c41) | ||
325 | * On Xeon 55xx, however, it has a different id (8086:2c40). So, | ||
326 | * the probing code needs to test for the other address in case of | ||
327 | * failure of this one | ||
328 | */ | ||
329 | { PCI_DESCR(0, 0, PCI_DEVICE_ID_INTEL_I7_NONCORE) }, | ||
330 | |||
331 | }; | 306 | }; |
332 | 307 | ||
333 | static const struct pci_id_descr pci_dev_descr_lynnfield[] = { | 308 | static const struct pci_id_descr pci_dev_descr_lynnfield[] = { |
@@ -344,12 +319,6 @@ static const struct pci_id_descr pci_dev_descr_lynnfield[] = { | |||
344 | { PCI_DESCR( 5, 1, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH1_ADDR) }, | 319 | { PCI_DESCR( 5, 1, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH1_ADDR) }, |
345 | { PCI_DESCR( 5, 2, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH1_RANK) }, | 320 | { PCI_DESCR( 5, 2, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH1_RANK) }, |
346 | { PCI_DESCR( 5, 3, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH1_TC) }, | 321 | { PCI_DESCR( 5, 3, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH1_TC) }, |
347 | |||
348 | /* | ||
349 | * This is the PCI device has an alternate address on some | ||
350 | * processors like Core i7 860 | ||
351 | */ | ||
352 | { PCI_DESCR( 0, 0, PCI_DEVICE_ID_INTEL_LYNNFIELD_NONCORE) }, | ||
353 | }; | 322 | }; |
354 | 323 | ||
355 | static const struct pci_id_descr pci_dev_descr_i7core_westmere[] = { | 324 | static const struct pci_id_descr pci_dev_descr_i7core_westmere[] = { |
@@ -377,10 +346,6 @@ static const struct pci_id_descr pci_dev_descr_i7core_westmere[] = { | |||
377 | { PCI_DESCR(6, 1, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH2_ADDR_REV2) }, | 346 | { PCI_DESCR(6, 1, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH2_ADDR_REV2) }, |
378 | { PCI_DESCR(6, 2, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH2_RANK_REV2) }, | 347 | { PCI_DESCR(6, 2, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH2_RANK_REV2) }, |
379 | { PCI_DESCR(6, 3, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH2_TC_REV2) }, | 348 | { PCI_DESCR(6, 3, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH2_TC_REV2) }, |
380 | |||
381 | /* Generic Non-core registers */ | ||
382 | { PCI_DESCR(0, 0, PCI_DEVICE_ID_INTEL_LYNNFIELD_NONCORE_REV2) }, | ||
383 | |||
384 | }; | 349 | }; |
385 | 350 | ||
386 | #define PCI_ID_TABLE_ENTRY(A) { .descr=A, .n_devs = ARRAY_SIZE(A) } | 351 | #define PCI_ID_TABLE_ENTRY(A) { .descr=A, .n_devs = ARRAY_SIZE(A) } |
@@ -394,14 +359,14 @@ static const struct pci_id_table pci_dev_table[] = { | |||
394 | /* | 359 | /* |
395 | * pci_device_id table for which devices we are looking for | 360 | * pci_device_id table for which devices we are looking for |
396 | */ | 361 | */ |
397 | static DEFINE_PCI_DEVICE_TABLE(i7core_pci_tbl) = { | 362 | static const struct pci_device_id i7core_pci_tbl[] __devinitdata = { |
398 | {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_X58_HUB_MGMT)}, | 363 | {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_X58_HUB_MGMT)}, |
399 | {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_LYNNFIELD_QPI_LINK0)}, | 364 | {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_LYNNFIELD_QPI_LINK0)}, |
400 | {0,} /* 0 terminated list. */ | 365 | {0,} /* 0 terminated list. */ |
401 | }; | 366 | }; |
402 | 367 | ||
403 | /**************************************************************************** | 368 | /**************************************************************************** |
404 | Ancillary status routines | 369 | Anciliary status routines |
405 | ****************************************************************************/ | 370 | ****************************************************************************/ |
406 | 371 | ||
407 | /* MC_CONTROL bits */ | 372 | /* MC_CONTROL bits */ |
@@ -495,15 +460,116 @@ static void free_i7core_dev(struct i7core_dev *i7core_dev) | |||
495 | /**************************************************************************** | 460 | /**************************************************************************** |
496 | Memory check routines | 461 | Memory check routines |
497 | ****************************************************************************/ | 462 | ****************************************************************************/ |
463 | static struct pci_dev *get_pdev_slot_func(u8 socket, unsigned slot, | ||
464 | unsigned func) | ||
465 | { | ||
466 | struct i7core_dev *i7core_dev = get_i7core_dev(socket); | ||
467 | int i; | ||
498 | 468 | ||
499 | static int get_dimm_config(struct mem_ctl_info *mci) | 469 | if (!i7core_dev) |
470 | return NULL; | ||
471 | |||
472 | for (i = 0; i < i7core_dev->n_devs; i++) { | ||
473 | if (!i7core_dev->pdev[i]) | ||
474 | continue; | ||
475 | |||
476 | if (PCI_SLOT(i7core_dev->pdev[i]->devfn) == slot && | ||
477 | PCI_FUNC(i7core_dev->pdev[i]->devfn) == func) { | ||
478 | return i7core_dev->pdev[i]; | ||
479 | } | ||
480 | } | ||
481 | |||
482 | return NULL; | ||
483 | } | ||
484 | |||
485 | /** | ||
486 | * i7core_get_active_channels() - gets the number of channels and csrows | ||
487 | * @socket: Quick Path Interconnect socket | ||
488 | * @channels: Number of channels that will be returned | ||
489 | * @csrows: Number of csrows found | ||
490 | * | ||
491 | * Since EDAC core needs to know in advance the number of available channels | ||
492 | * and csrows, in order to allocate memory for csrows/channels, it is needed | ||
493 | * to run two similar steps. At the first step, implemented on this function, | ||
494 | * it checks the number of csrows/channels present at one socket. | ||
495 | * this is used in order to properly allocate the size of mci components. | ||
496 | * | ||
497 | * It should be noticed that none of the current available datasheets explain | ||
498 | * or even mention how csrows are seen by the memory controller. So, we need | ||
499 | * to add a fake description for csrows. | ||
500 | * So, this driver is attributing one DIMM memory for one csrow. | ||
501 | */ | ||
502 | static int i7core_get_active_channels(const u8 socket, unsigned *channels, | ||
503 | unsigned *csrows) | ||
504 | { | ||
505 | struct pci_dev *pdev = NULL; | ||
506 | int i, j; | ||
507 | u32 status, control; | ||
508 | |||
509 | *channels = 0; | ||
510 | *csrows = 0; | ||
511 | |||
512 | pdev = get_pdev_slot_func(socket, 3, 0); | ||
513 | if (!pdev) { | ||
514 | i7core_printk(KERN_ERR, "Couldn't find socket %d fn 3.0!!!\n", | ||
515 | socket); | ||
516 | return -ENODEV; | ||
517 | } | ||
518 | |||
519 | /* Device 3 function 0 reads */ | ||
520 | pci_read_config_dword(pdev, MC_STATUS, &status); | ||
521 | pci_read_config_dword(pdev, MC_CONTROL, &control); | ||
522 | |||
523 | for (i = 0; i < NUM_CHANS; i++) { | ||
524 | u32 dimm_dod[3]; | ||
525 | /* Check if the channel is active */ | ||
526 | if (!(control & (1 << (8 + i)))) | ||
527 | continue; | ||
528 | |||
529 | /* Check if the channel is disabled */ | ||
530 | if (status & (1 << i)) | ||
531 | continue; | ||
532 | |||
533 | pdev = get_pdev_slot_func(socket, i + 4, 1); | ||
534 | if (!pdev) { | ||
535 | i7core_printk(KERN_ERR, "Couldn't find socket %d " | ||
536 | "fn %d.%d!!!\n", | ||
537 | socket, i + 4, 1); | ||
538 | return -ENODEV; | ||
539 | } | ||
540 | /* Devices 4-6 function 1 */ | ||
541 | pci_read_config_dword(pdev, | ||
542 | MC_DOD_CH_DIMM0, &dimm_dod[0]); | ||
543 | pci_read_config_dword(pdev, | ||
544 | MC_DOD_CH_DIMM1, &dimm_dod[1]); | ||
545 | pci_read_config_dword(pdev, | ||
546 | MC_DOD_CH_DIMM2, &dimm_dod[2]); | ||
547 | |||
548 | (*channels)++; | ||
549 | |||
550 | for (j = 0; j < 3; j++) { | ||
551 | if (!DIMM_PRESENT(dimm_dod[j])) | ||
552 | continue; | ||
553 | (*csrows)++; | ||
554 | } | ||
555 | } | ||
556 | |||
557 | debugf0("Number of active channels on socket %d: %d\n", | ||
558 | socket, *channels); | ||
559 | |||
560 | return 0; | ||
561 | } | ||
562 | |||
563 | static int get_dimm_config(const struct mem_ctl_info *mci) | ||
500 | { | 564 | { |
501 | struct i7core_pvt *pvt = mci->pvt_info; | 565 | struct i7core_pvt *pvt = mci->pvt_info; |
566 | struct csrow_info *csr; | ||
502 | struct pci_dev *pdev; | 567 | struct pci_dev *pdev; |
503 | int i, j; | 568 | int i, j; |
569 | int csrow = 0; | ||
570 | unsigned long last_page = 0; | ||
504 | enum edac_type mode; | 571 | enum edac_type mode; |
505 | enum mem_type mtype; | 572 | enum mem_type mtype; |
506 | struct dimm_info *dimm; | ||
507 | 573 | ||
508 | /* Get data from the MC register, function 0 */ | 574 | /* Get data from the MC register, function 0 */ |
509 | pdev = pvt->pci_mcr[0]; | 575 | pdev = pvt->pci_mcr[0]; |
@@ -516,28 +582,29 @@ static int get_dimm_config(struct mem_ctl_info *mci) | |||
516 | pci_read_config_dword(pdev, MC_MAX_DOD, &pvt->info.max_dod); | 582 | pci_read_config_dword(pdev, MC_MAX_DOD, &pvt->info.max_dod); |
517 | pci_read_config_dword(pdev, MC_CHANNEL_MAPPER, &pvt->info.ch_map); | 583 | pci_read_config_dword(pdev, MC_CHANNEL_MAPPER, &pvt->info.ch_map); |
518 | 584 | ||
519 | edac_dbg(0, "QPI %d control=0x%08x status=0x%08x dod=0x%08x map=0x%08x\n", | 585 | debugf0("QPI %d control=0x%08x status=0x%08x dod=0x%08x map=0x%08x\n", |
520 | pvt->i7core_dev->socket, pvt->info.mc_control, | 586 | pvt->i7core_dev->socket, pvt->info.mc_control, pvt->info.mc_status, |
521 | pvt->info.mc_status, pvt->info.max_dod, pvt->info.ch_map); | 587 | pvt->info.max_dod, pvt->info.ch_map); |
522 | 588 | ||
523 | if (ECC_ENABLED(pvt)) { | 589 | if (ECC_ENABLED(pvt)) { |
524 | edac_dbg(0, "ECC enabled with x%d SDCC\n", ECCx8(pvt) ? 8 : 4); | 590 | debugf0("ECC enabled with x%d SDCC\n", ECCx8(pvt) ? 8 : 4); |
525 | if (ECCx8(pvt)) | 591 | if (ECCx8(pvt)) |
526 | mode = EDAC_S8ECD8ED; | 592 | mode = EDAC_S8ECD8ED; |
527 | else | 593 | else |
528 | mode = EDAC_S4ECD4ED; | 594 | mode = EDAC_S4ECD4ED; |
529 | } else { | 595 | } else { |
530 | edac_dbg(0, "ECC disabled\n"); | 596 | debugf0("ECC disabled\n"); |
531 | mode = EDAC_NONE; | 597 | mode = EDAC_NONE; |
532 | } | 598 | } |
533 | 599 | ||
534 | /* FIXME: need to handle the error codes */ | 600 | /* FIXME: need to handle the error codes */ |
535 | edac_dbg(0, "DOD Max limits: DIMMS: %d, %d-ranked, %d-banked x%x x 0x%x\n", | 601 | debugf0("DOD Max limits: DIMMS: %d, %d-ranked, %d-banked " |
536 | numdimms(pvt->info.max_dod), | 602 | "x%x x 0x%x\n", |
537 | numrank(pvt->info.max_dod >> 2), | 603 | numdimms(pvt->info.max_dod), |
538 | numbank(pvt->info.max_dod >> 4), | 604 | numrank(pvt->info.max_dod >> 2), |
539 | numrow(pvt->info.max_dod >> 6), | 605 | numbank(pvt->info.max_dod >> 4), |
540 | numcol(pvt->info.max_dod >> 9)); | 606 | numrow(pvt->info.max_dod >> 6), |
607 | numcol(pvt->info.max_dod >> 9)); | ||
541 | 608 | ||
542 | for (i = 0; i < NUM_CHANS; i++) { | 609 | for (i = 0; i < NUM_CHANS; i++) { |
543 | u32 data, dimm_dod[3], value[8]; | 610 | u32 data, dimm_dod[3], value[8]; |
@@ -546,11 +613,11 @@ static int get_dimm_config(struct mem_ctl_info *mci) | |||
546 | continue; | 613 | continue; |
547 | 614 | ||
548 | if (!CH_ACTIVE(pvt, i)) { | 615 | if (!CH_ACTIVE(pvt, i)) { |
549 | edac_dbg(0, "Channel %i is not active\n", i); | 616 | debugf0("Channel %i is not active\n", i); |
550 | continue; | 617 | continue; |
551 | } | 618 | } |
552 | if (CH_DISABLED(pvt, i)) { | 619 | if (CH_DISABLED(pvt, i)) { |
553 | edac_dbg(0, "Channel %i is disabled\n", i); | 620 | debugf0("Channel %i is disabled\n", i); |
554 | continue; | 621 | continue; |
555 | } | 622 | } |
556 | 623 | ||
@@ -558,20 +625,21 @@ static int get_dimm_config(struct mem_ctl_info *mci) | |||
558 | pci_read_config_dword(pvt->pci_ch[i][0], | 625 | pci_read_config_dword(pvt->pci_ch[i][0], |
559 | MC_CHANNEL_DIMM_INIT_PARAMS, &data); | 626 | MC_CHANNEL_DIMM_INIT_PARAMS, &data); |
560 | 627 | ||
561 | 628 | pvt->channel[i].ranks = (data & QUAD_RANK_PRESENT) ? | |
562 | if (data & THREE_DIMMS_PRESENT) | 629 | 4 : 2; |
563 | pvt->channel[i].is_3dimms_present = true; | ||
564 | |||
565 | if (data & SINGLE_QUAD_RANK_PRESENT) | ||
566 | pvt->channel[i].is_single_4rank = true; | ||
567 | |||
568 | if (data & QUAD_RANK_PRESENT) | ||
569 | pvt->channel[i].has_4rank = true; | ||
570 | 630 | ||
571 | if (data & REGISTERED_DIMM) | 631 | if (data & REGISTERED_DIMM) |
572 | mtype = MEM_RDDR3; | 632 | mtype = MEM_RDDR3; |
573 | else | 633 | else |
574 | mtype = MEM_DDR3; | 634 | mtype = MEM_DDR3; |
635 | #if 0 | ||
636 | if (data & THREE_DIMMS_PRESENT) | ||
637 | pvt->channel[i].dimms = 3; | ||
638 | else if (data & SINGLE_QUAD_RANK_PRESENT) | ||
639 | pvt->channel[i].dimms = 1; | ||
640 | else | ||
641 | pvt->channel[i].dimms = 2; | ||
642 | #endif | ||
575 | 643 | ||
576 | /* Devices 4-6 function 1 */ | 644 | /* Devices 4-6 function 1 */ |
577 | pci_read_config_dword(pvt->pci_ch[i][1], | 645 | pci_read_config_dword(pvt->pci_ch[i][1], |
@@ -581,14 +649,13 @@ static int get_dimm_config(struct mem_ctl_info *mci) | |||
581 | pci_read_config_dword(pvt->pci_ch[i][1], | 649 | pci_read_config_dword(pvt->pci_ch[i][1], |
582 | MC_DOD_CH_DIMM2, &dimm_dod[2]); | 650 | MC_DOD_CH_DIMM2, &dimm_dod[2]); |
583 | 651 | ||
584 | edac_dbg(0, "Ch%d phy rd%d, wr%d (0x%08x): %s%s%s%cDIMMs\n", | 652 | debugf0("Ch%d phy rd%d, wr%d (0x%08x): " |
585 | i, | 653 | "%d ranks, %cDIMMs\n", |
586 | RDLCH(pvt->info.ch_map, i), WRLCH(pvt->info.ch_map, i), | 654 | i, |
587 | data, | 655 | RDLCH(pvt->info.ch_map, i), WRLCH(pvt->info.ch_map, i), |
588 | pvt->channel[i].is_3dimms_present ? "3DIMMS " : "", | 656 | data, |
589 | pvt->channel[i].is_3dimms_present ? "SINGLE_4R " : "", | 657 | pvt->channel[i].ranks, |
590 | pvt->channel[i].has_4rank ? "HAS_4R " : "", | 658 | (data & REGISTERED_DIMM) ? 'R' : 'U'); |
591 | (data & REGISTERED_DIMM) ? 'R' : 'U'); | ||
592 | 659 | ||
593 | for (j = 0; j < 3; j++) { | 660 | for (j = 0; j < 3; j++) { |
594 | u32 banks, ranks, rows, cols; | 661 | u32 banks, ranks, rows, cols; |
@@ -597,8 +664,6 @@ static int get_dimm_config(struct mem_ctl_info *mci) | |||
597 | if (!DIMM_PRESENT(dimm_dod[j])) | 664 | if (!DIMM_PRESENT(dimm_dod[j])) |
598 | continue; | 665 | continue; |
599 | 666 | ||
600 | dimm = EDAC_DIMM_PTR(mci->layers, mci->dimms, mci->n_layers, | ||
601 | i, j, 0); | ||
602 | banks = numbank(MC_DOD_NUMBANK(dimm_dod[j])); | 667 | banks = numbank(MC_DOD_NUMBANK(dimm_dod[j])); |
603 | ranks = numrank(MC_DOD_NUMRANK(dimm_dod[j])); | 668 | ranks = numrank(MC_DOD_NUMRANK(dimm_dod[j])); |
604 | rows = numrow(MC_DOD_NUMROW(dimm_dod[j])); | 669 | rows = numrow(MC_DOD_NUMROW(dimm_dod[j])); |
@@ -607,35 +672,50 @@ static int get_dimm_config(struct mem_ctl_info *mci) | |||
607 | /* DDR3 has 8 I/O banks */ | 672 | /* DDR3 has 8 I/O banks */ |
608 | size = (rows * cols * banks * ranks) >> (20 - 3); | 673 | size = (rows * cols * banks * ranks) >> (20 - 3); |
609 | 674 | ||
610 | edac_dbg(0, "\tdimm %d %d Mb offset: %x, bank: %d, rank: %d, row: %#x, col: %#x\n", | 675 | pvt->channel[i].dimms++; |
611 | j, size, | 676 | |
612 | RANKOFFSET(dimm_dod[j]), | 677 | debugf0("\tdimm %d %d Mb offset: %x, " |
613 | banks, ranks, rows, cols); | 678 | "bank: %d, rank: %d, row: %#x, col: %#x\n", |
679 | j, size, | ||
680 | RANKOFFSET(dimm_dod[j]), | ||
681 | banks, ranks, rows, cols); | ||
614 | 682 | ||
615 | npages = MiB_TO_PAGES(size); | 683 | npages = MiB_TO_PAGES(size); |
616 | 684 | ||
617 | dimm->nr_pages = npages; | 685 | csr = &mci->csrows[csrow]; |
686 | csr->first_page = last_page + 1; | ||
687 | last_page += npages; | ||
688 | csr->last_page = last_page; | ||
689 | csr->nr_pages = npages; | ||
690 | |||
691 | csr->page_mask = 0; | ||
692 | csr->grain = 8; | ||
693 | csr->csrow_idx = csrow; | ||
694 | csr->nr_channels = 1; | ||
695 | |||
696 | csr->channels[0].chan_idx = i; | ||
697 | csr->channels[0].ce_count = 0; | ||
698 | |||
699 | pvt->csrow_map[i][j] = csrow; | ||
618 | 700 | ||
619 | switch (banks) { | 701 | switch (banks) { |
620 | case 4: | 702 | case 4: |
621 | dimm->dtype = DEV_X4; | 703 | csr->dtype = DEV_X4; |
622 | break; | 704 | break; |
623 | case 8: | 705 | case 8: |
624 | dimm->dtype = DEV_X8; | 706 | csr->dtype = DEV_X8; |
625 | break; | 707 | break; |
626 | case 16: | 708 | case 16: |
627 | dimm->dtype = DEV_X16; | 709 | csr->dtype = DEV_X16; |
628 | break; | 710 | break; |
629 | default: | 711 | default: |
630 | dimm->dtype = DEV_UNKNOWN; | 712 | csr->dtype = DEV_UNKNOWN; |
631 | } | 713 | } |
632 | 714 | ||
633 | snprintf(dimm->label, sizeof(dimm->label), | 715 | csr->edac_mode = mode; |
634 | "CPU#%uChannel#%u_DIMM#%u", | 716 | csr->mtype = mtype; |
635 | pvt->i7core_dev->socket, i, j); | 717 | |
636 | dimm->grain = 8; | 718 | csrow++; |
637 | dimm->edac_mode = mode; | ||
638 | dimm->mtype = mtype; | ||
639 | } | 719 | } |
640 | 720 | ||
641 | pci_read_config_dword(pdev, MC_SAG_CH_0, &value[0]); | 721 | pci_read_config_dword(pdev, MC_SAG_CH_0, &value[0]); |
@@ -646,12 +726,12 @@ static int get_dimm_config(struct mem_ctl_info *mci) | |||
646 | pci_read_config_dword(pdev, MC_SAG_CH_5, &value[5]); | 726 | pci_read_config_dword(pdev, MC_SAG_CH_5, &value[5]); |
647 | pci_read_config_dword(pdev, MC_SAG_CH_6, &value[6]); | 727 | pci_read_config_dword(pdev, MC_SAG_CH_6, &value[6]); |
648 | pci_read_config_dword(pdev, MC_SAG_CH_7, &value[7]); | 728 | pci_read_config_dword(pdev, MC_SAG_CH_7, &value[7]); |
649 | edac_dbg(1, "\t[%i] DIVBY3\tREMOVED\tOFFSET\n", i); | 729 | debugf1("\t[%i] DIVBY3\tREMOVED\tOFFSET\n", i); |
650 | for (j = 0; j < 8; j++) | 730 | for (j = 0; j < 8; j++) |
651 | edac_dbg(1, "\t\t%#x\t%#x\t%#x\n", | 731 | debugf1("\t\t%#x\t%#x\t%#x\n", |
652 | (value[j] >> 27) & 0x1, | 732 | (value[j] >> 27) & 0x1, |
653 | (value[j] >> 24) & 0x7, | 733 | (value[j] >> 24) & 0x7, |
654 | (value[j] & ((1 << 24) - 1))); | 734 | (value[j] && ((1 << 24) - 1))); |
655 | } | 735 | } |
656 | 736 | ||
657 | return 0; | 737 | return 0; |
@@ -661,8 +741,6 @@ static int get_dimm_config(struct mem_ctl_info *mci) | |||
661 | Error insertion routines | 741 | Error insertion routines |
662 | ****************************************************************************/ | 742 | ****************************************************************************/ |
663 | 743 | ||
664 | #define to_mci(k) container_of(k, struct mem_ctl_info, dev) | ||
665 | |||
666 | /* The i7core has independent error injection features per channel. | 744 | /* The i7core has independent error injection features per channel. |
667 | However, to have a simpler code, we don't allow enabling error injection | 745 | However, to have a simpler code, we don't allow enabling error injection |
668 | on more than one channel. | 746 | on more than one channel. |
@@ -692,11 +770,9 @@ static int disable_inject(const struct mem_ctl_info *mci) | |||
692 | * bit 0 - refers to the lower 32-byte half cacheline | 770 | * bit 0 - refers to the lower 32-byte half cacheline |
693 | * bit 1 - refers to the upper 32-byte half cacheline | 771 | * bit 1 - refers to the upper 32-byte half cacheline |
694 | */ | 772 | */ |
695 | static ssize_t i7core_inject_section_store(struct device *dev, | 773 | static ssize_t i7core_inject_section_store(struct mem_ctl_info *mci, |
696 | struct device_attribute *mattr, | ||
697 | const char *data, size_t count) | 774 | const char *data, size_t count) |
698 | { | 775 | { |
699 | struct mem_ctl_info *mci = to_mci(dev); | ||
700 | struct i7core_pvt *pvt = mci->pvt_info; | 776 | struct i7core_pvt *pvt = mci->pvt_info; |
701 | unsigned long value; | 777 | unsigned long value; |
702 | int rc; | 778 | int rc; |
@@ -712,11 +788,9 @@ static ssize_t i7core_inject_section_store(struct device *dev, | |||
712 | return count; | 788 | return count; |
713 | } | 789 | } |
714 | 790 | ||
715 | static ssize_t i7core_inject_section_show(struct device *dev, | 791 | static ssize_t i7core_inject_section_show(struct mem_ctl_info *mci, |
716 | struct device_attribute *mattr, | 792 | char *data) |
717 | char *data) | ||
718 | { | 793 | { |
719 | struct mem_ctl_info *mci = to_mci(dev); | ||
720 | struct i7core_pvt *pvt = mci->pvt_info; | 794 | struct i7core_pvt *pvt = mci->pvt_info; |
721 | return sprintf(data, "0x%08x\n", pvt->inject.section); | 795 | return sprintf(data, "0x%08x\n", pvt->inject.section); |
722 | } | 796 | } |
@@ -729,12 +803,10 @@ static ssize_t i7core_inject_section_show(struct device *dev, | |||
729 | * bit 1 - inject ECC error | 803 | * bit 1 - inject ECC error |
730 | * bit 2 - inject parity error | 804 | * bit 2 - inject parity error |
731 | */ | 805 | */ |
732 | static ssize_t i7core_inject_type_store(struct device *dev, | 806 | static ssize_t i7core_inject_type_store(struct mem_ctl_info *mci, |
733 | struct device_attribute *mattr, | ||
734 | const char *data, size_t count) | 807 | const char *data, size_t count) |
735 | { | 808 | { |
736 | struct mem_ctl_info *mci = to_mci(dev); | 809 | struct i7core_pvt *pvt = mci->pvt_info; |
737 | struct i7core_pvt *pvt = mci->pvt_info; | ||
738 | unsigned long value; | 810 | unsigned long value; |
739 | int rc; | 811 | int rc; |
740 | 812 | ||
@@ -749,13 +821,10 @@ struct i7core_pvt *pvt = mci->pvt_info; | |||
749 | return count; | 821 | return count; |
750 | } | 822 | } |
751 | 823 | ||
752 | static ssize_t i7core_inject_type_show(struct device *dev, | 824 | static ssize_t i7core_inject_type_show(struct mem_ctl_info *mci, |
753 | struct device_attribute *mattr, | 825 | char *data) |
754 | char *data) | ||
755 | { | 826 | { |
756 | struct mem_ctl_info *mci = to_mci(dev); | ||
757 | struct i7core_pvt *pvt = mci->pvt_info; | 827 | struct i7core_pvt *pvt = mci->pvt_info; |
758 | |||
759 | return sprintf(data, "0x%08x\n", pvt->inject.type); | 828 | return sprintf(data, "0x%08x\n", pvt->inject.type); |
760 | } | 829 | } |
761 | 830 | ||
@@ -769,11 +838,9 @@ static ssize_t i7core_inject_type_show(struct device *dev, | |||
769 | * 23:16 and 31:24). Flipping bits in two symbol pairs will cause an | 838 | * 23:16 and 31:24). Flipping bits in two symbol pairs will cause an |
770 | * uncorrectable error to be injected. | 839 | * uncorrectable error to be injected. |
771 | */ | 840 | */ |
772 | static ssize_t i7core_inject_eccmask_store(struct device *dev, | 841 | static ssize_t i7core_inject_eccmask_store(struct mem_ctl_info *mci, |
773 | struct device_attribute *mattr, | 842 | const char *data, size_t count) |
774 | const char *data, size_t count) | ||
775 | { | 843 | { |
776 | struct mem_ctl_info *mci = to_mci(dev); | ||
777 | struct i7core_pvt *pvt = mci->pvt_info; | 844 | struct i7core_pvt *pvt = mci->pvt_info; |
778 | unsigned long value; | 845 | unsigned long value; |
779 | int rc; | 846 | int rc; |
@@ -789,13 +856,10 @@ static ssize_t i7core_inject_eccmask_store(struct device *dev, | |||
789 | return count; | 856 | return count; |
790 | } | 857 | } |
791 | 858 | ||
792 | static ssize_t i7core_inject_eccmask_show(struct device *dev, | 859 | static ssize_t i7core_inject_eccmask_show(struct mem_ctl_info *mci, |
793 | struct device_attribute *mattr, | 860 | char *data) |
794 | char *data) | ||
795 | { | 861 | { |
796 | struct mem_ctl_info *mci = to_mci(dev); | ||
797 | struct i7core_pvt *pvt = mci->pvt_info; | 862 | struct i7core_pvt *pvt = mci->pvt_info; |
798 | |||
799 | return sprintf(data, "0x%08x\n", pvt->inject.eccmask); | 863 | return sprintf(data, "0x%08x\n", pvt->inject.eccmask); |
800 | } | 864 | } |
801 | 865 | ||
@@ -812,16 +876,14 @@ static ssize_t i7core_inject_eccmask_show(struct device *dev, | |||
812 | 876 | ||
813 | #define DECLARE_ADDR_MATCH(param, limit) \ | 877 | #define DECLARE_ADDR_MATCH(param, limit) \ |
814 | static ssize_t i7core_inject_store_##param( \ | 878 | static ssize_t i7core_inject_store_##param( \ |
815 | struct device *dev, \ | 879 | struct mem_ctl_info *mci, \ |
816 | struct device_attribute *mattr, \ | 880 | const char *data, size_t count) \ |
817 | const char *data, size_t count) \ | ||
818 | { \ | 881 | { \ |
819 | struct mem_ctl_info *mci = dev_get_drvdata(dev); \ | ||
820 | struct i7core_pvt *pvt; \ | 882 | struct i7core_pvt *pvt; \ |
821 | long value; \ | 883 | long value; \ |
822 | int rc; \ | 884 | int rc; \ |
823 | \ | 885 | \ |
824 | edac_dbg(1, "\n"); \ | 886 | debugf1("%s()\n", __func__); \ |
825 | pvt = mci->pvt_info; \ | 887 | pvt = mci->pvt_info; \ |
826 | \ | 888 | \ |
827 | if (pvt->inject.enable) \ | 889 | if (pvt->inject.enable) \ |
@@ -841,15 +903,13 @@ static ssize_t i7core_inject_store_##param( \ | |||
841 | } \ | 903 | } \ |
842 | \ | 904 | \ |
843 | static ssize_t i7core_inject_show_##param( \ | 905 | static ssize_t i7core_inject_show_##param( \ |
844 | struct device *dev, \ | 906 | struct mem_ctl_info *mci, \ |
845 | struct device_attribute *mattr, \ | 907 | char *data) \ |
846 | char *data) \ | ||
847 | { \ | 908 | { \ |
848 | struct mem_ctl_info *mci = dev_get_drvdata(dev); \ | ||
849 | struct i7core_pvt *pvt; \ | 909 | struct i7core_pvt *pvt; \ |
850 | \ | 910 | \ |
851 | pvt = mci->pvt_info; \ | 911 | pvt = mci->pvt_info; \ |
852 | edac_dbg(1, "pvt=%p\n", pvt); \ | 912 | debugf1("%s() pvt=%p\n", __func__, pvt); \ |
853 | if (pvt->inject.param < 0) \ | 913 | if (pvt->inject.param < 0) \ |
854 | return sprintf(data, "any\n"); \ | 914 | return sprintf(data, "any\n"); \ |
855 | else \ | 915 | else \ |
@@ -857,9 +917,14 @@ static ssize_t i7core_inject_show_##param( \ | |||
857 | } | 917 | } |
858 | 918 | ||
859 | #define ATTR_ADDR_MATCH(param) \ | 919 | #define ATTR_ADDR_MATCH(param) \ |
860 | static DEVICE_ATTR(param, S_IRUGO | S_IWUSR, \ | 920 | { \ |
861 | i7core_inject_show_##param, \ | 921 | .attr = { \ |
862 | i7core_inject_store_##param) | 922 | .name = #param, \ |
923 | .mode = (S_IRUGO | S_IWUSR) \ | ||
924 | }, \ | ||
925 | .show = i7core_inject_show_##param, \ | ||
926 | .store = i7core_inject_store_##param, \ | ||
927 | } | ||
863 | 928 | ||
864 | DECLARE_ADDR_MATCH(channel, 3); | 929 | DECLARE_ADDR_MATCH(channel, 3); |
865 | DECLARE_ADDR_MATCH(dimm, 3); | 930 | DECLARE_ADDR_MATCH(dimm, 3); |
@@ -868,21 +933,14 @@ DECLARE_ADDR_MATCH(bank, 32); | |||
868 | DECLARE_ADDR_MATCH(page, 0x10000); | 933 | DECLARE_ADDR_MATCH(page, 0x10000); |
869 | DECLARE_ADDR_MATCH(col, 0x4000); | 934 | DECLARE_ADDR_MATCH(col, 0x4000); |
870 | 935 | ||
871 | ATTR_ADDR_MATCH(channel); | ||
872 | ATTR_ADDR_MATCH(dimm); | ||
873 | ATTR_ADDR_MATCH(rank); | ||
874 | ATTR_ADDR_MATCH(bank); | ||
875 | ATTR_ADDR_MATCH(page); | ||
876 | ATTR_ADDR_MATCH(col); | ||
877 | |||
878 | static int write_and_test(struct pci_dev *dev, const int where, const u32 val) | 936 | static int write_and_test(struct pci_dev *dev, const int where, const u32 val) |
879 | { | 937 | { |
880 | u32 read; | 938 | u32 read; |
881 | int count; | 939 | int count; |
882 | 940 | ||
883 | edac_dbg(0, "setting pci %02x:%02x.%x reg=%02x value=%08x\n", | 941 | debugf0("setting pci %02x:%02x.%x reg=%02x value=%08x\n", |
884 | dev->bus->number, PCI_SLOT(dev->devfn), PCI_FUNC(dev->devfn), | 942 | dev->bus->number, PCI_SLOT(dev->devfn), PCI_FUNC(dev->devfn), |
885 | where, val); | 943 | where, val); |
886 | 944 | ||
887 | for (count = 0; count < 10; count++) { | 945 | for (count = 0; count < 10; count++) { |
888 | if (count) | 946 | if (count) |
@@ -920,11 +978,9 @@ static int write_and_test(struct pci_dev *dev, const int where, const u32 val) | |||
920 | * is reliable enough to check if the MC is using the | 978 | * is reliable enough to check if the MC is using the |
921 | * three channels. However, this is not clear at the datasheet. | 979 | * three channels. However, this is not clear at the datasheet. |
922 | */ | 980 | */ |
923 | static ssize_t i7core_inject_enable_store(struct device *dev, | 981 | static ssize_t i7core_inject_enable_store(struct mem_ctl_info *mci, |
924 | struct device_attribute *mattr, | 982 | const char *data, size_t count) |
925 | const char *data, size_t count) | ||
926 | { | 983 | { |
927 | struct mem_ctl_info *mci = to_mci(dev); | ||
928 | struct i7core_pvt *pvt = mci->pvt_info; | 984 | struct i7core_pvt *pvt = mci->pvt_info; |
929 | u32 injectmask; | 985 | u32 injectmask; |
930 | u64 mask = 0; | 986 | u64 mask = 0; |
@@ -1017,18 +1073,17 @@ static ssize_t i7core_inject_enable_store(struct device *dev, | |||
1017 | pci_write_config_dword(pvt->pci_noncore, | 1073 | pci_write_config_dword(pvt->pci_noncore, |
1018 | MC_CFG_CONTROL, 8); | 1074 | MC_CFG_CONTROL, 8); |
1019 | 1075 | ||
1020 | edac_dbg(0, "Error inject addr match 0x%016llx, ecc 0x%08x, inject 0x%08x\n", | 1076 | debugf0("Error inject addr match 0x%016llx, ecc 0x%08x," |
1021 | mask, pvt->inject.eccmask, injectmask); | 1077 | " inject 0x%08x\n", |
1078 | mask, pvt->inject.eccmask, injectmask); | ||
1022 | 1079 | ||
1023 | 1080 | ||
1024 | return count; | 1081 | return count; |
1025 | } | 1082 | } |
1026 | 1083 | ||
1027 | static ssize_t i7core_inject_enable_show(struct device *dev, | 1084 | static ssize_t i7core_inject_enable_show(struct mem_ctl_info *mci, |
1028 | struct device_attribute *mattr, | 1085 | char *data) |
1029 | char *data) | ||
1030 | { | 1086 | { |
1031 | struct mem_ctl_info *mci = to_mci(dev); | ||
1032 | struct i7core_pvt *pvt = mci->pvt_info; | 1087 | struct i7core_pvt *pvt = mci->pvt_info; |
1033 | u32 injectmask; | 1088 | u32 injectmask; |
1034 | 1089 | ||
@@ -1038,7 +1093,7 @@ static ssize_t i7core_inject_enable_show(struct device *dev, | |||
1038 | pci_read_config_dword(pvt->pci_ch[pvt->inject.channel][0], | 1093 | pci_read_config_dword(pvt->pci_ch[pvt->inject.channel][0], |
1039 | MC_CHANNEL_ERROR_INJECT, &injectmask); | 1094 | MC_CHANNEL_ERROR_INJECT, &injectmask); |
1040 | 1095 | ||
1041 | edac_dbg(0, "Inject error read: 0x%018x\n", injectmask); | 1096 | debugf0("Inject error read: 0x%018x\n", injectmask); |
1042 | 1097 | ||
1043 | if (injectmask & 0x0c) | 1098 | if (injectmask & 0x0c) |
1044 | pvt->inject.enable = 1; | 1099 | pvt->inject.enable = 1; |
@@ -1048,14 +1103,12 @@ static ssize_t i7core_inject_enable_show(struct device *dev, | |||
1048 | 1103 | ||
1049 | #define DECLARE_COUNTER(param) \ | 1104 | #define DECLARE_COUNTER(param) \ |
1050 | static ssize_t i7core_show_counter_##param( \ | 1105 | static ssize_t i7core_show_counter_##param( \ |
1051 | struct device *dev, \ | 1106 | struct mem_ctl_info *mci, \ |
1052 | struct device_attribute *mattr, \ | 1107 | char *data) \ |
1053 | char *data) \ | ||
1054 | { \ | 1108 | { \ |
1055 | struct mem_ctl_info *mci = dev_get_drvdata(dev); \ | ||
1056 | struct i7core_pvt *pvt = mci->pvt_info; \ | 1109 | struct i7core_pvt *pvt = mci->pvt_info; \ |
1057 | \ | 1110 | \ |
1058 | edac_dbg(1, "\n"); \ | 1111 | debugf1("%s() \n", __func__); \ |
1059 | if (!pvt->ce_count_available || (pvt->is_registered)) \ | 1112 | if (!pvt->ce_count_available || (pvt->is_registered)) \ |
1060 | return sprintf(data, "data unavailable\n"); \ | 1113 | return sprintf(data, "data unavailable\n"); \ |
1061 | return sprintf(data, "%lu\n", \ | 1114 | return sprintf(data, "%lu\n", \ |
@@ -1063,179 +1116,121 @@ static ssize_t i7core_show_counter_##param( \ | |||
1063 | } | 1116 | } |
1064 | 1117 | ||
1065 | #define ATTR_COUNTER(param) \ | 1118 | #define ATTR_COUNTER(param) \ |
1066 | static DEVICE_ATTR(udimm##param, S_IRUGO | S_IWUSR, \ | 1119 | { \ |
1067 | i7core_show_counter_##param, \ | 1120 | .attr = { \ |
1068 | NULL) | 1121 | .name = __stringify(udimm##param), \ |
1122 | .mode = (S_IRUGO | S_IWUSR) \ | ||
1123 | }, \ | ||
1124 | .show = i7core_show_counter_##param \ | ||
1125 | } | ||
1069 | 1126 | ||
1070 | DECLARE_COUNTER(0); | 1127 | DECLARE_COUNTER(0); |
1071 | DECLARE_COUNTER(1); | 1128 | DECLARE_COUNTER(1); |
1072 | DECLARE_COUNTER(2); | 1129 | DECLARE_COUNTER(2); |
1073 | 1130 | ||
1074 | ATTR_COUNTER(0); | ||
1075 | ATTR_COUNTER(1); | ||
1076 | ATTR_COUNTER(2); | ||
1077 | |||
1078 | /* | 1131 | /* |
1079 | * inject_addrmatch device sysfs struct | 1132 | * Sysfs struct |
1080 | */ | 1133 | */ |
1081 | 1134 | ||
1082 | static struct attribute *i7core_addrmatch_attrs[] = { | 1135 | static const struct mcidev_sysfs_attribute i7core_addrmatch_attrs[] = { |
1083 | &dev_attr_channel.attr, | 1136 | ATTR_ADDR_MATCH(channel), |
1084 | &dev_attr_dimm.attr, | 1137 | ATTR_ADDR_MATCH(dimm), |
1085 | &dev_attr_rank.attr, | 1138 | ATTR_ADDR_MATCH(rank), |
1086 | &dev_attr_bank.attr, | 1139 | ATTR_ADDR_MATCH(bank), |
1087 | &dev_attr_page.attr, | 1140 | ATTR_ADDR_MATCH(page), |
1088 | &dev_attr_col.attr, | 1141 | ATTR_ADDR_MATCH(col), |
1089 | NULL | 1142 | { } /* End of list */ |
1090 | }; | 1143 | }; |
1091 | 1144 | ||
1092 | static struct attribute_group addrmatch_grp = { | 1145 | static const struct mcidev_sysfs_group i7core_inject_addrmatch = { |
1093 | .attrs = i7core_addrmatch_attrs, | 1146 | .name = "inject_addrmatch", |
1147 | .mcidev_attr = i7core_addrmatch_attrs, | ||
1094 | }; | 1148 | }; |
1095 | 1149 | ||
1096 | static const struct attribute_group *addrmatch_groups[] = { | 1150 | static const struct mcidev_sysfs_attribute i7core_udimm_counters_attrs[] = { |
1097 | &addrmatch_grp, | 1151 | ATTR_COUNTER(0), |
1098 | NULL | 1152 | ATTR_COUNTER(1), |
1153 | ATTR_COUNTER(2), | ||
1154 | { .attr = { .name = NULL } } | ||
1099 | }; | 1155 | }; |
1100 | 1156 | ||
1101 | static void addrmatch_release(struct device *device) | 1157 | static const struct mcidev_sysfs_group i7core_udimm_counters = { |
1102 | { | 1158 | .name = "all_channel_counts", |
1103 | edac_dbg(1, "Releasing device %s\n", dev_name(device)); | 1159 | .mcidev_attr = i7core_udimm_counters_attrs, |
1104 | kfree(device); | ||
1105 | } | ||
1106 | |||
1107 | static struct device_type addrmatch_type = { | ||
1108 | .groups = addrmatch_groups, | ||
1109 | .release = addrmatch_release, | ||
1110 | }; | ||
1111 | |||
1112 | /* | ||
1113 | * all_channel_counts sysfs struct | ||
1114 | */ | ||
1115 | |||
1116 | static struct attribute *i7core_udimm_counters_attrs[] = { | ||
1117 | &dev_attr_udimm0.attr, | ||
1118 | &dev_attr_udimm1.attr, | ||
1119 | &dev_attr_udimm2.attr, | ||
1120 | NULL | ||
1121 | }; | ||
1122 | |||
1123 | static struct attribute_group all_channel_counts_grp = { | ||
1124 | .attrs = i7core_udimm_counters_attrs, | ||
1125 | }; | 1160 | }; |
1126 | 1161 | ||
1127 | static const struct attribute_group *all_channel_counts_groups[] = { | 1162 | static const struct mcidev_sysfs_attribute i7core_sysfs_rdimm_attrs[] = { |
1128 | &all_channel_counts_grp, | 1163 | { |
1129 | NULL | 1164 | .attr = { |
1165 | .name = "inject_section", | ||
1166 | .mode = (S_IRUGO | S_IWUSR) | ||
1167 | }, | ||
1168 | .show = i7core_inject_section_show, | ||
1169 | .store = i7core_inject_section_store, | ||
1170 | }, { | ||
1171 | .attr = { | ||
1172 | .name = "inject_type", | ||
1173 | .mode = (S_IRUGO | S_IWUSR) | ||
1174 | }, | ||
1175 | .show = i7core_inject_type_show, | ||
1176 | .store = i7core_inject_type_store, | ||
1177 | }, { | ||
1178 | .attr = { | ||
1179 | .name = "inject_eccmask", | ||
1180 | .mode = (S_IRUGO | S_IWUSR) | ||
1181 | }, | ||
1182 | .show = i7core_inject_eccmask_show, | ||
1183 | .store = i7core_inject_eccmask_store, | ||
1184 | }, { | ||
1185 | .grp = &i7core_inject_addrmatch, | ||
1186 | }, { | ||
1187 | .attr = { | ||
1188 | .name = "inject_enable", | ||
1189 | .mode = (S_IRUGO | S_IWUSR) | ||
1190 | }, | ||
1191 | .show = i7core_inject_enable_show, | ||
1192 | .store = i7core_inject_enable_store, | ||
1193 | }, | ||
1194 | { } /* End of list */ | ||
1130 | }; | 1195 | }; |
1131 | 1196 | ||
1132 | static void all_channel_counts_release(struct device *device) | 1197 | static const struct mcidev_sysfs_attribute i7core_sysfs_udimm_attrs[] = { |
1133 | { | 1198 | { |
1134 | edac_dbg(1, "Releasing device %s\n", dev_name(device)); | 1199 | .attr = { |
1135 | kfree(device); | 1200 | .name = "inject_section", |
1136 | } | 1201 | .mode = (S_IRUGO | S_IWUSR) |
1137 | 1202 | }, | |
1138 | static struct device_type all_channel_counts_type = { | 1203 | .show = i7core_inject_section_show, |
1139 | .groups = all_channel_counts_groups, | 1204 | .store = i7core_inject_section_store, |
1140 | .release = all_channel_counts_release, | 1205 | }, { |
1206 | .attr = { | ||
1207 | .name = "inject_type", | ||
1208 | .mode = (S_IRUGO | S_IWUSR) | ||
1209 | }, | ||
1210 | .show = i7core_inject_type_show, | ||
1211 | .store = i7core_inject_type_store, | ||
1212 | }, { | ||
1213 | .attr = { | ||
1214 | .name = "inject_eccmask", | ||
1215 | .mode = (S_IRUGO | S_IWUSR) | ||
1216 | }, | ||
1217 | .show = i7core_inject_eccmask_show, | ||
1218 | .store = i7core_inject_eccmask_store, | ||
1219 | }, { | ||
1220 | .grp = &i7core_inject_addrmatch, | ||
1221 | }, { | ||
1222 | .attr = { | ||
1223 | .name = "inject_enable", | ||
1224 | .mode = (S_IRUGO | S_IWUSR) | ||
1225 | }, | ||
1226 | .show = i7core_inject_enable_show, | ||
1227 | .store = i7core_inject_enable_store, | ||
1228 | }, { | ||
1229 | .grp = &i7core_udimm_counters, | ||
1230 | }, | ||
1231 | { } /* End of list */ | ||
1141 | }; | 1232 | }; |
1142 | 1233 | ||
1143 | /* | ||
1144 | * inject sysfs attributes | ||
1145 | */ | ||
1146 | |||
1147 | static DEVICE_ATTR(inject_section, S_IRUGO | S_IWUSR, | ||
1148 | i7core_inject_section_show, i7core_inject_section_store); | ||
1149 | |||
1150 | static DEVICE_ATTR(inject_type, S_IRUGO | S_IWUSR, | ||
1151 | i7core_inject_type_show, i7core_inject_type_store); | ||
1152 | |||
1153 | |||
1154 | static DEVICE_ATTR(inject_eccmask, S_IRUGO | S_IWUSR, | ||
1155 | i7core_inject_eccmask_show, i7core_inject_eccmask_store); | ||
1156 | |||
1157 | static DEVICE_ATTR(inject_enable, S_IRUGO | S_IWUSR, | ||
1158 | i7core_inject_enable_show, i7core_inject_enable_store); | ||
1159 | |||
1160 | static int i7core_create_sysfs_devices(struct mem_ctl_info *mci) | ||
1161 | { | ||
1162 | struct i7core_pvt *pvt = mci->pvt_info; | ||
1163 | int rc; | ||
1164 | |||
1165 | rc = device_create_file(&mci->dev, &dev_attr_inject_section); | ||
1166 | if (rc < 0) | ||
1167 | return rc; | ||
1168 | rc = device_create_file(&mci->dev, &dev_attr_inject_type); | ||
1169 | if (rc < 0) | ||
1170 | return rc; | ||
1171 | rc = device_create_file(&mci->dev, &dev_attr_inject_eccmask); | ||
1172 | if (rc < 0) | ||
1173 | return rc; | ||
1174 | rc = device_create_file(&mci->dev, &dev_attr_inject_enable); | ||
1175 | if (rc < 0) | ||
1176 | return rc; | ||
1177 | |||
1178 | pvt->addrmatch_dev = kzalloc(sizeof(*pvt->addrmatch_dev), GFP_KERNEL); | ||
1179 | if (!pvt->addrmatch_dev) | ||
1180 | return rc; | ||
1181 | |||
1182 | pvt->addrmatch_dev->type = &addrmatch_type; | ||
1183 | pvt->addrmatch_dev->bus = mci->dev.bus; | ||
1184 | device_initialize(pvt->addrmatch_dev); | ||
1185 | pvt->addrmatch_dev->parent = &mci->dev; | ||
1186 | dev_set_name(pvt->addrmatch_dev, "inject_addrmatch"); | ||
1187 | dev_set_drvdata(pvt->addrmatch_dev, mci); | ||
1188 | |||
1189 | edac_dbg(1, "creating %s\n", dev_name(pvt->addrmatch_dev)); | ||
1190 | |||
1191 | rc = device_add(pvt->addrmatch_dev); | ||
1192 | if (rc < 0) | ||
1193 | return rc; | ||
1194 | |||
1195 | if (!pvt->is_registered) { | ||
1196 | pvt->chancounts_dev = kzalloc(sizeof(*pvt->chancounts_dev), | ||
1197 | GFP_KERNEL); | ||
1198 | if (!pvt->chancounts_dev) { | ||
1199 | put_device(pvt->addrmatch_dev); | ||
1200 | device_del(pvt->addrmatch_dev); | ||
1201 | return rc; | ||
1202 | } | ||
1203 | |||
1204 | pvt->chancounts_dev->type = &all_channel_counts_type; | ||
1205 | pvt->chancounts_dev->bus = mci->dev.bus; | ||
1206 | device_initialize(pvt->chancounts_dev); | ||
1207 | pvt->chancounts_dev->parent = &mci->dev; | ||
1208 | dev_set_name(pvt->chancounts_dev, "all_channel_counts"); | ||
1209 | dev_set_drvdata(pvt->chancounts_dev, mci); | ||
1210 | |||
1211 | edac_dbg(1, "creating %s\n", dev_name(pvt->chancounts_dev)); | ||
1212 | |||
1213 | rc = device_add(pvt->chancounts_dev); | ||
1214 | if (rc < 0) | ||
1215 | return rc; | ||
1216 | } | ||
1217 | return 0; | ||
1218 | } | ||
1219 | |||
1220 | static void i7core_delete_sysfs_devices(struct mem_ctl_info *mci) | ||
1221 | { | ||
1222 | struct i7core_pvt *pvt = mci->pvt_info; | ||
1223 | |||
1224 | edac_dbg(1, "\n"); | ||
1225 | |||
1226 | device_remove_file(&mci->dev, &dev_attr_inject_section); | ||
1227 | device_remove_file(&mci->dev, &dev_attr_inject_type); | ||
1228 | device_remove_file(&mci->dev, &dev_attr_inject_eccmask); | ||
1229 | device_remove_file(&mci->dev, &dev_attr_inject_enable); | ||
1230 | |||
1231 | if (!pvt->is_registered) { | ||
1232 | put_device(pvt->chancounts_dev); | ||
1233 | device_del(pvt->chancounts_dev); | ||
1234 | } | ||
1235 | put_device(pvt->addrmatch_dev); | ||
1236 | device_del(pvt->addrmatch_dev); | ||
1237 | } | ||
1238 | |||
1239 | /**************************************************************************** | 1234 | /**************************************************************************** |
1240 | Device initialization routines: put/get, init/exit | 1235 | Device initialization routines: put/get, init/exit |
1241 | ****************************************************************************/ | 1236 | ****************************************************************************/ |
@@ -1248,14 +1243,14 @@ static void i7core_put_devices(struct i7core_dev *i7core_dev) | |||
1248 | { | 1243 | { |
1249 | int i; | 1244 | int i; |
1250 | 1245 | ||
1251 | edac_dbg(0, "\n"); | 1246 | debugf0(__FILE__ ": %s()\n", __func__); |
1252 | for (i = 0; i < i7core_dev->n_devs; i++) { | 1247 | for (i = 0; i < i7core_dev->n_devs; i++) { |
1253 | struct pci_dev *pdev = i7core_dev->pdev[i]; | 1248 | struct pci_dev *pdev = i7core_dev->pdev[i]; |
1254 | if (!pdev) | 1249 | if (!pdev) |
1255 | continue; | 1250 | continue; |
1256 | edac_dbg(0, "Removing dev %02x:%02x.%d\n", | 1251 | debugf0("Removing dev %02x:%02x.%d\n", |
1257 | pdev->bus->number, | 1252 | pdev->bus->number, |
1258 | PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn)); | 1253 | PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn)); |
1259 | pci_dev_put(pdev); | 1254 | pci_dev_put(pdev); |
1260 | } | 1255 | } |
1261 | } | 1256 | } |
@@ -1298,12 +1293,12 @@ static unsigned i7core_pci_lastbus(void) | |||
1298 | 1293 | ||
1299 | while ((b = pci_find_next_bus(b)) != NULL) { | 1294 | while ((b = pci_find_next_bus(b)) != NULL) { |
1300 | bus = b->number; | 1295 | bus = b->number; |
1301 | edac_dbg(0, "Found bus %d\n", bus); | 1296 | debugf0("Found bus %d\n", bus); |
1302 | if (bus > last_bus) | 1297 | if (bus > last_bus) |
1303 | last_bus = bus; | 1298 | last_bus = bus; |
1304 | } | 1299 | } |
1305 | 1300 | ||
1306 | edac_dbg(0, "Last bus %d\n", last_bus); | 1301 | debugf0("Last bus %d\n", last_bus); |
1307 | 1302 | ||
1308 | return last_bus; | 1303 | return last_bus; |
1309 | } | 1304 | } |
@@ -1329,20 +1324,6 @@ static int i7core_get_onedevice(struct pci_dev **prev, | |||
1329 | pdev = pci_get_device(PCI_VENDOR_ID_INTEL, | 1324 | pdev = pci_get_device(PCI_VENDOR_ID_INTEL, |
1330 | dev_descr->dev_id, *prev); | 1325 | dev_descr->dev_id, *prev); |
1331 | 1326 | ||
1332 | /* | ||
1333 | * On Xeon 55xx, the Intel QuickPath Arch Generic Non-core regs | ||
1334 | * is at addr 8086:2c40, instead of 8086:2c41. So, we need | ||
1335 | * to probe for the alternate address in case of failure | ||
1336 | */ | ||
1337 | if (dev_descr->dev_id == PCI_DEVICE_ID_INTEL_I7_NONCORE && !pdev) | ||
1338 | pdev = pci_get_device(PCI_VENDOR_ID_INTEL, | ||
1339 | PCI_DEVICE_ID_INTEL_I7_NONCORE_ALT, *prev); | ||
1340 | |||
1341 | if (dev_descr->dev_id == PCI_DEVICE_ID_INTEL_LYNNFIELD_NONCORE && !pdev) | ||
1342 | pdev = pci_get_device(PCI_VENDOR_ID_INTEL, | ||
1343 | PCI_DEVICE_ID_INTEL_LYNNFIELD_NONCORE_ALT, | ||
1344 | *prev); | ||
1345 | |||
1346 | if (!pdev) { | 1327 | if (!pdev) { |
1347 | if (*prev) { | 1328 | if (*prev) { |
1348 | *prev = pdev; | 1329 | *prev = pdev; |
@@ -1410,10 +1391,10 @@ static int i7core_get_onedevice(struct pci_dev **prev, | |||
1410 | return -ENODEV; | 1391 | return -ENODEV; |
1411 | } | 1392 | } |
1412 | 1393 | ||
1413 | edac_dbg(0, "Detected socket %d dev %02x:%02x.%d PCI ID %04x:%04x\n", | 1394 | debugf0("Detected socket %d dev %02x:%02x.%d PCI ID %04x:%04x\n", |
1414 | socket, bus, dev_descr->dev, | 1395 | socket, bus, dev_descr->dev, |
1415 | dev_descr->func, | 1396 | dev_descr->func, |
1416 | PCI_VENDOR_ID_INTEL, dev_descr->dev_id); | 1397 | PCI_VENDOR_ID_INTEL, dev_descr->dev_id); |
1417 | 1398 | ||
1418 | /* | 1399 | /* |
1419 | * As stated on drivers/pci/search.c, the reference count for | 1400 | * As stated on drivers/pci/search.c, the reference count for |
@@ -1463,10 +1444,8 @@ static int mci_bind_devs(struct mem_ctl_info *mci, | |||
1463 | struct i7core_pvt *pvt = mci->pvt_info; | 1444 | struct i7core_pvt *pvt = mci->pvt_info; |
1464 | struct pci_dev *pdev; | 1445 | struct pci_dev *pdev; |
1465 | int i, func, slot; | 1446 | int i, func, slot; |
1466 | char *family; | ||
1467 | 1447 | ||
1468 | pvt->is_registered = false; | 1448 | pvt->is_registered = 0; |
1469 | pvt->enable_scrub = false; | ||
1470 | for (i = 0; i < i7core_dev->n_devs; i++) { | 1449 | for (i = 0; i < i7core_dev->n_devs; i++) { |
1471 | pdev = i7core_dev->pdev[i]; | 1450 | pdev = i7core_dev->pdev[i]; |
1472 | if (!pdev) | 1451 | if (!pdev) |
@@ -1482,46 +1461,18 @@ static int mci_bind_devs(struct mem_ctl_info *mci, | |||
1482 | if (unlikely(func > MAX_CHAN_FUNC)) | 1461 | if (unlikely(func > MAX_CHAN_FUNC)) |
1483 | goto error; | 1462 | goto error; |
1484 | pvt->pci_ch[slot - 4][func] = pdev; | 1463 | pvt->pci_ch[slot - 4][func] = pdev; |
1485 | } else if (!slot && !func) { | 1464 | } else if (!slot && !func) |
1486 | pvt->pci_noncore = pdev; | 1465 | pvt->pci_noncore = pdev; |
1487 | 1466 | else | |
1488 | /* Detect the processor family */ | ||
1489 | switch (pdev->device) { | ||
1490 | case PCI_DEVICE_ID_INTEL_I7_NONCORE: | ||
1491 | family = "Xeon 35xx/ i7core"; | ||
1492 | pvt->enable_scrub = false; | ||
1493 | break; | ||
1494 | case PCI_DEVICE_ID_INTEL_LYNNFIELD_NONCORE_ALT: | ||
1495 | family = "i7-800/i5-700"; | ||
1496 | pvt->enable_scrub = false; | ||
1497 | break; | ||
1498 | case PCI_DEVICE_ID_INTEL_LYNNFIELD_NONCORE: | ||
1499 | family = "Xeon 34xx"; | ||
1500 | pvt->enable_scrub = false; | ||
1501 | break; | ||
1502 | case PCI_DEVICE_ID_INTEL_I7_NONCORE_ALT: | ||
1503 | family = "Xeon 55xx"; | ||
1504 | pvt->enable_scrub = true; | ||
1505 | break; | ||
1506 | case PCI_DEVICE_ID_INTEL_LYNNFIELD_NONCORE_REV2: | ||
1507 | family = "Xeon 56xx / i7-900"; | ||
1508 | pvt->enable_scrub = true; | ||
1509 | break; | ||
1510 | default: | ||
1511 | family = "unknown"; | ||
1512 | pvt->enable_scrub = false; | ||
1513 | } | ||
1514 | edac_dbg(0, "Detected a processor type %s\n", family); | ||
1515 | } else | ||
1516 | goto error; | 1467 | goto error; |
1517 | 1468 | ||
1518 | edac_dbg(0, "Associated fn %d.%d, dev = %p, socket %d\n", | 1469 | debugf0("Associated fn %d.%d, dev = %p, socket %d\n", |
1519 | PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn), | 1470 | PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn), |
1520 | pdev, i7core_dev->socket); | 1471 | pdev, i7core_dev->socket); |
1521 | 1472 | ||
1522 | if (PCI_SLOT(pdev->devfn) == 3 && | 1473 | if (PCI_SLOT(pdev->devfn) == 3 && |
1523 | PCI_FUNC(pdev->devfn) == 2) | 1474 | PCI_FUNC(pdev->devfn) == 2) |
1524 | pvt->is_registered = true; | 1475 | pvt->is_registered = 1; |
1525 | } | 1476 | } |
1526 | 1477 | ||
1527 | return 0; | 1478 | return 0; |
@@ -1536,6 +1487,24 @@ error: | |||
1536 | /**************************************************************************** | 1487 | /**************************************************************************** |
1537 | Error check routines | 1488 | Error check routines |
1538 | ****************************************************************************/ | 1489 | ****************************************************************************/ |
1490 | static void i7core_rdimm_update_csrow(struct mem_ctl_info *mci, | ||
1491 | const int chan, | ||
1492 | const int dimm, | ||
1493 | const int add) | ||
1494 | { | ||
1495 | char *msg; | ||
1496 | struct i7core_pvt *pvt = mci->pvt_info; | ||
1497 | int row = pvt->csrow_map[chan][dimm], i; | ||
1498 | |||
1499 | for (i = 0; i < add; i++) { | ||
1500 | msg = kasprintf(GFP_KERNEL, "Corrected error " | ||
1501 | "(Socket=%d channel=%d dimm=%d)", | ||
1502 | pvt->i7core_dev->socket, chan, dimm); | ||
1503 | |||
1504 | edac_mc_handle_fbd_ce(mci, row, 0, msg); | ||
1505 | kfree (msg); | ||
1506 | } | ||
1507 | } | ||
1539 | 1508 | ||
1540 | static void i7core_rdimm_update_ce_count(struct mem_ctl_info *mci, | 1509 | static void i7core_rdimm_update_ce_count(struct mem_ctl_info *mci, |
1541 | const int chan, | 1510 | const int chan, |
@@ -1574,17 +1543,12 @@ static void i7core_rdimm_update_ce_count(struct mem_ctl_info *mci, | |||
1574 | 1543 | ||
1575 | /*updated the edac core */ | 1544 | /*updated the edac core */ |
1576 | if (add0 != 0) | 1545 | if (add0 != 0) |
1577 | edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, add0, | 1546 | i7core_rdimm_update_csrow(mci, chan, 0, add0); |
1578 | 0, 0, 0, | ||
1579 | chan, 0, -1, "error", ""); | ||
1580 | if (add1 != 0) | 1547 | if (add1 != 0) |
1581 | edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, add1, | 1548 | i7core_rdimm_update_csrow(mci, chan, 1, add1); |
1582 | 0, 0, 0, | ||
1583 | chan, 1, -1, "error", ""); | ||
1584 | if (add2 != 0) | 1549 | if (add2 != 0) |
1585 | edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, add2, | 1550 | i7core_rdimm_update_csrow(mci, chan, 2, add2); |
1586 | 0, 0, 0, | 1551 | |
1587 | chan, 2, -1, "error", ""); | ||
1588 | } | 1552 | } |
1589 | 1553 | ||
1590 | static void i7core_rdimm_check_mc_ecc_err(struct mem_ctl_info *mci) | 1554 | static void i7core_rdimm_check_mc_ecc_err(struct mem_ctl_info *mci) |
@@ -1607,8 +1571,8 @@ static void i7core_rdimm_check_mc_ecc_err(struct mem_ctl_info *mci) | |||
1607 | pci_read_config_dword(pvt->pci_mcr[2], MC_COR_ECC_CNT_5, | 1571 | pci_read_config_dword(pvt->pci_mcr[2], MC_COR_ECC_CNT_5, |
1608 | &rcv[2][1]); | 1572 | &rcv[2][1]); |
1609 | for (i = 0 ; i < 3; i++) { | 1573 | for (i = 0 ; i < 3; i++) { |
1610 | edac_dbg(3, "MC_COR_ECC_CNT%d = 0x%x; MC_COR_ECC_CNT%d = 0x%x\n", | 1574 | debugf3("MC_COR_ECC_CNT%d = 0x%x; MC_COR_ECC_CNT%d = 0x%x\n", |
1611 | (i * 2), rcv[i][0], (i * 2) + 1, rcv[i][1]); | 1575 | (i * 2), rcv[i][0], (i * 2) + 1, rcv[i][1]); |
1612 | /*if the channel has 3 dimms*/ | 1576 | /*if the channel has 3 dimms*/ |
1613 | if (pvt->channel[i].dimms > 2) { | 1577 | if (pvt->channel[i].dimms > 2) { |
1614 | new0 = DIMM_BOT_COR_ERR(rcv[i][0]); | 1578 | new0 = DIMM_BOT_COR_ERR(rcv[i][0]); |
@@ -1639,7 +1603,7 @@ static void i7core_udimm_check_mc_ecc_err(struct mem_ctl_info *mci) | |||
1639 | int new0, new1, new2; | 1603 | int new0, new1, new2; |
1640 | 1604 | ||
1641 | if (!pvt->pci_mcr[4]) { | 1605 | if (!pvt->pci_mcr[4]) { |
1642 | edac_dbg(0, "MCR registers not found\n"); | 1606 | debugf0("%s MCR registers not found\n", __func__); |
1643 | return; | 1607 | return; |
1644 | } | 1608 | } |
1645 | 1609 | ||
@@ -1703,30 +1667,20 @@ static void i7core_mce_output_error(struct mem_ctl_info *mci, | |||
1703 | const struct mce *m) | 1667 | const struct mce *m) |
1704 | { | 1668 | { |
1705 | struct i7core_pvt *pvt = mci->pvt_info; | 1669 | struct i7core_pvt *pvt = mci->pvt_info; |
1706 | char *type, *optype, *err; | 1670 | char *type, *optype, *err, *msg; |
1707 | enum hw_event_mc_err_type tp_event; | ||
1708 | unsigned long error = m->status & 0x1ff0000l; | 1671 | unsigned long error = m->status & 0x1ff0000l; |
1709 | bool uncorrected_error = m->mcgstatus & 1ll << 61; | ||
1710 | bool ripv = m->mcgstatus & 1; | ||
1711 | u32 optypenum = (m->status >> 4) & 0x07; | 1672 | u32 optypenum = (m->status >> 4) & 0x07; |
1712 | u32 core_err_cnt = (m->status >> 38) & 0x7fff; | 1673 | u32 core_err_cnt = (m->status >> 38) & 0x7fff; |
1713 | u32 dimm = (m->misc >> 16) & 0x3; | 1674 | u32 dimm = (m->misc >> 16) & 0x3; |
1714 | u32 channel = (m->misc >> 18) & 0x3; | 1675 | u32 channel = (m->misc >> 18) & 0x3; |
1715 | u32 syndrome = m->misc >> 32; | 1676 | u32 syndrome = m->misc >> 32; |
1716 | u32 errnum = find_first_bit(&error, 32); | 1677 | u32 errnum = find_first_bit(&error, 32); |
1678 | int csrow; | ||
1717 | 1679 | ||
1718 | if (uncorrected_error) { | 1680 | if (m->mcgstatus & 1) |
1719 | if (ripv) { | 1681 | type = "FATAL"; |
1720 | type = "FATAL"; | 1682 | else |
1721 | tp_event = HW_EVENT_ERR_FATAL; | 1683 | type = "NON_FATAL"; |
1722 | } else { | ||
1723 | type = "NON_FATAL"; | ||
1724 | tp_event = HW_EVENT_ERR_UNCORRECTED; | ||
1725 | } | ||
1726 | } else { | ||
1727 | type = "CORRECTED"; | ||
1728 | tp_event = HW_EVENT_ERR_CORRECTED; | ||
1729 | } | ||
1730 | 1684 | ||
1731 | switch (optypenum) { | 1685 | switch (optypenum) { |
1732 | case 0: | 1686 | case 0: |
@@ -1781,18 +1735,27 @@ static void i7core_mce_output_error(struct mem_ctl_info *mci, | |||
1781 | err = "unknown"; | 1735 | err = "unknown"; |
1782 | } | 1736 | } |
1783 | 1737 | ||
1784 | /* | 1738 | /* FIXME: should convert addr into bank and rank information */ |
1785 | * Call the helper to output message | 1739 | msg = kasprintf(GFP_ATOMIC, |
1786 | * FIXME: what to do if core_err_cnt > 1? Currently, it generates | 1740 | "%s (addr = 0x%08llx, cpu=%d, Dimm=%d, Channel=%d, " |
1787 | * only one event | 1741 | "syndrome=0x%08x, count=%d, Err=%08llx:%08llx (%s: %s))\n", |
1788 | */ | 1742 | type, (long long) m->addr, m->cpu, dimm, channel, |
1789 | if (uncorrected_error || !pvt->is_registered) | 1743 | syndrome, core_err_cnt, (long long)m->status, |
1790 | edac_mc_handle_error(tp_event, mci, core_err_cnt, | 1744 | (long long)m->misc, optype, err); |
1791 | m->addr >> PAGE_SHIFT, | 1745 | |
1792 | m->addr & ~PAGE_MASK, | 1746 | debugf0("%s", msg); |
1793 | syndrome, | 1747 | |
1794 | channel, dimm, -1, | 1748 | csrow = pvt->csrow_map[channel][dimm]; |
1795 | err, optype); | 1749 | |
1750 | /* Call the helper to output message */ | ||
1751 | if (m->mcgstatus & 1) | ||
1752 | edac_mc_handle_fbd_ue(mci, csrow, 0, | ||
1753 | 0 /* FIXME: should be channel here */, msg); | ||
1754 | else if (!pvt->is_registered) | ||
1755 | edac_mc_handle_fbd_ce(mci, csrow, | ||
1756 | 0 /* FIXME: should be channel here */, msg); | ||
1757 | |||
1758 | kfree(msg); | ||
1796 | } | 1759 | } |
1797 | 1760 | ||
1798 | /* | 1761 | /* |
@@ -1863,37 +1826,33 @@ check_ce_error: | |||
1863 | * WARNING: As this routine should be called at NMI time, extra care should | 1826 | * WARNING: As this routine should be called at NMI time, extra care should |
1864 | * be taken to avoid deadlocks, and to be as fast as possible. | 1827 | * be taken to avoid deadlocks, and to be as fast as possible. |
1865 | */ | 1828 | */ |
1866 | static int i7core_mce_check_error(struct notifier_block *nb, unsigned long val, | 1829 | static int i7core_mce_check_error(void *priv, struct mce *mce) |
1867 | void *data) | ||
1868 | { | 1830 | { |
1869 | struct mce *mce = (struct mce *)data; | 1831 | struct mem_ctl_info *mci = priv; |
1870 | struct i7core_dev *i7_dev; | 1832 | struct i7core_pvt *pvt = mci->pvt_info; |
1871 | struct mem_ctl_info *mci; | ||
1872 | struct i7core_pvt *pvt; | ||
1873 | |||
1874 | i7_dev = get_i7core_dev(mce->socketid); | ||
1875 | if (!i7_dev) | ||
1876 | return NOTIFY_BAD; | ||
1877 | |||
1878 | mci = i7_dev->mci; | ||
1879 | pvt = mci->pvt_info; | ||
1880 | 1833 | ||
1881 | /* | 1834 | /* |
1882 | * Just let mcelog handle it if the error is | 1835 | * Just let mcelog handle it if the error is |
1883 | * outside the memory controller | 1836 | * outside the memory controller |
1884 | */ | 1837 | */ |
1885 | if (((mce->status & 0xffff) >> 7) != 1) | 1838 | if (((mce->status & 0xffff) >> 7) != 1) |
1886 | return NOTIFY_DONE; | 1839 | return 0; |
1887 | 1840 | ||
1888 | /* Bank 8 registers are the only ones that we know how to handle */ | 1841 | /* Bank 8 registers are the only ones that we know how to handle */ |
1889 | if (mce->bank != 8) | 1842 | if (mce->bank != 8) |
1890 | return NOTIFY_DONE; | 1843 | return 0; |
1844 | |||
1845 | #ifdef CONFIG_SMP | ||
1846 | /* Only handle if it is the right mc controller */ | ||
1847 | if (cpu_data(mce->cpu).phys_proc_id != pvt->i7core_dev->socket) | ||
1848 | return 0; | ||
1849 | #endif | ||
1891 | 1850 | ||
1892 | smp_rmb(); | 1851 | smp_rmb(); |
1893 | if ((pvt->mce_out + 1) % MCE_LOG_LEN == pvt->mce_in) { | 1852 | if ((pvt->mce_out + 1) % MCE_LOG_LEN == pvt->mce_in) { |
1894 | smp_wmb(); | 1853 | smp_wmb(); |
1895 | pvt->mce_overrun++; | 1854 | pvt->mce_overrun++; |
1896 | return NOTIFY_DONE; | 1855 | return 0; |
1897 | } | 1856 | } |
1898 | 1857 | ||
1899 | /* Copy memory error at the ringbuffer */ | 1858 | /* Copy memory error at the ringbuffer */ |
@@ -1906,240 +1865,7 @@ static int i7core_mce_check_error(struct notifier_block *nb, unsigned long val, | |||
1906 | i7core_check_error(mci); | 1865 | i7core_check_error(mci); |
1907 | 1866 | ||
1908 | /* Advise mcelog that the errors were handled */ | 1867 | /* Advise mcelog that the errors were handled */ |
1909 | return NOTIFY_STOP; | 1868 | return 1; |
1910 | } | ||
1911 | |||
1912 | static struct notifier_block i7_mce_dec = { | ||
1913 | .notifier_call = i7core_mce_check_error, | ||
1914 | }; | ||
1915 | |||
1916 | struct memdev_dmi_entry { | ||
1917 | u8 type; | ||
1918 | u8 length; | ||
1919 | u16 handle; | ||
1920 | u16 phys_mem_array_handle; | ||
1921 | u16 mem_err_info_handle; | ||
1922 | u16 total_width; | ||
1923 | u16 data_width; | ||
1924 | u16 size; | ||
1925 | u8 form; | ||
1926 | u8 device_set; | ||
1927 | u8 device_locator; | ||
1928 | u8 bank_locator; | ||
1929 | u8 memory_type; | ||
1930 | u16 type_detail; | ||
1931 | u16 speed; | ||
1932 | u8 manufacturer; | ||
1933 | u8 serial_number; | ||
1934 | u8 asset_tag; | ||
1935 | u8 part_number; | ||
1936 | u8 attributes; | ||
1937 | u32 extended_size; | ||
1938 | u16 conf_mem_clk_speed; | ||
1939 | } __attribute__((__packed__)); | ||
1940 | |||
1941 | |||
1942 | /* | ||
1943 | * Decode the DRAM Clock Frequency, be paranoid, make sure that all | ||
1944 | * memory devices show the same speed, and if they don't then consider | ||
1945 | * all speeds to be invalid. | ||
1946 | */ | ||
1947 | static void decode_dclk(const struct dmi_header *dh, void *_dclk_freq) | ||
1948 | { | ||
1949 | int *dclk_freq = _dclk_freq; | ||
1950 | u16 dmi_mem_clk_speed; | ||
1951 | |||
1952 | if (*dclk_freq == -1) | ||
1953 | return; | ||
1954 | |||
1955 | if (dh->type == DMI_ENTRY_MEM_DEVICE) { | ||
1956 | struct memdev_dmi_entry *memdev_dmi_entry = | ||
1957 | (struct memdev_dmi_entry *)dh; | ||
1958 | unsigned long conf_mem_clk_speed_offset = | ||
1959 | (unsigned long)&memdev_dmi_entry->conf_mem_clk_speed - | ||
1960 | (unsigned long)&memdev_dmi_entry->type; | ||
1961 | unsigned long speed_offset = | ||
1962 | (unsigned long)&memdev_dmi_entry->speed - | ||
1963 | (unsigned long)&memdev_dmi_entry->type; | ||
1964 | |||
1965 | /* Check that a DIMM is present */ | ||
1966 | if (memdev_dmi_entry->size == 0) | ||
1967 | return; | ||
1968 | |||
1969 | /* | ||
1970 | * Pick the configured speed if it's available, otherwise | ||
1971 | * pick the DIMM speed, or we don't have a speed. | ||
1972 | */ | ||
1973 | if (memdev_dmi_entry->length > conf_mem_clk_speed_offset) { | ||
1974 | dmi_mem_clk_speed = | ||
1975 | memdev_dmi_entry->conf_mem_clk_speed; | ||
1976 | } else if (memdev_dmi_entry->length > speed_offset) { | ||
1977 | dmi_mem_clk_speed = memdev_dmi_entry->speed; | ||
1978 | } else { | ||
1979 | *dclk_freq = -1; | ||
1980 | return; | ||
1981 | } | ||
1982 | |||
1983 | if (*dclk_freq == 0) { | ||
1984 | /* First pass, speed was 0 */ | ||
1985 | if (dmi_mem_clk_speed > 0) { | ||
1986 | /* Set speed if a valid speed is read */ | ||
1987 | *dclk_freq = dmi_mem_clk_speed; | ||
1988 | } else { | ||
1989 | /* Otherwise we don't have a valid speed */ | ||
1990 | *dclk_freq = -1; | ||
1991 | } | ||
1992 | } else if (*dclk_freq > 0 && | ||
1993 | *dclk_freq != dmi_mem_clk_speed) { | ||
1994 | /* | ||
1995 | * If we have a speed, check that all DIMMS are the same | ||
1996 | * speed, otherwise set the speed as invalid. | ||
1997 | */ | ||
1998 | *dclk_freq = -1; | ||
1999 | } | ||
2000 | } | ||
2001 | } | ||
2002 | |||
2003 | /* | ||
2004 | * The default DCLK frequency is used as a fallback if we | ||
2005 | * fail to find anything reliable in the DMI. The value | ||
2006 | * is taken straight from the datasheet. | ||
2007 | */ | ||
2008 | #define DEFAULT_DCLK_FREQ 800 | ||
2009 | |||
2010 | static int get_dclk_freq(void) | ||
2011 | { | ||
2012 | int dclk_freq = 0; | ||
2013 | |||
2014 | dmi_walk(decode_dclk, (void *)&dclk_freq); | ||
2015 | |||
2016 | if (dclk_freq < 1) | ||
2017 | return DEFAULT_DCLK_FREQ; | ||
2018 | |||
2019 | return dclk_freq; | ||
2020 | } | ||
2021 | |||
2022 | /* | ||
2023 | * set_sdram_scrub_rate This routine sets byte/sec bandwidth scrub rate | ||
2024 | * to hardware according to SCRUBINTERVAL formula | ||
2025 | * found in datasheet. | ||
2026 | */ | ||
2027 | static int set_sdram_scrub_rate(struct mem_ctl_info *mci, u32 new_bw) | ||
2028 | { | ||
2029 | struct i7core_pvt *pvt = mci->pvt_info; | ||
2030 | struct pci_dev *pdev; | ||
2031 | u32 dw_scrub; | ||
2032 | u32 dw_ssr; | ||
2033 | |||
2034 | /* Get data from the MC register, function 2 */ | ||
2035 | pdev = pvt->pci_mcr[2]; | ||
2036 | if (!pdev) | ||
2037 | return -ENODEV; | ||
2038 | |||
2039 | pci_read_config_dword(pdev, MC_SCRUB_CONTROL, &dw_scrub); | ||
2040 | |||
2041 | if (new_bw == 0) { | ||
2042 | /* Prepare to disable petrol scrub */ | ||
2043 | dw_scrub &= ~STARTSCRUB; | ||
2044 | /* Stop the patrol scrub engine */ | ||
2045 | write_and_test(pdev, MC_SCRUB_CONTROL, | ||
2046 | dw_scrub & ~SCRUBINTERVAL_MASK); | ||
2047 | |||
2048 | /* Get current status of scrub rate and set bit to disable */ | ||
2049 | pci_read_config_dword(pdev, MC_SSRCONTROL, &dw_ssr); | ||
2050 | dw_ssr &= ~SSR_MODE_MASK; | ||
2051 | dw_ssr |= SSR_MODE_DISABLE; | ||
2052 | } else { | ||
2053 | const int cache_line_size = 64; | ||
2054 | const u32 freq_dclk_mhz = pvt->dclk_freq; | ||
2055 | unsigned long long scrub_interval; | ||
2056 | /* | ||
2057 | * Translate the desired scrub rate to a register value and | ||
2058 | * program the corresponding register value. | ||
2059 | */ | ||
2060 | scrub_interval = (unsigned long long)freq_dclk_mhz * | ||
2061 | cache_line_size * 1000000; | ||
2062 | do_div(scrub_interval, new_bw); | ||
2063 | |||
2064 | if (!scrub_interval || scrub_interval > SCRUBINTERVAL_MASK) | ||
2065 | return -EINVAL; | ||
2066 | |||
2067 | dw_scrub = SCRUBINTERVAL_MASK & scrub_interval; | ||
2068 | |||
2069 | /* Start the patrol scrub engine */ | ||
2070 | pci_write_config_dword(pdev, MC_SCRUB_CONTROL, | ||
2071 | STARTSCRUB | dw_scrub); | ||
2072 | |||
2073 | /* Get current status of scrub rate and set bit to enable */ | ||
2074 | pci_read_config_dword(pdev, MC_SSRCONTROL, &dw_ssr); | ||
2075 | dw_ssr &= ~SSR_MODE_MASK; | ||
2076 | dw_ssr |= SSR_MODE_ENABLE; | ||
2077 | } | ||
2078 | /* Disable or enable scrubbing */ | ||
2079 | pci_write_config_dword(pdev, MC_SSRCONTROL, dw_ssr); | ||
2080 | |||
2081 | return new_bw; | ||
2082 | } | ||
2083 | |||
2084 | /* | ||
2085 | * get_sdram_scrub_rate This routine convert current scrub rate value | ||
2086 | * into byte/sec bandwidth according to | ||
2087 | * SCRUBINTERVAL formula found in datasheet. | ||
2088 | */ | ||
2089 | static int get_sdram_scrub_rate(struct mem_ctl_info *mci) | ||
2090 | { | ||
2091 | struct i7core_pvt *pvt = mci->pvt_info; | ||
2092 | struct pci_dev *pdev; | ||
2093 | const u32 cache_line_size = 64; | ||
2094 | const u32 freq_dclk_mhz = pvt->dclk_freq; | ||
2095 | unsigned long long scrub_rate; | ||
2096 | u32 scrubval; | ||
2097 | |||
2098 | /* Get data from the MC register, function 2 */ | ||
2099 | pdev = pvt->pci_mcr[2]; | ||
2100 | if (!pdev) | ||
2101 | return -ENODEV; | ||
2102 | |||
2103 | /* Get current scrub control data */ | ||
2104 | pci_read_config_dword(pdev, MC_SCRUB_CONTROL, &scrubval); | ||
2105 | |||
2106 | /* Mask highest 8-bits to 0 */ | ||
2107 | scrubval &= SCRUBINTERVAL_MASK; | ||
2108 | if (!scrubval) | ||
2109 | return 0; | ||
2110 | |||
2111 | /* Calculate scrub rate value into byte/sec bandwidth */ | ||
2112 | scrub_rate = (unsigned long long)freq_dclk_mhz * | ||
2113 | 1000000 * cache_line_size; | ||
2114 | do_div(scrub_rate, scrubval); | ||
2115 | return (int)scrub_rate; | ||
2116 | } | ||
2117 | |||
2118 | static void enable_sdram_scrub_setting(struct mem_ctl_info *mci) | ||
2119 | { | ||
2120 | struct i7core_pvt *pvt = mci->pvt_info; | ||
2121 | u32 pci_lock; | ||
2122 | |||
2123 | /* Unlock writes to pci registers */ | ||
2124 | pci_read_config_dword(pvt->pci_noncore, MC_CFG_CONTROL, &pci_lock); | ||
2125 | pci_lock &= ~0x3; | ||
2126 | pci_write_config_dword(pvt->pci_noncore, MC_CFG_CONTROL, | ||
2127 | pci_lock | MC_CFG_UNLOCK); | ||
2128 | |||
2129 | mci->set_sdram_scrub_rate = set_sdram_scrub_rate; | ||
2130 | mci->get_sdram_scrub_rate = get_sdram_scrub_rate; | ||
2131 | } | ||
2132 | |||
2133 | static void disable_sdram_scrub_setting(struct mem_ctl_info *mci) | ||
2134 | { | ||
2135 | struct i7core_pvt *pvt = mci->pvt_info; | ||
2136 | u32 pci_lock; | ||
2137 | |||
2138 | /* Lock writes to pci registers */ | ||
2139 | pci_read_config_dword(pvt->pci_noncore, MC_CFG_CONTROL, &pci_lock); | ||
2140 | pci_lock &= ~0x3; | ||
2141 | pci_write_config_dword(pvt->pci_noncore, MC_CFG_CONTROL, | ||
2142 | pci_lock | MC_CFG_LOCK); | ||
2143 | } | 1869 | } |
2144 | 1870 | ||
2145 | static void i7core_pci_ctl_create(struct i7core_pvt *pvt) | 1871 | static void i7core_pci_ctl_create(struct i7core_pvt *pvt) |
@@ -2148,8 +1874,7 @@ static void i7core_pci_ctl_create(struct i7core_pvt *pvt) | |||
2148 | &pvt->i7core_dev->pdev[0]->dev, | 1874 | &pvt->i7core_dev->pdev[0]->dev, |
2149 | EDAC_MOD_STR); | 1875 | EDAC_MOD_STR); |
2150 | if (unlikely(!pvt->i7core_pci)) | 1876 | if (unlikely(!pvt->i7core_pci)) |
2151 | i7core_printk(KERN_WARNING, | 1877 | pr_warn("Unable to setup PCI error report via EDAC\n"); |
2152 | "Unable to setup PCI error report via EDAC\n"); | ||
2153 | } | 1878 | } |
2154 | 1879 | ||
2155 | static void i7core_pci_ctl_release(struct i7core_pvt *pvt) | 1880 | static void i7core_pci_ctl_release(struct i7core_pvt *pvt) |
@@ -2169,7 +1894,8 @@ static void i7core_unregister_mci(struct i7core_dev *i7core_dev) | |||
2169 | struct i7core_pvt *pvt; | 1894 | struct i7core_pvt *pvt; |
2170 | 1895 | ||
2171 | if (unlikely(!mci || !mci->pvt_info)) { | 1896 | if (unlikely(!mci || !mci->pvt_info)) { |
2172 | edac_dbg(0, "MC: dev = %p\n", &i7core_dev->pdev[0]->dev); | 1897 | debugf0("MC: " __FILE__ ": %s(): dev = %p\n", |
1898 | __func__, &i7core_dev->pdev[0]->dev); | ||
2173 | 1899 | ||
2174 | i7core_printk(KERN_ERR, "Couldn't find mci handler\n"); | 1900 | i7core_printk(KERN_ERR, "Couldn't find mci handler\n"); |
2175 | return; | 1901 | return; |
@@ -2177,20 +1903,19 @@ static void i7core_unregister_mci(struct i7core_dev *i7core_dev) | |||
2177 | 1903 | ||
2178 | pvt = mci->pvt_info; | 1904 | pvt = mci->pvt_info; |
2179 | 1905 | ||
2180 | edac_dbg(0, "MC: mci = %p, dev = %p\n", mci, &i7core_dev->pdev[0]->dev); | 1906 | debugf0("MC: " __FILE__ ": %s(): mci = %p, dev = %p\n", |
1907 | __func__, mci, &i7core_dev->pdev[0]->dev); | ||
2181 | 1908 | ||
2182 | /* Disable scrubrate setting */ | 1909 | /* Disable MCE NMI handler */ |
2183 | if (pvt->enable_scrub) | 1910 | edac_mce_unregister(&pvt->edac_mce); |
2184 | disable_sdram_scrub_setting(mci); | ||
2185 | 1911 | ||
2186 | /* Disable EDAC polling */ | 1912 | /* Disable EDAC polling */ |
2187 | i7core_pci_ctl_release(pvt); | 1913 | i7core_pci_ctl_release(pvt); |
2188 | 1914 | ||
2189 | /* Remove MC sysfs nodes */ | 1915 | /* Remove MC sysfs nodes */ |
2190 | i7core_delete_sysfs_devices(mci); | 1916 | edac_mc_del_mc(mci->dev); |
2191 | edac_mc_del_mc(mci->pdev); | ||
2192 | 1917 | ||
2193 | edac_dbg(1, "%s: free mci struct\n", mci->ctl_name); | 1918 | debugf1("%s: free mci struct\n", mci->ctl_name); |
2194 | kfree(mci->ctl_name); | 1919 | kfree(mci->ctl_name); |
2195 | edac_mc_free(mci); | 1920 | edac_mc_free(mci); |
2196 | i7core_dev->mci = NULL; | 1921 | i7core_dev->mci = NULL; |
@@ -2200,23 +1925,20 @@ static int i7core_register_mci(struct i7core_dev *i7core_dev) | |||
2200 | { | 1925 | { |
2201 | struct mem_ctl_info *mci; | 1926 | struct mem_ctl_info *mci; |
2202 | struct i7core_pvt *pvt; | 1927 | struct i7core_pvt *pvt; |
2203 | int rc; | 1928 | int rc, channels, csrows; |
2204 | struct edac_mc_layer layers[2]; | ||
2205 | 1929 | ||
2206 | /* allocate a new MC control structure */ | 1930 | /* Check the number of active and not disabled channels */ |
1931 | rc = i7core_get_active_channels(i7core_dev->socket, &channels, &csrows); | ||
1932 | if (unlikely(rc < 0)) | ||
1933 | return rc; | ||
2207 | 1934 | ||
2208 | layers[0].type = EDAC_MC_LAYER_CHANNEL; | 1935 | /* allocate a new MC control structure */ |
2209 | layers[0].size = NUM_CHANS; | 1936 | mci = edac_mc_alloc(sizeof(*pvt), csrows, channels, i7core_dev->socket); |
2210 | layers[0].is_virt_csrow = false; | ||
2211 | layers[1].type = EDAC_MC_LAYER_SLOT; | ||
2212 | layers[1].size = MAX_DIMMS; | ||
2213 | layers[1].is_virt_csrow = true; | ||
2214 | mci = edac_mc_alloc(i7core_dev->socket, ARRAY_SIZE(layers), layers, | ||
2215 | sizeof(*pvt)); | ||
2216 | if (unlikely(!mci)) | 1937 | if (unlikely(!mci)) |
2217 | return -ENOMEM; | 1938 | return -ENOMEM; |
2218 | 1939 | ||
2219 | edac_dbg(0, "MC: mci = %p, dev = %p\n", mci, &i7core_dev->pdev[0]->dev); | 1940 | debugf0("MC: " __FILE__ ": %s(): mci = %p, dev = %p\n", |
1941 | __func__, mci, &i7core_dev->pdev[0]->dev); | ||
2220 | 1942 | ||
2221 | pvt = mci->pvt_info; | 1943 | pvt = mci->pvt_info; |
2222 | memset(pvt, 0, sizeof(*pvt)); | 1944 | memset(pvt, 0, sizeof(*pvt)); |
@@ -2245,21 +1967,22 @@ static int i7core_register_mci(struct i7core_dev *i7core_dev) | |||
2245 | if (unlikely(rc < 0)) | 1967 | if (unlikely(rc < 0)) |
2246 | goto fail0; | 1968 | goto fail0; |
2247 | 1969 | ||
1970 | if (pvt->is_registered) | ||
1971 | mci->mc_driver_sysfs_attributes = i7core_sysfs_rdimm_attrs; | ||
1972 | else | ||
1973 | mci->mc_driver_sysfs_attributes = i7core_sysfs_udimm_attrs; | ||
2248 | 1974 | ||
2249 | /* Get dimm basic config */ | 1975 | /* Get dimm basic config */ |
2250 | get_dimm_config(mci); | 1976 | get_dimm_config(mci); |
2251 | /* record ptr to the generic device */ | 1977 | /* record ptr to the generic device */ |
2252 | mci->pdev = &i7core_dev->pdev[0]->dev; | 1978 | mci->dev = &i7core_dev->pdev[0]->dev; |
2253 | /* Set the function pointer to an actual operation function */ | 1979 | /* Set the function pointer to an actual operation function */ |
2254 | mci->edac_check = i7core_check_error; | 1980 | mci->edac_check = i7core_check_error; |
2255 | 1981 | ||
2256 | /* Enable scrubrate setting */ | ||
2257 | if (pvt->enable_scrub) | ||
2258 | enable_sdram_scrub_setting(mci); | ||
2259 | |||
2260 | /* add this new MC control structure to EDAC's list of MCs */ | 1982 | /* add this new MC control structure to EDAC's list of MCs */ |
2261 | if (unlikely(edac_mc_add_mc(mci))) { | 1983 | if (unlikely(edac_mc_add_mc(mci))) { |
2262 | edac_dbg(0, "MC: failed edac_mc_add_mc()\n"); | 1984 | debugf0("MC: " __FILE__ |
1985 | ": %s(): failed edac_mc_add_mc()\n", __func__); | ||
2263 | /* FIXME: perhaps some code should go here that disables error | 1986 | /* FIXME: perhaps some code should go here that disables error |
2264 | * reporting if we just enabled it | 1987 | * reporting if we just enabled it |
2265 | */ | 1988 | */ |
@@ -2267,12 +1990,6 @@ static int i7core_register_mci(struct i7core_dev *i7core_dev) | |||
2267 | rc = -EINVAL; | 1990 | rc = -EINVAL; |
2268 | goto fail0; | 1991 | goto fail0; |
2269 | } | 1992 | } |
2270 | if (i7core_create_sysfs_devices(mci)) { | ||
2271 | edac_dbg(0, "MC: failed to create sysfs nodes\n"); | ||
2272 | edac_mc_del_mc(mci->pdev); | ||
2273 | rc = -EINVAL; | ||
2274 | goto fail0; | ||
2275 | } | ||
2276 | 1993 | ||
2277 | /* Default error mask is any memory */ | 1994 | /* Default error mask is any memory */ |
2278 | pvt->inject.channel = 0; | 1995 | pvt->inject.channel = 0; |
@@ -2285,11 +2002,21 @@ static int i7core_register_mci(struct i7core_dev *i7core_dev) | |||
2285 | /* allocating generic PCI control info */ | 2002 | /* allocating generic PCI control info */ |
2286 | i7core_pci_ctl_create(pvt); | 2003 | i7core_pci_ctl_create(pvt); |
2287 | 2004 | ||
2288 | /* DCLK for scrub rate setting */ | 2005 | /* Registers on edac_mce in order to receive memory errors */ |
2289 | pvt->dclk_freq = get_dclk_freq(); | 2006 | pvt->edac_mce.priv = mci; |
2007 | pvt->edac_mce.check_error = i7core_mce_check_error; | ||
2008 | rc = edac_mce_register(&pvt->edac_mce); | ||
2009 | if (unlikely(rc < 0)) { | ||
2010 | debugf0("MC: " __FILE__ | ||
2011 | ": %s(): failed edac_mce_register()\n", __func__); | ||
2012 | goto fail1; | ||
2013 | } | ||
2290 | 2014 | ||
2291 | return 0; | 2015 | return 0; |
2292 | 2016 | ||
2017 | fail1: | ||
2018 | i7core_pci_ctl_release(pvt); | ||
2019 | edac_mc_del_mc(mci->dev); | ||
2293 | fail0: | 2020 | fail0: |
2294 | kfree(mci->ctl_name); | 2021 | kfree(mci->ctl_name); |
2295 | edac_mc_free(mci); | 2022 | edac_mc_free(mci); |
@@ -2305,9 +2032,10 @@ fail0: | |||
2305 | * < 0 for error code | 2032 | * < 0 for error code |
2306 | */ | 2033 | */ |
2307 | 2034 | ||
2308 | static int i7core_probe(struct pci_dev *pdev, const struct pci_device_id *id) | 2035 | static int __devinit i7core_probe(struct pci_dev *pdev, |
2036 | const struct pci_device_id *id) | ||
2309 | { | 2037 | { |
2310 | int rc, count = 0; | 2038 | int rc; |
2311 | struct i7core_dev *i7core_dev; | 2039 | struct i7core_dev *i7core_dev; |
2312 | 2040 | ||
2313 | /* get the pci devices we want to reserve for our use */ | 2041 | /* get the pci devices we want to reserve for our use */ |
@@ -2327,28 +2055,12 @@ static int i7core_probe(struct pci_dev *pdev, const struct pci_device_id *id) | |||
2327 | goto fail0; | 2055 | goto fail0; |
2328 | 2056 | ||
2329 | list_for_each_entry(i7core_dev, &i7core_edac_list, list) { | 2057 | list_for_each_entry(i7core_dev, &i7core_edac_list, list) { |
2330 | count++; | ||
2331 | rc = i7core_register_mci(i7core_dev); | 2058 | rc = i7core_register_mci(i7core_dev); |
2332 | if (unlikely(rc < 0)) | 2059 | if (unlikely(rc < 0)) |
2333 | goto fail1; | 2060 | goto fail1; |
2334 | } | 2061 | } |
2335 | 2062 | ||
2336 | /* | 2063 | i7core_printk(KERN_INFO, "Driver loaded.\n"); |
2337 | * Nehalem-EX uses a different memory controller. However, as the | ||
2338 | * memory controller is not visible on some Nehalem/Nehalem-EP, we | ||
2339 | * need to indirectly probe via a X58 PCI device. The same devices | ||
2340 | * are found on (some) Nehalem-EX. So, on those machines, the | ||
2341 | * probe routine needs to return -ENODEV, as the actual Memory | ||
2342 | * Controller registers won't be detected. | ||
2343 | */ | ||
2344 | if (!count) { | ||
2345 | rc = -ENODEV; | ||
2346 | goto fail1; | ||
2347 | } | ||
2348 | |||
2349 | i7core_printk(KERN_INFO, | ||
2350 | "Driver loaded, %d memory controller(s) found.\n", | ||
2351 | count); | ||
2352 | 2064 | ||
2353 | mutex_unlock(&i7core_edac_lock); | 2065 | mutex_unlock(&i7core_edac_lock); |
2354 | return 0; | 2066 | return 0; |
@@ -2367,11 +2079,11 @@ fail0: | |||
2367 | * i7core_remove destructor for one instance of device | 2079 | * i7core_remove destructor for one instance of device |
2368 | * | 2080 | * |
2369 | */ | 2081 | */ |
2370 | static void i7core_remove(struct pci_dev *pdev) | 2082 | static void __devexit i7core_remove(struct pci_dev *pdev) |
2371 | { | 2083 | { |
2372 | struct i7core_dev *i7core_dev; | 2084 | struct i7core_dev *i7core_dev; |
2373 | 2085 | ||
2374 | edac_dbg(0, "\n"); | 2086 | debugf0(__FILE__ ": %s()\n", __func__); |
2375 | 2087 | ||
2376 | /* | 2088 | /* |
2377 | * we have a trouble here: pdev value for removal will be wrong, since | 2089 | * we have a trouble here: pdev value for removal will be wrong, since |
@@ -2408,7 +2120,7 @@ MODULE_DEVICE_TABLE(pci, i7core_pci_tbl); | |||
2408 | static struct pci_driver i7core_driver = { | 2120 | static struct pci_driver i7core_driver = { |
2409 | .name = "i7core_edac", | 2121 | .name = "i7core_edac", |
2410 | .probe = i7core_probe, | 2122 | .probe = i7core_probe, |
2411 | .remove = i7core_remove, | 2123 | .remove = __devexit_p(i7core_remove), |
2412 | .id_table = i7core_pci_tbl, | 2124 | .id_table = i7core_pci_tbl, |
2413 | }; | 2125 | }; |
2414 | 2126 | ||
@@ -2420,7 +2132,7 @@ static int __init i7core_init(void) | |||
2420 | { | 2132 | { |
2421 | int pci_rc; | 2133 | int pci_rc; |
2422 | 2134 | ||
2423 | edac_dbg(2, "\n"); | 2135 | debugf2("MC: " __FILE__ ": %s()\n", __func__); |
2424 | 2136 | ||
2425 | /* Ensure that the OPSTATE is set correctly for POLL or NMI */ | 2137 | /* Ensure that the OPSTATE is set correctly for POLL or NMI */ |
2426 | opstate_init(); | 2138 | opstate_init(); |
@@ -2430,10 +2142,8 @@ static int __init i7core_init(void) | |||
2430 | 2142 | ||
2431 | pci_rc = pci_register_driver(&i7core_driver); | 2143 | pci_rc = pci_register_driver(&i7core_driver); |
2432 | 2144 | ||
2433 | if (pci_rc >= 0) { | 2145 | if (pci_rc >= 0) |
2434 | mce_register_decode_chain(&i7_mce_dec); | ||
2435 | return 0; | 2146 | return 0; |
2436 | } | ||
2437 | 2147 | ||
2438 | i7core_printk(KERN_ERR, "Failed to register device with error %d.\n", | 2148 | i7core_printk(KERN_ERR, "Failed to register device with error %d.\n", |
2439 | pci_rc); | 2149 | pci_rc); |
@@ -2447,9 +2157,8 @@ static int __init i7core_init(void) | |||
2447 | */ | 2157 | */ |
2448 | static void __exit i7core_exit(void) | 2158 | static void __exit i7core_exit(void) |
2449 | { | 2159 | { |
2450 | edac_dbg(2, "\n"); | 2160 | debugf2("MC: " __FILE__ ": %s()\n", __func__); |
2451 | pci_unregister_driver(&i7core_driver); | 2161 | pci_unregister_driver(&i7core_driver); |
2452 | mce_unregister_decode_chain(&i7_mce_dec); | ||
2453 | } | 2162 | } |
2454 | 2163 | ||
2455 | module_init(i7core_init); | 2164 | module_init(i7core_init); |
diff --git a/drivers/edac/i82443bxgx_edac.c b/drivers/edac/i82443bxgx_edac.c index 57fdb77903b..4329d39f902 100644 --- a/drivers/edac/i82443bxgx_edac.c +++ b/drivers/edac/i82443bxgx_edac.c | |||
@@ -12,7 +12,7 @@ | |||
12 | * 440GX fix by Jason Uhlenkott <juhlenko@akamai.com>. | 12 | * 440GX fix by Jason Uhlenkott <juhlenko@akamai.com>. |
13 | * | 13 | * |
14 | * Written with reference to 82443BX Host Bridge Datasheet: | 14 | * Written with reference to 82443BX Host Bridge Datasheet: |
15 | * http://download.intel.com/design/chipsets/datashts/29063301.pdf | 15 | * http://download.intel.com/design/chipsets/datashts/29063301.pdf |
16 | * references to this document given in []. | 16 | * references to this document given in []. |
17 | * | 17 | * |
18 | * This module doesn't support the 440LX, but it may be possible to | 18 | * This module doesn't support the 440LX, but it may be possible to |
@@ -124,7 +124,7 @@ static void i82443bxgx_edacmc_get_error_info(struct mem_ctl_info *mci, | |||
124 | *info) | 124 | *info) |
125 | { | 125 | { |
126 | struct pci_dev *pdev; | 126 | struct pci_dev *pdev; |
127 | pdev = to_pci_dev(mci->pdev); | 127 | pdev = to_pci_dev(mci->dev); |
128 | pci_read_config_dword(pdev, I82443BXGX_EAP, &info->eap); | 128 | pci_read_config_dword(pdev, I82443BXGX_EAP, &info->eap); |
129 | if (info->eap & I82443BXGX_EAP_OFFSET_SBE) | 129 | if (info->eap & I82443BXGX_EAP_OFFSET_SBE) |
130 | /* Clear error to allow next error to be reported [p.61] */ | 130 | /* Clear error to allow next error to be reported [p.61] */ |
@@ -156,19 +156,19 @@ static int i82443bxgx_edacmc_process_error_info(struct mem_ctl_info *mci, | |||
156 | if (info->eap & I82443BXGX_EAP_OFFSET_SBE) { | 156 | if (info->eap & I82443BXGX_EAP_OFFSET_SBE) { |
157 | error_found = 1; | 157 | error_found = 1; |
158 | if (handle_errors) | 158 | if (handle_errors) |
159 | edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1, | 159 | edac_mc_handle_ce(mci, page, pageoffset, |
160 | page, pageoffset, 0, | 160 | /* 440BX/GX don't make syndrome information |
161 | edac_mc_find_csrow_by_page(mci, page), | 161 | * available */ |
162 | 0, -1, mci->ctl_name, ""); | 162 | 0, edac_mc_find_csrow_by_page(mci, page), 0, |
163 | mci->ctl_name); | ||
163 | } | 164 | } |
164 | 165 | ||
165 | if (info->eap & I82443BXGX_EAP_OFFSET_MBE) { | 166 | if (info->eap & I82443BXGX_EAP_OFFSET_MBE) { |
166 | error_found = 1; | 167 | error_found = 1; |
167 | if (handle_errors) | 168 | if (handle_errors) |
168 | edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1, | 169 | edac_mc_handle_ue(mci, page, pageoffset, |
169 | page, pageoffset, 0, | 170 | edac_mc_find_csrow_by_page(mci, page), |
170 | edac_mc_find_csrow_by_page(mci, page), | 171 | mci->ctl_name); |
171 | 0, -1, mci->ctl_name, ""); | ||
172 | } | 172 | } |
173 | 173 | ||
174 | return error_found; | 174 | return error_found; |
@@ -178,7 +178,7 @@ static void i82443bxgx_edacmc_check(struct mem_ctl_info *mci) | |||
178 | { | 178 | { |
179 | struct i82443bxgx_edacmc_error_info info; | 179 | struct i82443bxgx_edacmc_error_info info; |
180 | 180 | ||
181 | edac_dbg(1, "MC%d\n", mci->mc_idx); | 181 | debugf1("MC%d: %s: %s()\n", mci->mc_idx, __FILE__, __func__); |
182 | i82443bxgx_edacmc_get_error_info(mci, &info); | 182 | i82443bxgx_edacmc_get_error_info(mci, &info); |
183 | i82443bxgx_edacmc_process_error_info(mci, &info, 1); | 183 | i82443bxgx_edacmc_process_error_info(mci, &info, 1); |
184 | } | 184 | } |
@@ -189,7 +189,6 @@ static void i82443bxgx_init_csrows(struct mem_ctl_info *mci, | |||
189 | enum mem_type mtype) | 189 | enum mem_type mtype) |
190 | { | 190 | { |
191 | struct csrow_info *csrow; | 191 | struct csrow_info *csrow; |
192 | struct dimm_info *dimm; | ||
193 | int index; | 192 | int index; |
194 | u8 drbar, dramc; | 193 | u8 drbar, dramc; |
195 | u32 row_base, row_high_limit, row_high_limit_last; | 194 | u32 row_base, row_high_limit, row_high_limit_last; |
@@ -197,17 +196,16 @@ static void i82443bxgx_init_csrows(struct mem_ctl_info *mci, | |||
197 | pci_read_config_byte(pdev, I82443BXGX_DRAMC, &dramc); | 196 | pci_read_config_byte(pdev, I82443BXGX_DRAMC, &dramc); |
198 | row_high_limit_last = 0; | 197 | row_high_limit_last = 0; |
199 | for (index = 0; index < mci->nr_csrows; index++) { | 198 | for (index = 0; index < mci->nr_csrows; index++) { |
200 | csrow = mci->csrows[index]; | 199 | csrow = &mci->csrows[index]; |
201 | dimm = csrow->channels[0]->dimm; | ||
202 | |||
203 | pci_read_config_byte(pdev, I82443BXGX_DRB + index, &drbar); | 200 | pci_read_config_byte(pdev, I82443BXGX_DRB + index, &drbar); |
204 | edac_dbg(1, "MC%d: Row=%d DRB = %#0x\n", | 201 | debugf1("MC%d: %s: %s() Row=%d DRB = %#0x\n", |
205 | mci->mc_idx, index, drbar); | 202 | mci->mc_idx, __FILE__, __func__, index, drbar); |
206 | row_high_limit = ((u32) drbar << 23); | 203 | row_high_limit = ((u32) drbar << 23); |
207 | /* find the DRAM Chip Select Base address and mask */ | 204 | /* find the DRAM Chip Select Base address and mask */ |
208 | edac_dbg(1, "MC%d: Row=%d, Boundary Address=%#0x, Last = %#0x\n", | 205 | debugf1("MC%d: %s: %s() Row=%d, " |
209 | mci->mc_idx, index, row_high_limit, | 206 | "Boundary Address=%#0x, Last = %#0x\n", |
210 | row_high_limit_last); | 207 | mci->mc_idx, __FILE__, __func__, index, row_high_limit, |
208 | row_high_limit_last); | ||
211 | 209 | ||
212 | /* 440GX goes to 2GB, represented with a DRB of 0. */ | 210 | /* 440GX goes to 2GB, represented with a DRB of 0. */ |
213 | if (row_high_limit_last && !row_high_limit) | 211 | if (row_high_limit_last && !row_high_limit) |
@@ -219,14 +217,14 @@ static void i82443bxgx_init_csrows(struct mem_ctl_info *mci, | |||
219 | row_base = row_high_limit_last; | 217 | row_base = row_high_limit_last; |
220 | csrow->first_page = row_base >> PAGE_SHIFT; | 218 | csrow->first_page = row_base >> PAGE_SHIFT; |
221 | csrow->last_page = (row_high_limit >> PAGE_SHIFT) - 1; | 219 | csrow->last_page = (row_high_limit >> PAGE_SHIFT) - 1; |
222 | dimm->nr_pages = csrow->last_page - csrow->first_page + 1; | 220 | csrow->nr_pages = csrow->last_page - csrow->first_page + 1; |
223 | /* EAP reports in 4kilobyte granularity [61] */ | 221 | /* EAP reports in 4kilobyte granularity [61] */ |
224 | dimm->grain = 1 << 12; | 222 | csrow->grain = 1 << 12; |
225 | dimm->mtype = mtype; | 223 | csrow->mtype = mtype; |
226 | /* I don't think 440BX can tell you device type? FIXME? */ | 224 | /* I don't think 440BX can tell you device type? FIXME? */ |
227 | dimm->dtype = DEV_UNKNOWN; | 225 | csrow->dtype = DEV_UNKNOWN; |
228 | /* Mode is global to all rows on 440BX */ | 226 | /* Mode is global to all rows on 440BX */ |
229 | dimm->edac_mode = edac_mode; | 227 | csrow->edac_mode = edac_mode; |
230 | row_high_limit_last = row_high_limit; | 228 | row_high_limit_last = row_high_limit; |
231 | } | 229 | } |
232 | } | 230 | } |
@@ -234,13 +232,12 @@ static void i82443bxgx_init_csrows(struct mem_ctl_info *mci, | |||
234 | static int i82443bxgx_edacmc_probe1(struct pci_dev *pdev, int dev_idx) | 232 | static int i82443bxgx_edacmc_probe1(struct pci_dev *pdev, int dev_idx) |
235 | { | 233 | { |
236 | struct mem_ctl_info *mci; | 234 | struct mem_ctl_info *mci; |
237 | struct edac_mc_layer layers[2]; | ||
238 | u8 dramc; | 235 | u8 dramc; |
239 | u32 nbxcfg, ecc_mode; | 236 | u32 nbxcfg, ecc_mode; |
240 | enum mem_type mtype; | 237 | enum mem_type mtype; |
241 | enum edac_type edac_mode; | 238 | enum edac_type edac_mode; |
242 | 239 | ||
243 | edac_dbg(0, "MC:\n"); | 240 | debugf0("MC: %s: %s()\n", __FILE__, __func__); |
244 | 241 | ||
245 | /* Something is really hosed if PCI config space reads from | 242 | /* Something is really hosed if PCI config space reads from |
246 | * the MC aren't working. | 243 | * the MC aren't working. |
@@ -248,18 +245,13 @@ static int i82443bxgx_edacmc_probe1(struct pci_dev *pdev, int dev_idx) | |||
248 | if (pci_read_config_dword(pdev, I82443BXGX_NBXCFG, &nbxcfg)) | 245 | if (pci_read_config_dword(pdev, I82443BXGX_NBXCFG, &nbxcfg)) |
249 | return -EIO; | 246 | return -EIO; |
250 | 247 | ||
251 | layers[0].type = EDAC_MC_LAYER_CHIP_SELECT; | 248 | mci = edac_mc_alloc(0, I82443BXGX_NR_CSROWS, I82443BXGX_NR_CHANS, 0); |
252 | layers[0].size = I82443BXGX_NR_CSROWS; | 249 | |
253 | layers[0].is_virt_csrow = true; | ||
254 | layers[1].type = EDAC_MC_LAYER_CHANNEL; | ||
255 | layers[1].size = I82443BXGX_NR_CHANS; | ||
256 | layers[1].is_virt_csrow = false; | ||
257 | mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers, 0); | ||
258 | if (mci == NULL) | 250 | if (mci == NULL) |
259 | return -ENOMEM; | 251 | return -ENOMEM; |
260 | 252 | ||
261 | edac_dbg(0, "MC: mci = %p\n", mci); | 253 | debugf0("MC: %s: %s(): mci = %p\n", __FILE__, __func__, mci); |
262 | mci->pdev = &pdev->dev; | 254 | mci->dev = &pdev->dev; |
263 | mci->mtype_cap = MEM_FLAG_EDO | MEM_FLAG_SDR | MEM_FLAG_RDR; | 255 | mci->mtype_cap = MEM_FLAG_EDO | MEM_FLAG_SDR | MEM_FLAG_RDR; |
264 | mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_EC | EDAC_FLAG_SECDED; | 256 | mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_EC | EDAC_FLAG_SECDED; |
265 | pci_read_config_byte(pdev, I82443BXGX_DRAMC, &dramc); | 257 | pci_read_config_byte(pdev, I82443BXGX_DRAMC, &dramc); |
@@ -274,7 +266,8 @@ static int i82443bxgx_edacmc_probe1(struct pci_dev *pdev, int dev_idx) | |||
274 | mtype = MEM_RDR; | 266 | mtype = MEM_RDR; |
275 | break; | 267 | break; |
276 | default: | 268 | default: |
277 | edac_dbg(0, "Unknown/reserved DRAM type value in DRAMC register!\n"); | 269 | debugf0("Unknown/reserved DRAM type value " |
270 | "in DRAMC register!\n"); | ||
278 | mtype = -MEM_UNKNOWN; | 271 | mtype = -MEM_UNKNOWN; |
279 | } | 272 | } |
280 | 273 | ||
@@ -303,7 +296,8 @@ static int i82443bxgx_edacmc_probe1(struct pci_dev *pdev, int dev_idx) | |||
303 | edac_mode = EDAC_SECDED; | 296 | edac_mode = EDAC_SECDED; |
304 | break; | 297 | break; |
305 | default: | 298 | default: |
306 | edac_dbg(0, "Unknown/reserved ECC state in NBXCFG register!\n"); | 299 | debugf0("%s(): Unknown/reserved ECC state " |
300 | "in NBXCFG register!\n", __func__); | ||
307 | edac_mode = EDAC_UNKNOWN; | 301 | edac_mode = EDAC_UNKNOWN; |
308 | break; | 302 | break; |
309 | } | 303 | } |
@@ -327,7 +321,7 @@ static int i82443bxgx_edacmc_probe1(struct pci_dev *pdev, int dev_idx) | |||
327 | mci->ctl_page_to_phys = NULL; | 321 | mci->ctl_page_to_phys = NULL; |
328 | 322 | ||
329 | if (edac_mc_add_mc(mci)) { | 323 | if (edac_mc_add_mc(mci)) { |
330 | edac_dbg(3, "failed edac_mc_add_mc()\n"); | 324 | debugf3("%s(): failed edac_mc_add_mc()\n", __func__); |
331 | goto fail; | 325 | goto fail; |
332 | } | 326 | } |
333 | 327 | ||
@@ -342,7 +336,7 @@ static int i82443bxgx_edacmc_probe1(struct pci_dev *pdev, int dev_idx) | |||
342 | __func__); | 336 | __func__); |
343 | } | 337 | } |
344 | 338 | ||
345 | edac_dbg(3, "MC: success\n"); | 339 | debugf3("MC: %s: %s(): success\n", __FILE__, __func__); |
346 | return 0; | 340 | return 0; |
347 | 341 | ||
348 | fail: | 342 | fail: |
@@ -353,12 +347,12 @@ fail: | |||
353 | EXPORT_SYMBOL_GPL(i82443bxgx_edacmc_probe1); | 347 | EXPORT_SYMBOL_GPL(i82443bxgx_edacmc_probe1); |
354 | 348 | ||
355 | /* returns count (>= 0), or negative on error */ | 349 | /* returns count (>= 0), or negative on error */ |
356 | static int i82443bxgx_edacmc_init_one(struct pci_dev *pdev, | 350 | static int __devinit i82443bxgx_edacmc_init_one(struct pci_dev *pdev, |
357 | const struct pci_device_id *ent) | 351 | const struct pci_device_id *ent) |
358 | { | 352 | { |
359 | int rc; | 353 | int rc; |
360 | 354 | ||
361 | edac_dbg(0, "MC:\n"); | 355 | debugf0("MC: %s: %s()\n", __FILE__, __func__); |
362 | 356 | ||
363 | /* don't need to call pci_enable_device() */ | 357 | /* don't need to call pci_enable_device() */ |
364 | rc = i82443bxgx_edacmc_probe1(pdev, ent->driver_data); | 358 | rc = i82443bxgx_edacmc_probe1(pdev, ent->driver_data); |
@@ -369,11 +363,11 @@ static int i82443bxgx_edacmc_init_one(struct pci_dev *pdev, | |||
369 | return rc; | 363 | return rc; |
370 | } | 364 | } |
371 | 365 | ||
372 | static void i82443bxgx_edacmc_remove_one(struct pci_dev *pdev) | 366 | static void __devexit i82443bxgx_edacmc_remove_one(struct pci_dev *pdev) |
373 | { | 367 | { |
374 | struct mem_ctl_info *mci; | 368 | struct mem_ctl_info *mci; |
375 | 369 | ||
376 | edac_dbg(0, "\n"); | 370 | debugf0("%s: %s()\n", __FILE__, __func__); |
377 | 371 | ||
378 | if (i82443bxgx_pci) | 372 | if (i82443bxgx_pci) |
379 | edac_pci_release_generic_ctl(i82443bxgx_pci); | 373 | edac_pci_release_generic_ctl(i82443bxgx_pci); |
@@ -386,7 +380,7 @@ static void i82443bxgx_edacmc_remove_one(struct pci_dev *pdev) | |||
386 | 380 | ||
387 | EXPORT_SYMBOL_GPL(i82443bxgx_edacmc_remove_one); | 381 | EXPORT_SYMBOL_GPL(i82443bxgx_edacmc_remove_one); |
388 | 382 | ||
389 | static DEFINE_PCI_DEVICE_TABLE(i82443bxgx_pci_tbl) = { | 383 | static const struct pci_device_id i82443bxgx_pci_tbl[] __devinitdata = { |
390 | {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443BX_0)}, | 384 | {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443BX_0)}, |
391 | {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443BX_2)}, | 385 | {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443BX_2)}, |
392 | {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443GX_0)}, | 386 | {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443GX_0)}, |
@@ -399,7 +393,7 @@ MODULE_DEVICE_TABLE(pci, i82443bxgx_pci_tbl); | |||
399 | static struct pci_driver i82443bxgx_edacmc_driver = { | 393 | static struct pci_driver i82443bxgx_edacmc_driver = { |
400 | .name = EDAC_MOD_STR, | 394 | .name = EDAC_MOD_STR, |
401 | .probe = i82443bxgx_edacmc_init_one, | 395 | .probe = i82443bxgx_edacmc_init_one, |
402 | .remove = i82443bxgx_edacmc_remove_one, | 396 | .remove = __devexit_p(i82443bxgx_edacmc_remove_one), |
403 | .id_table = i82443bxgx_pci_tbl, | 397 | .id_table = i82443bxgx_pci_tbl, |
404 | }; | 398 | }; |
405 | 399 | ||
@@ -425,7 +419,7 @@ static int __init i82443bxgx_edacmc_init(void) | |||
425 | id = &i82443bxgx_pci_tbl[i]; | 419 | id = &i82443bxgx_pci_tbl[i]; |
426 | } | 420 | } |
427 | if (!mci_pdev) { | 421 | if (!mci_pdev) { |
428 | edac_dbg(0, "i82443bxgx pci_get_device fail\n"); | 422 | debugf0("i82443bxgx pci_get_device fail\n"); |
429 | pci_rc = -ENODEV; | 423 | pci_rc = -ENODEV; |
430 | goto fail1; | 424 | goto fail1; |
431 | } | 425 | } |
@@ -433,7 +427,7 @@ static int __init i82443bxgx_edacmc_init(void) | |||
433 | pci_rc = i82443bxgx_edacmc_init_one(mci_pdev, i82443bxgx_pci_tbl); | 427 | pci_rc = i82443bxgx_edacmc_init_one(mci_pdev, i82443bxgx_pci_tbl); |
434 | 428 | ||
435 | if (pci_rc < 0) { | 429 | if (pci_rc < 0) { |
436 | edac_dbg(0, "i82443bxgx init fail\n"); | 430 | debugf0("i82443bxgx init fail\n"); |
437 | pci_rc = -ENODEV; | 431 | pci_rc = -ENODEV; |
438 | goto fail1; | 432 | goto fail1; |
439 | } | 433 | } |
diff --git a/drivers/edac/i82860_edac.c b/drivers/edac/i82860_edac.c index 3e3e431c830..931a0577504 100644 --- a/drivers/edac/i82860_edac.c +++ b/drivers/edac/i82860_edac.c | |||
@@ -67,7 +67,7 @@ static void i82860_get_error_info(struct mem_ctl_info *mci, | |||
67 | { | 67 | { |
68 | struct pci_dev *pdev; | 68 | struct pci_dev *pdev; |
69 | 69 | ||
70 | pdev = to_pci_dev(mci->pdev); | 70 | pdev = to_pci_dev(mci->dev); |
71 | 71 | ||
72 | /* | 72 | /* |
73 | * This is a mess because there is no atomic way to read all the | 73 | * This is a mess because there is no atomic way to read all the |
@@ -99,7 +99,6 @@ static int i82860_process_error_info(struct mem_ctl_info *mci, | |||
99 | struct i82860_error_info *info, | 99 | struct i82860_error_info *info, |
100 | int handle_errors) | 100 | int handle_errors) |
101 | { | 101 | { |
102 | struct dimm_info *dimm; | ||
103 | int row; | 102 | int row; |
104 | 103 | ||
105 | if (!(info->errsts2 & 0x0003)) | 104 | if (!(info->errsts2 & 0x0003)) |
@@ -109,25 +108,18 @@ static int i82860_process_error_info(struct mem_ctl_info *mci, | |||
109 | return 1; | 108 | return 1; |
110 | 109 | ||
111 | if ((info->errsts ^ info->errsts2) & 0x0003) { | 110 | if ((info->errsts ^ info->errsts2) & 0x0003) { |
112 | edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1, 0, 0, 0, | 111 | edac_mc_handle_ce_no_info(mci, "UE overwrote CE"); |
113 | -1, -1, -1, "UE overwrote CE", ""); | ||
114 | info->errsts = info->errsts2; | 112 | info->errsts = info->errsts2; |
115 | } | 113 | } |
116 | 114 | ||
117 | info->eap >>= PAGE_SHIFT; | 115 | info->eap >>= PAGE_SHIFT; |
118 | row = edac_mc_find_csrow_by_page(mci, info->eap); | 116 | row = edac_mc_find_csrow_by_page(mci, info->eap); |
119 | dimm = mci->csrows[row]->channels[0]->dimm; | ||
120 | 117 | ||
121 | if (info->errsts & 0x0002) | 118 | if (info->errsts & 0x0002) |
122 | edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1, | 119 | edac_mc_handle_ue(mci, info->eap, 0, row, "i82860 UE"); |
123 | info->eap, 0, 0, | ||
124 | dimm->location[0], dimm->location[1], -1, | ||
125 | "i82860 UE", ""); | ||
126 | else | 120 | else |
127 | edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1, | 121 | edac_mc_handle_ce(mci, info->eap, 0, info->derrsyn, row, 0, |
128 | info->eap, 0, info->derrsyn, | 122 | "i82860 UE"); |
129 | dimm->location[0], dimm->location[1], -1, | ||
130 | "i82860 CE", ""); | ||
131 | 123 | ||
132 | return 1; | 124 | return 1; |
133 | } | 125 | } |
@@ -136,7 +128,7 @@ static void i82860_check(struct mem_ctl_info *mci) | |||
136 | { | 128 | { |
137 | struct i82860_error_info info; | 129 | struct i82860_error_info info; |
138 | 130 | ||
139 | edac_dbg(1, "MC%d\n", mci->mc_idx); | 131 | debugf1("MC%d: %s()\n", mci->mc_idx, __func__); |
140 | i82860_get_error_info(mci, &info); | 132 | i82860_get_error_info(mci, &info); |
141 | i82860_process_error_info(mci, &info, 1); | 133 | i82860_process_error_info(mci, &info, 1); |
142 | } | 134 | } |
@@ -148,7 +140,6 @@ static void i82860_init_csrows(struct mem_ctl_info *mci, struct pci_dev *pdev) | |||
148 | u16 value; | 140 | u16 value; |
149 | u32 cumul_size; | 141 | u32 cumul_size; |
150 | struct csrow_info *csrow; | 142 | struct csrow_info *csrow; |
151 | struct dimm_info *dimm; | ||
152 | int index; | 143 | int index; |
153 | 144 | ||
154 | pci_read_config_word(pdev, I82860_MCHCFG, &mchcfg_ddim); | 145 | pci_read_config_word(pdev, I82860_MCHCFG, &mchcfg_ddim); |
@@ -161,56 +152,47 @@ static void i82860_init_csrows(struct mem_ctl_info *mci, struct pci_dev *pdev) | |||
161 | * in all eight rows. | 152 | * in all eight rows. |
162 | */ | 153 | */ |
163 | for (index = 0; index < mci->nr_csrows; index++) { | 154 | for (index = 0; index < mci->nr_csrows; index++) { |
164 | csrow = mci->csrows[index]; | 155 | csrow = &mci->csrows[index]; |
165 | dimm = csrow->channels[0]->dimm; | ||
166 | |||
167 | pci_read_config_word(pdev, I82860_GBA + index * 2, &value); | 156 | pci_read_config_word(pdev, I82860_GBA + index * 2, &value); |
168 | cumul_size = (value & I82860_GBA_MASK) << | 157 | cumul_size = (value & I82860_GBA_MASK) << |
169 | (I82860_GBA_SHIFT - PAGE_SHIFT); | 158 | (I82860_GBA_SHIFT - PAGE_SHIFT); |
170 | edac_dbg(3, "(%d) cumul_size 0x%x\n", index, cumul_size); | 159 | debugf3("%s(): (%d) cumul_size 0x%x\n", __func__, index, |
160 | cumul_size); | ||
171 | 161 | ||
172 | if (cumul_size == last_cumul_size) | 162 | if (cumul_size == last_cumul_size) |
173 | continue; /* not populated */ | 163 | continue; /* not populated */ |
174 | 164 | ||
175 | csrow->first_page = last_cumul_size; | 165 | csrow->first_page = last_cumul_size; |
176 | csrow->last_page = cumul_size - 1; | 166 | csrow->last_page = cumul_size - 1; |
177 | dimm->nr_pages = cumul_size - last_cumul_size; | 167 | csrow->nr_pages = cumul_size - last_cumul_size; |
178 | last_cumul_size = cumul_size; | 168 | last_cumul_size = cumul_size; |
179 | dimm->grain = 1 << 12; /* I82860_EAP has 4KiB reolution */ | 169 | csrow->grain = 1 << 12; /* I82860_EAP has 4KiB reolution */ |
180 | dimm->mtype = MEM_RMBS; | 170 | csrow->mtype = MEM_RMBS; |
181 | dimm->dtype = DEV_UNKNOWN; | 171 | csrow->dtype = DEV_UNKNOWN; |
182 | dimm->edac_mode = mchcfg_ddim ? EDAC_SECDED : EDAC_NONE; | 172 | csrow->edac_mode = mchcfg_ddim ? EDAC_SECDED : EDAC_NONE; |
183 | } | 173 | } |
184 | } | 174 | } |
185 | 175 | ||
186 | static int i82860_probe1(struct pci_dev *pdev, int dev_idx) | 176 | static int i82860_probe1(struct pci_dev *pdev, int dev_idx) |
187 | { | 177 | { |
188 | struct mem_ctl_info *mci; | 178 | struct mem_ctl_info *mci; |
189 | struct edac_mc_layer layers[2]; | ||
190 | struct i82860_error_info discard; | 179 | struct i82860_error_info discard; |
191 | 180 | ||
192 | /* | 181 | /* RDRAM has channels but these don't map onto the abstractions that |
193 | * RDRAM has channels but these don't map onto the csrow abstraction. | 182 | edac uses. |
194 | * According with the datasheet, there are 2 Rambus channels, supporting | 183 | The device groups from the GRA registers seem to map reasonably |
195 | * up to 16 direct RDRAM devices. | 184 | well onto the notion of a chip select row. |
196 | * The device groups from the GRA registers seem to map reasonably | 185 | There are 16 GRA registers and since the name is associated with |
197 | * well onto the notion of a chip select row. | 186 | the channel and the GRA registers map to physical devices so we are |
198 | * There are 16 GRA registers and since the name is associated with | 187 | going to make 1 channel for group. |
199 | * the channel and the GRA registers map to physical devices so we are | ||
200 | * going to make 1 channel for group. | ||
201 | */ | 188 | */ |
202 | layers[0].type = EDAC_MC_LAYER_CHANNEL; | 189 | mci = edac_mc_alloc(0, 16, 1, 0); |
203 | layers[0].size = 2; | 190 | |
204 | layers[0].is_virt_csrow = true; | ||
205 | layers[1].type = EDAC_MC_LAYER_SLOT; | ||
206 | layers[1].size = 8; | ||
207 | layers[1].is_virt_csrow = true; | ||
208 | mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers, 0); | ||
209 | if (!mci) | 191 | if (!mci) |
210 | return -ENOMEM; | 192 | return -ENOMEM; |
211 | 193 | ||
212 | edac_dbg(3, "init mci\n"); | 194 | debugf3("%s(): init mci\n", __func__); |
213 | mci->pdev = &pdev->dev; | 195 | mci->dev = &pdev->dev; |
214 | mci->mtype_cap = MEM_FLAG_DDR; | 196 | mci->mtype_cap = MEM_FLAG_DDR; |
215 | mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED; | 197 | mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED; |
216 | /* I"m not sure about this but I think that all RDRAM is SECDED */ | 198 | /* I"m not sure about this but I think that all RDRAM is SECDED */ |
@@ -228,7 +210,7 @@ static int i82860_probe1(struct pci_dev *pdev, int dev_idx) | |||
228 | * type of memory controller. The ID is therefore hardcoded to 0. | 210 | * type of memory controller. The ID is therefore hardcoded to 0. |
229 | */ | 211 | */ |
230 | if (edac_mc_add_mc(mci)) { | 212 | if (edac_mc_add_mc(mci)) { |
231 | edac_dbg(3, "failed edac_mc_add_mc()\n"); | 213 | debugf3("%s(): failed edac_mc_add_mc()\n", __func__); |
232 | goto fail; | 214 | goto fail; |
233 | } | 215 | } |
234 | 216 | ||
@@ -244,7 +226,7 @@ static int i82860_probe1(struct pci_dev *pdev, int dev_idx) | |||
244 | } | 226 | } |
245 | 227 | ||
246 | /* get this far and it's successful */ | 228 | /* get this far and it's successful */ |
247 | edac_dbg(3, "success\n"); | 229 | debugf3("%s(): success\n", __func__); |
248 | 230 | ||
249 | return 0; | 231 | return 0; |
250 | 232 | ||
@@ -254,12 +236,12 @@ fail: | |||
254 | } | 236 | } |
255 | 237 | ||
256 | /* returns count (>= 0), or negative on error */ | 238 | /* returns count (>= 0), or negative on error */ |
257 | static int i82860_init_one(struct pci_dev *pdev, | 239 | static int __devinit i82860_init_one(struct pci_dev *pdev, |
258 | const struct pci_device_id *ent) | 240 | const struct pci_device_id *ent) |
259 | { | 241 | { |
260 | int rc; | 242 | int rc; |
261 | 243 | ||
262 | edac_dbg(0, "\n"); | 244 | debugf0("%s()\n", __func__); |
263 | i82860_printk(KERN_INFO, "i82860 init one\n"); | 245 | i82860_printk(KERN_INFO, "i82860 init one\n"); |
264 | 246 | ||
265 | if (pci_enable_device(pdev) < 0) | 247 | if (pci_enable_device(pdev) < 0) |
@@ -273,11 +255,11 @@ static int i82860_init_one(struct pci_dev *pdev, | |||
273 | return rc; | 255 | return rc; |
274 | } | 256 | } |
275 | 257 | ||
276 | static void i82860_remove_one(struct pci_dev *pdev) | 258 | static void __devexit i82860_remove_one(struct pci_dev *pdev) |
277 | { | 259 | { |
278 | struct mem_ctl_info *mci; | 260 | struct mem_ctl_info *mci; |
279 | 261 | ||
280 | edac_dbg(0, "\n"); | 262 | debugf0("%s()\n", __func__); |
281 | 263 | ||
282 | if (i82860_pci) | 264 | if (i82860_pci) |
283 | edac_pci_release_generic_ctl(i82860_pci); | 265 | edac_pci_release_generic_ctl(i82860_pci); |
@@ -288,7 +270,7 @@ static void i82860_remove_one(struct pci_dev *pdev) | |||
288 | edac_mc_free(mci); | 270 | edac_mc_free(mci); |
289 | } | 271 | } |
290 | 272 | ||
291 | static DEFINE_PCI_DEVICE_TABLE(i82860_pci_tbl) = { | 273 | static const struct pci_device_id i82860_pci_tbl[] __devinitdata = { |
292 | { | 274 | { |
293 | PCI_VEND_DEV(INTEL, 82860_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0, | 275 | PCI_VEND_DEV(INTEL, 82860_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0, |
294 | I82860}, | 276 | I82860}, |
@@ -302,7 +284,7 @@ MODULE_DEVICE_TABLE(pci, i82860_pci_tbl); | |||
302 | static struct pci_driver i82860_driver = { | 284 | static struct pci_driver i82860_driver = { |
303 | .name = EDAC_MOD_STR, | 285 | .name = EDAC_MOD_STR, |
304 | .probe = i82860_init_one, | 286 | .probe = i82860_init_one, |
305 | .remove = i82860_remove_one, | 287 | .remove = __devexit_p(i82860_remove_one), |
306 | .id_table = i82860_pci_tbl, | 288 | .id_table = i82860_pci_tbl, |
307 | }; | 289 | }; |
308 | 290 | ||
@@ -310,7 +292,7 @@ static int __init i82860_init(void) | |||
310 | { | 292 | { |
311 | int pci_rc; | 293 | int pci_rc; |
312 | 294 | ||
313 | edac_dbg(3, "\n"); | 295 | debugf3("%s()\n", __func__); |
314 | 296 | ||
315 | /* Ensure that the OPSTATE is set correctly for POLL or NMI */ | 297 | /* Ensure that the OPSTATE is set correctly for POLL or NMI */ |
316 | opstate_init(); | 298 | opstate_init(); |
@@ -323,7 +305,7 @@ static int __init i82860_init(void) | |||
323 | PCI_DEVICE_ID_INTEL_82860_0, NULL); | 305 | PCI_DEVICE_ID_INTEL_82860_0, NULL); |
324 | 306 | ||
325 | if (mci_pdev == NULL) { | 307 | if (mci_pdev == NULL) { |
326 | edac_dbg(0, "860 pci_get_device fail\n"); | 308 | debugf0("860 pci_get_device fail\n"); |
327 | pci_rc = -ENODEV; | 309 | pci_rc = -ENODEV; |
328 | goto fail1; | 310 | goto fail1; |
329 | } | 311 | } |
@@ -331,7 +313,7 @@ static int __init i82860_init(void) | |||
331 | pci_rc = i82860_init_one(mci_pdev, i82860_pci_tbl); | 313 | pci_rc = i82860_init_one(mci_pdev, i82860_pci_tbl); |
332 | 314 | ||
333 | if (pci_rc < 0) { | 315 | if (pci_rc < 0) { |
334 | edac_dbg(0, "860 init fail\n"); | 316 | debugf0("860 init fail\n"); |
335 | pci_rc = -ENODEV; | 317 | pci_rc = -ENODEV; |
336 | goto fail1; | 318 | goto fail1; |
337 | } | 319 | } |
@@ -351,7 +333,7 @@ fail0: | |||
351 | 333 | ||
352 | static void __exit i82860_exit(void) | 334 | static void __exit i82860_exit(void) |
353 | { | 335 | { |
354 | edac_dbg(3, "\n"); | 336 | debugf3("%s()\n", __func__); |
355 | 337 | ||
356 | pci_unregister_driver(&i82860_driver); | 338 | pci_unregister_driver(&i82860_driver); |
357 | 339 | ||
diff --git a/drivers/edac/i82875p_edac.c b/drivers/edac/i82875p_edac.c index 2f8535fc451..33864c63c68 100644 --- a/drivers/edac/i82875p_edac.c +++ b/drivers/edac/i82875p_edac.c | |||
@@ -38,8 +38,7 @@ | |||
38 | #endif /* PCI_DEVICE_ID_INTEL_82875_6 */ | 38 | #endif /* PCI_DEVICE_ID_INTEL_82875_6 */ |
39 | 39 | ||
40 | /* four csrows in dual channel, eight in single channel */ | 40 | /* four csrows in dual channel, eight in single channel */ |
41 | #define I82875P_NR_DIMMS 8 | 41 | #define I82875P_NR_CSROWS(nr_chans) (8/(nr_chans)) |
42 | #define I82875P_NR_CSROWS(nr_chans) (I82875P_NR_DIMMS / (nr_chans)) | ||
43 | 42 | ||
44 | /* Intel 82875p register addresses - device 0 function 0 - DRAM Controller */ | 43 | /* Intel 82875p register addresses - device 0 function 0 - DRAM Controller */ |
45 | #define I82875P_EAP 0x58 /* Error Address Pointer (32b) | 44 | #define I82875P_EAP 0x58 /* Error Address Pointer (32b) |
@@ -189,7 +188,7 @@ static void i82875p_get_error_info(struct mem_ctl_info *mci, | |||
189 | { | 188 | { |
190 | struct pci_dev *pdev; | 189 | struct pci_dev *pdev; |
191 | 190 | ||
192 | pdev = to_pci_dev(mci->pdev); | 191 | pdev = to_pci_dev(mci->dev); |
193 | 192 | ||
194 | /* | 193 | /* |
195 | * This is a mess because there is no atomic way to read all the | 194 | * This is a mess because there is no atomic way to read all the |
@@ -227,7 +226,7 @@ static int i82875p_process_error_info(struct mem_ctl_info *mci, | |||
227 | { | 226 | { |
228 | int row, multi_chan; | 227 | int row, multi_chan; |
229 | 228 | ||
230 | multi_chan = mci->csrows[0]->nr_channels - 1; | 229 | multi_chan = mci->csrows[0].nr_channels - 1; |
231 | 230 | ||
232 | if (!(info->errsts & 0x0081)) | 231 | if (!(info->errsts & 0x0081)) |
233 | return 0; | 232 | return 0; |
@@ -236,9 +235,7 @@ static int i82875p_process_error_info(struct mem_ctl_info *mci, | |||
236 | return 1; | 235 | return 1; |
237 | 236 | ||
238 | if ((info->errsts ^ info->errsts2) & 0x0081) { | 237 | if ((info->errsts ^ info->errsts2) & 0x0081) { |
239 | edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1, 0, 0, 0, | 238 | edac_mc_handle_ce_no_info(mci, "UE overwrote CE"); |
240 | -1, -1, -1, | ||
241 | "UE overwrote CE", ""); | ||
242 | info->errsts = info->errsts2; | 239 | info->errsts = info->errsts2; |
243 | } | 240 | } |
244 | 241 | ||
@@ -246,15 +243,11 @@ static int i82875p_process_error_info(struct mem_ctl_info *mci, | |||
246 | row = edac_mc_find_csrow_by_page(mci, info->eap); | 243 | row = edac_mc_find_csrow_by_page(mci, info->eap); |
247 | 244 | ||
248 | if (info->errsts & 0x0080) | 245 | if (info->errsts & 0x0080) |
249 | edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1, | 246 | edac_mc_handle_ue(mci, info->eap, 0, row, "i82875p UE"); |
250 | info->eap, 0, 0, | ||
251 | row, -1, -1, | ||
252 | "i82875p UE", ""); | ||
253 | else | 247 | else |
254 | edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1, | 248 | edac_mc_handle_ce(mci, info->eap, 0, info->derrsyn, row, |
255 | info->eap, 0, info->derrsyn, | 249 | multi_chan ? (info->des & 0x1) : 0, |
256 | row, multi_chan ? (info->des & 0x1) : 0, | 250 | "i82875p CE"); |
257 | -1, "i82875p CE", ""); | ||
258 | 251 | ||
259 | return 1; | 252 | return 1; |
260 | } | 253 | } |
@@ -263,7 +256,7 @@ static void i82875p_check(struct mem_ctl_info *mci) | |||
263 | { | 256 | { |
264 | struct i82875p_error_info info; | 257 | struct i82875p_error_info info; |
265 | 258 | ||
266 | edac_dbg(1, "MC%d\n", mci->mc_idx); | 259 | debugf1("MC%d: %s()\n", mci->mc_idx, __func__); |
267 | i82875p_get_error_info(mci, &info); | 260 | i82875p_get_error_info(mci, &info); |
268 | i82875p_process_error_info(mci, &info, 1); | 261 | i82875p_process_error_info(mci, &info, 1); |
269 | } | 262 | } |
@@ -349,13 +342,11 @@ static void i82875p_init_csrows(struct mem_ctl_info *mci, | |||
349 | void __iomem * ovrfl_window, u32 drc) | 342 | void __iomem * ovrfl_window, u32 drc) |
350 | { | 343 | { |
351 | struct csrow_info *csrow; | 344 | struct csrow_info *csrow; |
352 | struct dimm_info *dimm; | ||
353 | unsigned nr_chans = dual_channel_active(drc) + 1; | ||
354 | unsigned long last_cumul_size; | 345 | unsigned long last_cumul_size; |
355 | u8 value; | 346 | u8 value; |
356 | u32 drc_ddim; /* DRAM Data Integrity Mode 0=none,2=edac */ | 347 | u32 drc_ddim; /* DRAM Data Integrity Mode 0=none,2=edac */ |
357 | u32 cumul_size, nr_pages; | 348 | u32 cumul_size; |
358 | int index, j; | 349 | int index; |
359 | 350 | ||
360 | drc_ddim = (drc >> 18) & 0x1; | 351 | drc_ddim = (drc >> 18) & 0x1; |
361 | last_cumul_size = 0; | 352 | last_cumul_size = 0; |
@@ -367,28 +358,23 @@ static void i82875p_init_csrows(struct mem_ctl_info *mci, | |||
367 | */ | 358 | */ |
368 | 359 | ||
369 | for (index = 0; index < mci->nr_csrows; index++) { | 360 | for (index = 0; index < mci->nr_csrows; index++) { |
370 | csrow = mci->csrows[index]; | 361 | csrow = &mci->csrows[index]; |
371 | 362 | ||
372 | value = readb(ovrfl_window + I82875P_DRB + index); | 363 | value = readb(ovrfl_window + I82875P_DRB + index); |
373 | cumul_size = value << (I82875P_DRB_SHIFT - PAGE_SHIFT); | 364 | cumul_size = value << (I82875P_DRB_SHIFT - PAGE_SHIFT); |
374 | edac_dbg(3, "(%d) cumul_size 0x%x\n", index, cumul_size); | 365 | debugf3("%s(): (%d) cumul_size 0x%x\n", __func__, index, |
366 | cumul_size); | ||
375 | if (cumul_size == last_cumul_size) | 367 | if (cumul_size == last_cumul_size) |
376 | continue; /* not populated */ | 368 | continue; /* not populated */ |
377 | 369 | ||
378 | csrow->first_page = last_cumul_size; | 370 | csrow->first_page = last_cumul_size; |
379 | csrow->last_page = cumul_size - 1; | 371 | csrow->last_page = cumul_size - 1; |
380 | nr_pages = cumul_size - last_cumul_size; | 372 | csrow->nr_pages = cumul_size - last_cumul_size; |
381 | last_cumul_size = cumul_size; | 373 | last_cumul_size = cumul_size; |
382 | 374 | csrow->grain = 1 << 12; /* I82875P_EAP has 4KiB reolution */ | |
383 | for (j = 0; j < nr_chans; j++) { | 375 | csrow->mtype = MEM_DDR; |
384 | dimm = csrow->channels[j]->dimm; | 376 | csrow->dtype = DEV_UNKNOWN; |
385 | 377 | csrow->edac_mode = drc_ddim ? EDAC_SECDED : EDAC_NONE; | |
386 | dimm->nr_pages = nr_pages / nr_chans; | ||
387 | dimm->grain = 1 << 12; /* I82875P_EAP has 4KiB reolution */ | ||
388 | dimm->mtype = MEM_DDR; | ||
389 | dimm->dtype = DEV_UNKNOWN; | ||
390 | dimm->edac_mode = drc_ddim ? EDAC_SECDED : EDAC_NONE; | ||
391 | } | ||
392 | } | 378 | } |
393 | } | 379 | } |
394 | 380 | ||
@@ -396,7 +382,6 @@ static int i82875p_probe1(struct pci_dev *pdev, int dev_idx) | |||
396 | { | 382 | { |
397 | int rc = -ENODEV; | 383 | int rc = -ENODEV; |
398 | struct mem_ctl_info *mci; | 384 | struct mem_ctl_info *mci; |
399 | struct edac_mc_layer layers[2]; | ||
400 | struct i82875p_pvt *pvt; | 385 | struct i82875p_pvt *pvt; |
401 | struct pci_dev *ovrfl_pdev; | 386 | struct pci_dev *ovrfl_pdev; |
402 | void __iomem *ovrfl_window; | 387 | void __iomem *ovrfl_window; |
@@ -404,7 +389,7 @@ static int i82875p_probe1(struct pci_dev *pdev, int dev_idx) | |||
404 | u32 nr_chans; | 389 | u32 nr_chans; |
405 | struct i82875p_error_info discard; | 390 | struct i82875p_error_info discard; |
406 | 391 | ||
407 | edac_dbg(0, "\n"); | 392 | debugf0("%s()\n", __func__); |
408 | 393 | ||
409 | ovrfl_pdev = pci_get_device(PCI_VEND_DEV(INTEL, 82875_6), NULL); | 394 | ovrfl_pdev = pci_get_device(PCI_VEND_DEV(INTEL, 82875_6), NULL); |
410 | 395 | ||
@@ -412,21 +397,19 @@ static int i82875p_probe1(struct pci_dev *pdev, int dev_idx) | |||
412 | return -ENODEV; | 397 | return -ENODEV; |
413 | drc = readl(ovrfl_window + I82875P_DRC); | 398 | drc = readl(ovrfl_window + I82875P_DRC); |
414 | nr_chans = dual_channel_active(drc) + 1; | 399 | nr_chans = dual_channel_active(drc) + 1; |
400 | mci = edac_mc_alloc(sizeof(*pvt), I82875P_NR_CSROWS(nr_chans), | ||
401 | nr_chans, 0); | ||
415 | 402 | ||
416 | layers[0].type = EDAC_MC_LAYER_CHIP_SELECT; | ||
417 | layers[0].size = I82875P_NR_CSROWS(nr_chans); | ||
418 | layers[0].is_virt_csrow = true; | ||
419 | layers[1].type = EDAC_MC_LAYER_CHANNEL; | ||
420 | layers[1].size = nr_chans; | ||
421 | layers[1].is_virt_csrow = false; | ||
422 | mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers, sizeof(*pvt)); | ||
423 | if (!mci) { | 403 | if (!mci) { |
424 | rc = -ENOMEM; | 404 | rc = -ENOMEM; |
425 | goto fail0; | 405 | goto fail0; |
426 | } | 406 | } |
427 | 407 | ||
428 | edac_dbg(3, "init mci\n"); | 408 | /* Keeps mci available after edac_mc_del_mc() till edac_mc_free() */ |
429 | mci->pdev = &pdev->dev; | 409 | kobject_get(&mci->edac_mci_kobj); |
410 | |||
411 | debugf3("%s(): init mci\n", __func__); | ||
412 | mci->dev = &pdev->dev; | ||
430 | mci->mtype_cap = MEM_FLAG_DDR; | 413 | mci->mtype_cap = MEM_FLAG_DDR; |
431 | mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED; | 414 | mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED; |
432 | mci->edac_cap = EDAC_FLAG_UNKNOWN; | 415 | mci->edac_cap = EDAC_FLAG_UNKNOWN; |
@@ -436,7 +419,7 @@ static int i82875p_probe1(struct pci_dev *pdev, int dev_idx) | |||
436 | mci->dev_name = pci_name(pdev); | 419 | mci->dev_name = pci_name(pdev); |
437 | mci->edac_check = i82875p_check; | 420 | mci->edac_check = i82875p_check; |
438 | mci->ctl_page_to_phys = NULL; | 421 | mci->ctl_page_to_phys = NULL; |
439 | edac_dbg(3, "init pvt\n"); | 422 | debugf3("%s(): init pvt\n", __func__); |
440 | pvt = (struct i82875p_pvt *)mci->pvt_info; | 423 | pvt = (struct i82875p_pvt *)mci->pvt_info; |
441 | pvt->ovrfl_pdev = ovrfl_pdev; | 424 | pvt->ovrfl_pdev = ovrfl_pdev; |
442 | pvt->ovrfl_window = ovrfl_window; | 425 | pvt->ovrfl_window = ovrfl_window; |
@@ -447,7 +430,7 @@ static int i82875p_probe1(struct pci_dev *pdev, int dev_idx) | |||
447 | * type of memory controller. The ID is therefore hardcoded to 0. | 430 | * type of memory controller. The ID is therefore hardcoded to 0. |
448 | */ | 431 | */ |
449 | if (edac_mc_add_mc(mci)) { | 432 | if (edac_mc_add_mc(mci)) { |
450 | edac_dbg(3, "failed edac_mc_add_mc()\n"); | 433 | debugf3("%s(): failed edac_mc_add_mc()\n", __func__); |
451 | goto fail1; | 434 | goto fail1; |
452 | } | 435 | } |
453 | 436 | ||
@@ -463,10 +446,11 @@ static int i82875p_probe1(struct pci_dev *pdev, int dev_idx) | |||
463 | } | 446 | } |
464 | 447 | ||
465 | /* get this far and it's successful */ | 448 | /* get this far and it's successful */ |
466 | edac_dbg(3, "success\n"); | 449 | debugf3("%s(): success\n", __func__); |
467 | return 0; | 450 | return 0; |
468 | 451 | ||
469 | fail1: | 452 | fail1: |
453 | kobject_put(&mci->edac_mci_kobj); | ||
470 | edac_mc_free(mci); | 454 | edac_mc_free(mci); |
471 | 455 | ||
472 | fail0: | 456 | fail0: |
@@ -479,12 +463,12 @@ fail0: | |||
479 | } | 463 | } |
480 | 464 | ||
481 | /* returns count (>= 0), or negative on error */ | 465 | /* returns count (>= 0), or negative on error */ |
482 | static int i82875p_init_one(struct pci_dev *pdev, | 466 | static int __devinit i82875p_init_one(struct pci_dev *pdev, |
483 | const struct pci_device_id *ent) | 467 | const struct pci_device_id *ent) |
484 | { | 468 | { |
485 | int rc; | 469 | int rc; |
486 | 470 | ||
487 | edac_dbg(0, "\n"); | 471 | debugf0("%s()\n", __func__); |
488 | i82875p_printk(KERN_INFO, "i82875p init one\n"); | 472 | i82875p_printk(KERN_INFO, "i82875p init one\n"); |
489 | 473 | ||
490 | if (pci_enable_device(pdev) < 0) | 474 | if (pci_enable_device(pdev) < 0) |
@@ -498,12 +482,12 @@ static int i82875p_init_one(struct pci_dev *pdev, | |||
498 | return rc; | 482 | return rc; |
499 | } | 483 | } |
500 | 484 | ||
501 | static void i82875p_remove_one(struct pci_dev *pdev) | 485 | static void __devexit i82875p_remove_one(struct pci_dev *pdev) |
502 | { | 486 | { |
503 | struct mem_ctl_info *mci; | 487 | struct mem_ctl_info *mci; |
504 | struct i82875p_pvt *pvt = NULL; | 488 | struct i82875p_pvt *pvt = NULL; |
505 | 489 | ||
506 | edac_dbg(0, "\n"); | 490 | debugf0("%s()\n", __func__); |
507 | 491 | ||
508 | if (i82875p_pci) | 492 | if (i82875p_pci) |
509 | edac_pci_release_generic_ctl(i82875p_pci); | 493 | edac_pci_release_generic_ctl(i82875p_pci); |
@@ -527,7 +511,7 @@ static void i82875p_remove_one(struct pci_dev *pdev) | |||
527 | edac_mc_free(mci); | 511 | edac_mc_free(mci); |
528 | } | 512 | } |
529 | 513 | ||
530 | static DEFINE_PCI_DEVICE_TABLE(i82875p_pci_tbl) = { | 514 | static const struct pci_device_id i82875p_pci_tbl[] __devinitdata = { |
531 | { | 515 | { |
532 | PCI_VEND_DEV(INTEL, 82875_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0, | 516 | PCI_VEND_DEV(INTEL, 82875_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0, |
533 | I82875P}, | 517 | I82875P}, |
@@ -541,7 +525,7 @@ MODULE_DEVICE_TABLE(pci, i82875p_pci_tbl); | |||
541 | static struct pci_driver i82875p_driver = { | 525 | static struct pci_driver i82875p_driver = { |
542 | .name = EDAC_MOD_STR, | 526 | .name = EDAC_MOD_STR, |
543 | .probe = i82875p_init_one, | 527 | .probe = i82875p_init_one, |
544 | .remove = i82875p_remove_one, | 528 | .remove = __devexit_p(i82875p_remove_one), |
545 | .id_table = i82875p_pci_tbl, | 529 | .id_table = i82875p_pci_tbl, |
546 | }; | 530 | }; |
547 | 531 | ||
@@ -549,7 +533,7 @@ static int __init i82875p_init(void) | |||
549 | { | 533 | { |
550 | int pci_rc; | 534 | int pci_rc; |
551 | 535 | ||
552 | edac_dbg(3, "\n"); | 536 | debugf3("%s()\n", __func__); |
553 | 537 | ||
554 | /* Ensure that the OPSTATE is set correctly for POLL or NMI */ | 538 | /* Ensure that the OPSTATE is set correctly for POLL or NMI */ |
555 | opstate_init(); | 539 | opstate_init(); |
@@ -564,7 +548,7 @@ static int __init i82875p_init(void) | |||
564 | PCI_DEVICE_ID_INTEL_82875_0, NULL); | 548 | PCI_DEVICE_ID_INTEL_82875_0, NULL); |
565 | 549 | ||
566 | if (!mci_pdev) { | 550 | if (!mci_pdev) { |
567 | edac_dbg(0, "875p pci_get_device fail\n"); | 551 | debugf0("875p pci_get_device fail\n"); |
568 | pci_rc = -ENODEV; | 552 | pci_rc = -ENODEV; |
569 | goto fail1; | 553 | goto fail1; |
570 | } | 554 | } |
@@ -572,7 +556,7 @@ static int __init i82875p_init(void) | |||
572 | pci_rc = i82875p_init_one(mci_pdev, i82875p_pci_tbl); | 556 | pci_rc = i82875p_init_one(mci_pdev, i82875p_pci_tbl); |
573 | 557 | ||
574 | if (pci_rc < 0) { | 558 | if (pci_rc < 0) { |
575 | edac_dbg(0, "875p init fail\n"); | 559 | debugf0("875p init fail\n"); |
576 | pci_rc = -ENODEV; | 560 | pci_rc = -ENODEV; |
577 | goto fail1; | 561 | goto fail1; |
578 | } | 562 | } |
@@ -592,7 +576,7 @@ fail0: | |||
592 | 576 | ||
593 | static void __exit i82875p_exit(void) | 577 | static void __exit i82875p_exit(void) |
594 | { | 578 | { |
595 | edac_dbg(3, "\n"); | 579 | debugf3("%s()\n", __func__); |
596 | 580 | ||
597 | i82875p_remove_one(mci_pdev); | 581 | i82875p_remove_one(mci_pdev); |
598 | pci_dev_put(mci_pdev); | 582 | pci_dev_put(mci_pdev); |
diff --git a/drivers/edac/i82975x_edac.c b/drivers/edac/i82975x_edac.c index 0c8d4b0eaa3..a5da732fe5b 100644 --- a/drivers/edac/i82975x_edac.c +++ b/drivers/edac/i82975x_edac.c | |||
@@ -29,8 +29,7 @@ | |||
29 | #define PCI_DEVICE_ID_INTEL_82975_0 0x277c | 29 | #define PCI_DEVICE_ID_INTEL_82975_0 0x277c |
30 | #endif /* PCI_DEVICE_ID_INTEL_82975_0 */ | 30 | #endif /* PCI_DEVICE_ID_INTEL_82975_0 */ |
31 | 31 | ||
32 | #define I82975X_NR_DIMMS 8 | 32 | #define I82975X_NR_CSROWS(nr_chans) (8/(nr_chans)) |
33 | #define I82975X_NR_CSROWS(nr_chans) (I82975X_NR_DIMMS / (nr_chans)) | ||
34 | 33 | ||
35 | /* Intel 82975X register addresses - device 0 function 0 - DRAM Controller */ | 34 | /* Intel 82975X register addresses - device 0 function 0 - DRAM Controller */ |
36 | #define I82975X_EAP 0x58 /* Dram Error Address Pointer (32b) | 35 | #define I82975X_EAP 0x58 /* Dram Error Address Pointer (32b) |
@@ -241,7 +240,7 @@ static void i82975x_get_error_info(struct mem_ctl_info *mci, | |||
241 | { | 240 | { |
242 | struct pci_dev *pdev; | 241 | struct pci_dev *pdev; |
243 | 242 | ||
244 | pdev = to_pci_dev(mci->pdev); | 243 | pdev = to_pci_dev(mci->dev); |
245 | 244 | ||
246 | /* | 245 | /* |
247 | * This is a mess because there is no atomic way to read all the | 246 | * This is a mess because there is no atomic way to read all the |
@@ -278,9 +277,11 @@ static void i82975x_get_error_info(struct mem_ctl_info *mci, | |||
278 | static int i82975x_process_error_info(struct mem_ctl_info *mci, | 277 | static int i82975x_process_error_info(struct mem_ctl_info *mci, |
279 | struct i82975x_error_info *info, int handle_errors) | 278 | struct i82975x_error_info *info, int handle_errors) |
280 | { | 279 | { |
281 | int row, chan; | 280 | int row, multi_chan, chan; |
282 | unsigned long offst, page; | 281 | unsigned long offst, page; |
283 | 282 | ||
283 | multi_chan = mci->csrows[0].nr_channels - 1; | ||
284 | |||
284 | if (!(info->errsts2 & 0x0003)) | 285 | if (!(info->errsts2 & 0x0003)) |
285 | return 0; | 286 | return 0; |
286 | 287 | ||
@@ -288,41 +289,25 @@ static int i82975x_process_error_info(struct mem_ctl_info *mci, | |||
288 | return 1; | 289 | return 1; |
289 | 290 | ||
290 | if ((info->errsts ^ info->errsts2) & 0x0003) { | 291 | if ((info->errsts ^ info->errsts2) & 0x0003) { |
291 | edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1, 0, 0, 0, | 292 | edac_mc_handle_ce_no_info(mci, "UE overwrote CE"); |
292 | -1, -1, -1, "UE overwrote CE", ""); | ||
293 | info->errsts = info->errsts2; | 293 | info->errsts = info->errsts2; |
294 | } | 294 | } |
295 | 295 | ||
296 | page = (unsigned long) info->eap; | 296 | page = (unsigned long) info->eap; |
297 | page >>= 1; | ||
298 | if (info->xeap & 1) | 297 | if (info->xeap & 1) |
299 | page |= 0x80000000; | 298 | page |= 0x100000000ul; |
300 | page >>= (PAGE_SHIFT - 1); | 299 | chan = page & 1; |
300 | page >>= 1; | ||
301 | offst = page & ((1 << PAGE_SHIFT) - 1); | ||
302 | page >>= PAGE_SHIFT; | ||
301 | row = edac_mc_find_csrow_by_page(mci, page); | 303 | row = edac_mc_find_csrow_by_page(mci, page); |
302 | 304 | ||
303 | if (row == -1) { | ||
304 | i82975x_mc_printk(mci, KERN_ERR, "error processing EAP:\n" | ||
305 | "\tXEAP=%u\n" | ||
306 | "\t EAP=0x%08x\n" | ||
307 | "\tPAGE=0x%08x\n", | ||
308 | (info->xeap & 1) ? 1 : 0, info->eap, (unsigned int) page); | ||
309 | return 0; | ||
310 | } | ||
311 | chan = (mci->csrows[row]->nr_channels == 1) ? 0 : info->eap & 1; | ||
312 | offst = info->eap | ||
313 | & ((1 << PAGE_SHIFT) - | ||
314 | (1 << mci->csrows[row]->channels[chan]->dimm->grain)); | ||
315 | |||
316 | if (info->errsts & 0x0002) | 305 | if (info->errsts & 0x0002) |
317 | edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1, | 306 | edac_mc_handle_ue(mci, page, offst , row, "i82975x UE"); |
318 | page, offst, 0, | ||
319 | row, -1, -1, | ||
320 | "i82975x UE", ""); | ||
321 | else | 307 | else |
322 | edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1, | 308 | edac_mc_handle_ce(mci, page, offst, info->derrsyn, row, |
323 | page, offst, info->derrsyn, | 309 | multi_chan ? chan : 0, |
324 | row, chan ? chan : 0, -1, | 310 | "i82975x CE"); |
325 | "i82975x CE", ""); | ||
326 | 311 | ||
327 | return 1; | 312 | return 1; |
328 | } | 313 | } |
@@ -331,7 +316,7 @@ static void i82975x_check(struct mem_ctl_info *mci) | |||
331 | { | 316 | { |
332 | struct i82975x_error_info info; | 317 | struct i82975x_error_info info; |
333 | 318 | ||
334 | edac_dbg(1, "MC%d\n", mci->mc_idx); | 319 | debugf1("MC%d: %s()\n", mci->mc_idx, __func__); |
335 | i82975x_get_error_info(mci, &info); | 320 | i82975x_get_error_info(mci, &info); |
336 | i82975x_process_error_info(mci, &info, 1); | 321 | i82975x_process_error_info(mci, &info, 1); |
337 | } | 322 | } |
@@ -370,13 +355,15 @@ static enum dev_type i82975x_dram_type(void __iomem *mch_window, int rank) | |||
370 | static void i82975x_init_csrows(struct mem_ctl_info *mci, | 355 | static void i82975x_init_csrows(struct mem_ctl_info *mci, |
371 | struct pci_dev *pdev, void __iomem *mch_window) | 356 | struct pci_dev *pdev, void __iomem *mch_window) |
372 | { | 357 | { |
358 | static const char *labels[4] = { | ||
359 | "DIMM A1", "DIMM A2", | ||
360 | "DIMM B1", "DIMM B2" | ||
361 | }; | ||
373 | struct csrow_info *csrow; | 362 | struct csrow_info *csrow; |
374 | unsigned long last_cumul_size; | 363 | unsigned long last_cumul_size; |
375 | u8 value; | 364 | u8 value; |
376 | u32 cumul_size, nr_pages; | 365 | u32 cumul_size; |
377 | int index, chan; | 366 | int index, chan; |
378 | struct dimm_info *dimm; | ||
379 | enum dev_type dtype; | ||
380 | 367 | ||
381 | last_cumul_size = 0; | 368 | last_cumul_size = 0; |
382 | 369 | ||
@@ -390,7 +377,7 @@ static void i82975x_init_csrows(struct mem_ctl_info *mci, | |||
390 | */ | 377 | */ |
391 | 378 | ||
392 | for (index = 0; index < mci->nr_csrows; index++) { | 379 | for (index = 0; index < mci->nr_csrows; index++) { |
393 | csrow = mci->csrows[index]; | 380 | csrow = &mci->csrows[index]; |
394 | 381 | ||
395 | value = readb(mch_window + I82975X_DRB + index + | 382 | value = readb(mch_window + I82975X_DRB + index + |
396 | ((index >= 4) ? 0x80 : 0)); | 383 | ((index >= 4) ? 0x80 : 0)); |
@@ -402,11 +389,8 @@ static void i82975x_init_csrows(struct mem_ctl_info *mci, | |||
402 | */ | 389 | */ |
403 | if (csrow->nr_channels > 1) | 390 | if (csrow->nr_channels > 1) |
404 | cumul_size <<= 1; | 391 | cumul_size <<= 1; |
405 | edac_dbg(3, "(%d) cumul_size 0x%x\n", index, cumul_size); | 392 | debugf3("%s(): (%d) cumul_size 0x%x\n", __func__, index, |
406 | 393 | cumul_size); | |
407 | nr_pages = cumul_size - last_cumul_size; | ||
408 | if (!nr_pages) | ||
409 | continue; | ||
410 | 394 | ||
411 | /* | 395 | /* |
412 | * Initialise dram labels | 396 | * Initialise dram labels |
@@ -414,24 +398,22 @@ static void i82975x_init_csrows(struct mem_ctl_info *mci, | |||
414 | * [0-7] for single-channel; i.e. csrow->nr_channels = 1 | 398 | * [0-7] for single-channel; i.e. csrow->nr_channels = 1 |
415 | * [0-3] for dual-channel; i.e. csrow->nr_channels = 2 | 399 | * [0-3] for dual-channel; i.e. csrow->nr_channels = 2 |
416 | */ | 400 | */ |
417 | dtype = i82975x_dram_type(mch_window, index); | 401 | for (chan = 0; chan < csrow->nr_channels; chan++) |
418 | for (chan = 0; chan < csrow->nr_channels; chan++) { | 402 | strncpy(csrow->channels[chan].label, |
419 | dimm = mci->csrows[index]->channels[chan]->dimm; | 403 | labels[(index >> 1) + (chan * 2)], |
420 | 404 | EDAC_MC_LABEL_LEN); | |
421 | dimm->nr_pages = nr_pages / csrow->nr_channels; | 405 | |
422 | 406 | if (cumul_size == last_cumul_size) | |
423 | snprintf(csrow->channels[chan]->dimm->label, EDAC_MC_LABEL_LEN, "DIMM %c%d", | 407 | continue; /* not populated */ |
424 | (chan == 0) ? 'A' : 'B', | ||
425 | index); | ||
426 | dimm->grain = 1 << 7; /* 128Byte cache-line resolution */ | ||
427 | dimm->dtype = i82975x_dram_type(mch_window, index); | ||
428 | dimm->mtype = MEM_DDR2; /* I82975x supports only DDR2 */ | ||
429 | dimm->edac_mode = EDAC_SECDED; /* only supported */ | ||
430 | } | ||
431 | 408 | ||
432 | csrow->first_page = last_cumul_size; | 409 | csrow->first_page = last_cumul_size; |
433 | csrow->last_page = cumul_size - 1; | 410 | csrow->last_page = cumul_size - 1; |
411 | csrow->nr_pages = cumul_size - last_cumul_size; | ||
434 | last_cumul_size = cumul_size; | 412 | last_cumul_size = cumul_size; |
413 | csrow->grain = 1 << 6; /* I82975X_EAP has 64B resolution */ | ||
414 | csrow->mtype = MEM_DDR2; /* I82975x supports only DDR2 */ | ||
415 | csrow->dtype = i82975x_dram_type(mch_window, index); | ||
416 | csrow->edac_mode = EDAC_SECDED; /* only supported */ | ||
435 | } | 417 | } |
436 | } | 418 | } |
437 | 419 | ||
@@ -473,7 +455,6 @@ static int i82975x_probe1(struct pci_dev *pdev, int dev_idx) | |||
473 | { | 455 | { |
474 | int rc = -ENODEV; | 456 | int rc = -ENODEV; |
475 | struct mem_ctl_info *mci; | 457 | struct mem_ctl_info *mci; |
476 | struct edac_mc_layer layers[2]; | ||
477 | struct i82975x_pvt *pvt; | 458 | struct i82975x_pvt *pvt; |
478 | void __iomem *mch_window; | 459 | void __iomem *mch_window; |
479 | u32 mchbar; | 460 | u32 mchbar; |
@@ -485,11 +466,11 @@ static int i82975x_probe1(struct pci_dev *pdev, int dev_idx) | |||
485 | u8 c1drb[4]; | 466 | u8 c1drb[4]; |
486 | #endif | 467 | #endif |
487 | 468 | ||
488 | edac_dbg(0, "\n"); | 469 | debugf0("%s()\n", __func__); |
489 | 470 | ||
490 | pci_read_config_dword(pdev, I82975X_MCHBAR, &mchbar); | 471 | pci_read_config_dword(pdev, I82975X_MCHBAR, &mchbar); |
491 | if (!(mchbar & 1)) { | 472 | if (!(mchbar & 1)) { |
492 | edac_dbg(3, "failed, MCHBAR disabled!\n"); | 473 | debugf3("%s(): failed, MCHBAR disabled!\n", __func__); |
493 | goto fail0; | 474 | goto fail0; |
494 | } | 475 | } |
495 | mchbar &= 0xffffc000; /* bits 31:14 used for 16K window */ | 476 | mchbar &= 0xffffc000; /* bits 31:14 used for 16K window */ |
@@ -542,20 +523,15 @@ static int i82975x_probe1(struct pci_dev *pdev, int dev_idx) | |||
542 | chans = dual_channel_active(mch_window) + 1; | 523 | chans = dual_channel_active(mch_window) + 1; |
543 | 524 | ||
544 | /* assuming only one controller, index thus is 0 */ | 525 | /* assuming only one controller, index thus is 0 */ |
545 | layers[0].type = EDAC_MC_LAYER_CHIP_SELECT; | 526 | mci = edac_mc_alloc(sizeof(*pvt), I82975X_NR_CSROWS(chans), |
546 | layers[0].size = I82975X_NR_DIMMS; | 527 | chans, 0); |
547 | layers[0].is_virt_csrow = true; | ||
548 | layers[1].type = EDAC_MC_LAYER_CHANNEL; | ||
549 | layers[1].size = I82975X_NR_CSROWS(chans); | ||
550 | layers[1].is_virt_csrow = false; | ||
551 | mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers, sizeof(*pvt)); | ||
552 | if (!mci) { | 528 | if (!mci) { |
553 | rc = -ENOMEM; | 529 | rc = -ENOMEM; |
554 | goto fail1; | 530 | goto fail1; |
555 | } | 531 | } |
556 | 532 | ||
557 | edac_dbg(3, "init mci\n"); | 533 | debugf3("%s(): init mci\n", __func__); |
558 | mci->pdev = &pdev->dev; | 534 | mci->dev = &pdev->dev; |
559 | mci->mtype_cap = MEM_FLAG_DDR2; | 535 | mci->mtype_cap = MEM_FLAG_DDR2; |
560 | mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED; | 536 | mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED; |
561 | mci->edac_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED; | 537 | mci->edac_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED; |
@@ -565,7 +541,7 @@ static int i82975x_probe1(struct pci_dev *pdev, int dev_idx) | |||
565 | mci->dev_name = pci_name(pdev); | 541 | mci->dev_name = pci_name(pdev); |
566 | mci->edac_check = i82975x_check; | 542 | mci->edac_check = i82975x_check; |
567 | mci->ctl_page_to_phys = NULL; | 543 | mci->ctl_page_to_phys = NULL; |
568 | edac_dbg(3, "init pvt\n"); | 544 | debugf3("%s(): init pvt\n", __func__); |
569 | pvt = (struct i82975x_pvt *) mci->pvt_info; | 545 | pvt = (struct i82975x_pvt *) mci->pvt_info; |
570 | pvt->mch_window = mch_window; | 546 | pvt->mch_window = mch_window; |
571 | i82975x_init_csrows(mci, pdev, mch_window); | 547 | i82975x_init_csrows(mci, pdev, mch_window); |
@@ -574,12 +550,12 @@ static int i82975x_probe1(struct pci_dev *pdev, int dev_idx) | |||
574 | 550 | ||
575 | /* finalize this instance of memory controller with edac core */ | 551 | /* finalize this instance of memory controller with edac core */ |
576 | if (edac_mc_add_mc(mci)) { | 552 | if (edac_mc_add_mc(mci)) { |
577 | edac_dbg(3, "failed edac_mc_add_mc()\n"); | 553 | debugf3("%s(): failed edac_mc_add_mc()\n", __func__); |
578 | goto fail2; | 554 | goto fail2; |
579 | } | 555 | } |
580 | 556 | ||
581 | /* get this far and it's successful */ | 557 | /* get this far and it's successful */ |
582 | edac_dbg(3, "success\n"); | 558 | debugf3("%s(): success\n", __func__); |
583 | return 0; | 559 | return 0; |
584 | 560 | ||
585 | fail2: | 561 | fail2: |
@@ -592,12 +568,12 @@ fail0: | |||
592 | } | 568 | } |
593 | 569 | ||
594 | /* returns count (>= 0), or negative on error */ | 570 | /* returns count (>= 0), or negative on error */ |
595 | static int i82975x_init_one(struct pci_dev *pdev, | 571 | static int __devinit i82975x_init_one(struct pci_dev *pdev, |
596 | const struct pci_device_id *ent) | 572 | const struct pci_device_id *ent) |
597 | { | 573 | { |
598 | int rc; | 574 | int rc; |
599 | 575 | ||
600 | edac_dbg(0, "\n"); | 576 | debugf0("%s()\n", __func__); |
601 | 577 | ||
602 | if (pci_enable_device(pdev) < 0) | 578 | if (pci_enable_device(pdev) < 0) |
603 | return -EIO; | 579 | return -EIO; |
@@ -610,12 +586,12 @@ static int i82975x_init_one(struct pci_dev *pdev, | |||
610 | return rc; | 586 | return rc; |
611 | } | 587 | } |
612 | 588 | ||
613 | static void i82975x_remove_one(struct pci_dev *pdev) | 589 | static void __devexit i82975x_remove_one(struct pci_dev *pdev) |
614 | { | 590 | { |
615 | struct mem_ctl_info *mci; | 591 | struct mem_ctl_info *mci; |
616 | struct i82975x_pvt *pvt; | 592 | struct i82975x_pvt *pvt; |
617 | 593 | ||
618 | edac_dbg(0, "\n"); | 594 | debugf0("%s()\n", __func__); |
619 | 595 | ||
620 | mci = edac_mc_del_mc(&pdev->dev); | 596 | mci = edac_mc_del_mc(&pdev->dev); |
621 | if (mci == NULL) | 597 | if (mci == NULL) |
@@ -628,7 +604,7 @@ static void i82975x_remove_one(struct pci_dev *pdev) | |||
628 | edac_mc_free(mci); | 604 | edac_mc_free(mci); |
629 | } | 605 | } |
630 | 606 | ||
631 | static DEFINE_PCI_DEVICE_TABLE(i82975x_pci_tbl) = { | 607 | static const struct pci_device_id i82975x_pci_tbl[] __devinitdata = { |
632 | { | 608 | { |
633 | PCI_VEND_DEV(INTEL, 82975_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0, | 609 | PCI_VEND_DEV(INTEL, 82975_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0, |
634 | I82975X | 610 | I82975X |
@@ -643,7 +619,7 @@ MODULE_DEVICE_TABLE(pci, i82975x_pci_tbl); | |||
643 | static struct pci_driver i82975x_driver = { | 619 | static struct pci_driver i82975x_driver = { |
644 | .name = EDAC_MOD_STR, | 620 | .name = EDAC_MOD_STR, |
645 | .probe = i82975x_init_one, | 621 | .probe = i82975x_init_one, |
646 | .remove = i82975x_remove_one, | 622 | .remove = __devexit_p(i82975x_remove_one), |
647 | .id_table = i82975x_pci_tbl, | 623 | .id_table = i82975x_pci_tbl, |
648 | }; | 624 | }; |
649 | 625 | ||
@@ -651,7 +627,7 @@ static int __init i82975x_init(void) | |||
651 | { | 627 | { |
652 | int pci_rc; | 628 | int pci_rc; |
653 | 629 | ||
654 | edac_dbg(3, "\n"); | 630 | debugf3("%s()\n", __func__); |
655 | 631 | ||
656 | /* Ensure that the OPSTATE is set correctly for POLL or NMI */ | 632 | /* Ensure that the OPSTATE is set correctly for POLL or NMI */ |
657 | opstate_init(); | 633 | opstate_init(); |
@@ -665,7 +641,7 @@ static int __init i82975x_init(void) | |||
665 | PCI_DEVICE_ID_INTEL_82975_0, NULL); | 641 | PCI_DEVICE_ID_INTEL_82975_0, NULL); |
666 | 642 | ||
667 | if (!mci_pdev) { | 643 | if (!mci_pdev) { |
668 | edac_dbg(0, "i82975x pci_get_device fail\n"); | 644 | debugf0("i82975x pci_get_device fail\n"); |
669 | pci_rc = -ENODEV; | 645 | pci_rc = -ENODEV; |
670 | goto fail1; | 646 | goto fail1; |
671 | } | 647 | } |
@@ -673,7 +649,7 @@ static int __init i82975x_init(void) | |||
673 | pci_rc = i82975x_init_one(mci_pdev, i82975x_pci_tbl); | 649 | pci_rc = i82975x_init_one(mci_pdev, i82975x_pci_tbl); |
674 | 650 | ||
675 | if (pci_rc < 0) { | 651 | if (pci_rc < 0) { |
676 | edac_dbg(0, "i82975x init fail\n"); | 652 | debugf0("i82975x init fail\n"); |
677 | pci_rc = -ENODEV; | 653 | pci_rc = -ENODEV; |
678 | goto fail1; | 654 | goto fail1; |
679 | } | 655 | } |
@@ -693,7 +669,7 @@ fail0: | |||
693 | 669 | ||
694 | static void __exit i82975x_exit(void) | 670 | static void __exit i82975x_exit(void) |
695 | { | 671 | { |
696 | edac_dbg(3, "\n"); | 672 | debugf3("%s()\n", __func__); |
697 | 673 | ||
698 | pci_unregister_driver(&i82975x_driver); | 674 | pci_unregister_driver(&i82975x_driver); |
699 | 675 | ||
diff --git a/drivers/edac/mce_amd.c b/drivers/edac/mce_amd.c index ad637572d8c..795cfbc0bf5 100644 --- a/drivers/edac/mce_amd.c +++ b/drivers/edac/mce_amd.c | |||
@@ -9,7 +9,7 @@ static u8 xec_mask = 0xf; | |||
9 | static u8 nb_err_cpumask = 0xf; | 9 | static u8 nb_err_cpumask = 0xf; |
10 | 10 | ||
11 | static bool report_gart_errors; | 11 | static bool report_gart_errors; |
12 | static void (*nb_bus_decoder)(int node_id, struct mce *m); | 12 | static void (*nb_bus_decoder)(int node_id, struct mce *m, u32 nbcfg); |
13 | 13 | ||
14 | void amd_report_gart_errors(bool v) | 14 | void amd_report_gart_errors(bool v) |
15 | { | 15 | { |
@@ -17,13 +17,13 @@ void amd_report_gart_errors(bool v) | |||
17 | } | 17 | } |
18 | EXPORT_SYMBOL_GPL(amd_report_gart_errors); | 18 | EXPORT_SYMBOL_GPL(amd_report_gart_errors); |
19 | 19 | ||
20 | void amd_register_ecc_decoder(void (*f)(int, struct mce *)) | 20 | void amd_register_ecc_decoder(void (*f)(int, struct mce *, u32)) |
21 | { | 21 | { |
22 | nb_bus_decoder = f; | 22 | nb_bus_decoder = f; |
23 | } | 23 | } |
24 | EXPORT_SYMBOL_GPL(amd_register_ecc_decoder); | 24 | EXPORT_SYMBOL_GPL(amd_register_ecc_decoder); |
25 | 25 | ||
26 | void amd_unregister_ecc_decoder(void (*f)(int, struct mce *)) | 26 | void amd_unregister_ecc_decoder(void (*f)(int, struct mce *, u32)) |
27 | { | 27 | { |
28 | if (nb_bus_decoder) { | 28 | if (nb_bus_decoder) { |
29 | WARN_ON(nb_bus_decoder != f); | 29 | WARN_ON(nb_bus_decoder != f); |
@@ -39,32 +39,43 @@ EXPORT_SYMBOL_GPL(amd_unregister_ecc_decoder); | |||
39 | */ | 39 | */ |
40 | 40 | ||
41 | /* transaction type */ | 41 | /* transaction type */ |
42 | const char * const tt_msgs[] = { "INSN", "DATA", "GEN", "RESV" }; | 42 | const char *tt_msgs[] = { "INSN", "DATA", "GEN", "RESV" }; |
43 | EXPORT_SYMBOL_GPL(tt_msgs); | 43 | EXPORT_SYMBOL_GPL(tt_msgs); |
44 | 44 | ||
45 | /* cache level */ | 45 | /* cache level */ |
46 | const char * const ll_msgs[] = { "RESV", "L1", "L2", "L3/GEN" }; | 46 | const char *ll_msgs[] = { "RESV", "L1", "L2", "L3/GEN" }; |
47 | EXPORT_SYMBOL_GPL(ll_msgs); | 47 | EXPORT_SYMBOL_GPL(ll_msgs); |
48 | 48 | ||
49 | /* memory transaction type */ | 49 | /* memory transaction type */ |
50 | const char * const rrrr_msgs[] = { | 50 | const char *rrrr_msgs[] = { |
51 | "GEN", "RD", "WR", "DRD", "DWR", "IRD", "PRF", "EV", "SNP" | 51 | "GEN", "RD", "WR", "DRD", "DWR", "IRD", "PRF", "EV", "SNP" |
52 | }; | 52 | }; |
53 | EXPORT_SYMBOL_GPL(rrrr_msgs); | 53 | EXPORT_SYMBOL_GPL(rrrr_msgs); |
54 | 54 | ||
55 | /* participating processor */ | 55 | /* participating processor */ |
56 | const char * const pp_msgs[] = { "SRC", "RES", "OBS", "GEN" }; | 56 | const char *pp_msgs[] = { "SRC", "RES", "OBS", "GEN" }; |
57 | EXPORT_SYMBOL_GPL(pp_msgs); | 57 | EXPORT_SYMBOL_GPL(pp_msgs); |
58 | 58 | ||
59 | /* request timeout */ | 59 | /* request timeout */ |
60 | const char * const to_msgs[] = { "no timeout", "timed out" }; | 60 | const char *to_msgs[] = { "no timeout", "timed out" }; |
61 | EXPORT_SYMBOL_GPL(to_msgs); | 61 | EXPORT_SYMBOL_GPL(to_msgs); |
62 | 62 | ||
63 | /* memory or i/o */ | 63 | /* memory or i/o */ |
64 | const char * const ii_msgs[] = { "MEM", "RESV", "IO", "GEN" }; | 64 | const char *ii_msgs[] = { "MEM", "RESV", "IO", "GEN" }; |
65 | EXPORT_SYMBOL_GPL(ii_msgs); | 65 | EXPORT_SYMBOL_GPL(ii_msgs); |
66 | 66 | ||
67 | static const char * const f15h_mc1_mce_desc[] = { | 67 | static const char *f10h_nb_mce_desc[] = { |
68 | "HT link data error", | ||
69 | "Protocol error (link, L3, probe filter, etc.)", | ||
70 | "Parity error in NB-internal arrays", | ||
71 | "Link Retry due to IO link transmission error", | ||
72 | "L3 ECC data cache error", | ||
73 | "ECC error in L3 cache tag", | ||
74 | "L3 LRU parity bits error", | ||
75 | "ECC Error in the Probe Filter directory" | ||
76 | }; | ||
77 | |||
78 | static const char * const f15h_ic_mce_desc[] = { | ||
68 | "UC during a demand linefill from L2", | 79 | "UC during a demand linefill from L2", |
69 | "Parity error during data load from IC", | 80 | "Parity error during data load from IC", |
70 | "Parity error for IC valid bit", | 81 | "Parity error for IC valid bit", |
@@ -77,14 +88,14 @@ static const char * const f15h_mc1_mce_desc[] = { | |||
77 | "Parity error for IC probe tag valid bit", | 88 | "Parity error for IC probe tag valid bit", |
78 | "PFB non-cacheable bit parity error", | 89 | "PFB non-cacheable bit parity error", |
79 | "PFB valid bit parity error", /* xec = 0xd */ | 90 | "PFB valid bit parity error", /* xec = 0xd */ |
80 | "Microcode Patch Buffer", /* xec = 010 */ | 91 | "patch RAM", /* xec = 010 */ |
81 | "uop queue", | 92 | "uop queue", |
82 | "insn buffer", | 93 | "insn buffer", |
83 | "predecode buffer", | 94 | "predecode buffer", |
84 | "fetch address FIFO" | 95 | "fetch address FIFO" |
85 | }; | 96 | }; |
86 | 97 | ||
87 | static const char * const f15h_mc2_mce_desc[] = { | 98 | static const char * const f15h_cu_mce_desc[] = { |
88 | "Fill ECC error on data fills", /* xec = 0x4 */ | 99 | "Fill ECC error on data fills", /* xec = 0x4 */ |
89 | "Fill parity error on insn fills", | 100 | "Fill parity error on insn fills", |
90 | "Prefetcher request FIFO parity error", | 101 | "Prefetcher request FIFO parity error", |
@@ -93,7 +104,7 @@ static const char * const f15h_mc2_mce_desc[] = { | |||
93 | "WCC Tag ECC error", | 104 | "WCC Tag ECC error", |
94 | "WCC Data ECC error", | 105 | "WCC Data ECC error", |
95 | "WCB Data parity error", | 106 | "WCB Data parity error", |
96 | "VB Data ECC or parity error", | 107 | "VB Data/ECC error", |
97 | "L2 Tag ECC error", /* xec = 0x10 */ | 108 | "L2 Tag ECC error", /* xec = 0x10 */ |
98 | "Hard L2 Tag ECC error", | 109 | "Hard L2 Tag ECC error", |
99 | "Multiple hits on L2 tag", | 110 | "Multiple hits on L2 tag", |
@@ -101,29 +112,7 @@ static const char * const f15h_mc2_mce_desc[] = { | |||
101 | "PRB address parity error" | 112 | "PRB address parity error" |
102 | }; | 113 | }; |
103 | 114 | ||
104 | static const char * const mc4_mce_desc[] = { | 115 | static const char * const fr_ex_mce_desc[] = { |
105 | "DRAM ECC error detected on the NB", | ||
106 | "CRC error detected on HT link", | ||
107 | "Link-defined sync error packets detected on HT link", | ||
108 | "HT Master abort", | ||
109 | "HT Target abort", | ||
110 | "Invalid GART PTE entry during GART table walk", | ||
111 | "Unsupported atomic RMW received from an IO link", | ||
112 | "Watchdog timeout due to lack of progress", | ||
113 | "DRAM ECC error detected on the NB", | ||
114 | "SVM DMA Exclusion Vector error", | ||
115 | "HT data error detected on link", | ||
116 | "Protocol error (link, L3, probe filter)", | ||
117 | "NB internal arrays parity error", | ||
118 | "DRAM addr/ctl signals parity error", | ||
119 | "IO link transmission error", | ||
120 | "L3 data cache ECC error", /* xec = 0x1c */ | ||
121 | "L3 cache tag error", | ||
122 | "L3 LRU parity bits error", | ||
123 | "ECC Error in the Probe Filter directory" | ||
124 | }; | ||
125 | |||
126 | static const char * const mc5_mce_desc[] = { | ||
127 | "CPU Watchdog timer expire", | 116 | "CPU Watchdog timer expire", |
128 | "Wakeup array dest tag", | 117 | "Wakeup array dest tag", |
129 | "AG payload array", | 118 | "AG payload array", |
@@ -136,10 +125,10 @@ static const char * const mc5_mce_desc[] = { | |||
136 | "Physical register file AG0 port", | 125 | "Physical register file AG0 port", |
137 | "Physical register file AG1 port", | 126 | "Physical register file AG1 port", |
138 | "Flag register file", | 127 | "Flag register file", |
139 | "DE error occurred" | 128 | "DE correctable error could not be corrected" |
140 | }; | 129 | }; |
141 | 130 | ||
142 | static bool f12h_mc0_mce(u16 ec, u8 xec) | 131 | static bool f12h_dc_mce(u16 ec, u8 xec) |
143 | { | 132 | { |
144 | bool ret = false; | 133 | bool ret = false; |
145 | 134 | ||
@@ -157,26 +146,26 @@ static bool f12h_mc0_mce(u16 ec, u8 xec) | |||
157 | return ret; | 146 | return ret; |
158 | } | 147 | } |
159 | 148 | ||
160 | static bool f10h_mc0_mce(u16 ec, u8 xec) | 149 | static bool f10h_dc_mce(u16 ec, u8 xec) |
161 | { | 150 | { |
162 | if (R4(ec) == R4_GEN && LL(ec) == LL_L1) { | 151 | if (R4(ec) == R4_GEN && LL(ec) == LL_L1) { |
163 | pr_cont("during data scrub.\n"); | 152 | pr_cont("during data scrub.\n"); |
164 | return true; | 153 | return true; |
165 | } | 154 | } |
166 | return f12h_mc0_mce(ec, xec); | 155 | return f12h_dc_mce(ec, xec); |
167 | } | 156 | } |
168 | 157 | ||
169 | static bool k8_mc0_mce(u16 ec, u8 xec) | 158 | static bool k8_dc_mce(u16 ec, u8 xec) |
170 | { | 159 | { |
171 | if (BUS_ERROR(ec)) { | 160 | if (BUS_ERROR(ec)) { |
172 | pr_cont("during system linefill.\n"); | 161 | pr_cont("during system linefill.\n"); |
173 | return true; | 162 | return true; |
174 | } | 163 | } |
175 | 164 | ||
176 | return f10h_mc0_mce(ec, xec); | 165 | return f10h_dc_mce(ec, xec); |
177 | } | 166 | } |
178 | 167 | ||
179 | static bool f14h_mc0_mce(u16 ec, u8 xec) | 168 | static bool f14h_dc_mce(u16 ec, u8 xec) |
180 | { | 169 | { |
181 | u8 r4 = R4(ec); | 170 | u8 r4 = R4(ec); |
182 | bool ret = true; | 171 | bool ret = true; |
@@ -228,7 +217,7 @@ static bool f14h_mc0_mce(u16 ec, u8 xec) | |||
228 | return ret; | 217 | return ret; |
229 | } | 218 | } |
230 | 219 | ||
231 | static bool f15h_mc0_mce(u16 ec, u8 xec) | 220 | static bool f15h_dc_mce(u16 ec, u8 xec) |
232 | { | 221 | { |
233 | bool ret = true; | 222 | bool ret = true; |
234 | 223 | ||
@@ -266,21 +255,22 @@ static bool f15h_mc0_mce(u16 ec, u8 xec) | |||
266 | } else if (BUS_ERROR(ec)) { | 255 | } else if (BUS_ERROR(ec)) { |
267 | 256 | ||
268 | if (!xec) | 257 | if (!xec) |
269 | pr_cont("System Read Data Error.\n"); | 258 | pr_cont("during system linefill.\n"); |
270 | else | 259 | else |
271 | pr_cont(" Internal error condition type %d.\n", xec); | 260 | pr_cont(" Internal %s condition.\n", |
261 | ((xec == 1) ? "livelock" : "deadlock")); | ||
272 | } else | 262 | } else |
273 | ret = false; | 263 | ret = false; |
274 | 264 | ||
275 | return ret; | 265 | return ret; |
276 | } | 266 | } |
277 | 267 | ||
278 | static void decode_mc0_mce(struct mce *m) | 268 | static void amd_decode_dc_mce(struct mce *m) |
279 | { | 269 | { |
280 | u16 ec = EC(m->status); | 270 | u16 ec = EC(m->status); |
281 | u8 xec = XEC(m->status, xec_mask); | 271 | u8 xec = XEC(m->status, xec_mask); |
282 | 272 | ||
283 | pr_emerg(HW_ERR "MC0 Error: "); | 273 | pr_emerg(HW_ERR "Data Cache Error: "); |
284 | 274 | ||
285 | /* TLB error signatures are the same across families */ | 275 | /* TLB error signatures are the same across families */ |
286 | if (TLB_ERROR(ec)) { | 276 | if (TLB_ERROR(ec)) { |
@@ -290,13 +280,13 @@ static void decode_mc0_mce(struct mce *m) | |||
290 | : (xec ? "multimatch" : "parity"))); | 280 | : (xec ? "multimatch" : "parity"))); |
291 | return; | 281 | return; |
292 | } | 282 | } |
293 | } else if (fam_ops->mc0_mce(ec, xec)) | 283 | } else if (fam_ops->dc_mce(ec, xec)) |
294 | ; | 284 | ; |
295 | else | 285 | else |
296 | pr_emerg(HW_ERR "Corrupted MC0 MCE info?\n"); | 286 | pr_emerg(HW_ERR "Corrupted DC MCE info?\n"); |
297 | } | 287 | } |
298 | 288 | ||
299 | static bool k8_mc1_mce(u16 ec, u8 xec) | 289 | static bool k8_ic_mce(u16 ec, u8 xec) |
300 | { | 290 | { |
301 | u8 ll = LL(ec); | 291 | u8 ll = LL(ec); |
302 | bool ret = true; | 292 | bool ret = true; |
@@ -330,7 +320,7 @@ static bool k8_mc1_mce(u16 ec, u8 xec) | |||
330 | return ret; | 320 | return ret; |
331 | } | 321 | } |
332 | 322 | ||
333 | static bool f14h_mc1_mce(u16 ec, u8 xec) | 323 | static bool f14h_ic_mce(u16 ec, u8 xec) |
334 | { | 324 | { |
335 | u8 r4 = R4(ec); | 325 | u8 r4 = R4(ec); |
336 | bool ret = true; | 326 | bool ret = true; |
@@ -349,7 +339,7 @@ static bool f14h_mc1_mce(u16 ec, u8 xec) | |||
349 | return ret; | 339 | return ret; |
350 | } | 340 | } |
351 | 341 | ||
352 | static bool f15h_mc1_mce(u16 ec, u8 xec) | 342 | static bool f15h_ic_mce(u16 ec, u8 xec) |
353 | { | 343 | { |
354 | bool ret = true; | 344 | bool ret = true; |
355 | 345 | ||
@@ -358,19 +348,15 @@ static bool f15h_mc1_mce(u16 ec, u8 xec) | |||
358 | 348 | ||
359 | switch (xec) { | 349 | switch (xec) { |
360 | case 0x0 ... 0xa: | 350 | case 0x0 ... 0xa: |
361 | pr_cont("%s.\n", f15h_mc1_mce_desc[xec]); | 351 | pr_cont("%s.\n", f15h_ic_mce_desc[xec]); |
362 | break; | 352 | break; |
363 | 353 | ||
364 | case 0xd: | 354 | case 0xd: |
365 | pr_cont("%s.\n", f15h_mc1_mce_desc[xec-2]); | 355 | pr_cont("%s.\n", f15h_ic_mce_desc[xec-2]); |
366 | break; | ||
367 | |||
368 | case 0x10: | ||
369 | pr_cont("%s.\n", f15h_mc1_mce_desc[xec-4]); | ||
370 | break; | 356 | break; |
371 | 357 | ||
372 | case 0x11 ... 0x14: | 358 | case 0x10 ... 0x14: |
373 | pr_cont("Decoder %s parity error.\n", f15h_mc1_mce_desc[xec-4]); | 359 | pr_cont("Decoder %s parity error.\n", f15h_ic_mce_desc[xec-4]); |
374 | break; | 360 | break; |
375 | 361 | ||
376 | default: | 362 | default: |
@@ -379,12 +365,12 @@ static bool f15h_mc1_mce(u16 ec, u8 xec) | |||
379 | return ret; | 365 | return ret; |
380 | } | 366 | } |
381 | 367 | ||
382 | static void decode_mc1_mce(struct mce *m) | 368 | static void amd_decode_ic_mce(struct mce *m) |
383 | { | 369 | { |
384 | u16 ec = EC(m->status); | 370 | u16 ec = EC(m->status); |
385 | u8 xec = XEC(m->status, xec_mask); | 371 | u8 xec = XEC(m->status, xec_mask); |
386 | 372 | ||
387 | pr_emerg(HW_ERR "MC1 Error: "); | 373 | pr_emerg(HW_ERR "Instruction Cache Error: "); |
388 | 374 | ||
389 | if (TLB_ERROR(ec)) | 375 | if (TLB_ERROR(ec)) |
390 | pr_cont("%s TLB %s.\n", LL_MSG(ec), | 376 | pr_cont("%s TLB %s.\n", LL_MSG(ec), |
@@ -393,18 +379,18 @@ static void decode_mc1_mce(struct mce *m) | |||
393 | bool k8 = (boot_cpu_data.x86 == 0xf && (m->status & BIT_64(58))); | 379 | bool k8 = (boot_cpu_data.x86 == 0xf && (m->status & BIT_64(58))); |
394 | 380 | ||
395 | pr_cont("during %s.\n", (k8 ? "system linefill" : "NB data read")); | 381 | pr_cont("during %s.\n", (k8 ? "system linefill" : "NB data read")); |
396 | } else if (fam_ops->mc1_mce(ec, xec)) | 382 | } else if (fam_ops->ic_mce(ec, xec)) |
397 | ; | 383 | ; |
398 | else | 384 | else |
399 | pr_emerg(HW_ERR "Corrupted MC1 MCE info?\n"); | 385 | pr_emerg(HW_ERR "Corrupted IC MCE info?\n"); |
400 | } | 386 | } |
401 | 387 | ||
402 | static void decode_mc2_mce(struct mce *m) | 388 | static void amd_decode_bu_mce(struct mce *m) |
403 | { | 389 | { |
404 | u16 ec = EC(m->status); | 390 | u16 ec = EC(m->status); |
405 | u8 xec = XEC(m->status, xec_mask); | 391 | u8 xec = XEC(m->status, xec_mask); |
406 | 392 | ||
407 | pr_emerg(HW_ERR "MC2 Error"); | 393 | pr_emerg(HW_ERR "Bus Unit Error"); |
408 | 394 | ||
409 | if (xec == 0x1) | 395 | if (xec == 0x1) |
410 | pr_cont(" in the write data buffers.\n"); | 396 | pr_cont(" in the write data buffers.\n"); |
@@ -429,24 +415,24 @@ static void decode_mc2_mce(struct mce *m) | |||
429 | pr_cont(": %s parity/ECC error during data " | 415 | pr_cont(": %s parity/ECC error during data " |
430 | "access from L2.\n", R4_MSG(ec)); | 416 | "access from L2.\n", R4_MSG(ec)); |
431 | else | 417 | else |
432 | goto wrong_mc2_mce; | 418 | goto wrong_bu_mce; |
433 | } else | 419 | } else |
434 | goto wrong_mc2_mce; | 420 | goto wrong_bu_mce; |
435 | } else | 421 | } else |
436 | goto wrong_mc2_mce; | 422 | goto wrong_bu_mce; |
437 | 423 | ||
438 | return; | 424 | return; |
439 | 425 | ||
440 | wrong_mc2_mce: | 426 | wrong_bu_mce: |
441 | pr_emerg(HW_ERR "Corrupted MC2 MCE info?\n"); | 427 | pr_emerg(HW_ERR "Corrupted BU MCE info?\n"); |
442 | } | 428 | } |
443 | 429 | ||
444 | static void decode_f15_mc2_mce(struct mce *m) | 430 | static void amd_decode_cu_mce(struct mce *m) |
445 | { | 431 | { |
446 | u16 ec = EC(m->status); | 432 | u16 ec = EC(m->status); |
447 | u8 xec = XEC(m->status, xec_mask); | 433 | u8 xec = XEC(m->status, xec_mask); |
448 | 434 | ||
449 | pr_emerg(HW_ERR "MC2 Error: "); | 435 | pr_emerg(HW_ERR "Combined Unit Error: "); |
450 | 436 | ||
451 | if (TLB_ERROR(ec)) { | 437 | if (TLB_ERROR(ec)) { |
452 | if (xec == 0x0) | 438 | if (xec == 0x0) |
@@ -454,87 +440,114 @@ static void decode_f15_mc2_mce(struct mce *m) | |||
454 | else if (xec == 0x1) | 440 | else if (xec == 0x1) |
455 | pr_cont("Poison data provided for TLB fill.\n"); | 441 | pr_cont("Poison data provided for TLB fill.\n"); |
456 | else | 442 | else |
457 | goto wrong_f15_mc2_mce; | 443 | goto wrong_cu_mce; |
458 | } else if (BUS_ERROR(ec)) { | 444 | } else if (BUS_ERROR(ec)) { |
459 | if (xec > 2) | 445 | if (xec > 2) |
460 | goto wrong_f15_mc2_mce; | 446 | goto wrong_cu_mce; |
461 | 447 | ||
462 | pr_cont("Error during attempted NB data read.\n"); | 448 | pr_cont("Error during attempted NB data read.\n"); |
463 | } else if (MEM_ERROR(ec)) { | 449 | } else if (MEM_ERROR(ec)) { |
464 | switch (xec) { | 450 | switch (xec) { |
465 | case 0x4 ... 0xc: | 451 | case 0x4 ... 0xc: |
466 | pr_cont("%s.\n", f15h_mc2_mce_desc[xec - 0x4]); | 452 | pr_cont("%s.\n", f15h_cu_mce_desc[xec - 0x4]); |
467 | break; | 453 | break; |
468 | 454 | ||
469 | case 0x10 ... 0x14: | 455 | case 0x10 ... 0x14: |
470 | pr_cont("%s.\n", f15h_mc2_mce_desc[xec - 0x7]); | 456 | pr_cont("%s.\n", f15h_cu_mce_desc[xec - 0x7]); |
471 | break; | 457 | break; |
472 | 458 | ||
473 | default: | 459 | default: |
474 | goto wrong_f15_mc2_mce; | 460 | goto wrong_cu_mce; |
475 | } | 461 | } |
476 | } | 462 | } |
477 | 463 | ||
478 | return; | 464 | return; |
479 | 465 | ||
480 | wrong_f15_mc2_mce: | 466 | wrong_cu_mce: |
481 | pr_emerg(HW_ERR "Corrupted MC2 MCE info?\n"); | 467 | pr_emerg(HW_ERR "Corrupted CU MCE info?\n"); |
482 | } | 468 | } |
483 | 469 | ||
484 | static void decode_mc3_mce(struct mce *m) | 470 | static void amd_decode_ls_mce(struct mce *m) |
485 | { | 471 | { |
486 | u16 ec = EC(m->status); | 472 | u16 ec = EC(m->status); |
487 | u8 xec = XEC(m->status, xec_mask); | 473 | u8 xec = XEC(m->status, xec_mask); |
488 | 474 | ||
489 | if (boot_cpu_data.x86 >= 0x14) { | 475 | if (boot_cpu_data.x86 >= 0x14) { |
490 | pr_emerg("You shouldn't be seeing MC3 MCE on this cpu family," | 476 | pr_emerg("You shouldn't be seeing an LS MCE on this cpu family," |
491 | " please report on LKML.\n"); | 477 | " please report on LKML.\n"); |
492 | return; | 478 | return; |
493 | } | 479 | } |
494 | 480 | ||
495 | pr_emerg(HW_ERR "MC3 Error"); | 481 | pr_emerg(HW_ERR "Load Store Error"); |
496 | 482 | ||
497 | if (xec == 0x0) { | 483 | if (xec == 0x0) { |
498 | u8 r4 = R4(ec); | 484 | u8 r4 = R4(ec); |
499 | 485 | ||
500 | if (!BUS_ERROR(ec) || (r4 != R4_DRD && r4 != R4_DWR)) | 486 | if (!BUS_ERROR(ec) || (r4 != R4_DRD && r4 != R4_DWR)) |
501 | goto wrong_mc3_mce; | 487 | goto wrong_ls_mce; |
502 | 488 | ||
503 | pr_cont(" during %s.\n", R4_MSG(ec)); | 489 | pr_cont(" during %s.\n", R4_MSG(ec)); |
504 | } else | 490 | } else |
505 | goto wrong_mc3_mce; | 491 | goto wrong_ls_mce; |
506 | 492 | ||
507 | return; | 493 | return; |
508 | 494 | ||
509 | wrong_mc3_mce: | 495 | wrong_ls_mce: |
510 | pr_emerg(HW_ERR "Corrupted MC3 MCE info?\n"); | 496 | pr_emerg(HW_ERR "Corrupted LS MCE info?\n"); |
511 | } | 497 | } |
512 | 498 | ||
513 | static void decode_mc4_mce(struct mce *m) | 499 | static bool k8_nb_mce(u16 ec, u8 xec) |
514 | { | 500 | { |
515 | struct cpuinfo_x86 *c = &boot_cpu_data; | 501 | bool ret = true; |
516 | int node_id = amd_get_nb_id(m->extcpu); | ||
517 | u16 ec = EC(m->status); | ||
518 | u8 xec = XEC(m->status, 0x1f); | ||
519 | u8 offset = 0; | ||
520 | |||
521 | pr_emerg(HW_ERR "MC4 Error (node %d): ", node_id); | ||
522 | 502 | ||
523 | switch (xec) { | 503 | switch (xec) { |
524 | case 0x0 ... 0xe: | 504 | case 0x1: |
505 | pr_cont("CRC error detected on HT link.\n"); | ||
506 | break; | ||
525 | 507 | ||
526 | /* special handling for DRAM ECCs */ | 508 | case 0x5: |
527 | if (xec == 0x0 || xec == 0x8) { | 509 | pr_cont("Invalid GART PTE entry during GART table walk.\n"); |
528 | /* no ECCs on F11h */ | 510 | break; |
529 | if (c->x86 == 0x11) | ||
530 | goto wrong_mc4_mce; | ||
531 | 511 | ||
532 | pr_cont("%s.\n", mc4_mce_desc[xec]); | 512 | case 0x6: |
513 | pr_cont("Unsupported atomic RMW received from an IO link.\n"); | ||
514 | break; | ||
533 | 515 | ||
534 | if (nb_bus_decoder) | 516 | case 0x0: |
535 | nb_bus_decoder(node_id, m); | 517 | case 0x8: |
536 | return; | 518 | if (boot_cpu_data.x86 == 0x11) |
537 | } | 519 | return false; |
520 | |||
521 | pr_cont("DRAM ECC error detected on the NB.\n"); | ||
522 | break; | ||
523 | |||
524 | case 0xd: | ||
525 | pr_cont("Parity error on the DRAM addr/ctl signals.\n"); | ||
526 | break; | ||
527 | |||
528 | default: | ||
529 | ret = false; | ||
530 | break; | ||
531 | } | ||
532 | |||
533 | return ret; | ||
534 | } | ||
535 | |||
536 | static bool f10h_nb_mce(u16 ec, u8 xec) | ||
537 | { | ||
538 | bool ret = true; | ||
539 | u8 offset = 0; | ||
540 | |||
541 | if (k8_nb_mce(ec, xec)) | ||
542 | return true; | ||
543 | |||
544 | switch(xec) { | ||
545 | case 0xa ... 0xc: | ||
546 | offset = 10; | ||
547 | break; | ||
548 | |||
549 | case 0xe: | ||
550 | offset = 11; | ||
538 | break; | 551 | break; |
539 | 552 | ||
540 | case 0xf: | 553 | case 0xf: |
@@ -543,59 +556,139 @@ static void decode_mc4_mce(struct mce *m) | |||
543 | else if (BUS_ERROR(ec)) | 556 | else if (BUS_ERROR(ec)) |
544 | pr_cont("DMA Exclusion Vector Table Walk error.\n"); | 557 | pr_cont("DMA Exclusion Vector Table Walk error.\n"); |
545 | else | 558 | else |
546 | goto wrong_mc4_mce; | 559 | ret = false; |
547 | return; | 560 | |
561 | goto out; | ||
562 | break; | ||
548 | 563 | ||
549 | case 0x19: | 564 | case 0x19: |
550 | if (boot_cpu_data.x86 == 0x15) | 565 | if (boot_cpu_data.x86 == 0x15) |
551 | pr_cont("Compute Unit Data Error.\n"); | 566 | pr_cont("Compute Unit Data Error.\n"); |
552 | else | 567 | else |
553 | goto wrong_mc4_mce; | 568 | ret = false; |
554 | return; | 569 | |
570 | goto out; | ||
571 | break; | ||
555 | 572 | ||
556 | case 0x1c ... 0x1f: | 573 | case 0x1c ... 0x1f: |
557 | offset = 13; | 574 | offset = 24; |
558 | break; | 575 | break; |
559 | 576 | ||
560 | default: | 577 | default: |
561 | goto wrong_mc4_mce; | 578 | ret = false; |
579 | |||
580 | goto out; | ||
581 | break; | ||
582 | } | ||
583 | |||
584 | pr_cont("%s.\n", f10h_nb_mce_desc[xec - offset]); | ||
585 | |||
586 | out: | ||
587 | return ret; | ||
588 | } | ||
589 | |||
590 | static bool nb_noop_mce(u16 ec, u8 xec) | ||
591 | { | ||
592 | return false; | ||
593 | } | ||
594 | |||
595 | void amd_decode_nb_mce(int node_id, struct mce *m, u32 nbcfg) | ||
596 | { | ||
597 | struct cpuinfo_x86 *c = &boot_cpu_data; | ||
598 | u16 ec = EC(m->status); | ||
599 | u8 xec = XEC(m->status, 0x1f); | ||
600 | u32 nbsh = (u32)(m->status >> 32); | ||
601 | int core = -1; | ||
602 | |||
603 | pr_emerg(HW_ERR "Northbridge Error (node %d", node_id); | ||
604 | |||
605 | /* F10h, revD can disable ErrCpu[3:0] through ErrCpuVal */ | ||
606 | if (c->x86 == 0x10 && c->x86_model > 7) { | ||
607 | if (nbsh & NBSH_ERR_CPU_VAL) | ||
608 | core = nbsh & nb_err_cpumask; | ||
609 | } else { | ||
610 | u8 assoc_cpus = nbsh & nb_err_cpumask; | ||
611 | |||
612 | if (assoc_cpus > 0) | ||
613 | core = fls(assoc_cpus) - 1; | ||
614 | } | ||
615 | |||
616 | if (core >= 0) | ||
617 | pr_cont(", core %d): ", core); | ||
618 | else | ||
619 | pr_cont("): "); | ||
620 | |||
621 | switch (xec) { | ||
622 | case 0x2: | ||
623 | pr_cont("Sync error (sync packets on HT link detected).\n"); | ||
624 | return; | ||
625 | |||
626 | case 0x3: | ||
627 | pr_cont("HT Master abort.\n"); | ||
628 | return; | ||
629 | |||
630 | case 0x4: | ||
631 | pr_cont("HT Target abort.\n"); | ||
632 | return; | ||
633 | |||
634 | case 0x7: | ||
635 | pr_cont("NB Watchdog timeout.\n"); | ||
636 | return; | ||
637 | |||
638 | case 0x9: | ||
639 | pr_cont("SVM DMA Exclusion Vector error.\n"); | ||
640 | return; | ||
641 | |||
642 | default: | ||
643 | break; | ||
562 | } | 644 | } |
563 | 645 | ||
564 | pr_cont("%s.\n", mc4_mce_desc[xec - offset]); | 646 | if (!fam_ops->nb_mce(ec, xec)) |
647 | goto wrong_nb_mce; | ||
648 | |||
649 | if (c->x86 == 0xf || c->x86 == 0x10 || c->x86 == 0x15) | ||
650 | if ((xec == 0x8 || xec == 0x0) && nb_bus_decoder) | ||
651 | nb_bus_decoder(node_id, m, nbcfg); | ||
652 | |||
565 | return; | 653 | return; |
566 | 654 | ||
567 | wrong_mc4_mce: | 655 | wrong_nb_mce: |
568 | pr_emerg(HW_ERR "Corrupted MC4 MCE info?\n"); | 656 | pr_emerg(HW_ERR "Corrupted NB MCE info?\n"); |
569 | } | 657 | } |
658 | EXPORT_SYMBOL_GPL(amd_decode_nb_mce); | ||
570 | 659 | ||
571 | static void decode_mc5_mce(struct mce *m) | 660 | static void amd_decode_fr_mce(struct mce *m) |
572 | { | 661 | { |
573 | struct cpuinfo_x86 *c = &boot_cpu_data; | 662 | struct cpuinfo_x86 *c = &boot_cpu_data; |
574 | u8 xec = XEC(m->status, xec_mask); | 663 | u8 xec = XEC(m->status, xec_mask); |
575 | 664 | ||
576 | if (c->x86 == 0xf || c->x86 == 0x11) | 665 | if (c->x86 == 0xf || c->x86 == 0x11) |
577 | goto wrong_mc5_mce; | 666 | goto wrong_fr_mce; |
578 | 667 | ||
579 | pr_emerg(HW_ERR "MC5 Error: "); | 668 | if (c->x86 != 0x15 && xec != 0x0) |
669 | goto wrong_fr_mce; | ||
670 | |||
671 | pr_emerg(HW_ERR "%s Error: ", | ||
672 | (c->x86 == 0x15 ? "Execution Unit" : "FIROB")); | ||
580 | 673 | ||
581 | if (xec == 0x0 || xec == 0xc) | 674 | if (xec == 0x0 || xec == 0xc) |
582 | pr_cont("%s.\n", mc5_mce_desc[xec]); | 675 | pr_cont("%s.\n", fr_ex_mce_desc[xec]); |
583 | else if (xec < 0xd) | 676 | else if (xec < 0xd) |
584 | pr_cont("%s parity error.\n", mc5_mce_desc[xec]); | 677 | pr_cont("%s parity error.\n", fr_ex_mce_desc[xec]); |
585 | else | 678 | else |
586 | goto wrong_mc5_mce; | 679 | goto wrong_fr_mce; |
587 | 680 | ||
588 | return; | 681 | return; |
589 | 682 | ||
590 | wrong_mc5_mce: | 683 | wrong_fr_mce: |
591 | pr_emerg(HW_ERR "Corrupted MC5 MCE info?\n"); | 684 | pr_emerg(HW_ERR "Corrupted FR MCE info?\n"); |
592 | } | 685 | } |
593 | 686 | ||
594 | static void decode_mc6_mce(struct mce *m) | 687 | static void amd_decode_fp_mce(struct mce *m) |
595 | { | 688 | { |
596 | u8 xec = XEC(m->status, xec_mask); | 689 | u8 xec = XEC(m->status, xec_mask); |
597 | 690 | ||
598 | pr_emerg(HW_ERR "MC6 Error: "); | 691 | pr_emerg(HW_ERR "Floating Point Unit Error: "); |
599 | 692 | ||
600 | switch (xec) { | 693 | switch (xec) { |
601 | case 0x1: | 694 | case 0x1: |
@@ -619,7 +712,7 @@ static void decode_mc6_mce(struct mce *m) | |||
619 | break; | 712 | break; |
620 | 713 | ||
621 | default: | 714 | default: |
622 | goto wrong_mc6_mce; | 715 | goto wrong_fp_mce; |
623 | break; | 716 | break; |
624 | } | 717 | } |
625 | 718 | ||
@@ -627,8 +720,8 @@ static void decode_mc6_mce(struct mce *m) | |||
627 | 720 | ||
628 | return; | 721 | return; |
629 | 722 | ||
630 | wrong_mc6_mce: | 723 | wrong_fp_mce: |
631 | pr_emerg(HW_ERR "Corrupted MC6 MCE info?\n"); | 724 | pr_emerg(HW_ERR "Corrupted FP MCE info?\n"); |
632 | } | 725 | } |
633 | 726 | ||
634 | static inline void amd_decode_err_code(u16 ec) | 727 | static inline void amd_decode_err_code(u16 ec) |
@@ -667,94 +760,73 @@ static bool amd_filter_mce(struct mce *m) | |||
667 | return false; | 760 | return false; |
668 | } | 761 | } |
669 | 762 | ||
670 | static const char *decode_error_status(struct mce *m) | ||
671 | { | ||
672 | if (m->status & MCI_STATUS_UC) { | ||
673 | if (m->status & MCI_STATUS_PCC) | ||
674 | return "System Fatal error."; | ||
675 | if (m->mcgstatus & MCG_STATUS_RIPV) | ||
676 | return "Uncorrected, software restartable error."; | ||
677 | return "Uncorrected, software containable error."; | ||
678 | } | ||
679 | |||
680 | if (m->status & MCI_STATUS_DEFERRED) | ||
681 | return "Deferred error."; | ||
682 | |||
683 | return "Corrected error, no action required."; | ||
684 | } | ||
685 | |||
686 | int amd_decode_mce(struct notifier_block *nb, unsigned long val, void *data) | 763 | int amd_decode_mce(struct notifier_block *nb, unsigned long val, void *data) |
687 | { | 764 | { |
688 | struct mce *m = (struct mce *)data; | 765 | struct mce *m = (struct mce *)data; |
689 | struct cpuinfo_x86 *c = &cpu_data(m->extcpu); | 766 | struct cpuinfo_x86 *c = &boot_cpu_data; |
690 | int ecc; | 767 | int node, ecc; |
691 | 768 | ||
692 | if (amd_filter_mce(m)) | 769 | if (amd_filter_mce(m)) |
693 | return NOTIFY_STOP; | 770 | return NOTIFY_STOP; |
694 | 771 | ||
772 | pr_emerg(HW_ERR "MC%d_STATUS[%s|%s|%s|%s|%s", | ||
773 | m->bank, | ||
774 | ((m->status & MCI_STATUS_OVER) ? "Over" : "-"), | ||
775 | ((m->status & MCI_STATUS_UC) ? "UE" : "CE"), | ||
776 | ((m->status & MCI_STATUS_MISCV) ? "MiscV" : "-"), | ||
777 | ((m->status & MCI_STATUS_PCC) ? "PCC" : "-"), | ||
778 | ((m->status & MCI_STATUS_ADDRV) ? "AddrV" : "-")); | ||
779 | |||
780 | if (c->x86 == 0x15) | ||
781 | pr_cont("|%s|%s", | ||
782 | ((m->status & BIT_64(44)) ? "Deferred" : "-"), | ||
783 | ((m->status & BIT_64(43)) ? "Poison" : "-")); | ||
784 | |||
785 | /* do the two bits[14:13] together */ | ||
786 | ecc = (m->status >> 45) & 0x3; | ||
787 | if (ecc) | ||
788 | pr_cont("|%sECC", ((ecc == 2) ? "C" : "U")); | ||
789 | |||
790 | pr_cont("]: 0x%016llx\n", m->status); | ||
791 | |||
792 | |||
695 | switch (m->bank) { | 793 | switch (m->bank) { |
696 | case 0: | 794 | case 0: |
697 | decode_mc0_mce(m); | 795 | amd_decode_dc_mce(m); |
698 | break; | 796 | break; |
699 | 797 | ||
700 | case 1: | 798 | case 1: |
701 | decode_mc1_mce(m); | 799 | amd_decode_ic_mce(m); |
702 | break; | 800 | break; |
703 | 801 | ||
704 | case 2: | 802 | case 2: |
705 | if (c->x86 == 0x15) | 803 | if (c->x86 == 0x15) |
706 | decode_f15_mc2_mce(m); | 804 | amd_decode_cu_mce(m); |
707 | else | 805 | else |
708 | decode_mc2_mce(m); | 806 | amd_decode_bu_mce(m); |
709 | break; | 807 | break; |
710 | 808 | ||
711 | case 3: | 809 | case 3: |
712 | decode_mc3_mce(m); | 810 | amd_decode_ls_mce(m); |
713 | break; | 811 | break; |
714 | 812 | ||
715 | case 4: | 813 | case 4: |
716 | decode_mc4_mce(m); | 814 | node = amd_get_nb_id(m->extcpu); |
815 | amd_decode_nb_mce(node, m, 0); | ||
717 | break; | 816 | break; |
718 | 817 | ||
719 | case 5: | 818 | case 5: |
720 | decode_mc5_mce(m); | 819 | amd_decode_fr_mce(m); |
721 | break; | 820 | break; |
722 | 821 | ||
723 | case 6: | 822 | case 6: |
724 | decode_mc6_mce(m); | 823 | amd_decode_fp_mce(m); |
725 | break; | 824 | break; |
726 | 825 | ||
727 | default: | 826 | default: |
728 | break; | 827 | break; |
729 | } | 828 | } |
730 | 829 | ||
731 | pr_emerg(HW_ERR "Error Status: %s\n", decode_error_status(m)); | ||
732 | |||
733 | pr_emerg(HW_ERR "CPU:%d (%x:%x:%x) MC%d_STATUS[%s|%s|%s|%s|%s", | ||
734 | m->extcpu, | ||
735 | c->x86, c->x86_model, c->x86_mask, | ||
736 | m->bank, | ||
737 | ((m->status & MCI_STATUS_OVER) ? "Over" : "-"), | ||
738 | ((m->status & MCI_STATUS_UC) ? "UE" : "CE"), | ||
739 | ((m->status & MCI_STATUS_MISCV) ? "MiscV" : "-"), | ||
740 | ((m->status & MCI_STATUS_PCC) ? "PCC" : "-"), | ||
741 | ((m->status & MCI_STATUS_ADDRV) ? "AddrV" : "-")); | ||
742 | |||
743 | if (c->x86 == 0x15) | ||
744 | pr_cont("|%s|%s", | ||
745 | ((m->status & MCI_STATUS_DEFERRED) ? "Deferred" : "-"), | ||
746 | ((m->status & MCI_STATUS_POISON) ? "Poison" : "-")); | ||
747 | |||
748 | /* do the two bits[14:13] together */ | ||
749 | ecc = (m->status >> 45) & 0x3; | ||
750 | if (ecc) | ||
751 | pr_cont("|%sECC", ((ecc == 2) ? "C" : "U")); | ||
752 | |||
753 | pr_cont("]: 0x%016llx\n", m->status); | ||
754 | |||
755 | if (m->status & MCI_STATUS_ADDRV) | ||
756 | pr_emerg(HW_ERR "MC%d_ADDR: 0x%016llx\n", m->bank, m->addr); | ||
757 | |||
758 | amd_decode_err_code(m->status & 0xffff); | 830 | amd_decode_err_code(m->status & 0xffff); |
759 | 831 | ||
760 | return NOTIFY_STOP; | 832 | return NOTIFY_STOP; |
@@ -772,7 +844,9 @@ static int __init mce_amd_init(void) | |||
772 | if (c->x86_vendor != X86_VENDOR_AMD) | 844 | if (c->x86_vendor != X86_VENDOR_AMD) |
773 | return 0; | 845 | return 0; |
774 | 846 | ||
775 | if (c->x86 < 0xf || c->x86 > 0x15) | 847 | if ((c->x86 < 0xf || c->x86 > 0x12) && |
848 | (c->x86 != 0x14 || c->x86_model > 0xf) && | ||
849 | (c->x86 != 0x15 || c->x86_model > 0xf)) | ||
776 | return 0; | 850 | return 0; |
777 | 851 | ||
778 | fam_ops = kzalloc(sizeof(struct amd_decoder_ops), GFP_KERNEL); | 852 | fam_ops = kzalloc(sizeof(struct amd_decoder_ops), GFP_KERNEL); |
@@ -781,46 +855,52 @@ static int __init mce_amd_init(void) | |||
781 | 855 | ||
782 | switch (c->x86) { | 856 | switch (c->x86) { |
783 | case 0xf: | 857 | case 0xf: |
784 | fam_ops->mc0_mce = k8_mc0_mce; | 858 | fam_ops->dc_mce = k8_dc_mce; |
785 | fam_ops->mc1_mce = k8_mc1_mce; | 859 | fam_ops->ic_mce = k8_ic_mce; |
860 | fam_ops->nb_mce = k8_nb_mce; | ||
786 | break; | 861 | break; |
787 | 862 | ||
788 | case 0x10: | 863 | case 0x10: |
789 | fam_ops->mc0_mce = f10h_mc0_mce; | 864 | fam_ops->dc_mce = f10h_dc_mce; |
790 | fam_ops->mc1_mce = k8_mc1_mce; | 865 | fam_ops->ic_mce = k8_ic_mce; |
866 | fam_ops->nb_mce = f10h_nb_mce; | ||
791 | break; | 867 | break; |
792 | 868 | ||
793 | case 0x11: | 869 | case 0x11: |
794 | fam_ops->mc0_mce = k8_mc0_mce; | 870 | fam_ops->dc_mce = k8_dc_mce; |
795 | fam_ops->mc1_mce = k8_mc1_mce; | 871 | fam_ops->ic_mce = k8_ic_mce; |
872 | fam_ops->nb_mce = f10h_nb_mce; | ||
796 | break; | 873 | break; |
797 | 874 | ||
798 | case 0x12: | 875 | case 0x12: |
799 | fam_ops->mc0_mce = f12h_mc0_mce; | 876 | fam_ops->dc_mce = f12h_dc_mce; |
800 | fam_ops->mc1_mce = k8_mc1_mce; | 877 | fam_ops->ic_mce = k8_ic_mce; |
878 | fam_ops->nb_mce = nb_noop_mce; | ||
801 | break; | 879 | break; |
802 | 880 | ||
803 | case 0x14: | 881 | case 0x14: |
804 | nb_err_cpumask = 0x3; | 882 | nb_err_cpumask = 0x3; |
805 | fam_ops->mc0_mce = f14h_mc0_mce; | 883 | fam_ops->dc_mce = f14h_dc_mce; |
806 | fam_ops->mc1_mce = f14h_mc1_mce; | 884 | fam_ops->ic_mce = f14h_ic_mce; |
885 | fam_ops->nb_mce = nb_noop_mce; | ||
807 | break; | 886 | break; |
808 | 887 | ||
809 | case 0x15: | 888 | case 0x15: |
810 | xec_mask = 0x1f; | 889 | xec_mask = 0x1f; |
811 | fam_ops->mc0_mce = f15h_mc0_mce; | 890 | fam_ops->dc_mce = f15h_dc_mce; |
812 | fam_ops->mc1_mce = f15h_mc1_mce; | 891 | fam_ops->ic_mce = f15h_ic_mce; |
892 | fam_ops->nb_mce = f10h_nb_mce; | ||
813 | break; | 893 | break; |
814 | 894 | ||
815 | default: | 895 | default: |
816 | printk(KERN_WARNING "Huh? What family is it: 0x%x?!\n", c->x86); | 896 | printk(KERN_WARNING "Huh? What family is that: %d?!\n", c->x86); |
817 | kfree(fam_ops); | 897 | kfree(fam_ops); |
818 | return -EINVAL; | 898 | return -EINVAL; |
819 | } | 899 | } |
820 | 900 | ||
821 | pr_info("MCE: In-kernel MCE decoding enabled.\n"); | 901 | pr_info("MCE: In-kernel MCE decoding enabled.\n"); |
822 | 902 | ||
823 | mce_register_decode_chain(&amd_mce_dec_nb); | 903 | atomic_notifier_chain_register(&x86_mce_decoder_chain, &amd_mce_dec_nb); |
824 | 904 | ||
825 | return 0; | 905 | return 0; |
826 | } | 906 | } |
@@ -829,7 +909,7 @@ early_initcall(mce_amd_init); | |||
829 | #ifdef MODULE | 909 | #ifdef MODULE |
830 | static void __exit mce_amd_exit(void) | 910 | static void __exit mce_amd_exit(void) |
831 | { | 911 | { |
832 | mce_unregister_decode_chain(&amd_mce_dec_nb); | 912 | atomic_notifier_chain_unregister(&x86_mce_decoder_chain, &amd_mce_dec_nb); |
833 | kfree(fam_ops); | 913 | kfree(fam_ops); |
834 | } | 914 | } |
835 | 915 | ||
diff --git a/drivers/edac/mce_amd.h b/drivers/edac/mce_amd.h index 679679951e2..795a3206acf 100644 --- a/drivers/edac/mce_amd.h +++ b/drivers/edac/mce_amd.h | |||
@@ -5,6 +5,8 @@ | |||
5 | 5 | ||
6 | #include <asm/mce.h> | 6 | #include <asm/mce.h> |
7 | 7 | ||
8 | #define BIT_64(n) (U64_C(1) << (n)) | ||
9 | |||
8 | #define EC(x) ((x) & 0xffff) | 10 | #define EC(x) ((x) & 0xffff) |
9 | #define XEC(x, mask) (((x) >> 16) & mask) | 11 | #define XEC(x, mask) (((x) >> 16) & mask) |
10 | 12 | ||
@@ -29,8 +31,10 @@ | |||
29 | #define R4(x) (((x) >> 4) & 0xf) | 31 | #define R4(x) (((x) >> 4) & 0xf) |
30 | #define R4_MSG(x) ((R4(x) < 9) ? rrrr_msgs[R4(x)] : "Wrong R4!") | 32 | #define R4_MSG(x) ((R4(x) < 9) ? rrrr_msgs[R4(x)] : "Wrong R4!") |
31 | 33 | ||
32 | #define MCI_STATUS_DEFERRED BIT_64(44) | 34 | /* |
33 | #define MCI_STATUS_POISON BIT_64(43) | 35 | * F3x4C bits (MCi_STATUS' high half) |
36 | */ | ||
37 | #define NBSH_ERR_CPU_VAL BIT(24) | ||
34 | 38 | ||
35 | enum tt_ids { | 39 | enum tt_ids { |
36 | TT_INSTR = 0, | 40 | TT_INSTR = 0, |
@@ -65,24 +69,26 @@ enum rrrr_ids { | |||
65 | R4_SNOOP, | 69 | R4_SNOOP, |
66 | }; | 70 | }; |
67 | 71 | ||
68 | extern const char * const tt_msgs[]; | 72 | extern const char *tt_msgs[]; |
69 | extern const char * const ll_msgs[]; | 73 | extern const char *ll_msgs[]; |
70 | extern const char * const rrrr_msgs[]; | 74 | extern const char *rrrr_msgs[]; |
71 | extern const char * const pp_msgs[]; | 75 | extern const char *pp_msgs[]; |
72 | extern const char * const to_msgs[]; | 76 | extern const char *to_msgs[]; |
73 | extern const char * const ii_msgs[]; | 77 | extern const char *ii_msgs[]; |
74 | 78 | ||
75 | /* | 79 | /* |
76 | * per-family decoder ops | 80 | * per-family decoder ops |
77 | */ | 81 | */ |
78 | struct amd_decoder_ops { | 82 | struct amd_decoder_ops { |
79 | bool (*mc0_mce)(u16, u8); | 83 | bool (*dc_mce)(u16, u8); |
80 | bool (*mc1_mce)(u16, u8); | 84 | bool (*ic_mce)(u16, u8); |
85 | bool (*nb_mce)(u16, u8); | ||
81 | }; | 86 | }; |
82 | 87 | ||
83 | void amd_report_gart_errors(bool); | 88 | void amd_report_gart_errors(bool); |
84 | void amd_register_ecc_decoder(void (*f)(int, struct mce *)); | 89 | void amd_register_ecc_decoder(void (*f)(int, struct mce *, u32)); |
85 | void amd_unregister_ecc_decoder(void (*f)(int, struct mce *)); | 90 | void amd_unregister_ecc_decoder(void (*f)(int, struct mce *, u32)); |
91 | void amd_decode_nb_mce(int, struct mce *, u32); | ||
86 | int amd_decode_mce(struct notifier_block *nb, unsigned long val, void *data); | 92 | int amd_decode_mce(struct notifier_block *nb, unsigned long val, void *data); |
87 | 93 | ||
88 | #endif /* _EDAC_MCE_AMD_H */ | 94 | #endif /* _EDAC_MCE_AMD_H */ |
diff --git a/drivers/edac/mce_amd_inj.c b/drivers/edac/mce_amd_inj.c index 2ae78f20cc2..a4987e03f59 100644 --- a/drivers/edac/mce_amd_inj.c +++ b/drivers/edac/mce_amd_inj.c | |||
@@ -6,14 +6,13 @@ | |||
6 | * This file may be distributed under the terms of the GNU General Public | 6 | * This file may be distributed under the terms of the GNU General Public |
7 | * License version 2. | 7 | * License version 2. |
8 | * | 8 | * |
9 | * Copyright (c) 2010: Borislav Petkov <bp@alien8.de> | 9 | * Copyright (c) 2010: Borislav Petkov <borislav.petkov@amd.com> |
10 | * Advanced Micro Devices Inc. | 10 | * Advanced Micro Devices Inc. |
11 | */ | 11 | */ |
12 | 12 | ||
13 | #include <linux/kobject.h> | 13 | #include <linux/kobject.h> |
14 | #include <linux/device.h> | 14 | #include <linux/sysdev.h> |
15 | #include <linux/edac.h> | 15 | #include <linux/edac.h> |
16 | #include <linux/module.h> | ||
17 | #include <asm/mce.h> | 16 | #include <asm/mce.h> |
18 | 17 | ||
19 | #include "mce_amd.h" | 18 | #include "mce_amd.h" |
@@ -116,14 +115,14 @@ static struct edac_mce_attr *sysfs_attrs[] = { &mce_attr_status, &mce_attr_misc, | |||
116 | 115 | ||
117 | static int __init edac_init_mce_inject(void) | 116 | static int __init edac_init_mce_inject(void) |
118 | { | 117 | { |
119 | struct bus_type *edac_subsys = NULL; | 118 | struct sysdev_class *edac_class = NULL; |
120 | int i, err = 0; | 119 | int i, err = 0; |
121 | 120 | ||
122 | edac_subsys = edac_get_sysfs_subsys(); | 121 | edac_class = edac_get_sysfs_class(); |
123 | if (!edac_subsys) | 122 | if (!edac_class) |
124 | return -EINVAL; | 123 | return -EINVAL; |
125 | 124 | ||
126 | mce_kobj = kobject_create_and_add("mce", &edac_subsys->dev_root->kobj); | 125 | mce_kobj = kobject_create_and_add("mce", &edac_class->kset.kobj); |
127 | if (!mce_kobj) { | 126 | if (!mce_kobj) { |
128 | printk(KERN_ERR "Error creating a mce kset.\n"); | 127 | printk(KERN_ERR "Error creating a mce kset.\n"); |
129 | err = -ENOMEM; | 128 | err = -ENOMEM; |
@@ -147,7 +146,7 @@ err_sysfs_create: | |||
147 | kobject_del(mce_kobj); | 146 | kobject_del(mce_kobj); |
148 | 147 | ||
149 | err_mce_kobj: | 148 | err_mce_kobj: |
150 | edac_put_sysfs_subsys(); | 149 | edac_put_sysfs_class(); |
151 | 150 | ||
152 | return err; | 151 | return err; |
153 | } | 152 | } |
@@ -161,13 +160,13 @@ static void __exit edac_exit_mce_inject(void) | |||
161 | 160 | ||
162 | kobject_del(mce_kobj); | 161 | kobject_del(mce_kobj); |
163 | 162 | ||
164 | edac_put_sysfs_subsys(); | 163 | edac_put_sysfs_class(); |
165 | } | 164 | } |
166 | 165 | ||
167 | module_init(edac_init_mce_inject); | 166 | module_init(edac_init_mce_inject); |
168 | module_exit(edac_exit_mce_inject); | 167 | module_exit(edac_exit_mce_inject); |
169 | 168 | ||
170 | MODULE_LICENSE("GPL"); | 169 | MODULE_LICENSE("GPL"); |
171 | MODULE_AUTHOR("Borislav Petkov <bp@alien8.de>"); | 170 | MODULE_AUTHOR("Borislav Petkov <borislav.petkov@amd.com>"); |
172 | MODULE_AUTHOR("AMD Inc."); | 171 | MODULE_AUTHOR("AMD Inc."); |
173 | MODULE_DESCRIPTION("MCE injection facility for testing MCE decoding"); | 172 | MODULE_DESCRIPTION("MCE injection facility for testing MCE decoding"); |
diff --git a/drivers/edac/mpc85xx_edac.c b/drivers/edac/mpc85xx_edac.c index 42a840d530a..8af8e864a9c 100644 --- a/drivers/edac/mpc85xx_edac.c +++ b/drivers/edac/mpc85xx_edac.c | |||
@@ -49,45 +49,34 @@ static u32 orig_hid1[2]; | |||
49 | 49 | ||
50 | /************************ MC SYSFS parts ***********************************/ | 50 | /************************ MC SYSFS parts ***********************************/ |
51 | 51 | ||
52 | #define to_mci(k) container_of(k, struct mem_ctl_info, dev) | 52 | static ssize_t mpc85xx_mc_inject_data_hi_show(struct mem_ctl_info *mci, |
53 | |||
54 | static ssize_t mpc85xx_mc_inject_data_hi_show(struct device *dev, | ||
55 | struct device_attribute *mattr, | ||
56 | char *data) | 53 | char *data) |
57 | { | 54 | { |
58 | struct mem_ctl_info *mci = to_mci(dev); | ||
59 | struct mpc85xx_mc_pdata *pdata = mci->pvt_info; | 55 | struct mpc85xx_mc_pdata *pdata = mci->pvt_info; |
60 | return sprintf(data, "0x%08x", | 56 | return sprintf(data, "0x%08x", |
61 | in_be32(pdata->mc_vbase + | 57 | in_be32(pdata->mc_vbase + |
62 | MPC85XX_MC_DATA_ERR_INJECT_HI)); | 58 | MPC85XX_MC_DATA_ERR_INJECT_HI)); |
63 | } | 59 | } |
64 | 60 | ||
65 | static ssize_t mpc85xx_mc_inject_data_lo_show(struct device *dev, | 61 | static ssize_t mpc85xx_mc_inject_data_lo_show(struct mem_ctl_info *mci, |
66 | struct device_attribute *mattr, | ||
67 | char *data) | 62 | char *data) |
68 | { | 63 | { |
69 | struct mem_ctl_info *mci = to_mci(dev); | ||
70 | struct mpc85xx_mc_pdata *pdata = mci->pvt_info; | 64 | struct mpc85xx_mc_pdata *pdata = mci->pvt_info; |
71 | return sprintf(data, "0x%08x", | 65 | return sprintf(data, "0x%08x", |
72 | in_be32(pdata->mc_vbase + | 66 | in_be32(pdata->mc_vbase + |
73 | MPC85XX_MC_DATA_ERR_INJECT_LO)); | 67 | MPC85XX_MC_DATA_ERR_INJECT_LO)); |
74 | } | 68 | } |
75 | 69 | ||
76 | static ssize_t mpc85xx_mc_inject_ctrl_show(struct device *dev, | 70 | static ssize_t mpc85xx_mc_inject_ctrl_show(struct mem_ctl_info *mci, char *data) |
77 | struct device_attribute *mattr, | ||
78 | char *data) | ||
79 | { | 71 | { |
80 | struct mem_ctl_info *mci = to_mci(dev); | ||
81 | struct mpc85xx_mc_pdata *pdata = mci->pvt_info; | 72 | struct mpc85xx_mc_pdata *pdata = mci->pvt_info; |
82 | return sprintf(data, "0x%08x", | 73 | return sprintf(data, "0x%08x", |
83 | in_be32(pdata->mc_vbase + MPC85XX_MC_ECC_ERR_INJECT)); | 74 | in_be32(pdata->mc_vbase + MPC85XX_MC_ECC_ERR_INJECT)); |
84 | } | 75 | } |
85 | 76 | ||
86 | static ssize_t mpc85xx_mc_inject_data_hi_store(struct device *dev, | 77 | static ssize_t mpc85xx_mc_inject_data_hi_store(struct mem_ctl_info *mci, |
87 | struct device_attribute *mattr, | ||
88 | const char *data, size_t count) | 78 | const char *data, size_t count) |
89 | { | 79 | { |
90 | struct mem_ctl_info *mci = to_mci(dev); | ||
91 | struct mpc85xx_mc_pdata *pdata = mci->pvt_info; | 80 | struct mpc85xx_mc_pdata *pdata = mci->pvt_info; |
92 | if (isdigit(*data)) { | 81 | if (isdigit(*data)) { |
93 | out_be32(pdata->mc_vbase + MPC85XX_MC_DATA_ERR_INJECT_HI, | 82 | out_be32(pdata->mc_vbase + MPC85XX_MC_DATA_ERR_INJECT_HI, |
@@ -97,11 +86,9 @@ static ssize_t mpc85xx_mc_inject_data_hi_store(struct device *dev, | |||
97 | return 0; | 86 | return 0; |
98 | } | 87 | } |
99 | 88 | ||
100 | static ssize_t mpc85xx_mc_inject_data_lo_store(struct device *dev, | 89 | static ssize_t mpc85xx_mc_inject_data_lo_store(struct mem_ctl_info *mci, |
101 | struct device_attribute *mattr, | ||
102 | const char *data, size_t count) | 90 | const char *data, size_t count) |
103 | { | 91 | { |
104 | struct mem_ctl_info *mci = to_mci(dev); | ||
105 | struct mpc85xx_mc_pdata *pdata = mci->pvt_info; | 92 | struct mpc85xx_mc_pdata *pdata = mci->pvt_info; |
106 | if (isdigit(*data)) { | 93 | if (isdigit(*data)) { |
107 | out_be32(pdata->mc_vbase + MPC85XX_MC_DATA_ERR_INJECT_LO, | 94 | out_be32(pdata->mc_vbase + MPC85XX_MC_DATA_ERR_INJECT_LO, |
@@ -111,11 +98,9 @@ static ssize_t mpc85xx_mc_inject_data_lo_store(struct device *dev, | |||
111 | return 0; | 98 | return 0; |
112 | } | 99 | } |
113 | 100 | ||
114 | static ssize_t mpc85xx_mc_inject_ctrl_store(struct device *dev, | 101 | static ssize_t mpc85xx_mc_inject_ctrl_store(struct mem_ctl_info *mci, |
115 | struct device_attribute *mattr, | 102 | const char *data, size_t count) |
116 | const char *data, size_t count) | ||
117 | { | 103 | { |
118 | struct mem_ctl_info *mci = to_mci(dev); | ||
119 | struct mpc85xx_mc_pdata *pdata = mci->pvt_info; | 104 | struct mpc85xx_mc_pdata *pdata = mci->pvt_info; |
120 | if (isdigit(*data)) { | 105 | if (isdigit(*data)) { |
121 | out_be32(pdata->mc_vbase + MPC85XX_MC_ECC_ERR_INJECT, | 106 | out_be32(pdata->mc_vbase + MPC85XX_MC_ECC_ERR_INJECT, |
@@ -125,35 +110,38 @@ static ssize_t mpc85xx_mc_inject_ctrl_store(struct device *dev, | |||
125 | return 0; | 110 | return 0; |
126 | } | 111 | } |
127 | 112 | ||
128 | DEVICE_ATTR(inject_data_hi, S_IRUGO | S_IWUSR, | 113 | static struct mcidev_sysfs_attribute mpc85xx_mc_sysfs_attributes[] = { |
129 | mpc85xx_mc_inject_data_hi_show, mpc85xx_mc_inject_data_hi_store); | 114 | { |
130 | DEVICE_ATTR(inject_data_lo, S_IRUGO | S_IWUSR, | 115 | .attr = { |
131 | mpc85xx_mc_inject_data_lo_show, mpc85xx_mc_inject_data_lo_store); | 116 | .name = "inject_data_hi", |
132 | DEVICE_ATTR(inject_ctrl, S_IRUGO | S_IWUSR, | 117 | .mode = (S_IRUGO | S_IWUSR) |
133 | mpc85xx_mc_inject_ctrl_show, mpc85xx_mc_inject_ctrl_store); | 118 | }, |
134 | 119 | .show = mpc85xx_mc_inject_data_hi_show, | |
135 | static int mpc85xx_create_sysfs_attributes(struct mem_ctl_info *mci) | 120 | .store = mpc85xx_mc_inject_data_hi_store}, |
136 | { | 121 | { |
137 | int rc; | 122 | .attr = { |
138 | 123 | .name = "inject_data_lo", | |
139 | rc = device_create_file(&mci->dev, &dev_attr_inject_data_hi); | 124 | .mode = (S_IRUGO | S_IWUSR) |
140 | if (rc < 0) | 125 | }, |
141 | return rc; | 126 | .show = mpc85xx_mc_inject_data_lo_show, |
142 | rc = device_create_file(&mci->dev, &dev_attr_inject_data_lo); | 127 | .store = mpc85xx_mc_inject_data_lo_store}, |
143 | if (rc < 0) | 128 | { |
144 | return rc; | 129 | .attr = { |
145 | rc = device_create_file(&mci->dev, &dev_attr_inject_ctrl); | 130 | .name = "inject_ctrl", |
146 | if (rc < 0) | 131 | .mode = (S_IRUGO | S_IWUSR) |
147 | return rc; | 132 | }, |
133 | .show = mpc85xx_mc_inject_ctrl_show, | ||
134 | .store = mpc85xx_mc_inject_ctrl_store}, | ||
148 | 135 | ||
149 | return 0; | 136 | /* End of list */ |
150 | } | 137 | { |
138 | .attr = {.name = NULL} | ||
139 | } | ||
140 | }; | ||
151 | 141 | ||
152 | static void mpc85xx_remove_sysfs_attributes(struct mem_ctl_info *mci) | 142 | static void mpc85xx_set_mc_sysfs_attributes(struct mem_ctl_info *mci) |
153 | { | 143 | { |
154 | device_remove_file(&mci->dev, &dev_attr_inject_data_hi); | 144 | mci->mc_driver_sysfs_attributes = mpc85xx_mc_sysfs_attributes; |
155 | device_remove_file(&mci->dev, &dev_attr_inject_data_lo); | ||
156 | device_remove_file(&mci->dev, &dev_attr_inject_ctrl); | ||
157 | } | 145 | } |
158 | 146 | ||
159 | /**************************** PCI Err device ***************************/ | 147 | /**************************** PCI Err device ***************************/ |
@@ -212,7 +200,7 @@ static irqreturn_t mpc85xx_pci_isr(int irq, void *dev_id) | |||
212 | return IRQ_HANDLED; | 200 | return IRQ_HANDLED; |
213 | } | 201 | } |
214 | 202 | ||
215 | int mpc85xx_pci_err_probe(struct platform_device *op) | 203 | static int __devinit mpc85xx_pci_err_probe(struct platform_device *op) |
216 | { | 204 | { |
217 | struct edac_pci_ctl_info *pci; | 205 | struct edac_pci_ctl_info *pci; |
218 | struct mpc85xx_pci_pdata *pdata; | 206 | struct mpc85xx_pci_pdata *pdata; |
@@ -226,16 +214,6 @@ int mpc85xx_pci_err_probe(struct platform_device *op) | |||
226 | if (!pci) | 214 | if (!pci) |
227 | return -ENOMEM; | 215 | return -ENOMEM; |
228 | 216 | ||
229 | /* make sure error reporting method is sane */ | ||
230 | switch (edac_op_state) { | ||
231 | case EDAC_OPSTATE_POLL: | ||
232 | case EDAC_OPSTATE_INT: | ||
233 | break; | ||
234 | default: | ||
235 | edac_op_state = EDAC_OPSTATE_INT; | ||
236 | break; | ||
237 | } | ||
238 | |||
239 | pdata = pci->pvt_info; | 217 | pdata = pci->pvt_info; |
240 | pdata->name = "mpc85xx_pci_err"; | 218 | pdata->name = "mpc85xx_pci_err"; |
241 | pdata->irq = NO_IRQ; | 219 | pdata->irq = NO_IRQ; |
@@ -290,7 +268,7 @@ int mpc85xx_pci_err_probe(struct platform_device *op) | |||
290 | out_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_DR, ~0); | 268 | out_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_DR, ~0); |
291 | 269 | ||
292 | if (edac_pci_add_device(pci, pdata->edac_idx) > 0) { | 270 | if (edac_pci_add_device(pci, pdata->edac_idx) > 0) { |
293 | edac_dbg(3, "failed edac_pci_add_device()\n"); | 271 | debugf3("%s(): failed edac_pci_add_device()\n", __func__); |
294 | goto err; | 272 | goto err; |
295 | } | 273 | } |
296 | 274 | ||
@@ -313,7 +291,7 @@ int mpc85xx_pci_err_probe(struct platform_device *op) | |||
313 | } | 291 | } |
314 | 292 | ||
315 | devres_remove_group(&op->dev, mpc85xx_pci_err_probe); | 293 | devres_remove_group(&op->dev, mpc85xx_pci_err_probe); |
316 | edac_dbg(3, "success\n"); | 294 | debugf3("%s(): success\n", __func__); |
317 | printk(KERN_INFO EDAC_MOD_STR " PCI err registered\n"); | 295 | printk(KERN_INFO EDAC_MOD_STR " PCI err registered\n"); |
318 | 296 | ||
319 | return 0; | 297 | return 0; |
@@ -325,14 +303,13 @@ err: | |||
325 | devres_release_group(&op->dev, mpc85xx_pci_err_probe); | 303 | devres_release_group(&op->dev, mpc85xx_pci_err_probe); |
326 | return res; | 304 | return res; |
327 | } | 305 | } |
328 | EXPORT_SYMBOL(mpc85xx_pci_err_probe); | ||
329 | 306 | ||
330 | static int mpc85xx_pci_err_remove(struct platform_device *op) | 307 | static int mpc85xx_pci_err_remove(struct platform_device *op) |
331 | { | 308 | { |
332 | struct edac_pci_ctl_info *pci = dev_get_drvdata(&op->dev); | 309 | struct edac_pci_ctl_info *pci = dev_get_drvdata(&op->dev); |
333 | struct mpc85xx_pci_pdata *pdata = pci->pvt_info; | 310 | struct mpc85xx_pci_pdata *pdata = pci->pvt_info; |
334 | 311 | ||
335 | edac_dbg(0, "\n"); | 312 | debugf0("%s()\n", __func__); |
336 | 313 | ||
337 | out_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_CAP_DR, | 314 | out_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_CAP_DR, |
338 | orig_pci_err_cap_dr); | 315 | orig_pci_err_cap_dr); |
@@ -349,6 +326,27 @@ static int mpc85xx_pci_err_remove(struct platform_device *op) | |||
349 | return 0; | 326 | return 0; |
350 | } | 327 | } |
351 | 328 | ||
329 | static struct of_device_id mpc85xx_pci_err_of_match[] = { | ||
330 | { | ||
331 | .compatible = "fsl,mpc8540-pcix", | ||
332 | }, | ||
333 | { | ||
334 | .compatible = "fsl,mpc8540-pci", | ||
335 | }, | ||
336 | {}, | ||
337 | }; | ||
338 | MODULE_DEVICE_TABLE(of, mpc85xx_pci_err_of_match); | ||
339 | |||
340 | static struct platform_driver mpc85xx_pci_err_driver = { | ||
341 | .probe = mpc85xx_pci_err_probe, | ||
342 | .remove = __devexit_p(mpc85xx_pci_err_remove), | ||
343 | .driver = { | ||
344 | .name = "mpc85xx_pci_err", | ||
345 | .owner = THIS_MODULE, | ||
346 | .of_match_table = mpc85xx_pci_err_of_match, | ||
347 | }, | ||
348 | }; | ||
349 | |||
352 | #endif /* CONFIG_PCI */ | 350 | #endif /* CONFIG_PCI */ |
353 | 351 | ||
354 | /**************************** L2 Err device ***************************/ | 352 | /**************************** L2 Err device ***************************/ |
@@ -504,7 +502,7 @@ static irqreturn_t mpc85xx_l2_isr(int irq, void *dev_id) | |||
504 | return IRQ_HANDLED; | 502 | return IRQ_HANDLED; |
505 | } | 503 | } |
506 | 504 | ||
507 | static int mpc85xx_l2_err_probe(struct platform_device *op) | 505 | static int __devinit mpc85xx_l2_err_probe(struct platform_device *op) |
508 | { | 506 | { |
509 | struct edac_device_ctl_info *edac_dev; | 507 | struct edac_device_ctl_info *edac_dev; |
510 | struct mpc85xx_l2_pdata *pdata; | 508 | struct mpc85xx_l2_pdata *pdata; |
@@ -572,7 +570,7 @@ static int mpc85xx_l2_err_probe(struct platform_device *op) | |||
572 | pdata->edac_idx = edac_dev_idx++; | 570 | pdata->edac_idx = edac_dev_idx++; |
573 | 571 | ||
574 | if (edac_device_add_device(edac_dev) > 0) { | 572 | if (edac_device_add_device(edac_dev) > 0) { |
575 | edac_dbg(3, "failed edac_device_add_device()\n"); | 573 | debugf3("%s(): failed edac_device_add_device()\n", __func__); |
576 | goto err; | 574 | goto err; |
577 | } | 575 | } |
578 | 576 | ||
@@ -600,7 +598,7 @@ static int mpc85xx_l2_err_probe(struct platform_device *op) | |||
600 | 598 | ||
601 | devres_remove_group(&op->dev, mpc85xx_l2_err_probe); | 599 | devres_remove_group(&op->dev, mpc85xx_l2_err_probe); |
602 | 600 | ||
603 | edac_dbg(3, "success\n"); | 601 | debugf3("%s(): success\n", __func__); |
604 | printk(KERN_INFO EDAC_MOD_STR " L2 err registered\n"); | 602 | printk(KERN_INFO EDAC_MOD_STR " L2 err registered\n"); |
605 | 603 | ||
606 | return 0; | 604 | return 0; |
@@ -618,7 +616,7 @@ static int mpc85xx_l2_err_remove(struct platform_device *op) | |||
618 | struct edac_device_ctl_info *edac_dev = dev_get_drvdata(&op->dev); | 616 | struct edac_device_ctl_info *edac_dev = dev_get_drvdata(&op->dev); |
619 | struct mpc85xx_l2_pdata *pdata = edac_dev->pvt_info; | 617 | struct mpc85xx_l2_pdata *pdata = edac_dev->pvt_info; |
620 | 618 | ||
621 | edac_dbg(0, "\n"); | 619 | debugf0("%s()\n", __func__); |
622 | 620 | ||
623 | if (edac_op_state == EDAC_OPSTATE_INT) { | 621 | if (edac_op_state == EDAC_OPSTATE_INT) { |
624 | out_be32(pdata->l2_vbase + MPC85XX_L2_ERRINTEN, 0); | 622 | out_be32(pdata->l2_vbase + MPC85XX_L2_ERRINTEN, 0); |
@@ -815,7 +813,7 @@ static void mpc85xx_mc_check(struct mem_ctl_info *mci) | |||
815 | pfn = err_addr >> PAGE_SHIFT; | 813 | pfn = err_addr >> PAGE_SHIFT; |
816 | 814 | ||
817 | for (row_index = 0; row_index < mci->nr_csrows; row_index++) { | 815 | for (row_index = 0; row_index < mci->nr_csrows; row_index++) { |
818 | csrow = mci->csrows[row_index]; | 816 | csrow = &mci->csrows[row_index]; |
819 | if ((pfn >= csrow->first_page) && (pfn <= csrow->last_page)) | 817 | if ((pfn >= csrow->first_page) && (pfn <= csrow->last_page)) |
820 | break; | 818 | break; |
821 | } | 819 | } |
@@ -856,16 +854,12 @@ static void mpc85xx_mc_check(struct mem_ctl_info *mci) | |||
856 | mpc85xx_mc_printk(mci, KERN_ERR, "PFN out of range!\n"); | 854 | mpc85xx_mc_printk(mci, KERN_ERR, "PFN out of range!\n"); |
857 | 855 | ||
858 | if (err_detect & DDR_EDE_SBE) | 856 | if (err_detect & DDR_EDE_SBE) |
859 | edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1, | 857 | edac_mc_handle_ce(mci, pfn, err_addr & ~PAGE_MASK, |
860 | pfn, err_addr & ~PAGE_MASK, syndrome, | 858 | syndrome, row_index, 0, mci->ctl_name); |
861 | row_index, 0, -1, | ||
862 | mci->ctl_name, ""); | ||
863 | 859 | ||
864 | if (err_detect & DDR_EDE_MBE) | 860 | if (err_detect & DDR_EDE_MBE) |
865 | edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1, | 861 | edac_mc_handle_ue(mci, pfn, err_addr & ~PAGE_MASK, |
866 | pfn, err_addr & ~PAGE_MASK, syndrome, | 862 | row_index, mci->ctl_name); |
867 | row_index, 0, -1, | ||
868 | mci->ctl_name, ""); | ||
869 | 863 | ||
870 | out_be32(pdata->mc_vbase + MPC85XX_MC_ERR_DETECT, err_detect); | 864 | out_be32(pdata->mc_vbase + MPC85XX_MC_ERR_DETECT, err_detect); |
871 | } | 865 | } |
@@ -885,11 +879,10 @@ static irqreturn_t mpc85xx_mc_isr(int irq, void *dev_id) | |||
885 | return IRQ_HANDLED; | 879 | return IRQ_HANDLED; |
886 | } | 880 | } |
887 | 881 | ||
888 | static void mpc85xx_init_csrows(struct mem_ctl_info *mci) | 882 | static void __devinit mpc85xx_init_csrows(struct mem_ctl_info *mci) |
889 | { | 883 | { |
890 | struct mpc85xx_mc_pdata *pdata = mci->pvt_info; | 884 | struct mpc85xx_mc_pdata *pdata = mci->pvt_info; |
891 | struct csrow_info *csrow; | 885 | struct csrow_info *csrow; |
892 | struct dimm_info *dimm; | ||
893 | u32 sdram_ctl; | 886 | u32 sdram_ctl; |
894 | u32 sdtype; | 887 | u32 sdtype; |
895 | enum mem_type mtype; | 888 | enum mem_type mtype; |
@@ -935,9 +928,7 @@ static void mpc85xx_init_csrows(struct mem_ctl_info *mci) | |||
935 | u32 start; | 928 | u32 start; |
936 | u32 end; | 929 | u32 end; |
937 | 930 | ||
938 | csrow = mci->csrows[index]; | 931 | csrow = &mci->csrows[index]; |
939 | dimm = csrow->channels[0]->dimm; | ||
940 | |||
941 | cs_bnds = in_be32(pdata->mc_vbase + MPC85XX_MC_CS_BNDS_0 + | 932 | cs_bnds = in_be32(pdata->mc_vbase + MPC85XX_MC_CS_BNDS_0 + |
942 | (index * MPC85XX_MC_CS_BNDS_OFS)); | 933 | (index * MPC85XX_MC_CS_BNDS_OFS)); |
943 | 934 | ||
@@ -953,21 +944,19 @@ static void mpc85xx_init_csrows(struct mem_ctl_info *mci) | |||
953 | 944 | ||
954 | csrow->first_page = start; | 945 | csrow->first_page = start; |
955 | csrow->last_page = end; | 946 | csrow->last_page = end; |
956 | 947 | csrow->nr_pages = end + 1 - start; | |
957 | dimm->nr_pages = end + 1 - start; | 948 | csrow->grain = 8; |
958 | dimm->grain = 8; | 949 | csrow->mtype = mtype; |
959 | dimm->mtype = mtype; | 950 | csrow->dtype = DEV_UNKNOWN; |
960 | dimm->dtype = DEV_UNKNOWN; | ||
961 | if (sdram_ctl & DSC_X32_EN) | 951 | if (sdram_ctl & DSC_X32_EN) |
962 | dimm->dtype = DEV_X32; | 952 | csrow->dtype = DEV_X32; |
963 | dimm->edac_mode = EDAC_SECDED; | 953 | csrow->edac_mode = EDAC_SECDED; |
964 | } | 954 | } |
965 | } | 955 | } |
966 | 956 | ||
967 | static int mpc85xx_mc_err_probe(struct platform_device *op) | 957 | static int __devinit mpc85xx_mc_err_probe(struct platform_device *op) |
968 | { | 958 | { |
969 | struct mem_ctl_info *mci; | 959 | struct mem_ctl_info *mci; |
970 | struct edac_mc_layer layers[2]; | ||
971 | struct mpc85xx_mc_pdata *pdata; | 960 | struct mpc85xx_mc_pdata *pdata; |
972 | struct resource r; | 961 | struct resource r; |
973 | u32 sdram_ctl; | 962 | u32 sdram_ctl; |
@@ -976,14 +965,7 @@ static int mpc85xx_mc_err_probe(struct platform_device *op) | |||
976 | if (!devres_open_group(&op->dev, mpc85xx_mc_err_probe, GFP_KERNEL)) | 965 | if (!devres_open_group(&op->dev, mpc85xx_mc_err_probe, GFP_KERNEL)) |
977 | return -ENOMEM; | 966 | return -ENOMEM; |
978 | 967 | ||
979 | layers[0].type = EDAC_MC_LAYER_CHIP_SELECT; | 968 | mci = edac_mc_alloc(sizeof(*pdata), 4, 1, edac_mc_idx); |
980 | layers[0].size = 4; | ||
981 | layers[0].is_virt_csrow = true; | ||
982 | layers[1].type = EDAC_MC_LAYER_CHANNEL; | ||
983 | layers[1].size = 1; | ||
984 | layers[1].is_virt_csrow = false; | ||
985 | mci = edac_mc_alloc(edac_mc_idx, ARRAY_SIZE(layers), layers, | ||
986 | sizeof(*pdata)); | ||
987 | if (!mci) { | 969 | if (!mci) { |
988 | devres_release_group(&op->dev, mpc85xx_mc_err_probe); | 970 | devres_release_group(&op->dev, mpc85xx_mc_err_probe); |
989 | return -ENOMEM; | 971 | return -ENOMEM; |
@@ -992,9 +974,9 @@ static int mpc85xx_mc_err_probe(struct platform_device *op) | |||
992 | pdata = mci->pvt_info; | 974 | pdata = mci->pvt_info; |
993 | pdata->name = "mpc85xx_mc_err"; | 975 | pdata->name = "mpc85xx_mc_err"; |
994 | pdata->irq = NO_IRQ; | 976 | pdata->irq = NO_IRQ; |
995 | mci->pdev = &op->dev; | 977 | mci->dev = &op->dev; |
996 | pdata->edac_idx = edac_mc_idx++; | 978 | pdata->edac_idx = edac_mc_idx++; |
997 | dev_set_drvdata(mci->pdev, mci); | 979 | dev_set_drvdata(mci->dev, mci); |
998 | mci->ctl_name = pdata->name; | 980 | mci->ctl_name = pdata->name; |
999 | mci->dev_name = pdata->name; | 981 | mci->dev_name = pdata->name; |
1000 | 982 | ||
@@ -1028,7 +1010,7 @@ static int mpc85xx_mc_err_probe(struct platform_device *op) | |||
1028 | goto err; | 1010 | goto err; |
1029 | } | 1011 | } |
1030 | 1012 | ||
1031 | edac_dbg(3, "init mci\n"); | 1013 | debugf3("%s(): init mci\n", __func__); |
1032 | mci->mtype_cap = MEM_FLAG_RDDR | MEM_FLAG_RDDR2 | | 1014 | mci->mtype_cap = MEM_FLAG_RDDR | MEM_FLAG_RDDR2 | |
1033 | MEM_FLAG_DDR | MEM_FLAG_DDR2; | 1015 | MEM_FLAG_DDR | MEM_FLAG_DDR2; |
1034 | mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED; | 1016 | mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED; |
@@ -1043,6 +1025,8 @@ static int mpc85xx_mc_err_probe(struct platform_device *op) | |||
1043 | 1025 | ||
1044 | mci->scrub_mode = SCRUB_SW_SRC; | 1026 | mci->scrub_mode = SCRUB_SW_SRC; |
1045 | 1027 | ||
1028 | mpc85xx_set_mc_sysfs_attributes(mci); | ||
1029 | |||
1046 | mpc85xx_init_csrows(mci); | 1030 | mpc85xx_init_csrows(mci); |
1047 | 1031 | ||
1048 | /* store the original error disable bits */ | 1032 | /* store the original error disable bits */ |
@@ -1054,13 +1038,7 @@ static int mpc85xx_mc_err_probe(struct platform_device *op) | |||
1054 | out_be32(pdata->mc_vbase + MPC85XX_MC_ERR_DETECT, ~0); | 1038 | out_be32(pdata->mc_vbase + MPC85XX_MC_ERR_DETECT, ~0); |
1055 | 1039 | ||
1056 | if (edac_mc_add_mc(mci)) { | 1040 | if (edac_mc_add_mc(mci)) { |
1057 | edac_dbg(3, "failed edac_mc_add_mc()\n"); | 1041 | debugf3("%s(): failed edac_mc_add_mc()\n", __func__); |
1058 | goto err; | ||
1059 | } | ||
1060 | |||
1061 | if (mpc85xx_create_sysfs_attributes(mci)) { | ||
1062 | edac_mc_del_mc(mci->pdev); | ||
1063 | edac_dbg(3, "failed edac_mc_add_mc()\n"); | ||
1064 | goto err; | 1042 | goto err; |
1065 | } | 1043 | } |
1066 | 1044 | ||
@@ -1094,7 +1072,7 @@ static int mpc85xx_mc_err_probe(struct platform_device *op) | |||
1094 | } | 1072 | } |
1095 | 1073 | ||
1096 | devres_remove_group(&op->dev, mpc85xx_mc_err_probe); | 1074 | devres_remove_group(&op->dev, mpc85xx_mc_err_probe); |
1097 | edac_dbg(3, "success\n"); | 1075 | debugf3("%s(): success\n", __func__); |
1098 | printk(KERN_INFO EDAC_MOD_STR " MC err registered\n"); | 1076 | printk(KERN_INFO EDAC_MOD_STR " MC err registered\n"); |
1099 | 1077 | ||
1100 | return 0; | 1078 | return 0; |
@@ -1112,7 +1090,7 @@ static int mpc85xx_mc_err_remove(struct platform_device *op) | |||
1112 | struct mem_ctl_info *mci = dev_get_drvdata(&op->dev); | 1090 | struct mem_ctl_info *mci = dev_get_drvdata(&op->dev); |
1113 | struct mpc85xx_mc_pdata *pdata = mci->pvt_info; | 1091 | struct mpc85xx_mc_pdata *pdata = mci->pvt_info; |
1114 | 1092 | ||
1115 | edac_dbg(0, "\n"); | 1093 | debugf0("%s()\n", __func__); |
1116 | 1094 | ||
1117 | if (edac_op_state == EDAC_OPSTATE_INT) { | 1095 | if (edac_op_state == EDAC_OPSTATE_INT) { |
1118 | out_be32(pdata->mc_vbase + MPC85XX_MC_ERR_INT_EN, 0); | 1096 | out_be32(pdata->mc_vbase + MPC85XX_MC_ERR_INT_EN, 0); |
@@ -1123,7 +1101,6 @@ static int mpc85xx_mc_err_remove(struct platform_device *op) | |||
1123 | orig_ddr_err_disable); | 1101 | orig_ddr_err_disable); |
1124 | out_be32(pdata->mc_vbase + MPC85XX_MC_ERR_SBE, orig_ddr_err_sbe); | 1102 | out_be32(pdata->mc_vbase + MPC85XX_MC_ERR_SBE, orig_ddr_err_sbe); |
1125 | 1103 | ||
1126 | mpc85xx_remove_sysfs_attributes(mci); | ||
1127 | edac_mc_del_mc(&op->dev); | 1104 | edac_mc_del_mc(&op->dev); |
1128 | edac_mc_free(mci); | 1105 | edac_mc_free(mci); |
1129 | return 0; | 1106 | return 0; |
@@ -1151,7 +1128,7 @@ static struct of_device_id mpc85xx_mc_err_of_match[] = { | |||
1151 | { .compatible = "fsl,p1020-memory-controller", }, | 1128 | { .compatible = "fsl,p1020-memory-controller", }, |
1152 | { .compatible = "fsl,p1021-memory-controller", }, | 1129 | { .compatible = "fsl,p1021-memory-controller", }, |
1153 | { .compatible = "fsl,p2020-memory-controller", }, | 1130 | { .compatible = "fsl,p2020-memory-controller", }, |
1154 | { .compatible = "fsl,qoriq-memory-controller", }, | 1131 | { .compatible = "fsl,p4080-memory-controller", }, |
1155 | {}, | 1132 | {}, |
1156 | }; | 1133 | }; |
1157 | MODULE_DEVICE_TABLE(of, mpc85xx_mc_err_of_match); | 1134 | MODULE_DEVICE_TABLE(of, mpc85xx_mc_err_of_match); |
@@ -1200,6 +1177,12 @@ static int __init mpc85xx_mc_init(void) | |||
1200 | if (res) | 1177 | if (res) |
1201 | printk(KERN_WARNING EDAC_MOD_STR "L2 fails to register\n"); | 1178 | printk(KERN_WARNING EDAC_MOD_STR "L2 fails to register\n"); |
1202 | 1179 | ||
1180 | #ifdef CONFIG_PCI | ||
1181 | res = platform_driver_register(&mpc85xx_pci_err_driver); | ||
1182 | if (res) | ||
1183 | printk(KERN_WARNING EDAC_MOD_STR "PCI fails to register\n"); | ||
1184 | #endif | ||
1185 | |||
1203 | #ifdef CONFIG_FSL_SOC_BOOKE | 1186 | #ifdef CONFIG_FSL_SOC_BOOKE |
1204 | pvr = mfspr(SPRN_PVR); | 1187 | pvr = mfspr(SPRN_PVR); |
1205 | 1188 | ||
@@ -1236,6 +1219,9 @@ static void __exit mpc85xx_mc_exit(void) | |||
1236 | on_each_cpu(mpc85xx_mc_restore_hid1, NULL, 0); | 1219 | on_each_cpu(mpc85xx_mc_restore_hid1, NULL, 0); |
1237 | } | 1220 | } |
1238 | #endif | 1221 | #endif |
1222 | #ifdef CONFIG_PCI | ||
1223 | platform_driver_unregister(&mpc85xx_pci_err_driver); | ||
1224 | #endif | ||
1239 | platform_driver_unregister(&mpc85xx_l2_err_driver); | 1225 | platform_driver_unregister(&mpc85xx_l2_err_driver); |
1240 | platform_driver_unregister(&mpc85xx_mc_err_driver); | 1226 | platform_driver_unregister(&mpc85xx_mc_err_driver); |
1241 | } | 1227 | } |
diff --git a/drivers/edac/mv64x60_edac.c b/drivers/edac/mv64x60_edac.c index 542fad70e36..7e5ff367705 100644 --- a/drivers/edac/mv64x60_edac.c +++ b/drivers/edac/mv64x60_edac.c | |||
@@ -100,7 +100,7 @@ static int __init mv64x60_pci_fixup(struct platform_device *pdev) | |||
100 | return 0; | 100 | return 0; |
101 | } | 101 | } |
102 | 102 | ||
103 | static int mv64x60_pci_err_probe(struct platform_device *pdev) | 103 | static int __devinit mv64x60_pci_err_probe(struct platform_device *pdev) |
104 | { | 104 | { |
105 | struct edac_pci_ctl_info *pci; | 105 | struct edac_pci_ctl_info *pci; |
106 | struct mv64x60_pci_pdata *pdata; | 106 | struct mv64x60_pci_pdata *pdata; |
@@ -169,7 +169,7 @@ static int mv64x60_pci_err_probe(struct platform_device *pdev) | |||
169 | MV64X60_PCIx_ERR_MASK_VAL); | 169 | MV64X60_PCIx_ERR_MASK_VAL); |
170 | 170 | ||
171 | if (edac_pci_add_device(pci, pdata->edac_idx) > 0) { | 171 | if (edac_pci_add_device(pci, pdata->edac_idx) > 0) { |
172 | edac_dbg(3, "failed edac_pci_add_device()\n"); | 172 | debugf3("%s(): failed edac_pci_add_device()\n", __func__); |
173 | goto err; | 173 | goto err; |
174 | } | 174 | } |
175 | 175 | ||
@@ -194,7 +194,7 @@ static int mv64x60_pci_err_probe(struct platform_device *pdev) | |||
194 | devres_remove_group(&pdev->dev, mv64x60_pci_err_probe); | 194 | devres_remove_group(&pdev->dev, mv64x60_pci_err_probe); |
195 | 195 | ||
196 | /* get this far and it's successful */ | 196 | /* get this far and it's successful */ |
197 | edac_dbg(3, "success\n"); | 197 | debugf3("%s(): success\n", __func__); |
198 | 198 | ||
199 | return 0; | 199 | return 0; |
200 | 200 | ||
@@ -210,7 +210,7 @@ static int mv64x60_pci_err_remove(struct platform_device *pdev) | |||
210 | { | 210 | { |
211 | struct edac_pci_ctl_info *pci = platform_get_drvdata(pdev); | 211 | struct edac_pci_ctl_info *pci = platform_get_drvdata(pdev); |
212 | 212 | ||
213 | edac_dbg(0, "\n"); | 213 | debugf0("%s()\n", __func__); |
214 | 214 | ||
215 | edac_pci_del_device(&pdev->dev); | 215 | edac_pci_del_device(&pdev->dev); |
216 | 216 | ||
@@ -221,7 +221,7 @@ static int mv64x60_pci_err_remove(struct platform_device *pdev) | |||
221 | 221 | ||
222 | static struct platform_driver mv64x60_pci_err_driver = { | 222 | static struct platform_driver mv64x60_pci_err_driver = { |
223 | .probe = mv64x60_pci_err_probe, | 223 | .probe = mv64x60_pci_err_probe, |
224 | .remove = mv64x60_pci_err_remove, | 224 | .remove = __devexit_p(mv64x60_pci_err_remove), |
225 | .driver = { | 225 | .driver = { |
226 | .name = "mv64x60_pci_err", | 226 | .name = "mv64x60_pci_err", |
227 | } | 227 | } |
@@ -271,7 +271,7 @@ static irqreturn_t mv64x60_sram_isr(int irq, void *dev_id) | |||
271 | return IRQ_HANDLED; | 271 | return IRQ_HANDLED; |
272 | } | 272 | } |
273 | 273 | ||
274 | static int mv64x60_sram_err_probe(struct platform_device *pdev) | 274 | static int __devinit mv64x60_sram_err_probe(struct platform_device *pdev) |
275 | { | 275 | { |
276 | struct edac_device_ctl_info *edac_dev; | 276 | struct edac_device_ctl_info *edac_dev; |
277 | struct mv64x60_sram_pdata *pdata; | 277 | struct mv64x60_sram_pdata *pdata; |
@@ -336,7 +336,7 @@ static int mv64x60_sram_err_probe(struct platform_device *pdev) | |||
336 | pdata->edac_idx = edac_dev_idx++; | 336 | pdata->edac_idx = edac_dev_idx++; |
337 | 337 | ||
338 | if (edac_device_add_device(edac_dev) > 0) { | 338 | if (edac_device_add_device(edac_dev) > 0) { |
339 | edac_dbg(3, "failed edac_device_add_device()\n"); | 339 | debugf3("%s(): failed edac_device_add_device()\n", __func__); |
340 | goto err; | 340 | goto err; |
341 | } | 341 | } |
342 | 342 | ||
@@ -363,7 +363,7 @@ static int mv64x60_sram_err_probe(struct platform_device *pdev) | |||
363 | devres_remove_group(&pdev->dev, mv64x60_sram_err_probe); | 363 | devres_remove_group(&pdev->dev, mv64x60_sram_err_probe); |
364 | 364 | ||
365 | /* get this far and it's successful */ | 365 | /* get this far and it's successful */ |
366 | edac_dbg(3, "success\n"); | 366 | debugf3("%s(): success\n", __func__); |
367 | 367 | ||
368 | return 0; | 368 | return 0; |
369 | 369 | ||
@@ -379,7 +379,7 @@ static int mv64x60_sram_err_remove(struct platform_device *pdev) | |||
379 | { | 379 | { |
380 | struct edac_device_ctl_info *edac_dev = platform_get_drvdata(pdev); | 380 | struct edac_device_ctl_info *edac_dev = platform_get_drvdata(pdev); |
381 | 381 | ||
382 | edac_dbg(0, "\n"); | 382 | debugf0("%s()\n", __func__); |
383 | 383 | ||
384 | edac_device_del_device(&pdev->dev); | 384 | edac_device_del_device(&pdev->dev); |
385 | edac_device_free_ctl_info(edac_dev); | 385 | edac_device_free_ctl_info(edac_dev); |
@@ -439,7 +439,7 @@ static irqreturn_t mv64x60_cpu_isr(int irq, void *dev_id) | |||
439 | return IRQ_HANDLED; | 439 | return IRQ_HANDLED; |
440 | } | 440 | } |
441 | 441 | ||
442 | static int mv64x60_cpu_err_probe(struct platform_device *pdev) | 442 | static int __devinit mv64x60_cpu_err_probe(struct platform_device *pdev) |
443 | { | 443 | { |
444 | struct edac_device_ctl_info *edac_dev; | 444 | struct edac_device_ctl_info *edac_dev; |
445 | struct resource *r; | 445 | struct resource *r; |
@@ -531,7 +531,7 @@ static int mv64x60_cpu_err_probe(struct platform_device *pdev) | |||
531 | pdata->edac_idx = edac_dev_idx++; | 531 | pdata->edac_idx = edac_dev_idx++; |
532 | 532 | ||
533 | if (edac_device_add_device(edac_dev) > 0) { | 533 | if (edac_device_add_device(edac_dev) > 0) { |
534 | edac_dbg(3, "failed edac_device_add_device()\n"); | 534 | debugf3("%s(): failed edac_device_add_device()\n", __func__); |
535 | goto err; | 535 | goto err; |
536 | } | 536 | } |
537 | 537 | ||
@@ -558,7 +558,7 @@ static int mv64x60_cpu_err_probe(struct platform_device *pdev) | |||
558 | devres_remove_group(&pdev->dev, mv64x60_cpu_err_probe); | 558 | devres_remove_group(&pdev->dev, mv64x60_cpu_err_probe); |
559 | 559 | ||
560 | /* get this far and it's successful */ | 560 | /* get this far and it's successful */ |
561 | edac_dbg(3, "success\n"); | 561 | debugf3("%s(): success\n", __func__); |
562 | 562 | ||
563 | return 0; | 563 | return 0; |
564 | 564 | ||
@@ -574,7 +574,7 @@ static int mv64x60_cpu_err_remove(struct platform_device *pdev) | |||
574 | { | 574 | { |
575 | struct edac_device_ctl_info *edac_dev = platform_get_drvdata(pdev); | 575 | struct edac_device_ctl_info *edac_dev = platform_get_drvdata(pdev); |
576 | 576 | ||
577 | edac_dbg(0, "\n"); | 577 | debugf0("%s()\n", __func__); |
578 | 578 | ||
579 | edac_device_del_device(&pdev->dev); | 579 | edac_device_del_device(&pdev->dev); |
580 | edac_device_free_ctl_info(edac_dev); | 580 | edac_device_free_ctl_info(edac_dev); |
@@ -611,17 +611,12 @@ static void mv64x60_mc_check(struct mem_ctl_info *mci) | |||
611 | 611 | ||
612 | /* first bit clear in ECC Err Reg, 1 bit error, correctable by HW */ | 612 | /* first bit clear in ECC Err Reg, 1 bit error, correctable by HW */ |
613 | if (!(reg & 0x1)) | 613 | if (!(reg & 0x1)) |
614 | edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1, | 614 | edac_mc_handle_ce(mci, err_addr >> PAGE_SHIFT, |
615 | err_addr >> PAGE_SHIFT, | 615 | err_addr & PAGE_MASK, syndrome, 0, 0, |
616 | err_addr & PAGE_MASK, syndrome, | 616 | mci->ctl_name); |
617 | 0, 0, -1, | ||
618 | mci->ctl_name, ""); | ||
619 | else /* 2 bit error, UE */ | 617 | else /* 2 bit error, UE */ |
620 | edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1, | 618 | edac_mc_handle_ue(mci, err_addr >> PAGE_SHIFT, |
621 | err_addr >> PAGE_SHIFT, | 619 | err_addr & PAGE_MASK, 0, mci->ctl_name); |
622 | err_addr & PAGE_MASK, 0, | ||
623 | 0, 0, -1, | ||
624 | mci->ctl_name, ""); | ||
625 | 620 | ||
626 | /* clear the error */ | 621 | /* clear the error */ |
627 | out_le32(pdata->mc_vbase + MV64X60_SDRAM_ERR_ADDR, 0); | 622 | out_le32(pdata->mc_vbase + MV64X60_SDRAM_ERR_ADDR, 0); |
@@ -661,8 +656,6 @@ static void mv64x60_init_csrows(struct mem_ctl_info *mci, | |||
661 | struct mv64x60_mc_pdata *pdata) | 656 | struct mv64x60_mc_pdata *pdata) |
662 | { | 657 | { |
663 | struct csrow_info *csrow; | 658 | struct csrow_info *csrow; |
664 | struct dimm_info *dimm; | ||
665 | |||
666 | u32 devtype; | 659 | u32 devtype; |
667 | u32 ctl; | 660 | u32 ctl; |
668 | 661 | ||
@@ -670,37 +663,36 @@ static void mv64x60_init_csrows(struct mem_ctl_info *mci, | |||
670 | 663 | ||
671 | ctl = in_le32(pdata->mc_vbase + MV64X60_SDRAM_CONFIG); | 664 | ctl = in_le32(pdata->mc_vbase + MV64X60_SDRAM_CONFIG); |
672 | 665 | ||
673 | csrow = mci->csrows[0]; | 666 | csrow = &mci->csrows[0]; |
674 | dimm = csrow->channels[0]->dimm; | 667 | csrow->first_page = 0; |
675 | 668 | csrow->nr_pages = pdata->total_mem >> PAGE_SHIFT; | |
676 | dimm->nr_pages = pdata->total_mem >> PAGE_SHIFT; | 669 | csrow->last_page = csrow->first_page + csrow->nr_pages - 1; |
677 | dimm->grain = 8; | 670 | csrow->grain = 8; |
678 | 671 | ||
679 | dimm->mtype = (ctl & MV64X60_SDRAM_REGISTERED) ? MEM_RDDR : MEM_DDR; | 672 | csrow->mtype = (ctl & MV64X60_SDRAM_REGISTERED) ? MEM_RDDR : MEM_DDR; |
680 | 673 | ||
681 | devtype = (ctl >> 20) & 0x3; | 674 | devtype = (ctl >> 20) & 0x3; |
682 | switch (devtype) { | 675 | switch (devtype) { |
683 | case 0x0: | 676 | case 0x0: |
684 | dimm->dtype = DEV_X32; | 677 | csrow->dtype = DEV_X32; |
685 | break; | 678 | break; |
686 | case 0x2: /* could be X8 too, but no way to tell */ | 679 | case 0x2: /* could be X8 too, but no way to tell */ |
687 | dimm->dtype = DEV_X16; | 680 | csrow->dtype = DEV_X16; |
688 | break; | 681 | break; |
689 | case 0x3: | 682 | case 0x3: |
690 | dimm->dtype = DEV_X4; | 683 | csrow->dtype = DEV_X4; |
691 | break; | 684 | break; |
692 | default: | 685 | default: |
693 | dimm->dtype = DEV_UNKNOWN; | 686 | csrow->dtype = DEV_UNKNOWN; |
694 | break; | 687 | break; |
695 | } | 688 | } |
696 | 689 | ||
697 | dimm->edac_mode = EDAC_SECDED; | 690 | csrow->edac_mode = EDAC_SECDED; |
698 | } | 691 | } |
699 | 692 | ||
700 | static int mv64x60_mc_err_probe(struct platform_device *pdev) | 693 | static int __devinit mv64x60_mc_err_probe(struct platform_device *pdev) |
701 | { | 694 | { |
702 | struct mem_ctl_info *mci; | 695 | struct mem_ctl_info *mci; |
703 | struct edac_mc_layer layers[2]; | ||
704 | struct mv64x60_mc_pdata *pdata; | 696 | struct mv64x60_mc_pdata *pdata; |
705 | struct resource *r; | 697 | struct resource *r; |
706 | u32 ctl; | 698 | u32 ctl; |
@@ -709,14 +701,7 @@ static int mv64x60_mc_err_probe(struct platform_device *pdev) | |||
709 | if (!devres_open_group(&pdev->dev, mv64x60_mc_err_probe, GFP_KERNEL)) | 701 | if (!devres_open_group(&pdev->dev, mv64x60_mc_err_probe, GFP_KERNEL)) |
710 | return -ENOMEM; | 702 | return -ENOMEM; |
711 | 703 | ||
712 | layers[0].type = EDAC_MC_LAYER_CHIP_SELECT; | 704 | mci = edac_mc_alloc(sizeof(struct mv64x60_mc_pdata), 1, 1, edac_mc_idx); |
713 | layers[0].size = 1; | ||
714 | layers[0].is_virt_csrow = true; | ||
715 | layers[1].type = EDAC_MC_LAYER_CHANNEL; | ||
716 | layers[1].size = 1; | ||
717 | layers[1].is_virt_csrow = false; | ||
718 | mci = edac_mc_alloc(edac_mc_idx, ARRAY_SIZE(layers), layers, | ||
719 | sizeof(struct mv64x60_mc_pdata)); | ||
720 | if (!mci) { | 705 | if (!mci) { |
721 | printk(KERN_ERR "%s: No memory for CPU err\n", __func__); | 706 | printk(KERN_ERR "%s: No memory for CPU err\n", __func__); |
722 | devres_release_group(&pdev->dev, mv64x60_mc_err_probe); | 707 | devres_release_group(&pdev->dev, mv64x60_mc_err_probe); |
@@ -724,7 +709,7 @@ static int mv64x60_mc_err_probe(struct platform_device *pdev) | |||
724 | } | 709 | } |
725 | 710 | ||
726 | pdata = mci->pvt_info; | 711 | pdata = mci->pvt_info; |
727 | mci->pdev = &pdev->dev; | 712 | mci->dev = &pdev->dev; |
728 | platform_set_drvdata(pdev, mci); | 713 | platform_set_drvdata(pdev, mci); |
729 | pdata->name = "mv64x60_mc_err"; | 714 | pdata->name = "mv64x60_mc_err"; |
730 | pdata->irq = NO_IRQ; | 715 | pdata->irq = NO_IRQ; |
@@ -766,7 +751,7 @@ static int mv64x60_mc_err_probe(struct platform_device *pdev) | |||
766 | goto err2; | 751 | goto err2; |
767 | } | 752 | } |
768 | 753 | ||
769 | edac_dbg(3, "init mci\n"); | 754 | debugf3("%s(): init mci\n", __func__); |
770 | mci->mtype_cap = MEM_FLAG_RDDR | MEM_FLAG_DDR; | 755 | mci->mtype_cap = MEM_FLAG_RDDR | MEM_FLAG_DDR; |
771 | mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED; | 756 | mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED; |
772 | mci->edac_cap = EDAC_FLAG_SECDED; | 757 | mci->edac_cap = EDAC_FLAG_SECDED; |
@@ -790,7 +775,7 @@ static int mv64x60_mc_err_probe(struct platform_device *pdev) | |||
790 | out_le32(pdata->mc_vbase + MV64X60_SDRAM_ERR_ECC_CNTL, ctl); | 775 | out_le32(pdata->mc_vbase + MV64X60_SDRAM_ERR_ECC_CNTL, ctl); |
791 | 776 | ||
792 | if (edac_mc_add_mc(mci)) { | 777 | if (edac_mc_add_mc(mci)) { |
793 | edac_dbg(3, "failed edac_mc_add_mc()\n"); | 778 | debugf3("%s(): failed edac_mc_add_mc()\n", __func__); |
794 | goto err; | 779 | goto err; |
795 | } | 780 | } |
796 | 781 | ||
@@ -815,7 +800,7 @@ static int mv64x60_mc_err_probe(struct platform_device *pdev) | |||
815 | } | 800 | } |
816 | 801 | ||
817 | /* get this far and it's successful */ | 802 | /* get this far and it's successful */ |
818 | edac_dbg(3, "success\n"); | 803 | debugf3("%s(): success\n", __func__); |
819 | 804 | ||
820 | return 0; | 805 | return 0; |
821 | 806 | ||
@@ -831,7 +816,7 @@ static int mv64x60_mc_err_remove(struct platform_device *pdev) | |||
831 | { | 816 | { |
832 | struct mem_ctl_info *mci = platform_get_drvdata(pdev); | 817 | struct mem_ctl_info *mci = platform_get_drvdata(pdev); |
833 | 818 | ||
834 | edac_dbg(0, "\n"); | 819 | debugf0("%s()\n", __func__); |
835 | 820 | ||
836 | edac_mc_del_mc(&pdev->dev); | 821 | edac_mc_del_mc(&pdev->dev); |
837 | edac_mc_free(mci); | 822 | edac_mc_free(mci); |
diff --git a/drivers/edac/octeon_edac-l2c.c b/drivers/edac/octeon_edac-l2c.c deleted file mode 100644 index 7e98084d364..00000000000 --- a/drivers/edac/octeon_edac-l2c.c +++ /dev/null | |||
@@ -1,208 +0,0 @@ | |||
1 | /* | ||
2 | * This file is subject to the terms and conditions of the GNU General Public | ||
3 | * License. See the file "COPYING" in the main directory of this archive | ||
4 | * for more details. | ||
5 | * | ||
6 | * Copyright (C) 2012 Cavium, Inc. | ||
7 | * | ||
8 | * Copyright (C) 2009 Wind River Systems, | ||
9 | * written by Ralf Baechle <ralf@linux-mips.org> | ||
10 | */ | ||
11 | #include <linux/module.h> | ||
12 | #include <linux/init.h> | ||
13 | #include <linux/slab.h> | ||
14 | #include <linux/io.h> | ||
15 | #include <linux/edac.h> | ||
16 | |||
17 | #include <asm/octeon/cvmx.h> | ||
18 | |||
19 | #include "edac_core.h" | ||
20 | #include "edac_module.h" | ||
21 | |||
22 | #define EDAC_MOD_STR "octeon-l2c" | ||
23 | |||
24 | static void octeon_l2c_poll_oct1(struct edac_device_ctl_info *l2c) | ||
25 | { | ||
26 | union cvmx_l2t_err l2t_err, l2t_err_reset; | ||
27 | union cvmx_l2d_err l2d_err, l2d_err_reset; | ||
28 | |||
29 | l2t_err_reset.u64 = 0; | ||
30 | l2t_err.u64 = cvmx_read_csr(CVMX_L2T_ERR); | ||
31 | if (l2t_err.s.sec_err) { | ||
32 | edac_device_handle_ce(l2c, 0, 0, | ||
33 | "Tag Single bit error (corrected)"); | ||
34 | l2t_err_reset.s.sec_err = 1; | ||
35 | } | ||
36 | if (l2t_err.s.ded_err) { | ||
37 | edac_device_handle_ue(l2c, 0, 0, | ||
38 | "Tag Double bit error (detected)"); | ||
39 | l2t_err_reset.s.ded_err = 1; | ||
40 | } | ||
41 | if (l2t_err_reset.u64) | ||
42 | cvmx_write_csr(CVMX_L2T_ERR, l2t_err_reset.u64); | ||
43 | |||
44 | l2d_err_reset.u64 = 0; | ||
45 | l2d_err.u64 = cvmx_read_csr(CVMX_L2D_ERR); | ||
46 | if (l2d_err.s.sec_err) { | ||
47 | edac_device_handle_ce(l2c, 0, 1, | ||
48 | "Data Single bit error (corrected)"); | ||
49 | l2d_err_reset.s.sec_err = 1; | ||
50 | } | ||
51 | if (l2d_err.s.ded_err) { | ||
52 | edac_device_handle_ue(l2c, 0, 1, | ||
53 | "Data Double bit error (detected)"); | ||
54 | l2d_err_reset.s.ded_err = 1; | ||
55 | } | ||
56 | if (l2d_err_reset.u64) | ||
57 | cvmx_write_csr(CVMX_L2D_ERR, l2d_err_reset.u64); | ||
58 | |||
59 | } | ||
60 | |||
61 | static void _octeon_l2c_poll_oct2(struct edac_device_ctl_info *l2c, int tad) | ||
62 | { | ||
63 | union cvmx_l2c_err_tdtx err_tdtx, err_tdtx_reset; | ||
64 | union cvmx_l2c_err_ttgx err_ttgx, err_ttgx_reset; | ||
65 | char buf1[64]; | ||
66 | char buf2[80]; | ||
67 | |||
68 | err_tdtx_reset.u64 = 0; | ||
69 | err_tdtx.u64 = cvmx_read_csr(CVMX_L2C_ERR_TDTX(tad)); | ||
70 | if (err_tdtx.s.dbe || err_tdtx.s.sbe || | ||
71 | err_tdtx.s.vdbe || err_tdtx.s.vsbe) | ||
72 | snprintf(buf1, sizeof(buf1), | ||
73 | "type:%d, syn:0x%x, way:%d", | ||
74 | err_tdtx.s.type, err_tdtx.s.syn, err_tdtx.s.wayidx); | ||
75 | |||
76 | if (err_tdtx.s.dbe) { | ||
77 | snprintf(buf2, sizeof(buf2), | ||
78 | "L2D Double bit error (detected):%s", buf1); | ||
79 | err_tdtx_reset.s.dbe = 1; | ||
80 | edac_device_handle_ue(l2c, tad, 1, buf2); | ||
81 | } | ||
82 | if (err_tdtx.s.sbe) { | ||
83 | snprintf(buf2, sizeof(buf2), | ||
84 | "L2D Single bit error (corrected):%s", buf1); | ||
85 | err_tdtx_reset.s.sbe = 1; | ||
86 | edac_device_handle_ce(l2c, tad, 1, buf2); | ||
87 | } | ||
88 | if (err_tdtx.s.vdbe) { | ||
89 | snprintf(buf2, sizeof(buf2), | ||
90 | "VBF Double bit error (detected):%s", buf1); | ||
91 | err_tdtx_reset.s.vdbe = 1; | ||
92 | edac_device_handle_ue(l2c, tad, 1, buf2); | ||
93 | } | ||
94 | if (err_tdtx.s.vsbe) { | ||
95 | snprintf(buf2, sizeof(buf2), | ||
96 | "VBF Single bit error (corrected):%s", buf1); | ||
97 | err_tdtx_reset.s.vsbe = 1; | ||
98 | edac_device_handle_ce(l2c, tad, 1, buf2); | ||
99 | } | ||
100 | if (err_tdtx_reset.u64) | ||
101 | cvmx_write_csr(CVMX_L2C_ERR_TDTX(tad), err_tdtx_reset.u64); | ||
102 | |||
103 | err_ttgx_reset.u64 = 0; | ||
104 | err_ttgx.u64 = cvmx_read_csr(CVMX_L2C_ERR_TTGX(tad)); | ||
105 | |||
106 | if (err_ttgx.s.dbe || err_ttgx.s.sbe) | ||
107 | snprintf(buf1, sizeof(buf1), | ||
108 | "type:%d, syn:0x%x, way:%d", | ||
109 | err_ttgx.s.type, err_ttgx.s.syn, err_ttgx.s.wayidx); | ||
110 | |||
111 | if (err_ttgx.s.dbe) { | ||
112 | snprintf(buf2, sizeof(buf2), | ||
113 | "Tag Double bit error (detected):%s", buf1); | ||
114 | err_ttgx_reset.s.dbe = 1; | ||
115 | edac_device_handle_ue(l2c, tad, 0, buf2); | ||
116 | } | ||
117 | if (err_ttgx.s.sbe) { | ||
118 | snprintf(buf2, sizeof(buf2), | ||
119 | "Tag Single bit error (corrected):%s", buf1); | ||
120 | err_ttgx_reset.s.sbe = 1; | ||
121 | edac_device_handle_ce(l2c, tad, 0, buf2); | ||
122 | } | ||
123 | if (err_ttgx_reset.u64) | ||
124 | cvmx_write_csr(CVMX_L2C_ERR_TTGX(tad), err_ttgx_reset.u64); | ||
125 | } | ||
126 | |||
127 | static void octeon_l2c_poll_oct2(struct edac_device_ctl_info *l2c) | ||
128 | { | ||
129 | int i; | ||
130 | for (i = 0; i < l2c->nr_instances; i++) | ||
131 | _octeon_l2c_poll_oct2(l2c, i); | ||
132 | } | ||
133 | |||
134 | static int octeon_l2c_probe(struct platform_device *pdev) | ||
135 | { | ||
136 | struct edac_device_ctl_info *l2c; | ||
137 | |||
138 | int num_tads = OCTEON_IS_MODEL(OCTEON_CN68XX) ? 4 : 1; | ||
139 | |||
140 | /* 'Tags' are block 0, 'Data' is block 1*/ | ||
141 | l2c = edac_device_alloc_ctl_info(0, "l2c", num_tads, "l2c", 2, 0, | ||
142 | NULL, 0, edac_device_alloc_index()); | ||
143 | if (!l2c) | ||
144 | return -ENOMEM; | ||
145 | |||
146 | l2c->dev = &pdev->dev; | ||
147 | platform_set_drvdata(pdev, l2c); | ||
148 | l2c->dev_name = dev_name(&pdev->dev); | ||
149 | |||
150 | l2c->mod_name = "octeon-l2c"; | ||
151 | l2c->ctl_name = "octeon_l2c_err"; | ||
152 | |||
153 | |||
154 | if (OCTEON_IS_MODEL(OCTEON_FAM_1_PLUS)) { | ||
155 | union cvmx_l2t_err l2t_err; | ||
156 | union cvmx_l2d_err l2d_err; | ||
157 | |||
158 | l2t_err.u64 = cvmx_read_csr(CVMX_L2T_ERR); | ||
159 | l2t_err.s.sec_intena = 0; /* We poll */ | ||
160 | l2t_err.s.ded_intena = 0; | ||
161 | cvmx_write_csr(CVMX_L2T_ERR, l2t_err.u64); | ||
162 | |||
163 | l2d_err.u64 = cvmx_read_csr(CVMX_L2D_ERR); | ||
164 | l2d_err.s.sec_intena = 0; /* We poll */ | ||
165 | l2d_err.s.ded_intena = 0; | ||
166 | cvmx_write_csr(CVMX_L2T_ERR, l2d_err.u64); | ||
167 | |||
168 | l2c->edac_check = octeon_l2c_poll_oct1; | ||
169 | } else { | ||
170 | /* OCTEON II */ | ||
171 | l2c->edac_check = octeon_l2c_poll_oct2; | ||
172 | } | ||
173 | |||
174 | if (edac_device_add_device(l2c) > 0) { | ||
175 | pr_err("%s: edac_device_add_device() failed\n", __func__); | ||
176 | goto err; | ||
177 | } | ||
178 | |||
179 | |||
180 | return 0; | ||
181 | |||
182 | err: | ||
183 | edac_device_free_ctl_info(l2c); | ||
184 | |||
185 | return -ENXIO; | ||
186 | } | ||
187 | |||
188 | static int octeon_l2c_remove(struct platform_device *pdev) | ||
189 | { | ||
190 | struct edac_device_ctl_info *l2c = platform_get_drvdata(pdev); | ||
191 | |||
192 | edac_device_del_device(&pdev->dev); | ||
193 | edac_device_free_ctl_info(l2c); | ||
194 | |||
195 | return 0; | ||
196 | } | ||
197 | |||
198 | static struct platform_driver octeon_l2c_driver = { | ||
199 | .probe = octeon_l2c_probe, | ||
200 | .remove = octeon_l2c_remove, | ||
201 | .driver = { | ||
202 | .name = "octeon_l2c_edac", | ||
203 | } | ||
204 | }; | ||
205 | module_platform_driver(octeon_l2c_driver); | ||
206 | |||
207 | MODULE_LICENSE("GPL"); | ||
208 | MODULE_AUTHOR("Ralf Baechle <ralf@linux-mips.org>"); | ||
diff --git a/drivers/edac/octeon_edac-lmc.c b/drivers/edac/octeon_edac-lmc.c deleted file mode 100644 index 93412d6b3af..00000000000 --- a/drivers/edac/octeon_edac-lmc.c +++ /dev/null | |||
@@ -1,186 +0,0 @@ | |||
1 | /* | ||
2 | * This file is subject to the terms and conditions of the GNU General Public | ||
3 | * License. See the file "COPYING" in the main directory of this archive | ||
4 | * for more details. | ||
5 | * | ||
6 | * Copyright (C) 2009 Wind River Systems, | ||
7 | * written by Ralf Baechle <ralf@linux-mips.org> | ||
8 | */ | ||
9 | #include <linux/module.h> | ||
10 | #include <linux/init.h> | ||
11 | #include <linux/slab.h> | ||
12 | #include <linux/io.h> | ||
13 | #include <linux/edac.h> | ||
14 | |||
15 | #include <asm/octeon/octeon.h> | ||
16 | #include <asm/octeon/cvmx-lmcx-defs.h> | ||
17 | |||
18 | #include "edac_core.h" | ||
19 | #include "edac_module.h" | ||
20 | |||
21 | #define OCTEON_MAX_MC 4 | ||
22 | |||
23 | static void octeon_lmc_edac_poll(struct mem_ctl_info *mci) | ||
24 | { | ||
25 | union cvmx_lmcx_mem_cfg0 cfg0; | ||
26 | bool do_clear = false; | ||
27 | char msg[64]; | ||
28 | |||
29 | cfg0.u64 = cvmx_read_csr(CVMX_LMCX_MEM_CFG0(mci->mc_idx)); | ||
30 | if (cfg0.s.sec_err || cfg0.s.ded_err) { | ||
31 | union cvmx_lmcx_fadr fadr; | ||
32 | fadr.u64 = cvmx_read_csr(CVMX_LMCX_FADR(mci->mc_idx)); | ||
33 | snprintf(msg, sizeof(msg), | ||
34 | "DIMM %d rank %d bank %d row %d col %d", | ||
35 | fadr.cn30xx.fdimm, fadr.cn30xx.fbunk, | ||
36 | fadr.cn30xx.fbank, fadr.cn30xx.frow, fadr.cn30xx.fcol); | ||
37 | } | ||
38 | |||
39 | if (cfg0.s.sec_err) { | ||
40 | edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1, 0, 0, 0, | ||
41 | -1, -1, -1, msg, ""); | ||
42 | cfg0.s.sec_err = -1; /* Done, re-arm */ | ||
43 | do_clear = true; | ||
44 | } | ||
45 | |||
46 | if (cfg0.s.ded_err) { | ||
47 | edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1, 0, 0, 0, | ||
48 | -1, -1, -1, msg, ""); | ||
49 | cfg0.s.ded_err = -1; /* Done, re-arm */ | ||
50 | do_clear = true; | ||
51 | } | ||
52 | if (do_clear) | ||
53 | cvmx_write_csr(CVMX_LMCX_MEM_CFG0(mci->mc_idx), cfg0.u64); | ||
54 | } | ||
55 | |||
56 | static void octeon_lmc_edac_poll_o2(struct mem_ctl_info *mci) | ||
57 | { | ||
58 | union cvmx_lmcx_int int_reg; | ||
59 | bool do_clear = false; | ||
60 | char msg[64]; | ||
61 | |||
62 | int_reg.u64 = cvmx_read_csr(CVMX_LMCX_INT(mci->mc_idx)); | ||
63 | if (int_reg.s.sec_err || int_reg.s.ded_err) { | ||
64 | union cvmx_lmcx_fadr fadr; | ||
65 | fadr.u64 = cvmx_read_csr(CVMX_LMCX_FADR(mci->mc_idx)); | ||
66 | snprintf(msg, sizeof(msg), | ||
67 | "DIMM %d rank %d bank %d row %d col %d", | ||
68 | fadr.cn61xx.fdimm, fadr.cn61xx.fbunk, | ||
69 | fadr.cn61xx.fbank, fadr.cn61xx.frow, fadr.cn61xx.fcol); | ||
70 | } | ||
71 | |||
72 | if (int_reg.s.sec_err) { | ||
73 | edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1, 0, 0, 0, | ||
74 | -1, -1, -1, msg, ""); | ||
75 | int_reg.s.sec_err = -1; /* Done, re-arm */ | ||
76 | do_clear = true; | ||
77 | } | ||
78 | |||
79 | if (int_reg.s.ded_err) { | ||
80 | edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1, 0, 0, 0, | ||
81 | -1, -1, -1, msg, ""); | ||
82 | int_reg.s.ded_err = -1; /* Done, re-arm */ | ||
83 | do_clear = true; | ||
84 | } | ||
85 | if (do_clear) | ||
86 | cvmx_write_csr(CVMX_LMCX_INT(mci->mc_idx), int_reg.u64); | ||
87 | } | ||
88 | |||
89 | static int octeon_lmc_edac_probe(struct platform_device *pdev) | ||
90 | { | ||
91 | struct mem_ctl_info *mci; | ||
92 | struct edac_mc_layer layers[1]; | ||
93 | int mc = pdev->id; | ||
94 | |||
95 | layers[0].type = EDAC_MC_LAYER_CHANNEL; | ||
96 | layers[0].size = 1; | ||
97 | layers[0].is_virt_csrow = false; | ||
98 | |||
99 | if (OCTEON_IS_MODEL(OCTEON_FAM_1_PLUS)) { | ||
100 | union cvmx_lmcx_mem_cfg0 cfg0; | ||
101 | |||
102 | cfg0.u64 = cvmx_read_csr(CVMX_LMCX_MEM_CFG0(0)); | ||
103 | if (!cfg0.s.ecc_ena) { | ||
104 | dev_info(&pdev->dev, "Disabled (ECC not enabled)\n"); | ||
105 | return 0; | ||
106 | } | ||
107 | |||
108 | mci = edac_mc_alloc(mc, ARRAY_SIZE(layers), layers, 0); | ||
109 | if (!mci) | ||
110 | return -ENXIO; | ||
111 | |||
112 | mci->pdev = &pdev->dev; | ||
113 | mci->dev_name = dev_name(&pdev->dev); | ||
114 | |||
115 | mci->mod_name = "octeon-lmc"; | ||
116 | mci->ctl_name = "octeon-lmc-err"; | ||
117 | mci->edac_check = octeon_lmc_edac_poll; | ||
118 | |||
119 | if (edac_mc_add_mc(mci)) { | ||
120 | dev_err(&pdev->dev, "edac_mc_add_mc() failed\n"); | ||
121 | edac_mc_free(mci); | ||
122 | return -ENXIO; | ||
123 | } | ||
124 | |||
125 | cfg0.u64 = cvmx_read_csr(CVMX_LMCX_MEM_CFG0(mc)); | ||
126 | cfg0.s.intr_ded_ena = 0; /* We poll */ | ||
127 | cfg0.s.intr_sec_ena = 0; | ||
128 | cvmx_write_csr(CVMX_LMCX_MEM_CFG0(mc), cfg0.u64); | ||
129 | } else { | ||
130 | /* OCTEON II */ | ||
131 | union cvmx_lmcx_int_en en; | ||
132 | union cvmx_lmcx_config config; | ||
133 | |||
134 | config.u64 = cvmx_read_csr(CVMX_LMCX_CONFIG(0)); | ||
135 | if (!config.s.ecc_ena) { | ||
136 | dev_info(&pdev->dev, "Disabled (ECC not enabled)\n"); | ||
137 | return 0; | ||
138 | } | ||
139 | |||
140 | mci = edac_mc_alloc(mc, ARRAY_SIZE(layers), layers, 0); | ||
141 | if (!mci) | ||
142 | return -ENXIO; | ||
143 | |||
144 | mci->pdev = &pdev->dev; | ||
145 | mci->dev_name = dev_name(&pdev->dev); | ||
146 | |||
147 | mci->mod_name = "octeon-lmc"; | ||
148 | mci->ctl_name = "co_lmc_err"; | ||
149 | mci->edac_check = octeon_lmc_edac_poll_o2; | ||
150 | |||
151 | if (edac_mc_add_mc(mci)) { | ||
152 | dev_err(&pdev->dev, "edac_mc_add_mc() failed\n"); | ||
153 | edac_mc_free(mci); | ||
154 | return -ENXIO; | ||
155 | } | ||
156 | |||
157 | en.u64 = cvmx_read_csr(CVMX_LMCX_MEM_CFG0(mc)); | ||
158 | en.s.intr_ded_ena = 0; /* We poll */ | ||
159 | en.s.intr_sec_ena = 0; | ||
160 | cvmx_write_csr(CVMX_LMCX_MEM_CFG0(mc), en.u64); | ||
161 | } | ||
162 | platform_set_drvdata(pdev, mci); | ||
163 | |||
164 | return 0; | ||
165 | } | ||
166 | |||
167 | static int octeon_lmc_edac_remove(struct platform_device *pdev) | ||
168 | { | ||
169 | struct mem_ctl_info *mci = platform_get_drvdata(pdev); | ||
170 | |||
171 | edac_mc_del_mc(&pdev->dev); | ||
172 | edac_mc_free(mci); | ||
173 | return 0; | ||
174 | } | ||
175 | |||
176 | static struct platform_driver octeon_lmc_edac_driver = { | ||
177 | .probe = octeon_lmc_edac_probe, | ||
178 | .remove = octeon_lmc_edac_remove, | ||
179 | .driver = { | ||
180 | .name = "octeon_lmc_edac", | ||
181 | } | ||
182 | }; | ||
183 | module_platform_driver(octeon_lmc_edac_driver); | ||
184 | |||
185 | MODULE_LICENSE("GPL"); | ||
186 | MODULE_AUTHOR("Ralf Baechle <ralf@linux-mips.org>"); | ||
diff --git a/drivers/edac/octeon_edac-pc.c b/drivers/edac/octeon_edac-pc.c deleted file mode 100644 index 0f83c33a7d1..00000000000 --- a/drivers/edac/octeon_edac-pc.c +++ /dev/null | |||
@@ -1,143 +0,0 @@ | |||
1 | /* | ||
2 | * This file is subject to the terms and conditions of the GNU General Public | ||
3 | * License. See the file "COPYING" in the main directory of this archive | ||
4 | * for more details. | ||
5 | * | ||
6 | * Copyright (C) 2012 Cavium, Inc. | ||
7 | * | ||
8 | * Copyright (C) 2009 Wind River Systems, | ||
9 | * written by Ralf Baechle <ralf@linux-mips.org> | ||
10 | */ | ||
11 | #include <linux/module.h> | ||
12 | #include <linux/init.h> | ||
13 | #include <linux/slab.h> | ||
14 | #include <linux/interrupt.h> | ||
15 | #include <linux/io.h> | ||
16 | #include <linux/edac.h> | ||
17 | |||
18 | #include "edac_core.h" | ||
19 | #include "edac_module.h" | ||
20 | |||
21 | #include <asm/octeon/cvmx.h> | ||
22 | #include <asm/mipsregs.h> | ||
23 | |||
24 | extern int register_co_cache_error_notifier(struct notifier_block *nb); | ||
25 | extern int unregister_co_cache_error_notifier(struct notifier_block *nb); | ||
26 | |||
27 | extern unsigned long long cache_err_dcache[NR_CPUS]; | ||
28 | |||
29 | struct co_cache_error { | ||
30 | struct notifier_block notifier; | ||
31 | struct edac_device_ctl_info *ed; | ||
32 | }; | ||
33 | |||
34 | /** | ||
35 | * EDAC CPU cache error callback | ||
36 | * | ||
37 | * @event: non-zero if unrecoverable. | ||
38 | */ | ||
39 | static int co_cache_error_event(struct notifier_block *this, | ||
40 | unsigned long event, void *ptr) | ||
41 | { | ||
42 | struct co_cache_error *p = container_of(this, struct co_cache_error, | ||
43 | notifier); | ||
44 | |||
45 | unsigned int core = cvmx_get_core_num(); | ||
46 | unsigned int cpu = smp_processor_id(); | ||
47 | u64 icache_err = read_octeon_c0_icacheerr(); | ||
48 | u64 dcache_err; | ||
49 | |||
50 | if (event) { | ||
51 | dcache_err = cache_err_dcache[core]; | ||
52 | cache_err_dcache[core] = 0; | ||
53 | } else { | ||
54 | dcache_err = read_octeon_c0_dcacheerr(); | ||
55 | } | ||
56 | |||
57 | if (icache_err & 1) { | ||
58 | edac_device_printk(p->ed, KERN_ERR, | ||
59 | "CacheErr (Icache):%llx, core %d/cpu %d, cp0_errorepc == %lx\n", | ||
60 | (unsigned long long)icache_err, core, cpu, | ||
61 | read_c0_errorepc()); | ||
62 | write_octeon_c0_icacheerr(0); | ||
63 | edac_device_handle_ce(p->ed, cpu, 1, "icache"); | ||
64 | } | ||
65 | if (dcache_err & 1) { | ||
66 | edac_device_printk(p->ed, KERN_ERR, | ||
67 | "CacheErr (Dcache):%llx, core %d/cpu %d, cp0_errorepc == %lx\n", | ||
68 | (unsigned long long)dcache_err, core, cpu, | ||
69 | read_c0_errorepc()); | ||
70 | if (event) | ||
71 | edac_device_handle_ue(p->ed, cpu, 0, "dcache"); | ||
72 | else | ||
73 | edac_device_handle_ce(p->ed, cpu, 0, "dcache"); | ||
74 | |||
75 | /* Clear the error indication */ | ||
76 | if (OCTEON_IS_MODEL(OCTEON_FAM_2)) | ||
77 | write_octeon_c0_dcacheerr(1); | ||
78 | else | ||
79 | write_octeon_c0_dcacheerr(0); | ||
80 | } | ||
81 | |||
82 | return NOTIFY_STOP; | ||
83 | } | ||
84 | |||
85 | static int co_cache_error_probe(struct platform_device *pdev) | ||
86 | { | ||
87 | struct co_cache_error *p = devm_kzalloc(&pdev->dev, sizeof(*p), | ||
88 | GFP_KERNEL); | ||
89 | if (!p) | ||
90 | return -ENOMEM; | ||
91 | |||
92 | p->notifier.notifier_call = co_cache_error_event; | ||
93 | platform_set_drvdata(pdev, p); | ||
94 | |||
95 | p->ed = edac_device_alloc_ctl_info(0, "cpu", num_possible_cpus(), | ||
96 | "cache", 2, 0, NULL, 0, | ||
97 | edac_device_alloc_index()); | ||
98 | if (!p->ed) | ||
99 | goto err; | ||
100 | |||
101 | p->ed->dev = &pdev->dev; | ||
102 | |||
103 | p->ed->dev_name = dev_name(&pdev->dev); | ||
104 | |||
105 | p->ed->mod_name = "octeon-cpu"; | ||
106 | p->ed->ctl_name = "cache"; | ||
107 | |||
108 | if (edac_device_add_device(p->ed)) { | ||
109 | pr_err("%s: edac_device_add_device() failed\n", __func__); | ||
110 | goto err1; | ||
111 | } | ||
112 | |||
113 | register_co_cache_error_notifier(&p->notifier); | ||
114 | |||
115 | return 0; | ||
116 | |||
117 | err1: | ||
118 | edac_device_free_ctl_info(p->ed); | ||
119 | err: | ||
120 | return -ENXIO; | ||
121 | } | ||
122 | |||
123 | static int co_cache_error_remove(struct platform_device *pdev) | ||
124 | { | ||
125 | struct co_cache_error *p = platform_get_drvdata(pdev); | ||
126 | |||
127 | unregister_co_cache_error_notifier(&p->notifier); | ||
128 | edac_device_del_device(&pdev->dev); | ||
129 | edac_device_free_ctl_info(p->ed); | ||
130 | return 0; | ||
131 | } | ||
132 | |||
133 | static struct platform_driver co_cache_error_driver = { | ||
134 | .probe = co_cache_error_probe, | ||
135 | .remove = co_cache_error_remove, | ||
136 | .driver = { | ||
137 | .name = "octeon_pc_edac", | ||
138 | } | ||
139 | }; | ||
140 | module_platform_driver(co_cache_error_driver); | ||
141 | |||
142 | MODULE_LICENSE("GPL"); | ||
143 | MODULE_AUTHOR("Ralf Baechle <ralf@linux-mips.org>"); | ||
diff --git a/drivers/edac/octeon_edac-pci.c b/drivers/edac/octeon_edac-pci.c deleted file mode 100644 index 9ca73cec74e..00000000000 --- a/drivers/edac/octeon_edac-pci.c +++ /dev/null | |||
@@ -1,111 +0,0 @@ | |||
1 | /* | ||
2 | * This file is subject to the terms and conditions of the GNU General Public | ||
3 | * License. See the file "COPYING" in the main directory of this archive | ||
4 | * for more details. | ||
5 | * | ||
6 | * Copyright (C) 2012 Cavium, Inc. | ||
7 | * Copyright (C) 2009 Wind River Systems, | ||
8 | * written by Ralf Baechle <ralf@linux-mips.org> | ||
9 | */ | ||
10 | #include <linux/module.h> | ||
11 | #include <linux/init.h> | ||
12 | #include <linux/slab.h> | ||
13 | #include <linux/io.h> | ||
14 | #include <linux/edac.h> | ||
15 | |||
16 | #include <asm/octeon/cvmx.h> | ||
17 | #include <asm/octeon/cvmx-npi-defs.h> | ||
18 | #include <asm/octeon/cvmx-pci-defs.h> | ||
19 | #include <asm/octeon/octeon.h> | ||
20 | |||
21 | #include "edac_core.h" | ||
22 | #include "edac_module.h" | ||
23 | |||
24 | static void octeon_pci_poll(struct edac_pci_ctl_info *pci) | ||
25 | { | ||
26 | union cvmx_pci_cfg01 cfg01; | ||
27 | |||
28 | cfg01.u32 = octeon_npi_read32(CVMX_NPI_PCI_CFG01); | ||
29 | if (cfg01.s.dpe) { /* Detected parity error */ | ||
30 | edac_pci_handle_pe(pci, pci->ctl_name); | ||
31 | cfg01.s.dpe = 1; /* Reset */ | ||
32 | octeon_npi_write32(CVMX_NPI_PCI_CFG01, cfg01.u32); | ||
33 | } | ||
34 | if (cfg01.s.sse) { | ||
35 | edac_pci_handle_npe(pci, "Signaled System Error"); | ||
36 | cfg01.s.sse = 1; /* Reset */ | ||
37 | octeon_npi_write32(CVMX_NPI_PCI_CFG01, cfg01.u32); | ||
38 | } | ||
39 | if (cfg01.s.rma) { | ||
40 | edac_pci_handle_npe(pci, "Received Master Abort"); | ||
41 | cfg01.s.rma = 1; /* Reset */ | ||
42 | octeon_npi_write32(CVMX_NPI_PCI_CFG01, cfg01.u32); | ||
43 | } | ||
44 | if (cfg01.s.rta) { | ||
45 | edac_pci_handle_npe(pci, "Received Target Abort"); | ||
46 | cfg01.s.rta = 1; /* Reset */ | ||
47 | octeon_npi_write32(CVMX_NPI_PCI_CFG01, cfg01.u32); | ||
48 | } | ||
49 | if (cfg01.s.sta) { | ||
50 | edac_pci_handle_npe(pci, "Signaled Target Abort"); | ||
51 | cfg01.s.sta = 1; /* Reset */ | ||
52 | octeon_npi_write32(CVMX_NPI_PCI_CFG01, cfg01.u32); | ||
53 | } | ||
54 | if (cfg01.s.mdpe) { | ||
55 | edac_pci_handle_npe(pci, "Master Data Parity Error"); | ||
56 | cfg01.s.mdpe = 1; /* Reset */ | ||
57 | octeon_npi_write32(CVMX_NPI_PCI_CFG01, cfg01.u32); | ||
58 | } | ||
59 | } | ||
60 | |||
61 | static int octeon_pci_probe(struct platform_device *pdev) | ||
62 | { | ||
63 | struct edac_pci_ctl_info *pci; | ||
64 | int res = 0; | ||
65 | |||
66 | pci = edac_pci_alloc_ctl_info(0, "octeon_pci_err"); | ||
67 | if (!pci) | ||
68 | return -ENOMEM; | ||
69 | |||
70 | pci->dev = &pdev->dev; | ||
71 | platform_set_drvdata(pdev, pci); | ||
72 | pci->dev_name = dev_name(&pdev->dev); | ||
73 | |||
74 | pci->mod_name = "octeon-pci"; | ||
75 | pci->ctl_name = "octeon_pci_err"; | ||
76 | pci->edac_check = octeon_pci_poll; | ||
77 | |||
78 | if (edac_pci_add_device(pci, 0) > 0) { | ||
79 | pr_err("%s: edac_pci_add_device() failed\n", __func__); | ||
80 | goto err; | ||
81 | } | ||
82 | |||
83 | return 0; | ||
84 | |||
85 | err: | ||
86 | edac_pci_free_ctl_info(pci); | ||
87 | |||
88 | return res; | ||
89 | } | ||
90 | |||
91 | static int octeon_pci_remove(struct platform_device *pdev) | ||
92 | { | ||
93 | struct edac_pci_ctl_info *pci = platform_get_drvdata(pdev); | ||
94 | |||
95 | edac_pci_del_device(&pdev->dev); | ||
96 | edac_pci_free_ctl_info(pci); | ||
97 | |||
98 | return 0; | ||
99 | } | ||
100 | |||
101 | static struct platform_driver octeon_pci_driver = { | ||
102 | .probe = octeon_pci_probe, | ||
103 | .remove = octeon_pci_remove, | ||
104 | .driver = { | ||
105 | .name = "octeon_pci_edac", | ||
106 | } | ||
107 | }; | ||
108 | module_platform_driver(octeon_pci_driver); | ||
109 | |||
110 | MODULE_LICENSE("GPL"); | ||
111 | MODULE_AUTHOR("Ralf Baechle <ralf@linux-mips.org>"); | ||
diff --git a/drivers/edac/pasemi_edac.c b/drivers/edac/pasemi_edac.c index 9c971b57553..7f71ee43674 100644 --- a/drivers/edac/pasemi_edac.c +++ b/drivers/edac/pasemi_edac.c | |||
@@ -74,7 +74,7 @@ static int system_mmc_id; | |||
74 | 74 | ||
75 | static u32 pasemi_edac_get_error_info(struct mem_ctl_info *mci) | 75 | static u32 pasemi_edac_get_error_info(struct mem_ctl_info *mci) |
76 | { | 76 | { |
77 | struct pci_dev *pdev = to_pci_dev(mci->pdev); | 77 | struct pci_dev *pdev = to_pci_dev(mci->dev); |
78 | u32 tmp; | 78 | u32 tmp; |
79 | 79 | ||
80 | pci_read_config_dword(pdev, MCDEBUG_ERRSTA, | 80 | pci_read_config_dword(pdev, MCDEBUG_ERRSTA, |
@@ -95,7 +95,7 @@ static u32 pasemi_edac_get_error_info(struct mem_ctl_info *mci) | |||
95 | 95 | ||
96 | static void pasemi_edac_process_error_info(struct mem_ctl_info *mci, u32 errsta) | 96 | static void pasemi_edac_process_error_info(struct mem_ctl_info *mci, u32 errsta) |
97 | { | 97 | { |
98 | struct pci_dev *pdev = to_pci_dev(mci->pdev); | 98 | struct pci_dev *pdev = to_pci_dev(mci->dev); |
99 | u32 errlog1a; | 99 | u32 errlog1a; |
100 | u32 cs; | 100 | u32 cs; |
101 | 101 | ||
@@ -110,16 +110,15 @@ static void pasemi_edac_process_error_info(struct mem_ctl_info *mci, u32 errsta) | |||
110 | /* uncorrectable/multi-bit errors */ | 110 | /* uncorrectable/multi-bit errors */ |
111 | if (errsta & (MCDEBUG_ERRSTA_MBE_STATUS | | 111 | if (errsta & (MCDEBUG_ERRSTA_MBE_STATUS | |
112 | MCDEBUG_ERRSTA_RFL_STATUS)) { | 112 | MCDEBUG_ERRSTA_RFL_STATUS)) { |
113 | edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1, | 113 | edac_mc_handle_ue(mci, mci->csrows[cs].first_page, 0, |
114 | mci->csrows[cs]->first_page, 0, 0, | 114 | cs, mci->ctl_name); |
115 | cs, 0, -1, mci->ctl_name, ""); | ||
116 | } | 115 | } |
117 | 116 | ||
118 | /* correctable/single-bit errors */ | 117 | /* correctable/single-bit errors */ |
119 | if (errsta & MCDEBUG_ERRSTA_SBE_STATUS) | 118 | if (errsta & MCDEBUG_ERRSTA_SBE_STATUS) { |
120 | edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1, | 119 | edac_mc_handle_ce(mci, mci->csrows[cs].first_page, 0, |
121 | mci->csrows[cs]->first_page, 0, 0, | 120 | 0, cs, 0, mci->ctl_name); |
122 | cs, 0, -1, mci->ctl_name, ""); | 121 | } |
123 | } | 122 | } |
124 | 123 | ||
125 | static void pasemi_edac_check(struct mem_ctl_info *mci) | 124 | static void pasemi_edac_check(struct mem_ctl_info *mci) |
@@ -136,13 +135,11 @@ static int pasemi_edac_init_csrows(struct mem_ctl_info *mci, | |||
136 | enum edac_type edac_mode) | 135 | enum edac_type edac_mode) |
137 | { | 136 | { |
138 | struct csrow_info *csrow; | 137 | struct csrow_info *csrow; |
139 | struct dimm_info *dimm; | ||
140 | u32 rankcfg; | 138 | u32 rankcfg; |
141 | int index; | 139 | int index; |
142 | 140 | ||
143 | for (index = 0; index < mci->nr_csrows; index++) { | 141 | for (index = 0; index < mci->nr_csrows; index++) { |
144 | csrow = mci->csrows[index]; | 142 | csrow = &mci->csrows[index]; |
145 | dimm = csrow->channels[0]->dimm; | ||
146 | 143 | ||
147 | pci_read_config_dword(pdev, | 144 | pci_read_config_dword(pdev, |
148 | MCDRAM_RANKCFG + (index * 12), | 145 | MCDRAM_RANKCFG + (index * 12), |
@@ -154,20 +151,20 @@ static int pasemi_edac_init_csrows(struct mem_ctl_info *mci, | |||
154 | switch ((rankcfg & MCDRAM_RANKCFG_TYPE_SIZE_M) >> | 151 | switch ((rankcfg & MCDRAM_RANKCFG_TYPE_SIZE_M) >> |
155 | MCDRAM_RANKCFG_TYPE_SIZE_S) { | 152 | MCDRAM_RANKCFG_TYPE_SIZE_S) { |
156 | case 0: | 153 | case 0: |
157 | dimm->nr_pages = 128 << (20 - PAGE_SHIFT); | 154 | csrow->nr_pages = 128 << (20 - PAGE_SHIFT); |
158 | break; | 155 | break; |
159 | case 1: | 156 | case 1: |
160 | dimm->nr_pages = 256 << (20 - PAGE_SHIFT); | 157 | csrow->nr_pages = 256 << (20 - PAGE_SHIFT); |
161 | break; | 158 | break; |
162 | case 2: | 159 | case 2: |
163 | case 3: | 160 | case 3: |
164 | dimm->nr_pages = 512 << (20 - PAGE_SHIFT); | 161 | csrow->nr_pages = 512 << (20 - PAGE_SHIFT); |
165 | break; | 162 | break; |
166 | case 4: | 163 | case 4: |
167 | dimm->nr_pages = 1024 << (20 - PAGE_SHIFT); | 164 | csrow->nr_pages = 1024 << (20 - PAGE_SHIFT); |
168 | break; | 165 | break; |
169 | case 5: | 166 | case 5: |
170 | dimm->nr_pages = 2048 << (20 - PAGE_SHIFT); | 167 | csrow->nr_pages = 2048 << (20 - PAGE_SHIFT); |
171 | break; | 168 | break; |
172 | default: | 169 | default: |
173 | edac_mc_printk(mci, KERN_ERR, | 170 | edac_mc_printk(mci, KERN_ERR, |
@@ -177,22 +174,21 @@ static int pasemi_edac_init_csrows(struct mem_ctl_info *mci, | |||
177 | } | 174 | } |
178 | 175 | ||
179 | csrow->first_page = last_page_in_mmc; | 176 | csrow->first_page = last_page_in_mmc; |
180 | csrow->last_page = csrow->first_page + dimm->nr_pages - 1; | 177 | csrow->last_page = csrow->first_page + csrow->nr_pages - 1; |
181 | last_page_in_mmc += dimm->nr_pages; | 178 | last_page_in_mmc += csrow->nr_pages; |
182 | csrow->page_mask = 0; | 179 | csrow->page_mask = 0; |
183 | dimm->grain = PASEMI_EDAC_ERROR_GRAIN; | 180 | csrow->grain = PASEMI_EDAC_ERROR_GRAIN; |
184 | dimm->mtype = MEM_DDR; | 181 | csrow->mtype = MEM_DDR; |
185 | dimm->dtype = DEV_UNKNOWN; | 182 | csrow->dtype = DEV_UNKNOWN; |
186 | dimm->edac_mode = edac_mode; | 183 | csrow->edac_mode = edac_mode; |
187 | } | 184 | } |
188 | return 0; | 185 | return 0; |
189 | } | 186 | } |
190 | 187 | ||
191 | static int pasemi_edac_probe(struct pci_dev *pdev, | 188 | static int __devinit pasemi_edac_probe(struct pci_dev *pdev, |
192 | const struct pci_device_id *ent) | 189 | const struct pci_device_id *ent) |
193 | { | 190 | { |
194 | struct mem_ctl_info *mci = NULL; | 191 | struct mem_ctl_info *mci = NULL; |
195 | struct edac_mc_layer layers[2]; | ||
196 | u32 errctl1, errcor, scrub, mcen; | 192 | u32 errctl1, errcor, scrub, mcen; |
197 | 193 | ||
198 | pci_read_config_dword(pdev, MCCFG_MCEN, &mcen); | 194 | pci_read_config_dword(pdev, MCCFG_MCEN, &mcen); |
@@ -209,14 +205,9 @@ static int pasemi_edac_probe(struct pci_dev *pdev, | |||
209 | MCDEBUG_ERRCTL1_RFL_LOG_EN; | 205 | MCDEBUG_ERRCTL1_RFL_LOG_EN; |
210 | pci_write_config_dword(pdev, MCDEBUG_ERRCTL1, errctl1); | 206 | pci_write_config_dword(pdev, MCDEBUG_ERRCTL1, errctl1); |
211 | 207 | ||
212 | layers[0].type = EDAC_MC_LAYER_CHIP_SELECT; | 208 | mci = edac_mc_alloc(0, PASEMI_EDAC_NR_CSROWS, PASEMI_EDAC_NR_CHANS, |
213 | layers[0].size = PASEMI_EDAC_NR_CSROWS; | 209 | system_mmc_id++); |
214 | layers[0].is_virt_csrow = true; | 210 | |
215 | layers[1].type = EDAC_MC_LAYER_CHANNEL; | ||
216 | layers[1].size = PASEMI_EDAC_NR_CHANS; | ||
217 | layers[1].is_virt_csrow = false; | ||
218 | mci = edac_mc_alloc(system_mmc_id++, ARRAY_SIZE(layers), layers, | ||
219 | 0); | ||
220 | if (mci == NULL) | 211 | if (mci == NULL) |
221 | return -ENOMEM; | 212 | return -ENOMEM; |
222 | 213 | ||
@@ -225,7 +216,7 @@ static int pasemi_edac_probe(struct pci_dev *pdev, | |||
225 | MCCFG_ERRCOR_ECC_GEN_EN | | 216 | MCCFG_ERRCOR_ECC_GEN_EN | |
226 | MCCFG_ERRCOR_ECC_CRR_EN; | 217 | MCCFG_ERRCOR_ECC_CRR_EN; |
227 | 218 | ||
228 | mci->pdev = &pdev->dev; | 219 | mci->dev = &pdev->dev; |
229 | mci->mtype_cap = MEM_FLAG_DDR | MEM_FLAG_RDDR; | 220 | mci->mtype_cap = MEM_FLAG_DDR | MEM_FLAG_RDDR; |
230 | mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_EC | EDAC_FLAG_SECDED; | 221 | mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_EC | EDAC_FLAG_SECDED; |
231 | mci->edac_cap = (errcor & MCCFG_ERRCOR_ECC_GEN_EN) ? | 222 | mci->edac_cap = (errcor & MCCFG_ERRCOR_ECC_GEN_EN) ? |
@@ -266,7 +257,7 @@ fail: | |||
266 | return -ENODEV; | 257 | return -ENODEV; |
267 | } | 258 | } |
268 | 259 | ||
269 | static void pasemi_edac_remove(struct pci_dev *pdev) | 260 | static void __devexit pasemi_edac_remove(struct pci_dev *pdev) |
270 | { | 261 | { |
271 | struct mem_ctl_info *mci = edac_mc_del_mc(&pdev->dev); | 262 | struct mem_ctl_info *mci = edac_mc_del_mc(&pdev->dev); |
272 | 263 | ||
@@ -287,7 +278,7 @@ MODULE_DEVICE_TABLE(pci, pasemi_edac_pci_tbl); | |||
287 | static struct pci_driver pasemi_edac_driver = { | 278 | static struct pci_driver pasemi_edac_driver = { |
288 | .name = MODULE_NAME, | 279 | .name = MODULE_NAME, |
289 | .probe = pasemi_edac_probe, | 280 | .probe = pasemi_edac_probe, |
290 | .remove = pasemi_edac_remove, | 281 | .remove = __devexit_p(pasemi_edac_remove), |
291 | .id_table = pasemi_edac_pci_tbl, | 282 | .id_table = pasemi_edac_pci_tbl, |
292 | }; | 283 | }; |
293 | 284 | ||
diff --git a/drivers/edac/ppc4xx_edac.c b/drivers/edac/ppc4xx_edac.c index ef6b7e08f48..0de7d877089 100644 --- a/drivers/edac/ppc4xx_edac.c +++ b/drivers/edac/ppc4xx_edac.c | |||
@@ -142,7 +142,7 @@ | |||
142 | 142 | ||
143 | /* | 143 | /* |
144 | * The ibm,sdram-4xx-ddr2 Device Control Registers (DCRs) are | 144 | * The ibm,sdram-4xx-ddr2 Device Control Registers (DCRs) are |
145 | * indirectly accessed and have a base and length defined by the | 145 | * indirectly acccessed and have a base and length defined by the |
146 | * device tree. The base can be anything; however, we expect the | 146 | * device tree. The base can be anything; however, we expect the |
147 | * length to be precisely two registers, the first for the address | 147 | * length to be precisely two registers, the first for the address |
148 | * window and the second for the data window. | 148 | * window and the second for the data window. |
@@ -184,7 +184,7 @@ struct ppc4xx_ecc_status { | |||
184 | 184 | ||
185 | /* Function Prototypes */ | 185 | /* Function Prototypes */ |
186 | 186 | ||
187 | static int ppc4xx_edac_probe(struct platform_device *device); | 187 | static int ppc4xx_edac_probe(struct platform_device *device) |
188 | static int ppc4xx_edac_remove(struct platform_device *device); | 188 | static int ppc4xx_edac_remove(struct platform_device *device); |
189 | 189 | ||
190 | /* Global Variables */ | 190 | /* Global Variables */ |
@@ -205,7 +205,7 @@ static struct platform_driver ppc4xx_edac_driver = { | |||
205 | .remove = ppc4xx_edac_remove, | 205 | .remove = ppc4xx_edac_remove, |
206 | .driver = { | 206 | .driver = { |
207 | .owner = THIS_MODULE, | 207 | .owner = THIS_MODULE, |
208 | .name = PPC4XX_EDAC_MODULE_NAME, | 208 | .name = PPC4XX_EDAC_MODULE_NAME |
209 | .of_match_table = ppc4xx_edac_match, | 209 | .of_match_table = ppc4xx_edac_match, |
210 | }, | 210 | }, |
211 | }; | 211 | }; |
@@ -727,10 +727,7 @@ ppc4xx_edac_handle_ce(struct mem_ctl_info *mci, | |||
727 | 727 | ||
728 | for (row = 0; row < mci->nr_csrows; row++) | 728 | for (row = 0; row < mci->nr_csrows; row++) |
729 | if (ppc4xx_edac_check_bank_error(status, row)) | 729 | if (ppc4xx_edac_check_bank_error(status, row)) |
730 | edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1, | 730 | edac_mc_handle_ce_no_info(mci, message); |
731 | 0, 0, 0, | ||
732 | row, 0, -1, | ||
733 | message, ""); | ||
734 | } | 731 | } |
735 | 732 | ||
736 | /** | 733 | /** |
@@ -758,10 +755,7 @@ ppc4xx_edac_handle_ue(struct mem_ctl_info *mci, | |||
758 | 755 | ||
759 | for (row = 0; row < mci->nr_csrows; row++) | 756 | for (row = 0; row < mci->nr_csrows; row++) |
760 | if (ppc4xx_edac_check_bank_error(status, row)) | 757 | if (ppc4xx_edac_check_bank_error(status, row)) |
761 | edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1, | 758 | edac_mc_handle_ue(mci, page, offset, row, message); |
762 | page, offset, 0, | ||
763 | row, 0, -1, | ||
764 | message, ""); | ||
765 | } | 759 | } |
766 | 760 | ||
767 | /** | 761 | /** |
@@ -838,7 +832,8 @@ ppc4xx_edac_isr(int irq, void *dev_id) | |||
838 | * | 832 | * |
839 | * Returns a device type width enumeration. | 833 | * Returns a device type width enumeration. |
840 | */ | 834 | */ |
841 | static enum dev_type ppc4xx_edac_get_dtype(u32 mcopt1) | 835 | static enum dev_type __devinit |
836 | ppc4xx_edac_get_dtype(u32 mcopt1) | ||
842 | { | 837 | { |
843 | switch (mcopt1 & SDRAM_MCOPT1_WDTH_MASK) { | 838 | switch (mcopt1 & SDRAM_MCOPT1_WDTH_MASK) { |
844 | case SDRAM_MCOPT1_WDTH_16: | 839 | case SDRAM_MCOPT1_WDTH_16: |
@@ -861,7 +856,8 @@ static enum dev_type ppc4xx_edac_get_dtype(u32 mcopt1) | |||
861 | * | 856 | * |
862 | * Returns a memory type enumeration. | 857 | * Returns a memory type enumeration. |
863 | */ | 858 | */ |
864 | static enum mem_type ppc4xx_edac_get_mtype(u32 mcopt1) | 859 | static enum mem_type __devinit |
860 | ppc4xx_edac_get_mtype(u32 mcopt1) | ||
865 | { | 861 | { |
866 | bool rden = ((mcopt1 & SDRAM_MCOPT1_RDEN_MASK) == SDRAM_MCOPT1_RDEN); | 862 | bool rden = ((mcopt1 & SDRAM_MCOPT1_RDEN_MASK) == SDRAM_MCOPT1_RDEN); |
867 | 863 | ||
@@ -891,15 +887,17 @@ static enum mem_type ppc4xx_edac_get_mtype(u32 mcopt1) | |||
891 | * Returns 0 if OK; otherwise, -EINVAL if the memory bank size | 887 | * Returns 0 if OK; otherwise, -EINVAL if the memory bank size |
892 | * configuration cannot be determined. | 888 | * configuration cannot be determined. |
893 | */ | 889 | */ |
894 | static int ppc4xx_edac_init_csrows(struct mem_ctl_info *mci, u32 mcopt1) | 890 | static int __devinit |
891 | ppc4xx_edac_init_csrows(struct mem_ctl_info *mci, u32 mcopt1) | ||
895 | { | 892 | { |
896 | const struct ppc4xx_edac_pdata *pdata = mci->pvt_info; | 893 | const struct ppc4xx_edac_pdata *pdata = mci->pvt_info; |
897 | int status = 0; | 894 | int status = 0; |
898 | enum mem_type mtype; | 895 | enum mem_type mtype; |
899 | enum dev_type dtype; | 896 | enum dev_type dtype; |
900 | enum edac_type edac_mode; | 897 | enum edac_type edac_mode; |
901 | int row, j; | 898 | int row; |
902 | u32 mbxcf, size, nr_pages; | 899 | u32 mbxcf, size; |
900 | static u32 ppc4xx_last_page; | ||
903 | 901 | ||
904 | /* Establish the memory type and width */ | 902 | /* Establish the memory type and width */ |
905 | 903 | ||
@@ -950,7 +948,7 @@ static int ppc4xx_edac_init_csrows(struct mem_ctl_info *mci, u32 mcopt1) | |||
950 | case SDRAM_MBCF_SZ_2GB: | 948 | case SDRAM_MBCF_SZ_2GB: |
951 | case SDRAM_MBCF_SZ_4GB: | 949 | case SDRAM_MBCF_SZ_4GB: |
952 | case SDRAM_MBCF_SZ_8GB: | 950 | case SDRAM_MBCF_SZ_8GB: |
953 | nr_pages = SDRAM_MBCF_SZ_TO_PAGES(size); | 951 | csi->nr_pages = SDRAM_MBCF_SZ_TO_PAGES(size); |
954 | break; | 952 | break; |
955 | default: | 953 | default: |
956 | ppc4xx_edac_mc_printk(KERN_ERR, mci, | 954 | ppc4xx_edac_mc_printk(KERN_ERR, mci, |
@@ -961,6 +959,10 @@ static int ppc4xx_edac_init_csrows(struct mem_ctl_info *mci, u32 mcopt1) | |||
961 | goto done; | 959 | goto done; |
962 | } | 960 | } |
963 | 961 | ||
962 | csi->first_page = ppc4xx_last_page; | ||
963 | csi->last_page = csi->first_page + csi->nr_pages - 1; | ||
964 | csi->page_mask = 0; | ||
965 | |||
964 | /* | 966 | /* |
965 | * It's unclear exactly what grain should be set to | 967 | * It's unclear exactly what grain should be set to |
966 | * here. The SDRAM_ECCES register allows resolution of | 968 | * here. The SDRAM_ECCES register allows resolution of |
@@ -973,17 +975,15 @@ static int ppc4xx_edac_init_csrows(struct mem_ctl_info *mci, u32 mcopt1) | |||
973 | * possible values would be the PLB width (16), the | 975 | * possible values would be the PLB width (16), the |
974 | * page size (PAGE_SIZE) or the memory width (2 or 4). | 976 | * page size (PAGE_SIZE) or the memory width (2 or 4). |
975 | */ | 977 | */ |
976 | for (j = 0; j < csi->nr_channels; j++) { | ||
977 | struct dimm_info *dimm = csi->channels[j].dimm; | ||
978 | 978 | ||
979 | dimm->nr_pages = nr_pages / csi->nr_channels; | 979 | csi->grain = 1; |
980 | dimm->grain = 1; | ||
981 | 980 | ||
982 | dimm->mtype = mtype; | 981 | csi->mtype = mtype; |
983 | dimm->dtype = dtype; | 982 | csi->dtype = dtype; |
984 | 983 | ||
985 | dimm->edac_mode = edac_mode; | 984 | csi->edac_mode = edac_mode; |
986 | } | 985 | |
986 | ppc4xx_last_page += csi->nr_pages; | ||
987 | } | 987 | } |
988 | 988 | ||
989 | done: | 989 | done: |
@@ -1008,9 +1008,11 @@ static int ppc4xx_edac_init_csrows(struct mem_ctl_info *mci, u32 mcopt1) | |||
1008 | * | 1008 | * |
1009 | * Returns 0 if OK; otherwise, < 0 on error. | 1009 | * Returns 0 if OK; otherwise, < 0 on error. |
1010 | */ | 1010 | */ |
1011 | static int ppc4xx_edac_mc_init(struct mem_ctl_info *mci, | 1011 | static int __devinit |
1012 | struct platform_device *op, | 1012 | ppc4xx_edac_mc_init(struct mem_ctl_info *mci, |
1013 | const dcr_host_t *dcr_host, u32 mcopt1) | 1013 | struct platform_device *op, |
1014 | const dcr_host_t *dcr_host, | ||
1015 | u32 mcopt1) | ||
1014 | { | 1016 | { |
1015 | int status = 0; | 1017 | int status = 0; |
1016 | const u32 memcheck = (mcopt1 & SDRAM_MCOPT1_MCHK_MASK); | 1018 | const u32 memcheck = (mcopt1 & SDRAM_MCOPT1_MCHK_MASK); |
@@ -1022,9 +1024,9 @@ static int ppc4xx_edac_mc_init(struct mem_ctl_info *mci, | |||
1022 | 1024 | ||
1023 | /* Initial driver pointers and private data */ | 1025 | /* Initial driver pointers and private data */ |
1024 | 1026 | ||
1025 | mci->pdev = &op->dev; | 1027 | mci->dev = &op->dev; |
1026 | 1028 | ||
1027 | dev_set_drvdata(mci->pdev, mci); | 1029 | dev_set_drvdata(mci->dev, mci); |
1028 | 1030 | ||
1029 | pdata = mci->pvt_info; | 1031 | pdata = mci->pvt_info; |
1030 | 1032 | ||
@@ -1066,7 +1068,7 @@ static int ppc4xx_edac_mc_init(struct mem_ctl_info *mci, | |||
1066 | 1068 | ||
1067 | mci->mod_name = PPC4XX_EDAC_MODULE_NAME; | 1069 | mci->mod_name = PPC4XX_EDAC_MODULE_NAME; |
1068 | mci->mod_ver = PPC4XX_EDAC_MODULE_REVISION; | 1070 | mci->mod_ver = PPC4XX_EDAC_MODULE_REVISION; |
1069 | mci->ctl_name = ppc4xx_edac_match->compatible, | 1071 | mci->ctl_name = match->compatible, |
1070 | mci->dev_name = np->full_name; | 1072 | mci->dev_name = np->full_name; |
1071 | 1073 | ||
1072 | /* Initialize callbacks */ | 1074 | /* Initialize callbacks */ |
@@ -1100,8 +1102,8 @@ static int ppc4xx_edac_mc_init(struct mem_ctl_info *mci, | |||
1100 | * Returns 0 if OK; otherwise, -ENODEV if the interrupts could not be | 1102 | * Returns 0 if OK; otherwise, -ENODEV if the interrupts could not be |
1101 | * mapped and assigned. | 1103 | * mapped and assigned. |
1102 | */ | 1104 | */ |
1103 | static int ppc4xx_edac_register_irq(struct platform_device *op, | 1105 | static int __devinit |
1104 | struct mem_ctl_info *mci) | 1106 | ppc4xx_edac_register_irq(struct platform_device *op, struct mem_ctl_info *mci) |
1105 | { | 1107 | { |
1106 | int status = 0; | 1108 | int status = 0; |
1107 | int ded_irq, sec_irq; | 1109 | int ded_irq, sec_irq; |
@@ -1178,8 +1180,8 @@ static int ppc4xx_edac_register_irq(struct platform_device *op, | |||
1178 | * Returns 0 if the DCRs were successfully mapped; otherwise, < 0 on | 1180 | * Returns 0 if the DCRs were successfully mapped; otherwise, < 0 on |
1179 | * error. | 1181 | * error. |
1180 | */ | 1182 | */ |
1181 | static int ppc4xx_edac_map_dcrs(const struct device_node *np, | 1183 | static int __devinit |
1182 | dcr_host_t *dcr_host) | 1184 | ppc4xx_edac_map_dcrs(const struct device_node *np, dcr_host_t *dcr_host) |
1183 | { | 1185 | { |
1184 | unsigned int dcr_base, dcr_len; | 1186 | unsigned int dcr_base, dcr_len; |
1185 | 1187 | ||
@@ -1227,14 +1229,13 @@ static int ppc4xx_edac_map_dcrs(const struct device_node *np, | |||
1227 | * Returns 0 if the controller instance was successfully bound to the | 1229 | * Returns 0 if the controller instance was successfully bound to the |
1228 | * driver; otherwise, < 0 on error. | 1230 | * driver; otherwise, < 0 on error. |
1229 | */ | 1231 | */ |
1230 | static int ppc4xx_edac_probe(struct platform_device *op) | 1232 | static int __devinit ppc4xx_edac_probe(struct platform_device *op) |
1231 | { | 1233 | { |
1232 | int status = 0; | 1234 | int status = 0; |
1233 | u32 mcopt1, memcheck; | 1235 | u32 mcopt1, memcheck; |
1234 | dcr_host_t dcr_host; | 1236 | dcr_host_t dcr_host; |
1235 | const struct device_node *np = op->dev.of_node; | 1237 | const struct device_node *np = op->dev.of_node; |
1236 | struct mem_ctl_info *mci = NULL; | 1238 | struct mem_ctl_info *mci = NULL; |
1237 | struct edac_mc_layer layers[2]; | ||
1238 | static int ppc4xx_edac_instance; | 1239 | static int ppc4xx_edac_instance; |
1239 | 1240 | ||
1240 | /* | 1241 | /* |
@@ -1280,14 +1281,12 @@ static int ppc4xx_edac_probe(struct platform_device *op) | |||
1280 | * controller instance and perform the appropriate | 1281 | * controller instance and perform the appropriate |
1281 | * initialization. | 1282 | * initialization. |
1282 | */ | 1283 | */ |
1283 | layers[0].type = EDAC_MC_LAYER_CHIP_SELECT; | 1284 | |
1284 | layers[0].size = ppc4xx_edac_nr_csrows; | 1285 | mci = edac_mc_alloc(sizeof(struct ppc4xx_edac_pdata), |
1285 | layers[0].is_virt_csrow = true; | 1286 | ppc4xx_edac_nr_csrows, |
1286 | layers[1].type = EDAC_MC_LAYER_CHANNEL; | 1287 | ppc4xx_edac_nr_chans, |
1287 | layers[1].size = ppc4xx_edac_nr_chans; | 1288 | ppc4xx_edac_instance); |
1288 | layers[1].is_virt_csrow = false; | 1289 | |
1289 | mci = edac_mc_alloc(ppc4xx_edac_instance, ARRAY_SIZE(layers), layers, | ||
1290 | sizeof(struct ppc4xx_edac_pdata)); | ||
1291 | if (mci == NULL) { | 1290 | if (mci == NULL) { |
1292 | ppc4xx_edac_printk(KERN_ERR, "%s: " | 1291 | ppc4xx_edac_printk(KERN_ERR, "%s: " |
1293 | "Failed to allocate EDAC MC instance!\n", | 1292 | "Failed to allocate EDAC MC instance!\n", |
@@ -1329,7 +1328,7 @@ static int ppc4xx_edac_probe(struct platform_device *op) | |||
1329 | return 0; | 1328 | return 0; |
1330 | 1329 | ||
1331 | fail1: | 1330 | fail1: |
1332 | edac_mc_del_mc(mci->pdev); | 1331 | edac_mc_del_mc(mci->dev); |
1333 | 1332 | ||
1334 | fail: | 1333 | fail: |
1335 | edac_mc_free(mci); | 1334 | edac_mc_free(mci); |
@@ -1363,7 +1362,7 @@ ppc4xx_edac_remove(struct platform_device *op) | |||
1363 | 1362 | ||
1364 | dcr_unmap(pdata->dcr_host, SDRAM_DCR_RESOURCE_LEN); | 1363 | dcr_unmap(pdata->dcr_host, SDRAM_DCR_RESOURCE_LEN); |
1365 | 1364 | ||
1366 | edac_mc_del_mc(mci->pdev); | 1365 | edac_mc_del_mc(mci->dev); |
1367 | edac_mc_free(mci); | 1366 | edac_mc_free(mci); |
1368 | 1367 | ||
1369 | return 0; | 1368 | return 0; |
diff --git a/drivers/edac/r82600_edac.c b/drivers/edac/r82600_edac.c index 2fd6a549090..b153674431f 100644 --- a/drivers/edac/r82600_edac.c +++ b/drivers/edac/r82600_edac.c | |||
@@ -131,7 +131,7 @@ struct r82600_error_info { | |||
131 | u32 eapr; | 131 | u32 eapr; |
132 | }; | 132 | }; |
133 | 133 | ||
134 | static bool disable_hardware_scrub; | 134 | static unsigned int disable_hardware_scrub; |
135 | 135 | ||
136 | static struct edac_pci_ctl_info *r82600_pci; | 136 | static struct edac_pci_ctl_info *r82600_pci; |
137 | 137 | ||
@@ -140,7 +140,7 @@ static void r82600_get_error_info(struct mem_ctl_info *mci, | |||
140 | { | 140 | { |
141 | struct pci_dev *pdev; | 141 | struct pci_dev *pdev; |
142 | 142 | ||
143 | pdev = to_pci_dev(mci->pdev); | 143 | pdev = to_pci_dev(mci->dev); |
144 | pci_read_config_dword(pdev, R82600_EAP, &info->eapr); | 144 | pci_read_config_dword(pdev, R82600_EAP, &info->eapr); |
145 | 145 | ||
146 | if (info->eapr & BIT(0)) | 146 | if (info->eapr & BIT(0)) |
@@ -179,11 +179,10 @@ static int r82600_process_error_info(struct mem_ctl_info *mci, | |||
179 | error_found = 1; | 179 | error_found = 1; |
180 | 180 | ||
181 | if (handle_errors) | 181 | if (handle_errors) |
182 | edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1, | 182 | edac_mc_handle_ce(mci, page, 0, /* not avail */ |
183 | page, 0, syndrome, | 183 | syndrome, |
184 | edac_mc_find_csrow_by_page(mci, page), | 184 | edac_mc_find_csrow_by_page(mci, page), |
185 | 0, -1, | 185 | 0, mci->ctl_name); |
186 | mci->ctl_name, ""); | ||
187 | } | 186 | } |
188 | 187 | ||
189 | if (info->eapr & BIT(1)) { /* UE? */ | 188 | if (info->eapr & BIT(1)) { /* UE? */ |
@@ -191,11 +190,9 @@ static int r82600_process_error_info(struct mem_ctl_info *mci, | |||
191 | 190 | ||
192 | if (handle_errors) | 191 | if (handle_errors) |
193 | /* 82600 doesn't give enough info */ | 192 | /* 82600 doesn't give enough info */ |
194 | edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1, | 193 | edac_mc_handle_ue(mci, page, 0, |
195 | page, 0, 0, | 194 | edac_mc_find_csrow_by_page(mci, page), |
196 | edac_mc_find_csrow_by_page(mci, page), | 195 | mci->ctl_name); |
197 | 0, -1, | ||
198 | mci->ctl_name, ""); | ||
199 | } | 196 | } |
200 | 197 | ||
201 | return error_found; | 198 | return error_found; |
@@ -205,7 +202,7 @@ static void r82600_check(struct mem_ctl_info *mci) | |||
205 | { | 202 | { |
206 | struct r82600_error_info info; | 203 | struct r82600_error_info info; |
207 | 204 | ||
208 | edac_dbg(1, "MC%d\n", mci->mc_idx); | 205 | debugf1("MC%d: %s()\n", mci->mc_idx, __func__); |
209 | r82600_get_error_info(mci, &info); | 206 | r82600_get_error_info(mci, &info); |
210 | r82600_process_error_info(mci, &info, 1); | 207 | r82600_process_error_info(mci, &info, 1); |
211 | } | 208 | } |
@@ -219,7 +216,6 @@ static void r82600_init_csrows(struct mem_ctl_info *mci, struct pci_dev *pdev, | |||
219 | u8 dramcr) | 216 | u8 dramcr) |
220 | { | 217 | { |
221 | struct csrow_info *csrow; | 218 | struct csrow_info *csrow; |
222 | struct dimm_info *dimm; | ||
223 | int index; | 219 | int index; |
224 | u8 drbar; /* SDRAM Row Boundary Address Register */ | 220 | u8 drbar; /* SDRAM Row Boundary Address Register */ |
225 | u32 row_high_limit, row_high_limit_last; | 221 | u32 row_high_limit, row_high_limit_last; |
@@ -230,19 +226,18 @@ static void r82600_init_csrows(struct mem_ctl_info *mci, struct pci_dev *pdev, | |||
230 | row_high_limit_last = 0; | 226 | row_high_limit_last = 0; |
231 | 227 | ||
232 | for (index = 0; index < mci->nr_csrows; index++) { | 228 | for (index = 0; index < mci->nr_csrows; index++) { |
233 | csrow = mci->csrows[index]; | 229 | csrow = &mci->csrows[index]; |
234 | dimm = csrow->channels[0]->dimm; | ||
235 | 230 | ||
236 | /* find the DRAM Chip Select Base address and mask */ | 231 | /* find the DRAM Chip Select Base address and mask */ |
237 | pci_read_config_byte(pdev, R82600_DRBA + index, &drbar); | 232 | pci_read_config_byte(pdev, R82600_DRBA + index, &drbar); |
238 | 233 | ||
239 | edac_dbg(1, "Row=%d DRBA = %#0x\n", index, drbar); | 234 | debugf1("%s() Row=%d DRBA = %#0x\n", __func__, index, drbar); |
240 | 235 | ||
241 | row_high_limit = ((u32) drbar << 24); | 236 | row_high_limit = ((u32) drbar << 24); |
242 | /* row_high_limit = ((u32)drbar << 24) | 0xffffffUL; */ | 237 | /* row_high_limit = ((u32)drbar << 24) | 0xffffffUL; */ |
243 | 238 | ||
244 | edac_dbg(1, "Row=%d, Boundary Address=%#0x, Last = %#0x\n", | 239 | debugf1("%s() Row=%d, Boundary Address=%#0x, Last = %#0x\n", |
245 | index, row_high_limit, row_high_limit_last); | 240 | __func__, index, row_high_limit, row_high_limit_last); |
246 | 241 | ||
247 | /* Empty row [p.57] */ | 242 | /* Empty row [p.57] */ |
248 | if (row_high_limit == row_high_limit_last) | 243 | if (row_high_limit == row_high_limit_last) |
@@ -252,17 +247,16 @@ static void r82600_init_csrows(struct mem_ctl_info *mci, struct pci_dev *pdev, | |||
252 | 247 | ||
253 | csrow->first_page = row_base >> PAGE_SHIFT; | 248 | csrow->first_page = row_base >> PAGE_SHIFT; |
254 | csrow->last_page = (row_high_limit >> PAGE_SHIFT) - 1; | 249 | csrow->last_page = (row_high_limit >> PAGE_SHIFT) - 1; |
255 | 250 | csrow->nr_pages = csrow->last_page - csrow->first_page + 1; | |
256 | dimm->nr_pages = csrow->last_page - csrow->first_page + 1; | ||
257 | /* Error address is top 19 bits - so granularity is * | 251 | /* Error address is top 19 bits - so granularity is * |
258 | * 14 bits */ | 252 | * 14 bits */ |
259 | dimm->grain = 1 << 14; | 253 | csrow->grain = 1 << 14; |
260 | dimm->mtype = reg_sdram ? MEM_RDDR : MEM_DDR; | 254 | csrow->mtype = reg_sdram ? MEM_RDDR : MEM_DDR; |
261 | /* FIXME - check that this is unknowable with this chipset */ | 255 | /* FIXME - check that this is unknowable with this chipset */ |
262 | dimm->dtype = DEV_UNKNOWN; | 256 | csrow->dtype = DEV_UNKNOWN; |
263 | 257 | ||
264 | /* Mode is global on 82600 */ | 258 | /* Mode is global on 82600 */ |
265 | dimm->edac_mode = ecc_on ? EDAC_SECDED : EDAC_NONE; | 259 | csrow->edac_mode = ecc_on ? EDAC_SECDED : EDAC_NONE; |
266 | row_high_limit_last = row_high_limit; | 260 | row_high_limit_last = row_high_limit; |
267 | } | 261 | } |
268 | } | 262 | } |
@@ -270,32 +264,27 @@ static void r82600_init_csrows(struct mem_ctl_info *mci, struct pci_dev *pdev, | |||
270 | static int r82600_probe1(struct pci_dev *pdev, int dev_idx) | 264 | static int r82600_probe1(struct pci_dev *pdev, int dev_idx) |
271 | { | 265 | { |
272 | struct mem_ctl_info *mci; | 266 | struct mem_ctl_info *mci; |
273 | struct edac_mc_layer layers[2]; | ||
274 | u8 dramcr; | 267 | u8 dramcr; |
275 | u32 eapr; | 268 | u32 eapr; |
276 | u32 scrub_disabled; | 269 | u32 scrub_disabled; |
277 | u32 sdram_refresh_rate; | 270 | u32 sdram_refresh_rate; |
278 | struct r82600_error_info discard; | 271 | struct r82600_error_info discard; |
279 | 272 | ||
280 | edac_dbg(0, "\n"); | 273 | debugf0("%s()\n", __func__); |
281 | pci_read_config_byte(pdev, R82600_DRAMC, &dramcr); | 274 | pci_read_config_byte(pdev, R82600_DRAMC, &dramcr); |
282 | pci_read_config_dword(pdev, R82600_EAP, &eapr); | 275 | pci_read_config_dword(pdev, R82600_EAP, &eapr); |
283 | scrub_disabled = eapr & BIT(31); | 276 | scrub_disabled = eapr & BIT(31); |
284 | sdram_refresh_rate = dramcr & (BIT(0) | BIT(1)); | 277 | sdram_refresh_rate = dramcr & (BIT(0) | BIT(1)); |
285 | edac_dbg(2, "sdram refresh rate = %#0x\n", sdram_refresh_rate); | 278 | debugf2("%s(): sdram refresh rate = %#0x\n", __func__, |
286 | edac_dbg(2, "DRAMC register = %#0x\n", dramcr); | 279 | sdram_refresh_rate); |
287 | layers[0].type = EDAC_MC_LAYER_CHIP_SELECT; | 280 | debugf2("%s(): DRAMC register = %#0x\n", __func__, dramcr); |
288 | layers[0].size = R82600_NR_CSROWS; | 281 | mci = edac_mc_alloc(0, R82600_NR_CSROWS, R82600_NR_CHANS, 0); |
289 | layers[0].is_virt_csrow = true; | 282 | |
290 | layers[1].type = EDAC_MC_LAYER_CHANNEL; | ||
291 | layers[1].size = R82600_NR_CHANS; | ||
292 | layers[1].is_virt_csrow = false; | ||
293 | mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers, 0); | ||
294 | if (mci == NULL) | 283 | if (mci == NULL) |
295 | return -ENOMEM; | 284 | return -ENOMEM; |
296 | 285 | ||
297 | edac_dbg(0, "mci = %p\n", mci); | 286 | debugf0("%s(): mci = %p\n", __func__, mci); |
298 | mci->pdev = &pdev->dev; | 287 | mci->dev = &pdev->dev; |
299 | mci->mtype_cap = MEM_FLAG_RDDR | MEM_FLAG_DDR; | 288 | mci->mtype_cap = MEM_FLAG_RDDR | MEM_FLAG_DDR; |
300 | mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_EC | EDAC_FLAG_SECDED; | 289 | mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_EC | EDAC_FLAG_SECDED; |
301 | /* FIXME try to work out if the chip leads have been used for COM2 | 290 | /* FIXME try to work out if the chip leads have been used for COM2 |
@@ -310,8 +299,8 @@ static int r82600_probe1(struct pci_dev *pdev, int dev_idx) | |||
310 | 299 | ||
311 | if (ecc_enabled(dramcr)) { | 300 | if (ecc_enabled(dramcr)) { |
312 | if (scrub_disabled) | 301 | if (scrub_disabled) |
313 | edac_dbg(3, "mci = %p - Scrubbing disabled! EAP: %#0x\n", | 302 | debugf3("%s(): mci = %p - Scrubbing disabled! EAP: " |
314 | mci, eapr); | 303 | "%#0x\n", __func__, mci, eapr); |
315 | } else | 304 | } else |
316 | mci->edac_cap = EDAC_FLAG_NONE; | 305 | mci->edac_cap = EDAC_FLAG_NONE; |
317 | 306 | ||
@@ -328,14 +317,15 @@ static int r82600_probe1(struct pci_dev *pdev, int dev_idx) | |||
328 | * type of memory controller. The ID is therefore hardcoded to 0. | 317 | * type of memory controller. The ID is therefore hardcoded to 0. |
329 | */ | 318 | */ |
330 | if (edac_mc_add_mc(mci)) { | 319 | if (edac_mc_add_mc(mci)) { |
331 | edac_dbg(3, "failed edac_mc_add_mc()\n"); | 320 | debugf3("%s(): failed edac_mc_add_mc()\n", __func__); |
332 | goto fail; | 321 | goto fail; |
333 | } | 322 | } |
334 | 323 | ||
335 | /* get this far and it's successful */ | 324 | /* get this far and it's successful */ |
336 | 325 | ||
337 | if (disable_hardware_scrub) { | 326 | if (disable_hardware_scrub) { |
338 | edac_dbg(3, "Disabling Hardware Scrub (scrub on error)\n"); | 327 | debugf3("%s(): Disabling Hardware Scrub (scrub on error)\n", |
328 | __func__); | ||
339 | pci_write_bits32(pdev, R82600_EAP, BIT(31), BIT(31)); | 329 | pci_write_bits32(pdev, R82600_EAP, BIT(31), BIT(31)); |
340 | } | 330 | } |
341 | 331 | ||
@@ -350,7 +340,7 @@ static int r82600_probe1(struct pci_dev *pdev, int dev_idx) | |||
350 | __func__); | 340 | __func__); |
351 | } | 341 | } |
352 | 342 | ||
353 | edac_dbg(3, "success\n"); | 343 | debugf3("%s(): success\n", __func__); |
354 | return 0; | 344 | return 0; |
355 | 345 | ||
356 | fail: | 346 | fail: |
@@ -359,20 +349,20 @@ fail: | |||
359 | } | 349 | } |
360 | 350 | ||
361 | /* returns count (>= 0), or negative on error */ | 351 | /* returns count (>= 0), or negative on error */ |
362 | static int r82600_init_one(struct pci_dev *pdev, | 352 | static int __devinit r82600_init_one(struct pci_dev *pdev, |
363 | const struct pci_device_id *ent) | 353 | const struct pci_device_id *ent) |
364 | { | 354 | { |
365 | edac_dbg(0, "\n"); | 355 | debugf0("%s()\n", __func__); |
366 | 356 | ||
367 | /* don't need to call pci_enable_device() */ | 357 | /* don't need to call pci_enable_device() */ |
368 | return r82600_probe1(pdev, ent->driver_data); | 358 | return r82600_probe1(pdev, ent->driver_data); |
369 | } | 359 | } |
370 | 360 | ||
371 | static void r82600_remove_one(struct pci_dev *pdev) | 361 | static void __devexit r82600_remove_one(struct pci_dev *pdev) |
372 | { | 362 | { |
373 | struct mem_ctl_info *mci; | 363 | struct mem_ctl_info *mci; |
374 | 364 | ||
375 | edac_dbg(0, "\n"); | 365 | debugf0("%s()\n", __func__); |
376 | 366 | ||
377 | if (r82600_pci) | 367 | if (r82600_pci) |
378 | edac_pci_release_generic_ctl(r82600_pci); | 368 | edac_pci_release_generic_ctl(r82600_pci); |
@@ -383,7 +373,7 @@ static void r82600_remove_one(struct pci_dev *pdev) | |||
383 | edac_mc_free(mci); | 373 | edac_mc_free(mci); |
384 | } | 374 | } |
385 | 375 | ||
386 | static DEFINE_PCI_DEVICE_TABLE(r82600_pci_tbl) = { | 376 | static const struct pci_device_id r82600_pci_tbl[] __devinitdata = { |
387 | { | 377 | { |
388 | PCI_DEVICE(PCI_VENDOR_ID_RADISYS, R82600_BRIDGE_ID) | 378 | PCI_DEVICE(PCI_VENDOR_ID_RADISYS, R82600_BRIDGE_ID) |
389 | }, | 379 | }, |
@@ -397,7 +387,7 @@ MODULE_DEVICE_TABLE(pci, r82600_pci_tbl); | |||
397 | static struct pci_driver r82600_driver = { | 387 | static struct pci_driver r82600_driver = { |
398 | .name = EDAC_MOD_STR, | 388 | .name = EDAC_MOD_STR, |
399 | .probe = r82600_init_one, | 389 | .probe = r82600_init_one, |
400 | .remove = r82600_remove_one, | 390 | .remove = __devexit_p(r82600_remove_one), |
401 | .id_table = r82600_pci_tbl, | 391 | .id_table = r82600_pci_tbl, |
402 | }; | 392 | }; |
403 | 393 | ||
diff --git a/drivers/edac/sb_edac.c b/drivers/edac/sb_edac.c deleted file mode 100644 index da7e2986e3d..00000000000 --- a/drivers/edac/sb_edac.c +++ /dev/null | |||
@@ -1,1838 +0,0 @@ | |||
1 | /* Intel Sandy Bridge -EN/-EP/-EX Memory Controller kernel module | ||
2 | * | ||
3 | * This driver supports the memory controllers found on the Intel | ||
4 | * processor family Sandy Bridge. | ||
5 | * | ||
6 | * This file may be distributed under the terms of the | ||
7 | * GNU General Public License version 2 only. | ||
8 | * | ||
9 | * Copyright (c) 2011 by: | ||
10 | * Mauro Carvalho Chehab <mchehab@redhat.com> | ||
11 | */ | ||
12 | |||
13 | #include <linux/module.h> | ||
14 | #include <linux/init.h> | ||
15 | #include <linux/pci.h> | ||
16 | #include <linux/pci_ids.h> | ||
17 | #include <linux/slab.h> | ||
18 | #include <linux/delay.h> | ||
19 | #include <linux/edac.h> | ||
20 | #include <linux/mmzone.h> | ||
21 | #include <linux/smp.h> | ||
22 | #include <linux/bitmap.h> | ||
23 | #include <linux/math64.h> | ||
24 | #include <asm/processor.h> | ||
25 | #include <asm/mce.h> | ||
26 | |||
27 | #include "edac_core.h" | ||
28 | |||
29 | /* Static vars */ | ||
30 | static LIST_HEAD(sbridge_edac_list); | ||
31 | static DEFINE_MUTEX(sbridge_edac_lock); | ||
32 | static int probed; | ||
33 | |||
34 | /* | ||
35 | * Alter this version for the module when modifications are made | ||
36 | */ | ||
37 | #define SBRIDGE_REVISION " Ver: 1.0.0 " | ||
38 | #define EDAC_MOD_STR "sbridge_edac" | ||
39 | |||
40 | /* | ||
41 | * Debug macros | ||
42 | */ | ||
43 | #define sbridge_printk(level, fmt, arg...) \ | ||
44 | edac_printk(level, "sbridge", fmt, ##arg) | ||
45 | |||
46 | #define sbridge_mc_printk(mci, level, fmt, arg...) \ | ||
47 | edac_mc_chipset_printk(mci, level, "sbridge", fmt, ##arg) | ||
48 | |||
49 | /* | ||
50 | * Get a bit field at register value <v>, from bit <lo> to bit <hi> | ||
51 | */ | ||
52 | #define GET_BITFIELD(v, lo, hi) \ | ||
53 | (((v) & ((1ULL << ((hi) - (lo) + 1)) - 1) << (lo)) >> (lo)) | ||
54 | |||
55 | /* | ||
56 | * sbridge Memory Controller Registers | ||
57 | */ | ||
58 | |||
59 | /* | ||
60 | * FIXME: For now, let's order by device function, as it makes | ||
61 | * easier for driver's development process. This table should be | ||
62 | * moved to pci_id.h when submitted upstream | ||
63 | */ | ||
64 | #define PCI_DEVICE_ID_INTEL_SBRIDGE_SAD0 0x3cf4 /* 12.6 */ | ||
65 | #define PCI_DEVICE_ID_INTEL_SBRIDGE_SAD1 0x3cf6 /* 12.7 */ | ||
66 | #define PCI_DEVICE_ID_INTEL_SBRIDGE_BR 0x3cf5 /* 13.6 */ | ||
67 | #define PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_HA0 0x3ca0 /* 14.0 */ | ||
68 | #define PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TA 0x3ca8 /* 15.0 */ | ||
69 | #define PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_RAS 0x3c71 /* 15.1 */ | ||
70 | #define PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD0 0x3caa /* 15.2 */ | ||
71 | #define PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD1 0x3cab /* 15.3 */ | ||
72 | #define PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD2 0x3cac /* 15.4 */ | ||
73 | #define PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD3 0x3cad /* 15.5 */ | ||
74 | #define PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_DDRIO 0x3cb8 /* 17.0 */ | ||
75 | |||
76 | /* | ||
77 | * Currently, unused, but will be needed in the future | ||
78 | * implementations, as they hold the error counters | ||
79 | */ | ||
80 | #define PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_ERR0 0x3c72 /* 16.2 */ | ||
81 | #define PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_ERR1 0x3c73 /* 16.3 */ | ||
82 | #define PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_ERR2 0x3c76 /* 16.6 */ | ||
83 | #define PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_ERR3 0x3c77 /* 16.7 */ | ||
84 | |||
85 | /* Devices 12 Function 6, Offsets 0x80 to 0xcc */ | ||
86 | static const u32 dram_rule[] = { | ||
87 | 0x80, 0x88, 0x90, 0x98, 0xa0, | ||
88 | 0xa8, 0xb0, 0xb8, 0xc0, 0xc8, | ||
89 | }; | ||
90 | #define MAX_SAD ARRAY_SIZE(dram_rule) | ||
91 | |||
92 | #define SAD_LIMIT(reg) ((GET_BITFIELD(reg, 6, 25) << 26) | 0x3ffffff) | ||
93 | #define DRAM_ATTR(reg) GET_BITFIELD(reg, 2, 3) | ||
94 | #define INTERLEAVE_MODE(reg) GET_BITFIELD(reg, 1, 1) | ||
95 | #define DRAM_RULE_ENABLE(reg) GET_BITFIELD(reg, 0, 0) | ||
96 | |||
97 | static char *get_dram_attr(u32 reg) | ||
98 | { | ||
99 | switch(DRAM_ATTR(reg)) { | ||
100 | case 0: | ||
101 | return "DRAM"; | ||
102 | case 1: | ||
103 | return "MMCFG"; | ||
104 | case 2: | ||
105 | return "NXM"; | ||
106 | default: | ||
107 | return "unknown"; | ||
108 | } | ||
109 | } | ||
110 | |||
111 | static const u32 interleave_list[] = { | ||
112 | 0x84, 0x8c, 0x94, 0x9c, 0xa4, | ||
113 | 0xac, 0xb4, 0xbc, 0xc4, 0xcc, | ||
114 | }; | ||
115 | #define MAX_INTERLEAVE ARRAY_SIZE(interleave_list) | ||
116 | |||
117 | #define SAD_PKG0(reg) GET_BITFIELD(reg, 0, 2) | ||
118 | #define SAD_PKG1(reg) GET_BITFIELD(reg, 3, 5) | ||
119 | #define SAD_PKG2(reg) GET_BITFIELD(reg, 8, 10) | ||
120 | #define SAD_PKG3(reg) GET_BITFIELD(reg, 11, 13) | ||
121 | #define SAD_PKG4(reg) GET_BITFIELD(reg, 16, 18) | ||
122 | #define SAD_PKG5(reg) GET_BITFIELD(reg, 19, 21) | ||
123 | #define SAD_PKG6(reg) GET_BITFIELD(reg, 24, 26) | ||
124 | #define SAD_PKG7(reg) GET_BITFIELD(reg, 27, 29) | ||
125 | |||
126 | static inline int sad_pkg(u32 reg, int interleave) | ||
127 | { | ||
128 | switch (interleave) { | ||
129 | case 0: | ||
130 | return SAD_PKG0(reg); | ||
131 | case 1: | ||
132 | return SAD_PKG1(reg); | ||
133 | case 2: | ||
134 | return SAD_PKG2(reg); | ||
135 | case 3: | ||
136 | return SAD_PKG3(reg); | ||
137 | case 4: | ||
138 | return SAD_PKG4(reg); | ||
139 | case 5: | ||
140 | return SAD_PKG5(reg); | ||
141 | case 6: | ||
142 | return SAD_PKG6(reg); | ||
143 | case 7: | ||
144 | return SAD_PKG7(reg); | ||
145 | default: | ||
146 | return -EINVAL; | ||
147 | } | ||
148 | } | ||
149 | |||
150 | /* Devices 12 Function 7 */ | ||
151 | |||
152 | #define TOLM 0x80 | ||
153 | #define TOHM 0x84 | ||
154 | |||
155 | #define GET_TOLM(reg) ((GET_BITFIELD(reg, 0, 3) << 28) | 0x3ffffff) | ||
156 | #define GET_TOHM(reg) ((GET_BITFIELD(reg, 0, 20) << 25) | 0x3ffffff) | ||
157 | |||
158 | /* Device 13 Function 6 */ | ||
159 | |||
160 | #define SAD_TARGET 0xf0 | ||
161 | |||
162 | #define SOURCE_ID(reg) GET_BITFIELD(reg, 9, 11) | ||
163 | |||
164 | #define SAD_CONTROL 0xf4 | ||
165 | |||
166 | #define NODE_ID(reg) GET_BITFIELD(reg, 0, 2) | ||
167 | |||
168 | /* Device 14 function 0 */ | ||
169 | |||
170 | static const u32 tad_dram_rule[] = { | ||
171 | 0x40, 0x44, 0x48, 0x4c, | ||
172 | 0x50, 0x54, 0x58, 0x5c, | ||
173 | 0x60, 0x64, 0x68, 0x6c, | ||
174 | }; | ||
175 | #define MAX_TAD ARRAY_SIZE(tad_dram_rule) | ||
176 | |||
177 | #define TAD_LIMIT(reg) ((GET_BITFIELD(reg, 12, 31) << 26) | 0x3ffffff) | ||
178 | #define TAD_SOCK(reg) GET_BITFIELD(reg, 10, 11) | ||
179 | #define TAD_CH(reg) GET_BITFIELD(reg, 8, 9) | ||
180 | #define TAD_TGT3(reg) GET_BITFIELD(reg, 6, 7) | ||
181 | #define TAD_TGT2(reg) GET_BITFIELD(reg, 4, 5) | ||
182 | #define TAD_TGT1(reg) GET_BITFIELD(reg, 2, 3) | ||
183 | #define TAD_TGT0(reg) GET_BITFIELD(reg, 0, 1) | ||
184 | |||
185 | /* Device 15, function 0 */ | ||
186 | |||
187 | #define MCMTR 0x7c | ||
188 | |||
189 | #define IS_ECC_ENABLED(mcmtr) GET_BITFIELD(mcmtr, 2, 2) | ||
190 | #define IS_LOCKSTEP_ENABLED(mcmtr) GET_BITFIELD(mcmtr, 1, 1) | ||
191 | #define IS_CLOSE_PG(mcmtr) GET_BITFIELD(mcmtr, 0, 0) | ||
192 | |||
193 | /* Device 15, function 1 */ | ||
194 | |||
195 | #define RASENABLES 0xac | ||
196 | #define IS_MIRROR_ENABLED(reg) GET_BITFIELD(reg, 0, 0) | ||
197 | |||
198 | /* Device 15, functions 2-5 */ | ||
199 | |||
200 | static const int mtr_regs[] = { | ||
201 | 0x80, 0x84, 0x88, | ||
202 | }; | ||
203 | |||
204 | #define RANK_DISABLE(mtr) GET_BITFIELD(mtr, 16, 19) | ||
205 | #define IS_DIMM_PRESENT(mtr) GET_BITFIELD(mtr, 14, 14) | ||
206 | #define RANK_CNT_BITS(mtr) GET_BITFIELD(mtr, 12, 13) | ||
207 | #define RANK_WIDTH_BITS(mtr) GET_BITFIELD(mtr, 2, 4) | ||
208 | #define COL_WIDTH_BITS(mtr) GET_BITFIELD(mtr, 0, 1) | ||
209 | |||
210 | static const u32 tad_ch_nilv_offset[] = { | ||
211 | 0x90, 0x94, 0x98, 0x9c, | ||
212 | 0xa0, 0xa4, 0xa8, 0xac, | ||
213 | 0xb0, 0xb4, 0xb8, 0xbc, | ||
214 | }; | ||
215 | #define CHN_IDX_OFFSET(reg) GET_BITFIELD(reg, 28, 29) | ||
216 | #define TAD_OFFSET(reg) (GET_BITFIELD(reg, 6, 25) << 26) | ||
217 | |||
218 | static const u32 rir_way_limit[] = { | ||
219 | 0x108, 0x10c, 0x110, 0x114, 0x118, | ||
220 | }; | ||
221 | #define MAX_RIR_RANGES ARRAY_SIZE(rir_way_limit) | ||
222 | |||
223 | #define IS_RIR_VALID(reg) GET_BITFIELD(reg, 31, 31) | ||
224 | #define RIR_WAY(reg) GET_BITFIELD(reg, 28, 29) | ||
225 | #define RIR_LIMIT(reg) ((GET_BITFIELD(reg, 1, 10) << 29)| 0x1fffffff) | ||
226 | |||
227 | #define MAX_RIR_WAY 8 | ||
228 | |||
229 | static const u32 rir_offset[MAX_RIR_RANGES][MAX_RIR_WAY] = { | ||
230 | { 0x120, 0x124, 0x128, 0x12c, 0x130, 0x134, 0x138, 0x13c }, | ||
231 | { 0x140, 0x144, 0x148, 0x14c, 0x150, 0x154, 0x158, 0x15c }, | ||
232 | { 0x160, 0x164, 0x168, 0x16c, 0x170, 0x174, 0x178, 0x17c }, | ||
233 | { 0x180, 0x184, 0x188, 0x18c, 0x190, 0x194, 0x198, 0x19c }, | ||
234 | { 0x1a0, 0x1a4, 0x1a8, 0x1ac, 0x1b0, 0x1b4, 0x1b8, 0x1bc }, | ||
235 | }; | ||
236 | |||
237 | #define RIR_RNK_TGT(reg) GET_BITFIELD(reg, 16, 19) | ||
238 | #define RIR_OFFSET(reg) GET_BITFIELD(reg, 2, 14) | ||
239 | |||
240 | /* Device 16, functions 2-7 */ | ||
241 | |||
242 | /* | ||
243 | * FIXME: Implement the error count reads directly | ||
244 | */ | ||
245 | |||
246 | static const u32 correrrcnt[] = { | ||
247 | 0x104, 0x108, 0x10c, 0x110, | ||
248 | }; | ||
249 | |||
250 | #define RANK_ODD_OV(reg) GET_BITFIELD(reg, 31, 31) | ||
251 | #define RANK_ODD_ERR_CNT(reg) GET_BITFIELD(reg, 16, 30) | ||
252 | #define RANK_EVEN_OV(reg) GET_BITFIELD(reg, 15, 15) | ||
253 | #define RANK_EVEN_ERR_CNT(reg) GET_BITFIELD(reg, 0, 14) | ||
254 | |||
255 | static const u32 correrrthrsld[] = { | ||
256 | 0x11c, 0x120, 0x124, 0x128, | ||
257 | }; | ||
258 | |||
259 | #define RANK_ODD_ERR_THRSLD(reg) GET_BITFIELD(reg, 16, 30) | ||
260 | #define RANK_EVEN_ERR_THRSLD(reg) GET_BITFIELD(reg, 0, 14) | ||
261 | |||
262 | |||
263 | /* Device 17, function 0 */ | ||
264 | |||
265 | #define RANK_CFG_A 0x0328 | ||
266 | |||
267 | #define IS_RDIMM_ENABLED(reg) GET_BITFIELD(reg, 11, 11) | ||
268 | |||
269 | /* | ||
270 | * sbridge structs | ||
271 | */ | ||
272 | |||
273 | #define NUM_CHANNELS 4 | ||
274 | #define MAX_DIMMS 3 /* Max DIMMS per channel */ | ||
275 | |||
276 | struct sbridge_info { | ||
277 | u32 mcmtr; | ||
278 | }; | ||
279 | |||
280 | struct sbridge_channel { | ||
281 | u32 ranks; | ||
282 | u32 dimms; | ||
283 | }; | ||
284 | |||
285 | struct pci_id_descr { | ||
286 | int dev; | ||
287 | int func; | ||
288 | int dev_id; | ||
289 | int optional; | ||
290 | }; | ||
291 | |||
292 | struct pci_id_table { | ||
293 | const struct pci_id_descr *descr; | ||
294 | int n_devs; | ||
295 | }; | ||
296 | |||
297 | struct sbridge_dev { | ||
298 | struct list_head list; | ||
299 | u8 bus, mc; | ||
300 | u8 node_id, source_id; | ||
301 | struct pci_dev **pdev; | ||
302 | int n_devs; | ||
303 | struct mem_ctl_info *mci; | ||
304 | }; | ||
305 | |||
306 | struct sbridge_pvt { | ||
307 | struct pci_dev *pci_ta, *pci_ddrio, *pci_ras; | ||
308 | struct pci_dev *pci_sad0, *pci_sad1, *pci_ha0; | ||
309 | struct pci_dev *pci_br; | ||
310 | struct pci_dev *pci_tad[NUM_CHANNELS]; | ||
311 | |||
312 | struct sbridge_dev *sbridge_dev; | ||
313 | |||
314 | struct sbridge_info info; | ||
315 | struct sbridge_channel channel[NUM_CHANNELS]; | ||
316 | |||
317 | /* Memory type detection */ | ||
318 | bool is_mirrored, is_lockstep, is_close_pg; | ||
319 | |||
320 | /* Fifo double buffers */ | ||
321 | struct mce mce_entry[MCE_LOG_LEN]; | ||
322 | struct mce mce_outentry[MCE_LOG_LEN]; | ||
323 | |||
324 | /* Fifo in/out counters */ | ||
325 | unsigned mce_in, mce_out; | ||
326 | |||
327 | /* Count indicator to show errors not got */ | ||
328 | unsigned mce_overrun; | ||
329 | |||
330 | /* Memory description */ | ||
331 | u64 tolm, tohm; | ||
332 | }; | ||
333 | |||
334 | #define PCI_DESCR(device, function, device_id) \ | ||
335 | .dev = (device), \ | ||
336 | .func = (function), \ | ||
337 | .dev_id = (device_id) | ||
338 | |||
339 | static const struct pci_id_descr pci_dev_descr_sbridge[] = { | ||
340 | /* Processor Home Agent */ | ||
341 | { PCI_DESCR(14, 0, PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_HA0) }, | ||
342 | |||
343 | /* Memory controller */ | ||
344 | { PCI_DESCR(15, 0, PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TA) }, | ||
345 | { PCI_DESCR(15, 1, PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_RAS) }, | ||
346 | { PCI_DESCR(15, 2, PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD0) }, | ||
347 | { PCI_DESCR(15, 3, PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD1) }, | ||
348 | { PCI_DESCR(15, 4, PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD2) }, | ||
349 | { PCI_DESCR(15, 5, PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD3) }, | ||
350 | { PCI_DESCR(17, 0, PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_DDRIO) }, | ||
351 | |||
352 | /* System Address Decoder */ | ||
353 | { PCI_DESCR(12, 6, PCI_DEVICE_ID_INTEL_SBRIDGE_SAD0) }, | ||
354 | { PCI_DESCR(12, 7, PCI_DEVICE_ID_INTEL_SBRIDGE_SAD1) }, | ||
355 | |||
356 | /* Broadcast Registers */ | ||
357 | { PCI_DESCR(13, 6, PCI_DEVICE_ID_INTEL_SBRIDGE_BR) }, | ||
358 | }; | ||
359 | |||
360 | #define PCI_ID_TABLE_ENTRY(A) { .descr=A, .n_devs = ARRAY_SIZE(A) } | ||
361 | static const struct pci_id_table pci_dev_descr_sbridge_table[] = { | ||
362 | PCI_ID_TABLE_ENTRY(pci_dev_descr_sbridge), | ||
363 | {0,} /* 0 terminated list. */ | ||
364 | }; | ||
365 | |||
366 | /* | ||
367 | * pci_device_id table for which devices we are looking for | ||
368 | */ | ||
369 | static DEFINE_PCI_DEVICE_TABLE(sbridge_pci_tbl) = { | ||
370 | {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TA)}, | ||
371 | {0,} /* 0 terminated list. */ | ||
372 | }; | ||
373 | |||
374 | |||
375 | /**************************************************************************** | ||
376 | Ancillary status routines | ||
377 | ****************************************************************************/ | ||
378 | |||
379 | static inline int numrank(u32 mtr) | ||
380 | { | ||
381 | int ranks = (1 << RANK_CNT_BITS(mtr)); | ||
382 | |||
383 | if (ranks > 4) { | ||
384 | edac_dbg(0, "Invalid number of ranks: %d (max = 4) raw value = %x (%04x)\n", | ||
385 | ranks, (unsigned int)RANK_CNT_BITS(mtr), mtr); | ||
386 | return -EINVAL; | ||
387 | } | ||
388 | |||
389 | return ranks; | ||
390 | } | ||
391 | |||
392 | static inline int numrow(u32 mtr) | ||
393 | { | ||
394 | int rows = (RANK_WIDTH_BITS(mtr) + 12); | ||
395 | |||
396 | if (rows < 13 || rows > 18) { | ||
397 | edac_dbg(0, "Invalid number of rows: %d (should be between 14 and 17) raw value = %x (%04x)\n", | ||
398 | rows, (unsigned int)RANK_WIDTH_BITS(mtr), mtr); | ||
399 | return -EINVAL; | ||
400 | } | ||
401 | |||
402 | return 1 << rows; | ||
403 | } | ||
404 | |||
405 | static inline int numcol(u32 mtr) | ||
406 | { | ||
407 | int cols = (COL_WIDTH_BITS(mtr) + 10); | ||
408 | |||
409 | if (cols > 12) { | ||
410 | edac_dbg(0, "Invalid number of cols: %d (max = 4) raw value = %x (%04x)\n", | ||
411 | cols, (unsigned int)COL_WIDTH_BITS(mtr), mtr); | ||
412 | return -EINVAL; | ||
413 | } | ||
414 | |||
415 | return 1 << cols; | ||
416 | } | ||
417 | |||
418 | static struct sbridge_dev *get_sbridge_dev(u8 bus) | ||
419 | { | ||
420 | struct sbridge_dev *sbridge_dev; | ||
421 | |||
422 | list_for_each_entry(sbridge_dev, &sbridge_edac_list, list) { | ||
423 | if (sbridge_dev->bus == bus) | ||
424 | return sbridge_dev; | ||
425 | } | ||
426 | |||
427 | return NULL; | ||
428 | } | ||
429 | |||
430 | static struct sbridge_dev *alloc_sbridge_dev(u8 bus, | ||
431 | const struct pci_id_table *table) | ||
432 | { | ||
433 | struct sbridge_dev *sbridge_dev; | ||
434 | |||
435 | sbridge_dev = kzalloc(sizeof(*sbridge_dev), GFP_KERNEL); | ||
436 | if (!sbridge_dev) | ||
437 | return NULL; | ||
438 | |||
439 | sbridge_dev->pdev = kzalloc(sizeof(*sbridge_dev->pdev) * table->n_devs, | ||
440 | GFP_KERNEL); | ||
441 | if (!sbridge_dev->pdev) { | ||
442 | kfree(sbridge_dev); | ||
443 | return NULL; | ||
444 | } | ||
445 | |||
446 | sbridge_dev->bus = bus; | ||
447 | sbridge_dev->n_devs = table->n_devs; | ||
448 | list_add_tail(&sbridge_dev->list, &sbridge_edac_list); | ||
449 | |||
450 | return sbridge_dev; | ||
451 | } | ||
452 | |||
/* Unlink @sbridge_dev from the global list and release its memory. */
static void free_sbridge_dev(struct sbridge_dev *sbridge_dev)
{
	list_del(&sbridge_dev->list);
	kfree(sbridge_dev->pdev);	/* the pci_dev pointer array */
	kfree(sbridge_dev);
}
459 | |||
460 | /**************************************************************************** | ||
461 | Memory check routines | ||
462 | ****************************************************************************/ | ||
463 | static struct pci_dev *get_pdev_slot_func(u8 bus, unsigned slot, | ||
464 | unsigned func) | ||
465 | { | ||
466 | struct sbridge_dev *sbridge_dev = get_sbridge_dev(bus); | ||
467 | int i; | ||
468 | |||
469 | if (!sbridge_dev) | ||
470 | return NULL; | ||
471 | |||
472 | for (i = 0; i < sbridge_dev->n_devs; i++) { | ||
473 | if (!sbridge_dev->pdev[i]) | ||
474 | continue; | ||
475 | |||
476 | if (PCI_SLOT(sbridge_dev->pdev[i]->devfn) == slot && | ||
477 | PCI_FUNC(sbridge_dev->pdev[i]->devfn) == func) { | ||
478 | edac_dbg(1, "Associated %02x.%02x.%d with %p\n", | ||
479 | bus, slot, func, sbridge_dev->pdev[i]); | ||
480 | return sbridge_dev->pdev[i]; | ||
481 | } | ||
482 | } | ||
483 | |||
484 | return NULL; | ||
485 | } | ||
486 | |||
487 | /** | ||
488 | * check_if_ecc_is_active() - Checks if ECC is active | ||
489 | * bus: Device bus | ||
490 | */ | ||
491 | static int check_if_ecc_is_active(const u8 bus) | ||
492 | { | ||
493 | struct pci_dev *pdev = NULL; | ||
494 | u32 mcmtr; | ||
495 | |||
496 | pdev = get_pdev_slot_func(bus, 15, 0); | ||
497 | if (!pdev) { | ||
498 | sbridge_printk(KERN_ERR, "Couldn't find PCI device " | ||
499 | "%2x.%02d.%d!!!\n", | ||
500 | bus, 15, 0); | ||
501 | return -ENODEV; | ||
502 | } | ||
503 | |||
504 | pci_read_config_dword(pdev, MCMTR, &mcmtr); | ||
505 | if (!IS_ECC_ENABLED(mcmtr)) { | ||
506 | sbridge_printk(KERN_ERR, "ECC is disabled. Aborting\n"); | ||
507 | return -ENODEV; | ||
508 | } | ||
509 | return 0; | ||
510 | } | ||
511 | |||
512 | static int get_dimm_config(struct mem_ctl_info *mci) | ||
513 | { | ||
514 | struct sbridge_pvt *pvt = mci->pvt_info; | ||
515 | struct dimm_info *dimm; | ||
516 | unsigned i, j, banks, ranks, rows, cols, npages; | ||
517 | u64 size; | ||
518 | u32 reg; | ||
519 | enum edac_type mode; | ||
520 | enum mem_type mtype; | ||
521 | |||
522 | pci_read_config_dword(pvt->pci_br, SAD_TARGET, ®); | ||
523 | pvt->sbridge_dev->source_id = SOURCE_ID(reg); | ||
524 | |||
525 | pci_read_config_dword(pvt->pci_br, SAD_CONTROL, ®); | ||
526 | pvt->sbridge_dev->node_id = NODE_ID(reg); | ||
527 | edac_dbg(0, "mc#%d: Node ID: %d, source ID: %d\n", | ||
528 | pvt->sbridge_dev->mc, | ||
529 | pvt->sbridge_dev->node_id, | ||
530 | pvt->sbridge_dev->source_id); | ||
531 | |||
532 | pci_read_config_dword(pvt->pci_ras, RASENABLES, ®); | ||
533 | if (IS_MIRROR_ENABLED(reg)) { | ||
534 | edac_dbg(0, "Memory mirror is enabled\n"); | ||
535 | pvt->is_mirrored = true; | ||
536 | } else { | ||
537 | edac_dbg(0, "Memory mirror is disabled\n"); | ||
538 | pvt->is_mirrored = false; | ||
539 | } | ||
540 | |||
541 | pci_read_config_dword(pvt->pci_ta, MCMTR, &pvt->info.mcmtr); | ||
542 | if (IS_LOCKSTEP_ENABLED(pvt->info.mcmtr)) { | ||
543 | edac_dbg(0, "Lockstep is enabled\n"); | ||
544 | mode = EDAC_S8ECD8ED; | ||
545 | pvt->is_lockstep = true; | ||
546 | } else { | ||
547 | edac_dbg(0, "Lockstep is disabled\n"); | ||
548 | mode = EDAC_S4ECD4ED; | ||
549 | pvt->is_lockstep = false; | ||
550 | } | ||
551 | if (IS_CLOSE_PG(pvt->info.mcmtr)) { | ||
552 | edac_dbg(0, "address map is on closed page mode\n"); | ||
553 | pvt->is_close_pg = true; | ||
554 | } else { | ||
555 | edac_dbg(0, "address map is on open page mode\n"); | ||
556 | pvt->is_close_pg = false; | ||
557 | } | ||
558 | |||
559 | pci_read_config_dword(pvt->pci_ddrio, RANK_CFG_A, ®); | ||
560 | if (IS_RDIMM_ENABLED(reg)) { | ||
561 | /* FIXME: Can also be LRDIMM */ | ||
562 | edac_dbg(0, "Memory is registered\n"); | ||
563 | mtype = MEM_RDDR3; | ||
564 | } else { | ||
565 | edac_dbg(0, "Memory is unregistered\n"); | ||
566 | mtype = MEM_DDR3; | ||
567 | } | ||
568 | |||
569 | /* On all supported DDR3 DIMM types, there are 8 banks available */ | ||
570 | banks = 8; | ||
571 | |||
572 | for (i = 0; i < NUM_CHANNELS; i++) { | ||
573 | u32 mtr; | ||
574 | |||
575 | for (j = 0; j < ARRAY_SIZE(mtr_regs); j++) { | ||
576 | dimm = EDAC_DIMM_PTR(mci->layers, mci->dimms, mci->n_layers, | ||
577 | i, j, 0); | ||
578 | pci_read_config_dword(pvt->pci_tad[i], | ||
579 | mtr_regs[j], &mtr); | ||
580 | edac_dbg(4, "Channel #%d MTR%d = %x\n", i, j, mtr); | ||
581 | if (IS_DIMM_PRESENT(mtr)) { | ||
582 | pvt->channel[i].dimms++; | ||
583 | |||
584 | ranks = numrank(mtr); | ||
585 | rows = numrow(mtr); | ||
586 | cols = numcol(mtr); | ||
587 | |||
588 | /* DDR3 has 8 I/O banks */ | ||
589 | size = ((u64)rows * cols * banks * ranks) >> (20 - 3); | ||
590 | npages = MiB_TO_PAGES(size); | ||
591 | |||
592 | edac_dbg(0, "mc#%d: channel %d, dimm %d, %Ld Mb (%d pages) bank: %d, rank: %d, row: %#x, col: %#x\n", | ||
593 | pvt->sbridge_dev->mc, i, j, | ||
594 | size, npages, | ||
595 | banks, ranks, rows, cols); | ||
596 | |||
597 | dimm->nr_pages = npages; | ||
598 | dimm->grain = 32; | ||
599 | dimm->dtype = (banks == 8) ? DEV_X8 : DEV_X4; | ||
600 | dimm->mtype = mtype; | ||
601 | dimm->edac_mode = mode; | ||
602 | snprintf(dimm->label, sizeof(dimm->label), | ||
603 | "CPU_SrcID#%u_Channel#%u_DIMM#%u", | ||
604 | pvt->sbridge_dev->source_id, i, j); | ||
605 | } | ||
606 | } | ||
607 | } | ||
608 | |||
609 | return 0; | ||
610 | } | ||
611 | |||
/*
 * get_memory_layout - Dump the memory decode configuration to the log
 * @mci:	EDAC memory controller instance
 *
 * Reads TOLM/TOHM, the SAD rules with their interleave lists, the TAD
 * rules with their per-channel offsets, and the per-channel RIR ranges,
 * emitting each as a debug message. Purely informational: nothing is
 * written except caching pvt->tolm/pvt->tohm for later decode.
 */
static void get_memory_layout(const struct mem_ctl_info *mci)
{
	struct sbridge_pvt *pvt = mci->pvt_info;
	int i, j, k, n_sads, n_tads, sad_interl;
	u32 reg;
	u64 limit, prv = 0;
	u64 tmp_mb;
	u32 mb, kb;
	u32 rir_way;

	/*
	 * Step 1) Get TOLM/TOHM ranges
	 */

	/* Address range is 32:28 */
	pci_read_config_dword(pvt->pci_sad1, TOLM,
			      &reg);
	pvt->tolm = GET_TOLM(reg);
	tmp_mb = (1 + pvt->tolm) >> 20;

	mb = div_u64_rem(tmp_mb, 1000, &kb);
	edac_dbg(0, "TOLM: %u.%03u GB (0x%016Lx)\n", mb, kb, (u64)pvt->tolm);

	/* Address range is already 45:25 */
	pci_read_config_dword(pvt->pci_sad1, TOHM,
			      &reg);
	pvt->tohm = GET_TOHM(reg);
	tmp_mb = (1 + pvt->tohm) >> 20;

	mb = div_u64_rem(tmp_mb, 1000, &kb);
	edac_dbg(0, "TOHM: %u.%03u GB (0x%016Lx)", mb, kb, (u64)pvt->tohm);

	/*
	 * Step 2) Get SAD range and SAD Interleave list
	 * TAD registers contain the interleave wayness. However, it
	 * seems simpler to just discover it indirectly, with the
	 * algorithm below.
	 */
	prv = 0;
	for (n_sads = 0; n_sads < MAX_SAD; n_sads++) {
		/* SAD_LIMIT Address range is 45:26 */
		pci_read_config_dword(pvt->pci_sad0, dram_rule[n_sads],
				      &reg);
		limit = SAD_LIMIT(reg);

		if (!DRAM_RULE_ENABLE(reg))
			continue;

		/* Rule limits must be increasing; a non-increase ends the list */
		if (limit <= prv)
			break;

		tmp_mb = (limit + 1) >> 20;
		mb = div_u64_rem(tmp_mb, 1000, &kb);
		edac_dbg(0, "SAD#%d %s up to %u.%03u GB (0x%016Lx) Interleave: %s reg=0x%08x\n",
			 n_sads,
			 get_dram_attr(reg),
			 mb, kb,
			 ((u64)tmp_mb) << 20L,
			 INTERLEAVE_MODE(reg) ? "8:6" : "[8:6]XOR[18:16]",
			 reg);
		prv = limit;

		pci_read_config_dword(pvt->pci_sad0, interleave_list[n_sads],
				      &reg);
		sad_interl = sad_pkg(reg, 0);
		for (j = 0; j < 8; j++) {
			/* the list repeats once the first package id recurs */
			if (j > 0 && sad_interl == sad_pkg(reg, j))
				break;

			edac_dbg(0, "SAD#%d, interleave #%d: %d\n",
				 n_sads, j, sad_pkg(reg, j));
		}
	}

	/*
	 * Step 3) Get TAD range
	 */
	prv = 0;
	for (n_tads = 0; n_tads < MAX_TAD; n_tads++) {
		pci_read_config_dword(pvt->pci_ha0, tad_dram_rule[n_tads],
				      &reg);
		limit = TAD_LIMIT(reg);
		if (limit <= prv)
			break;
		tmp_mb = (limit + 1) >> 20;

		mb = div_u64_rem(tmp_mb, 1000, &kb);
		edac_dbg(0, "TAD#%d: up to %u.%03u GB (0x%016Lx), socket interleave %d, memory interleave %d, TGT: %d, %d, %d, %d, reg=0x%08x\n",
			 n_tads, mb, kb,
			 ((u64)tmp_mb) << 20L,
			 (u32)TAD_SOCK(reg),
			 (u32)TAD_CH(reg),
			 (u32)TAD_TGT0(reg),
			 (u32)TAD_TGT1(reg),
			 (u32)TAD_TGT2(reg),
			 (u32)TAD_TGT3(reg),
			 reg);
		prv = limit;
	}

	/*
	 * Step 4) Get TAD offsets, per each channel
	 */
	for (i = 0; i < NUM_CHANNELS; i++) {
		if (!pvt->channel[i].dimms)
			continue;
		for (j = 0; j < n_tads; j++) {
			pci_read_config_dword(pvt->pci_tad[i],
					      tad_ch_nilv_offset[j],
					      &reg);
			tmp_mb = TAD_OFFSET(reg) >> 20;
			mb = div_u64_rem(tmp_mb, 1000, &kb);
			edac_dbg(0, "TAD CH#%d, offset #%d: %u.%03u GB (0x%016Lx), reg=0x%08x\n",
				 i, j,
				 mb, kb,
				 ((u64)tmp_mb) << 20L,
				 reg);
		}
	}

	/*
	 * Step 5) Get RIR Wayness/Limit, per each channel
	 */
	for (i = 0; i < NUM_CHANNELS; i++) {
		if (!pvt->channel[i].dimms)
			continue;
		for (j = 0; j < MAX_RIR_RANGES; j++) {
			pci_read_config_dword(pvt->pci_tad[i],
					      rir_way_limit[j],
					      &reg);

			if (!IS_RIR_VALID(reg))
				continue;

			tmp_mb = RIR_LIMIT(reg) >> 20;
			rir_way = 1 << RIR_WAY(reg);
			mb = div_u64_rem(tmp_mb, 1000, &kb);
			edac_dbg(0, "CH#%d RIR#%d, limit: %u.%03u GB (0x%016Lx), way: %d, reg=0x%08x\n",
				 i, j,
				 mb, kb,
				 ((u64)tmp_mb) << 20L,
				 rir_way,
				 reg);

			for (k = 0; k < rir_way; k++) {
				pci_read_config_dword(pvt->pci_tad[i],
						      rir_offset[j][k],
						      &reg);
				tmp_mb = RIR_OFFSET(reg) << 6;

				mb = div_u64_rem(tmp_mb, 1000, &kb);
				edac_dbg(0, "CH#%d RIR#%d INTL#%d, offset %u.%03u GB (0x%016Lx), tgt: %d, reg=0x%08x\n",
					 i, j, k,
					 mb, kb,
					 ((u64)tmp_mb) << 20L,
					 (u32)RIR_RNK_TGT(reg),
					 reg);
			}
		}
	}
}
773 | |||
774 | struct mem_ctl_info *get_mci_for_node_id(u8 node_id) | ||
775 | { | ||
776 | struct sbridge_dev *sbridge_dev; | ||
777 | |||
778 | list_for_each_entry(sbridge_dev, &sbridge_edac_list, list) { | ||
779 | if (sbridge_dev->node_id == node_id) | ||
780 | return sbridge_dev->mci; | ||
781 | } | ||
782 | return NULL; | ||
783 | } | ||
784 | |||
/*
 * get_memory_error_data - Decode a faulting system address down to
 *			   socket, channel(s) and rank
 * @mci:	memory controller the error was reported against
 * @addr:	faulting system address (from the machine check)
 * @socket:	output - CPU socket that owns the address
 * @channel_mask: output - bitmask of the channel(s) involved
 * @rank:	output - rank target within the channel
 * @area_type:	output - DRAM rule attribute string for the region
 * @msg:	output - human-readable failure description
 *
 * Reverses the SAD -> TAD -> RIR address-decode pipeline. Returns 0 on
 * success, or -EINVAL (with @msg filled) when @addr cannot be decoded.
 */
static int get_memory_error_data(struct mem_ctl_info *mci,
				 u64 addr,
				 u8 *socket,
				 long *channel_mask,
				 u8 *rank,
				 char **area_type, char *msg)
{
	struct mem_ctl_info *new_mci;
	struct sbridge_pvt *pvt = mci->pvt_info;
	int n_rir, n_sads, n_tads, sad_way, sck_xch;
	int sad_interl, idx, base_ch;
	int interleave_mode;
	unsigned sad_interleave[MAX_INTERLEAVE];
	u32 reg;
	u8 ch_way, sck_way;
	u32 tad_offset;
	u32 rir_way;
	u32 mb, kb;
	u64 ch_addr, offset, limit, prv = 0;


	/*
	 * Step 0) Check if the address is at special memory ranges
	 * The check below is probably enough to fill all cases where
	 * the error is not inside a memory, except for the legacy
	 * range (e. g. VGA addresses). It is unlikely, however, that the
	 * memory controller would generate an error on that range.
	 */
	if ((addr > (u64) pvt->tolm) && (addr < (1LL << 32))) {
		sprintf(msg, "Error at TOLM area, on addr 0x%08Lx", addr);
		return -EINVAL;
	}
	if (addr >= (u64)pvt->tohm) {
		sprintf(msg, "Error at MMIOH area, on addr 0x%016Lx", addr);
		return -EINVAL;
	}

	/*
	 * Step 1) Get socket
	 * Scan the SAD rules for the first enabled rule covering @addr.
	 */
	for (n_sads = 0; n_sads < MAX_SAD; n_sads++) {
		pci_read_config_dword(pvt->pci_sad0, dram_rule[n_sads],
				      &reg);

		if (!DRAM_RULE_ENABLE(reg))
			continue;

		limit = SAD_LIMIT(reg);
		/* limits must be strictly increasing */
		if (limit <= prv) {
			sprintf(msg, "Can't discover the memory socket");
			return -EINVAL;
		}
		if (addr <= limit)
			break;
		prv = limit;
	}
	if (n_sads == MAX_SAD) {
		sprintf(msg, "Can't discover the memory socket");
		return -EINVAL;
	}
	*area_type = get_dram_attr(reg);
	interleave_mode = INTERLEAVE_MODE(reg);

	/* Collect the interleave targets until the first one repeats */
	pci_read_config_dword(pvt->pci_sad0, interleave_list[n_sads],
			      &reg);
	sad_interl = sad_pkg(reg, 0);
	for (sad_way = 0; sad_way < 8; sad_way++) {
		if (sad_way > 0 && sad_interl == sad_pkg(reg, sad_way))
			break;
		sad_interleave[sad_way] = sad_pkg(reg, sad_way);
		edac_dbg(0, "SAD interleave #%d: %d\n",
			 sad_way, sad_interleave[sad_way]);
	}
	edac_dbg(0, "mc#%d: Error detected on SAD#%d: address 0x%016Lx < 0x%016Lx, Interleave [%d:6]%s\n",
		 pvt->sbridge_dev->mc,
		 n_sads,
		 addr,
		 limit,
		 sad_way + 7,
		 interleave_mode ? "" : "XOR[18:16]");
	if (interleave_mode)
		idx = ((addr >> 6) ^ (addr >> 16)) & 7;
	else
		idx = (addr >> 6) & 7;
	/* reduce the index to the discovered wayness */
	switch (sad_way) {
	case 1:
		idx = 0;
		break;
	case 2:
		idx = idx & 1;
		break;
	case 4:
		idx = idx & 3;
		break;
	case 8:
		break;
	default:
		sprintf(msg, "Can't discover socket interleave");
		return -EINVAL;
	}
	*socket = sad_interleave[idx];
	edac_dbg(0, "SAD interleave index: %d (wayness %d) = CPU socket %d\n",
		 idx, sad_way, *socket);

	/*
	 * Move to the proper node structure, in order to access the
	 * right PCI registers
	 */
	new_mci = get_mci_for_node_id(*socket);
	if (!new_mci) {
		sprintf(msg, "Struct for socket #%u wasn't initialized",
			*socket);
		return -EINVAL;
	}
	mci = new_mci;
	pvt = mci->pvt_info;

	/*
	 * Step 2) Get memory channel
	 */
	prv = 0;
	for (n_tads = 0; n_tads < MAX_TAD; n_tads++) {
		pci_read_config_dword(pvt->pci_ha0, tad_dram_rule[n_tads],
				      &reg);
		limit = TAD_LIMIT(reg);
		if (limit <= prv) {
			sprintf(msg, "Can't discover the memory channel");
			return -EINVAL;
		}
		if (addr <= limit)
			break;
		prv = limit;
	}
	ch_way = TAD_CH(reg) + 1;
	sck_way = TAD_SOCK(reg) + 1;
	/*
	 * FIXME: Is it right to always use channel 0 for offsets?
	 */
	pci_read_config_dword(pvt->pci_tad[0],
			      tad_ch_nilv_offset[n_tads],
			      &tad_offset);

	if (ch_way == 3)
		idx = addr >> 6;
	else
		idx = addr >> (6 + sck_way);
	idx = idx % ch_way;

	/*
	 * FIXME: Shouldn't we use CHN_IDX_OFFSET() here, when ch_way == 3 ???
	 */
	switch (idx) {
	case 0:
		base_ch = TAD_TGT0(reg);
		break;
	case 1:
		base_ch = TAD_TGT1(reg);
		break;
	case 2:
		base_ch = TAD_TGT2(reg);
		break;
	case 3:
		base_ch = TAD_TGT3(reg);
		break;
	default:
		sprintf(msg, "Can't discover the TAD target");
		return -EINVAL;
	}
	*channel_mask = 1 << base_ch;

	if (pvt->is_mirrored) {
		/* mirroring pairs channel ch with ch+2 */
		*channel_mask |= 1 << ((base_ch + 2) % 4);
		switch(ch_way) {
		case 2:
		case 4:
			/*
			 * NOTE(review): '<<' binds looser than '*', so this
			 * evaluates as 1 << (sck_way * (ch_way >> 1)) —
			 * confirm that is the intended wayness product.
			 */
			sck_xch = 1 << sck_way * (ch_way >> 1);
			break;
		default:
			sprintf(msg, "Invalid mirror set. Can't decode addr");
			return -EINVAL;
		}
	} else
		sck_xch = (1 << sck_way) * ch_way;

	/* lockstep pairs channel ch with ch+1 */
	if (pvt->is_lockstep)
		*channel_mask |= 1 << ((base_ch + 1) % 4);

	offset = TAD_OFFSET(tad_offset);

	edac_dbg(0, "TAD#%d: address 0x%016Lx < 0x%016Lx, socket interleave %d, channel interleave %d (offset 0x%08Lx), index %d, base ch: %d, ch mask: 0x%02lx\n",
		 n_tads,
		 addr,
		 limit,
		 (u32)TAD_SOCK(reg),
		 ch_way,
		 offset,
		 idx,
		 base_ch,
		 *channel_mask);

	/* Calculate channel address */
	/* Remove the TAD offset */

	if (offset > addr) {
		sprintf(msg, "Can't calculate ch addr: TAD offset 0x%08Lx is too high for addr 0x%08Lx!",
			offset, addr);
		return -EINVAL;
	}
	addr -= offset;
	/* Store the low bits [0:6] of the addr */
	ch_addr = addr & 0x7f;
	/* Remove socket wayness and remove 6 bits */
	addr >>= 6;
	addr = div_u64(addr, sck_xch);
#if 0
	/* Divide by channel way */
	addr = addr / ch_way;
#endif
	/* Recover the last 6 bits */
	ch_addr |= addr << 6;

	/*
	 * Step 3) Decode rank
	 * Find the RIR range covering the channel address.
	 */
	for (n_rir = 0; n_rir < MAX_RIR_RANGES; n_rir++) {
		pci_read_config_dword(pvt->pci_tad[base_ch],
				      rir_way_limit[n_rir],
				      &reg);

		if (!IS_RIR_VALID(reg))
			continue;

		limit = RIR_LIMIT(reg);
		mb = div_u64_rem(limit >> 20, 1000, &kb);
		edac_dbg(0, "RIR#%d, limit: %u.%03u GB (0x%016Lx), way: %d\n",
			 n_rir,
			 mb, kb,
			 limit,
			 1 << RIR_WAY(reg));
		if (ch_addr <= limit)
			break;
	}
	if (n_rir == MAX_RIR_RANGES) {
		sprintf(msg, "Can't discover the memory rank for ch addr 0x%08Lx",
			ch_addr);
		return -EINVAL;
	}
	rir_way = RIR_WAY(reg);
	if (pvt->is_close_pg)
		idx = (ch_addr >> 6);
	else
		idx = (ch_addr >> 13);	/* FIXME: Datasheet says to shift by 15 */
	idx %= 1 << rir_way;

	pci_read_config_dword(pvt->pci_tad[base_ch],
			      rir_offset[n_rir][idx],
			      &reg);
	*rank = RIR_RNK_TGT(reg);

	edac_dbg(0, "RIR#%d: channel address 0x%08Lx < 0x%08Lx, RIR interleave %d, index %d\n",
		 n_rir,
		 ch_addr,
		 limit,
		 rir_way,
		 idx);

	return 0;
}
1053 | |||
1054 | /**************************************************************************** | ||
1055 | Device initialization routines: put/get, init/exit | ||
1056 | ****************************************************************************/ | ||
1057 | |||
1058 | /* | ||
1059 | * sbridge_put_all_devices 'put' all the devices that we have | ||
1060 | * reserved via 'get' | ||
1061 | */ | ||
1062 | static void sbridge_put_devices(struct sbridge_dev *sbridge_dev) | ||
1063 | { | ||
1064 | int i; | ||
1065 | |||
1066 | edac_dbg(0, "\n"); | ||
1067 | for (i = 0; i < sbridge_dev->n_devs; i++) { | ||
1068 | struct pci_dev *pdev = sbridge_dev->pdev[i]; | ||
1069 | if (!pdev) | ||
1070 | continue; | ||
1071 | edac_dbg(0, "Removing dev %02x:%02x.%d\n", | ||
1072 | pdev->bus->number, | ||
1073 | PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn)); | ||
1074 | pci_dev_put(pdev); | ||
1075 | } | ||
1076 | } | ||
1077 | |||
1078 | static void sbridge_put_all_devices(void) | ||
1079 | { | ||
1080 | struct sbridge_dev *sbridge_dev, *tmp; | ||
1081 | |||
1082 | list_for_each_entry_safe(sbridge_dev, tmp, &sbridge_edac_list, list) { | ||
1083 | sbridge_put_devices(sbridge_dev); | ||
1084 | free_sbridge_dev(sbridge_dev); | ||
1085 | } | ||
1086 | } | ||
1087 | |||
/*
 * sbridge_get_all_devices	Find and perform 'get' operation on the MCH's
 *				device/functions we want to reference for this driver
 *
 * Need to 'get' device 16 func 1 and func 2
 */

/*
 * sbridge_get_onedevice - Reserve the next PCI device matching entry
 * @devno of @table.
 * @prev:	last device found for this id (NULL starts the scan);
 *		updated to the device found, or NULL when the scan ends.
 * @num_mc:	incremented when a device on a new bus (socket) is found.
 *
 * Returns 0 on success (or when an optional device is simply absent),
 * -ENODEV when a required device is missing or duplicated/misplaced,
 * -ENOMEM on allocation failure.
 */
static int sbridge_get_onedevice(struct pci_dev **prev,
				 u8 *num_mc,
				 const struct pci_id_table *table,
				 const unsigned devno)
{
	struct sbridge_dev *sbridge_dev;
	const struct pci_id_descr *dev_descr = &table->descr[devno];

	struct pci_dev *pdev = NULL;
	u8 bus = 0;

	sbridge_printk(KERN_INFO,
		"Seeking for: dev %02x.%d PCI ID %04x:%04x\n",
		dev_descr->dev, dev_descr->func,
		PCI_VENDOR_ID_INTEL, dev_descr->dev_id);

	pdev = pci_get_device(PCI_VENDOR_ID_INTEL,
			      dev_descr->dev_id, *prev);

	if (!pdev) {
		/* A previous match exists: this is just the end of the scan */
		if (*prev) {
			*prev = pdev;
			return 0;
		}

		if (dev_descr->optional)
			return 0;

		/* First device of the table missing: caller skips the table */
		if (devno == 0)
			return -ENODEV;

		sbridge_printk(KERN_INFO,
			"Device not found: dev %02x.%d PCI ID %04x:%04x\n",
			dev_descr->dev, dev_descr->func,
			PCI_VENDOR_ID_INTEL, dev_descr->dev_id);

		/* End of list, leave */
		return -ENODEV;
	}
	bus = pdev->bus->number;

	/* Devices are grouped per bus: one sbridge_dev per socket */
	sbridge_dev = get_sbridge_dev(bus);
	if (!sbridge_dev) {
		sbridge_dev = alloc_sbridge_dev(bus, table);
		if (!sbridge_dev) {
			pci_dev_put(pdev);
			return -ENOMEM;
		}
		(*num_mc)++;
	}

	if (sbridge_dev->pdev[devno]) {
		sbridge_printk(KERN_ERR,
			"Duplicated device for "
			"dev %02x:%d.%d PCI ID %04x:%04x\n",
			bus, dev_descr->dev, dev_descr->func,
			PCI_VENDOR_ID_INTEL, dev_descr->dev_id);
		pci_dev_put(pdev);
		return -ENODEV;
	}

	/* stored here; sbridge_put_devices() will drop the reference */
	sbridge_dev->pdev[devno] = pdev;

	/* Sanity check */
	if (unlikely(PCI_SLOT(pdev->devfn) != dev_descr->dev ||
			PCI_FUNC(pdev->devfn) != dev_descr->func)) {
		sbridge_printk(KERN_ERR,
			"Device PCI ID %04x:%04x "
			"has dev %02x:%d.%d instead of dev %02x:%02x.%d\n",
			PCI_VENDOR_ID_INTEL, dev_descr->dev_id,
			bus, PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn),
			bus, dev_descr->dev, dev_descr->func);
		return -ENODEV;
	}

	/* Be sure that the device is enabled */
	if (unlikely(pci_enable_device(pdev) < 0)) {
		sbridge_printk(KERN_ERR,
			"Couldn't enable "
			"dev %02x:%d.%d PCI ID %04x:%04x\n",
			bus, dev_descr->dev, dev_descr->func,
			PCI_VENDOR_ID_INTEL, dev_descr->dev_id);
		return -ENODEV;
	}

	edac_dbg(0, "Detected dev %02x:%d.%d PCI ID %04x:%04x\n",
		 bus, dev_descr->dev, dev_descr->func,
		 PCI_VENDOR_ID_INTEL, dev_descr->dev_id);

	/*
	 * As stated on drivers/pci/search.c, the reference count for
	 * @from is always decremented if it is not %NULL. So, as we need
	 * to get all devices up to null, we need to do a get for the device
	 */
	pci_dev_get(pdev);

	*prev = pdev;

	return 0;
}
1194 | |||
/*
 * sbridge_get_all_devices - Scan every pci_id_table and reserve all
 * matching PCI devices, grouping them per bus (socket).
 * @num_mc:	incremented once per memory controller (socket) found.
 *
 * Returns 0 on success. On failure all already-reserved devices are
 * put back and -ENODEV is returned.
 */
static int sbridge_get_all_devices(u8 *num_mc)
{
	int i, rc;
	struct pci_dev *pdev = NULL;
	const struct pci_id_table *table = pci_dev_descr_sbridge_table;

	while (table && table->descr) {
		for (i = 0; i < table->n_devs; i++) {
			pdev = NULL;
			/* loop until every instance of this id is reserved */
			do {
				rc = sbridge_get_onedevice(&pdev, num_mc,
							   table, i);
				if (rc < 0) {
					if (i == 0) {
						/*
						 * First device of the table is
						 * absent: skip the whole table
						 * (forces the for loop to end).
						 */
						i = table->n_devs;
						break;
					}
					sbridge_put_all_devices();
					return -ENODEV;
				}
			} while (pdev);
		}
		table++;
	}

	return 0;
}
1222 | |||
1223 | static int mci_bind_devs(struct mem_ctl_info *mci, | ||
1224 | struct sbridge_dev *sbridge_dev) | ||
1225 | { | ||
1226 | struct sbridge_pvt *pvt = mci->pvt_info; | ||
1227 | struct pci_dev *pdev; | ||
1228 | int i, func, slot; | ||
1229 | |||
1230 | for (i = 0; i < sbridge_dev->n_devs; i++) { | ||
1231 | pdev = sbridge_dev->pdev[i]; | ||
1232 | if (!pdev) | ||
1233 | continue; | ||
1234 | slot = PCI_SLOT(pdev->devfn); | ||
1235 | func = PCI_FUNC(pdev->devfn); | ||
1236 | switch (slot) { | ||
1237 | case 12: | ||
1238 | switch (func) { | ||
1239 | case 6: | ||
1240 | pvt->pci_sad0 = pdev; | ||
1241 | break; | ||
1242 | case 7: | ||
1243 | pvt->pci_sad1 = pdev; | ||
1244 | break; | ||
1245 | default: | ||
1246 | goto error; | ||
1247 | } | ||
1248 | break; | ||
1249 | case 13: | ||
1250 | switch (func) { | ||
1251 | case 6: | ||
1252 | pvt->pci_br = pdev; | ||
1253 | break; | ||
1254 | default: | ||
1255 | goto error; | ||
1256 | } | ||
1257 | break; | ||
1258 | case 14: | ||
1259 | switch (func) { | ||
1260 | case 0: | ||
1261 | pvt->pci_ha0 = pdev; | ||
1262 | break; | ||
1263 | default: | ||
1264 | goto error; | ||
1265 | } | ||
1266 | break; | ||
1267 | case 15: | ||
1268 | switch (func) { | ||
1269 | case 0: | ||
1270 | pvt->pci_ta = pdev; | ||
1271 | break; | ||
1272 | case 1: | ||
1273 | pvt->pci_ras = pdev; | ||
1274 | break; | ||
1275 | case 2: | ||
1276 | case 3: | ||
1277 | case 4: | ||
1278 | case 5: | ||
1279 | pvt->pci_tad[func - 2] = pdev; | ||
1280 | break; | ||
1281 | default: | ||
1282 | goto error; | ||
1283 | } | ||
1284 | break; | ||
1285 | case 17: | ||
1286 | switch (func) { | ||
1287 | case 0: | ||
1288 | pvt->pci_ddrio = pdev; | ||
1289 | break; | ||
1290 | default: | ||
1291 | goto error; | ||
1292 | } | ||
1293 | break; | ||
1294 | default: | ||
1295 | goto error; | ||
1296 | } | ||
1297 | |||
1298 | edac_dbg(0, "Associated PCI %02x.%02d.%d with dev = %p\n", | ||
1299 | sbridge_dev->bus, | ||
1300 | PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn), | ||
1301 | pdev); | ||
1302 | } | ||
1303 | |||
1304 | /* Check if everything were registered */ | ||
1305 | if (!pvt->pci_sad0 || !pvt->pci_sad1 || !pvt->pci_ha0 || | ||
1306 | !pvt-> pci_tad || !pvt->pci_ras || !pvt->pci_ta || | ||
1307 | !pvt->pci_ddrio) | ||
1308 | goto enodev; | ||
1309 | |||
1310 | for (i = 0; i < NUM_CHANNELS; i++) { | ||
1311 | if (!pvt->pci_tad[i]) | ||
1312 | goto enodev; | ||
1313 | } | ||
1314 | return 0; | ||
1315 | |||
1316 | enodev: | ||
1317 | sbridge_printk(KERN_ERR, "Some needed devices are missing\n"); | ||
1318 | return -ENODEV; | ||
1319 | |||
1320 | error: | ||
1321 | sbridge_printk(KERN_ERR, "Device %d, function %d " | ||
1322 | "is out of the expected range\n", | ||
1323 | slot, func); | ||
1324 | return -EINVAL; | ||
1325 | } | ||
1326 | |||
1327 | /**************************************************************************** | ||
1328 | Error check routines | ||
1329 | ****************************************************************************/ | ||
1330 | |||
/*
 * While Sandy Bridge has error count registers, the SMI BIOS reads values
 * from them and resets the counters, so they are not reliable for the OS
 * to read.  We have no option but to trust whatever MCE is telling us
 * about the errors.
 *
 * sbridge_mce_output_error() - decode one machine-check record and report
 * it via the EDAC core.
 * @mci:	memory controller this record was queued on; rebound below to
 *		the controller of the socket actually decoded from the address.
 * @m:		machine check record to decode.
 */
static void sbridge_mce_output_error(struct mem_ctl_info *mci,
				     const struct mce *m)
{
	struct mem_ctl_info *new_mci;
	/* NOTE(review): pvt is assigned (twice) but never read below */
	struct sbridge_pvt *pvt = mci->pvt_info;
	enum hw_event_mc_err_type tp_event;
	char *type, *optype, msg[256];
	bool ripv = GET_BITFIELD(m->mcgstatus, 0, 0);	/* MCG_STATUS.RIPV */
	bool overflow = GET_BITFIELD(m->status, 62, 62);
	bool uncorrected_error = GET_BITFIELD(m->status, 61, 61);
	bool recoverable = GET_BITFIELD(m->status, 56, 56);
	u32 core_err_cnt = GET_BITFIELD(m->status, 38, 52);
	u32 mscod = GET_BITFIELD(m->status, 16, 31);	/* model-specific code */
	u32 errcode = GET_BITFIELD(m->status, 0, 15);	/* MCA error code */
	u32 channel = GET_BITFIELD(m->status, 0, 3);
	u32 optypenum = GET_BITFIELD(m->status, 4, 6);
	long channel_mask, first_channel;
	u8 rank, socket;
	int rc, dimm;
	char *area_type = NULL;

	/*
	 * Severity classification from the status bits.
	 * NOTE(review): "type" is assigned here but never used afterwards.
	 */
	if (uncorrected_error) {
		if (ripv) {
			type = "FATAL";
			tp_event = HW_EVENT_ERR_FATAL;
		} else {
			type = "NON_FATAL";
			tp_event = HW_EVENT_ERR_UNCORRECTED;
		}
	} else {
		type = "CORRECTED";
		tp_event = HW_EVENT_ERR_CORRECTED;
	}

	/*
	 * According to Table 15-9 of the Intel Architecture spec vol 3A,
	 * memory errors should fit in this mask:
	 *	000f 0000 1mmm cccc (binary)
	 * where:
	 *	f = Correction Report Filtering Bit. If 1, subsequent errors
	 *	    won't be shown
	 *	mmm = error type
	 *	cccc = channel
	 * If the mask doesn't match, report an error to the parsing logic
	 */
	if (! ((errcode & 0xef80) == 0x80)) {
		optype = "Can't parse: it is not a mem";
	} else {
		switch (optypenum) {
		case 0:
			optype = "generic undef request error";
			break;
		case 1:
			optype = "memory read error";
			break;
		case 2:
			optype = "memory write error";
			break;
		case 3:
			optype = "addr/cmd error";
			break;
		case 4:
			optype = "memory scrubbing error";
			break;
		default:
			optype = "reserved";
			break;
		}
	}

	/* Translate the system address into socket/channel mask/rank */
	rc = get_memory_error_data(mci, m->addr, &socket,
				   &channel_mask, &rank, &area_type, msg);
	if (rc < 0)
		goto err_parsing;
	new_mci = get_mci_for_node_id(socket);
	if (!new_mci) {
		strcpy(msg, "Error: socket got corrupted!");
		goto err_parsing;
	}
	/* Report against the controller that owns the decoded socket */
	mci = new_mci;
	pvt = mci->pvt_info;

	/* NOTE(review): first_channel is computed but never used below */
	first_channel = find_first_bit(&channel_mask, NUM_CHANNELS);

	/* Four ranks per DIMM slot: ranks 0-3 -> dimm 0, 4-7 -> 1, rest -> 2 */
	if (rank < 4)
		dimm = 0;
	else if (rank < 8)
		dimm = 1;
	else
		dimm = 2;


	/*
	 * FIXME: On some memory configurations (mirror, lockstep), the
	 * Memory Controller can't point the error to a single DIMM. The
	 * EDAC core should be handling the channel mask, in order to point
	 * to the group of dimm's where the error may be happening.
	 */
	snprintf(msg, sizeof(msg),
		 "%s%s area:%s err_code:%04x:%04x socket:%d channel_mask:%ld rank:%d",
		 overflow ? " OVERFLOW" : "",
		 (uncorrected_error && recoverable) ? " recoverable" : "",
		 area_type,
		 mscod, errcode,
		 socket,
		 channel_mask,
		 rank);

	edac_dbg(0, "%s\n", msg);

	/* FIXME: need support for channel mask */

	/* Call the helper to output message */
	edac_mc_handle_error(tp_event, mci, core_err_cnt,
			     m->addr >> PAGE_SHIFT, m->addr & ~PAGE_MASK, 0,
			     channel, dimm, -1,
			     optype, msg);
	return;
err_parsing:
	/* msg holds the failure text (from the decoder or the strcpy above) */
	edac_mc_handle_error(tp_event, mci, core_err_cnt, 0, 0, 0,
			     -1, -1, -1,
			     msg, "");

}
1461 | |||
1462 | /* | ||
1463 | * sbridge_check_error Retrieve and process errors reported by the | ||
1464 | * hardware. Called by the Core module. | ||
1465 | */ | ||
1466 | static void sbridge_check_error(struct mem_ctl_info *mci) | ||
1467 | { | ||
1468 | struct sbridge_pvt *pvt = mci->pvt_info; | ||
1469 | int i; | ||
1470 | unsigned count = 0; | ||
1471 | struct mce *m; | ||
1472 | |||
1473 | /* | ||
1474 | * MCE first step: Copy all mce errors into a temporary buffer | ||
1475 | * We use a double buffering here, to reduce the risk of | ||
1476 | * loosing an error. | ||
1477 | */ | ||
1478 | smp_rmb(); | ||
1479 | count = (pvt->mce_out + MCE_LOG_LEN - pvt->mce_in) | ||
1480 | % MCE_LOG_LEN; | ||
1481 | if (!count) | ||
1482 | return; | ||
1483 | |||
1484 | m = pvt->mce_outentry; | ||
1485 | if (pvt->mce_in + count > MCE_LOG_LEN) { | ||
1486 | unsigned l = MCE_LOG_LEN - pvt->mce_in; | ||
1487 | |||
1488 | memcpy(m, &pvt->mce_entry[pvt->mce_in], sizeof(*m) * l); | ||
1489 | smp_wmb(); | ||
1490 | pvt->mce_in = 0; | ||
1491 | count -= l; | ||
1492 | m += l; | ||
1493 | } | ||
1494 | memcpy(m, &pvt->mce_entry[pvt->mce_in], sizeof(*m) * count); | ||
1495 | smp_wmb(); | ||
1496 | pvt->mce_in += count; | ||
1497 | |||
1498 | smp_rmb(); | ||
1499 | if (pvt->mce_overrun) { | ||
1500 | sbridge_printk(KERN_ERR, "Lost %d memory errors\n", | ||
1501 | pvt->mce_overrun); | ||
1502 | smp_wmb(); | ||
1503 | pvt->mce_overrun = 0; | ||
1504 | } | ||
1505 | |||
1506 | /* | ||
1507 | * MCE second step: parse errors and display | ||
1508 | */ | ||
1509 | for (i = 0; i < count; i++) | ||
1510 | sbridge_mce_output_error(mci, &pvt->mce_outentry[i]); | ||
1511 | } | ||
1512 | |||
/*
 * sbridge_mce_check_error - MCE notifier callback: queue memory errors.
 *
 * Replicates the mcelog routine to get errors: it only queues the MCE
 * record in a per-controller ring buffer and returns; the error itself
 * is handled later by sbridge_check_error().
 *
 * WARNING: As this routine should be called at NMI time, extra care should
 * be taken to avoid deadlocks, and to be as fast as possible.
 */
static int sbridge_mce_check_error(struct notifier_block *nb, unsigned long val,
				   void *data)
{
	struct mce *mce = (struct mce *)data;
	struct mem_ctl_info *mci;
	struct sbridge_pvt *pvt;

	mci = get_mci_for_node_id(mce->socketid);
	if (!mci)
		return NOTIFY_BAD;
	pvt = mci->pvt_info;

	/*
	 * Just let mcelog handle it if the error is
	 * outside the memory controller. A memory error
	 * is indicated by bit 7 = 1 and bits = 8-11,13-15 = 0.
	 * bit 12 has an special meaning.
	 */
	if ((mce->status & 0xefff) >> 7 != 1)
		return NOTIFY_DONE;

	/*
	 * NOTE(review): these printk()s carry no KERN_* level and run on
	 * an NMI-time path; consider a leveled/ratelimited variant.
	 */
	printk("sbridge: HANDLING MCE MEMORY ERROR\n");

	printk("CPU %d: Machine Check Exception: %Lx Bank %d: %016Lx\n",
	       mce->extcpu, mce->mcgstatus, mce->bank, mce->status);
	printk("TSC %llx ", mce->tsc);
	printk("ADDR %llx ", mce->addr);
	printk("MISC %llx ", mce->misc);

	printk("PROCESSOR %u:%x TIME %llu SOCKET %u APIC %x\n",
	       mce->cpuvendor, mce->cpuid, mce->time,
	       mce->socketid, mce->apicid);

	/* Only handle if it is the right mc controller */
	if (cpu_data(mce->cpu).phys_proc_id != pvt->sbridge_dev->mc)
		return NOTIFY_DONE;

	/* Drop the record (and count the overrun) if the ring is full */
	smp_rmb();
	if ((pvt->mce_out + 1) % MCE_LOG_LEN == pvt->mce_in) {
		smp_wmb();
		pvt->mce_overrun++;
		return NOTIFY_DONE;
	}

	/* Copy memory error at the ringbuffer */
	memcpy(&pvt->mce_entry[pvt->mce_out], mce, sizeof(*mce));
	smp_wmb();
	pvt->mce_out = (pvt->mce_out + 1) % MCE_LOG_LEN;

	/* Handle fatal errors immediately */
	if (mce->mcgstatus & 1)
		sbridge_check_error(mci);

	/* Advise mcelog that the error was handled */
	return NOTIFY_STOP;
}
1577 | |||
/* Hooked into the x86 MCE decode chain by sbridge_init() */
static struct notifier_block sbridge_mce_dec = {
	.notifier_call      = sbridge_mce_check_error,
};
1581 | |||
1582 | /**************************************************************************** | ||
1583 | EDAC register/unregister logic | ||
1584 | ****************************************************************************/ | ||
1585 | |||
1586 | static void sbridge_unregister_mci(struct sbridge_dev *sbridge_dev) | ||
1587 | { | ||
1588 | struct mem_ctl_info *mci = sbridge_dev->mci; | ||
1589 | struct sbridge_pvt *pvt; | ||
1590 | |||
1591 | if (unlikely(!mci || !mci->pvt_info)) { | ||
1592 | edac_dbg(0, "MC: dev = %p\n", &sbridge_dev->pdev[0]->dev); | ||
1593 | |||
1594 | sbridge_printk(KERN_ERR, "Couldn't find mci handler\n"); | ||
1595 | return; | ||
1596 | } | ||
1597 | |||
1598 | pvt = mci->pvt_info; | ||
1599 | |||
1600 | edac_dbg(0, "MC: mci = %p, dev = %p\n", | ||
1601 | mci, &sbridge_dev->pdev[0]->dev); | ||
1602 | |||
1603 | /* Remove MC sysfs nodes */ | ||
1604 | edac_mc_del_mc(mci->pdev); | ||
1605 | |||
1606 | edac_dbg(1, "%s: free mci struct\n", mci->ctl_name); | ||
1607 | kfree(mci->ctl_name); | ||
1608 | edac_mc_free(mci); | ||
1609 | sbridge_dev->mci = NULL; | ||
1610 | } | ||
1611 | |||
/*
 * sbridge_register_mci - Allocate and register an EDAC MC for one socket.
 * @sbridge_dev:	aggregate of the PCI devices forming this memory
 *			controller.
 *
 * Returns 0 on success or a negative errno (ECC disabled, allocation or
 * EDAC-core registration failure).  On failure everything allocated here
 * is undone and sbridge_dev->mci is cleared.
 */
static int sbridge_register_mci(struct sbridge_dev *sbridge_dev)
{
	struct mem_ctl_info *mci;
	struct edac_mc_layer layers[2];
	struct sbridge_pvt *pvt;
	int rc;

	/* Check the number of active and not disabled channels */
	rc = check_if_ecc_is_active(sbridge_dev->bus);
	if (unlikely(rc < 0))
		return rc;

	/* allocate a new MC control structure: channel x slot layout */
	layers[0].type = EDAC_MC_LAYER_CHANNEL;
	layers[0].size = NUM_CHANNELS;
	layers[0].is_virt_csrow = false;
	layers[1].type = EDAC_MC_LAYER_SLOT;
	layers[1].size = MAX_DIMMS;
	layers[1].is_virt_csrow = true;
	mci = edac_mc_alloc(sbridge_dev->mc, ARRAY_SIZE(layers), layers,
			    sizeof(*pvt));

	if (unlikely(!mci))
		return -ENOMEM;

	edac_dbg(0, "MC: mci = %p, dev = %p\n",
		 mci, &sbridge_dev->pdev[0]->dev);

	pvt = mci->pvt_info;
	memset(pvt, 0, sizeof(*pvt));

	/* Associate sbridge_dev and mci for future usage */
	pvt->sbridge_dev = sbridge_dev;
	sbridge_dev->mci = mci;

	mci->mtype_cap = MEM_FLAG_DDR3;
	mci->edac_ctl_cap = EDAC_FLAG_NONE;
	mci->edac_cap = EDAC_FLAG_NONE;
	mci->mod_name = "sbridge_edac.c";
	mci->mod_ver = SBRIDGE_REVISION;
	/*
	 * Freed by sbridge_unregister_mci() / fail0 below.
	 * NOTE(review): kasprintf() can return NULL; the result is not
	 * checked before use.
	 */
	mci->ctl_name = kasprintf(GFP_KERNEL, "Sandy Bridge Socket#%d", mci->mc_idx);
	mci->dev_name = pci_name(sbridge_dev->pdev[0]);
	mci->ctl_page_to_phys = NULL;

	/* Set the function pointer to an actual operation function */
	mci->edac_check = sbridge_check_error;

	/* Store pci devices at mci for faster access */
	rc = mci_bind_devs(mci, sbridge_dev);
	if (unlikely(rc < 0))
		goto fail0;

	/* Get dimm basic config and the memory layout */
	get_dimm_config(mci);
	get_memory_layout(mci);

	/* record ptr to the generic device */
	mci->pdev = &sbridge_dev->pdev[0]->dev;

	/* add this new MC control structure to EDAC's list of MCs */
	if (unlikely(edac_mc_add_mc(mci))) {
		edac_dbg(0, "MC: failed edac_mc_add_mc()\n");
		rc = -EINVAL;
		goto fail0;
	}

	return 0;

fail0:
	/* Undo allocations and detach the mci from sbridge_dev */
	kfree(mci->ctl_name);
	edac_mc_free(mci);
	sbridge_dev->mci = NULL;
	return rc;
}
1686 | |||
1687 | /* | ||
1688 | * sbridge_probe Probe for ONE instance of device to see if it is | ||
1689 | * present. | ||
1690 | * return: | ||
1691 | * 0 for FOUND a device | ||
1692 | * < 0 for error code | ||
1693 | */ | ||
1694 | |||
1695 | static int sbridge_probe(struct pci_dev *pdev, const struct pci_device_id *id) | ||
1696 | { | ||
1697 | int rc; | ||
1698 | u8 mc, num_mc = 0; | ||
1699 | struct sbridge_dev *sbridge_dev; | ||
1700 | |||
1701 | /* get the pci devices we want to reserve for our use */ | ||
1702 | mutex_lock(&sbridge_edac_lock); | ||
1703 | |||
1704 | /* | ||
1705 | * All memory controllers are allocated at the first pass. | ||
1706 | */ | ||
1707 | if (unlikely(probed >= 1)) { | ||
1708 | mutex_unlock(&sbridge_edac_lock); | ||
1709 | return -ENODEV; | ||
1710 | } | ||
1711 | probed++; | ||
1712 | |||
1713 | rc = sbridge_get_all_devices(&num_mc); | ||
1714 | if (unlikely(rc < 0)) | ||
1715 | goto fail0; | ||
1716 | mc = 0; | ||
1717 | |||
1718 | list_for_each_entry(sbridge_dev, &sbridge_edac_list, list) { | ||
1719 | edac_dbg(0, "Registering MC#%d (%d of %d)\n", | ||
1720 | mc, mc + 1, num_mc); | ||
1721 | sbridge_dev->mc = mc++; | ||
1722 | rc = sbridge_register_mci(sbridge_dev); | ||
1723 | if (unlikely(rc < 0)) | ||
1724 | goto fail1; | ||
1725 | } | ||
1726 | |||
1727 | sbridge_printk(KERN_INFO, "Driver loaded.\n"); | ||
1728 | |||
1729 | mutex_unlock(&sbridge_edac_lock); | ||
1730 | return 0; | ||
1731 | |||
1732 | fail1: | ||
1733 | list_for_each_entry(sbridge_dev, &sbridge_edac_list, list) | ||
1734 | sbridge_unregister_mci(sbridge_dev); | ||
1735 | |||
1736 | sbridge_put_all_devices(); | ||
1737 | fail0: | ||
1738 | mutex_unlock(&sbridge_edac_lock); | ||
1739 | return rc; | ||
1740 | } | ||
1741 | |||
1742 | /* | ||
1743 | * sbridge_remove destructor for one instance of device | ||
1744 | * | ||
1745 | */ | ||
1746 | static void sbridge_remove(struct pci_dev *pdev) | ||
1747 | { | ||
1748 | struct sbridge_dev *sbridge_dev; | ||
1749 | |||
1750 | edac_dbg(0, "\n"); | ||
1751 | |||
1752 | /* | ||
1753 | * we have a trouble here: pdev value for removal will be wrong, since | ||
1754 | * it will point to the X58 register used to detect that the machine | ||
1755 | * is a Nehalem or upper design. However, due to the way several PCI | ||
1756 | * devices are grouped together to provide MC functionality, we need | ||
1757 | * to use a different method for releasing the devices | ||
1758 | */ | ||
1759 | |||
1760 | mutex_lock(&sbridge_edac_lock); | ||
1761 | |||
1762 | if (unlikely(!probed)) { | ||
1763 | mutex_unlock(&sbridge_edac_lock); | ||
1764 | return; | ||
1765 | } | ||
1766 | |||
1767 | list_for_each_entry(sbridge_dev, &sbridge_edac_list, list) | ||
1768 | sbridge_unregister_mci(sbridge_dev); | ||
1769 | |||
1770 | /* Release PCI resources */ | ||
1771 | sbridge_put_all_devices(); | ||
1772 | |||
1773 | probed--; | ||
1774 | |||
1775 | mutex_unlock(&sbridge_edac_lock); | ||
1776 | } | ||
1777 | |||
MODULE_DEVICE_TABLE(pci, sbridge_pci_tbl);

/*
 * sbridge_driver	pci_driver structure for this module
 * Binds the probe/remove callbacks above to the IDs in sbridge_pci_tbl.
 */
static struct pci_driver sbridge_driver = {
	.name     = "sbridge_edac",
	.probe    = sbridge_probe,
	.remove   = sbridge_remove,
	.id_table = sbridge_pci_tbl,
};
1790 | |||
1791 | /* | ||
1792 | * sbridge_init Module entry function | ||
1793 | * Try to initialize this module for its devices | ||
1794 | */ | ||
1795 | static int __init sbridge_init(void) | ||
1796 | { | ||
1797 | int pci_rc; | ||
1798 | |||
1799 | edac_dbg(2, "\n"); | ||
1800 | |||
1801 | /* Ensure that the OPSTATE is set correctly for POLL or NMI */ | ||
1802 | opstate_init(); | ||
1803 | |||
1804 | pci_rc = pci_register_driver(&sbridge_driver); | ||
1805 | |||
1806 | if (pci_rc >= 0) { | ||
1807 | mce_register_decode_chain(&sbridge_mce_dec); | ||
1808 | return 0; | ||
1809 | } | ||
1810 | |||
1811 | sbridge_printk(KERN_ERR, "Failed to register device with error %d.\n", | ||
1812 | pci_rc); | ||
1813 | |||
1814 | return pci_rc; | ||
1815 | } | ||
1816 | |||
/*
 * sbridge_exit() - Module exit function
 * Unregister the PCI driver, then drop the MCE decode-chain hook that
 * sbridge_init() installed.
 */
static void __exit sbridge_exit(void)
{
	edac_dbg(2, "\n");
	pci_unregister_driver(&sbridge_driver);
	mce_unregister_decode_chain(&sbridge_mce_dec);
}
1827 | |||
module_init(sbridge_init);
module_exit(sbridge_exit);

/* 0444: visible read-only under /sys/module/.../parameters/edac_op_state */
module_param(edac_op_state, int, 0444);
MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
MODULE_AUTHOR("Red Hat Inc. (http://www.redhat.com)");
MODULE_DESCRIPTION("MC Driver for Intel Sandy Bridge memory controllers - "
		   SBRIDGE_REVISION);
diff --git a/drivers/edac/tile_edac.c b/drivers/edac/tile_edac.c index a0820536b7d..1d5cf06f6c6 100644 --- a/drivers/edac/tile_edac.c +++ b/drivers/edac/tile_edac.c | |||
@@ -69,12 +69,9 @@ static void tile_edac_check(struct mem_ctl_info *mci) | |||
69 | 69 | ||
70 | /* Check if the current error count is different from the saved one. */ | 70 | /* Check if the current error count is different from the saved one. */ |
71 | if (mem_error.sbe_count != priv->ce_count) { | 71 | if (mem_error.sbe_count != priv->ce_count) { |
72 | dev_dbg(mci->pdev, "ECC CE err on node %d\n", priv->node); | 72 | dev_dbg(mci->dev, "ECC CE err on node %d\n", priv->node); |
73 | priv->ce_count = mem_error.sbe_count; | 73 | priv->ce_count = mem_error.sbe_count; |
74 | edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1, | 74 | edac_mc_handle_ce(mci, 0, 0, 0, 0, 0, mci->ctl_name); |
75 | 0, 0, 0, | ||
76 | 0, 0, -1, | ||
77 | mci->ctl_name, ""); | ||
78 | } | 75 | } |
79 | } | 76 | } |
80 | 77 | ||
@@ -82,12 +79,11 @@ static void tile_edac_check(struct mem_ctl_info *mci) | |||
82 | * Initialize the 'csrows' table within the mci control structure with the | 79 | * Initialize the 'csrows' table within the mci control structure with the |
83 | * addressing of memory. | 80 | * addressing of memory. |
84 | */ | 81 | */ |
85 | static int tile_edac_init_csrows(struct mem_ctl_info *mci) | 82 | static int __devinit tile_edac_init_csrows(struct mem_ctl_info *mci) |
86 | { | 83 | { |
87 | struct csrow_info *csrow = mci->csrows[0]; | 84 | struct csrow_info *csrow = &mci->csrows[0]; |
88 | struct tile_edac_priv *priv = mci->pvt_info; | 85 | struct tile_edac_priv *priv = mci->pvt_info; |
89 | struct mshim_mem_info mem_info; | 86 | struct mshim_mem_info mem_info; |
90 | struct dimm_info *dimm = csrow->channels[0]->dimm; | ||
91 | 87 | ||
92 | if (hv_dev_pread(priv->hv_devhdl, 0, (HV_VirtAddr)&mem_info, | 88 | if (hv_dev_pread(priv->hv_devhdl, 0, (HV_VirtAddr)&mem_info, |
93 | sizeof(struct mshim_mem_info), MSHIM_MEM_INFO_OFF) != | 89 | sizeof(struct mshim_mem_info), MSHIM_MEM_INFO_OFF) != |
@@ -97,35 +93,36 @@ static int tile_edac_init_csrows(struct mem_ctl_info *mci) | |||
97 | } | 93 | } |
98 | 94 | ||
99 | if (mem_info.mem_ecc) | 95 | if (mem_info.mem_ecc) |
100 | dimm->edac_mode = EDAC_SECDED; | 96 | csrow->edac_mode = EDAC_SECDED; |
101 | else | 97 | else |
102 | dimm->edac_mode = EDAC_NONE; | 98 | csrow->edac_mode = EDAC_NONE; |
103 | switch (mem_info.mem_type) { | 99 | switch (mem_info.mem_type) { |
104 | case DDR2: | 100 | case DDR2: |
105 | dimm->mtype = MEM_DDR2; | 101 | csrow->mtype = MEM_DDR2; |
106 | break; | 102 | break; |
107 | 103 | ||
108 | case DDR3: | 104 | case DDR3: |
109 | dimm->mtype = MEM_DDR3; | 105 | csrow->mtype = MEM_DDR3; |
110 | break; | 106 | break; |
111 | 107 | ||
112 | default: | 108 | default: |
113 | return -1; | 109 | return -1; |
114 | } | 110 | } |
115 | 111 | ||
116 | dimm->nr_pages = mem_info.mem_size >> PAGE_SHIFT; | 112 | csrow->first_page = 0; |
117 | dimm->grain = TILE_EDAC_ERROR_GRAIN; | 113 | csrow->nr_pages = mem_info.mem_size >> PAGE_SHIFT; |
118 | dimm->dtype = DEV_UNKNOWN; | 114 | csrow->last_page = csrow->first_page + csrow->nr_pages - 1; |
115 | csrow->grain = TILE_EDAC_ERROR_GRAIN; | ||
116 | csrow->dtype = DEV_UNKNOWN; | ||
119 | 117 | ||
120 | return 0; | 118 | return 0; |
121 | } | 119 | } |
122 | 120 | ||
123 | static int tile_edac_mc_probe(struct platform_device *pdev) | 121 | static int __devinit tile_edac_mc_probe(struct platform_device *pdev) |
124 | { | 122 | { |
125 | char hv_file[32]; | 123 | char hv_file[32]; |
126 | int hv_devhdl; | 124 | int hv_devhdl; |
127 | struct mem_ctl_info *mci; | 125 | struct mem_ctl_info *mci; |
128 | struct edac_mc_layer layers[2]; | ||
129 | struct tile_edac_priv *priv; | 126 | struct tile_edac_priv *priv; |
130 | int rc; | 127 | int rc; |
131 | 128 | ||
@@ -135,30 +132,20 @@ static int tile_edac_mc_probe(struct platform_device *pdev) | |||
135 | return -EINVAL; | 132 | return -EINVAL; |
136 | 133 | ||
137 | /* A TILE MC has a single channel and one chip-select row. */ | 134 | /* A TILE MC has a single channel and one chip-select row. */ |
138 | layers[0].type = EDAC_MC_LAYER_CHIP_SELECT; | 135 | mci = edac_mc_alloc(sizeof(struct tile_edac_priv), |
139 | layers[0].size = TILE_EDAC_NR_CSROWS; | 136 | TILE_EDAC_NR_CSROWS, TILE_EDAC_NR_CHANS, pdev->id); |
140 | layers[0].is_virt_csrow = true; | ||
141 | layers[1].type = EDAC_MC_LAYER_CHANNEL; | ||
142 | layers[1].size = TILE_EDAC_NR_CHANS; | ||
143 | layers[1].is_virt_csrow = false; | ||
144 | mci = edac_mc_alloc(pdev->id, ARRAY_SIZE(layers), layers, | ||
145 | sizeof(struct tile_edac_priv)); | ||
146 | if (mci == NULL) | 137 | if (mci == NULL) |
147 | return -ENOMEM; | 138 | return -ENOMEM; |
148 | priv = mci->pvt_info; | 139 | priv = mci->pvt_info; |
149 | priv->node = pdev->id; | 140 | priv->node = pdev->id; |
150 | priv->hv_devhdl = hv_devhdl; | 141 | priv->hv_devhdl = hv_devhdl; |
151 | 142 | ||
152 | mci->pdev = &pdev->dev; | 143 | mci->dev = &pdev->dev; |
153 | mci->mtype_cap = MEM_FLAG_DDR2; | 144 | mci->mtype_cap = MEM_FLAG_DDR2; |
154 | mci->edac_ctl_cap = EDAC_FLAG_SECDED; | 145 | mci->edac_ctl_cap = EDAC_FLAG_SECDED; |
155 | 146 | ||
156 | mci->mod_name = DRV_NAME; | 147 | mci->mod_name = DRV_NAME; |
157 | #ifdef __tilegx__ | ||
158 | mci->ctl_name = "TILEGx_Memory_Controller"; | ||
159 | #else | ||
160 | mci->ctl_name = "TILEPro_Memory_Controller"; | 148 | mci->ctl_name = "TILEPro_Memory_Controller"; |
161 | #endif | ||
162 | mci->dev_name = dev_name(&pdev->dev); | 149 | mci->dev_name = dev_name(&pdev->dev); |
163 | mci->edac_check = tile_edac_check; | 150 | mci->edac_check = tile_edac_check; |
164 | 151 | ||
@@ -186,7 +173,7 @@ static int tile_edac_mc_probe(struct platform_device *pdev) | |||
186 | return 0; | 173 | return 0; |
187 | } | 174 | } |
188 | 175 | ||
189 | static int tile_edac_mc_remove(struct platform_device *pdev) | 176 | static int __devexit tile_edac_mc_remove(struct platform_device *pdev) |
190 | { | 177 | { |
191 | struct mem_ctl_info *mci = platform_get_drvdata(pdev); | 178 | struct mem_ctl_info *mci = platform_get_drvdata(pdev); |
192 | 179 | ||
@@ -202,7 +189,7 @@ static struct platform_driver tile_edac_mc_driver = { | |||
202 | .owner = THIS_MODULE, | 189 | .owner = THIS_MODULE, |
203 | }, | 190 | }, |
204 | .probe = tile_edac_mc_probe, | 191 | .probe = tile_edac_mc_probe, |
205 | .remove = tile_edac_mc_remove, | 192 | .remove = __devexit_p(tile_edac_mc_remove), |
206 | }; | 193 | }; |
207 | 194 | ||
208 | /* | 195 | /* |
diff --git a/drivers/edac/x38_edac.c b/drivers/edac/x38_edac.c index c9db24d95ca..b6f47de152f 100644 --- a/drivers/edac/x38_edac.c +++ b/drivers/edac/x38_edac.c | |||
@@ -103,10 +103,10 @@ static int how_many_channel(struct pci_dev *pdev) | |||
103 | 103 | ||
104 | pci_read_config_byte(pdev, X38_CAPID0 + 8, &capid0_8b); | 104 | pci_read_config_byte(pdev, X38_CAPID0 + 8, &capid0_8b); |
105 | if (capid0_8b & 0x20) { /* check DCD: Dual Channel Disable */ | 105 | if (capid0_8b & 0x20) { /* check DCD: Dual Channel Disable */ |
106 | edac_dbg(0, "In single channel mode\n"); | 106 | debugf0("In single channel mode.\n"); |
107 | x38_channel_num = 1; | 107 | x38_channel_num = 1; |
108 | } else { | 108 | } else { |
109 | edac_dbg(0, "In dual channel mode\n"); | 109 | debugf0("In dual channel mode.\n"); |
110 | x38_channel_num = 2; | 110 | x38_channel_num = 2; |
111 | } | 111 | } |
112 | 112 | ||
@@ -151,7 +151,7 @@ static void x38_clear_error_info(struct mem_ctl_info *mci) | |||
151 | { | 151 | { |
152 | struct pci_dev *pdev; | 152 | struct pci_dev *pdev; |
153 | 153 | ||
154 | pdev = to_pci_dev(mci->pdev); | 154 | pdev = to_pci_dev(mci->dev); |
155 | 155 | ||
156 | /* | 156 | /* |
157 | * Clear any error bits. | 157 | * Clear any error bits. |
@@ -172,7 +172,7 @@ static void x38_get_and_clear_error_info(struct mem_ctl_info *mci, | |||
172 | struct pci_dev *pdev; | 172 | struct pci_dev *pdev; |
173 | void __iomem *window = mci->pvt_info; | 173 | void __iomem *window = mci->pvt_info; |
174 | 174 | ||
175 | pdev = to_pci_dev(mci->pdev); | 175 | pdev = to_pci_dev(mci->dev); |
176 | 176 | ||
177 | /* | 177 | /* |
178 | * This is a mess because there is no atomic way to read all the | 178 | * This is a mess because there is no atomic way to read all the |
@@ -215,26 +215,19 @@ static void x38_process_error_info(struct mem_ctl_info *mci, | |||
215 | return; | 215 | return; |
216 | 216 | ||
217 | if ((info->errsts ^ info->errsts2) & X38_ERRSTS_BITS) { | 217 | if ((info->errsts ^ info->errsts2) & X38_ERRSTS_BITS) { |
218 | edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1, 0, 0, 0, | 218 | edac_mc_handle_ce_no_info(mci, "UE overwrote CE"); |
219 | -1, -1, -1, | ||
220 | "UE overwrote CE", ""); | ||
221 | info->errsts = info->errsts2; | 219 | info->errsts = info->errsts2; |
222 | } | 220 | } |
223 | 221 | ||
224 | for (channel = 0; channel < x38_channel_num; channel++) { | 222 | for (channel = 0; channel < x38_channel_num; channel++) { |
225 | log = info->eccerrlog[channel]; | 223 | log = info->eccerrlog[channel]; |
226 | if (log & X38_ECCERRLOG_UE) { | 224 | if (log & X38_ECCERRLOG_UE) { |
227 | edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1, | 225 | edac_mc_handle_ue(mci, 0, 0, |
228 | 0, 0, 0, | 226 | eccerrlog_row(channel, log), "x38 UE"); |
229 | eccerrlog_row(channel, log), | ||
230 | -1, -1, | ||
231 | "x38 UE", ""); | ||
232 | } else if (log & X38_ECCERRLOG_CE) { | 227 | } else if (log & X38_ECCERRLOG_CE) { |
233 | edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1, | 228 | edac_mc_handle_ce(mci, 0, 0, |
234 | 0, 0, eccerrlog_syndrome(log), | 229 | eccerrlog_syndrome(log), |
235 | eccerrlog_row(channel, log), | 230 | eccerrlog_row(channel, log), 0, "x38 CE"); |
236 | -1, -1, | ||
237 | "x38 CE", ""); | ||
238 | } | 231 | } |
239 | } | 232 | } |
240 | } | 233 | } |
@@ -243,7 +236,7 @@ static void x38_check(struct mem_ctl_info *mci) | |||
243 | { | 236 | { |
244 | struct x38_error_info info; | 237 | struct x38_error_info info; |
245 | 238 | ||
246 | edac_dbg(1, "MC%d\n", mci->mc_idx); | 239 | debugf1("MC%d: %s()\n", mci->mc_idx, __func__); |
247 | x38_get_and_clear_error_info(mci, &info); | 240 | x38_get_and_clear_error_info(mci, &info); |
248 | x38_process_error_info(mci, &info); | 241 | x38_process_error_info(mci, &info); |
249 | } | 242 | } |
@@ -324,14 +317,14 @@ static unsigned long drb_to_nr_pages( | |||
324 | static int x38_probe1(struct pci_dev *pdev, int dev_idx) | 317 | static int x38_probe1(struct pci_dev *pdev, int dev_idx) |
325 | { | 318 | { |
326 | int rc; | 319 | int rc; |
327 | int i, j; | 320 | int i; |
328 | struct mem_ctl_info *mci = NULL; | 321 | struct mem_ctl_info *mci = NULL; |
329 | struct edac_mc_layer layers[2]; | 322 | unsigned long last_page; |
330 | u16 drbs[X38_CHANNELS][X38_RANKS_PER_CHANNEL]; | 323 | u16 drbs[X38_CHANNELS][X38_RANKS_PER_CHANNEL]; |
331 | bool stacked; | 324 | bool stacked; |
332 | void __iomem *window; | 325 | void __iomem *window; |
333 | 326 | ||
334 | edac_dbg(0, "MC:\n"); | 327 | debugf0("MC: %s()\n", __func__); |
335 | 328 | ||
336 | window = x38_map_mchbar(pdev); | 329 | window = x38_map_mchbar(pdev); |
337 | if (!window) | 330 | if (!window) |
@@ -342,19 +335,13 @@ static int x38_probe1(struct pci_dev *pdev, int dev_idx) | |||
342 | how_many_channel(pdev); | 335 | how_many_channel(pdev); |
343 | 336 | ||
344 | /* FIXME: unconventional pvt_info usage */ | 337 | /* FIXME: unconventional pvt_info usage */ |
345 | layers[0].type = EDAC_MC_LAYER_CHIP_SELECT; | 338 | mci = edac_mc_alloc(0, X38_RANKS, x38_channel_num, 0); |
346 | layers[0].size = X38_RANKS; | ||
347 | layers[0].is_virt_csrow = true; | ||
348 | layers[1].type = EDAC_MC_LAYER_CHANNEL; | ||
349 | layers[1].size = x38_channel_num; | ||
350 | layers[1].is_virt_csrow = false; | ||
351 | mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers, 0); | ||
352 | if (!mci) | 339 | if (!mci) |
353 | return -ENOMEM; | 340 | return -ENOMEM; |
354 | 341 | ||
355 | edac_dbg(3, "MC: init mci\n"); | 342 | debugf3("MC: %s(): init mci\n", __func__); |
356 | 343 | ||
357 | mci->pdev = &pdev->dev; | 344 | mci->dev = &pdev->dev; |
358 | mci->mtype_cap = MEM_FLAG_DDR2; | 345 | mci->mtype_cap = MEM_FLAG_DDR2; |
359 | 346 | ||
360 | mci->edac_ctl_cap = EDAC_FLAG_SECDED; | 347 | mci->edac_ctl_cap = EDAC_FLAG_SECDED; |
@@ -376,38 +363,41 @@ static int x38_probe1(struct pci_dev *pdev, int dev_idx) | |||
376 | * cumulative; the last one will contain the total memory | 363 | * cumulative; the last one will contain the total memory |
377 | * contained in all ranks. | 364 | * contained in all ranks. |
378 | */ | 365 | */ |
366 | last_page = -1UL; | ||
379 | for (i = 0; i < mci->nr_csrows; i++) { | 367 | for (i = 0; i < mci->nr_csrows; i++) { |
380 | unsigned long nr_pages; | 368 | unsigned long nr_pages; |
381 | struct csrow_info *csrow = mci->csrows[i]; | 369 | struct csrow_info *csrow = &mci->csrows[i]; |
382 | 370 | ||
383 | nr_pages = drb_to_nr_pages(drbs, stacked, | 371 | nr_pages = drb_to_nr_pages(drbs, stacked, |
384 | i / X38_RANKS_PER_CHANNEL, | 372 | i / X38_RANKS_PER_CHANNEL, |
385 | i % X38_RANKS_PER_CHANNEL); | 373 | i % X38_RANKS_PER_CHANNEL); |
386 | 374 | ||
387 | if (nr_pages == 0) | 375 | if (nr_pages == 0) { |
376 | csrow->mtype = MEM_EMPTY; | ||
388 | continue; | 377 | continue; |
378 | } | ||
389 | 379 | ||
390 | for (j = 0; j < x38_channel_num; j++) { | 380 | csrow->first_page = last_page + 1; |
391 | struct dimm_info *dimm = csrow->channels[j]->dimm; | 381 | last_page += nr_pages; |
382 | csrow->last_page = last_page; | ||
383 | csrow->nr_pages = nr_pages; | ||
392 | 384 | ||
393 | dimm->nr_pages = nr_pages / x38_channel_num; | 385 | csrow->grain = nr_pages << PAGE_SHIFT; |
394 | dimm->grain = nr_pages << PAGE_SHIFT; | 386 | csrow->mtype = MEM_DDR2; |
395 | dimm->mtype = MEM_DDR2; | 387 | csrow->dtype = DEV_UNKNOWN; |
396 | dimm->dtype = DEV_UNKNOWN; | 388 | csrow->edac_mode = EDAC_UNKNOWN; |
397 | dimm->edac_mode = EDAC_UNKNOWN; | ||
398 | } | ||
399 | } | 389 | } |
400 | 390 | ||
401 | x38_clear_error_info(mci); | 391 | x38_clear_error_info(mci); |
402 | 392 | ||
403 | rc = -ENODEV; | 393 | rc = -ENODEV; |
404 | if (edac_mc_add_mc(mci)) { | 394 | if (edac_mc_add_mc(mci)) { |
405 | edac_dbg(3, "MC: failed edac_mc_add_mc()\n"); | 395 | debugf3("MC: %s(): failed edac_mc_add_mc()\n", __func__); |
406 | goto fail; | 396 | goto fail; |
407 | } | 397 | } |
408 | 398 | ||
409 | /* get this far and it's successful */ | 399 | /* get this far and it's successful */ |
410 | edac_dbg(3, "MC: success\n"); | 400 | debugf3("MC: %s(): success\n", __func__); |
411 | return 0; | 401 | return 0; |
412 | 402 | ||
413 | fail: | 403 | fail: |
@@ -418,11 +408,12 @@ fail: | |||
418 | return rc; | 408 | return rc; |
419 | } | 409 | } |
420 | 410 | ||
421 | static int x38_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) | 411 | static int __devinit x38_init_one(struct pci_dev *pdev, |
412 | const struct pci_device_id *ent) | ||
422 | { | 413 | { |
423 | int rc; | 414 | int rc; |
424 | 415 | ||
425 | edac_dbg(0, "MC:\n"); | 416 | debugf0("MC: %s()\n", __func__); |
426 | 417 | ||
427 | if (pci_enable_device(pdev) < 0) | 418 | if (pci_enable_device(pdev) < 0) |
428 | return -EIO; | 419 | return -EIO; |
@@ -434,11 +425,11 @@ static int x38_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
434 | return rc; | 425 | return rc; |
435 | } | 426 | } |
436 | 427 | ||
437 | static void x38_remove_one(struct pci_dev *pdev) | 428 | static void __devexit x38_remove_one(struct pci_dev *pdev) |
438 | { | 429 | { |
439 | struct mem_ctl_info *mci; | 430 | struct mem_ctl_info *mci; |
440 | 431 | ||
441 | edac_dbg(0, "\n"); | 432 | debugf0("%s()\n", __func__); |
442 | 433 | ||
443 | mci = edac_mc_del_mc(&pdev->dev); | 434 | mci = edac_mc_del_mc(&pdev->dev); |
444 | if (!mci) | 435 | if (!mci) |
@@ -449,7 +440,7 @@ static void x38_remove_one(struct pci_dev *pdev) | |||
449 | edac_mc_free(mci); | 440 | edac_mc_free(mci); |
450 | } | 441 | } |
451 | 442 | ||
452 | static DEFINE_PCI_DEVICE_TABLE(x38_pci_tbl) = { | 443 | static const struct pci_device_id x38_pci_tbl[] __devinitdata = { |
453 | { | 444 | { |
454 | PCI_VEND_DEV(INTEL, X38_HB), PCI_ANY_ID, PCI_ANY_ID, 0, 0, | 445 | PCI_VEND_DEV(INTEL, X38_HB), PCI_ANY_ID, PCI_ANY_ID, 0, 0, |
455 | X38}, | 446 | X38}, |
@@ -463,7 +454,7 @@ MODULE_DEVICE_TABLE(pci, x38_pci_tbl); | |||
463 | static struct pci_driver x38_driver = { | 454 | static struct pci_driver x38_driver = { |
464 | .name = EDAC_MOD_STR, | 455 | .name = EDAC_MOD_STR, |
465 | .probe = x38_init_one, | 456 | .probe = x38_init_one, |
466 | .remove = x38_remove_one, | 457 | .remove = __devexit_p(x38_remove_one), |
467 | .id_table = x38_pci_tbl, | 458 | .id_table = x38_pci_tbl, |
468 | }; | 459 | }; |
469 | 460 | ||
@@ -471,7 +462,7 @@ static int __init x38_init(void) | |||
471 | { | 462 | { |
472 | int pci_rc; | 463 | int pci_rc; |
473 | 464 | ||
474 | edac_dbg(3, "MC:\n"); | 465 | debugf3("MC: %s()\n", __func__); |
475 | 466 | ||
476 | /* Ensure that the OPSTATE is set correctly for POLL or NMI */ | 467 | /* Ensure that the OPSTATE is set correctly for POLL or NMI */ |
477 | opstate_init(); | 468 | opstate_init(); |
@@ -485,14 +476,14 @@ static int __init x38_init(void) | |||
485 | mci_pdev = pci_get_device(PCI_VENDOR_ID_INTEL, | 476 | mci_pdev = pci_get_device(PCI_VENDOR_ID_INTEL, |
486 | PCI_DEVICE_ID_INTEL_X38_HB, NULL); | 477 | PCI_DEVICE_ID_INTEL_X38_HB, NULL); |
487 | if (!mci_pdev) { | 478 | if (!mci_pdev) { |
488 | edac_dbg(0, "x38 pci_get_device fail\n"); | 479 | debugf0("x38 pci_get_device fail\n"); |
489 | pci_rc = -ENODEV; | 480 | pci_rc = -ENODEV; |
490 | goto fail1; | 481 | goto fail1; |
491 | } | 482 | } |
492 | 483 | ||
493 | pci_rc = x38_init_one(mci_pdev, x38_pci_tbl); | 484 | pci_rc = x38_init_one(mci_pdev, x38_pci_tbl); |
494 | if (pci_rc < 0) { | 485 | if (pci_rc < 0) { |
495 | edac_dbg(0, "x38 init fail\n"); | 486 | debugf0("x38 init fail\n"); |
496 | pci_rc = -ENODEV; | 487 | pci_rc = -ENODEV; |
497 | goto fail1; | 488 | goto fail1; |
498 | } | 489 | } |
@@ -512,7 +503,7 @@ fail0: | |||
512 | 503 | ||
513 | static void __exit x38_exit(void) | 504 | static void __exit x38_exit(void) |
514 | { | 505 | { |
515 | edac_dbg(3, "MC:\n"); | 506 | debugf3("MC: %s()\n", __func__); |
516 | 507 | ||
517 | pci_unregister_driver(&x38_driver); | 508 | pci_unregister_driver(&x38_driver); |
518 | if (!x38_registered) { | 509 | if (!x38_registered) { |