author | Glenn Elliott <gelliott@cs.unc.edu> | 2012-03-04 19:47:13 -0500
committer | Glenn Elliott <gelliott@cs.unc.edu> | 2012-03-04 19:47:13 -0500
commit | c71c03bda1e86c9d5198c5d83f712e695c4f2a1e (patch)
tree | ecb166cb3e2b7e2adb3b5e292245fefd23381ac8 /drivers/edac
parent | ea53c912f8a86a8567697115b6a0d8152beee5c8 (diff)
parent | 6a00f206debf8a5c8899055726ad127dbeeed098 (diff)
Merge branch 'mpi-master' into wip-k-fmlp
Conflicts:
litmus/sched_cedf.c
Diffstat (limited to 'drivers/edac')
45 files changed, 4541 insertions, 2746 deletions
diff --git a/drivers/edac/Kconfig b/drivers/edac/Kconfig
index 70bb350de996..af1a17d42bd7 100644
--- a/drivers/edac/Kconfig
+++ b/drivers/edac/Kconfig
@@ -7,7 +7,7 @@ | |||
7 | menuconfig EDAC | 7 | menuconfig EDAC |
8 | bool "EDAC (Error Detection And Correction) reporting" | 8 | bool "EDAC (Error Detection And Correction) reporting" |
9 | depends on HAS_IOMEM | 9 | depends on HAS_IOMEM |
10 | depends on X86 || PPC | 10 | depends on X86 || PPC || TILE |
11 | help | 11 | help |
12 | EDAC is designed to report errors in the core system. | 12 | EDAC is designed to report errors in the core system. |
13 | These are low-level errors that are reported in the CPU or | 13 | These are low-level errors that are reported in the CPU or |
@@ -39,18 +39,28 @@ config EDAC_DEBUG | |||
39 | there're four debug levels (x=0,1,2,3 from low to high). | 39 | there're four debug levels (x=0,1,2,3 from low to high). |
40 | Usually you should select 'N'. | 40 | Usually you should select 'N'. |
41 | 41 | ||
42 | config EDAC_DECODE_MCE | 42 | config EDAC_DECODE_MCE |
43 | tristate "Decode MCEs in human-readable form (only on AMD for now)" | 43 | tristate "Decode MCEs in human-readable form (only on AMD for now)" |
44 | depends on CPU_SUP_AMD && X86_MCE | 44 | depends on CPU_SUP_AMD && X86_MCE |
45 | default y | 45 | default y |
46 | ---help--- | 46 | ---help--- |
47 | Enable this option if you want to decode Machine Check Exceptions | 47 | Enable this option if you want to decode Machine Check Exceptions |
48 | occuring on your machine in human-readable form. | 48 | occurring on your machine in human-readable form. |
49 | 49 | ||
50 | You should definitely say Y here in case you want to decode MCEs | 50 | You should definitely say Y here in case you want to decode MCEs |
51 | which occur really early upon boot, before the module infrastructure | 51 | which occur really early upon boot, before the module infrastructure |
52 | has been initialized. | 52 | has been initialized. |
53 | 53 | ||
54 | config EDAC_MCE_INJ | ||
55 | tristate "Simple MCE injection interface over /sysfs" | ||
56 | depends on EDAC_DECODE_MCE | ||
57 | default n | ||
58 | help | ||
59 | This is a simple interface to inject MCEs over /sysfs and test | ||
60 | the MCE decoding code in EDAC. | ||
61 | |||
62 | This is currently AMD-only. | ||
63 | |||
54 | config EDAC_MM_EDAC | 64 | config EDAC_MM_EDAC |
55 | tristate "Main Memory EDAC (Error Detection And Correction) reporting" | 65 | tristate "Main Memory EDAC (Error Detection And Correction) reporting" |
56 | help | 66 | help |
@@ -65,14 +75,14 @@ config EDAC_MCE | |||
65 | bool | 75 | bool |
66 | 76 | ||
67 | config EDAC_AMD64 | 77 | config EDAC_AMD64 |
68 | tristate "AMD64 (Opteron, Athlon64) K8, F10h, F11h" | 78 | tristate "AMD64 (Opteron, Athlon64) K8, F10h" |
69 | depends on EDAC_MM_EDAC && K8_NB && X86_64 && PCI && EDAC_DECODE_MCE | 79 | depends on EDAC_MM_EDAC && AMD_NB && X86_64 && EDAC_DECODE_MCE |
70 | help | 80 | help |
71 | Support for error detection and correction on the AMD 64 | 81 | Support for error detection and correction of DRAM ECC errors on |
72 | Families of Memory Controllers (K8, F10h and F11h) | 82 | the AMD64 families of memory controllers (K8 and F10h) |
73 | 83 | ||
74 | config EDAC_AMD64_ERROR_INJECTION | 84 | config EDAC_AMD64_ERROR_INJECTION |
75 | bool "Sysfs Error Injection facilities" | 85 | bool "Sysfs HW Error injection facilities" |
76 | depends on EDAC_AMD64 | 86 | depends on EDAC_AMD64 |
77 | help | 87 | help |
78 | Recent Opterons (Family 10h and later) provide for Memory Error | 88 | Recent Opterons (Family 10h and later) provide for Memory Error |
@@ -199,6 +209,13 @@ config EDAC_I5100 | |||
199 | Support for error detection and correction the Intel | 209 | Support for error detection and correction the Intel |
200 | San Clemente MCH. | 210 | San Clemente MCH. |
201 | 211 | ||
212 | config EDAC_I7300 | ||
213 | tristate "Intel Clarksboro MCH" | ||
214 | depends on EDAC_MM_EDAC && X86 && PCI | ||
215 | help | ||
216 | Support for error detection and correction the Intel | ||
217 | Clarksboro MCH (Intel 7300 chipset). | ||
218 | |||
202 | config EDAC_MPC85XX | 219 | config EDAC_MPC85XX |
203 | tristate "Freescale MPC83xx / MPC85xx" | 220 | tristate "Freescale MPC83xx / MPC85xx" |
204 | depends on EDAC_MM_EDAC && FSL_SOC && (PPC_83xx || PPC_85xx) | 221 | depends on EDAC_MM_EDAC && FSL_SOC && (PPC_83xx || PPC_85xx) |
@@ -265,4 +282,12 @@ config EDAC_CPC925 | |||
265 | a companion chip to the PowerPC 970 family of | 282 | a companion chip to the PowerPC 970 family of |
266 | processors. | 283 | processors. |
267 | 284 | ||
285 | config EDAC_TILE | ||
286 | tristate "Tilera Memory Controller" | ||
287 | depends on EDAC_MM_EDAC && TILE | ||
288 | default y | ||
289 | help | ||
290 | Support for error detection and correction on the | ||
291 | Tilera memory controller. | ||
292 | |||
268 | endif # EDAC | 293 | endif # EDAC |
diff --git a/drivers/edac/Makefile b/drivers/edac/Makefile
index ca6b1bb24ccc..3e239133e29e 100644
--- a/drivers/edac/Makefile
+++ b/drivers/edac/Makefile
@@ -10,13 +10,16 @@ obj-$(CONFIG_EDAC) := edac_stub.o | |||
10 | obj-$(CONFIG_EDAC_MM_EDAC) += edac_core.o | 10 | obj-$(CONFIG_EDAC_MM_EDAC) += edac_core.o |
11 | obj-$(CONFIG_EDAC_MCE) += edac_mce.o | 11 | obj-$(CONFIG_EDAC_MCE) += edac_mce.o |
12 | 12 | ||
13 | edac_core-objs := edac_mc.o edac_device.o edac_mc_sysfs.o edac_pci_sysfs.o | 13 | edac_core-y := edac_mc.o edac_device.o edac_mc_sysfs.o edac_pci_sysfs.o |
14 | edac_core-objs += edac_module.o edac_device_sysfs.o | 14 | edac_core-y += edac_module.o edac_device_sysfs.o |
15 | 15 | ||
16 | ifdef CONFIG_PCI | 16 | ifdef CONFIG_PCI |
17 | edac_core-objs += edac_pci.o edac_pci_sysfs.o | 17 | edac_core-y += edac_pci.o edac_pci_sysfs.o |
18 | endif | 18 | endif |
19 | 19 | ||
20 | obj-$(CONFIG_EDAC_MCE_INJ) += mce_amd_inj.o | ||
21 | |||
22 | edac_mce_amd-y := mce_amd.o | ||
20 | obj-$(CONFIG_EDAC_DECODE_MCE) += edac_mce_amd.o | 23 | obj-$(CONFIG_EDAC_DECODE_MCE) += edac_mce_amd.o |
21 | 24 | ||
22 | obj-$(CONFIG_EDAC_AMD76X) += amd76x_edac.o | 25 | obj-$(CONFIG_EDAC_AMD76X) += amd76x_edac.o |
@@ -24,6 +27,7 @@ obj-$(CONFIG_EDAC_CPC925) += cpc925_edac.o | |||
24 | obj-$(CONFIG_EDAC_I5000) += i5000_edac.o | 27 | obj-$(CONFIG_EDAC_I5000) += i5000_edac.o |
25 | obj-$(CONFIG_EDAC_I5100) += i5100_edac.o | 28 | obj-$(CONFIG_EDAC_I5100) += i5100_edac.o |
26 | obj-$(CONFIG_EDAC_I5400) += i5400_edac.o | 29 | obj-$(CONFIG_EDAC_I5400) += i5400_edac.o |
30 | obj-$(CONFIG_EDAC_I7300) += i7300_edac.o | ||
27 | obj-$(CONFIG_EDAC_I7CORE) += i7core_edac.o | 31 | obj-$(CONFIG_EDAC_I7CORE) += i7core_edac.o |
28 | obj-$(CONFIG_EDAC_E7XXX) += e7xxx_edac.o | 32 | obj-$(CONFIG_EDAC_E7XXX) += e7xxx_edac.o |
29 | obj-$(CONFIG_EDAC_E752X) += e752x_edac.o | 33 | obj-$(CONFIG_EDAC_E752X) += e752x_edac.o |
@@ -50,3 +54,4 @@ obj-$(CONFIG_EDAC_PPC4XX) += ppc4xx_edac.o | |||
50 | obj-$(CONFIG_EDAC_AMD8111) += amd8111_edac.o | 54 | obj-$(CONFIG_EDAC_AMD8111) += amd8111_edac.o |
51 | obj-$(CONFIG_EDAC_AMD8131) += amd8131_edac.o | 55 | obj-$(CONFIG_EDAC_AMD8131) += amd8131_edac.o |
52 | 56 | ||
57 | obj-$(CONFIG_EDAC_TILE) += tile_edac.o | ||
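
The Makefile now builds tile_edac.o whenever CONFIG_EDAC_TILE is set. For orientation, the sketch below shows the general registration shape such a memory-controller driver has. It is written from memory against the 2.6.3x-era edac_core API (edac_mc_alloc()/edac_mc_add_mc()); the sizes, capability flags, and the "tile-edac" device name are illustrative assumptions, not the actual tile_edac.c.

```c
#include <linux/module.h>
#include <linux/platform_device.h>
#include "edac_core.h"

/* Illustrative probe: allocate a mem_ctl_info with no private data,
 * one csrow and one channel, then register it with the EDAC core. */
static int __devinit tile_edac_mc_probe(struct platform_device *pdev)
{
	struct mem_ctl_info *mci = edac_mc_alloc(0, 1, 1, pdev->id);

	if (!mci)
		return -ENOMEM;

	mci->dev	  = &pdev->dev;
	mci->mtype_cap	  = MEM_FLAG_DDR2;	/* placeholder capability */
	mci->edac_ctl_cap = EDAC_FLAG_SECDED;

	if (edac_mc_add_mc(mci)) {
		edac_mc_free(mci);
		return -ENOMEM;
	}
	return 0;
}

static struct platform_driver tile_edac_mc_driver = {
	.driver	= { .name = "tile-edac" },	/* hypothetical name */
	.probe	= tile_edac_mc_probe,
};
```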
diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c
index e7d5d6b5dcf6..9a8bebcf6b17 100644
--- a/drivers/edac/amd64_edac.c
+++ b/drivers/edac/amd64_edac.c
@@ -1,5 +1,5 @@ | |||
1 | #include "amd64_edac.h" | 1 | #include "amd64_edac.h" |
2 | #include <asm/k8.h> | 2 | #include <asm/amd_nb.h> |
3 | 3 | ||
4 | static struct edac_pci_ctl_info *amd64_ctl_pci; | 4 | static struct edac_pci_ctl_info *amd64_ctl_pci; |
5 | 5 | ||
@@ -15,55 +15,14 @@ module_param(ecc_enable_override, int, 0644); | |||
15 | 15 | ||
16 | static struct msr __percpu *msrs; | 16 | static struct msr __percpu *msrs; |
17 | 17 | ||
18 | /* Lookup table for all possible MC control instances */ | ||
19 | struct amd64_pvt; | ||
20 | static struct mem_ctl_info *mci_lookup[EDAC_MAX_NUMNODES]; | ||
21 | static struct amd64_pvt *pvt_lookup[EDAC_MAX_NUMNODES]; | ||
22 | |||
23 | /* | 18 | /* |
24 | * Address to DRAM bank mapping: see F2x80 for K8 and F2x[1,0]80 for Fam10 and | 19 | * count successfully initialized driver instances for setup_pci_device() |
25 | * later. | ||
26 | */ | 20 | */ |
27 | static int ddr2_dbam_revCG[] = { | 21 | static atomic_t drv_instances = ATOMIC_INIT(0); |
28 | [0] = 32, | ||
29 | [1] = 64, | ||
30 | [2] = 128, | ||
31 | [3] = 256, | ||
32 | [4] = 512, | ||
33 | [5] = 1024, | ||
34 | [6] = 2048, | ||
35 | }; | ||
36 | |||
37 | static int ddr2_dbam_revD[] = { | ||
38 | [0] = 32, | ||
39 | [1] = 64, | ||
40 | [2 ... 3] = 128, | ||
41 | [4] = 256, | ||
42 | [5] = 512, | ||
43 | [6] = 256, | ||
44 | [7] = 512, | ||
45 | [8 ... 9] = 1024, | ||
46 | [10] = 2048, | ||
47 | }; | ||
48 | 22 | ||
49 | static int ddr2_dbam[] = { [0] = 128, | 23 | /* Per-node driver instances */ |
50 | [1] = 256, | 24 | static struct mem_ctl_info **mcis; |
51 | [2 ... 4] = 512, | 25 | static struct ecc_settings **ecc_stngs; |
52 | [5 ... 6] = 1024, | ||
53 | [7 ... 8] = 2048, | ||
54 | [9 ... 10] = 4096, | ||
55 | [11] = 8192, | ||
56 | }; | ||
57 | |||
58 | static int ddr3_dbam[] = { [0] = -1, | ||
59 | [1] = 256, | ||
60 | [2] = 512, | ||
61 | [3 ... 4] = -1, | ||
62 | [5 ... 6] = 1024, | ||
63 | [7 ... 8] = 2048, | ||
64 | [9 ... 10] = 4096, | ||
65 | [11] = 8192, | ||
66 | }; | ||
67 | 26 | ||
68 | /* | 27 | /* |
69 | * Valid scrub rates for the K8 hardware memory scrubber. We map the scrubbing | 28 | * Valid scrub rates for the K8 hardware memory scrubber. We map the scrubbing |
@@ -72,8 +31,10 @@ static int ddr3_dbam[] = { [0] = -1, | |||
72 | * | 31 | * |
73 | *FIXME: Produce a better mapping/linearisation. | 32 | *FIXME: Produce a better mapping/linearisation. |
74 | */ | 33 | */ |
75 | 34 | struct scrubrate { | |
76 | struct scrubrate scrubrates[] = { | 35 | u32 scrubval; /* bit pattern for scrub rate */ |
36 | u32 bandwidth; /* bandwidth consumed (bytes/sec) */ | ||
37 | } scrubrates[] = { | ||
77 | { 0x01, 1600000000UL}, | 38 | { 0x01, 1600000000UL}, |
78 | { 0x02, 800000000UL}, | 39 | { 0x02, 800000000UL}, |
79 | { 0x03, 400000000UL}, | 40 | { 0x03, 400000000UL}, |
@@ -99,6 +60,79 @@ struct scrubrate scrubrates[] = { | |||
99 | { 0x00, 0UL}, /* scrubbing off */ | 60 | { 0x00, 0UL}, /* scrubbing off */ |
100 | }; | 61 | }; |
101 | 62 | ||
63 | static int __amd64_read_pci_cfg_dword(struct pci_dev *pdev, int offset, | ||
64 | u32 *val, const char *func) | ||
65 | { | ||
66 | int err = 0; | ||
67 | |||
68 | err = pci_read_config_dword(pdev, offset, val); | ||
69 | if (err) | ||
70 | amd64_warn("%s: error reading F%dx%03x.\n", | ||
71 | func, PCI_FUNC(pdev->devfn), offset); | ||
72 | |||
73 | return err; | ||
74 | } | ||
75 | |||
76 | int __amd64_write_pci_cfg_dword(struct pci_dev *pdev, int offset, | ||
77 | u32 val, const char *func) | ||
78 | { | ||
79 | int err = 0; | ||
80 | |||
81 | err = pci_write_config_dword(pdev, offset, val); | ||
82 | if (err) | ||
83 | amd64_warn("%s: error writing to F%dx%03x.\n", | ||
84 | func, PCI_FUNC(pdev->devfn), offset); | ||
85 | |||
86 | return err; | ||
87 | } | ||
88 | |||
89 | /* | ||
90 | * | ||
91 | * Depending on the family, F2 DCT reads need special handling: | ||
92 | * | ||
93 | * K8: has a single DCT only | ||
94 | * | ||
95 | * F10h: each DCT has its own set of regs | ||
96 | * DCT0 -> F2x040.. | ||
97 | * DCT1 -> F2x140.. | ||
98 | * | ||
99 | * F15h: we select which DCT we access using F1x10C[DctCfgSel] | ||
100 | * | ||
101 | */ | ||
102 | static int k8_read_dct_pci_cfg(struct amd64_pvt *pvt, int addr, u32 *val, | ||
103 | const char *func) | ||
104 | { | ||
105 | if (addr >= 0x100) | ||
106 | return -EINVAL; | ||
107 | |||
108 | return __amd64_read_pci_cfg_dword(pvt->F2, addr, val, func); | ||
109 | } | ||
110 | |||
111 | static int f10_read_dct_pci_cfg(struct amd64_pvt *pvt, int addr, u32 *val, | ||
112 | const char *func) | ||
113 | { | ||
114 | return __amd64_read_pci_cfg_dword(pvt->F2, addr, val, func); | ||
115 | } | ||
116 | |||
117 | static int f15_read_dct_pci_cfg(struct amd64_pvt *pvt, int addr, u32 *val, | ||
118 | const char *func) | ||
119 | { | ||
120 | u32 reg = 0; | ||
121 | u8 dct = 0; | ||
122 | |||
123 | if (addr >= 0x140 && addr <= 0x1a0) { | ||
124 | dct = 1; | ||
125 | addr -= 0x100; | ||
126 | } | ||
127 | |||
128 | amd64_read_pci_cfg(pvt->F1, DCT_CFG_SEL, ®); | ||
129 | reg &= 0xfffffffe; | ||
130 | reg |= dct; | ||
131 | amd64_write_pci_cfg(pvt->F1, DCT_CFG_SEL, reg); | ||
132 | |||
133 | return __amd64_read_pci_cfg_dword(pvt->F2, addr, val, func); | ||
134 | } | ||
135 | |||
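
The three *_read_dct_pci_cfg() variants above are hooked up through a per-family ops table in amd64_edac.h and invoked via a wrapper, so callers never test the CPU family themselves. The names below follow that pattern but are reproduced from memory; treat this as a sketch of the dispatch, not the exact header.

```c
/* Sketch of the per-family hook, as it would appear in amd64_edac.h:
 * the right variant is installed in pvt->ops at init time. */
struct low_ops {
	int (*read_dct_pci_cfg)(struct amd64_pvt *pvt, int addr,
				u32 *val, const char *func);
	/* ...other per-family callbacks elided... */
};

#define amd64_read_dct_pci_cfg(pvt, addr, val)				\
	pvt->ops->read_dct_pci_cfg(pvt, addr, val, __func__)
```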
102 | /* | 136 | /* |
103 | * Memory scrubber control interface. For K8, memory scrubbing is handled by | 137 | * Memory scrubber control interface. For K8, memory scrubbing is handled by |
104 | * hardware and can involve L2 cache, dcache as well as the main memory. With | 138 | * hardware and can involve L2 cache, dcache as well as the main memory. With |
@@ -117,8 +151,7 @@ struct scrubrate scrubrates[] = { | |||
117 | * scan the scrub rate mapping table for a close or matching bandwidth value to | 151 | * scan the scrub rate mapping table for a close or matching bandwidth value to |
118 | * issue. If requested is too big, then use last maximum value found. | 152 | * issue. If requested is too big, then use last maximum value found. |
119 | */ | 153 | */ |
120 | static int amd64_search_set_scrub_rate(struct pci_dev *ctl, u32 new_bw, | 154 | static int __amd64_set_scrub_rate(struct pci_dev *ctl, u32 new_bw, u32 min_rate) |
121 | u32 min_scrubrate) | ||
122 | { | 155 | { |
123 | u32 scrubval; | 156 | u32 scrubval; |
124 | int i; | 157 | int i; |
@@ -134,7 +167,7 @@ static int amd64_search_set_scrub_rate(struct pci_dev *ctl, u32 new_bw, | |||
134 | * skip scrub rates which aren't recommended | 167 | * skip scrub rates which aren't recommended |
135 | * (see F10 BKDG, F3x58) | 168 | * (see F10 BKDG, F3x58) |
136 | */ | 169 | */ |
137 | if (scrubrates[i].scrubval < min_scrubrate) | 170 | if (scrubrates[i].scrubval < min_rate) |
138 | continue; | 171 | continue; |
139 | 172 | ||
140 | if (scrubrates[i].bandwidth <= new_bw) | 173 | if (scrubrates[i].bandwidth <= new_bw) |
@@ -148,123 +181,53 @@ static int amd64_search_set_scrub_rate(struct pci_dev *ctl, u32 new_bw, | |||
148 | } | 181 | } |
149 | 182 | ||
150 | scrubval = scrubrates[i].scrubval; | 183 | scrubval = scrubrates[i].scrubval; |
151 | if (scrubval) | ||
152 | edac_printk(KERN_DEBUG, EDAC_MC, | ||
153 | "Setting scrub rate bandwidth: %u\n", | ||
154 | scrubrates[i].bandwidth); | ||
155 | else | ||
156 | edac_printk(KERN_DEBUG, EDAC_MC, "Turning scrubbing off.\n"); | ||
157 | 184 | ||
158 | pci_write_bits32(ctl, K8_SCRCTRL, scrubval, 0x001F); | 185 | pci_write_bits32(ctl, SCRCTRL, scrubval, 0x001F); |
186 | |||
187 | if (scrubval) | ||
188 | return scrubrates[i].bandwidth; | ||
159 | 189 | ||
160 | return 0; | 190 | return 0; |
161 | } | 191 | } |
162 | 192 | ||
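
The selection loop above picks the first table entry whose bandwidth does not exceed the request, skipping entries below the per-family minimum, and falls through to the final "off" entry when nothing fits. A standalone, compilable rendering of that logic (table truncated to the first few entries for brevity):

```c
#include <stdio.h>
#include <stdint.h>

struct scrubrate { uint32_t scrubval; uint32_t bandwidth; };

/* First entries of scrubrates[], plus the terminating "off" entry */
static const struct scrubrate rates[] = {
	{ 0x01, 1600000000UL },
	{ 0x02,  800000000UL },
	{ 0x03,  400000000UL },
	{ 0x04,  200000000UL },
	{ 0x05,  100000000UL },
	{ 0x00,           0UL },	/* scrubbing off */
};

static uint32_t pick_scrubval(uint32_t new_bw, uint32_t min_rate)
{
	size_t i, last = sizeof(rates) / sizeof(rates[0]) - 1;

	for (i = 0; i < last; i++) {
		if (rates[i].scrubval < min_rate)
			continue;	/* not recommended for this family */
		if (rates[i].bandwidth <= new_bw)
			break;		/* first rate not above the request */
	}
	return rates[i].scrubval;	/* may fall through to "off" */
}

int main(void)
{
	/* F10h-style minimum of 0x5: a 500 MB/s request maps to 0x05 */
	printf("0x%02x\n", pick_scrubval(500000000U, 0x5));
	/* nothing slow enough for 10 MB/s -> scrubbing off (0x00) */
	printf("0x%02x\n", pick_scrubval(10000000U, 0x5));
	return 0;
}
```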
163 | static int amd64_set_scrub_rate(struct mem_ctl_info *mci, u32 bandwidth) | 193 | static int amd64_set_scrub_rate(struct mem_ctl_info *mci, u32 bw) |
164 | { | 194 | { |
165 | struct amd64_pvt *pvt = mci->pvt_info; | 195 | struct amd64_pvt *pvt = mci->pvt_info; |
166 | u32 min_scrubrate = 0x0; | 196 | u32 min_scrubrate = 0x5; |
167 | 197 | ||
168 | switch (boot_cpu_data.x86) { | 198 | if (boot_cpu_data.x86 == 0xf) |
169 | case 0xf: | 199 | min_scrubrate = 0x0; |
170 | min_scrubrate = K8_MIN_SCRUB_RATE_BITS; | ||
171 | break; | ||
172 | case 0x10: | ||
173 | min_scrubrate = F10_MIN_SCRUB_RATE_BITS; | ||
174 | break; | ||
175 | case 0x11: | ||
176 | min_scrubrate = F11_MIN_SCRUB_RATE_BITS; | ||
177 | break; | ||
178 | 200 | ||
179 | default: | 201 | return __amd64_set_scrub_rate(pvt->F3, bw, min_scrubrate); |
180 | amd64_printk(KERN_ERR, "Unsupported family!\n"); | ||
181 | return -EINVAL; | ||
182 | } | ||
183 | return amd64_search_set_scrub_rate(pvt->misc_f3_ctl, bandwidth, | ||
184 | min_scrubrate); | ||
185 | } | 202 | } |
186 | 203 | ||
187 | static int amd64_get_scrub_rate(struct mem_ctl_info *mci, u32 *bw) | 204 | static int amd64_get_scrub_rate(struct mem_ctl_info *mci) |
188 | { | 205 | { |
189 | struct amd64_pvt *pvt = mci->pvt_info; | 206 | struct amd64_pvt *pvt = mci->pvt_info; |
190 | u32 scrubval = 0; | 207 | u32 scrubval = 0; |
191 | int status = -1, i; | 208 | int i, retval = -EINVAL; |
192 | 209 | ||
193 | amd64_read_pci_cfg(pvt->misc_f3_ctl, K8_SCRCTRL, &scrubval); | 210 | amd64_read_pci_cfg(pvt->F3, SCRCTRL, &scrubval); |
194 | 211 | ||
195 | scrubval = scrubval & 0x001F; | 212 | scrubval = scrubval & 0x001F; |
196 | 213 | ||
197 | edac_printk(KERN_DEBUG, EDAC_MC, | ||
198 | "pci-read, sdram scrub control value: %d \n", scrubval); | ||
199 | |||
200 | for (i = 0; i < ARRAY_SIZE(scrubrates); i++) { | 214 | for (i = 0; i < ARRAY_SIZE(scrubrates); i++) { |
201 | if (scrubrates[i].scrubval == scrubval) { | 215 | if (scrubrates[i].scrubval == scrubval) { |
202 | *bw = scrubrates[i].bandwidth; | 216 | retval = scrubrates[i].bandwidth; |
203 | status = 0; | ||
204 | break; | 217 | break; |
205 | } | 218 | } |
206 | } | 219 | } |
207 | 220 | return retval; | |
208 | return status; | ||
209 | } | ||
210 | |||
211 | /* Map from a CSROW entry to the mask entry that operates on it */ | ||
212 | static inline u32 amd64_map_to_dcs_mask(struct amd64_pvt *pvt, int csrow) | ||
213 | { | ||
214 | if (boot_cpu_data.x86 == 0xf && pvt->ext_model < K8_REV_F) | ||
215 | return csrow; | ||
216 | else | ||
217 | return csrow >> 1; | ||
218 | } | ||
219 | |||
220 | /* return the 'base' address the i'th CS entry of the 'dct' DRAM controller */ | ||
221 | static u32 amd64_get_dct_base(struct amd64_pvt *pvt, int dct, int csrow) | ||
222 | { | ||
223 | if (dct == 0) | ||
224 | return pvt->dcsb0[csrow]; | ||
225 | else | ||
226 | return pvt->dcsb1[csrow]; | ||
227 | } | 221 | } |
228 | 222 | ||
229 | /* | 223 | /* |
230 | * Return the 'mask' address the i'th CS entry. This function is needed because | 224 | * returns true if the SysAddr given by sys_addr matches the |
231 | * there number of DCSM registers on Rev E and prior vs Rev F and later is | 225 | * DRAM base/limit associated with node_id |
232 | * different. | ||
233 | */ | 226 | */ |
234 | static u32 amd64_get_dct_mask(struct amd64_pvt *pvt, int dct, int csrow) | 227 | static bool amd64_base_limit_match(struct amd64_pvt *pvt, u64 sys_addr, |
228 | unsigned nid) | ||
235 | { | 229 | { |
236 | if (dct == 0) | 230 | u64 addr; |
237 | return pvt->dcsm0[amd64_map_to_dcs_mask(pvt, csrow)]; | ||
238 | else | ||
239 | return pvt->dcsm1[amd64_map_to_dcs_mask(pvt, csrow)]; | ||
240 | } | ||
241 | |||
242 | |||
243 | /* | ||
244 | * In *base and *limit, pass back the full 40-bit base and limit physical | ||
245 | * addresses for the node given by node_id. This information is obtained from | ||
246 | * DRAM Base (section 3.4.4.1) and DRAM Limit (section 3.4.4.2) registers. The | ||
247 | * base and limit addresses are of type SysAddr, as defined at the start of | ||
248 | * section 3.4.4 (p. 70). They are the lowest and highest physical addresses | ||
249 | * in the address range they represent. | ||
250 | */ | ||
251 | static void amd64_get_base_and_limit(struct amd64_pvt *pvt, int node_id, | ||
252 | u64 *base, u64 *limit) | ||
253 | { | ||
254 | *base = pvt->dram_base[node_id]; | ||
255 | *limit = pvt->dram_limit[node_id]; | ||
256 | } | ||
257 | |||
258 | /* | ||
259 | * Return 1 if the SysAddr given by sys_addr matches the base/limit associated | ||
260 | * with node_id | ||
261 | */ | ||
262 | static int amd64_base_limit_match(struct amd64_pvt *pvt, | ||
263 | u64 sys_addr, int node_id) | ||
264 | { | ||
265 | u64 base, limit, addr; | ||
266 | |||
267 | amd64_get_base_and_limit(pvt, node_id, &base, &limit); | ||
268 | 231 | ||
269 | /* The K8 treats this as a 40-bit value. However, bits 63-40 will be | 232 | /* The K8 treats this as a 40-bit value. However, bits 63-40 will be |
270 | * all ones if the most significant implemented address bit is 1. | 233 | * all ones if the most significant implemented address bit is 1. |
@@ -274,7 +237,8 @@ static int amd64_base_limit_match(struct amd64_pvt *pvt, | |||
274 | */ | 237 | */ |
275 | addr = sys_addr & 0x000000ffffffffffull; | 238 | addr = sys_addr & 0x000000ffffffffffull; |
276 | 239 | ||
277 | return (addr >= base) && (addr <= limit); | 240 | return ((addr >= get_dram_base(pvt, nid)) && |
241 | (addr <= get_dram_limit(pvt, nid))); | ||
278 | } | 242 | } |
279 | 243 | ||
280 | /* | 244 | /* |
@@ -287,7 +251,7 @@ static struct mem_ctl_info *find_mc_by_sys_addr(struct mem_ctl_info *mci, | |||
287 | u64 sys_addr) | 251 | u64 sys_addr) |
288 | { | 252 | { |
289 | struct amd64_pvt *pvt; | 253 | struct amd64_pvt *pvt; |
290 | int node_id; | 254 | unsigned node_id; |
291 | u32 intlv_en, bits; | 255 | u32 intlv_en, bits; |
292 | 256 | ||
293 | /* | 257 | /* |
@@ -301,10 +265,10 @@ static struct mem_ctl_info *find_mc_by_sys_addr(struct mem_ctl_info *mci, | |||
301 | * registers. Therefore we arbitrarily choose to read it from the | 265 | * registers. Therefore we arbitrarily choose to read it from the |
302 | * register for node 0. | 266 | * register for node 0. |
303 | */ | 267 | */ |
304 | intlv_en = pvt->dram_IntlvEn[0]; | 268 | intlv_en = dram_intlv_en(pvt, 0); |
305 | 269 | ||
306 | if (intlv_en == 0) { | 270 | if (intlv_en == 0) { |
307 | for (node_id = 0; node_id < DRAM_REG_COUNT; node_id++) { | 271 | for (node_id = 0; node_id < DRAM_RANGES; node_id++) { |
308 | if (amd64_base_limit_match(pvt, sys_addr, node_id)) | 272 | if (amd64_base_limit_match(pvt, sys_addr, node_id)) |
309 | goto found; | 273 | goto found; |
310 | } | 274 | } |
@@ -314,34 +278,30 @@ static struct mem_ctl_info *find_mc_by_sys_addr(struct mem_ctl_info *mci, | |||
314 | if (unlikely((intlv_en != 0x01) && | 278 | if (unlikely((intlv_en != 0x01) && |
315 | (intlv_en != 0x03) && | 279 | (intlv_en != 0x03) && |
316 | (intlv_en != 0x07))) { | 280 | (intlv_en != 0x07))) { |
317 | amd64_printk(KERN_WARNING, "junk value of 0x%x extracted from " | 281 | amd64_warn("DRAM Base[IntlvEn] junk value: 0x%x, BIOS bug?\n", intlv_en); |
318 | "IntlvEn field of DRAM Base Register for node 0: " | ||
319 | "this probably indicates a BIOS bug.\n", intlv_en); | ||
320 | return NULL; | 282 | return NULL; |
321 | } | 283 | } |
322 | 284 | ||
323 | bits = (((u32) sys_addr) >> 12) & intlv_en; | 285 | bits = (((u32) sys_addr) >> 12) & intlv_en; |
324 | 286 | ||
325 | for (node_id = 0; ; ) { | 287 | for (node_id = 0; ; ) { |
326 | if ((pvt->dram_IntlvSel[node_id] & intlv_en) == bits) | 288 | if ((dram_intlv_sel(pvt, node_id) & intlv_en) == bits) |
327 | break; /* intlv_sel field matches */ | 289 | break; /* intlv_sel field matches */ |
328 | 290 | ||
329 | if (++node_id >= DRAM_REG_COUNT) | 291 | if (++node_id >= DRAM_RANGES) |
330 | goto err_no_match; | 292 | goto err_no_match; |
331 | } | 293 | } |
332 | 294 | ||
333 | /* sanity test for sys_addr */ | 295 | /* sanity test for sys_addr */ |
334 | if (unlikely(!amd64_base_limit_match(pvt, sys_addr, node_id))) { | 296 | if (unlikely(!amd64_base_limit_match(pvt, sys_addr, node_id))) { |
335 | amd64_printk(KERN_WARNING, | 297 | amd64_warn("%s: sys_addr 0x%llx falls outside base/limit address" |
336 | "%s(): sys_addr 0x%llx falls outside base/limit " | 298 | "range for node %d with node interleaving enabled.\n", |
337 | "address range for node %d with node interleaving " | 299 | __func__, sys_addr, node_id); |
338 | "enabled.\n", | ||
339 | __func__, sys_addr, node_id); | ||
340 | return NULL; | 300 | return NULL; |
341 | } | 301 | } |
342 | 302 | ||
343 | found: | 303 | found: |
344 | return edac_mc_find(node_id); | 304 | return edac_mc_find((int)node_id); |
345 | 305 | ||
346 | err_no_match: | 306 | err_no_match: |
347 | debugf2("sys_addr 0x%lx doesn't match any node\n", | 307 | debugf2("sys_addr 0x%lx doesn't match any node\n", |
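
Node interleaving as decoded above: when DRAM Base[IntlvEn] is 0x1, 0x3, or 0x7, address bits [14:12] (masked by IntlvEn) must equal a node's IntlvSel field for that node to own the address. A small standalone model of the selection, mirroring the loop in find_mc_by_sys_addr():

```c
#include <stdio.h>
#include <stdint.h>

static int select_node(uint64_t sys_addr, unsigned intlv_en,
		       const unsigned *intlv_sel, int nodes)
{
	/* same extraction as the driver: bits [14:12], masked by IntlvEn */
	unsigned bits = ((uint32_t)sys_addr >> 12) & intlv_en;
	int nid;

	for (nid = 0; nid < nodes; nid++)
		if ((intlv_sel[nid] & intlv_en) == bits)
			return nid;
	return -1;	/* no node's IntlvSel matches */
}

int main(void)
{
	/* two-way interleave: even 4K pages -> node 0, odd -> node 1 */
	unsigned sel[2] = { 0, 1 };

	printf("0x0000 -> node %d\n", select_node(0x0000, 1, sel, 2));
	printf("0x1000 -> node %d\n", select_node(0x1000, 1, sel, 2));
	return 0;
}
```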
@@ -351,37 +311,50 @@ err_no_match: | |||
351 | } | 311 | } |
352 | 312 | ||
353 | /* | 313 | /* |
354 | * Extract the DRAM CS base address from selected csrow register. | 314 | * compute the CS base address of the @csrow on the DRAM controller @dct. |
355 | */ | 315 | * For details see F2x[5C:40] in the processor's BKDG |
356 | static u64 base_from_dct_base(struct amd64_pvt *pvt, int csrow) | ||
357 | { | ||
358 | return ((u64) (amd64_get_dct_base(pvt, 0, csrow) & pvt->dcsb_base)) << | ||
359 | pvt->dcs_shift; | ||
360 | } | ||
361 | |||
362 | /* | ||
363 | * Extract the mask from the dcsb0[csrow] entry in a CPU revision-specific way. | ||
364 | */ | 316 | */ |
365 | static u64 mask_from_dct_mask(struct amd64_pvt *pvt, int csrow) | 317 | static void get_cs_base_and_mask(struct amd64_pvt *pvt, int csrow, u8 dct, |
318 | u64 *base, u64 *mask) | ||
366 | { | 319 | { |
367 | u64 dcsm_bits, other_bits; | 320 | u64 csbase, csmask, base_bits, mask_bits; |
368 | u64 mask; | 321 | u8 addr_shift; |
369 | 322 | ||
370 | /* Extract bits from DRAM CS Mask. */ | 323 | if (boot_cpu_data.x86 == 0xf && pvt->ext_model < K8_REV_F) { |
371 | dcsm_bits = amd64_get_dct_mask(pvt, 0, csrow) & pvt->dcsm_mask; | 324 | csbase = pvt->csels[dct].csbases[csrow]; |
325 | csmask = pvt->csels[dct].csmasks[csrow]; | ||
326 | base_bits = GENMASK(21, 31) | GENMASK(9, 15); | ||
327 | mask_bits = GENMASK(21, 29) | GENMASK(9, 15); | ||
328 | addr_shift = 4; | ||
329 | } else { | ||
330 | csbase = pvt->csels[dct].csbases[csrow]; | ||
331 | csmask = pvt->csels[dct].csmasks[csrow >> 1]; | ||
332 | addr_shift = 8; | ||
372 | 333 | ||
373 | other_bits = pvt->dcsm_mask; | 334 | if (boot_cpu_data.x86 == 0x15) |
374 | other_bits = ~(other_bits << pvt->dcs_shift); | 335 | base_bits = mask_bits = GENMASK(19,30) | GENMASK(5,13); |
336 | else | ||
337 | base_bits = mask_bits = GENMASK(19,28) | GENMASK(5,13); | ||
338 | } | ||
375 | 339 | ||
376 | /* | 340 | *base = (csbase & base_bits) << addr_shift; |
377 | * The extracted bits from DCSM belong in the spaces represented by | ||
378 | * the cleared bits in other_bits. | ||
379 | */ | ||
380 | mask = (dcsm_bits << pvt->dcs_shift) | other_bits; | ||
381 | 341 | ||
382 | return mask; | 342 | *mask = ~0ULL; |
343 | /* poke holes for the csmask */ | ||
344 | *mask &= ~(mask_bits << addr_shift); | ||
345 | /* OR them in */ | ||
346 | *mask |= (csmask & mask_bits) << addr_shift; | ||
383 | } | 347 | } |
384 | 348 | ||
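
Note that GENMASK() here is amd64_edac's own (lo, hi) helper, not today's include/linux/bits.h GENMASK(hi, lo); the definition below is reproduced from memory. With it, the "poke holes" mask construction in get_cs_base_and_mask() can be exercised standalone (example register value and Rev-F shift are assumptions for illustration):

```c
#include <stdio.h>
#include <stdint.h>

/* amd64_edac's local macro: arguments are (lo, hi), inclusive */
#define GENMASK(lo, hi) (((1ULL << ((hi) - (lo) + 1)) - 1) << (lo))

int main(void)
{
	uint64_t csmask	    = 0x00ffe001;	/* example DCSM value */
	uint64_t mask_bits  = GENMASK(19, 28) | GENMASK(5, 13);
	unsigned addr_shift = 8;		/* Rev F and later */
	uint64_t mask;

	mask  = ~0ULL;
	mask &= ~(mask_bits << addr_shift);	    /* poke holes for csmask */
	mask |= (csmask & mask_bits) << addr_shift; /* OR the register bits in */

	printf("mask = 0x%016llx\n", (unsigned long long)mask);
	return 0;
}
```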
349 | #define for_each_chip_select(i, dct, pvt) \ | ||
350 | for (i = 0; i < pvt->csels[dct].b_cnt; i++) | ||
351 | |||
352 | #define chip_select_base(i, dct, pvt) \ | ||
353 | pvt->csels[dct].csbases[i] | ||
354 | |||
355 | #define for_each_chip_select_mask(i, dct, pvt) \ | ||
356 | for (i = 0; i < pvt->csels[dct].m_cnt; i++) | ||
357 | |||
385 | /* | 358 | /* |
386 | * @input_addr is an InputAddr associated with the node given by mci. Return the | 359 | * @input_addr is an InputAddr associated with the node given by mci. Return the |
387 | * csrow that input_addr maps to, or -1 on failure (no csrow claims input_addr). | 360 | * csrow that input_addr maps to, or -1 on failure (no csrow claims input_addr). |
@@ -394,19 +367,13 @@ static int input_addr_to_csrow(struct mem_ctl_info *mci, u64 input_addr) | |||
394 | 367 | ||
395 | pvt = mci->pvt_info; | 368 | pvt = mci->pvt_info; |
396 | 369 | ||
397 | /* | 370 | for_each_chip_select(csrow, 0, pvt) { |
398 | * Here we use the DRAM CS Base and DRAM CS Mask registers. For each CS | 371 | if (!csrow_enabled(csrow, 0, pvt)) |
399 | * base/mask register pair, test the condition shown near the start of | ||
400 | * section 3.5.4 (p. 84, BKDG #26094, K8, revA-E). | ||
401 | */ | ||
402 | for (csrow = 0; csrow < pvt->cs_count; csrow++) { | ||
403 | |||
404 | /* This DRAM chip select is disabled on this node */ | ||
405 | if ((pvt->dcsb0[csrow] & K8_DCSB_CS_ENABLE) == 0) | ||
406 | continue; | 372 | continue; |
407 | 373 | ||
408 | base = base_from_dct_base(pvt, csrow); | 374 | get_cs_base_and_mask(pvt, csrow, 0, &base, &mask); |
409 | mask = ~mask_from_dct_mask(pvt, csrow); | 375 | |
376 | mask = ~mask; | ||
410 | 377 | ||
411 | if ((input_addr & mask) == (base & mask)) { | 378 | if ((input_addr & mask) == (base & mask)) { |
412 | debugf2("InputAddr 0x%lx matches csrow %d (node %d)\n", | 379 | debugf2("InputAddr 0x%lx matches csrow %d (node %d)\n", |
@@ -416,7 +383,6 @@ static int input_addr_to_csrow(struct mem_ctl_info *mci, u64 input_addr) | |||
416 | return csrow; | 383 | return csrow; |
417 | } | 384 | } |
418 | } | 385 | } |
419 | |||
420 | debugf2("no matching csrow for InputAddr 0x%lx (MC node %d)\n", | 386 | debugf2("no matching csrow for InputAddr 0x%lx (MC node %d)\n", |
421 | (unsigned long)input_addr, pvt->mc_node_id); | 387 | (unsigned long)input_addr, pvt->mc_node_id); |
422 | 388 | ||
@@ -424,19 +390,6 @@ static int input_addr_to_csrow(struct mem_ctl_info *mci, u64 input_addr) | |||
424 | } | 390 | } |
425 | 391 | ||
426 | /* | 392 | /* |
427 | * Return the base value defined by the DRAM Base register for the node | ||
428 | * represented by mci. This function returns the full 40-bit value despite the | ||
429 | * fact that the register only stores bits 39-24 of the value. See section | ||
430 | * 3.4.4.1 (BKDG #26094, K8, revA-E) | ||
431 | */ | ||
432 | static inline u64 get_dram_base(struct mem_ctl_info *mci) | ||
433 | { | ||
434 | struct amd64_pvt *pvt = mci->pvt_info; | ||
435 | |||
436 | return pvt->dram_base[pvt->mc_node_id]; | ||
437 | } | ||
438 | |||
439 | /* | ||
440 | * Obtain info from the DRAM Hole Address Register (section 3.4.8, pub #26094) | 393 | * Obtain info from the DRAM Hole Address Register (section 3.4.8, pub #26094) |
441 | * for the node represented by mci. Info is passed back in *hole_base, | 394 | * for the node represented by mci. Info is passed back in *hole_base, |
442 | * *hole_offset, and *hole_size. Function returns 0 if info is valid or 1 if | 395 | * *hole_offset, and *hole_size. Function returns 0 if info is valid or 1 if |
@@ -465,14 +418,13 @@ int amd64_get_dram_hole_info(struct mem_ctl_info *mci, u64 *hole_base, | |||
465 | return 1; | 418 | return 1; |
466 | } | 419 | } |
467 | 420 | ||
468 | /* only valid for Fam10h */ | 421 | /* valid for Fam10h and above */ |
469 | if (boot_cpu_data.x86 == 0x10 && | 422 | if (boot_cpu_data.x86 >= 0x10 && !dhar_mem_hoist_valid(pvt)) { |
470 | (pvt->dhar & F10_DRAM_MEM_HOIST_VALID) == 0) { | ||
471 | debugf1(" Dram Memory Hoisting is DISABLED on this system\n"); | 423 | debugf1(" Dram Memory Hoisting is DISABLED on this system\n"); |
472 | return 1; | 424 | return 1; |
473 | } | 425 | } |
474 | 426 | ||
475 | if ((pvt->dhar & DHAR_VALID) == 0) { | 427 | if (!dhar_valid(pvt)) { |
476 | debugf1(" Dram Memory Hoisting is DISABLED on this node %d\n", | 428 | debugf1(" Dram Memory Hoisting is DISABLED on this node %d\n", |
477 | pvt->mc_node_id); | 429 | pvt->mc_node_id); |
478 | return 1; | 430 | return 1; |
@@ -496,15 +448,15 @@ int amd64_get_dram_hole_info(struct mem_ctl_info *mci, u64 *hole_base, | |||
496 | * addresses in the hole so that they start at 0x100000000. | 448 | * addresses in the hole so that they start at 0x100000000. |
497 | */ | 449 | */ |
498 | 450 | ||
499 | base = dhar_base(pvt->dhar); | 451 | base = dhar_base(pvt); |
500 | 452 | ||
501 | *hole_base = base; | 453 | *hole_base = base; |
502 | *hole_size = (0x1ull << 32) - base; | 454 | *hole_size = (0x1ull << 32) - base; |
503 | 455 | ||
504 | if (boot_cpu_data.x86 > 0xf) | 456 | if (boot_cpu_data.x86 > 0xf) |
505 | *hole_offset = f10_dhar_offset(pvt->dhar); | 457 | *hole_offset = f10_dhar_offset(pvt); |
506 | else | 458 | else |
507 | *hole_offset = k8_dhar_offset(pvt->dhar); | 459 | *hole_offset = k8_dhar_offset(pvt); |
508 | 460 | ||
509 | debugf1(" DHAR info for node %d base 0x%lx offset 0x%lx size 0x%lx\n", | 461 | debugf1(" DHAR info for node %d base 0x%lx offset 0x%lx size 0x%lx\n", |
510 | pvt->mc_node_id, (unsigned long)*hole_base, | 462 | pvt->mc_node_id, (unsigned long)*hole_base, |
@@ -545,10 +497,11 @@ EXPORT_SYMBOL_GPL(amd64_get_dram_hole_info); | |||
545 | */ | 497 | */ |
546 | static u64 sys_addr_to_dram_addr(struct mem_ctl_info *mci, u64 sys_addr) | 498 | static u64 sys_addr_to_dram_addr(struct mem_ctl_info *mci, u64 sys_addr) |
547 | { | 499 | { |
500 | struct amd64_pvt *pvt = mci->pvt_info; | ||
548 | u64 dram_base, hole_base, hole_offset, hole_size, dram_addr; | 501 | u64 dram_base, hole_base, hole_offset, hole_size, dram_addr; |
549 | int ret = 0; | 502 | int ret = 0; |
550 | 503 | ||
551 | dram_base = get_dram_base(mci); | 504 | dram_base = get_dram_base(pvt, pvt->mc_node_id); |
552 | 505 | ||
553 | ret = amd64_get_dram_hole_info(mci, &hole_base, &hole_offset, | 506 | ret = amd64_get_dram_hole_info(mci, &hole_base, &hole_offset, |
554 | &hole_size); | 507 | &hole_size); |
@@ -576,7 +529,7 @@ static u64 sys_addr_to_dram_addr(struct mem_ctl_info *mci, u64 sys_addr) | |||
576 | * section 3.4.2 of AMD publication 24592: AMD x86-64 Architecture | 529 | * section 3.4.2 of AMD publication 24592: AMD x86-64 Architecture |
577 | * Programmer's Manual Volume 1 Application Programming. | 530 | * Programmer's Manual Volume 1 Application Programming. |
578 | */ | 531 | */ |
579 | dram_addr = (sys_addr & 0xffffffffffull) - dram_base; | 532 | dram_addr = (sys_addr & GENMASK(0, 39)) - dram_base; |
580 | 533 | ||
581 | debugf2("using DRAM Base register to translate SysAddr 0x%lx to " | 534 | debugf2("using DRAM Base register to translate SysAddr 0x%lx to " |
582 | "DramAddr 0x%lx\n", (unsigned long)sys_addr, | 535 | "DramAddr 0x%lx\n", (unsigned long)sys_addr, |
@@ -612,9 +565,9 @@ static u64 dram_addr_to_input_addr(struct mem_ctl_info *mci, u64 dram_addr) | |||
612 | * See the start of section 3.4.4 (p. 70, BKDG #26094, K8, revA-E) | 565 | * See the start of section 3.4.4 (p. 70, BKDG #26094, K8, revA-E) |
613 | * concerning translating a DramAddr to an InputAddr. | 566 | * concerning translating a DramAddr to an InputAddr. |
614 | */ | 567 | */ |
615 | intlv_shift = num_node_interleave_bits(pvt->dram_IntlvEn[0]); | 568 | intlv_shift = num_node_interleave_bits(dram_intlv_en(pvt, 0)); |
616 | input_addr = ((dram_addr >> intlv_shift) & 0xffffff000ull) + | 569 | input_addr = ((dram_addr >> intlv_shift) & GENMASK(12, 35)) + |
617 | (dram_addr & 0xfff); | 570 | (dram_addr & 0xfff); |
618 | 571 | ||
619 | debugf2(" Intlv Shift=%d DramAddr=0x%lx maps to InputAddr=0x%lx\n", | 572 | debugf2(" Intlv Shift=%d DramAddr=0x%lx maps to InputAddr=0x%lx\n", |
620 | intlv_shift, (unsigned long)dram_addr, | 573 | intlv_shift, (unsigned long)dram_addr, |
@@ -648,7 +601,7 @@ static u64 sys_addr_to_input_addr(struct mem_ctl_info *mci, u64 sys_addr) | |||
648 | static u64 input_addr_to_dram_addr(struct mem_ctl_info *mci, u64 input_addr) | 601 | static u64 input_addr_to_dram_addr(struct mem_ctl_info *mci, u64 input_addr) |
649 | { | 602 | { |
650 | struct amd64_pvt *pvt; | 603 | struct amd64_pvt *pvt; |
651 | int node_id, intlv_shift; | 604 | unsigned node_id, intlv_shift; |
652 | u64 bits, dram_addr; | 605 | u64 bits, dram_addr; |
653 | u32 intlv_sel; | 606 | u32 intlv_sel; |
654 | 607 | ||
@@ -662,10 +615,10 @@ static u64 input_addr_to_dram_addr(struct mem_ctl_info *mci, u64 input_addr) | |||
662 | */ | 615 | */ |
663 | pvt = mci->pvt_info; | 616 | pvt = mci->pvt_info; |
664 | node_id = pvt->mc_node_id; | 617 | node_id = pvt->mc_node_id; |
665 | BUG_ON((node_id < 0) || (node_id > 7)); | ||
666 | 618 | ||
667 | intlv_shift = num_node_interleave_bits(pvt->dram_IntlvEn[0]); | 619 | BUG_ON(node_id > 7); |
668 | 620 | ||
621 | intlv_shift = num_node_interleave_bits(dram_intlv_en(pvt, 0)); | ||
669 | if (intlv_shift == 0) { | 622 | if (intlv_shift == 0) { |
670 | debugf1(" InputAddr 0x%lx translates to DramAddr of " | 623 | debugf1(" InputAddr 0x%lx translates to DramAddr of " |
671 | "same value\n", (unsigned long)input_addr); | 624 | "same value\n", (unsigned long)input_addr); |
@@ -673,10 +626,10 @@ static u64 input_addr_to_dram_addr(struct mem_ctl_info *mci, u64 input_addr) | |||
673 | return input_addr; | 626 | return input_addr; |
674 | } | 627 | } |
675 | 628 | ||
676 | bits = ((input_addr & 0xffffff000ull) << intlv_shift) + | 629 | bits = ((input_addr & GENMASK(12, 35)) << intlv_shift) + |
677 | (input_addr & 0xfff); | 630 | (input_addr & 0xfff); |
678 | 631 | ||
679 | intlv_sel = pvt->dram_IntlvSel[node_id] & ((1 << intlv_shift) - 1); | 632 | intlv_sel = dram_intlv_sel(pvt, node_id) & ((1 << intlv_shift) - 1); |
680 | dram_addr = bits + (intlv_sel << 12); | 633 | dram_addr = bits + (intlv_sel << 12); |
681 | 634 | ||
682 | debugf1("InputAddr 0x%lx translates to DramAddr 0x%lx " | 635 | debugf1("InputAddr 0x%lx translates to DramAddr 0x%lx " |
@@ -693,7 +646,7 @@ static u64 input_addr_to_dram_addr(struct mem_ctl_info *mci, u64 input_addr) | |||
693 | static u64 dram_addr_to_sys_addr(struct mem_ctl_info *mci, u64 dram_addr) | 646 | static u64 dram_addr_to_sys_addr(struct mem_ctl_info *mci, u64 dram_addr) |
694 | { | 647 | { |
695 | struct amd64_pvt *pvt = mci->pvt_info; | 648 | struct amd64_pvt *pvt = mci->pvt_info; |
696 | u64 hole_base, hole_offset, hole_size, base, limit, sys_addr; | 649 | u64 hole_base, hole_offset, hole_size, base, sys_addr; |
697 | int ret = 0; | 650 | int ret = 0; |
698 | 651 | ||
699 | ret = amd64_get_dram_hole_info(mci, &hole_base, &hole_offset, | 652 | ret = amd64_get_dram_hole_info(mci, &hole_base, &hole_offset, |
@@ -711,7 +664,7 @@ static u64 dram_addr_to_sys_addr(struct mem_ctl_info *mci, u64 dram_addr) | |||
711 | } | 664 | } |
712 | } | 665 | } |
713 | 666 | ||
714 | amd64_get_base_and_limit(pvt, pvt->mc_node_id, &base, &limit); | 667 | base = get_dram_base(pvt, pvt->mc_node_id); |
715 | sys_addr = dram_addr + base; | 668 | sys_addr = dram_addr + base; |
716 | 669 | ||
717 | /* | 670 | /* |
@@ -756,13 +709,12 @@ static void find_csrow_limits(struct mem_ctl_info *mci, int csrow, | |||
756 | u64 base, mask; | 709 | u64 base, mask; |
757 | 710 | ||
758 | pvt = mci->pvt_info; | 711 | pvt = mci->pvt_info; |
759 | BUG_ON((csrow < 0) || (csrow >= pvt->cs_count)); | 712 | BUG_ON((csrow < 0) || (csrow >= pvt->csels[0].b_cnt)); |
760 | 713 | ||
761 | base = base_from_dct_base(pvt, csrow); | 714 | get_cs_base_and_mask(pvt, csrow, 0, &base, &mask); |
762 | mask = mask_from_dct_mask(pvt, csrow); | ||
763 | 715 | ||
764 | *input_addr_min = base & ~mask; | 716 | *input_addr_min = base & ~mask; |
765 | *input_addr_max = base | mask | pvt->dcs_mask_notused; | 717 | *input_addr_max = base | mask; |
766 | } | 718 | } |
767 | 719 | ||
768 | /* Map the Error address to a PAGE and PAGE OFFSET. */ | 720 | /* Map the Error address to a PAGE and PAGE OFFSET. */ |
@@ -788,41 +740,20 @@ static int sys_addr_to_csrow(struct mem_ctl_info *mci, u64 sys_addr) | |||
788 | csrow = input_addr_to_csrow(mci, sys_addr_to_input_addr(mci, sys_addr)); | 740 | csrow = input_addr_to_csrow(mci, sys_addr_to_input_addr(mci, sys_addr)); |
789 | 741 | ||
790 | if (csrow == -1) | 742 | if (csrow == -1) |
791 | amd64_mc_printk(mci, KERN_ERR, | 743 | amd64_mc_err(mci, "Failed to translate InputAddr to csrow for " |
792 | "Failed to translate InputAddr to csrow for " | 744 | "address 0x%lx\n", (unsigned long)sys_addr); |
793 | "address 0x%lx\n", (unsigned long)sys_addr); | ||
794 | return csrow; | 745 | return csrow; |
795 | } | 746 | } |
796 | 747 | ||
797 | static int get_channel_from_ecc_syndrome(struct mem_ctl_info *, u16); | 748 | static int get_channel_from_ecc_syndrome(struct mem_ctl_info *, u16); |
798 | 749 | ||
799 | static u16 extract_syndrome(struct err_regs *err) | ||
800 | { | ||
801 | return ((err->nbsh >> 15) & 0xff) | ((err->nbsl >> 16) & 0xff00); | ||
802 | } | ||
803 | |||
804 | static void amd64_cpu_display_info(struct amd64_pvt *pvt) | ||
805 | { | ||
806 | if (boot_cpu_data.x86 == 0x11) | ||
807 | edac_printk(KERN_DEBUG, EDAC_MC, "F11h CPU detected\n"); | ||
808 | else if (boot_cpu_data.x86 == 0x10) | ||
809 | edac_printk(KERN_DEBUG, EDAC_MC, "F10h CPU detected\n"); | ||
810 | else if (boot_cpu_data.x86 == 0xf) | ||
811 | edac_printk(KERN_DEBUG, EDAC_MC, "%s detected\n", | ||
812 | (pvt->ext_model >= K8_REV_F) ? | ||
813 | "Rev F or later" : "Rev E or earlier"); | ||
814 | else | ||
815 | /* we'll hardly ever ever get here */ | ||
816 | edac_printk(KERN_ERR, EDAC_MC, "Unknown cpu!\n"); | ||
817 | } | ||
818 | |||
819 | /* | 750 | /* |
820 | * Determine if the DIMMs have ECC enabled. ECC is enabled ONLY if all the DIMMs | 751 | * Determine if the DIMMs have ECC enabled. ECC is enabled ONLY if all the DIMMs |
821 | * are ECC capable. | 752 | * are ECC capable. |
822 | */ | 753 | */ |
823 | static enum edac_type amd64_determine_edac_cap(struct amd64_pvt *pvt) | 754 | static enum edac_type amd64_determine_edac_cap(struct amd64_pvt *pvt) |
824 | { | 755 | { |
825 | int bit; | 756 | u8 bit; |
826 | enum dev_type edac_cap = EDAC_FLAG_NONE; | 757 | enum dev_type edac_cap = EDAC_FLAG_NONE; |
827 | 758 | ||
828 | bit = (boot_cpu_data.x86 > 0xf || pvt->ext_model >= K8_REV_F) | 759 | bit = (boot_cpu_data.x86 > 0xf || pvt->ext_model >= K8_REV_F) |
@@ -835,8 +766,7 @@ static enum edac_type amd64_determine_edac_cap(struct amd64_pvt *pvt) | |||
835 | return edac_cap; | 766 | return edac_cap; |
836 | } | 767 | } |
837 | 768 | ||
838 | 769 | static void amd64_debug_display_dimm_sizes(struct amd64_pvt *, u8); | |
839 | static void amd64_debug_display_dimm_sizes(int ctrl, struct amd64_pvt *pvt); | ||
840 | 770 | ||
841 | static void amd64_dump_dramcfg_low(u32 dclr, int chan) | 771 | static void amd64_dump_dramcfg_low(u32 dclr, int chan) |
842 | { | 772 | { |
@@ -849,8 +779,9 @@ static void amd64_dump_dramcfg_low(u32 dclr, int chan) | |||
849 | debugf1(" PAR/ERR parity: %s\n", | 779 | debugf1(" PAR/ERR parity: %s\n", |
850 | (dclr & BIT(8)) ? "enabled" : "disabled"); | 780 | (dclr & BIT(8)) ? "enabled" : "disabled"); |
851 | 781 | ||
852 | debugf1(" DCT 128bit mode width: %s\n", | 782 | if (boot_cpu_data.x86 == 0x10) |
853 | (dclr & BIT(11)) ? "128b" : "64b"); | 783 | debugf1(" DCT 128bit mode width: %s\n", |
784 | (dclr & BIT(11)) ? "128b" : "64b"); | ||
854 | 785 | ||
855 | debugf1(" x4 logical DIMMs present: L0: %s L1: %s L2: %s L3: %s\n", | 786 | debugf1(" x4 logical DIMMs present: L0: %s L1: %s L2: %s L3: %s\n", |
856 | (dclr & BIT(12)) ? "yes" : "no", | 787 | (dclr & BIT(12)) ? "yes" : "no", |
@@ -860,18 +791,16 @@ static void amd64_dump_dramcfg_low(u32 dclr, int chan) | |||
860 | } | 791 | } |
861 | 792 | ||
862 | /* Display and decode various NB registers for debug purposes. */ | 793 | /* Display and decode various NB registers for debug purposes. */ |
863 | static void amd64_dump_misc_regs(struct amd64_pvt *pvt) | 794 | static void dump_misc_regs(struct amd64_pvt *pvt) |
864 | { | 795 | { |
865 | int ganged; | ||
866 | |||
867 | debugf1("F3xE8 (NB Cap): 0x%08x\n", pvt->nbcap); | 796 | debugf1("F3xE8 (NB Cap): 0x%08x\n", pvt->nbcap); |
868 | 797 | ||
869 | debugf1(" NB two channel DRAM capable: %s\n", | 798 | debugf1(" NB two channel DRAM capable: %s\n", |
870 | (pvt->nbcap & K8_NBCAP_DCT_DUAL) ? "yes" : "no"); | 799 | (pvt->nbcap & NBCAP_DCT_DUAL) ? "yes" : "no"); |
871 | 800 | ||
872 | debugf1(" ECC capable: %s, ChipKill ECC capable: %s\n", | 801 | debugf1(" ECC capable: %s, ChipKill ECC capable: %s\n", |
873 | (pvt->nbcap & K8_NBCAP_SECDED) ? "yes" : "no", | 802 | (pvt->nbcap & NBCAP_SECDED) ? "yes" : "no", |
874 | (pvt->nbcap & K8_NBCAP_CHIPKILL) ? "yes" : "no"); | 803 | (pvt->nbcap & NBCAP_CHIPKILL) ? "yes" : "no"); |
875 | 804 | ||
876 | amd64_dump_dramcfg_low(pvt->dclr0, 0); | 805 | amd64_dump_dramcfg_low(pvt->dclr0, 0); |
877 | 806 | ||
@@ -879,154 +808,95 @@ static void amd64_dump_misc_regs(struct amd64_pvt *pvt) | |||
879 | 808 | ||
880 | debugf1("F1xF0 (DRAM Hole Address): 0x%08x, base: 0x%08x, " | 809 | debugf1("F1xF0 (DRAM Hole Address): 0x%08x, base: 0x%08x, " |
881 | "offset: 0x%08x\n", | 810 | "offset: 0x%08x\n", |
882 | pvt->dhar, | 811 | pvt->dhar, dhar_base(pvt), |
883 | dhar_base(pvt->dhar), | 812 | (boot_cpu_data.x86 == 0xf) ? k8_dhar_offset(pvt) |
884 | (boot_cpu_data.x86 == 0xf) ? k8_dhar_offset(pvt->dhar) | 813 | : f10_dhar_offset(pvt)); |
885 | : f10_dhar_offset(pvt->dhar)); | 814 | |
815 | debugf1(" DramHoleValid: %s\n", dhar_valid(pvt) ? "yes" : "no"); | ||
886 | 816 | ||
887 | debugf1(" DramHoleValid: %s\n", | 817 | amd64_debug_display_dimm_sizes(pvt, 0); |
888 | (pvt->dhar & DHAR_VALID) ? "yes" : "no"); | ||
889 | 818 | ||
890 | /* everything below this point is Fam10h and above */ | 819 | /* everything below this point is Fam10h and above */ |
891 | if (boot_cpu_data.x86 == 0xf) { | 820 | if (boot_cpu_data.x86 == 0xf) |
892 | amd64_debug_display_dimm_sizes(0, pvt); | ||
893 | return; | 821 | return; |
894 | } | ||
895 | 822 | ||
896 | amd64_printk(KERN_INFO, "using %s syndromes.\n", | 823 | amd64_debug_display_dimm_sizes(pvt, 1); |
897 | ((pvt->syn_type == 8) ? "x8" : "x4")); | 824 | |
825 | amd64_info("using %s syndromes.\n", ((pvt->ecc_sym_sz == 8) ? "x8" : "x4")); | ||
898 | 826 | ||
899 | /* Only if NOT ganged does dclr1 have valid info */ | 827 | /* Only if NOT ganged does dclr1 have valid info */ |
900 | if (!dct_ganging_enabled(pvt)) | 828 | if (!dct_ganging_enabled(pvt)) |
901 | amd64_dump_dramcfg_low(pvt->dclr1, 1); | 829 | amd64_dump_dramcfg_low(pvt->dclr1, 1); |
902 | |||
903 | /* | ||
904 | * Determine if ganged and then dump memory sizes for first controller, | ||
905 | * and if NOT ganged dump info for 2nd controller. | ||
906 | */ | ||
907 | ganged = dct_ganging_enabled(pvt); | ||
908 | |||
909 | amd64_debug_display_dimm_sizes(0, pvt); | ||
910 | |||
911 | if (!ganged) | ||
912 | amd64_debug_display_dimm_sizes(1, pvt); | ||
913 | } | ||
914 | |||
915 | /* Read in both of DBAM registers */ | ||
916 | static void amd64_read_dbam_reg(struct amd64_pvt *pvt) | ||
917 | { | ||
918 | amd64_read_pci_cfg(pvt->dram_f2_ctl, DBAM0, &pvt->dbam0); | ||
919 | |||
920 | if (boot_cpu_data.x86 >= 0x10) | ||
921 | amd64_read_pci_cfg(pvt->dram_f2_ctl, DBAM1, &pvt->dbam1); | ||
922 | } | 830 | } |
923 | 831 | ||
924 | /* | 832 | /* |
925 | * NOTE: CPU Revision Dependent code: Rev E and Rev F | 833 | * see BKDG, F2x[1,0][5C:40], F2[1,0][6C:60] |
926 | * | ||
927 | * Set the DCSB and DCSM mask values depending on the CPU revision value. Also | ||
928 | * set the shift factor for the DCSB and DCSM values. | ||
929 | * | ||
930 | * ->dcs_mask_notused, RevE: | ||
931 | * | ||
932 | * To find the max InputAddr for the csrow, start with the base address and set | ||
933 | * all bits that are "don't care" bits in the test at the start of section | ||
934 | * 3.5.4 (p. 84). | ||
935 | * | ||
936 | * The "don't care" bits are all set bits in the mask and all bits in the gaps | ||
937 | * between bit ranges [35:25] and [19:13]. The value REV_E_DCS_NOTUSED_BITS | ||
938 | * represents bits [24:20] and [12:0], which are all bits in the above-mentioned | ||
939 | * gaps. | ||
940 | * | ||
941 | * ->dcs_mask_notused, RevF and later: | ||
942 | * | ||
943 | * To find the max InputAddr for the csrow, start with the base address and set | ||
944 | * all bits that are "don't care" bits in the test at the start of NPT section | ||
945 | * 4.5.4 (p. 87). | ||
946 | * | ||
947 | * The "don't care" bits are all set bits in the mask and all bits in the gaps | ||
948 | * between bit ranges [36:27] and [21:13]. | ||
949 | * | ||
950 | * The value REV_F_F1Xh_DCS_NOTUSED_BITS represents bits [26:22] and [12:0], | ||
951 | * which are all bits in the above-mentioned gaps. | ||
952 | */ | 834 | */ |
953 | static void amd64_set_dct_base_and_mask(struct amd64_pvt *pvt) | 835 | static void prep_chip_selects(struct amd64_pvt *pvt) |
954 | { | 836 | { |
955 | |||
956 | if (boot_cpu_data.x86 == 0xf && pvt->ext_model < K8_REV_F) { | 837 | if (boot_cpu_data.x86 == 0xf && pvt->ext_model < K8_REV_F) { |
957 | pvt->dcsb_base = REV_E_DCSB_BASE_BITS; | 838 | pvt->csels[0].b_cnt = pvt->csels[1].b_cnt = 8; |
958 | pvt->dcsm_mask = REV_E_DCSM_MASK_BITS; | 839 | pvt->csels[0].m_cnt = pvt->csels[1].m_cnt = 8; |
959 | pvt->dcs_mask_notused = REV_E_DCS_NOTUSED_BITS; | ||
960 | pvt->dcs_shift = REV_E_DCS_SHIFT; | ||
961 | pvt->cs_count = 8; | ||
962 | pvt->num_dcsm = 8; | ||
963 | } else { | 840 | } else { |
964 | pvt->dcsb_base = REV_F_F1Xh_DCSB_BASE_BITS; | 841 | pvt->csels[0].b_cnt = pvt->csels[1].b_cnt = 8; |
965 | pvt->dcsm_mask = REV_F_F1Xh_DCSM_MASK_BITS; | 842 | pvt->csels[0].m_cnt = pvt->csels[1].m_cnt = 4; |
966 | pvt->dcs_mask_notused = REV_F_F1Xh_DCS_NOTUSED_BITS; | ||
967 | pvt->dcs_shift = REV_F_F1Xh_DCS_SHIFT; | ||
968 | |||
969 | if (boot_cpu_data.x86 == 0x11) { | ||
970 | pvt->cs_count = 4; | ||
971 | pvt->num_dcsm = 2; | ||
972 | } else { | ||
973 | pvt->cs_count = 8; | ||
974 | pvt->num_dcsm = 4; | ||
975 | } | ||
976 | } | 843 | } |
977 | } | 844 | } |
978 | 845 | ||
979 | /* | 846 | /* |
980 | * Function 2 Offset F10_DCSB0; read in the DCS Base and DCS Mask hw registers | 847 | * Function 2 Offset F10_DCSB0; read in the DCS Base and DCS Mask registers |
981 | */ | 848 | */ |
982 | static void amd64_read_dct_base_mask(struct amd64_pvt *pvt) | 849 | static void read_dct_base_mask(struct amd64_pvt *pvt) |
983 | { | 850 | { |
984 | int cs, reg; | 851 | int cs; |
852 | |||
853 | prep_chip_selects(pvt); | ||
985 | 854 | ||
986 | amd64_set_dct_base_and_mask(pvt); | 855 | for_each_chip_select(cs, 0, pvt) { |
856 | int reg0 = DCSB0 + (cs * 4); | ||
857 | int reg1 = DCSB1 + (cs * 4); | ||
858 | u32 *base0 = &pvt->csels[0].csbases[cs]; | ||
859 | u32 *base1 = &pvt->csels[1].csbases[cs]; | ||
987 | 860 | ||
988 | for (cs = 0; cs < pvt->cs_count; cs++) { | 861 | if (!amd64_read_dct_pci_cfg(pvt, reg0, base0)) |
989 | reg = K8_DCSB0 + (cs * 4); | ||
990 | if (!amd64_read_pci_cfg(pvt->dram_f2_ctl, reg, &pvt->dcsb0[cs])) | ||
991 | debugf0(" DCSB0[%d]=0x%08x reg: F2x%x\n", | 862 | debugf0(" DCSB0[%d]=0x%08x reg: F2x%x\n", |
992 | cs, pvt->dcsb0[cs], reg); | 863 | cs, *base0, reg0); |
993 | 864 | ||
994 | /* If DCT are NOT ganged, then read in DCT1's base */ | 865 | if (boot_cpu_data.x86 == 0xf || dct_ganging_enabled(pvt)) |
995 | if (boot_cpu_data.x86 >= 0x10 && !dct_ganging_enabled(pvt)) { | 866 | continue; |
996 | reg = F10_DCSB1 + (cs * 4); | 867 | |
997 | if (!amd64_read_pci_cfg(pvt->dram_f2_ctl, reg, | 868 | if (!amd64_read_dct_pci_cfg(pvt, reg1, base1)) |
998 | &pvt->dcsb1[cs])) | 869 | debugf0(" DCSB1[%d]=0x%08x reg: F2x%x\n", |
999 | debugf0(" DCSB1[%d]=0x%08x reg: F2x%x\n", | 870 | cs, *base1, reg1); |
1000 | cs, pvt->dcsb1[cs], reg); | ||
1001 | } else { | ||
1002 | pvt->dcsb1[cs] = 0; | ||
1003 | } | ||
1004 | } | 871 | } |
1005 | 872 | ||
1006 | for (cs = 0; cs < pvt->num_dcsm; cs++) { | 873 | for_each_chip_select_mask(cs, 0, pvt) { |
1007 | reg = K8_DCSM0 + (cs * 4); | 874 | int reg0 = DCSM0 + (cs * 4); |
1008 | if (!amd64_read_pci_cfg(pvt->dram_f2_ctl, reg, &pvt->dcsm0[cs])) | 875 | int reg1 = DCSM1 + (cs * 4); |
876 | u32 *mask0 = &pvt->csels[0].csmasks[cs]; | ||
877 | u32 *mask1 = &pvt->csels[1].csmasks[cs]; | ||
878 | |||
879 | if (!amd64_read_dct_pci_cfg(pvt, reg0, mask0)) | ||
1009 | debugf0(" DCSM0[%d]=0x%08x reg: F2x%x\n", | 880 | debugf0(" DCSM0[%d]=0x%08x reg: F2x%x\n", |
1010 | cs, pvt->dcsm0[cs], reg); | 881 | cs, *mask0, reg0); |
1011 | 882 | ||
1012 | /* If DCT are NOT ganged, then read in DCT1's mask */ | 883 | if (boot_cpu_data.x86 == 0xf || dct_ganging_enabled(pvt)) |
1013 | if (boot_cpu_data.x86 >= 0x10 && !dct_ganging_enabled(pvt)) { | 884 | continue; |
1014 | reg = F10_DCSM1 + (cs * 4); | 885 | |
1015 | if (!amd64_read_pci_cfg(pvt->dram_f2_ctl, reg, | 886 | if (!amd64_read_dct_pci_cfg(pvt, reg1, mask1)) |
1016 | &pvt->dcsm1[cs])) | 887 | debugf0(" DCSM1[%d]=0x%08x reg: F2x%x\n", |
1017 | debugf0(" DCSM1[%d]=0x%08x reg: F2x%x\n", | 888 | cs, *mask1, reg1); |
1018 | cs, pvt->dcsm1[cs], reg); | ||
1019 | } else { | ||
1020 | pvt->dcsm1[cs] = 0; | ||
1021 | } | ||
1022 | } | 889 | } |
1023 | } | 890 | } |
1024 | 891 | ||
1025 | static enum mem_type amd64_determine_memory_type(struct amd64_pvt *pvt) | 892 | static enum mem_type amd64_determine_memory_type(struct amd64_pvt *pvt, int cs) |
1026 | { | 893 | { |
1027 | enum mem_type type; | 894 | enum mem_type type; |
1028 | 895 | ||
1029 | if (boot_cpu_data.x86 >= 0x10 || pvt->ext_model >= K8_REV_F) { | 896 | /* F15h supports only DDR3 */ |
897 | if (boot_cpu_data.x86 >= 0x15) | ||
898 | type = (pvt->dclr0 & BIT(16)) ? MEM_DDR3 : MEM_RDDR3; | ||
899 | else if (boot_cpu_data.x86 == 0x10 || pvt->ext_model >= K8_REV_F) { | ||
1030 | if (pvt->dchr0 & DDR3_MODE) | 900 | if (pvt->dchr0 & DDR3_MODE) |
1031 | type = (pvt->dclr0 & BIT(16)) ? MEM_DDR3 : MEM_RDDR3; | 901 | type = (pvt->dclr0 & BIT(16)) ? MEM_DDR3 : MEM_RDDR3; |
1032 | else | 902 | else |
@@ -1035,35 +905,22 @@ static enum mem_type amd64_determine_memory_type(struct amd64_pvt *pvt) | |||
1035 | type = (pvt->dclr0 & BIT(18)) ? MEM_DDR : MEM_RDDR; | 905 | type = (pvt->dclr0 & BIT(18)) ? MEM_DDR : MEM_RDDR; |
1036 | } | 906 | } |
1037 | 907 | ||
1038 | debugf1(" Memory type is: %s\n", edac_mem_types[type]); | 908 | amd64_info("CS%d: %s\n", cs, edac_mem_types[type]); |
1039 | 909 | ||
1040 | return type; | 910 | return type; |
1041 | } | 911 | } |
1042 | 912 | ||
1043 | /* | 913 | /* Get the number of DCT channels the memory controller is using. */ |
1044 | * Read the DRAM Configuration Low register. It differs between CG, D & E revs | ||
1045 | * and the later RevF memory controllers (DDR vs DDR2) | ||
1046 | * | ||
1047 | * Return: | ||
1048 | * number of memory channels in operation | ||
1049 | * Pass back: | ||
1050 | * contents of the DCL0_LOW register | ||
1051 | */ | ||
1052 | static int k8_early_channel_count(struct amd64_pvt *pvt) | 914 | static int k8_early_channel_count(struct amd64_pvt *pvt) |
1053 | { | 915 | { |
1054 | int flag, err = 0; | 916 | int flag; |
1055 | 917 | ||
1056 | err = amd64_read_pci_cfg(pvt->dram_f2_ctl, F10_DCLR_0, &pvt->dclr0); | 918 | if (pvt->ext_model >= K8_REV_F) |
1057 | if (err) | ||
1058 | return err; | ||
1059 | |||
1060 | if ((boot_cpu_data.x86_model >> 4) >= K8_REV_F) { | ||
1061 | /* RevF (NPT) and later */ | 919 | /* RevF (NPT) and later */ |
1062 | flag = pvt->dclr0 & F10_WIDTH_128; | 920 | flag = pvt->dclr0 & WIDTH_128; |
1063 | } else { | 921 | else |
1064 | /* RevE and earlier */ | 922 | /* RevE and earlier */ |
1065 | flag = pvt->dclr0 & REVE_WIDTH_128; | 923 | flag = pvt->dclr0 & REVE_WIDTH_128; |
1066 | } | ||
1067 | 924 | ||
1068 | /* not used */ | 925 | /* not used */ |
1069 | pvt->dclr1 = 0; | 926 | pvt->dclr1 = 0; |
@@ -1071,55 +928,121 @@ static int k8_early_channel_count(struct amd64_pvt *pvt) | |||
1071 | return (flag) ? 2 : 1; | 928 | return (flag) ? 2 : 1; |
1072 | } | 929 | } |
1073 | 930 | ||
1074 | /* extract the ERROR ADDRESS for the K8 CPUs */ | 931 | /* On F10h and later ErrAddr is MC4_ADDR[47:1] */ |
1075 | static u64 k8_get_error_address(struct mem_ctl_info *mci, | 932 | static u64 get_error_address(struct mce *m) |
1076 | struct err_regs *info) | ||
1077 | { | 933 | { |
1078 | return (((u64) (info->nbeah & 0xff)) << 32) + | 934 | struct cpuinfo_x86 *c = &boot_cpu_data; |
1079 | (info->nbeal & ~0x03); | 935 | u64 addr; |
936 | u8 start_bit = 1; | ||
937 | u8 end_bit = 47; | ||
938 | |||
939 | if (c->x86 == 0xf) { | ||
940 | start_bit = 3; | ||
941 | end_bit = 39; | ||
942 | } | ||
943 | |||
944 | addr = m->addr & GENMASK(start_bit, end_bit); | ||
945 | |||
946 | /* | ||
947 | * Erratum 637 workaround | ||
948 | */ | ||
949 | if (c->x86 == 0x15) { | ||
950 | struct amd64_pvt *pvt; | ||
951 | u64 cc6_base, tmp_addr; | ||
952 | u32 tmp; | ||
953 | u8 mce_nid, intlv_en; | ||
954 | |||
955 | if ((addr & GENMASK(24, 47)) >> 24 != 0x00fdf7) | ||
956 | return addr; | ||
957 | |||
958 | mce_nid = amd_get_nb_id(m->extcpu); | ||
959 | pvt = mcis[mce_nid]->pvt_info; | ||
960 | |||
961 | amd64_read_pci_cfg(pvt->F1, DRAM_LOCAL_NODE_LIM, &tmp); | ||
962 | intlv_en = tmp >> 21 & 0x7; | ||
963 | |||
964 | /* add [47:27] + 3 trailing bits */ | ||
965 | cc6_base = (tmp & GENMASK(0, 20)) << 3; | ||
966 | |||
967 | /* reverse and add DramIntlvEn */ | ||
968 | cc6_base |= intlv_en ^ 0x7; | ||
969 | |||
970 | /* pin at [47:24] */ | ||
971 | cc6_base <<= 24; | ||
972 | |||
973 | if (!intlv_en) | ||
974 | return cc6_base | (addr & GENMASK(0, 23)); | ||
975 | |||
976 | amd64_read_pci_cfg(pvt->F1, DRAM_LOCAL_NODE_BASE, &tmp); | ||
977 | |||
978 | /* faster log2 */ | ||
979 | tmp_addr = (addr & GENMASK(12, 23)) << __fls(intlv_en + 1); | ||
980 | |||
981 | /* OR DramIntlvSel into bits [14:12] */ | ||
982 | tmp_addr |= (tmp & GENMASK(21, 23)) >> 9; | ||
983 | |||
984 | /* add remaining [11:0] bits from original MC4_ADDR */ | ||
985 | tmp_addr |= addr & GENMASK(0, 11); | ||
986 | |||
987 | return cc6_base | tmp_addr; | ||
988 | } | ||
989 | |||
990 | return addr; | ||
1080 | } | 991 | }
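
The new get_error_address() replaces the per-family callbacks: it masks MC4_ADDR down to the architecturally valid window (bits [39:3] on K8, [47:1] on F10h and later) before applying the F15h erratum 637 rewrite. A standalone C sketch of that masking, assuming only this file's GENMASK(lo, hi) convention (set bits lo through hi inclusive); mask_err_addr() is a name invented for the sketch:

    #include <stdint.h>
    #include <stdio.h>

    /* driver convention: set bits lo..hi inclusive */
    #define GENMASK64(lo, hi) ((~0ULL << (lo)) & (~0ULL >> (63 - (hi))))

    static uint64_t mask_err_addr(uint64_t mc4_addr, unsigned family)
    {
            unsigned lo = (family == 0xf) ? 3 : 1;   /* K8: ErrAddr[39:3]  */
            unsigned hi = (family == 0xf) ? 39 : 47; /* later: [47:1]      */

            return mc4_addr & GENMASK64(lo, hi);
    }

    int main(void)
    {
            printf("0x%llx\n", (unsigned long long)
                   mask_err_addr(0xffffdeadbeefcafeULL, 0x10));
            return 0;
    }
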
1081 | 992 | ||
1082 | /* | 993 | static void read_dram_base_limit_regs(struct amd64_pvt *pvt, unsigned range) |
1083 | * Read the Base and Limit registers for K8 based Memory controllers; extract | ||
1084 | * fields from the 'raw' reg into separate data fields | ||
1085 | * | ||
1086 | * Isolates: BASE, LIMIT, IntlvEn, IntlvSel, RW_EN | ||
1087 | */ | ||
1088 | static void k8_read_dram_base_limit(struct amd64_pvt *pvt, int dram) | ||
1089 | { | 994 | { |
1090 | u32 low; | 995 | struct cpuinfo_x86 *c = &boot_cpu_data; |
1091 | u32 off = dram << 3; /* 8 bytes between DRAM entries */ | 996 | int off = range << 3; |
1092 | 997 | ||
1093 | amd64_read_pci_cfg(pvt->addr_f1_ctl, K8_DRAM_BASE_LOW + off, &low); | 998 | amd64_read_pci_cfg(pvt->F1, DRAM_BASE_LO + off, &pvt->ranges[range].base.lo); |
999 | amd64_read_pci_cfg(pvt->F1, DRAM_LIMIT_LO + off, &pvt->ranges[range].lim.lo); | ||
1094 | 1000 | ||
1095 | /* Extract parts into separate data entries */ | 1001 | if (c->x86 == 0xf) |
1096 | pvt->dram_base[dram] = ((u64) low & 0xFFFF0000) << 8; | 1002 | return; |
1097 | pvt->dram_IntlvEn[dram] = (low >> 8) & 0x7; | ||
1098 | pvt->dram_rw_en[dram] = (low & 0x3); | ||
1099 | 1003 | ||
1100 | amd64_read_pci_cfg(pvt->addr_f1_ctl, K8_DRAM_LIMIT_LOW + off, &low); | 1004 | if (!dram_rw(pvt, range)) |
1005 | return; | ||
1101 | 1006 | ||
1102 | /* | 1007 | amd64_read_pci_cfg(pvt->F1, DRAM_BASE_HI + off, &pvt->ranges[range].base.hi); |
1103 | * Extract parts into separate data entries. Limit is the HIGHEST memory | 1008 | amd64_read_pci_cfg(pvt->F1, DRAM_LIMIT_HI + off, &pvt->ranges[range].lim.hi); |
1104 | * location of the region, so lower 24 bits need to be all ones | 1009 | |
1105 | */ | 1010 | /* Factor in CC6 save area by reading dst node's limit reg */ |
1106 | pvt->dram_limit[dram] = (((u64) low & 0xFFFF0000) << 8) | 0x00FFFFFF; | 1011 | if (c->x86 == 0x15) { |
1107 | pvt->dram_IntlvSel[dram] = (low >> 8) & 0x7; | 1012 | struct pci_dev *f1 = NULL; |
1108 | pvt->dram_DstNode[dram] = (low & 0x7); | 1013 | u8 nid = dram_dst_node(pvt, range); |
1014 | u32 llim; | ||
1015 | |||
1016 | f1 = pci_get_domain_bus_and_slot(0, 0, PCI_DEVFN(0x18 + nid, 1)); | ||
1017 | if (WARN_ON(!f1)) | ||
1018 | return; | ||
1019 | |||
1020 | amd64_read_pci_cfg(f1, DRAM_LOCAL_NODE_LIM, &llim); | ||
1021 | |||
1022 | pvt->ranges[range].lim.lo &= GENMASK(0, 15); | ||
1023 | |||
1024 | /* {[39:27],111b} */ | ||
1025 | pvt->ranges[range].lim.lo |= ((llim & 0x1fff) << 3 | 0x7) << 16; | ||
1026 | |||
1027 | pvt->ranges[range].lim.hi &= GENMASK(0, 7); | ||
1028 | |||
1029 | /* [47:40] */ | ||
1030 | pvt->ranges[range].lim.hi |= llim >> 13; | ||
1031 | |||
1032 | pci_dev_put(f1); | ||
1033 | } | ||
1109 | } | 1034 | }
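
The F15h branch above folds the CC6 save area into the cached limit by splicing DRAM_LOCAL_NODE_LIM fields over it: {llim[12:0],111b} lands in lim.lo[31:16] and llim >> 13 supplies limit bits [47:40]. A compilable restatement of just that bit surgery; splice_cc6_limit() and the register values are invented for illustration:

    #include <stdint.h>
    #include <stdio.h>

    static void splice_cc6_limit(uint32_t *lim_lo, uint32_t *lim_hi,
                                 uint32_t llim)
    {
            *lim_lo &= 0x0000ffff;                             /* keep [15:0] */
            *lim_lo |= ((((llim & 0x1fff) << 3) | 0x7) << 16); /* {[39:27],111b} */

            *lim_hi &= 0xff;                                   /* keep [7:0]  */
            *lim_hi |= llim >> 13;                             /* [47:40]     */
    }

    int main(void)
    {
            uint32_t lo = 0xdead0003, hi = 0x12, llim = 0x001fe00f;

            splice_cc6_limit(&lo, &hi, llim);
            printf("lim.lo=0x%08x lim.hi=0x%08x\n", lo, hi);
            return 0;
    }
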
1110 | 1035 | ||
1111 | static void k8_map_sysaddr_to_csrow(struct mem_ctl_info *mci, | 1036 | static void k8_map_sysaddr_to_csrow(struct mem_ctl_info *mci, u64 sys_addr, |
1112 | struct err_regs *err_info, u64 sys_addr) | 1037 | u16 syndrome) |
1113 | { | 1038 | { |
1114 | struct mem_ctl_info *src_mci; | 1039 | struct mem_ctl_info *src_mci; |
1040 | struct amd64_pvt *pvt = mci->pvt_info; | ||
1115 | int channel, csrow; | 1041 | int channel, csrow; |
1116 | u32 page, offset; | 1042 | u32 page, offset; |
1117 | u16 syndrome; | ||
1118 | |||
1119 | syndrome = extract_syndrome(err_info); | ||
1120 | 1043 | ||
1121 | /* CHIPKILL enabled */ | 1044 | /* CHIPKILL enabled */ |
1122 | if (err_info->nbcfg & K8_NBCFG_CHIPKILL) { | 1045 | if (pvt->nbcfg & NBCFG_CHIPKILL) { |
1123 | channel = get_channel_from_ecc_syndrome(mci, syndrome); | 1046 | channel = get_channel_from_ecc_syndrome(mci, syndrome); |
1124 | if (channel < 0) { | 1047 | if (channel < 0) { |
1125 | /* | 1048 | /* |
@@ -1127,9 +1050,8 @@ static void k8_map_sysaddr_to_csrow(struct mem_ctl_info *mci, | |||
1127 | * 2 DIMMs is in error. So we need to ID 'both' of them | 1050 | * 2 DIMMs is in error. So we need to ID 'both' of them |
1128 | * as suspect. | 1051 | * as suspect. |
1129 | */ | 1052 | */ |
1130 | amd64_mc_printk(mci, KERN_WARNING, | 1053 | amd64_mc_warn(mci, "unknown syndrome 0x%04x - possible " |
1131 | "unknown syndrome 0x%04x - possible " | 1054 | "error reporting race\n", syndrome); |
1132 | "error reporting race\n", syndrome); | ||
1133 | edac_mc_handle_ce_no_info(mci, EDAC_MOD_STR); | 1055 | edac_mc_handle_ce_no_info(mci, EDAC_MOD_STR); |
1134 | return; | 1056 | return; |
1135 | } | 1057 | } |
@@ -1151,8 +1073,7 @@ static void k8_map_sysaddr_to_csrow(struct mem_ctl_info *mci, | |||
1151 | */ | 1073 | */ |
1152 | src_mci = find_mc_by_sys_addr(mci, sys_addr); | 1074 | src_mci = find_mc_by_sys_addr(mci, sys_addr); |
1153 | if (!src_mci) { | 1075 | if (!src_mci) { |
1154 | amd64_mc_printk(mci, KERN_ERR, | 1076 | amd64_mc_err(mci, "failed to map error addr 0x%lx to a node\n", |
1155 | "failed to map error address 0x%lx to a node\n", | ||
1156 | (unsigned long)sys_addr); | 1077 | (unsigned long)sys_addr); |
1157 | edac_mc_handle_ce_no_info(mci, EDAC_MOD_STR); | 1078 | edac_mc_handle_ce_no_info(mci, EDAC_MOD_STR); |
1158 | return; | 1079 | return; |
@@ -1170,18 +1091,41 @@ static void k8_map_sysaddr_to_csrow(struct mem_ctl_info *mci, | |||
1170 | } | 1091 | } |
1171 | } | 1092 | } |
1172 | 1093 | ||
1173 | static int k8_dbam_to_chip_select(struct amd64_pvt *pvt, int cs_mode) | 1094 | static int ddr2_cs_size(unsigned i, bool dct_width) |
1174 | { | 1095 | { |
1175 | int *dbam_map; | 1096 | unsigned shift = 0; |
1176 | 1097 | ||
1177 | if (pvt->ext_model >= K8_REV_F) | 1098 | if (i <= 2) |
1178 | dbam_map = ddr2_dbam; | 1099 | shift = i; |
1179 | else if (pvt->ext_model >= K8_REV_D) | 1100 | else if (!(i & 0x1)) |
1180 | dbam_map = ddr2_dbam_revD; | 1101 | shift = i >> 1; |
1181 | else | 1102 | else |
1182 | dbam_map = ddr2_dbam_revCG; | 1103 | shift = (i + 1) >> 1; |
1183 | 1104 | ||
1184 | return dbam_map[cs_mode]; | 1105 | return 128 << (shift + !!dct_width); |
1106 | } | ||
1107 | |||
1108 | static int k8_dbam_to_chip_select(struct amd64_pvt *pvt, u8 dct, | ||
1109 | unsigned cs_mode) | ||
1110 | { | ||
1111 | u32 dclr = dct ? pvt->dclr1 : pvt->dclr0; | ||
1112 | |||
1113 | if (pvt->ext_model >= K8_REV_F) { | ||
1114 | WARN_ON(cs_mode > 11); | ||
1115 | return ddr2_cs_size(cs_mode, dclr & WIDTH_128); | ||
1116 | } | ||
1117 | else if (pvt->ext_model >= K8_REV_D) { | ||
1118 | WARN_ON(cs_mode > 10); | ||
1119 | |||
1120 | if (cs_mode == 3 || cs_mode == 8) | ||
1121 | return 32 << (cs_mode - 1); | ||
1122 | else | ||
1123 | return 32 << cs_mode; | ||
1124 | } | ||
1125 | else { | ||
1126 | WARN_ON(cs_mode > 6); | ||
1127 | return 32 << cs_mode; | ||
1128 | } | ||
1185 | } | 1129 | } |
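
The rewrite retires the per-revision ddr2_dbam lookup tables in favor of computing the chip-select size directly from the DBAM cs_mode. A small harness, using nothing beyond the ddr2_cs_size() body shown above, prints the table the formula encodes for both DCT widths:

    #include <stdbool.h>
    #include <stdio.h>

    /* verbatim restatement of ddr2_cs_size() from the patch above */
    static int ddr2_cs_size(unsigned i, bool dct_width_128)
    {
            unsigned shift;

            if (i <= 2)
                    shift = i;
            else if (!(i & 0x1))
                    shift = i >> 1;
            else
                    shift = (i + 1) >> 1;

            return 128 << (shift + !!dct_width_128);
    }

    int main(void)
    {
            unsigned i;

            for (i = 0; i <= 11; i++)   /* cs_mode range WARN_ON'd above */
                    printf("cs_mode %2u: %5d MB / %5d MB\n", i,
                           ddr2_cs_size(i, false), ddr2_cs_size(i, true));
            return 0;
    }
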
1186 | 1130 | ||
1187 | /* | 1131 | /* |
@@ -1192,17 +1136,13 @@ static int k8_dbam_to_chip_select(struct amd64_pvt *pvt, int cs_mode) | |||
1192 | * Pass back: | 1136 | * Pass back: |
1193 | * contents of the DCL0_LOW register | 1137 | * contents of the DCL0_LOW register |
1194 | */ | 1138 | */ |
1195 | static int f10_early_channel_count(struct amd64_pvt *pvt) | 1139 | static int f1x_early_channel_count(struct amd64_pvt *pvt) |
1196 | { | 1140 | { |
1197 | int dbams[] = { DBAM0, DBAM1 }; | ||
1198 | int i, j, channels = 0; | 1141 | int i, j, channels = 0; |
1199 | u32 dbam; | ||
1200 | 1142 | ||
1201 | /* If we are in 128 bit mode, then we are using 2 channels */ | 1143 | /* On F10h, if we are in 128 bit mode, then we are using 2 channels */ |
1202 | if (pvt->dclr0 & F10_WIDTH_128) { | 1144 | if (boot_cpu_data.x86 == 0x10 && (pvt->dclr0 & WIDTH_128)) |
1203 | channels = 2; | 1145 | return 2; |
1204 | return channels; | ||
1205 | } | ||
1206 | 1146 | ||
1207 | /* | 1147 | /* |
1208 | * Need to check if in unganged mode: In such, there are 2 channels, | 1148 | * Need to check if in unganged mode: In such, there are 2 channels, |
@@ -1219,9 +1159,8 @@ static int f10_early_channel_count(struct amd64_pvt *pvt) | |||
1219 | * is more than just one DIMM present in unganged mode. Need to check | 1159 | * is more than just one DIMM present in unganged mode. Need to check |
1220 | * both controllers since DIMMs can be placed in either one. | 1160 | * both controllers since DIMMs can be placed in either one. |
1221 | */ | 1161 | */ |
1222 | for (i = 0; i < ARRAY_SIZE(dbams); i++) { | 1162 | for (i = 0; i < 2; i++) { |
1223 | if (amd64_read_pci_cfg(pvt->dram_f2_ctl, dbams[i], &dbam)) | 1163 | u32 dbam = (i ? pvt->dbam1 : pvt->dbam0); |
1224 | goto err_reg; | ||
1225 | 1164 | ||
1226 | for (j = 0; j < 4; j++) { | 1165 | for (j = 0; j < 4; j++) { |
1227 | if (DBAM_DIMM(j, dbam) > 0) { | 1166 | if (DBAM_DIMM(j, dbam) > 0) { |
@@ -1234,248 +1173,194 @@ static int f10_early_channel_count(struct amd64_pvt *pvt) | |||
1234 | if (channels > 2) | 1173 | if (channels > 2) |
1235 | channels = 2; | 1174 | channels = 2; |
1236 | 1175 | ||
1237 | debugf0("MCT channel count: %d\n", channels); | 1176 | amd64_info("MCT channel count: %d\n", channels); |
1238 | 1177 | ||
1239 | return channels; | 1178 | return channels; |
1240 | |||
1241 | err_reg: | ||
1242 | return -1; | ||
1243 | |||
1244 | } | 1179 | } |
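
With the DBAM registers now cached in pvt, counting channels reduces to scanning the per-DIMM nibbles. The sketch below assumes the usual definition of DBAM_DIMM() as the j-th 4-bit cs_mode field; early_channel_count() is a name invented here:

    #include <stdint.h>
    #include <stdio.h>

    #define DBAM_DIMM(j, reg)  (((reg) >> (4 * (j))) & 0xF)

    static int early_channel_count(uint32_t dbam0, uint32_t dbam1)
    {
            uint32_t dbam[2] = { dbam0, dbam1 };
            int i, j, channels = 0;

            /* a DCT counts as a channel if any of its 4 DIMM slots is set */
            for (i = 0; i < 2; i++)
                    for (j = 0; j < 4; j++)
                            if (DBAM_DIMM(j, dbam[i]) > 0) {
                                    channels++;
                                    break;
                            }

            return channels > 2 ? 2 : channels;
    }

    int main(void)
    {
            printf("%d\n", early_channel_count(0x21, 0x0)); /* -> 1 */
            printf("%d\n", early_channel_count(0x01, 0x1)); /* -> 2 */
            return 0;
    }
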
1245 | 1180 | ||
1246 | static int f10_dbam_to_chip_select(struct amd64_pvt *pvt, int cs_mode) | 1181 | static int ddr3_cs_size(unsigned i, bool dct_width) |
1247 | { | 1182 | { |
1248 | int *dbam_map; | 1183 | unsigned shift = 0; |
1184 | int cs_size = 0; | ||
1249 | 1185 | ||
1250 | if (pvt->dchr0 & DDR3_MODE || pvt->dchr1 & DDR3_MODE) | 1186 | if (i == 0 || i == 3 || i == 4) |
1251 | dbam_map = ddr3_dbam; | 1187 | cs_size = -1; |
1188 | else if (i <= 2) | ||
1189 | shift = i; | ||
1190 | else if (i == 12) | ||
1191 | shift = 7; | ||
1192 | else if (!(i & 0x1)) | ||
1193 | shift = i >> 1; | ||
1252 | else | 1194 | else |
1253 | dbam_map = ddr2_dbam; | 1195 | shift = (i + 1) >> 1; |
1254 | 1196 | ||
1255 | return dbam_map[cs_mode]; | 1197 | if (cs_size != -1) |
1256 | } | 1198 | cs_size = (128 * (1 << !!dct_width)) << shift; |
1257 | 1199 | ||
1258 | /* Enable extended configuration access via 0xCF8 feature */ | 1200 | return cs_size; |
1259 | static void amd64_setup(struct amd64_pvt *pvt) | ||
1260 | { | ||
1261 | u32 reg; | ||
1262 | |||
1263 | amd64_read_pci_cfg(pvt->misc_f3_ctl, F10_NB_CFG_HIGH, ®); | ||
1264 | |||
1265 | pvt->flags.cf8_extcfg = !!(reg & F10_NB_CFG_LOW_ENABLE_EXT_CFG); | ||
1266 | reg |= F10_NB_CFG_LOW_ENABLE_EXT_CFG; | ||
1267 | pci_write_config_dword(pvt->misc_f3_ctl, F10_NB_CFG_HIGH, reg); | ||
1268 | } | 1201 | } |
1269 | 1202 | ||
1270 | /* Restore the extended configuration access via 0xCF8 feature */ | 1203 | static int f10_dbam_to_chip_select(struct amd64_pvt *pvt, u8 dct, |
1271 | static void amd64_teardown(struct amd64_pvt *pvt) | 1204 | unsigned cs_mode) |
1272 | { | 1205 | { |
1273 | u32 reg; | 1206 | u32 dclr = dct ? pvt->dclr1 : pvt->dclr0; |
1274 | |||
1275 | amd64_read_pci_cfg(pvt->misc_f3_ctl, F10_NB_CFG_HIGH, ®); | ||
1276 | 1207 | ||
1277 | reg &= ~F10_NB_CFG_LOW_ENABLE_EXT_CFG; | 1208 | WARN_ON(cs_mode > 11); |
1278 | if (pvt->flags.cf8_extcfg) | ||
1279 | reg |= F10_NB_CFG_LOW_ENABLE_EXT_CFG; | ||
1280 | pci_write_config_dword(pvt->misc_f3_ctl, F10_NB_CFG_HIGH, reg); | ||
1281 | } | ||
1282 | 1209 | ||
1283 | static u64 f10_get_error_address(struct mem_ctl_info *mci, | 1210 | if (pvt->dchr0 & DDR3_MODE || pvt->dchr1 & DDR3_MODE) |
1284 | struct err_regs *info) | 1211 | return ddr3_cs_size(cs_mode, dclr & WIDTH_128); |
1285 | { | 1212 | else |
1286 | return (((u64) (info->nbeah & 0xffff)) << 32) + | 1213 | return ddr2_cs_size(cs_mode, dclr & WIDTH_128); |
1287 | (info->nbeal & ~0x01); | ||
1288 | } | 1214 | } |
1289 | 1215 | ||
1290 | /* | 1216 | /* |
1291 | * Read the Base and Limit registers for F10 based Memory controllers. Extract | 1217 | * F15h supports only 64bit DCT interfaces |
1292 | * fields from the 'raw' reg into separate data fields. | ||
1293 | * | ||
1294 | * Isolates: BASE, LIMIT, IntlvEn, IntlvSel, RW_EN. | ||
1295 | */ | 1218 | */ |
1296 | static void f10_read_dram_base_limit(struct amd64_pvt *pvt, int dram) | 1219 | static int f15_dbam_to_chip_select(struct amd64_pvt *pvt, u8 dct, |
1220 | unsigned cs_mode) | ||
1297 | { | 1221 | { |
1298 | u32 high_offset, low_offset, high_base, low_base, high_limit, low_limit; | 1222 | WARN_ON(cs_mode > 12); |
1299 | |||
1300 | low_offset = K8_DRAM_BASE_LOW + (dram << 3); | ||
1301 | high_offset = F10_DRAM_BASE_HIGH + (dram << 3); | ||
1302 | |||
1303 | /* read the 'raw' DRAM BASE Address register */ | ||
1304 | amd64_read_pci_cfg(pvt->addr_f1_ctl, low_offset, &low_base); | ||
1305 | 1223 | ||
1306 | /* Read from the ECS data register */ | 1224 | return ddr3_cs_size(cs_mode, false); |
1307 | amd64_read_pci_cfg(pvt->addr_f1_ctl, high_offset, &high_base); | ||
1308 | |||
1309 | /* Extract parts into separate data entries */ | ||
1310 | pvt->dram_rw_en[dram] = (low_base & 0x3); | ||
1311 | |||
1312 | if (pvt->dram_rw_en[dram] == 0) | ||
1313 | return; | ||
1314 | |||
1315 | pvt->dram_IntlvEn[dram] = (low_base >> 8) & 0x7; | ||
1316 | |||
1317 | pvt->dram_base[dram] = (((u64)high_base & 0x000000FF) << 40) | | ||
1318 | (((u64)low_base & 0xFFFF0000) << 8); | ||
1319 | |||
1320 | low_offset = K8_DRAM_LIMIT_LOW + (dram << 3); | ||
1321 | high_offset = F10_DRAM_LIMIT_HIGH + (dram << 3); | ||
1322 | |||
1323 | /* read the 'raw' LIMIT registers */ | ||
1324 | amd64_read_pci_cfg(pvt->addr_f1_ctl, low_offset, &low_limit); | ||
1325 | |||
1326 | /* Read from the ECS data register for the HIGH portion */ | ||
1327 | amd64_read_pci_cfg(pvt->addr_f1_ctl, high_offset, &high_limit); | ||
1328 | |||
1329 | pvt->dram_DstNode[dram] = (low_limit & 0x7); | ||
1330 | pvt->dram_IntlvSel[dram] = (low_limit >> 8) & 0x7; | ||
1331 | |||
1332 | /* | ||
1333 | * Extract address values and form a LIMIT address. Limit is the HIGHEST | ||
1334 | * memory location of the region, so low 24 bits need to be all ones. | ||
1335 | */ | ||
1336 | pvt->dram_limit[dram] = (((u64)high_limit & 0x000000FF) << 40) | | ||
1337 | (((u64) low_limit & 0xFFFF0000) << 8) | | ||
1338 | 0x00FFFFFF; | ||
1339 | } | 1225 | } |
1340 | 1226 | ||
1341 | static void f10_read_dram_ctl_register(struct amd64_pvt *pvt) | 1227 | static void read_dram_ctl_register(struct amd64_pvt *pvt) |
1342 | { | 1228 | { |
1343 | 1229 | ||
1344 | if (!amd64_read_pci_cfg(pvt->dram_f2_ctl, F10_DCTL_SEL_LOW, | 1230 | if (boot_cpu_data.x86 == 0xf) |
1345 | &pvt->dram_ctl_select_low)) { | 1231 | return; |
1346 | debugf0("F2x110 (DCTL Sel. Low): 0x%08x, " | 1232 | |
1347 | "High range addresses at: 0x%x\n", | 1233 | if (!amd64_read_dct_pci_cfg(pvt, DCT_SEL_LO, &pvt->dct_sel_lo)) { |
1348 | pvt->dram_ctl_select_low, | 1234 | debugf0("F2x110 (DCTSelLow): 0x%08x, High range addrs at: 0x%x\n", |
1349 | dct_sel_baseaddr(pvt)); | 1235 | pvt->dct_sel_lo, dct_sel_baseaddr(pvt)); |
1350 | 1236 | ||
1351 | debugf0(" DCT mode: %s, All DCTs on: %s\n", | 1237 | debugf0(" DCTs operate in %s mode.\n", |
1352 | (dct_ganging_enabled(pvt) ? "ganged" : "unganged"), | 1238 | (dct_ganging_enabled(pvt) ? "ganged" : "unganged")); |
1353 | (dct_dram_enabled(pvt) ? "yes" : "no")); | ||
1354 | 1239 | ||
1355 | if (!dct_ganging_enabled(pvt)) | 1240 | if (!dct_ganging_enabled(pvt)) |
1356 | debugf0(" Address range split per DCT: %s\n", | 1241 | debugf0(" Address range split per DCT: %s\n", |
1357 | (dct_high_range_enabled(pvt) ? "yes" : "no")); | 1242 | (dct_high_range_enabled(pvt) ? "yes" : "no")); |
1358 | 1243 | ||
1359 | debugf0(" DCT data interleave for ECC: %s, " | 1244 | debugf0(" data interleave for ECC: %s, " |
1360 | "DRAM cleared since last warm reset: %s\n", | 1245 | "DRAM cleared since last warm reset: %s\n", |
1361 | (dct_data_intlv_enabled(pvt) ? "enabled" : "disabled"), | 1246 | (dct_data_intlv_enabled(pvt) ? "enabled" : "disabled"), |
1362 | (dct_memory_cleared(pvt) ? "yes" : "no")); | 1247 | (dct_memory_cleared(pvt) ? "yes" : "no")); |
1363 | 1248 | ||
1364 | debugf0(" DCT channel interleave: %s, " | 1249 | debugf0(" channel interleave: %s, " |
1365 | "DCT interleave bits selector: 0x%x\n", | 1250 | "interleave bits selector: 0x%x\n", |
1366 | (dct_interleave_enabled(pvt) ? "enabled" : "disabled"), | 1251 | (dct_interleave_enabled(pvt) ? "enabled" : "disabled"), |
1367 | dct_sel_interleave_addr(pvt)); | 1252 | dct_sel_interleave_addr(pvt)); |
1368 | } | 1253 | } |
1369 | 1254 | ||
1370 | amd64_read_pci_cfg(pvt->dram_f2_ctl, F10_DCTL_SEL_HIGH, | 1255 | amd64_read_dct_pci_cfg(pvt, DCT_SEL_HI, &pvt->dct_sel_hi); |
1371 | &pvt->dram_ctl_select_high); | ||
1372 | } | 1256 | } |
1373 | 1257 | ||
1374 | /* | 1258 | /* |
1375 | * determine channel based on the interleaving mode: F10h BKDG, 2.8.9 Memory | 1259 | * Determine channel (DCT) based on the interleaving mode: F10h BKDG, 2.8.9 Memory |
1376 | * Interleaving Modes. | 1260 | * Interleaving Modes. |
1377 | */ | 1261 | */ |
1378 | static u32 f10_determine_channel(struct amd64_pvt *pvt, u64 sys_addr, | 1262 | static u8 f1x_determine_channel(struct amd64_pvt *pvt, u64 sys_addr, |
1379 | int hi_range_sel, u32 intlv_en) | 1263 | bool hi_range_sel, u8 intlv_en) |
1380 | { | 1264 | { |
1381 | u32 cs, temp, dct_sel_high = (pvt->dram_ctl_select_low >> 1) & 1; | 1265 | u8 dct_sel_high = (pvt->dct_sel_lo >> 1) & 1; |
1382 | 1266 | ||
1383 | if (dct_ganging_enabled(pvt)) | 1267 | if (dct_ganging_enabled(pvt)) |
1384 | cs = 0; | 1268 | return 0; |
1385 | else if (hi_range_sel) | ||
1386 | cs = dct_sel_high; | ||
1387 | else if (dct_interleave_enabled(pvt)) { | ||
1388 | /* | ||
1389 | * see F2x110[DctSelIntLvAddr] - channel interleave mode | ||
1390 | */ | ||
1391 | if (dct_sel_interleave_addr(pvt) == 0) | ||
1392 | cs = sys_addr >> 6 & 1; | ||
1393 | else if ((dct_sel_interleave_addr(pvt) >> 1) & 1) { | ||
1394 | temp = hweight_long((u32) ((sys_addr >> 16) & 0x1F)) % 2; | ||
1395 | 1269 | ||
1396 | if (dct_sel_interleave_addr(pvt) & 1) | 1270 | if (hi_range_sel) |
1397 | cs = (sys_addr >> 9 & 1) ^ temp; | 1271 | return dct_sel_high; |
1398 | else | ||
1399 | cs = (sys_addr >> 6 & 1) ^ temp; | ||
1400 | } else if (intlv_en & 4) | ||
1401 | cs = sys_addr >> 15 & 1; | ||
1402 | else if (intlv_en & 2) | ||
1403 | cs = sys_addr >> 14 & 1; | ||
1404 | else if (intlv_en & 1) | ||
1405 | cs = sys_addr >> 13 & 1; | ||
1406 | else | ||
1407 | cs = sys_addr >> 12 & 1; | ||
1408 | } else if (dct_high_range_enabled(pvt) && !dct_ganging_enabled(pvt)) | ||
1409 | cs = ~dct_sel_high & 1; | ||
1410 | else | ||
1411 | cs = 0; | ||
1412 | 1272 | ||
1413 | return cs; | 1273 | /* |
1414 | } | 1274 | * see F2x110[DctSelIntLvAddr] - channel interleave mode |
1275 | */ | ||
1276 | if (dct_interleave_enabled(pvt)) { | ||
1277 | u8 intlv_addr = dct_sel_interleave_addr(pvt); | ||
1415 | 1278 | ||
1416 | static inline u32 f10_map_intlv_en_to_shift(u32 intlv_en) | 1279 | /* return DCT select function: 0=DCT0, 1=DCT1 */ |
1417 | { | 1280 | if (!intlv_addr) |
1418 | if (intlv_en == 1) | 1281 | return sys_addr >> 6 & 1; |
1419 | return 1; | 1282 | |
1420 | else if (intlv_en == 3) | 1283 | if (intlv_addr & 0x2) { |
1421 | return 2; | 1284 | u8 shift = intlv_addr & 0x1 ? 9 : 6; |
1422 | else if (intlv_en == 7) | 1285 | u32 temp = hweight_long((u32) ((sys_addr >> 16) & 0x1F)) % 2; |
1423 | return 3; | 1286 | |
1287 | return ((sys_addr >> shift) & 1) ^ temp; | ||
1288 | } | ||
1289 | |||
1290 | return (sys_addr >> (12 + hweight8(intlv_en))) & 1; | ||
1291 | } | ||
1292 | |||
1293 | if (dct_high_range_enabled(pvt)) | ||
1294 | return ~dct_sel_high & 1; | ||
1424 | 1295 | ||
1425 | return 0; | 1296 | return 0; |
1426 | } | 1297 | } |
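
For the hashed interleave modes, the refactor collapses the old if/else ladder into one expression per case: the DCT is A[6] or A[9] XORed with the parity of sys_addr[20:16], and the node-interleave case simply reads the bit hweight8(intlv_en) positions above A[11]. A self-contained sketch of the hash; channel_hash() is invented for illustration:

    #include <stdint.h>
    #include <stdio.h>

    static unsigned parity5(uint64_t sys_addr)
    {
            unsigned v = (sys_addr >> 16) & 0x1f, n = 0;

            for (; v; v &= v - 1)
                    n++;
            return n & 1;                   /* hweight(...) % 2 */
    }

    /* hashed DCT select: XOR A[6] or A[9] with the parity of A[20:16] */
    static unsigned channel_hash(uint64_t sys_addr, int use_a9)
    {
            unsigned shift = use_a9 ? 9 : 6;

            return ((sys_addr >> shift) & 1) ^ parity5(sys_addr);
    }

    int main(void)
    {
            printf("%u %u\n", channel_hash(0x12340, 0),
                              channel_hash(0x12340, 1));
            return 0;
    }
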
1427 | 1298 | ||
1428 | /* See F10h BKDG, 2.8.10.2 DctSelBaseOffset Programming */ | 1299 | /* Convert the sys_addr to the normalized DCT address */ |
1429 | static inline u64 f10_get_base_addr_offset(u64 sys_addr, int hi_range_sel, | 1300 | static u64 f1x_get_norm_dct_addr(struct amd64_pvt *pvt, unsigned range, |
1430 | u32 dct_sel_base_addr, | 1301 | u64 sys_addr, bool hi_rng, |
1431 | u64 dct_sel_base_off, | 1302 | u32 dct_sel_base_addr) |
1432 | u32 hole_valid, u32 hole_off, | ||
1433 | u64 dram_base) | ||
1434 | { | 1303 | { |
1435 | u64 chan_off; | 1304 | u64 chan_off; |
1305 | u64 dram_base = get_dram_base(pvt, range); | ||
1306 | u64 hole_off = f10_dhar_offset(pvt); | ||
1307 | u64 dct_sel_base_off = (pvt->dct_sel_hi & 0xFFFFFC00) << 16; | ||
1436 | 1308 | ||
1437 | if (hi_range_sel) { | 1309 | if (hi_rng) { |
1438 | if (!(dct_sel_base_addr & 0xFFFF0000) && | 1310 | /* |
1439 | hole_valid && (sys_addr >= 0x100000000ULL)) | 1311 | * if |
1440 | chan_off = hole_off << 16; | 1312 | * base address of high range is below 4Gb |
1313 | * (bits [47:27] at [31:11]) | ||
1314 | * DRAM address space on this DCT is hoisted above 4Gb && | ||
1315 | * sys_addr > 4Gb | ||
1316 | * | ||
1317 | * remove hole offset from sys_addr | ||
1318 | * else | ||
1319 | * remove high range offset from sys_addr | ||
1320 | */ | ||
1321 | if ((!(dct_sel_base_addr >> 16) || | ||
1322 | dct_sel_base_addr < dhar_base(pvt)) && | ||
1323 | dhar_valid(pvt) && | ||
1324 | (sys_addr >= BIT_64(32))) | ||
1325 | chan_off = hole_off; | ||
1441 | else | 1326 | else |
1442 | chan_off = dct_sel_base_off; | 1327 | chan_off = dct_sel_base_off; |
1443 | } else { | 1328 | } else { |
1444 | if (hole_valid && (sys_addr >= 0x100000000ULL)) | 1329 | /* |
1445 | chan_off = hole_off << 16; | 1330 | * if |
1331 | * we have a valid hole && | ||
1332 | * sys_addr > 4Gb | ||
1333 | * | ||
1334 | * remove hole | ||
1335 | * else | ||
1336 | * remove dram base to normalize to DCT address | ||
1337 | */ | ||
1338 | if (dhar_valid(pvt) && (sys_addr >= BIT_64(32))) | ||
1339 | chan_off = hole_off; | ||
1446 | else | 1340 | else |
1447 | chan_off = dram_base & 0xFFFFF8000000ULL; | 1341 | chan_off = dram_base; |
1448 | } | 1342 | } |
1449 | 1343 | ||
1450 | return (sys_addr & 0x0000FFFFFFFFFFC0ULL) - | 1344 | return (sys_addr & GENMASK(6,47)) - (chan_off & GENMASK(23,47)); |
1451 | (chan_off & 0x0000FFFFFF800000ULL); | ||
1452 | } | 1345 | } |
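
Once a branch has chosen chan_off, the normalization itself is a single subtraction over masked fields: keep sys_addr[47:6] and subtract the offset truncated to an 8MB boundary, exactly the GENMASK expression on the last line above. Standalone form, under the same GENMASK(lo, hi) convention:

    #include <stdint.h>
    #include <stdio.h>

    #define GENMASK64(lo, hi) ((~0ULL << (lo)) & (~0ULL >> (63 - (hi))))

    static uint64_t norm_dct_addr(uint64_t sys_addr, uint64_t chan_off)
    {
            return (sys_addr & GENMASK64(6, 47)) -
                   (chan_off & GENMASK64(23, 47));
    }

    int main(void)
    {
            /* toy: a 4GB hoist offset removed from an error address */
            printf("0x%llx\n", (unsigned long long)
                   norm_dct_addr(0x123456789ULL, 0x100000000ULL));
            return 0;
    }
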
1453 | 1346 | ||
1454 | /* Hack for the time being - Can we get this from BIOS?? */ | ||
1455 | #define CH0SPARE_RANK 0 | ||
1456 | #define CH1SPARE_RANK 1 | ||
1457 | |||
1458 | /* | 1347 | /* |
1459 | * checks if the csrow passed in is marked as SPARED; if so, returns the new | 1348 | * checks if the csrow passed in is marked as SPARED; if so, returns the new |
1460 | * spare row | 1349 | * spare row |
1461 | */ | 1350 | */ |
1462 | static inline int f10_process_possible_spare(int csrow, | 1351 | static int f10_process_possible_spare(struct amd64_pvt *pvt, u8 dct, int csrow) |
1463 | u32 cs, struct amd64_pvt *pvt) | 1352 | { |
1464 | { | 1353 | int tmp_cs; |
1465 | u32 swap_done; | 1354 | |
1466 | u32 bad_dram_cs; | 1355 | if (online_spare_swap_done(pvt, dct) && |
1467 | 1356 | csrow == online_spare_bad_dramcs(pvt, dct)) { | |
1468 | /* Depending on channel, isolate respective SPARING info */ | 1357 | |
1469 | if (cs) { | 1358 | for_each_chip_select(tmp_cs, dct, pvt) { |
1470 | swap_done = F10_ONLINE_SPARE_SWAPDONE1(pvt->online_spare); | 1359 | if (chip_select_base(tmp_cs, dct, pvt) & 0x2) { |
1471 | bad_dram_cs = F10_ONLINE_SPARE_BADDRAM_CS1(pvt->online_spare); | 1360 | csrow = tmp_cs; |
1472 | if (swap_done && (csrow == bad_dram_cs)) | 1361 | break; |
1473 | csrow = CH1SPARE_RANK; | 1362 | } |
1474 | } else { | 1363 | } |
1475 | swap_done = F10_ONLINE_SPARE_SWAPDONE0(pvt->online_spare); | ||
1476 | bad_dram_cs = F10_ONLINE_SPARE_BADDRAM_CS0(pvt->online_spare); | ||
1477 | if (swap_done && (csrow == bad_dram_cs)) | ||
1478 | csrow = CH0SPARE_RANK; | ||
1479 | } | 1364 | } |
1480 | return csrow; | 1365 | return csrow; |
1481 | } | 1366 | } |
@@ -1488,53 +1373,39 @@ static inline int f10_process_possible_spare(int csrow, | |||
1488 | * -EINVAL: NOT FOUND | 1373 | * -EINVAL: NOT FOUND |
1489 | * 0..csrow = Chip-Select Row | 1374 | * 0..csrow = Chip-Select Row |
1490 | */ | 1375 | */ |
1491 | static int f10_lookup_addr_in_dct(u32 in_addr, u32 nid, u32 cs) | 1376 | static int f1x_lookup_addr_in_dct(u64 in_addr, u32 nid, u8 dct) |
1492 | { | 1377 | { |
1493 | struct mem_ctl_info *mci; | 1378 | struct mem_ctl_info *mci; |
1494 | struct amd64_pvt *pvt; | 1379 | struct amd64_pvt *pvt; |
1495 | u32 cs_base, cs_mask; | 1380 | u64 cs_base, cs_mask; |
1496 | int cs_found = -EINVAL; | 1381 | int cs_found = -EINVAL; |
1497 | int csrow; | 1382 | int csrow; |
1498 | 1383 | ||
1499 | mci = mci_lookup[nid]; | 1384 | mci = mcis[nid]; |
1500 | if (!mci) | 1385 | if (!mci) |
1501 | return cs_found; | 1386 | return cs_found; |
1502 | 1387 | ||
1503 | pvt = mci->pvt_info; | 1388 | pvt = mci->pvt_info; |
1504 | 1389 | ||
1505 | debugf1("InputAddr=0x%x channelselect=%d\n", in_addr, cs); | 1390 | debugf1("input addr: 0x%llx, DCT: %d\n", in_addr, dct); |
1506 | |||
1507 | for (csrow = 0; csrow < pvt->cs_count; csrow++) { | ||
1508 | 1391 | ||
1509 | cs_base = amd64_get_dct_base(pvt, cs, csrow); | 1392 | for_each_chip_select(csrow, dct, pvt) { |
1510 | if (!(cs_base & K8_DCSB_CS_ENABLE)) | 1393 | if (!csrow_enabled(csrow, dct, pvt)) |
1511 | continue; | 1394 | continue; |
1512 | 1395 | ||
1513 | /* | 1396 | get_cs_base_and_mask(pvt, csrow, dct, &cs_base, &cs_mask); |
1514 | * We have an ENABLED CSROW, Isolate just the MASK bits of the | ||
1515 | * target: [28:19] and [13:5], which map to [36:27] and [21:13] | ||
1516 | * of the actual address. | ||
1517 | */ | ||
1518 | cs_base &= REV_F_F1Xh_DCSB_BASE_BITS; | ||
1519 | |||
1520 | /* | ||
1521 | * Get the DCT Mask, and ENABLE the reserved bits: [18:16] and | ||
1522 | * [4:0] to become ON. Then mask off bits [28:0] ([36:8]) | ||
1523 | */ | ||
1524 | cs_mask = amd64_get_dct_mask(pvt, cs, csrow); | ||
1525 | 1397 | ||
1526 | debugf1(" CSROW=%d CSBase=0x%x RAW CSMask=0x%x\n", | 1398 | debugf1(" CSROW=%d CSBase=0x%llx CSMask=0x%llx\n", |
1527 | csrow, cs_base, cs_mask); | 1399 | csrow, cs_base, cs_mask); |
1528 | 1400 | ||
1529 | cs_mask = (cs_mask | 0x0007C01F) & 0x1FFFFFFF; | 1401 | cs_mask = ~cs_mask; |
1530 | 1402 | ||
1531 | debugf1(" Final CSMask=0x%x\n", cs_mask); | 1403 | debugf1(" (InputAddr & ~CSMask)=0x%llx " |
1532 | debugf1(" (InputAddr & ~CSMask)=0x%x " | 1404 | "(CSBase & ~CSMask)=0x%llx\n", |
1533 | "(CSBase & ~CSMask)=0x%x\n", | 1405 | (in_addr & cs_mask), (cs_base & cs_mask)); |
1534 | (in_addr & ~cs_mask), (cs_base & ~cs_mask)); | ||
1535 | 1406 | ||
1536 | if ((in_addr & ~cs_mask) == (cs_base & ~cs_mask)) { | 1407 | if ((in_addr & cs_mask) == (cs_base & cs_mask)) { |
1537 | cs_found = f10_process_possible_spare(csrow, cs, pvt); | 1408 | cs_found = f10_process_possible_spare(pvt, dct, csrow); |
1538 | 1409 | ||
1539 | debugf1(" MATCH csrow=%d\n", cs_found); | 1410 | debugf1(" MATCH csrow=%d\n", cs_found); |
1540 | break; | 1411 | break; |
@@ -1543,38 +1414,71 @@ static int f10_lookup_addr_in_dct(u32 in_addr, u32 nid, u32 cs) | |||
1543 | return cs_found; | 1414 | return cs_found; |
1544 | } | 1415 | } |
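
The base/mask comparison that replaced the hand-built REV_F mask constants is the classic "match on the bits the mask does not cover" test. A toy, self-contained version with made-up values:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* a chip select matches when the address agrees with its base on
     * every bit NOT covered by the mask */
    static bool cs_matches(uint64_t in_addr, uint64_t cs_base, uint64_t cs_mask)
    {
            uint64_t care = ~cs_mask;

            return (in_addr & care) == (cs_base & care);
    }

    int main(void)
    {
            /* toy: a 256MB row at base 0x10000000, mask covers [27:0] */
            printf("%d\n", cs_matches(0x10123456, 0x10000000, 0x0fffffff));
            printf("%d\n", cs_matches(0x20123456, 0x10000000, 0x0fffffff));
            return 0;
    }
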
1545 | 1416 | ||
1546 | /* For a given @dram_range, check if @sys_addr falls within it. */ | 1417 | /* |
1547 | static int f10_match_to_this_node(struct amd64_pvt *pvt, int dram_range, | 1418 | * See F2x10C. Non-interleaved graphics framebuffer memory under the 16G is |
1548 | u64 sys_addr, int *nid, int *chan_sel) | 1419 | * swapped with a region located at the bottom of memory so that the GPU can use |
1420 | * the interleaved region and thus two channels. | ||
1421 | */ | ||
1422 | static u64 f1x_swap_interleaved_region(struct amd64_pvt *pvt, u64 sys_addr) | ||
1549 | { | 1423 | { |
1550 | int node_id, cs_found = -EINVAL, high_range = 0; | 1424 | u32 swap_reg, swap_base, swap_limit, rgn_size, tmp_addr; |
1551 | u32 intlv_en, intlv_sel, intlv_shift, hole_off; | ||
1552 | u32 hole_valid, tmp, dct_sel_base, channel; | ||
1553 | u64 dram_base, chan_addr, dct_sel_base_off; | ||
1554 | 1425 | ||
1555 | dram_base = pvt->dram_base[dram_range]; | 1426 | if (boot_cpu_data.x86 == 0x10) { |
1556 | intlv_en = pvt->dram_IntlvEn[dram_range]; | 1427 | /* only revC3 and revE have that feature */ |
1428 | if (boot_cpu_data.x86_model < 4 || | ||
1429 | (boot_cpu_data.x86_model < 0xa && | ||
1430 | boot_cpu_data.x86_mask < 3)) | ||
1431 | return sys_addr; | ||
1432 | } | ||
1557 | 1433 | ||
1558 | node_id = pvt->dram_DstNode[dram_range]; | 1434 | amd64_read_dct_pci_cfg(pvt, SWAP_INTLV_REG, &swap_reg); |
1559 | intlv_sel = pvt->dram_IntlvSel[dram_range]; | ||
1560 | 1435 | ||
1561 | debugf1("(dram=%d) Base=0x%llx SystemAddr= 0x%llx Limit=0x%llx\n", | 1436 | if (!(swap_reg & 0x1)) |
1562 | dram_range, dram_base, sys_addr, pvt->dram_limit[dram_range]); | 1437 | return sys_addr; |
1563 | 1438 | ||
1564 | /* | 1439 | swap_base = (swap_reg >> 3) & 0x7f; |
1565 | * This assumes that one node's DHAR is the same as all the other | 1440 | swap_limit = (swap_reg >> 11) & 0x7f; |
1566 | * nodes' DHAR. | 1441 | rgn_size = (swap_reg >> 20) & 0x7f; |
1567 | */ | 1442 | tmp_addr = sys_addr >> 27; |
1568 | hole_off = (pvt->dhar & 0x0000FF80); | 1443 | |
1569 | hole_valid = (pvt->dhar & 0x1); | 1444 | if (!(sys_addr >> 34) && |
1570 | dct_sel_base_off = (pvt->dram_ctl_select_high & 0xFFFFFC00) << 16; | 1445 | (((tmp_addr >= swap_base) && |
1446 | (tmp_addr <= swap_limit)) || | ||
1447 | (tmp_addr < rgn_size))) | ||
1448 | return sys_addr ^ (u64)swap_base << 27; | ||
1571 | 1449 | ||
1572 | debugf1(" HoleOffset=0x%x HoleValid=0x%x IntlvSel=0x%x\n", | 1450 | return sys_addr; |
1573 | hole_off, hole_valid, intlv_sel); | 1451 | } |
1574 | 1452 | ||
1575 | if (intlv_en || | 1453 | /* For a given @dram_range, check if @sys_addr falls within it. */ |
1576 | (intlv_sel != ((sys_addr >> 12) & intlv_en))) | 1454 | static int f1x_match_to_this_node(struct amd64_pvt *pvt, unsigned range, |
1455 | u64 sys_addr, int *nid, int *chan_sel) | ||
1456 | { | ||
1457 | int cs_found = -EINVAL; | ||
1458 | u64 chan_addr; | ||
1459 | u32 dct_sel_base; | ||
1460 | u8 channel; | ||
1461 | bool high_range = false; | ||
1462 | |||
1463 | u8 node_id = dram_dst_node(pvt, range); | ||
1464 | u8 intlv_en = dram_intlv_en(pvt, range); | ||
1465 | u32 intlv_sel = dram_intlv_sel(pvt, range); | ||
1466 | |||
1467 | debugf1("(range %d) SystemAddr= 0x%llx Limit=0x%llx\n", | ||
1468 | range, sys_addr, get_dram_limit(pvt, range)); | ||
1469 | |||
1470 | if (dhar_valid(pvt) && | ||
1471 | dhar_base(pvt) <= sys_addr && | ||
1472 | sys_addr < BIT_64(32)) { | ||
1473 | amd64_warn("Huh? Address is in the MMIO hole: 0x%016llx\n", | ||
1474 | sys_addr); | ||
1577 | return -EINVAL; | 1475 | return -EINVAL; |
1476 | } | ||
1477 | |||
1478 | if (intlv_en && (intlv_sel != ((sys_addr >> 12) & intlv_en))) | ||
1479 | return -EINVAL; | ||
1480 | |||
1481 | sys_addr = f1x_swap_interleaved_region(pvt, sys_addr); | ||
1578 | 1482 | ||
1579 | dct_sel_base = dct_sel_baseaddr(pvt); | 1483 | dct_sel_base = dct_sel_baseaddr(pvt); |
1580 | 1484 | ||
@@ -1585,38 +1489,41 @@ static int f10_match_to_this_node(struct amd64_pvt *pvt, int dram_range, | |||
1585 | if (dct_high_range_enabled(pvt) && | 1489 | if (dct_high_range_enabled(pvt) && |
1586 | !dct_ganging_enabled(pvt) && | 1490 | !dct_ganging_enabled(pvt) && |
1587 | ((sys_addr >> 27) >= (dct_sel_base >> 11))) | 1491 | ((sys_addr >> 27) >= (dct_sel_base >> 11))) |
1588 | high_range = 1; | 1492 | high_range = true; |
1589 | 1493 | ||
1590 | channel = f10_determine_channel(pvt, sys_addr, high_range, intlv_en); | 1494 | channel = f1x_determine_channel(pvt, sys_addr, high_range, intlv_en); |
1591 | 1495 | ||
1592 | chan_addr = f10_get_base_addr_offset(sys_addr, high_range, dct_sel_base, | 1496 | chan_addr = f1x_get_norm_dct_addr(pvt, range, sys_addr, |
1593 | dct_sel_base_off, hole_valid, | 1497 | high_range, dct_sel_base); |
1594 | hole_off, dram_base); | ||
1595 | 1498 | ||
1596 | intlv_shift = f10_map_intlv_en_to_shift(intlv_en); | 1499 | /* Remove node interleaving, see F1x120 */ |
1500 | if (intlv_en) | ||
1501 | chan_addr = ((chan_addr >> (12 + hweight8(intlv_en))) << 12) | | ||
1502 | (chan_addr & 0xfff); | ||
1597 | 1503 | ||
1598 | /* remove Node ID (in case of memory interleaving) */ | 1504 | /* remove channel interleave */ |
1599 | tmp = chan_addr & 0xFC0; | ||
1600 | |||
1601 | chan_addr = ((chan_addr >> intlv_shift) & 0xFFFFFFFFF000ULL) | tmp; | ||
1602 | |||
1603 | /* remove channel interleave and hash */ | ||
1604 | if (dct_interleave_enabled(pvt) && | 1505 | if (dct_interleave_enabled(pvt) && |
1605 | !dct_high_range_enabled(pvt) && | 1506 | !dct_high_range_enabled(pvt) && |
1606 | !dct_ganging_enabled(pvt)) { | 1507 | !dct_ganging_enabled(pvt)) { |
1607 | if (dct_sel_interleave_addr(pvt) != 1) | 1508 | |
1608 | chan_addr = (chan_addr >> 1) & 0xFFFFFFFFFFFFFFC0ULL; | 1509 | if (dct_sel_interleave_addr(pvt) != 1) { |
1609 | else { | 1510 | if (dct_sel_interleave_addr(pvt) == 0x3) |
1610 | tmp = chan_addr & 0xFC0; | 1511 | /* hash 9 */ |
1611 | chan_addr = ((chan_addr & 0xFFFFFFFFFFFFC000ULL) >> 1) | 1512 | chan_addr = ((chan_addr >> 10) << 9) | |
1612 | | tmp; | 1513 | (chan_addr & 0x1ff); |
1613 | } | 1514 | else |
1515 | /* A[6] or hash 6 */ | ||
1516 | chan_addr = ((chan_addr >> 7) << 6) | | ||
1517 | (chan_addr & 0x3f); | ||
1518 | } else | ||
1519 | /* A[12] */ | ||
1520 | chan_addr = ((chan_addr >> 13) << 12) | | ||
1521 | (chan_addr & 0xfff); | ||
1614 | } | 1522 | } |
1615 | 1523 | ||
1616 | debugf1(" (ChannelAddrLong=0x%llx) >> 8 becomes InputAddr=0x%x\n", | 1524 | debugf1(" Normalized DCT addr: 0x%llx\n", chan_addr); |
1617 | chan_addr, (u32)(chan_addr >> 8)); | ||
1618 | 1525 | ||
1619 | cs_found = f10_lookup_addr_in_dct(chan_addr >> 8, node_id, channel); | 1526 | cs_found = f1x_lookup_addr_in_dct(chan_addr, node_id, channel); |
1620 | 1527 | ||
1621 | if (cs_found >= 0) { | 1528 | if (cs_found >= 0) { |
1622 | *nid = node_id; | 1529 | *nid = node_id; |
@@ -1625,23 +1532,21 @@ static int f10_match_to_this_node(struct amd64_pvt *pvt, int dram_range, | |||
1625 | return cs_found; | 1532 | return cs_found; |
1626 | } | 1533 | }
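
Removing the node interleave is the inverse of the selection in f1x_determine_channel(): squeeze hweight8(intlv_en) bits out of the address just above A[11], as the "Remove node interleaving" step above does. Sketch, with remove_node_intlv() invented for illustration:

    #include <stdint.h>
    #include <stdio.h>

    static unsigned popcount8(uint8_t v)
    {
            unsigned n = 0;

            for (; v; v &= v - 1)
                    n++;
            return n;
    }

    static uint64_t remove_node_intlv(uint64_t chan_addr, uint8_t intlv_en)
    {
            if (!intlv_en)
                    return chan_addr;

            /* drop the interleave bit(s) above A[11], keep A[11:0] */
            return ((chan_addr >> (12 + popcount8(intlv_en))) << 12) |
                   (chan_addr & 0xfff);
    }

    int main(void)
    {
            /* 2-node interleave (intlv_en=1): A[12] is the node bit */
            printf("0x%llx\n", (unsigned long long)
                   remove_node_intlv(0x3123ULL, 0x1));   /* -> 0x1123 */
            return 0;
    }
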
1627 | 1534 | ||
1628 | static int f10_translate_sysaddr_to_cs(struct amd64_pvt *pvt, u64 sys_addr, | 1535 | static int f1x_translate_sysaddr_to_cs(struct amd64_pvt *pvt, u64 sys_addr, |
1629 | int *node, int *chan_sel) | 1536 | int *node, int *chan_sel) |
1630 | { | 1537 | { |
1631 | int dram_range, cs_found = -EINVAL; | 1538 | int cs_found = -EINVAL; |
1632 | u64 dram_base, dram_limit; | 1539 | unsigned range; |
1633 | 1540 | ||
1634 | for (dram_range = 0; dram_range < DRAM_REG_COUNT; dram_range++) { | 1541 | for (range = 0; range < DRAM_RANGES; range++) { |
1635 | 1542 | ||
1636 | if (!pvt->dram_rw_en[dram_range]) | 1543 | if (!dram_rw(pvt, range)) |
1637 | continue; | 1544 | continue; |
1638 | 1545 | ||
1639 | dram_base = pvt->dram_base[dram_range]; | 1546 | if ((get_dram_base(pvt, range) <= sys_addr) && |
1640 | dram_limit = pvt->dram_limit[dram_range]; | 1547 | (get_dram_limit(pvt, range) >= sys_addr)) { |
1641 | |||
1642 | if ((dram_base <= sys_addr) && (sys_addr <= dram_limit)) { | ||
1643 | 1548 | ||
1644 | cs_found = f10_match_to_this_node(pvt, dram_range, | 1549 | cs_found = f1x_match_to_this_node(pvt, range, |
1645 | sys_addr, node, | 1550 | sys_addr, node, |
1646 | chan_sel); | 1551 | chan_sel); |
1647 | if (cs_found >= 0) | 1552 | if (cs_found >= 0) |
@@ -1658,16 +1563,14 @@ static int f10_translate_sysaddr_to_cs(struct amd64_pvt *pvt, u64 sys_addr, | |||
1658 | * The @sys_addr is usually an error address received from the hardware | 1563 | * The @sys_addr is usually an error address received from the hardware |
1659 | * (MCX_ADDR). | 1564 | * (MCX_ADDR). |
1660 | */ | 1565 | */ |
1661 | static void f10_map_sysaddr_to_csrow(struct mem_ctl_info *mci, | 1566 | static void f1x_map_sysaddr_to_csrow(struct mem_ctl_info *mci, u64 sys_addr, |
1662 | struct err_regs *err_info, | 1567 | u16 syndrome) |
1663 | u64 sys_addr) | ||
1664 | { | 1568 | { |
1665 | struct amd64_pvt *pvt = mci->pvt_info; | 1569 | struct amd64_pvt *pvt = mci->pvt_info; |
1666 | u32 page, offset; | 1570 | u32 page, offset; |
1667 | int nid, csrow, chan = 0; | 1571 | int nid, csrow, chan = 0; |
1668 | u16 syndrome; | ||
1669 | 1572 | ||
1670 | csrow = f10_translate_sysaddr_to_cs(pvt, sys_addr, &nid, &chan); | 1573 | csrow = f1x_translate_sysaddr_to_cs(pvt, sys_addr, &nid, &chan); |
1671 | 1574 | ||
1672 | if (csrow < 0) { | 1575 | if (csrow < 0) { |
1673 | edac_mc_handle_ce_no_info(mci, EDAC_MOD_STR); | 1576 | edac_mc_handle_ce_no_info(mci, EDAC_MOD_STR); |
@@ -1676,14 +1579,12 @@ static void f10_map_sysaddr_to_csrow(struct mem_ctl_info *mci, | |||
1676 | 1579 | ||
1677 | error_address_to_page_and_offset(sys_addr, &page, &offset); | 1580 | error_address_to_page_and_offset(sys_addr, &page, &offset); |
1678 | 1581 | ||
1679 | syndrome = extract_syndrome(err_info); | ||
1680 | |||
1681 | /* | 1582 | /* |
1682 | * We need the syndromes for channel detection only when we're | 1583 | * We need the syndromes for channel detection only when we're |
1683 | * ganged. Otherwise @chan should already contain the channel at | 1584 | * ganged. Otherwise @chan should already contain the channel at |
1684 | * this point. | 1585 | * this point. |
1685 | */ | 1586 | */ |
1686 | if (dct_ganging_enabled(pvt) && (pvt->nbcfg & K8_NBCFG_CHIPKILL)) | 1587 | if (dct_ganging_enabled(pvt)) |
1687 | chan = get_channel_from_ecc_syndrome(mci, syndrome); | 1588 | chan = get_channel_from_ecc_syndrome(mci, syndrome); |
1688 | 1589 | ||
1689 | if (chan >= 0) | 1590 | if (chan >= 0) |
@@ -1700,16 +1601,16 @@ static void f10_map_sysaddr_to_csrow(struct mem_ctl_info *mci, | |||
1700 | 1601 | ||
1701 | /* | 1602 | /* |
1702 | * debug routine to display the memory sizes of all logical DIMMs and their | 1603 | * debug routine to display the memory sizes of all logical DIMMs and their |
1703 | * CSROWs as well | 1604 | * CSROWs |
1704 | */ | 1605 | */ |
1705 | static void amd64_debug_display_dimm_sizes(int ctrl, struct amd64_pvt *pvt) | 1606 | static void amd64_debug_display_dimm_sizes(struct amd64_pvt *pvt, u8 ctrl) |
1706 | { | 1607 | { |
1707 | int dimm, size0, size1, factor = 0; | 1608 | int dimm, size0, size1, factor = 0; |
1708 | u32 dbam; | 1609 | u32 *dcsb = ctrl ? pvt->csels[1].csbases : pvt->csels[0].csbases; |
1709 | u32 *dcsb; | 1610 | u32 dbam = ctrl ? pvt->dbam1 : pvt->dbam0; |
1710 | 1611 | ||
1711 | if (boot_cpu_data.x86 == 0xf) { | 1612 | if (boot_cpu_data.x86 == 0xf) { |
1712 | if (pvt->dclr0 & F10_WIDTH_128) | 1613 | if (pvt->dclr0 & WIDTH_128) |
1713 | factor = 1; | 1614 | factor = 1; |
1714 | 1615 | ||
1715 | /* K8 families < revF not supported yet */ | 1616 | /* K8 families < revF not supported yet */ |
@@ -1719,11 +1620,11 @@ static void amd64_debug_display_dimm_sizes(int ctrl, struct amd64_pvt *pvt) | |||
1719 | WARN_ON(ctrl != 0); | 1620 | WARN_ON(ctrl != 0); |
1720 | } | 1621 | } |
1721 | 1622 | ||
1722 | debugf1("F2x%d80 (DRAM Bank Address Mapping): 0x%08x\n", | 1623 | dbam = (ctrl && !dct_ganging_enabled(pvt)) ? pvt->dbam1 : pvt->dbam0; |
1723 | ctrl, ctrl ? pvt->dbam1 : pvt->dbam0); | 1624 | dcsb = (ctrl && !dct_ganging_enabled(pvt)) ? pvt->csels[1].csbases |
1625 | : pvt->csels[0].csbases; | ||
1724 | 1626 | ||
1725 | dbam = ctrl ? pvt->dbam1 : pvt->dbam0; | 1627 | debugf1("F2x%d80 (DRAM Bank Address Mapping): 0x%08x\n", ctrl, dbam); |
1726 | dcsb = ctrl ? pvt->dcsb1 : pvt->dcsb0; | ||
1727 | 1628 | ||
1728 | edac_printk(KERN_DEBUG, EDAC_MC, "DCT%d chip selects:\n", ctrl); | 1629 | edac_printk(KERN_DEBUG, EDAC_MC, "DCT%d chip selects:\n", ctrl); |
1729 | 1630 | ||
@@ -1731,67 +1632,53 @@ static void amd64_debug_display_dimm_sizes(int ctrl, struct amd64_pvt *pvt) | |||
1731 | for (dimm = 0; dimm < 4; dimm++) { | 1632 | for (dimm = 0; dimm < 4; dimm++) { |
1732 | 1633 | ||
1733 | size0 = 0; | 1634 | size0 = 0; |
1734 | if (dcsb[dimm*2] & K8_DCSB_CS_ENABLE) | 1635 | if (dcsb[dimm*2] & DCSB_CS_ENABLE) |
1735 | size0 = pvt->ops->dbam_to_cs(pvt, DBAM_DIMM(dimm, dbam)); | 1636 | size0 = pvt->ops->dbam_to_cs(pvt, ctrl, |
1637 | DBAM_DIMM(dimm, dbam)); | ||
1736 | 1638 | ||
1737 | size1 = 0; | 1639 | size1 = 0; |
1738 | if (dcsb[dimm*2 + 1] & K8_DCSB_CS_ENABLE) | 1640 | if (dcsb[dimm*2 + 1] & DCSB_CS_ENABLE) |
1739 | size1 = pvt->ops->dbam_to_cs(pvt, DBAM_DIMM(dimm, dbam)); | 1641 | size1 = pvt->ops->dbam_to_cs(pvt, ctrl, |
1642 | DBAM_DIMM(dimm, dbam)); | ||
1740 | 1643 | ||
1741 | edac_printk(KERN_DEBUG, EDAC_MC, " %d: %5dMB %d: %5dMB\n", | 1644 | amd64_info(EDAC_MC ": %d: %5dMB %d: %5dMB\n", |
1742 | dimm * 2, size0 << factor, | 1645 | dimm * 2, size0 << factor, |
1743 | dimm * 2 + 1, size1 << factor); | 1646 | dimm * 2 + 1, size1 << factor); |
1744 | } | 1647 | } |
1745 | } | 1648 | }
1746 | 1649 | ||
1747 | /* | ||
1748 | * There currently are 3 types of MC devices for AMD Athlon/Opterons | ||
1749 | * (as per PCI DEVICE_IDs): | ||
1750 | * | ||
1751 | * Family K8: That is the Athlon64 and Opteron CPUs. They all have the same PCI | ||
1752 | * DEVICE ID, even though there are differences between the different Revisions | ||
1753 | * (CG,D,E,F). | ||
1754 | * | ||
1755 | * Family F10h and F11h. | ||
1756 | * | ||
1757 | */ | ||
1758 | static struct amd64_family_type amd64_family_types[] = { | 1650 | static struct amd64_family_type amd64_family_types[] = { |
1759 | [K8_CPUS] = { | 1651 | [K8_CPUS] = { |
1760 | .ctl_name = "RevF", | 1652 | .ctl_name = "K8", |
1761 | .addr_f1_ctl = PCI_DEVICE_ID_AMD_K8_NB_ADDRMAP, | 1653 | .f1_id = PCI_DEVICE_ID_AMD_K8_NB_ADDRMAP, |
1762 | .misc_f3_ctl = PCI_DEVICE_ID_AMD_K8_NB_MISC, | 1654 | .f3_id = PCI_DEVICE_ID_AMD_K8_NB_MISC, |
1763 | .ops = { | 1655 | .ops = { |
1764 | .early_channel_count = k8_early_channel_count, | 1656 | .early_channel_count = k8_early_channel_count, |
1765 | .get_error_address = k8_get_error_address, | ||
1766 | .read_dram_base_limit = k8_read_dram_base_limit, | ||
1767 | .map_sysaddr_to_csrow = k8_map_sysaddr_to_csrow, | 1657 | .map_sysaddr_to_csrow = k8_map_sysaddr_to_csrow, |
1768 | .dbam_to_cs = k8_dbam_to_chip_select, | 1658 | .dbam_to_cs = k8_dbam_to_chip_select, |
1659 | .read_dct_pci_cfg = k8_read_dct_pci_cfg, | ||
1769 | } | 1660 | } |
1770 | }, | 1661 | }, |
1771 | [F10_CPUS] = { | 1662 | [F10_CPUS] = { |
1772 | .ctl_name = "Family 10h", | 1663 | .ctl_name = "F10h", |
1773 | .addr_f1_ctl = PCI_DEVICE_ID_AMD_10H_NB_MAP, | 1664 | .f1_id = PCI_DEVICE_ID_AMD_10H_NB_MAP, |
1774 | .misc_f3_ctl = PCI_DEVICE_ID_AMD_10H_NB_MISC, | 1665 | .f3_id = PCI_DEVICE_ID_AMD_10H_NB_MISC, |
1775 | .ops = { | 1666 | .ops = { |
1776 | .early_channel_count = f10_early_channel_count, | 1667 | .early_channel_count = f1x_early_channel_count, |
1777 | .get_error_address = f10_get_error_address, | 1668 | .map_sysaddr_to_csrow = f1x_map_sysaddr_to_csrow, |
1778 | .read_dram_base_limit = f10_read_dram_base_limit, | ||
1779 | .read_dram_ctl_register = f10_read_dram_ctl_register, | ||
1780 | .map_sysaddr_to_csrow = f10_map_sysaddr_to_csrow, | ||
1781 | .dbam_to_cs = f10_dbam_to_chip_select, | 1669 | .dbam_to_cs = f10_dbam_to_chip_select, |
1670 | .read_dct_pci_cfg = f10_read_dct_pci_cfg, | ||
1782 | } | 1671 | } |
1783 | }, | 1672 | }, |
1784 | [F11_CPUS] = { | 1673 | [F15_CPUS] = { |
1785 | .ctl_name = "Family 11h", | 1674 | .ctl_name = "F15h", |
1786 | .addr_f1_ctl = PCI_DEVICE_ID_AMD_11H_NB_MAP, | 1675 | .f1_id = PCI_DEVICE_ID_AMD_15H_NB_F1, |
1787 | .misc_f3_ctl = PCI_DEVICE_ID_AMD_11H_NB_MISC, | 1676 | .f3_id = PCI_DEVICE_ID_AMD_15H_NB_F3, |
1788 | .ops = { | 1677 | .ops = { |
1789 | .early_channel_count = f10_early_channel_count, | 1678 | .early_channel_count = f1x_early_channel_count, |
1790 | .get_error_address = f10_get_error_address, | 1679 | .map_sysaddr_to_csrow = f1x_map_sysaddr_to_csrow, |
1791 | .read_dram_base_limit = f10_read_dram_base_limit, | 1680 | .dbam_to_cs = f15_dbam_to_chip_select, |
1792 | .read_dram_ctl_register = f10_read_dram_ctl_register, | 1681 | .read_dct_pci_cfg = f15_read_dct_pci_cfg, |
1793 | .map_sysaddr_to_csrow = f10_map_sysaddr_to_csrow, | ||
1794 | .dbam_to_cs = f10_dbam_to_chip_select, | ||
1795 | } | 1682 | } |
1796 | }, | 1683 | }, |
1797 | }; | 1684 | }; |
@@ -1881,15 +1768,15 @@ static u16 x8_vectors[] = { | |||
1881 | 0x0100, 0x0200, 0x0400, 0x0800, 0x1000, 0x2000, 0x4000, 0x8000, | 1768 | 0x0100, 0x0200, 0x0400, 0x0800, 0x1000, 0x2000, 0x4000, 0x8000, |
1882 | }; | 1769 | }; |
1883 | 1770 | ||
1884 | static int decode_syndrome(u16 syndrome, u16 *vectors, int num_vecs, | 1771 | static int decode_syndrome(u16 syndrome, u16 *vectors, unsigned num_vecs, |
1885 | int v_dim) | 1772 | unsigned v_dim) |
1886 | { | 1773 | { |
1887 | unsigned int i, err_sym; | 1774 | unsigned int i, err_sym; |
1888 | 1775 | ||
1889 | for (err_sym = 0; err_sym < num_vecs / v_dim; err_sym++) { | 1776 | for (err_sym = 0; err_sym < num_vecs / v_dim; err_sym++) { |
1890 | u16 s = syndrome; | 1777 | u16 s = syndrome; |
1891 | int v_idx = err_sym * v_dim; | 1778 | unsigned v_idx = err_sym * v_dim; |
1892 | int v_end = (err_sym + 1) * v_dim; | 1779 | unsigned v_end = (err_sym + 1) * v_dim; |
1893 | 1780 | ||
1894 | /* walk over all 16 bits of the syndrome */ | 1781 | /* walk over all 16 bits of the syndrome */ |
1895 | for (i = 1; i < (1U << 16); i <<= 1) { | 1782 | for (i = 1; i < (1U << 16); i <<= 1) { |
@@ -1961,54 +1848,50 @@ static int get_channel_from_ecc_syndrome(struct mem_ctl_info *mci, u16 syndrome) | |||
1961 | struct amd64_pvt *pvt = mci->pvt_info; | 1848 | struct amd64_pvt *pvt = mci->pvt_info; |
1962 | int err_sym = -1; | 1849 | int err_sym = -1; |
1963 | 1850 | ||
1964 | if (pvt->syn_type == 8) | 1851 | if (pvt->ecc_sym_sz == 8) |
1965 | err_sym = decode_syndrome(syndrome, x8_vectors, | 1852 | err_sym = decode_syndrome(syndrome, x8_vectors, |
1966 | ARRAY_SIZE(x8_vectors), | 1853 | ARRAY_SIZE(x8_vectors), |
1967 | pvt->syn_type); | 1854 | pvt->ecc_sym_sz); |
1968 | else if (pvt->syn_type == 4) | 1855 | else if (pvt->ecc_sym_sz == 4) |
1969 | err_sym = decode_syndrome(syndrome, x4_vectors, | 1856 | err_sym = decode_syndrome(syndrome, x4_vectors, |
1970 | ARRAY_SIZE(x4_vectors), | 1857 | ARRAY_SIZE(x4_vectors), |
1971 | pvt->syn_type); | 1858 | pvt->ecc_sym_sz); |
1972 | else { | 1859 | else { |
1973 | amd64_printk(KERN_WARNING, "%s: Illegal syndrome type: %u\n", | 1860 | amd64_warn("Illegal syndrome type: %u\n", pvt->ecc_sym_sz); |
1974 | __func__, pvt->syn_type); | ||
1975 | return err_sym; | 1861 | return err_sym; |
1976 | } | 1862 | } |
1977 | 1863 | ||
1978 | return map_err_sym_to_channel(err_sym, pvt->syn_type); | 1864 | return map_err_sym_to_channel(err_sym, pvt->ecc_sym_sz); |
1979 | } | 1865 | }
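
decode_syndrome() (its body is elided by the hunk above) walks the syndrome bits against per-symbol blocks of the x4/x8 vector tables; conceptually it asks which error symbol's basis vectors can XOR-reproduce the syndrome. A brute-force equivalent of that idea, correct for the small v_dim (4 or 8) used here but deliberately simpler than the kernel's actual bit-walk; the basis values in main() are toys:

    #include <stdint.h>
    #include <stdio.h>

    static int decode_syndrome_bruteforce(uint16_t syndrome,
                                          const uint16_t *vectors,
                                          unsigned num_vecs, unsigned v_dim)
    {
            unsigned sym, comb, k;

            for (sym = 0; sym < num_vecs / v_dim; sym++) {
                    const uint16_t *basis = &vectors[sym * v_dim];

                    /* try every XOR combination of this symbol's vectors */
                    for (comb = 1; comb < (1u << v_dim); comb++) {
                            uint16_t s = 0;

                            for (k = 0; k < v_dim; k++)
                                    if (comb & (1u << k))
                                            s ^= basis[k];

                            if (s == syndrome)
                                    return (int)sym;
                    }
            }
            return -1;      /* no symbol reproduces the syndrome */
    }

    int main(void)
    {
            /* toy basis: two symbols, two vectors each */
            const uint16_t vecs[] = { 0x0001, 0x0002, 0x0100, 0x0200 };

            printf("%d\n", decode_syndrome_bruteforce(0x0003, vecs, 4, 2));
            printf("%d\n", decode_syndrome_bruteforce(0x0300, vecs, 4, 2));
            return 0;
    }
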
1980 | 1866 | ||
1981 | /* | 1867 | /* |
1982 | * Handle any Correctable Errors (CEs) that have occurred. Check for valid ERROR | 1868 | * Handle any Correctable Errors (CEs) that have occurred. Check for valid ERROR |
1983 | * ADDRESS and process. | 1869 | * ADDRESS and process. |
1984 | */ | 1870 | */ |
1985 | static void amd64_handle_ce(struct mem_ctl_info *mci, | 1871 | static void amd64_handle_ce(struct mem_ctl_info *mci, struct mce *m) |
1986 | struct err_regs *info) | ||
1987 | { | 1872 | { |
1988 | struct amd64_pvt *pvt = mci->pvt_info; | 1873 | struct amd64_pvt *pvt = mci->pvt_info; |
1989 | u64 sys_addr; | 1874 | u64 sys_addr; |
1875 | u16 syndrome; | ||
1990 | 1876 | ||
1991 | /* Ensure that the Error Address is VALID */ | 1877 | /* Ensure that the Error Address is VALID */ |
1992 | if ((info->nbsh & K8_NBSH_VALID_ERROR_ADDR) == 0) { | 1878 | if (!(m->status & MCI_STATUS_ADDRV)) { |
1993 | amd64_mc_printk(mci, KERN_ERR, | 1879 | amd64_mc_err(mci, "HW has no ERROR_ADDRESS available\n"); |
1994 | "HW has no ERROR_ADDRESS available\n"); | ||
1995 | edac_mc_handle_ce_no_info(mci, EDAC_MOD_STR); | 1880 | edac_mc_handle_ce_no_info(mci, EDAC_MOD_STR); |
1996 | return; | 1881 | return; |
1997 | } | 1882 | } |
1998 | 1883 | ||
1999 | sys_addr = pvt->ops->get_error_address(mci, info); | 1884 | sys_addr = get_error_address(m); |
1885 | syndrome = extract_syndrome(m->status); | ||
2000 | 1886 | ||
2001 | amd64_mc_printk(mci, KERN_ERR, | 1887 | amd64_mc_err(mci, "CE ERROR_ADDRESS= 0x%llx\n", sys_addr); |
2002 | "CE ERROR_ADDRESS= 0x%llx\n", sys_addr); | ||
2003 | 1888 | ||
2004 | pvt->ops->map_sysaddr_to_csrow(mci, info, sys_addr); | 1889 | pvt->ops->map_sysaddr_to_csrow(mci, sys_addr, syndrome); |
2005 | } | 1890 | } |
2006 | 1891 | ||
2007 | /* Handle any Un-correctable Errors (UEs) */ | 1892 | /* Handle any Un-correctable Errors (UEs) */ |
2008 | static void amd64_handle_ue(struct mem_ctl_info *mci, | 1893 | static void amd64_handle_ue(struct mem_ctl_info *mci, struct mce *m) |
2009 | struct err_regs *info) | ||
2010 | { | 1894 | { |
2011 | struct amd64_pvt *pvt = mci->pvt_info; | ||
2012 | struct mem_ctl_info *log_mci, *src_mci = NULL; | 1895 | struct mem_ctl_info *log_mci, *src_mci = NULL; |
2013 | int csrow; | 1896 | int csrow; |
2014 | u64 sys_addr; | 1897 | u64 sys_addr; |
@@ -2016,14 +1899,13 @@ static void amd64_handle_ue(struct mem_ctl_info *mci, | |||
2016 | 1899 | ||
2017 | log_mci = mci; | 1900 | log_mci = mci; |
2018 | 1901 | ||
2019 | if ((info->nbsh & K8_NBSH_VALID_ERROR_ADDR) == 0) { | 1902 | if (!(m->status & MCI_STATUS_ADDRV)) { |
2020 | amd64_mc_printk(mci, KERN_CRIT, | 1903 | amd64_mc_err(mci, "HW has no ERROR_ADDRESS available\n"); |
2021 | "HW has no ERROR_ADDRESS available\n"); | ||
2022 | edac_mc_handle_ue_no_info(log_mci, EDAC_MOD_STR); | 1904 | edac_mc_handle_ue_no_info(log_mci, EDAC_MOD_STR); |
2023 | return; | 1905 | return; |
2024 | } | 1906 | } |
2025 | 1907 | ||
2026 | sys_addr = pvt->ops->get_error_address(mci, info); | 1908 | sys_addr = get_error_address(m); |
2027 | 1909 | ||
2028 | /* | 1910 | /* |
2029 | * Find out which node the error address belongs to. This may be | 1911 | * Find out which node the error address belongs to. This may be |
@@ -2031,9 +1913,8 @@ static void amd64_handle_ue(struct mem_ctl_info *mci, | |||
2031 | */ | 1913 | */ |
2032 | src_mci = find_mc_by_sys_addr(mci, sys_addr); | 1914 | src_mci = find_mc_by_sys_addr(mci, sys_addr); |
2033 | if (!src_mci) { | 1915 | if (!src_mci) { |
2034 | amd64_mc_printk(mci, KERN_CRIT, | 1916 | amd64_mc_err(mci, "ERROR ADDRESS (0x%lx) NOT mapped to a MC\n", |
2035 | "ERROR ADDRESS (0x%lx) value NOT mapped to a MC\n", | 1917 | (unsigned long)sys_addr); |
2036 | (unsigned long)sys_addr); | ||
2037 | edac_mc_handle_ue_no_info(log_mci, EDAC_MOD_STR); | 1918 | edac_mc_handle_ue_no_info(log_mci, EDAC_MOD_STR); |
2038 | return; | 1919 | return; |
2039 | } | 1920 | } |
@@ -2042,9 +1923,8 @@ static void amd64_handle_ue(struct mem_ctl_info *mci, | |||
2042 | 1923 | ||
2043 | csrow = sys_addr_to_csrow(log_mci, sys_addr); | 1924 | csrow = sys_addr_to_csrow(log_mci, sys_addr); |
2044 | if (csrow < 0) { | 1925 | if (csrow < 0) { |
2045 | amd64_mc_printk(mci, KERN_CRIT, | 1926 | amd64_mc_err(mci, "ERROR_ADDRESS (0x%lx) NOT mapped to CS\n", |
2046 | "ERROR_ADDRESS (0x%lx) value NOT mapped to 'csrow'\n", | 1927 | (unsigned long)sys_addr); |
2047 | (unsigned long)sys_addr); | ||
2048 | edac_mc_handle_ue_no_info(log_mci, EDAC_MOD_STR); | 1928 | edac_mc_handle_ue_no_info(log_mci, EDAC_MOD_STR); |
2049 | } else { | 1929 | } else { |
2050 | error_address_to_page_and_offset(sys_addr, &page, &offset); | 1930 | error_address_to_page_and_offset(sys_addr, &page, &offset); |
@@ -2053,14 +1933,14 @@ static void amd64_handle_ue(struct mem_ctl_info *mci, | |||
2053 | } | 1933 | }
2054 | 1934 | ||
2055 | static inline void __amd64_decode_bus_error(struct mem_ctl_info *mci, | 1935 | static inline void __amd64_decode_bus_error(struct mem_ctl_info *mci, |
2056 | struct err_regs *info) | 1936 | struct mce *m) |
2057 | { | 1937 | { |
2058 | u32 ec = ERROR_CODE(info->nbsl); | 1938 | u16 ec = EC(m->status); |
2059 | u32 xec = EXT_ERROR_CODE(info->nbsl); | 1939 | u8 xec = XEC(m->status, 0x1f); |
2060 | int ecc_type = (info->nbsh >> 13) & 0x3; | 1940 | u8 ecc_type = (m->status >> 45) & 0x3; |
2061 | 1941 | ||
2062 | /* Bail out early if this was an 'observed' error */ | 1942 | /* Bail out early if this was an 'observed' error */ |
2063 | if (PP(ec) == K8_NBSL_PP_OBS) | 1943 | if (PP(ec) == NBSL_PP_OBS) |
2064 | return; | 1944 | return; |
2065 | 1945 | ||
2066 | /* Do only ECC errors */ | 1946 | /* Do only ECC errors */ |
@@ -2068,103 +1948,68 @@ static inline void __amd64_decode_bus_error(struct mem_ctl_info *mci, | |||
2068 | return; | 1948 | return; |
2069 | 1949 | ||
2070 | if (ecc_type == 2) | 1950 | if (ecc_type == 2) |
2071 | amd64_handle_ce(mci, info); | 1951 | amd64_handle_ce(mci, m); |
2072 | else if (ecc_type == 1) | 1952 | else if (ecc_type == 1) |
2073 | amd64_handle_ue(mci, info); | 1953 | amd64_handle_ue(mci, m); |
2074 | } | 1954 | }
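
With struct mce in hand, the error-code fields come straight out of MCi_STATUS. EC() and XEC() are assumed here to expand to the usual AMD MCA field extractors (the low 16 bits, and a masked field starting at bit 16), and bits [46:45] give the ECC type as in the function above; the status value below is made up:

    #include <stdint.h>
    #include <stdio.h>

    /* assumed expansions of the EC()/XEC() helpers used above */
    #define EC(status)        ((uint16_t)((status) & 0xffff))
    #define XEC(status, mask) ((uint8_t)(((status) >> 16) & (mask)))

    int main(void)
    {
            uint64_t status = 0x9c20004000010011ULL;   /* made-up sample */

            printf("ec=0x%04x xec=0x%02x ecc_type=%u\n",
                   EC(status), XEC(status, 0x1f),
                   (unsigned)((status >> 45) & 0x3));
            return 0;
    }
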
2075 | 1955 | ||
2076 | void amd64_decode_bus_error(int node_id, struct err_regs *regs) | 1956 | void amd64_decode_bus_error(int node_id, struct mce *m, u32 nbcfg) |
2077 | { | 1957 | { |
2078 | struct mem_ctl_info *mci = mci_lookup[node_id]; | 1958 | struct mem_ctl_info *mci = mcis[node_id]; |
2079 | |||
2080 | __amd64_decode_bus_error(mci, regs); | ||
2081 | |||
2082 | /* | ||
2083 | * Check the UE bit of the NB status high register, if set generate some | ||
2084 | * logs. If NOT a GART error, then process the event as a NO-INFO event. | ||
2085 | * If it was a GART error, skip that process. | ||
2086 | * | ||
2087 | * FIXME: this should go somewhere else, if at all. | ||
2088 | */ | ||
2089 | if (regs->nbsh & K8_NBSH_UC_ERR && !report_gart_errors) | ||
2090 | edac_mc_handle_ue_no_info(mci, "UE bit is set"); | ||
2091 | 1959 | ||
1960 | __amd64_decode_bus_error(mci, m); | ||
2092 | } | 1961 | }
2093 | 1962 | ||
2094 | /* | 1963 | /* |
2095 | * Input: | 1964 | * Use pvt->F2 which contains the F2 CPU PCI device to get the related |
2096 | * 1) struct amd64_pvt which contains pvt->dram_f2_ctl pointer | 1965 | * F1 (AddrMap) and F3 (Misc) devices. Return negative value on error. |
2097 | * 2) AMD Family index value | ||
2098 | * | ||
2099 | * Output: | ||
2100 | * Upon return of 0, the following filled in: | ||
2101 | * | ||
2102 | * struct pvt->addr_f1_ctl | ||
2103 | * struct pvt->misc_f3_ctl | ||
2104 | * | ||
2105 | * Filled in with related device functions of 'dram_f2_ctl' | ||
2106 | * These devices are "reserved" via the pci_get_device() | ||
2107 | * | ||
2108 | * Upon return of 1 (error status): | ||
2109 | * | ||
2110 | * Nothing reserved | ||
2111 | */ | 1966 | */ |
2112 | static int amd64_reserve_mc_sibling_devices(struct amd64_pvt *pvt, int mc_idx) | 1967 | static int reserve_mc_sibling_devs(struct amd64_pvt *pvt, u16 f1_id, u16 f3_id) |
2113 | { | 1968 | { |
2114 | const struct amd64_family_type *amd64_dev = &amd64_family_types[mc_idx]; | ||
2115 | |||
2116 | /* Reserve the ADDRESS MAP Device */ | 1969 | /* Reserve the ADDRESS MAP Device */ |
2117 | pvt->addr_f1_ctl = pci_get_related_function(pvt->dram_f2_ctl->vendor, | 1970 | pvt->F1 = pci_get_related_function(pvt->F2->vendor, f1_id, pvt->F2); |
2118 | amd64_dev->addr_f1_ctl, | 1971 | if (!pvt->F1) { |
2119 | pvt->dram_f2_ctl); | 1972 | amd64_err("error address map device not found: " |
2120 | 1973 | "vendor %x device 0x%x (broken BIOS?)\n", | |
2121 | if (!pvt->addr_f1_ctl) { | 1974 | PCI_VENDOR_ID_AMD, f1_id); |
2122 | amd64_printk(KERN_ERR, "error address map device not found: " | 1975 | return -ENODEV; |
2123 | "vendor %x device 0x%x (broken BIOS?)\n", | ||
2124 | PCI_VENDOR_ID_AMD, amd64_dev->addr_f1_ctl); | ||
2125 | return 1; | ||
2126 | } | 1976 | } |
2127 | 1977 | ||
2128 | /* Reserve the MISC Device */ | 1978 | /* Reserve the MISC Device */ |
2129 | pvt->misc_f3_ctl = pci_get_related_function(pvt->dram_f2_ctl->vendor, | 1979 | pvt->F3 = pci_get_related_function(pvt->F2->vendor, f3_id, pvt->F2); |
2130 | amd64_dev->misc_f3_ctl, | 1980 | if (!pvt->F3) { |
2131 | pvt->dram_f2_ctl); | 1981 | pci_dev_put(pvt->F1); |
1982 | pvt->F1 = NULL; | ||
2132 | 1983 | ||
2133 | if (!pvt->misc_f3_ctl) { | 1984 | amd64_err("error F3 device not found: " |
2134 | pci_dev_put(pvt->addr_f1_ctl); | 1985 | "vendor %x device 0x%x (broken BIOS?)\n", |
2135 | pvt->addr_f1_ctl = NULL; | 1986 | PCI_VENDOR_ID_AMD, f3_id); |
2136 | 1987 | ||
2137 | amd64_printk(KERN_ERR, "error miscellaneous device not found: " | 1988 | return -ENODEV; |
2138 | "vendor %x device 0x%x (broken BIOS?)\n", | ||
2139 | PCI_VENDOR_ID_AMD, amd64_dev->misc_f3_ctl); | ||
2140 | return 1; | ||
2141 | } | 1989 | } |
2142 | 1990 | debugf1("F1: %s\n", pci_name(pvt->F1)); | |
2143 | debugf1(" Addr Map device PCI Bus ID:\t%s\n", | 1991 | debugf1("F2: %s\n", pci_name(pvt->F2)); |
2144 | pci_name(pvt->addr_f1_ctl)); | 1992 | debugf1("F3: %s\n", pci_name(pvt->F3)); |
2145 | debugf1(" DRAM MEM-CTL PCI Bus ID:\t%s\n", | ||
2146 | pci_name(pvt->dram_f2_ctl)); | ||
2147 | debugf1(" Misc device PCI Bus ID:\t%s\n", | ||
2148 | pci_name(pvt->misc_f3_ctl)); | ||
2149 | 1993 | ||
2150 | return 0; | 1994 | return 0; |
2151 | } | 1995 | } |
2152 | 1996 | ||
2153 | static void amd64_free_mc_sibling_devices(struct amd64_pvt *pvt) | 1997 | static void free_mc_sibling_devs(struct amd64_pvt *pvt) |
2154 | { | 1998 | { |
2155 | pci_dev_put(pvt->addr_f1_ctl); | 1999 | pci_dev_put(pvt->F1); |
2156 | pci_dev_put(pvt->misc_f3_ctl); | 2000 | pci_dev_put(pvt->F3); |
2157 | } | 2001 | } |
2158 | 2002 | ||
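The reserve/free pair above follows the usual PCI refcount discipline: pci_get_related_function() takes a reference on each sibling function of the F2 device, and a partially acquired reference must be dropped again on the error path (F1 is put back when F3 cannot be found). A minimal standalone sketch of that acquire-all-or-release-all pattern; acquire()/release() are hypothetical stand-ins for the PCI calls:

    #include <stdio.h>
    #include <stdlib.h>

    /* Hypothetical stand-ins for pci_get_related_function()/pci_dev_put(). */
    static void *acquire(const char *name) { printf("get %s\n", name); return malloc(1); }
    static void release(void *dev) { free(dev); }

    struct pvt { void *f1, *f3; };

    /* Mirrors reserve_mc_sibling_devs(): on return, either both devices
     * are held or neither is. */
    static int reserve_siblings(struct pvt *p)
    {
            p->f1 = acquire("F1");
            if (!p->f1)
                    return -1;              /* -ENODEV in the driver */

            p->f3 = acquire("F3");
            if (!p->f3) {
                    release(p->f1);         /* undo the partial acquisition */
                    p->f1 = NULL;
                    return -1;
            }
            return 0;
    }

    int main(void)
    {
            struct pvt p = { 0, 0 };

            if (!reserve_siblings(&p)) {    /* free_mc_sibling_devs() analogue */
                    release(p.f1);
                    release(p.f3);
            }
            return 0;
    }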
2159 | /* | 2003 | /* |
2160 | * Retrieve the hardware registers of the memory controller (this includes the | 2004 | * Retrieve the hardware registers of the memory controller (this includes the |
2161 | * 'Address Map' and 'Misc' device regs) | 2005 | * 'Address Map' and 'Misc' device regs) |
2162 | */ | 2006 | */ |
2163 | static void amd64_read_mc_registers(struct amd64_pvt *pvt) | 2007 | static void read_mc_regs(struct amd64_pvt *pvt) |
2164 | { | 2008 | { |
2009 | struct cpuinfo_x86 *c = &boot_cpu_data; | ||
2165 | u64 msr_val; | 2010 | u64 msr_val; |
2166 | u32 tmp; | 2011 | u32 tmp; |
2167 | int dram; | 2012 | unsigned range; |
2168 | 2013 | ||
2169 | /* | 2014 | /* |
2170 | * Retrieve TOP_MEM and TOP_MEM2; no masking off of reserved bits since | 2015 | * Retrieve TOP_MEM and TOP_MEM2; no masking off of reserved bits since |
@@ -2181,78 +2026,66 @@ static void amd64_read_mc_registers(struct amd64_pvt *pvt) | |||
2181 | } else | 2026 | } else |
2182 | debugf0(" TOP_MEM2 disabled.\n"); | 2027 | debugf0(" TOP_MEM2 disabled.\n"); |
2183 | 2028 | ||
2184 | amd64_cpu_display_info(pvt); | 2029 | amd64_read_pci_cfg(pvt->F3, NBCAP, &pvt->nbcap); |
2185 | 2030 | ||
2186 | amd64_read_pci_cfg(pvt->misc_f3_ctl, K8_NBCAP, &pvt->nbcap); | 2031 | read_dram_ctl_register(pvt); |
2187 | 2032 | ||
2188 | if (pvt->ops->read_dram_ctl_register) | 2033 | for (range = 0; range < DRAM_RANGES; range++) { |
2189 | pvt->ops->read_dram_ctl_register(pvt); | 2034 | u8 rw; |
2190 | 2035 | ||
2191 | for (dram = 0; dram < DRAM_REG_COUNT; dram++) { | 2036 | /* read settings for this DRAM range */ |
2192 | /* | 2037 | read_dram_base_limit_regs(pvt, range); |
2193 | * Call CPU specific READ function to get the DRAM Base and | ||
2194 | * Limit values from the DCT. | ||
2195 | */ | ||
2196 | pvt->ops->read_dram_base_limit(pvt, dram); | ||
2197 | 2038 | ||
2198 | /* | 2039 | rw = dram_rw(pvt, range); |
2199 | * Only print out debug info on rows with both R and W Enabled. | 2040 | if (!rw) |
2200 | * In normal processing, the compiler should optimize this whole 'if' | 2041 | continue;
2201 | * debug output block away. | 2042 | |
2202 | */ | 2043 | debugf1(" DRAM range[%d], base: 0x%016llx; limit: 0x%016llx\n", |
2203 | if (pvt->dram_rw_en[dram] != 0) { | 2044 | range, |
2204 | debugf1(" DRAM-BASE[%d]: 0x%016llx " | 2045 | get_dram_base(pvt, range), |
2205 | "DRAM-LIMIT: 0x%016llx\n", | 2046 | get_dram_limit(pvt, range)); |
2206 | dram, | 2047 | |
2207 | pvt->dram_base[dram], | 2048 | debugf1(" IntlvEn=%s; Range access: %s%s IntlvSel=%d DstNode=%d\n", |
2208 | pvt->dram_limit[dram]); | 2049 | dram_intlv_en(pvt, range) ? "Enabled" : "Disabled", |
2209 | 2050 | (rw & 0x1) ? "R" : "-", | |
2210 | debugf1(" IntlvEn=%s %s %s " | 2051 | (rw & 0x2) ? "W" : "-", |
2211 | "IntlvSel=%d DstNode=%d\n", | 2052 | dram_intlv_sel(pvt, range), |
2212 | pvt->dram_IntlvEn[dram] ? | 2053 | dram_dst_node(pvt, range)); |
2213 | "Enabled" : "Disabled", | ||
2214 | (pvt->dram_rw_en[dram] & 0x2) ? "W" : "!W", | ||
2215 | (pvt->dram_rw_en[dram] & 0x1) ? "R" : "!R", | ||
2216 | pvt->dram_IntlvSel[dram], | ||
2217 | pvt->dram_DstNode[dram]); | ||
2218 | } | ||
2219 | } | 2054 | } |
2220 | 2055 | ||
2221 | amd64_read_dct_base_mask(pvt); | 2056 | read_dct_base_mask(pvt); |
2222 | 2057 | ||
2223 | amd64_read_pci_cfg(pvt->addr_f1_ctl, K8_DHAR, &pvt->dhar); | 2058 | amd64_read_pci_cfg(pvt->F1, DHAR, &pvt->dhar); |
2224 | amd64_read_dbam_reg(pvt); | 2059 | amd64_read_dct_pci_cfg(pvt, DBAM0, &pvt->dbam0); |
2225 | 2060 | ||
2226 | amd64_read_pci_cfg(pvt->misc_f3_ctl, | 2061 | amd64_read_pci_cfg(pvt->F3, F10_ONLINE_SPARE, &pvt->online_spare); |
2227 | F10_ONLINE_SPARE, &pvt->online_spare); | ||
2228 | 2062 | ||
2229 | amd64_read_pci_cfg(pvt->dram_f2_ctl, F10_DCLR_0, &pvt->dclr0); | 2063 | amd64_read_dct_pci_cfg(pvt, DCLR0, &pvt->dclr0); |
2230 | amd64_read_pci_cfg(pvt->dram_f2_ctl, F10_DCHR_0, &pvt->dchr0); | 2064 | amd64_read_dct_pci_cfg(pvt, DCHR0, &pvt->dchr0); |
2231 | 2065 | ||
2232 | if (boot_cpu_data.x86 >= 0x10) { | 2066 | if (!dct_ganging_enabled(pvt)) { |
2233 | if (!dct_ganging_enabled(pvt)) { | 2067 | amd64_read_dct_pci_cfg(pvt, DCLR1, &pvt->dclr1); |
2234 | amd64_read_pci_cfg(pvt->dram_f2_ctl, F10_DCLR_1, &pvt->dclr1); | 2068 | amd64_read_dct_pci_cfg(pvt, DCHR1, &pvt->dchr1); |
2235 | amd64_read_pci_cfg(pvt->dram_f2_ctl, F10_DCHR_1, &pvt->dchr1); | ||
2236 | } | ||
2237 | amd64_read_pci_cfg(pvt->misc_f3_ctl, EXT_NB_MCA_CFG, &tmp); | ||
2238 | } | 2069 | } |
2239 | 2070 | ||
2240 | if (boot_cpu_data.x86 == 0x10 && | 2071 | pvt->ecc_sym_sz = 4; |
2241 | boot_cpu_data.x86_model > 7 && | 2072 | |
2242 | /* F3x180[EccSymbolSize]=1 => x8 symbols */ | 2073 | if (c->x86 >= 0x10) { |
2243 | tmp & BIT(25)) | 2074 | amd64_read_pci_cfg(pvt->F3, EXT_NB_MCA_CFG, &tmp); |
2244 | pvt->syn_type = 8; | 2075 | amd64_read_dct_pci_cfg(pvt, DBAM1, &pvt->dbam1); |
2245 | else | ||
2246 | pvt->syn_type = 4; | ||
2247 | 2076 | ||
2248 | amd64_dump_misc_regs(pvt); | 2077 | /* F10h, revD and later can do x8 ECC too */ |
2078 | if ((c->x86 > 0x10 || c->x86_model > 7) && tmp & BIT(25)) | ||
2079 | pvt->ecc_sym_sz = 8; | ||
2080 | } | ||
2081 | dump_misc_regs(pvt); | ||
2249 | } | 2082 | } |
2250 | 2083 | ||
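The per-range loop above pulls fixed bit fields out of the DRAM base/limit low registers; the exact positions come from the dram_* accessors added to amd64_edac.h later in this patch. A self-contained decode of one sample register pair, mirroring the debugf1 output (the register values are made up for illustration):

    #include <stdio.h>
    #include <stdint.h>

    /* Field positions per the dram_* accessors: base.lo[1:0] = R/W enable,
     * base.lo[10:8] = IntlvEn, lim.lo[10:8] = IntlvSel, lim.lo[2:0] = DstNode. */
    int main(void)
    {
            uint32_t base_lo = 0x00000703;  /* sample values, not real hardware */
            uint32_t lim_lo  = 0x00000302;

            uint8_t rw        = base_lo & 0x3;
            uint8_t intlv_en  = (base_lo >> 8) & 0x7;
            uint8_t intlv_sel = (lim_lo >> 8) & 0x7;
            uint8_t dst_node  = lim_lo & 0x7;

            if (!rw)                /* the loop skips ranges with neither R nor W */
                    return 0;

            printf("IntlvEn=%s; Range access: %s%s IntlvSel=%d DstNode=%d\n",
                   intlv_en ? "Enabled" : "Disabled",
                   (rw & 0x1) ? "R" : "-", (rw & 0x2) ? "W" : "-",
                   intlv_sel, dst_node);
            return 0;
    }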
2251 | /* | 2084 | /* |
2252 | * NOTE: CPU Revision Dependent code | 2085 | * NOTE: CPU Revision Dependent code |
2253 | * | 2086 | * |
2254 | * Input: | 2087 | * Input: |
2255 | * @csrow_nr ChipSelect Row Number (0..pvt->cs_count-1) | 2088 | * @csrow_nr ChipSelect Row Number (0..NUM_CHIPSELECTS-1) |
2256 | * k8 private pointer to --> | 2089 | * k8 private pointer to --> |
2257 | * DRAM Bank Address mapping register | 2090 | * DRAM Bank Address mapping register |
2258 | * node_id | 2091 | * node_id |
@@ -2282,7 +2115,7 @@ static void amd64_read_mc_registers(struct amd64_pvt *pvt) | |||
2282 | * encompasses | 2115 | * encompasses |
2283 | * | 2116 | * |
2284 | */ | 2117 | */ |
2285 | static u32 amd64_csrow_nr_pages(int csrow_nr, struct amd64_pvt *pvt) | 2118 | static u32 amd64_csrow_nr_pages(struct amd64_pvt *pvt, u8 dct, int csrow_nr) |
2286 | { | 2119 | { |
2287 | u32 cs_mode, nr_pages; | 2120 | u32 cs_mode, nr_pages; |
2288 | 2121 | ||
@@ -2295,7 +2128,7 @@ static u32 amd64_csrow_nr_pages(int csrow_nr, struct amd64_pvt *pvt) | |||
2295 | */ | 2128 | */ |
2296 | cs_mode = (pvt->dbam0 >> ((csrow_nr / 2) * 4)) & 0xF; | 2129 | cs_mode = (pvt->dbam0 >> ((csrow_nr / 2) * 4)) & 0xF; |
2297 | 2130 | ||
2298 | nr_pages = pvt->ops->dbam_to_cs(pvt, cs_mode) << (20 - PAGE_SHIFT); | 2131 | nr_pages = pvt->ops->dbam_to_cs(pvt, dct, cs_mode) << (20 - PAGE_SHIFT); |
2299 | 2132 | ||
2300 | /* | 2133 | /* |
2301 | * If dual channel then double the memory size of single channel. | 2134 | * If dual channel then double the memory size of single channel. |
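The nibble arithmetic in amd64_csrow_nr_pages() is easy to check in isolation: DBAM packs one 4-bit cs_mode per chip-select pair, the family-specific dbam_to_cs() hook maps that mode to a size in MB, and the shift by (20 - PAGE_SHIFT) converts MB to pages. A sketch with a hypothetical size table standing in for the real per-family mapping:

    #include <stdio.h>
    #include <stdint.h>

    #define PAGE_SHIFT 12   /* 4 KiB pages, as on x86 */

    /* Hypothetical stand-in for pvt->ops->dbam_to_cs(): cs_mode -> MB. */
    static uint32_t dbam_to_cs_mb(uint32_t cs_mode)
    {
            return 32u << cs_mode;  /* illustrative; the real tables differ */
    }

    int main(void)
    {
            uint32_t dbam0 = 0x00003210;    /* sample DBAM value */
            int csrow_nr;

            for (csrow_nr = 0; csrow_nr < 8; csrow_nr += 2) {
                    /* one 4-bit mode field per chip-select pair */
                    uint32_t cs_mode  = (dbam0 >> ((csrow_nr / 2) * 4)) & 0xF;
                    /* MB -> pages: 1 MB = 1 << (20 - PAGE_SHIFT) pages */
                    uint32_t nr_pages = dbam_to_cs_mb(cs_mode) << (20 - PAGE_SHIFT);

                    printf("csrow %d: cs_mode=%u nr_pages=%u\n",
                           csrow_nr, cs_mode, nr_pages);
            }
            return 0;
    }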
@@ -2314,26 +2147,26 @@ static u32 amd64_csrow_nr_pages(int csrow_nr, struct amd64_pvt *pvt) | |||
2314 | * Initialize the array of csrow attribute instances, based on the values | 2147 | * Initialize the array of csrow attribute instances, based on the values |
2315 | * from pci config hardware registers. | 2148 | * from pci config hardware registers. |
2316 | */ | 2149 | */ |
2317 | static int amd64_init_csrows(struct mem_ctl_info *mci) | 2150 | static int init_csrows(struct mem_ctl_info *mci) |
2318 | { | 2151 | { |
2319 | struct csrow_info *csrow; | 2152 | struct csrow_info *csrow; |
2320 | struct amd64_pvt *pvt; | 2153 | struct amd64_pvt *pvt = mci->pvt_info; |
2321 | u64 input_addr_min, input_addr_max, sys_addr; | 2154 | u64 input_addr_min, input_addr_max, sys_addr, base, mask; |
2155 | u32 val; | ||
2322 | int i, empty = 1; | 2156 | int i, empty = 1; |
2323 | 2157 | ||
2324 | pvt = mci->pvt_info; | 2158 | amd64_read_pci_cfg(pvt->F3, NBCFG, &val); |
2325 | 2159 | ||
2326 | amd64_read_pci_cfg(pvt->misc_f3_ctl, K8_NBCFG, &pvt->nbcfg); | 2160 | pvt->nbcfg = val; |
2327 | 2161 | ||
2328 | debugf0("NBCFG= 0x%x CHIPKILL= %s DRAM ECC= %s\n", pvt->nbcfg, | 2162 | debugf0("node %d, NBCFG=0x%08x[ChipKillEccCap: %d|DramEccEn: %d]\n", |
2329 | (pvt->nbcfg & K8_NBCFG_CHIPKILL) ? "Enabled" : "Disabled", | 2163 | pvt->mc_node_id, val, |
2330 | (pvt->nbcfg & K8_NBCFG_ECC_ENABLE) ? "Enabled" : "Disabled" | 2164 | !!(val & NBCFG_CHIPKILL), !!(val & NBCFG_ECC_ENABLE)); |
2331 | ); | ||
2332 | 2165 | ||
2333 | for (i = 0; i < pvt->cs_count; i++) { | 2166 | for_each_chip_select(i, 0, pvt) { |
2334 | csrow = &mci->csrows[i]; | 2167 | csrow = &mci->csrows[i]; |
2335 | 2168 | ||
2336 | if ((pvt->dcsb0[i] & K8_DCSB_CS_ENABLE) == 0) { | 2169 | if (!csrow_enabled(i, 0, pvt)) { |
2337 | debugf1("----CSROW %d EMPTY for node %d\n", i, | 2170 | debugf1("----CSROW %d EMPTY for node %d\n", i, |
2338 | pvt->mc_node_id); | 2171 | pvt->mc_node_id); |
2339 | continue; | 2172 | continue; |
@@ -2343,16 +2176,18 @@ static int amd64_init_csrows(struct mem_ctl_info *mci) | |||
2343 | i, pvt->mc_node_id); | 2176 | i, pvt->mc_node_id); |
2344 | 2177 | ||
2345 | empty = 0; | 2178 | empty = 0; |
2346 | csrow->nr_pages = amd64_csrow_nr_pages(i, pvt); | 2179 | csrow->nr_pages = amd64_csrow_nr_pages(pvt, 0, i); |
2347 | find_csrow_limits(mci, i, &input_addr_min, &input_addr_max); | 2180 | find_csrow_limits(mci, i, &input_addr_min, &input_addr_max); |
2348 | sys_addr = input_addr_to_sys_addr(mci, input_addr_min); | 2181 | sys_addr = input_addr_to_sys_addr(mci, input_addr_min); |
2349 | csrow->first_page = (u32) (sys_addr >> PAGE_SHIFT); | 2182 | csrow->first_page = (u32) (sys_addr >> PAGE_SHIFT); |
2350 | sys_addr = input_addr_to_sys_addr(mci, input_addr_max); | 2183 | sys_addr = input_addr_to_sys_addr(mci, input_addr_max); |
2351 | csrow->last_page = (u32) (sys_addr >> PAGE_SHIFT); | 2184 | csrow->last_page = (u32) (sys_addr >> PAGE_SHIFT); |
2352 | csrow->page_mask = ~mask_from_dct_mask(pvt, i); | 2185 | |
2186 | get_cs_base_and_mask(pvt, i, 0, &base, &mask); | ||
2187 | csrow->page_mask = ~mask; | ||
2353 | /* 8 bytes of resolution */ | 2188 | /* 8 bytes of resolution */ |
2354 | 2189 | ||
2355 | csrow->mtype = amd64_determine_memory_type(pvt); | 2190 | csrow->mtype = amd64_determine_memory_type(pvt, i); |
2356 | 2191 | ||
2357 | debugf1(" for MC node %d csrow %d:\n", pvt->mc_node_id, i); | 2192 | debugf1(" for MC node %d csrow %d:\n", pvt->mc_node_id, i); |
2358 | debugf1(" input_addr_min: 0x%lx input_addr_max: 0x%lx\n", | 2193 | debugf1(" input_addr_min: 0x%lx input_addr_max: 0x%lx\n", |
@@ -2368,9 +2203,9 @@ static int amd64_init_csrows(struct mem_ctl_info *mci) | |||
2368 | /* | 2203 | /* |
2369 | * determine whether CHIPKILL or JUST ECC or NO ECC is operating | 2204 | * determine whether CHIPKILL or JUST ECC or NO ECC is operating |
2370 | */ | 2205 | */ |
2371 | if (pvt->nbcfg & K8_NBCFG_ECC_ENABLE) | 2206 | if (pvt->nbcfg & NBCFG_ECC_ENABLE) |
2372 | csrow->edac_mode = | 2207 | csrow->edac_mode = |
2373 | (pvt->nbcfg & K8_NBCFG_CHIPKILL) ? | 2208 | (pvt->nbcfg & NBCFG_CHIPKILL) ? |
2374 | EDAC_S4ECD4ED : EDAC_SECDED; | 2209 | EDAC_S4ECD4ED : EDAC_SECDED; |
2375 | else | 2210 | else |
2376 | csrow->edac_mode = EDAC_NONE; | 2211 | csrow->edac_mode = EDAC_NONE; |
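The edac_mode chosen above is a pure function of two NBCFG bits (NBCFG_ECC_ENABLE and NBCFG_CHIPKILL, defined as BIT(22) and BIT(23) in amd64_edac.h below). The same decision tree as a compact standalone check:

    #include <stdio.h>
    #include <stdint.h>

    #define BIT(n)            (1u << (n))
    #define NBCFG_ECC_ENABLE  BIT(22)
    #define NBCFG_CHIPKILL    BIT(23)

    /* No ECC, plain SECDED, or x4 chipkill, exactly as in init_csrows(). */
    static const char *edac_mode(uint32_t nbcfg)
    {
            if (!(nbcfg & NBCFG_ECC_ENABLE))
                    return "EDAC_NONE";
            return (nbcfg & NBCFG_CHIPKILL) ? "EDAC_S4ECD4ED" : "EDAC_SECDED";
    }

    int main(void)
    {
            printf("%s\n", edac_mode(0));
            printf("%s\n", edac_mode(NBCFG_ECC_ENABLE));
            printf("%s\n", edac_mode(NBCFG_ECC_ENABLE | NBCFG_CHIPKILL));
            return 0;
    }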
@@ -2380,7 +2215,7 @@ static int amd64_init_csrows(struct mem_ctl_info *mci) | |||
2380 | } | 2215 | } |
2381 | 2216 | ||
2382 | /* get all cores on this DCT */ | 2217 | /* get all cores on this DCT */ |
2383 | static void get_cpus_on_this_dct_cpumask(struct cpumask *mask, int nid) | 2218 | static void get_cpus_on_this_dct_cpumask(struct cpumask *mask, unsigned nid) |
2384 | { | 2219 | { |
2385 | int cpu; | 2220 | int cpu; |
2386 | 2221 | ||
@@ -2390,15 +2225,14 @@ static void get_cpus_on_this_dct_cpumask(struct cpumask *mask, int nid) | |||
2390 | } | 2225 | } |
2391 | 2226 | ||
2392 | /* check MCG_CTL on all the cpus on this node */ | 2227 | /* check MCG_CTL on all the cpus on this node */ |
2393 | static bool amd64_nb_mce_bank_enabled_on_node(int nid) | 2228 | static bool amd64_nb_mce_bank_enabled_on_node(unsigned nid) |
2394 | { | 2229 | { |
2395 | cpumask_var_t mask; | 2230 | cpumask_var_t mask; |
2396 | int cpu, nbe; | 2231 | int cpu, nbe; |
2397 | bool ret = false; | 2232 | bool ret = false; |
2398 | 2233 | ||
2399 | if (!zalloc_cpumask_var(&mask, GFP_KERNEL)) { | 2234 | if (!zalloc_cpumask_var(&mask, GFP_KERNEL)) { |
2400 | amd64_printk(KERN_WARNING, "%s: error allocating mask\n", | 2235 | amd64_warn("%s: Error allocating mask\n", __func__); |
2401 | __func__); | ||
2402 | return false; | 2236 | return false; |
2403 | } | 2237 | } |
2404 | 2238 | ||
@@ -2408,7 +2242,7 @@ static bool amd64_nb_mce_bank_enabled_on_node(int nid) | |||
2408 | 2242 | ||
2409 | for_each_cpu(cpu, mask) { | 2243 | for_each_cpu(cpu, mask) { |
2410 | struct msr *reg = per_cpu_ptr(msrs, cpu); | 2244 | struct msr *reg = per_cpu_ptr(msrs, cpu); |
2411 | nbe = reg->l & K8_MSR_MCGCTL_NBE; | 2245 | nbe = reg->l & MSR_MCGCTL_NBE; |
2412 | 2246 | ||
2413 | debugf0("core: %u, MCG_CTL: 0x%llx, NB MSR is %s\n", | 2247 | debugf0("core: %u, MCG_CTL: 0x%llx, NB MSR is %s\n", |
2414 | cpu, reg->q, | 2248 | cpu, reg->q, |
@@ -2424,18 +2258,17 @@ out: | |||
2424 | return ret; | 2258 | return ret; |
2425 | } | 2259 | } |
2426 | 2260 | ||
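Per the notice text in ecc_enabled() below, the bit being tested is MCG_CTL[4] (MSR_MCGCTL_NBE). A standalone model of the per-node check; the MSR values are samples, and the all-cores-must-agree semantics is inferred from how the elided remainder of the loop bails out:

    #include <stdio.h>
    #include <stdint.h>
    #include <stdbool.h>

    #define BIT(n)          (1u << (n))
    #define MSR_MCGCTL_NBE  BIT(4)  /* NB machine-check enable, MCG_CTL[4] */

    /* The bank counts as enabled only if every core on the node has NBE set. */
    static bool nb_mce_bank_enabled(const uint32_t *mcgctl_lo, int ncpus)
    {
            int cpu;

            for (cpu = 0; cpu < ncpus; cpu++) {
                    bool nbe = mcgctl_lo[cpu] & MSR_MCGCTL_NBE;

                    printf("core: %d, NB MSR is %s\n",
                           cpu, nbe ? "enabled" : "disabled");
                    if (!nbe)
                            return false;
            }
            return true;
    }

    int main(void)
    {
            uint32_t regs[4] = { 0x10, 0x10, 0x00, 0x10 }; /* sample MCG_CTL values */

            printf("node result: %d\n", nb_mce_bank_enabled(regs, 4));
            return 0;
    }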
2427 | static int amd64_toggle_ecc_err_reporting(struct amd64_pvt *pvt, bool on) | 2261 | static int toggle_ecc_err_reporting(struct ecc_settings *s, u8 nid, bool on) |
2428 | { | 2262 | { |
2429 | cpumask_var_t cmask; | 2263 | cpumask_var_t cmask; |
2430 | int cpu; | 2264 | int cpu; |
2431 | 2265 | ||
2432 | if (!zalloc_cpumask_var(&cmask, GFP_KERNEL)) { | 2266 | if (!zalloc_cpumask_var(&cmask, GFP_KERNEL)) { |
2433 | amd64_printk(KERN_WARNING, "%s: error allocating mask\n", | 2267 | amd64_warn("%s: error allocating mask\n", __func__); |
2434 | __func__); | ||
2435 | return false; | 2268 | return false; |
2436 | } | 2269 | } |
2437 | 2270 | ||
2438 | get_cpus_on_this_dct_cpumask(cmask, pvt->mc_node_id); | 2271 | get_cpus_on_this_dct_cpumask(cmask, nid); |
2439 | 2272 | ||
2440 | rdmsr_on_cpus(cmask, MSR_IA32_MCG_CTL, msrs); | 2273 | rdmsr_on_cpus(cmask, MSR_IA32_MCG_CTL, msrs); |
2441 | 2274 | ||
@@ -2444,16 +2277,16 @@ static int amd64_toggle_ecc_err_reporting(struct amd64_pvt *pvt, bool on) | |||
2444 | struct msr *reg = per_cpu_ptr(msrs, cpu); | 2277 | struct msr *reg = per_cpu_ptr(msrs, cpu); |
2445 | 2278 | ||
2446 | if (on) { | 2279 | if (on) { |
2447 | if (reg->l & K8_MSR_MCGCTL_NBE) | 2280 | if (reg->l & MSR_MCGCTL_NBE) |
2448 | pvt->flags.nb_mce_enable = 1; | 2281 | s->flags.nb_mce_enable = 1; |
2449 | 2282 | ||
2450 | reg->l |= K8_MSR_MCGCTL_NBE; | 2283 | reg->l |= MSR_MCGCTL_NBE; |
2451 | } else { | 2284 | } else { |
2452 | /* | 2285 | /* |
2453 | * Turn off NB MCE reporting only when it was off before | 2286 | * Turn off NB MCE reporting only when it was off before |
2454 | */ | 2287 | */ |
2455 | if (!pvt->flags.nb_mce_enable) | 2288 | if (!s->flags.nb_mce_enable) |
2456 | reg->l &= ~K8_MSR_MCGCTL_NBE; | 2289 | reg->l &= ~MSR_MCGCTL_NBE; |
2457 | } | 2290 | } |
2458 | } | 2291 | } |
2459 | wrmsr_on_cpus(cmask, MSR_IA32_MCG_CTL, msrs); | 2292 | wrmsr_on_cpus(cmask, MSR_IA32_MCG_CTL, msrs); |
@@ -2463,92 +2296,90 @@ static int amd64_toggle_ecc_err_reporting(struct amd64_pvt *pvt, bool on) | |||
2463 | return 0; | 2296 | return 0; |
2464 | } | 2297 | } |
2465 | 2298 | ||
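The asymmetry in toggle_ecc_err_reporting() above is deliberate: on enable it first records whether the BIOS already had NBE on, so that the disable path clears the bit only when the driver itself set it. A runnable model of just that state machine:

    #include <stdio.h>
    #include <stdint.h>
    #include <stdbool.h>

    #define BIT(n)          (1u << (n))
    #define MSR_MCGCTL_NBE  BIT(4)

    struct ecc_state { bool nb_mce_was_on; };

    static void toggle_nbe(uint32_t *msr_lo, struct ecc_state *s, bool on)
    {
            if (on) {
                    if (*msr_lo & MSR_MCGCTL_NBE)
                            s->nb_mce_was_on = true;    /* BIOS had it on */
                    *msr_lo |= MSR_MCGCTL_NBE;
            } else {
                    /* turn it off only when it was off before we touched it */
                    if (!s->nb_mce_was_on)
                            *msr_lo &= ~MSR_MCGCTL_NBE;
            }
    }

    int main(void)
    {
            uint32_t msr = 0x0;                 /* sample: BIOS left NBE off */
            struct ecc_state s = { false };

            toggle_nbe(&msr, &s, true);         /* module load:   -> 0x10 */
            toggle_nbe(&msr, &s, false);        /* module unload: -> 0x00 */
            printf("final MCG_CTL: 0x%x\n", msr);
            return 0;
    }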
2466 | static void amd64_enable_ecc_error_reporting(struct mem_ctl_info *mci) | 2299 | static bool enable_ecc_error_reporting(struct ecc_settings *s, u8 nid, |
2300 | struct pci_dev *F3) | ||
2467 | { | 2301 | { |
2468 | struct amd64_pvt *pvt = mci->pvt_info; | 2302 | bool ret = true; |
2469 | u32 value, mask = K8_NBCTL_CECCEn | K8_NBCTL_UECCEn; | 2303 | u32 value, mask = 0x3; /* UECC/CECC enable */ |
2304 | |||
2305 | if (toggle_ecc_err_reporting(s, nid, ON)) { | ||
2306 | amd64_warn("Error enabling ECC reporting over MCGCTL!\n"); | ||
2307 | return false; | ||
2308 | } | ||
2470 | 2309 | ||
2471 | amd64_read_pci_cfg(pvt->misc_f3_ctl, K8_NBCTL, &value); | 2310 | amd64_read_pci_cfg(F3, NBCTL, &value); |
2472 | 2311 | ||
2473 | /* turn on UECCn and CECCEn bits */ | 2312 | s->old_nbctl = value & mask; |
2474 | pvt->old_nbctl = value & mask; | 2313 | s->nbctl_valid = true; |
2475 | pvt->nbctl_mcgctl_saved = 1; | ||
2476 | 2314 | ||
2477 | value |= mask; | 2315 | value |= mask; |
2478 | pci_write_config_dword(pvt->misc_f3_ctl, K8_NBCTL, value); | 2316 | amd64_write_pci_cfg(F3, NBCTL, value); |
2479 | 2317 | ||
2480 | if (amd64_toggle_ecc_err_reporting(pvt, ON)) | 2318 | amd64_read_pci_cfg(F3, NBCFG, &value); |
2481 | amd64_printk(KERN_WARNING, "Error enabling ECC reporting over " | ||
2482 | "MCGCTL!\n"); | ||
2483 | 2319 | ||
2484 | amd64_read_pci_cfg(pvt->misc_f3_ctl, K8_NBCFG, &value); | 2320 | debugf0("1: node %d, NBCFG=0x%08x[DramEccEn: %d]\n", |
2321 | nid, value, !!(value & NBCFG_ECC_ENABLE)); | ||
2485 | 2322 | ||
2486 | debugf0("NBCFG(1)= 0x%x CHIPKILL= %s ECC_ENABLE= %s\n", value, | 2323 | if (!(value & NBCFG_ECC_ENABLE)) { |
2487 | (value & K8_NBCFG_CHIPKILL) ? "Enabled" : "Disabled", | 2324 | amd64_warn("DRAM ECC disabled on this node, enabling...\n"); |
2488 | (value & K8_NBCFG_ECC_ENABLE) ? "Enabled" : "Disabled"); | ||
2489 | 2325 | ||
2490 | if (!(value & K8_NBCFG_ECC_ENABLE)) { | 2326 | s->flags.nb_ecc_prev = 0; |
2491 | amd64_printk(KERN_WARNING, | ||
2492 | "This node reports that DRAM ECC is " | ||
2493 | "currently Disabled; ENABLING now\n"); | ||
2494 | |||
2495 | pvt->flags.nb_ecc_prev = 0; | ||
2496 | 2327 | ||
2497 | /* Attempt to turn on DRAM ECC Enable */ | 2328 | /* Attempt to turn on DRAM ECC Enable */ |
2498 | value |= K8_NBCFG_ECC_ENABLE; | 2329 | value |= NBCFG_ECC_ENABLE; |
2499 | pci_write_config_dword(pvt->misc_f3_ctl, K8_NBCFG, value); | 2330 | amd64_write_pci_cfg(F3, NBCFG, value); |
2500 | 2331 | ||
2501 | amd64_read_pci_cfg(pvt->misc_f3_ctl, K8_NBCFG, &value); | 2332 | amd64_read_pci_cfg(F3, NBCFG, &value); |
2502 | 2333 | ||
2503 | if (!(value & K8_NBCFG_ECC_ENABLE)) { | 2334 | if (!(value & NBCFG_ECC_ENABLE)) { |
2504 | amd64_printk(KERN_WARNING, | 2335 | amd64_warn("Hardware rejected DRAM ECC enable, "
2505 | "Hardware rejects Enabling DRAM ECC checking\n" | 2336 | "check memory DIMM configuration.\n"); |
2506 | "Check memory DIMM configuration\n"); | 2337 | ret = false; |
2507 | } else { | 2338 | } else { |
2508 | amd64_printk(KERN_DEBUG, | 2339 | amd64_info("Hardware accepted DRAM ECC Enable\n"); |
2509 | "Hardware accepted DRAM ECC Enable\n"); | ||
2510 | } | 2340 | } |
2511 | } else { | 2341 | } else { |
2512 | pvt->flags.nb_ecc_prev = 1; | 2342 | s->flags.nb_ecc_prev = 1; |
2513 | } | 2343 | } |
2514 | 2344 | ||
2515 | debugf0("NBCFG(2)= 0x%x CHIPKILL= %s ECC_ENABLE= %s\n", value, | 2345 | debugf0("2: node %d, NBCFG=0x%08x[DramEccEn: %d]\n", |
2516 | (value & K8_NBCFG_CHIPKILL) ? "Enabled" : "Disabled", | 2346 | nid, value, !!(value & NBCFG_ECC_ENABLE)); |
2517 | (value & K8_NBCFG_ECC_ENABLE) ? "Enabled" : "Disabled"); | ||
2518 | 2347 | ||
2519 | pvt->ctl_error_info.nbcfg = value; | 2348 | return ret; |
2520 | } | 2349 | } |
2521 | 2350 | ||
2522 | static void amd64_restore_ecc_error_reporting(struct amd64_pvt *pvt) | 2351 | static void restore_ecc_error_reporting(struct ecc_settings *s, u8 nid, |
2352 | struct pci_dev *F3) | ||
2523 | { | 2353 | { |
2524 | u32 value, mask = K8_NBCTL_CECCEn | K8_NBCTL_UECCEn; | 2354 | u32 value, mask = 0x3; /* UECC/CECC enable */ |
2525 | 2355 | ||
2526 | if (!pvt->nbctl_mcgctl_saved) | 2356 ||
2357 | if (!s->nbctl_valid) | ||
2527 | return; | 2358 | return; |
2528 | 2359 | ||
2529 | amd64_read_pci_cfg(pvt->misc_f3_ctl, K8_NBCTL, &value); | 2360 | amd64_read_pci_cfg(F3, NBCTL, &value); |
2530 | value &= ~mask; | 2361 | value &= ~mask; |
2531 | value |= pvt->old_nbctl; | 2362 | value |= s->old_nbctl; |
2532 | 2363 | ||
2533 | pci_write_config_dword(pvt->misc_f3_ctl, K8_NBCTL, value); | 2364 | amd64_write_pci_cfg(F3, NBCTL, value); |
2534 | 2365 | ||
2535 | /* restore previous BIOS DRAM ECC "off" setting which we force-enabled */ | 2366 | /* restore previous BIOS DRAM ECC "off" setting we force-enabled */ |
2536 | if (!pvt->flags.nb_ecc_prev) { | 2367 | if (!s->flags.nb_ecc_prev) { |
2537 | amd64_read_pci_cfg(pvt->misc_f3_ctl, K8_NBCFG, &value); | 2368 | amd64_read_pci_cfg(F3, NBCFG, &value); |
2538 | value &= ~K8_NBCFG_ECC_ENABLE; | 2369 | value &= ~NBCFG_ECC_ENABLE; |
2539 | pci_write_config_dword(pvt->misc_f3_ctl, K8_NBCFG, value); | 2370 | amd64_write_pci_cfg(F3, NBCFG, value); |
2540 | } | 2371 | } |
2541 | 2372 | ||
2542 | /* restore the NB Enable MCGCTL bit */ | 2373 | /* restore the NB Enable MCGCTL bit */ |
2543 | if (amd64_toggle_ecc_err_reporting(pvt, OFF)) | 2374 | if (toggle_ecc_err_reporting(s, nid, OFF)) |
2544 | amd64_printk(KERN_WARNING, "Error restoring NB MCGCTL settings!\n"); | 2375 | amd64_warn("Error restoring NB MCGCTL settings!\n"); |
2545 | } | 2376 | } |
2546 | 2377 | ||
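enable_ecc_error_reporting() and restore_ecc_error_reporting() above are the two halves of one save/force/restore protocol on NBCTL[1:0], the CECCEn/UECCEn bits covered by the 0x3 mask. A standalone sketch of the round trip on a plain register value:

    #include <stdio.h>
    #include <stdint.h>
    #include <stdbool.h>

    #define NBCTL_ECC_MASK 0x3  /* UECCEn | CECCEn, bits [1:0] of F3x40 */

    struct ecc_settings { uint32_t old_nbctl; bool nbctl_valid; };

    /* Enable path: save the BIOS's bits, then force both on. */
    static void force_ecc(uint32_t *nbctl, struct ecc_settings *s)
    {
            s->old_nbctl   = *nbctl & NBCTL_ECC_MASK;
            s->nbctl_valid = true;
            *nbctl        |= NBCTL_ECC_MASK;
    }

    /* Restore path: put the saved bits back exactly as found. */
    static void restore_ecc(uint32_t *nbctl, const struct ecc_settings *s)
    {
            if (!s->nbctl_valid)
                    return;
            *nbctl = (*nbctl & ~NBCTL_ECC_MASK) | s->old_nbctl;
    }

    int main(void)
    {
            uint32_t nbctl = 0x1;   /* sample: BIOS enabled CECC only */
            struct ecc_settings s = { 0, false };

            force_ecc(&nbctl, &s);      /* -> 0x3 */
            restore_ecc(&nbctl, &s);    /* -> 0x1 again */
            printf("NBCTL restored to 0x%x\n", nbctl);
            return 0;
    }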
2547 | /* | 2378 | /* |
2548 | * EDAC requires that the BIOS have ECC enabled before taking over the | 2379 | * EDAC requires that the BIOS have ECC enabled before |
2549 | * processing of ECC errors. This is because the BIOS can properly initialize | 2380 | * taking over the processing of ECC errors. A command-line
2550 | * the memory system completely. A command-line option allows one to | 2381 | * option allows one to force-enable hardware ECC later in
2551 | * force-enable hardware ECC later in amd64_enable_ecc_error_reporting(). | 2382 | * enable_ecc_error_reporting().
2552 | */ | 2383 | */ |
2553 | static const char *ecc_msg = | 2384 | static const char *ecc_msg = |
2554 | "ECC disabled in the BIOS or no ECC capability, module will not load.\n" | 2385 | "ECC disabled in the BIOS or no ECC capability, module will not load.\n" |
@@ -2556,38 +2387,28 @@ static const char *ecc_msg = | |||
2556 | "'ecc_enable_override'.\n" | 2387 | "'ecc_enable_override'.\n" |
2557 | " (Note that use of the override may cause unknown side effects.)\n"; | 2388 | " (Note that use of the override may cause unknown side effects.)\n"; |
2558 | 2389 | ||
2559 | static int amd64_check_ecc_enabled(struct amd64_pvt *pvt) | 2390 | static bool ecc_enabled(struct pci_dev *F3, u8 nid) |
2560 | { | 2391 | { |
2561 | u32 value; | 2392 | u32 value; |
2562 | u8 ecc_enabled = 0; | 2393 | u8 ecc_en = 0; |
2563 | bool nb_mce_en = false; | 2394 | bool nb_mce_en = false; |
2564 | 2395 | ||
2565 | amd64_read_pci_cfg(pvt->misc_f3_ctl, K8_NBCFG, &value); | 2396 | amd64_read_pci_cfg(F3, NBCFG, &value); |
2566 | 2397 | ||
2567 | ecc_enabled = !!(value & K8_NBCFG_ECC_ENABLE); | 2398 | ecc_en = !!(value & NBCFG_ECC_ENABLE); |
2568 | if (!ecc_enabled) | 2399 | amd64_info("DRAM ECC %s.\n", (ecc_en ? "enabled" : "disabled")); |
2569 | amd64_printk(KERN_NOTICE, "This node reports that Memory ECC " | ||
2570 | "is currently disabled, set F3x%x[22] (%s).\n", | ||
2571 | K8_NBCFG, pci_name(pvt->misc_f3_ctl)); | ||
2572 | else | ||
2573 | amd64_printk(KERN_INFO, "ECC is enabled by BIOS.\n"); | ||
2574 | 2400 | ||
2575 | nb_mce_en = amd64_nb_mce_bank_enabled_on_node(pvt->mc_node_id); | 2401 | nb_mce_en = amd64_nb_mce_bank_enabled_on_node(nid); |
2576 | if (!nb_mce_en) | 2402 | if (!nb_mce_en) |
2577 | amd64_printk(KERN_NOTICE, "NB MCE bank disabled, set MSR " | 2403 | amd64_notice("NB MCE bank disabled, set MSR " |
2578 | "0x%08x[4] on node %d to enable.\n", | 2404 | "0x%08x[4] on node %d to enable.\n", |
2579 | MSR_IA32_MCG_CTL, pvt->mc_node_id); | 2405 | MSR_IA32_MCG_CTL, nid); |
2580 | 2406 | ||
2581 | if (!ecc_enabled || !nb_mce_en) { | 2407 | if (!ecc_en || !nb_mce_en) { |
2582 | if (!ecc_enable_override) { | 2408 | amd64_notice("%s", ecc_msg); |
2583 | amd64_printk(KERN_NOTICE, "%s", ecc_msg); | 2409 | return false; |
2584 | return -ENODEV; | ||
2585 | } else { | ||
2586 | amd64_printk(KERN_WARNING, "Forcing ECC checking on!\n"); | ||
2587 | } | ||
2588 | } | 2410 | } |
2589 | 2411 | return true; | |
2590 | return 0; | ||
2591 | } | 2412 | } |
2592 | 2413 | ||
2593 | struct mcidev_sysfs_attribute sysfs_attrs[ARRAY_SIZE(amd64_dbg_attrs) + | 2414 | struct mcidev_sysfs_attribute sysfs_attrs[ARRAY_SIZE(amd64_dbg_attrs) + |
@@ -2596,39 +2417,41 @@ struct mcidev_sysfs_attribute sysfs_attrs[ARRAY_SIZE(amd64_dbg_attrs) + | |||
2596 | 2417 | ||
2597 | struct mcidev_sysfs_attribute terminator = { .attr = { .name = NULL } }; | 2418 | struct mcidev_sysfs_attribute terminator = { .attr = { .name = NULL } }; |
2598 | 2419 | ||
2599 | static void amd64_set_mc_sysfs_attributes(struct mem_ctl_info *mci) | 2420 | static void set_mc_sysfs_attrs(struct mem_ctl_info *mci) |
2600 | { | 2421 | { |
2601 | unsigned int i = 0, j = 0; | 2422 | unsigned int i = 0, j = 0; |
2602 | 2423 | ||
2603 | for (; i < ARRAY_SIZE(amd64_dbg_attrs); i++) | 2424 | for (; i < ARRAY_SIZE(amd64_dbg_attrs); i++) |
2604 | sysfs_attrs[i] = amd64_dbg_attrs[i]; | 2425 | sysfs_attrs[i] = amd64_dbg_attrs[i]; |
2605 | 2426 | ||
2606 | for (j = 0; j < ARRAY_SIZE(amd64_inj_attrs); j++, i++) | 2427 | if (boot_cpu_data.x86 >= 0x10) |
2607 | sysfs_attrs[i] = amd64_inj_attrs[j]; | 2428 | for (j = 0; j < ARRAY_SIZE(amd64_inj_attrs); j++, i++) |
2429 | sysfs_attrs[i] = amd64_inj_attrs[j]; | ||
2608 | 2430 | ||
2609 | sysfs_attrs[i] = terminator; | 2431 | sysfs_attrs[i] = terminator; |
2610 | 2432 | ||
2611 | mci->mc_driver_sysfs_attributes = sysfs_attrs; | 2433 | mci->mc_driver_sysfs_attributes = sysfs_attrs; |
2612 | } | 2434 | } |
2613 | 2435 | ||
2614 | static void amd64_setup_mci_misc_attributes(struct mem_ctl_info *mci) | 2436 | static void setup_mci_misc_attrs(struct mem_ctl_info *mci, |
2437 | struct amd64_family_type *fam) | ||
2615 | { | 2438 | { |
2616 | struct amd64_pvt *pvt = mci->pvt_info; | 2439 | struct amd64_pvt *pvt = mci->pvt_info; |
2617 | 2440 | ||
2618 | mci->mtype_cap = MEM_FLAG_DDR2 | MEM_FLAG_RDDR2; | 2441 | mci->mtype_cap = MEM_FLAG_DDR2 | MEM_FLAG_RDDR2; |
2619 | mci->edac_ctl_cap = EDAC_FLAG_NONE; | 2442 | mci->edac_ctl_cap = EDAC_FLAG_NONE; |
2620 | 2443 | ||
2621 | if (pvt->nbcap & K8_NBCAP_SECDED) | 2444 | if (pvt->nbcap & NBCAP_SECDED) |
2622 | mci->edac_ctl_cap |= EDAC_FLAG_SECDED; | 2445 | mci->edac_ctl_cap |= EDAC_FLAG_SECDED; |
2623 | 2446 | ||
2624 | if (pvt->nbcap & K8_NBCAP_CHIPKILL) | 2447 | if (pvt->nbcap & NBCAP_CHIPKILL) |
2625 | mci->edac_ctl_cap |= EDAC_FLAG_S4ECD4ED; | 2448 | mci->edac_ctl_cap |= EDAC_FLAG_S4ECD4ED; |
2626 | 2449 | ||
2627 | mci->edac_cap = amd64_determine_edac_cap(pvt); | 2450 | mci->edac_cap = amd64_determine_edac_cap(pvt); |
2628 | mci->mod_name = EDAC_MOD_STR; | 2451 | mci->mod_name = EDAC_MOD_STR; |
2629 | mci->mod_ver = EDAC_AMD64_VERSION; | 2452 | mci->mod_ver = EDAC_AMD64_VERSION; |
2630 | mci->ctl_name = get_amd_family_name(pvt->mc_type_index); | 2453 | mci->ctl_name = fam->ctl_name; |
2631 | mci->dev_name = pci_name(pvt->dram_f2_ctl); | 2454 | mci->dev_name = pci_name(pvt->F2); |
2632 | mci->ctl_page_to_phys = NULL; | 2455 | mci->ctl_page_to_phys = NULL; |
2633 | 2456 | ||
2634 | /* memory scrubber interface */ | 2457 | /* memory scrubber interface */ |
@@ -2637,111 +2460,96 @@ static void amd64_setup_mci_misc_attributes(struct mem_ctl_info *mci) | |||
2637 | } | 2460 | } |
2638 | 2461 | ||
2639 | /* | 2462 | /* |
2640 | * Init stuff for this DRAM Controller device. | 2463 | * returns a pointer to the family descriptor on success, NULL otherwise. |
2641 | * | ||
2642 | * Due to a hardware feature on Fam10h CPUs, the Enable Extended Configuration | ||
2643 | * Space feature MUST be enabled on ALL Processors prior to actually reading | ||
2644 | * from the ECS registers. The module can be loaded on any core, but a core | ||
2645 | * does not 'see' the other processors' ECS data unless ECS access is | ||
2646 | * enabled on them as well. Our solution is to first enable ECS access in this | ||
2647 | * routine on all processors, gather some data in an amd64_pvt structure and | ||
2648 | * later come back in a finish-setup function to perform that final | ||
2649 | * initialization. See also amd64_init_2nd_stage() for that. | ||
2650 | */ | 2464 | */ |
2651 | static int amd64_probe_one_instance(struct pci_dev *dram_f2_ctl, | 2465 | static struct amd64_family_type *amd64_per_family_init(struct amd64_pvt *pvt) |
2652 | int mc_type_index) | 2466 | { |
2467 | u8 fam = boot_cpu_data.x86; | ||
2468 | struct amd64_family_type *fam_type = NULL; | ||
2469 | |||
2470 | switch (fam) { | ||
2471 | case 0xf: | ||
2472 | fam_type = &amd64_family_types[K8_CPUS]; | ||
2473 | pvt->ops = &amd64_family_types[K8_CPUS].ops; | ||
2474 | break; | ||
2475 | |||
2476 | case 0x10: | ||
2477 | fam_type = &amd64_family_types[F10_CPUS]; | ||
2478 | pvt->ops = &amd64_family_types[F10_CPUS].ops; | ||
2479 | break; | ||
2480 | |||
2481 | case 0x15: | ||
2482 | fam_type = &amd64_family_types[F15_CPUS]; | ||
2483 | pvt->ops = &amd64_family_types[F15_CPUS].ops; | ||
2484 | break; | ||
2485 | |||
2486 | default: | ||
2487 | amd64_err("Unsupported family!\n"); | ||
2488 | return NULL; | ||
2489 | } | ||
2490 | |||
2491 | pvt->ext_model = boot_cpu_data.x86_model >> 4; | ||
2492 | |||
2493 | amd64_info("%s %sdetected (node %d).\n", fam_type->ctl_name, | ||
2494 | (fam == 0xf ? | ||
2495 | (pvt->ext_model >= K8_REV_F ? "revF or later " | ||
2496 | : "revE or earlier ") | ||
2497 | : ""), pvt->mc_node_id); | ||
2498 | return fam_type; | ||
2499 | } | ||
2500 | |||
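The new amd64_per_family_init() replaces the driver_data-based lookup that the PCI table below loses. A compact standalone model of the same dispatch; the name strings here are illustrative, the real ones live in the amd64_family_types[] table:

    #include <stdio.h>

    enum amd_families { K8_CPUS, F10_CPUS, F15_CPUS, NUM_FAMILIES };

    static const char *const ctl_names[NUM_FAMILIES] = {
            [K8_CPUS]  = "K8",      /* placeholder control names */
            [F10_CPUS] = "F10h",
            [F15_CPUS] = "F15h",
    };

    /* CPUID family -> table index, or -1 for unsupported parts. */
    static int family_to_index(unsigned int fam)
    {
            switch (fam) {
            case 0xf:  return K8_CPUS;
            case 0x10: return F10_CPUS;
            case 0x15: return F15_CPUS;
            default:   return -1;
            }
    }

    int main(void)
    {
            unsigned int fam = 0x10, model = 0x42;
            int idx = family_to_index(fam);

            if (idx < 0) {
                    fprintf(stderr, "Unsupported family!\n");
                    return 1;
            }
            /* ext_model is the upper nibble of the CPUID model, as above */
            printf("%s detected, ext_model=%u\n", ctl_names[idx], model >> 4);
            return 0;
    }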
2501 | static int amd64_init_one_instance(struct pci_dev *F2) | ||
2653 | { | 2502 | { |
2654 | struct amd64_pvt *pvt = NULL; | 2503 | struct amd64_pvt *pvt = NULL; |
2504 | struct amd64_family_type *fam_type = NULL; | ||
2505 | struct mem_ctl_info *mci = NULL; | ||
2655 | int err = 0, ret; | 2506 | int err = 0, ret; |
2507 | u8 nid = get_node_id(F2); | ||
2656 | 2508 | ||
2657 | ret = -ENOMEM; | 2509 | ret = -ENOMEM; |
2658 | pvt = kzalloc(sizeof(struct amd64_pvt), GFP_KERNEL); | 2510 | pvt = kzalloc(sizeof(struct amd64_pvt), GFP_KERNEL); |
2659 | if (!pvt) | 2511 | if (!pvt) |
2660 | goto err_exit; | 2512 | goto err_ret; |
2661 | 2513 | ||
2662 | pvt->mc_node_id = get_node_id(dram_f2_ctl); | 2514 | pvt->mc_node_id = nid; |
2515 | pvt->F2 = F2; | ||
2663 | 2516 | ||
2664 | pvt->dram_f2_ctl = dram_f2_ctl; | 2517 | ret = -EINVAL; |
2665 | pvt->ext_model = boot_cpu_data.x86_model >> 4; | 2518 | fam_type = amd64_per_family_init(pvt); |
2666 | pvt->mc_type_index = mc_type_index; | 2519 | if (!fam_type) |
2667 | pvt->ops = family_ops(mc_type_index); | 2520 | goto err_free; |
2668 | 2521 | ||
2669 | /* | ||
2670 | * We have the dram_f2_ctl device as an argument, now go reserve its | ||
2671 | * sibling devices from the PCI system. | ||
2672 | */ | ||
2673 | ret = -ENODEV; | 2522 | ret = -ENODEV; |
2674 | err = amd64_reserve_mc_sibling_devices(pvt, mc_type_index); | 2523 | err = reserve_mc_sibling_devs(pvt, fam_type->f1_id, fam_type->f3_id); |
2675 | if (err) | 2524 | if (err) |
2676 | goto err_free; | 2525 | goto err_free; |
2677 | 2526 | ||
2678 | ret = -EINVAL; | 2527 | read_mc_regs(pvt); |
2679 | err = amd64_check_ecc_enabled(pvt); | ||
2680 | if (err) | ||
2681 | goto err_put; | ||
2682 | |||
2683 | /* | ||
2684 | * Key operation here: setup of HW prior to performing ops on it. Some | ||
2685 | * setup is required to access ECS data. After this is performed, the | ||
2686 | * 'teardown' function must be called upon error and normal exit paths. | ||
2687 | */ | ||
2688 | if (boot_cpu_data.x86 >= 0x10) | ||
2689 | amd64_setup(pvt); | ||
2690 | |||
2691 | /* | ||
2692 | * Save the pointer to the private data for use in 2nd initialization | ||
2693 | * stage | ||
2694 | */ | ||
2695 | pvt_lookup[pvt->mc_node_id] = pvt; | ||
2696 | |||
2697 | return 0; | ||
2698 | |||
2699 | err_put: | ||
2700 | amd64_free_mc_sibling_devices(pvt); | ||
2701 | |||
2702 | err_free: | ||
2703 | kfree(pvt); | ||
2704 | |||
2705 | err_exit: | ||
2706 | return ret; | ||
2707 | } | ||
2708 | |||
2709 | /* | ||
2710 | * This is the finishing stage of the init code. Needs to be performed after all | ||
2711 | * MCs' hardware have been prepped for accessing extended config space. | ||
2712 | */ | ||
2713 | static int amd64_init_2nd_stage(struct amd64_pvt *pvt) | ||
2714 | { | ||
2715 | int node_id = pvt->mc_node_id; | ||
2716 | struct mem_ctl_info *mci; | ||
2717 | int ret = -ENODEV; | ||
2718 | |||
2719 | amd64_read_mc_registers(pvt); | ||
2720 | 2528 | ||
2721 | /* | 2529 | /* |
2722 | * We need to determine how many memory channels there are. Then use | 2530 | * We need to determine how many memory channels there are. Then use |
2723 | * that information for calculating the size of the dynamic instance | 2531 | * that information for calculating the size of the dynamic instance |
2724 | * tables in the 'mci' structure | 2532 | * tables in the 'mci' structure. |
2725 | */ | 2533 | */ |
2534 | ret = -EINVAL; | ||
2726 | pvt->channel_count = pvt->ops->early_channel_count(pvt); | 2535 | pvt->channel_count = pvt->ops->early_channel_count(pvt); |
2727 | if (pvt->channel_count < 0) | 2536 | if (pvt->channel_count < 0) |
2728 | goto err_exit; | 2537 | goto err_siblings; |
2729 | 2538 | ||
2730 | ret = -ENOMEM; | 2539 | ret = -ENOMEM; |
2731 | mci = edac_mc_alloc(0, pvt->cs_count, pvt->channel_count, node_id); | 2540 | mci = edac_mc_alloc(0, pvt->csels[0].b_cnt, pvt->channel_count, nid); |
2732 | if (!mci) | 2541 | if (!mci) |
2733 | goto err_exit; | 2542 | goto err_siblings; |
2734 | 2543 | ||
2735 | mci->pvt_info = pvt; | 2544 | mci->pvt_info = pvt; |
2545 | mci->dev = &pvt->F2->dev; | ||
2736 | 2546 | ||
2737 | mci->dev = &pvt->dram_f2_ctl->dev; | 2547 | setup_mci_misc_attrs(mci, fam_type); |
2738 | amd64_setup_mci_misc_attributes(mci); | ||
2739 | 2548 | ||
2740 | if (amd64_init_csrows(mci)) | 2549 | if (init_csrows(mci)) |
2741 | mci->edac_cap = EDAC_FLAG_NONE; | 2550 | mci->edac_cap = EDAC_FLAG_NONE; |
2742 | 2551 | ||
2743 | amd64_enable_ecc_error_reporting(mci); | 2552 | set_mc_sysfs_attrs(mci); |
2744 | amd64_set_mc_sysfs_attributes(mci); | ||
2745 | 2553 | ||
2746 | ret = -ENODEV; | 2554 | ret = -ENODEV; |
2747 | if (edac_mc_add_mc(mci)) { | 2555 | if (edac_mc_add_mc(mci)) { |
@@ -2749,54 +2557,77 @@ static int amd64_init_2nd_stage(struct amd64_pvt *pvt) | |||
2749 | goto err_add_mc; | 2557 | goto err_add_mc; |
2750 | } | 2558 | } |
2751 | 2559 | ||
2752 | mci_lookup[node_id] = mci; | ||
2753 | pvt_lookup[node_id] = NULL; | ||
2754 | |||
2755 | /* register stuff with EDAC MCE */ | 2560 | /* register stuff with EDAC MCE */ |
2756 | if (report_gart_errors) | 2561 | if (report_gart_errors) |
2757 | amd_report_gart_errors(true); | 2562 | amd_report_gart_errors(true); |
2758 | 2563 | ||
2759 | amd_register_ecc_decoder(amd64_decode_bus_error); | 2564 | amd_register_ecc_decoder(amd64_decode_bus_error); |
2760 | 2565 | ||
2566 | mcis[nid] = mci; | ||
2567 | |||
2568 | atomic_inc(&drv_instances); | ||
2569 | |||
2761 | return 0; | 2570 | return 0; |
2762 | 2571 | ||
2763 | err_add_mc: | 2572 | err_add_mc: |
2764 | edac_mc_free(mci); | 2573 | edac_mc_free(mci); |
2765 | 2574 | ||
2766 | err_exit: | 2575 | err_siblings: |
2767 | debugf0("failure to init 2nd stage: ret=%d\n", ret); | 2576 | free_mc_sibling_devs(pvt); |
2768 | |||
2769 | amd64_restore_ecc_error_reporting(pvt); | ||
2770 | |||
2771 | if (boot_cpu_data.x86 > 0xf) | ||
2772 | amd64_teardown(pvt); | ||
2773 | 2577 | ||
2774 | amd64_free_mc_sibling_devices(pvt); | 2578 | err_free: |
2775 | 2579 | kfree(pvt); | |
2776 | kfree(pvt_lookup[pvt->mc_node_id]); | ||
2777 | pvt_lookup[node_id] = NULL; | ||
2778 | 2580 | ||
2581 | err_ret: | ||
2779 | return ret; | 2582 | return ret; |
2780 | } | 2583 | } |
2781 | 2584 | ||
2782 | 2585 | static int __devinit amd64_probe_one_instance(struct pci_dev *pdev, | |
2783 | static int __devinit amd64_init_one_instance(struct pci_dev *pdev, | 2586 | const struct pci_device_id *mc_type) |
2784 | const struct pci_device_id *mc_type) | ||
2785 | { | 2587 | { |
2588 | u8 nid = get_node_id(pdev); | ||
2589 | struct pci_dev *F3 = node_to_amd_nb(nid)->misc; | ||
2590 | struct ecc_settings *s; | ||
2786 | int ret = 0; | 2591 | int ret = 0; |
2787 | 2592 | ||
2788 | debugf0("(MC node=%d,mc_type='%s')\n", get_node_id(pdev), | ||
2789 | get_amd_family_name(mc_type->driver_data)); | ||
2790 | |||
2791 | ret = pci_enable_device(pdev); | 2593 | ret = pci_enable_device(pdev); |
2792 | if (ret < 0) | 2594 | if (ret < 0) { |
2793 | ret = -EIO; | ||
2794 | else | ||
2795 | ret = amd64_probe_one_instance(pdev, mc_type->driver_data); | ||
2796 | |||
2797 | if (ret < 0) | ||
2798 | debugf0("ret=%d\n", ret); | 2595 | debugf0("ret=%d\n", ret); |
2596 | return -EIO; | ||
2597 | } | ||
2598 | |||
2599 | ret = -ENOMEM; | ||
2600 | s = kzalloc(sizeof(struct ecc_settings), GFP_KERNEL); | ||
2601 | if (!s) | ||
2602 | goto err_out; | ||
2603 | |||
2604 | ecc_stngs[nid] = s; | ||
2605 | |||
2606 | if (!ecc_enabled(F3, nid)) { | ||
2607 | ret = -ENODEV; | ||
2608 | |||
2609 | if (!ecc_enable_override) | ||
2610 | goto err_enable; | ||
2611 | |||
2612 | amd64_warn("Forcing ECC on!\n"); | ||
2613 | |||
2614 | if (!enable_ecc_error_reporting(s, nid, F3)) | ||
2615 | goto err_enable; | ||
2616 | } | ||
2799 | 2617 | ||
2618 | ret = amd64_init_one_instance(pdev); | ||
2619 | if (ret < 0) { | ||
2620 | amd64_err("Error probing instance: %d\n", nid); | ||
2621 | restore_ecc_error_reporting(s, nid, F3); | ||
2622 | } | ||
2623 | |||
2624 | return ret; | ||
2625 | |||
2626 | err_enable: | ||
2627 | kfree(s); | ||
2628 | ecc_stngs[nid] = NULL; | ||
2629 | |||
2630 | err_out: | ||
2800 | return ret; | 2631 | return ret; |
2801 | } | 2632 | } |
2802 | 2633 | ||
@@ -2804,6 +2635,9 @@ static void __devexit amd64_remove_one_instance(struct pci_dev *pdev) | |||
2804 | { | 2635 | { |
2805 | struct mem_ctl_info *mci; | 2636 | struct mem_ctl_info *mci; |
2806 | struct amd64_pvt *pvt; | 2637 | struct amd64_pvt *pvt; |
2638 | u8 nid = get_node_id(pdev); | ||
2639 | struct pci_dev *F3 = node_to_amd_nb(nid)->misc; | ||
2640 | struct ecc_settings *s = ecc_stngs[nid]; | ||
2807 | 2641 | ||
2808 | /* Remove from EDAC CORE tracking list */ | 2642 | /* Remove from EDAC CORE tracking list */ |
2809 | mci = edac_mc_del_mc(&pdev->dev); | 2643 | mci = edac_mc_del_mc(&pdev->dev); |
@@ -2812,20 +2646,20 @@ static void __devexit amd64_remove_one_instance(struct pci_dev *pdev) | |||
2812 | 2646 | ||
2813 | pvt = mci->pvt_info; | 2647 | pvt = mci->pvt_info; |
2814 | 2648 | ||
2815 | amd64_restore_ecc_error_reporting(pvt); | 2649 | restore_ecc_error_reporting(s, nid, F3); |
2816 | |||
2817 | if (boot_cpu_data.x86 > 0xf) | ||
2818 | amd64_teardown(pvt); | ||
2819 | 2650 | ||
2820 | amd64_free_mc_sibling_devices(pvt); | 2651 | free_mc_sibling_devs(pvt); |
2821 | 2652 | ||
2822 | /* unregister from EDAC MCE */ | 2653 | /* unregister from EDAC MCE */ |
2823 | amd_report_gart_errors(false); | 2654 | amd_report_gart_errors(false); |
2824 | amd_unregister_ecc_decoder(amd64_decode_bus_error); | 2655 | amd_unregister_ecc_decoder(amd64_decode_bus_error); |
2825 | 2656 | ||
2657 | kfree(ecc_stngs[nid]); | ||
2658 | ecc_stngs[nid] = NULL; | ||
2659 | |||
2826 | /* Free the EDAC CORE resources */ | 2660 | /* Free the EDAC CORE resources */ |
2827 | mci->pvt_info = NULL; | 2661 | mci->pvt_info = NULL; |
2828 | mci_lookup[pvt->mc_node_id] = NULL; | 2662 | mcis[nid] = NULL; |
2829 | 2663 | ||
2830 | kfree(pvt); | 2664 | kfree(pvt); |
2831 | edac_mc_free(mci); | 2665 | edac_mc_free(mci); |
@@ -2844,7 +2678,6 @@ static const struct pci_device_id amd64_pci_table[] __devinitdata = { | |||
2844 | .subdevice = PCI_ANY_ID, | 2678 | .subdevice = PCI_ANY_ID, |
2845 | .class = 0, | 2679 | .class = 0, |
2846 | .class_mask = 0, | 2680 | .class_mask = 0, |
2847 | .driver_data = K8_CPUS | ||
2848 | }, | 2681 | }, |
2849 | { | 2682 | { |
2850 | .vendor = PCI_VENDOR_ID_AMD, | 2683 | .vendor = PCI_VENDOR_ID_AMD, |
@@ -2853,29 +2686,28 @@ static const struct pci_device_id amd64_pci_table[] __devinitdata = { | |||
2853 | .subdevice = PCI_ANY_ID, | 2686 | .subdevice = PCI_ANY_ID, |
2854 | .class = 0, | 2687 | .class = 0, |
2855 | .class_mask = 0, | 2688 | .class_mask = 0, |
2856 | .driver_data = F10_CPUS | ||
2857 | }, | 2689 | }, |
2858 | { | 2690 | { |
2859 | .vendor = PCI_VENDOR_ID_AMD, | 2691 | .vendor = PCI_VENDOR_ID_AMD, |
2860 | .device = PCI_DEVICE_ID_AMD_11H_NB_DRAM, | 2692 | .device = PCI_DEVICE_ID_AMD_15H_NB_F2, |
2861 | .subvendor = PCI_ANY_ID, | 2693 | .subvendor = PCI_ANY_ID, |
2862 | .subdevice = PCI_ANY_ID, | 2694 | .subdevice = PCI_ANY_ID, |
2863 | .class = 0, | 2695 | .class = 0, |
2864 | .class_mask = 0, | 2696 | .class_mask = 0, |
2865 | .driver_data = F11_CPUS | ||
2866 | }, | 2697 | }, |
2698 | |||
2867 | {0, } | 2699 | {0, } |
2868 | }; | 2700 | }; |
2869 | MODULE_DEVICE_TABLE(pci, amd64_pci_table); | 2701 | MODULE_DEVICE_TABLE(pci, amd64_pci_table); |
2870 | 2702 | ||
2871 | static struct pci_driver amd64_pci_driver = { | 2703 | static struct pci_driver amd64_pci_driver = { |
2872 | .name = EDAC_MOD_STR, | 2704 | .name = EDAC_MOD_STR, |
2873 | .probe = amd64_init_one_instance, | 2705 | .probe = amd64_probe_one_instance, |
2874 | .remove = __devexit_p(amd64_remove_one_instance), | 2706 | .remove = __devexit_p(amd64_remove_one_instance), |
2875 | .id_table = amd64_pci_table, | 2707 | .id_table = amd64_pci_table, |
2876 | }; | 2708 | }; |
2877 | 2709 | ||
2878 | static void amd64_setup_pci_device(void) | 2710 | static void setup_pci_device(void) |
2879 | { | 2711 | { |
2880 | struct mem_ctl_info *mci; | 2712 | struct mem_ctl_info *mci; |
2881 | struct amd64_pvt *pvt; | 2713 | struct amd64_pvt *pvt; |
@@ -2883,13 +2715,12 @@ static void amd64_setup_pci_device(void) | |||
2883 | if (amd64_ctl_pci) | 2715 | if (amd64_ctl_pci) |
2884 | return; | 2716 | return; |
2885 | 2717 | ||
2886 | mci = mci_lookup[0]; | 2718 | mci = mcis[0]; |
2887 | if (mci) { | 2719 | if (mci) { |
2888 | 2720 | ||
2889 | pvt = mci->pvt_info; | 2721 | pvt = mci->pvt_info; |
2890 | amd64_ctl_pci = | 2722 | amd64_ctl_pci = |
2891 | edac_pci_create_generic_ctl(&pvt->dram_f2_ctl->dev, | 2723 | edac_pci_create_generic_ctl(&pvt->F2->dev, EDAC_MOD_STR); |
2892 | EDAC_MOD_STR); | ||
2893 | 2724 | ||
2894 | if (!amd64_ctl_pci) { | 2725 | if (!amd64_ctl_pci) { |
2895 | pr_warning("%s(): Unable to create PCI control\n", | 2726 | pr_warning("%s(): Unable to create PCI control\n", |
@@ -2903,51 +2734,50 @@ static void amd64_setup_pci_device(void) | |||
2903 | 2734 | ||
2904 | static int __init amd64_edac_init(void) | 2735 | static int __init amd64_edac_init(void) |
2905 | { | 2736 | { |
2906 | int nb, err = -ENODEV; | 2737 | int err = -ENODEV; |
2907 | bool load_ok = false; | ||
2908 | 2738 | ||
2909 | edac_printk(KERN_INFO, EDAC_MOD_STR, EDAC_AMD64_VERSION "\n"); | 2739 | printk(KERN_INFO "AMD64 EDAC driver v%s\n", EDAC_AMD64_VERSION); |
2910 | 2740 | ||
2911 | opstate_init(); | 2741 | opstate_init(); |
2912 | 2742 | ||
2913 | if (cache_k8_northbridges() < 0) | 2743 | if (amd_cache_northbridges() < 0) |
2914 | goto err_ret; | 2744 | goto err_ret; |
2915 | 2745 | ||
2746 | err = -ENOMEM; | ||
2747 | mcis = kzalloc(amd_nb_num() * sizeof(mcis[0]), GFP_KERNEL); | ||
2748 | ecc_stngs = kzalloc(amd_nb_num() * sizeof(ecc_stngs[0]), GFP_KERNEL); | ||
2749 | if (!(mcis && ecc_stngs)) | ||
2750 | goto err_free; | ||
2751 | |||
2916 | msrs = msrs_alloc(); | 2752 | msrs = msrs_alloc(); |
2917 | if (!msrs) | 2753 | if (!msrs) |
2918 | goto err_ret; | 2754 | goto err_free; |
2919 | 2755 | ||
2920 | err = pci_register_driver(&amd64_pci_driver); | 2756 | err = pci_register_driver(&amd64_pci_driver); |
2921 | if (err) | 2757 | if (err) |
2922 | goto err_pci; | 2758 | goto err_pci; |
2923 | 2759 | ||
2924 | /* | ||
2925 | * At this point, the array 'pvt_lookup[]' contains pointers to alloc'd | ||
2926 | * amd64_pvt structs. These will be used in the 2nd stage init function | ||
2927 | * to finish initialization of the MC instances. | ||
2928 | */ | ||
2929 | err = -ENODEV; | 2760 | err = -ENODEV; |
2930 | for (nb = 0; nb < num_k8_northbridges; nb++) { | 2761 | if (!atomic_read(&drv_instances)) |
2931 | if (!pvt_lookup[nb]) | 2762 | goto err_no_instances; |
2932 | continue; | ||
2933 | |||
2934 | err = amd64_init_2nd_stage(pvt_lookup[nb]); | ||
2935 | if (err) | ||
2936 | goto err_2nd_stage; | ||
2937 | |||
2938 | load_ok = true; | ||
2939 | } | ||
2940 | 2763 | ||
2941 | if (load_ok) { | 2764 | setup_pci_device(); |
2942 | amd64_setup_pci_device(); | 2765 | return 0; |
2943 | return 0; | ||
2944 | } | ||
2945 | 2766 | ||
2946 | err_2nd_stage: | 2767 | err_no_instances: |
2947 | pci_unregister_driver(&amd64_pci_driver); | 2768 | pci_unregister_driver(&amd64_pci_driver); |
2769 | |||
2948 | err_pci: | 2770 | err_pci: |
2949 | msrs_free(msrs); | 2771 | msrs_free(msrs); |
2950 | msrs = NULL; | 2772 | msrs = NULL; |
2773 | |||
2774 | err_free: | ||
2775 | kfree(mcis); | ||
2776 | mcis = NULL; | ||
2777 | |||
2778 | kfree(ecc_stngs); | ||
2779 | ecc_stngs = NULL; | ||
2780 | |||
2951 | err_ret: | 2781 | err_ret: |
2952 | return err; | 2782 | return err; |
2953 | } | 2783 | } |
@@ -2959,6 +2789,12 @@ static void __exit amd64_edac_exit(void) | |||
2959 | 2789 | ||
2960 | pci_unregister_driver(&amd64_pci_driver); | 2790 | pci_unregister_driver(&amd64_pci_driver); |
2961 | 2791 | ||
2792 | kfree(ecc_stngs); | ||
2793 | ecc_stngs = NULL; | ||
2794 | |||
2795 | kfree(mcis); | ||
2796 | mcis = NULL; | ||
2797 | |||
2962 | msrs_free(msrs); | 2798 | msrs_free(msrs); |
2963 | msrs = NULL; | 2799 | msrs = NULL; |
2964 | } | 2800 | } |
diff --git a/drivers/edac/amd64_edac.h b/drivers/edac/amd64_edac.h index 613b9381e71a..9a666cb985b2 100644 --- a/drivers/edac/amd64_edac.h +++ b/drivers/edac/amd64_edac.h | |||
@@ -72,13 +72,28 @@ | |||
72 | #include <linux/edac.h> | 72 | #include <linux/edac.h> |
73 | #include <asm/msr.h> | 73 | #include <asm/msr.h> |
74 | #include "edac_core.h" | 74 | #include "edac_core.h" |
75 | #include "edac_mce_amd.h" | 75 | #include "mce_amd.h" |
76 | 76 | ||
77 | #define amd64_printk(level, fmt, arg...) \ | 77 | #define amd64_debug(fmt, arg...) \ |
78 | edac_printk(level, "amd64", fmt, ##arg) | 78 | edac_printk(KERN_DEBUG, "amd64", fmt, ##arg) |
79 | 79 | ||
80 | #define amd64_mc_printk(mci, level, fmt, arg...) \ | 80 | #define amd64_info(fmt, arg...) \ |
81 | edac_mc_chipset_printk(mci, level, "amd64", fmt, ##arg) | 81 | edac_printk(KERN_INFO, "amd64", fmt, ##arg) |
82 | |||
83 | #define amd64_notice(fmt, arg...) \ | ||
84 | edac_printk(KERN_NOTICE, "amd64", fmt, ##arg) | ||
85 | |||
86 | #define amd64_warn(fmt, arg...) \ | ||
87 | edac_printk(KERN_WARNING, "amd64", fmt, ##arg) | ||
88 | |||
89 | #define amd64_err(fmt, arg...) \ | ||
90 | edac_printk(KERN_ERR, "amd64", fmt, ##arg) | ||
91 | |||
92 | #define amd64_mc_warn(mci, fmt, arg...) \ | ||
93 | edac_mc_chipset_printk(mci, KERN_WARNING, "amd64", fmt, ##arg) | ||
94 | |||
95 | #define amd64_mc_err(mci, fmt, arg...) \ | ||
96 | edac_mc_chipset_printk(mci, KERN_ERR, "amd64", fmt, ##arg) | ||
82 | 97 | ||
83 | /* | 98 | /* |
84 | * Throughout the comments in this code, the following terms are used: | 99 | * Throughout the comments in this code, the following terms are used: |
@@ -129,96 +144,76 @@ | |||
129 | * sections 3.5.4 and 3.5.5 for more information. | 144 | * sections 3.5.4 and 3.5.5 for more information. |
130 | */ | 145 | */ |
131 | 146 | ||
132 | #define EDAC_AMD64_VERSION " Ver: 3.3.0 " __DATE__ | 147 | #define EDAC_AMD64_VERSION "3.4.0" |
133 | #define EDAC_MOD_STR "amd64_edac" | 148 | #define EDAC_MOD_STR "amd64_edac" |
134 | 149 | ||
135 | #define EDAC_MAX_NUMNODES 8 | ||
136 | |||
137 | /* Extended Model from CPUID, for CPU Revision numbers */ | 150 | /* Extended Model from CPUID, for CPU Revision numbers */ |
138 | #define K8_REV_D 1 | 151 | #define K8_REV_D 1 |
139 | #define K8_REV_E 2 | 152 | #define K8_REV_E 2 |
140 | #define K8_REV_F 4 | 153 | #define K8_REV_F 4 |
141 | 154 | ||
142 | /* Hardware limit on ChipSelect rows per MC and processors per system */ | 155 | /* Hardware limit on ChipSelect rows per MC and processors per system */ |
143 | #define MAX_CS_COUNT 8 | 156 | #define NUM_CHIPSELECTS 8 |
144 | #define DRAM_REG_COUNT 8 | 157 | #define DRAM_RANGES 8 |
145 | 158 | ||
146 | #define ON true | 159 | #define ON true |
147 | #define OFF false | 160 | #define OFF false |
148 | 161 | ||
149 | /* | 162 | /* |
163 | * Create a contiguous bitmask starting at bit position @lo and ending at | ||
164 | * position @hi. For example | ||
165 | * | ||
166 | * GENMASK(21, 39) gives us the 64bit vector 0x000000ffffe00000. | ||
167 | */ | ||
168 | #define GENMASK(lo, hi) (((1ULL << ((hi) - (lo) + 1)) - 1) << (lo)) | ||
169 | |||
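The comment's example is easy to verify mechanically; note the unconventional (lo, hi) argument order of this GENMASK. A one-file check:

    #include <stdio.h>
    #include <assert.h>

    /* Same definition as the header above. */
    #define GENMASK(lo, hi) (((1ULL << ((hi) - (lo) + 1)) - 1) << (lo))

    int main(void)
    {
            /* 19 one-bits ((39 - 21) + 1) shifted up to start at bit 21 */
            assert(GENMASK(21, 39) == 0x000000ffffe00000ULL);

            printf("GENMASK(21, 39) = 0x%016llx\n",
                   (unsigned long long)GENMASK(21, 39));
            return 0;
    }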
170 | /* | ||
150 | * PCI-defined configuration space registers | 171 | * PCI-defined configuration space registers |
151 | */ | 172 | */ |
173 | #define PCI_DEVICE_ID_AMD_15H_NB_F1 0x1601 | ||
174 | #define PCI_DEVICE_ID_AMD_15H_NB_F2 0x1602 | ||
152 | 175 | ||
153 | 176 | ||
154 | /* | 177 | /* |
155 | * Function 1 - Address Map | 178 | * Function 1 - Address Map |
156 | */ | 179 | */ |
157 | #define K8_DRAM_BASE_LOW 0x40 | 180 | #define DRAM_BASE_LO 0x40 |
158 | #define K8_DRAM_LIMIT_LOW 0x44 | 181 | #define DRAM_LIMIT_LO 0x44 |
159 | #define K8_DHAR 0xf0 | ||
160 | |||
161 | #define DHAR_VALID BIT(0) | ||
162 | #define F10_DRAM_MEM_HOIST_VALID BIT(1) | ||
163 | 182 | ||
164 | #define DHAR_BASE_MASK 0xff000000 | 183 | #define dram_intlv_en(pvt, i) ((u8)((pvt->ranges[i].base.lo >> 8) & 0x7)) |
165 | #define dhar_base(dhar) (dhar & DHAR_BASE_MASK) | 184 | #define dram_rw(pvt, i) ((u8)(pvt->ranges[i].base.lo & 0x3)) |
185 | #define dram_intlv_sel(pvt, i) ((u8)((pvt->ranges[i].lim.lo >> 8) & 0x7)) | ||
186 | #define dram_dst_node(pvt, i) ((u8)(pvt->ranges[i].lim.lo & 0x7)) | ||
166 | 187 | ||
167 | #define K8_DHAR_OFFSET_MASK 0x0000ff00 | 188 | #define DHAR 0xf0 |
168 | #define k8_dhar_offset(dhar) ((dhar & K8_DHAR_OFFSET_MASK) << 16) | 189 | #define dhar_valid(pvt) ((pvt)->dhar & BIT(0)) |
190 | #define dhar_mem_hoist_valid(pvt) ((pvt)->dhar & BIT(1)) | ||
191 | #define dhar_base(pvt) ((pvt)->dhar & 0xff000000) | ||
192 | #define k8_dhar_offset(pvt) (((pvt)->dhar & 0x0000ff00) << 16) | ||
169 | 193 | ||
170 | #define F10_DHAR_OFFSET_MASK 0x0000ff80 | ||
171 | /* NOTE: Extra mask bit vs K8 */ | 194 | /* NOTE: Extra mask bit vs K8 */ |
172 | #define f10_dhar_offset(dhar) ((dhar & F10_DHAR_OFFSET_MASK) << 16) | 195 | #define f10_dhar_offset(pvt) (((pvt)->dhar & 0x0000ff80) << 16) |
173 | 196 | ||
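The NOTE about the extra mask bit is the entire K8-vs-F10h difference here: F10h also decodes DHAR bit 7, which lands at bit 23 of the computed offset. A side-by-side computation on a sample value:

    #include <stdio.h>
    #include <stdint.h>

    /* Same arithmetic as k8_dhar_offset()/f10_dhar_offset() above. */
    static uint64_t k8_off(uint32_t dhar)  { return (uint64_t)(dhar & 0x0000ff00) << 16; }
    static uint64_t f10_off(uint32_t dhar) { return (uint64_t)(dhar & 0x0000ff80) << 16; }

    int main(void)
    {
            uint32_t dhar = 0x0000ff80;     /* sample register value */

            printf("K8 offset:  0x%llx\n", (unsigned long long)k8_off(dhar));
            printf("F10 offset: 0x%llx\n", (unsigned long long)f10_off(dhar));
            /* the two differ by 0x800000: the bit only F10h decodes */
            return 0;
    }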
197 | #define DCT_CFG_SEL 0x10C | ||
174 | 198 | ||
175 | /* F10 High BASE/LIMIT registers */ | 199 | #define DRAM_LOCAL_NODE_BASE 0x120 |
176 | #define F10_DRAM_BASE_HIGH 0x140 | 200 | #define DRAM_LOCAL_NODE_LIM 0x124 |
177 | #define F10_DRAM_LIMIT_HIGH 0x144 | ||
178 | 201 | ||
202 | #define DRAM_BASE_HI 0x140 | ||
203 | #define DRAM_LIMIT_HI 0x144 | ||
179 | 204 | ||
180 | /* | ||
181 | * Function 2 - DRAM controller | ||
182 | */ | ||
183 | #define K8_DCSB0 0x40 | ||
184 | #define F10_DCSB1 0x140 | ||
185 | |||
186 | #define K8_DCSB_CS_ENABLE BIT(0) | ||
187 | #define K8_DCSB_NPT_SPARE BIT(1) | ||
188 | #define K8_DCSB_NPT_TESTFAIL BIT(2) | ||
189 | 205 | ||
190 | /* | 206 | /* |
191 | * REV E: select [31:21] and [15:9] from DCSB and the shift amount to form | 207 | * Function 2 - DRAM controller |
192 | * the address | ||
193 | */ | ||
194 | #define REV_E_DCSB_BASE_BITS (0xFFE0FE00ULL) | ||
195 | #define REV_E_DCS_SHIFT 4 | ||
196 | |||
197 | #define REV_F_F1Xh_DCSB_BASE_BITS (0x1FF83FE0ULL) | ||
198 | #define REV_F_F1Xh_DCS_SHIFT 8 | ||
199 | |||
200 | /* | ||
201 | * REV F and later: selects [28:19] and [13:5] from DCSB and the shift amount | ||
202 | * to form the address | ||
203 | */ | 208 | */ |
204 | #define REV_F_DCSB_BASE_BITS (0x1FF83FE0ULL) | 209 | #define DCSB0 0x40 |
205 | #define REV_F_DCS_SHIFT 8 | 210 | #define DCSB1 0x140 |
206 | 211 | #define DCSB_CS_ENABLE BIT(0) | |
207 | /* DRAM CS Mask Registers */ | ||
208 | #define K8_DCSM0 0x60 | ||
209 | #define F10_DCSM1 0x160 | ||
210 | |||
211 | /* REV E: select [29:21] and [15:9] from DCSM */ | ||
212 | #define REV_E_DCSM_MASK_BITS 0x3FE0FE00 | ||
213 | |||
214 | /* unused bits [24:20] and [12:0] */ | ||
215 | #define REV_E_DCS_NOTUSED_BITS 0x01F01FFF | ||
216 | 212 | ||
217 | /* REV F and later: select [28:19] and [13:5] from DCSM */ | 213 | #define DCSM0 0x60 |
218 | #define REV_F_F1Xh_DCSM_MASK_BITS 0x1FF83FE0 | 214 | #define DCSM1 0x160 |
219 | 215 | ||
220 | /* unused bits [26:22] and [12:0] */ | 216 | #define csrow_enabled(i, dct, pvt) ((pvt)->csels[(dct)].csbases[(i)] & DCSB_CS_ENABLE) |
221 | #define REV_F_F1Xh_DCS_NOTUSED_BITS 0x07C01FFF | ||
222 | 217 | ||
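csrow_enabled() reduces to testing DCSB_CS_ENABLE (bit 0) of the per-chip-select base register; the rev-specific base/mask bit extraction that used to live here appears to have moved into get_cs_base_and_mask(), used in init_csrows() above. A trivial loop over sample DCSB values:

    #include <stdio.h>
    #include <stdint.h>

    #define BIT(n)           (1u << (n))
    #define DCSB_CS_ENABLE   BIT(0)
    #define NUM_CHIPSELECTS  8

    int main(void)
    {
            /* samples: enable bit set on chip selects 0 and 2 only */
            uint32_t csbases[NUM_CHIPSELECTS] = { 0x00100001, 0, 0x00300001, 0 };
            int i;

            for (i = 0; i < NUM_CHIPSELECTS; i++)
                    if (csbases[i] & DCSB_CS_ENABLE)    /* csrow_enabled() test */
                            printf("csrow %d populated\n", i);
            return 0;
    }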
223 | #define DBAM0 0x80 | 218 | #define DBAM0 0x80 |
224 | #define DBAM1 0x180 | 219 | #define DBAM1 0x180 |
@@ -228,152 +223,84 @@ | |||
228 | 223 | ||
229 | #define DBAM_MAX_VALUE 11 | 224 | #define DBAM_MAX_VALUE 11 |
230 | 225 | ||
231 | 226 | #define DCLR0 0x90 | |
232 | #define F10_DCLR_0 0x90 | 227 | #define DCLR1 0x190 |
233 | #define F10_DCLR_1 0x190 | ||
234 | #define REVE_WIDTH_128 BIT(16) | 228 | #define REVE_WIDTH_128 BIT(16) |
235 | #define F10_WIDTH_128 BIT(11) | 229 | #define WIDTH_128 BIT(11) |
236 | 230 | ||
231 | #define DCHR0 0x94 | ||
232 | #define DCHR1 0x194 | ||
233 | #define DDR3_MODE BIT(8) | ||
237 | 234 | ||
238 | #define F10_DCHR_0 0x94 | 235 | #define DCT_SEL_LO 0x110 |
239 | #define F10_DCHR_1 0x194 | 236 | #define dct_sel_baseaddr(pvt) ((pvt)->dct_sel_lo & 0xFFFFF800) |
237 | #define dct_sel_interleave_addr(pvt) (((pvt)->dct_sel_lo >> 6) & 0x3) | ||
238 | #define dct_high_range_enabled(pvt) ((pvt)->dct_sel_lo & BIT(0)) | ||
239 | #define dct_interleave_enabled(pvt) ((pvt)->dct_sel_lo & BIT(2)) | ||
240 | 240 | ||
241 | #define F10_DCHR_FOUR_RANK_DIMM BIT(18) | 241 | #define dct_ganging_enabled(pvt) ((boot_cpu_data.x86 == 0x10) && ((pvt)->dct_sel_lo & BIT(4))) |
242 | #define DDR3_MODE BIT(8) | ||
243 | #define F10_DCHR_MblMode BIT(6) | ||
244 | 242 | ||
243 | #define dct_data_intlv_enabled(pvt) ((pvt)->dct_sel_lo & BIT(5)) | ||
244 | #define dct_memory_cleared(pvt) ((pvt)->dct_sel_lo & BIT(10)) | ||
245 | 245 | ||
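The DCT_SEL_LO accessors each decode one field of F2x110; note that dct_ganging_enabled() now also checks for family 0x10, which per the new test is the only family where ganged DCT mode applies. A sample decode (the register value is made up):

    #include <stdio.h>
    #include <stdint.h>

    #define BIT(n) (1u << (n))

    int main(void)
    {
            uint32_t dct_sel_lo = 0x00040055;   /* sample F2x110 value */
            unsigned int fam = 0x10;            /* boot_cpu_data.x86 stand-in */

            printf("base addr:  0x%x\n", dct_sel_lo & 0xFFFFF800);
            printf("intlv addr: %u\n", (dct_sel_lo >> 6) & 0x3);
            printf("high range: %d\n", !!(dct_sel_lo & BIT(0)));
            printf("interleave: %d\n", !!(dct_sel_lo & BIT(2)));
            /* ganging is family 0x10 only, hence the extra family test */
            printf("ganged:     %d\n", fam == 0x10 && !!(dct_sel_lo & BIT(4)));
            return 0;
    }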
246 | #define F10_DCTL_SEL_LOW 0x110 | 246 | #define SWAP_INTLV_REG 0x10c |
247 | #define dct_sel_baseaddr(pvt) ((pvt->dram_ctl_select_low) & 0xFFFFF800) | ||
248 | #define dct_sel_interleave_addr(pvt) (((pvt->dram_ctl_select_low) >> 6) & 0x3) | ||
249 | #define dct_high_range_enabled(pvt) (pvt->dram_ctl_select_low & BIT(0)) | ||
250 | #define dct_interleave_enabled(pvt) (pvt->dram_ctl_select_low & BIT(2)) | ||
251 | #define dct_ganging_enabled(pvt) (pvt->dram_ctl_select_low & BIT(4)) | ||
252 | #define dct_data_intlv_enabled(pvt) (pvt->dram_ctl_select_low & BIT(5)) | ||
253 | #define dct_dram_enabled(pvt) (pvt->dram_ctl_select_low & BIT(8)) | ||
254 | #define dct_memory_cleared(pvt) (pvt->dram_ctl_select_low & BIT(10)) | ||
255 | 247 | ||
256 | #define F10_DCTL_SEL_HIGH 0x114 | 248 | #define DCT_SEL_HI 0x114 |
257 | 249 | ||
258 | /* | 250 | /* |
259 | * Function 3 - Misc Control | 251 | * Function 3 - Misc Control |
260 | */ | 252 | */ |
261 | #define K8_NBCTL 0x40 | 253 | #define NBCTL 0x40 |
262 | |||
263 | /* Correctable ECC error reporting enable */ | ||
264 | #define K8_NBCTL_CECCEn BIT(0) | ||
265 | |||
266 | /* UnCorrectable ECC error reporting enable */ | ||
267 | #define K8_NBCTL_UECCEn BIT(1) | ||
268 | |||
269 | #define K8_NBCFG 0x44 | ||
270 | #define K8_NBCFG_CHIPKILL BIT(23) | ||
271 | #define K8_NBCFG_ECC_ENABLE BIT(22) | ||
272 | 254 | ||
273 | #define K8_NBSL 0x48 | 255 | #define NBCFG 0x44 |
256 | #define NBCFG_CHIPKILL BIT(23) | ||
257 | #define NBCFG_ECC_ENABLE BIT(22) | ||
274 | 258 | ||
275 | 259 | /* F3x48: NBSL */ | |
276 | /* Family F10h: Normalized Extended Error Codes */ | ||
277 | #define F10_NBSL_EXT_ERR_RES 0x0 | ||
278 | #define F10_NBSL_EXT_ERR_ECC 0x8 | 260 | #define F10_NBSL_EXT_ERR_ECC 0x8 |
261 | #define NBSL_PP_OBS 0x2 | ||
279 | 262 | ||
280 | /* Next two are overloaded values */ | 263 | #define SCRCTRL 0x58 |
281 | #define F10_NBSL_EXT_ERR_LINK_PROTO 0xB | ||
282 | #define F10_NBSL_EXT_ERR_L3_PROTO 0xB | ||
283 | |||
284 | #define F10_NBSL_EXT_ERR_NB_ARRAY 0xC | ||
285 | #define F10_NBSL_EXT_ERR_DRAM_PARITY 0xD | ||
286 | #define F10_NBSL_EXT_ERR_LINK_RETRY 0xE | ||
287 | |||
288 | /* Next two are overloaded values */ | ||
289 | #define F10_NBSL_EXT_ERR_GART_WALK 0xF | ||
290 | #define F10_NBSL_EXT_ERR_DEV_WALK 0xF | ||
291 | |||
292 | /* 0x10 to 0x1B: Reserved */ | ||
293 | #define F10_NBSL_EXT_ERR_L3_DATA 0x1C | ||
294 | #define F10_NBSL_EXT_ERR_L3_TAG 0x1D | ||
295 | #define F10_NBSL_EXT_ERR_L3_LRU 0x1E | ||
296 | |||
297 | /* K8: Normalized Extended Error Codes */ | ||
298 | #define K8_NBSL_EXT_ERR_ECC 0x0 | ||
299 | #define K8_NBSL_EXT_ERR_CRC 0x1 | ||
300 | #define K8_NBSL_EXT_ERR_SYNC 0x2 | ||
301 | #define K8_NBSL_EXT_ERR_MST 0x3 | ||
302 | #define K8_NBSL_EXT_ERR_TGT 0x4 | ||
303 | #define K8_NBSL_EXT_ERR_GART 0x5 | ||
304 | #define K8_NBSL_EXT_ERR_RMW 0x6 | ||
305 | #define K8_NBSL_EXT_ERR_WDT 0x7 | ||
306 | #define K8_NBSL_EXT_ERR_CHIPKILL_ECC 0x8 | ||
307 | #define K8_NBSL_EXT_ERR_DRAM_PARITY 0xD | ||
308 | |||
309 | /* | ||
310 | * The following are for BUS type errors AFTER values have been normalized by | ||
311 | * shifting right | ||
312 | */ | ||
313 | #define K8_NBSL_PP_SRC 0x0 | ||
314 | #define K8_NBSL_PP_RES 0x1 | ||
315 | #define K8_NBSL_PP_OBS 0x2 | ||
316 | #define K8_NBSL_PP_GENERIC 0x3 | ||
317 | |||
318 | #define EXTRACT_ERR_CPU_MAP(x) ((x) & 0xF) | ||
319 | |||
320 | #define K8_NBEAL 0x50 | ||
321 | #define K8_NBEAH 0x54 | ||
322 | #define K8_SCRCTRL 0x58 | ||
323 | |||
324 | #define F10_NB_CFG_LOW 0x88 | ||
325 | #define F10_NB_CFG_LOW_ENABLE_EXT_CFG BIT(14) | ||
326 | |||
327 | #define F10_NB_CFG_HIGH 0x8C | ||
328 | 264 | ||
329 | #define F10_ONLINE_SPARE 0xB0 | 265 | #define F10_ONLINE_SPARE 0xB0 |
330 | #define F10_ONLINE_SPARE_SWAPDONE0(x) ((x) & BIT(1)) | 266 | #define online_spare_swap_done(pvt, c) (((pvt)->online_spare >> (1 + 2 * (c))) & 0x1) |
331 | #define F10_ONLINE_SPARE_SWAPDONE1(x) ((x) & BIT(3)) | 267 | #define online_spare_bad_dramcs(pvt, c) (((pvt)->online_spare >> (4 + 4 * (c))) & 0x7) |
332 | #define F10_ONLINE_SPARE_BADDRAM_CS0(x) (((x) >> 4) & 0x00000007) | ||
333 | #define F10_ONLINE_SPARE_BADDRAM_CS1(x) (((x) >> 8) & 0x00000007) | ||
334 | 268 | ||
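
The two channel-indexed macros above replace four fixed ones: for channel 0 they read bit 1 and bits [6:4], for channel 1 bit 3 and bits [10:8], exactly what the deleted SWAPDONE/BADDRAM_CS pairs tested. A small self-check sketch with a made-up register value:

    #include <assert.h>
    #include <stdint.h>

    #define BIT(n) (1u << (n))

    int main(void)
    {
        uint32_t reg = BIT(3) | (0x5u << 8); /* swap done on ch 1, bad CS 5 on ch 1 */

        for (int c = 0; c < 2; c++) {
            unsigned swap_done = (reg >> (1 + 2 * c)) & 0x1; /* online_spare_swap_done() */
            unsigned bad_cs    = (reg >> (4 + 4 * c)) & 0x7; /* online_spare_bad_dramcs() */

            /* the old fixed-channel macros, for cross-checking */
            unsigned old_swap = c ? !!(reg & BIT(3)) : !!(reg & BIT(1));
            unsigned old_bad  = c ? (reg >> 8) & 0x7 : (reg >> 4) & 0x7;

            assert(swap_done == old_swap && bad_cs == old_bad);
        }
        return 0;
    }
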
335 | #define F10_NB_ARRAY_ADDR 0xB8 | 269 | #define F10_NB_ARRAY_ADDR 0xB8 |
336 | 270 | #define F10_NB_ARRAY_DRAM_ECC BIT(31) | |
337 | #define F10_NB_ARRAY_DRAM_ECC 0x80000000 | ||
338 | 271 | ||
339 | /* Bits [2:1] are used to select 16-byte section within a 64-byte cacheline */ | 272 | /* Bits [2:1] are used to select 16-byte section within a 64-byte cacheline */ |
340 | #define SET_NB_ARRAY_ADDRESS(section) (((section) & 0x3) << 1) | 273 | #define SET_NB_ARRAY_ADDRESS(section) (((section) & 0x3) << 1) |
341 | 274 | ||
342 | #define F10_NB_ARRAY_DATA 0xBC | 275 | #define F10_NB_ARRAY_DATA 0xBC |
343 | |||
344 | #define SET_NB_DRAM_INJECTION_WRITE(word, bits) \ | 276 | #define SET_NB_DRAM_INJECTION_WRITE(word, bits) \ |
345 | (BIT(((word) & 0xF) + 20) | \ | 277 | (BIT(((word) & 0xF) + 20) | \ |
346 | BIT(17) | bits) | 278 | BIT(17) | bits) |
347 | |||
348 | #define SET_NB_DRAM_INJECTION_READ(word, bits) \ | 279 | #define SET_NB_DRAM_INJECTION_READ(word, bits) \ |
349 | (BIT(((word) & 0xF) + 20) | \ | 280 | (BIT(((word) & 0xF) + 20) | \ |
350 | BIT(16) | bits) | 281 | BIT(16) | bits) |
351 | 282 | ||
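
These two macros build the value written to F10_NB_ARRAY_DATA: bit 16 requests a read injection, bit 17 a write injection, the low bits carry the ECC bit map, and one of bits [28:20] selects the 16-bit word (the injection code rejects word values above 8, so the shift stays inside 32 bits). A sketch with arbitrary example inputs:

    #include <stdint.h>
    #include <stdio.h>

    #define BIT(n) (1u << (n))

    static uint32_t injection_write(unsigned word, uint32_t bit_map)
    {
        return BIT((word & 0xF) + 20) | BIT(17) | bit_map;
    }

    int main(void)
    {
        /* inject into word 2, flipping ECC bits 0 and 3 -> 0x00420009 */
        printf("word_bits=0x%08x\n", injection_write(2, 0x9));
        return 0;
    }
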
352 | #define K8_NBCAP 0xE8 | 283 | #define NBCAP 0xE8 |
353 | #define K8_NBCAP_CORES (BIT(12)|BIT(13)) | 284 | #define NBCAP_CHIPKILL BIT(4) |
354 | #define K8_NBCAP_CHIPKILL BIT(4) | 285 | #define NBCAP_SECDED BIT(3) |
355 | #define K8_NBCAP_SECDED BIT(3) | 286 | #define NBCAP_DCT_DUAL BIT(0) |
356 | #define K8_NBCAP_DCT_DUAL BIT(0) | ||
357 | 287 | ||
358 | #define EXT_NB_MCA_CFG 0x180 | 288 | #define EXT_NB_MCA_CFG 0x180 |
359 | 289 | ||
360 | /* MSRs */ | 290 | /* MSRs */ |
361 | #define K8_MSR_MCGCTL_NBE BIT(4) | 291 | #define MSR_MCGCTL_NBE BIT(4) |
362 | |||
363 | #define K8_MSR_MC4CTL 0x0410 | ||
364 | #define K8_MSR_MC4STAT 0x0411 | ||
365 | #define K8_MSR_MC4ADDR 0x0412 | ||
366 | 292 | ||
367 | /* AMD sets the first MC device at device ID 0x18. */ | 293 | /* AMD sets the first MC device at device ID 0x18. */ |
368 | static inline int get_node_id(struct pci_dev *pdev) | 294 | static inline u8 get_node_id(struct pci_dev *pdev) |
369 | { | 295 | { |
370 | return PCI_SLOT(pdev->devfn) - 0x18; | 296 | return PCI_SLOT(pdev->devfn) - 0x18; |
371 | } | 297 | } |
372 | 298 | ||
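
get_node_id() relies on the convention that node N's northbridge appears as PCI device 0x18 + N; PCI_SLOT() is just bits [7:3] of the devfn byte, so the helper reduces to a subtraction. A standalone sketch with a fabricated devfn:

    #include <stdint.h>
    #include <stdio.h>

    #define PCI_SLOT(devfn) (((devfn) >> 3) & 0x1f)

    int main(void)
    {
        uint8_t devfn = (0x19 << 3) | 0x3; /* device 0x19, function 3 */

        printf("node id = %d\n", PCI_SLOT(devfn) - 0x18); /* -> 1 */
        return 0;
    }
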
373 | enum amd64_chipset_families { | 299 | enum amd_families { |
374 | K8_CPUS = 0, | 300 | K8_CPUS = 0, |
375 | F10_CPUS, | 301 | F10_CPUS, |
376 | F11_CPUS, | 302 | F15_CPUS, |
303 | NUM_FAMILIES, | ||
377 | }; | 304 | }; |
378 | 305 | ||
379 | /* Error injection control structure */ | 306 | /* Error injection control structure */ |
@@ -383,17 +310,36 @@ struct error_injection { | |||
383 | u32 bit_map; | 310 | u32 bit_map; |
384 | }; | 311 | }; |
385 | 312 | ||
313 | /* low and high part of PCI config space regs */ | ||
314 | struct reg_pair { | ||
315 | u32 lo, hi; | ||
316 | }; | ||
317 | |||
318 | /* | ||
319 | * See F1x[1, 0][7C:40] DRAM Base/Limit Registers | ||
320 | */ | ||
321 | struct dram_range { | ||
322 | struct reg_pair base; | ||
323 | struct reg_pair lim; | ||
324 | }; | ||
325 | |||
326 | /* A DCT chip select collection */ | ||
327 | struct chip_select { | ||
328 | u32 csbases[NUM_CHIPSELECTS]; | ||
329 | u8 b_cnt; | ||
330 | |||
331 | u32 csmasks[NUM_CHIPSELECTS]; | ||
332 | u8 m_cnt; | ||
333 | }; | ||
334 | |||
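
With chip selects grouped per DCT, csrow_enabled(i, dct, pvt) from the top of this hunk becomes a single indexed test of the enable bit in the cached base register. A mock of the layout, assuming NUM_CHIPSELECTS is 8 and DCSB_CS_ENABLE is bit 0 (both are defined earlier in the real header, outside this hunk):

    #include <stdint.h>
    #include <stdio.h>

    #define NUM_CHIPSELECTS 8        /* assumed value */
    #define DCSB_CS_ENABLE (1u << 0) /* assumed value */

    struct chip_select {
        uint32_t csbases[NUM_CHIPSELECTS];
        uint8_t  b_cnt;
        uint32_t csmasks[NUM_CHIPSELECTS];
        uint8_t  m_cnt;
    };

    int main(void)
    {
        struct chip_select csels[2] = { 0 };

        csels[0].csbases[3] = 0x00100000 | DCSB_CS_ENABLE;

        /* csrow_enabled(3, 0, pvt) expands to this test */
        printf("csrow 3 on DCT0 enabled: %d\n",
               !!(csels[0].csbases[3] & DCSB_CS_ENABLE));
        return 0;
    }
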
386 | struct amd64_pvt { | 335 | struct amd64_pvt { |
336 | struct low_ops *ops; | ||
337 | |||
387 | /* pci_device handles which we utilize */ | 338 | /* pci_device handles which we utilize */ |
388 | struct pci_dev *addr_f1_ctl; | 339 | struct pci_dev *F1, *F2, *F3; |
389 | struct pci_dev *dram_f2_ctl; | ||
390 | struct pci_dev *misc_f3_ctl; | ||
391 | 340 | ||
392 | int mc_node_id; /* MC index of this MC node */ | 341 | unsigned mc_node_id; /* MC index of this MC node */ |
393 | int ext_model; /* extended model value of this node */ | 342 | int ext_model; /* extended model value of this node */ |
394 | |||
395 | struct low_ops *ops; /* pointer to per PCI Device ID func table */ | ||
396 | |||
397 | int channel_count; | 343 | int channel_count; |
398 | 344 | ||
399 | /* Raw registers */ | 345 | /* Raw registers */ |
@@ -408,85 +354,66 @@ struct amd64_pvt { | |||
408 | u32 dbam0; /* DRAM Base Address Mapping reg for DCT0 */ | 354 | u32 dbam0; /* DRAM Base Address Mapping reg for DCT0 */ |
409 | u32 dbam1; /* DRAM Base Address Mapping reg for DCT1 */ | 355 | u32 dbam1; /* DRAM Base Address Mapping reg for DCT1 */ |
410 | 356 | ||
411 | /* DRAM CS Base Address Registers F2x[1,0][5C:40] */ | 357 | /* one for each DCT */ |
412 | u32 dcsb0[MAX_CS_COUNT]; | 358 | struct chip_select csels[2]; |
413 | u32 dcsb1[MAX_CS_COUNT]; | 359 | |
414 | 360 | /* DRAM base and limit pairs F1x[78,70,68,60,58,50,48,40] */ | |
415 | /* DRAM CS Mask Registers F2x[1,0][6C:60] */ | 361 | struct dram_range ranges[DRAM_RANGES]; |
416 | u32 dcsm0[MAX_CS_COUNT]; | ||
417 | u32 dcsm1[MAX_CS_COUNT]; | ||
418 | |||
419 | /* | ||
420 | * Decoded parts of DRAM BASE and LIMIT Registers | ||
421 | * F1x[78,70,68,60,58,50,48,40] | ||
422 | */ | ||
423 | u64 dram_base[DRAM_REG_COUNT]; | ||
424 | u64 dram_limit[DRAM_REG_COUNT]; | ||
425 | u8 dram_IntlvSel[DRAM_REG_COUNT]; | ||
426 | u8 dram_IntlvEn[DRAM_REG_COUNT]; | ||
427 | u8 dram_DstNode[DRAM_REG_COUNT]; | ||
428 | u8 dram_rw_en[DRAM_REG_COUNT]; | ||
429 | |||
430 | /* | ||
431 | * The following fields are set at (load) run time, after CPU revision | ||
432 | * has been determined, since the dct_base and dct_mask registers vary | ||
433 | * based on revision | ||
434 | */ | ||
435 | u32 dcsb_base; /* DCSB base bits */ | ||
436 | u32 dcsm_mask; /* DCSM mask bits */ | ||
437 | u32 cs_count; /* num chip selects (== num DCSB registers) */ | ||
438 | u32 num_dcsm; /* Number of DCSM registers */ | ||
439 | u32 dcs_mask_notused; /* DCSM notused mask bits */ | ||
440 | u32 dcs_shift; /* DCSB and DCSM shift value */ | ||
441 | 362 | ||
442 | u64 top_mem; /* top of memory below 4GB */ | 363 | u64 top_mem; /* top of memory below 4GB */ |
443 | u64 top_mem2; /* top of memory above 4GB */ | 364 | u64 top_mem2; /* top of memory above 4GB */ |
444 | 365 | ||
445 | u32 dram_ctl_select_low; /* DRAM Controller Select Low Reg */ | 366 | u32 dct_sel_lo; /* DRAM Controller Select Low */ |
446 | u32 dram_ctl_select_high; /* DRAM Controller Select High Reg */ | 367 | u32 dct_sel_hi; /* DRAM Controller Select High */ |
447 | u32 online_spare; /* On-Line spare Reg */ | 368 | u32 online_spare; /* On-Line spare Reg */ |
448 | 369 | ||
449 | /* x4 or x8 syndromes in use */ | 370 | /* x4 or x8 syndromes in use */ |
450 | u8 syn_type; | 371 | u8 ecc_sym_sz; |
451 | |||
452 | /* temp storage for when input is received from sysfs */ | ||
453 | struct err_regs ctl_error_info; | ||
454 | 372 | ||
455 | /* place to store error injection parameters prior to issue */ | 373 | /* place to store error injection parameters prior to issue */ |
456 | struct error_injection injection; | 374 | struct error_injection injection; |
375 | }; | ||
457 | 376 | ||
458 | /* Save old hw registers' values before we modified them */ | 377 | static inline u64 get_dram_base(struct amd64_pvt *pvt, unsigned i) |
459 | u32 nbctl_mcgctl_saved; /* When true, following 2 are valid */ | 378 | { |
460 | u32 old_nbctl; | 379 | u64 addr = ((u64)pvt->ranges[i].base.lo & 0xffff0000) << 8; |
380 | |||
381 | if (boot_cpu_data.x86 == 0xf) | ||
382 | return addr; | ||
383 | |||
384 | return (((u64)pvt->ranges[i].base.hi & 0x000000ff) << 40) | addr; | ||
385 | } | ||
386 | |||
387 | static inline u64 get_dram_limit(struct amd64_pvt *pvt, unsigned i) | ||
388 | { | ||
389 | u64 lim = (((u64)pvt->ranges[i].lim.lo & 0xffff0000) << 8) | 0x00ffffff; | ||
461 | 390 | ||
462 | /* MC Type Index value: socket F vs Family 10h */ | 391 | if (boot_cpu_data.x86 == 0xf) |
463 | u32 mc_type_index; | 392 | return lim; |
393 | |||
394 | return (((u64)pvt->ranges[i].lim.hi & 0x000000ff) << 40) | lim; | ||
395 | } | ||
396 | |||
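
get_dram_base()/get_dram_limit() assemble a 48-bit physical address from a reg_pair: bits [39:24] come from lo[31:16], bits [47:40] from the hi register (absent on K8, hence the family 0xf early return), and a limit gets its low 24 bits filled with ones because it is inclusive. A sketch of the limit case with made-up register values:

    #include <stdint.h>
    #include <stdio.h>

    struct reg_pair { uint32_t lo, hi; };

    static uint64_t dram_limit(struct reg_pair lim, int family)
    {
        uint64_t l = (((uint64_t)lim.lo & 0xffff0000) << 8) | 0x00ffffff;

        if (family == 0xf) /* K8 has no high half */
            return l;

        return (((uint64_t)lim.hi & 0xff) << 40) | l;
    }

    int main(void)
    {
        struct reg_pair lim = { .lo = 0x00ff0000, .hi = 0x01 };

        /* -> 0x100ffffffff on family 0x10 */
        printf("limit = 0x%010llx\n", (unsigned long long)dram_limit(lim, 0x10));
        return 0;
    }
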
397 | static inline u16 extract_syndrome(u64 status) | ||
398 | { | ||
399 | return ((status >> 47) & 0xff) | ((status >> 16) & 0xff00); | ||
400 | } | ||
401 | |||
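
extract_syndrome() gathers the 16-bit ECC syndrome from two disjoint MC4_STATUS fields: bits [54:47] become the low byte and bits [31:24] the high byte. A sketch with a fabricated status value:

    #include <stdint.h>
    #include <stdio.h>

    static uint16_t extract_syndrome(uint64_t status)
    {
        return ((status >> 47) & 0xff) | ((status >> 16) & 0xff00);
    }

    int main(void)
    {
        uint64_t status = ((uint64_t)0xab << 47) | ((uint64_t)0xcd << 24);

        printf("syndrome = 0x%04x\n", extract_syndrome(status)); /* 0xcdab */
        return 0;
    }
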
402 | /* | ||
403 | * per-node ECC settings descriptor | ||
404 | */ | ||
405 | struct ecc_settings { | ||
406 | u32 old_nbctl; | ||
407 | bool nbctl_valid; | ||
464 | 408 | ||
465 | /* misc settings */ | ||
466 | struct flags { | 409 | struct flags { |
467 | unsigned long cf8_extcfg:1; | ||
468 | unsigned long nb_mce_enable:1; | 410 | unsigned long nb_mce_enable:1; |
469 | unsigned long nb_ecc_prev:1; | 411 | unsigned long nb_ecc_prev:1; |
470 | } flags; | 412 | } flags; |
471 | }; | 413 | }; |
472 | 414 | ||
473 | struct scrubrate { | ||
474 | u32 scrubval; /* bit pattern for scrub rate */ | ||
475 | u32 bandwidth; /* bandwidth consumed (bytes/sec) */ | ||
476 | }; | ||
477 | |||
478 | extern struct scrubrate scrubrates[23]; | ||
479 | extern const char *tt_msgs[4]; | ||
480 | extern const char *ll_msgs[4]; | ||
481 | extern const char *rrrr_msgs[16]; | ||
482 | extern const char *to_msgs[2]; | ||
483 | extern const char *pp_msgs[4]; | ||
484 | extern const char *ii_msgs[4]; | ||
485 | extern const char *ext_msgs[32]; | ||
486 | extern const char *htlink_msgs[8]; | ||
487 | |||
488 | #ifdef CONFIG_EDAC_DEBUG | 415 | #ifdef CONFIG_EDAC_DEBUG |
489 | #define NUM_DBG_ATTRS 9 | 416 | #define NUM_DBG_ATTRS 5 |
490 | #else | 417 | #else |
491 | #define NUM_DBG_ATTRS 0 | 418 | #define NUM_DBG_ATTRS 0 |
492 | #endif | 419 | #endif |
@@ -506,58 +433,30 @@ extern struct mcidev_sysfs_attribute amd64_dbg_attrs[NUM_DBG_ATTRS], | |||
506 | */ | 433 | */ |
507 | struct low_ops { | 434 | struct low_ops { |
508 | int (*early_channel_count) (struct amd64_pvt *pvt); | 435 | int (*early_channel_count) (struct amd64_pvt *pvt); |
509 | 436 | void (*map_sysaddr_to_csrow) (struct mem_ctl_info *mci, u64 sys_addr, | |
510 | u64 (*get_error_address) (struct mem_ctl_info *mci, | 437 | u16 syndrome); |
511 | struct err_regs *info); | 438 | int (*dbam_to_cs) (struct amd64_pvt *pvt, u8 dct, unsigned cs_mode); |
512 | void (*read_dram_base_limit) (struct amd64_pvt *pvt, int dram); | 439 | int (*read_dct_pci_cfg) (struct amd64_pvt *pvt, int offset, |
513 | void (*read_dram_ctl_register) (struct amd64_pvt *pvt); | 440 | u32 *val, const char *func); |
514 | void (*map_sysaddr_to_csrow) (struct mem_ctl_info *mci, | ||
515 | struct err_regs *info, u64 SystemAddr); | ||
516 | int (*dbam_to_cs) (struct amd64_pvt *pvt, int cs_mode); | ||
517 | }; | 441 | }; |
518 | 442 | ||
519 | struct amd64_family_type { | 443 | struct amd64_family_type { |
520 | const char *ctl_name; | 444 | const char *ctl_name; |
521 | u16 addr_f1_ctl; | 445 | u16 f1_id, f3_id; |
522 | u16 misc_f3_ctl; | ||
523 | struct low_ops ops; | 446 | struct low_ops ops; |
524 | }; | 447 | }; |
525 | 448 | ||
526 | static struct amd64_family_type amd64_family_types[]; | 449 | int __amd64_write_pci_cfg_dword(struct pci_dev *pdev, int offset, |
527 | 450 | u32 val, const char *func); | |
528 | static inline const char *get_amd_family_name(int index) | ||
529 | { | ||
530 | return amd64_family_types[index].ctl_name; | ||
531 | } | ||
532 | |||
533 | static inline struct low_ops *family_ops(int index) | ||
534 | { | ||
535 | return &amd64_family_types[index].ops; | ||
536 | } | ||
537 | |||
538 | static inline int amd64_read_pci_cfg_dword(struct pci_dev *pdev, int offset, | ||
539 | u32 *val, const char *func) | ||
540 | { | ||
541 | int err = 0; | ||
542 | |||
543 | err = pci_read_config_dword(pdev, offset, val); | ||
544 | if (err) | ||
545 | amd64_printk(KERN_WARNING, "%s: error reading F%dx%x.\n", | ||
546 | func, PCI_FUNC(pdev->devfn), offset); | ||
547 | |||
548 | return err; | ||
549 | } | ||
550 | 451 | ||
551 | #define amd64_read_pci_cfg(pdev, offset, val) \ | 452 | #define amd64_read_pci_cfg(pdev, offset, val) \ |
552 | amd64_read_pci_cfg_dword(pdev, offset, val, __func__) | 453 | __amd64_read_pci_cfg_dword(pdev, offset, val, __func__) |
553 | 454 | ||
554 | /* | 455 | #define amd64_write_pci_cfg(pdev, offset, val) \ |
555 | * For future CPU versions, verify the following as new 'slow' rates appear and | 456 | __amd64_write_pci_cfg_dword(pdev, offset, val, __func__) |
556 | * modify the necessary skip values for the supported CPU. | 457 | |
557 | */ | 458 | #define amd64_read_dct_pci_cfg(pvt, offset, val) \ |
558 | #define K8_MIN_SCRUB_RATE_BITS 0x0 | 459 | pvt->ops->read_dct_pci_cfg(pvt, offset, val, __func__) |
559 | #define F10_MIN_SCRUB_RATE_BITS 0x5 | ||
560 | #define F11_MIN_SCRUB_RATE_BITS 0x6 | ||
561 | 460 | ||
562 | int amd64_get_dram_hole_info(struct mem_ctl_info *mci, u64 *hole_base, | 461 | int amd64_get_dram_hole_info(struct mem_ctl_info *mci, u64 *hole_base, |
563 | u64 *hole_offset, u64 *hole_size); | 462 | u64 *hole_offset, u64 *hole_size); |
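
All three accessor macros above splice the caller's __func__ into the low-level config-space routine, so a failed access is reported against the function that issued it, and amd64_read_dct_pci_cfg() additionally dispatches through the per-family ops table. A userspace mock of the __func__-forwarding pattern, with the access itself stubbed:

    #include <stdint.h>
    #include <stdio.h>

    static int __read_cfg_dword(int offset, uint32_t *val, const char *func)
    {
        int err = -1; /* pretend the access failed */

        (void)val;
        if (err)
            fprintf(stderr, "%s: error reading offset 0x%x\n", func, offset);
        return err;
    }

    #define read_cfg(offset, val) __read_cfg_dword(offset, val, __func__)

    static void probe_node(void)
    {
        uint32_t v;

        /* prints "probe_node: error reading offset 0x110" */
        read_cfg(0x110, &v);
    }

    int main(void) { probe_node(); return 0; }
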
diff --git a/drivers/edac/amd64_edac_dbg.c b/drivers/edac/amd64_edac_dbg.c index 59cf2cf6e11e..e3562288f4ce 100644 --- a/drivers/edac/amd64_edac_dbg.c +++ b/drivers/edac/amd64_edac_dbg.c | |||
@@ -1,167 +1,16 @@ | |||
1 | #include "amd64_edac.h" | 1 | #include "amd64_edac.h" |
2 | 2 | ||
3 | /* | 3 | #define EDAC_DCT_ATTR_SHOW(reg) \ |
4 | * accept a hex value and store it into the virtual error register file, field: | 4 | static ssize_t amd64_##reg##_show(struct mem_ctl_info *mci, char *data) \ |
5 | * nbeal and nbeah. Assume virtual error values have already been set for: NBSL, | 5 | { \ |
6 | * NBSH and NBCFG. Then proceed to map the error values to a MC, CSROW and | 6 | struct amd64_pvt *pvt = mci->pvt_info; \ |
7 | * CHANNEL | 7 | return sprintf(data, "0x%016llx\n", (u64)pvt->reg); \ |
8 | */ | ||
9 | static ssize_t amd64_nbea_store(struct mem_ctl_info *mci, const char *data, | ||
10 | size_t count) | ||
11 | { | ||
12 | struct amd64_pvt *pvt = mci->pvt_info; | ||
13 | unsigned long long value; | ||
14 | int ret = 0; | ||
15 | |||
16 | ret = strict_strtoull(data, 16, &value); | ||
17 | if (ret != -EINVAL) { | ||
18 | debugf0("received NBEA= 0x%llx\n", value); | ||
19 | |||
20 | /* place the value into the virtual error packet */ | ||
21 | pvt->ctl_error_info.nbeal = (u32) value; | ||
22 | value >>= 32; | ||
23 | pvt->ctl_error_info.nbeah = (u32) value; | ||
24 | |||
25 | /* Process the Mapping request */ | ||
26 | /* TODO: Add race prevention */ | ||
27 | amd_decode_nb_mce(pvt->mc_node_id, &pvt->ctl_error_info, 1); | ||
28 | |||
29 | return count; | ||
30 | } | ||
31 | return ret; | ||
32 | } | ||
33 | |||
34 | /* display back what the last NBEA (MCA NB Address (MC4_ADDR)) was written */ | ||
35 | static ssize_t amd64_nbea_show(struct mem_ctl_info *mci, char *data) | ||
36 | { | ||
37 | struct amd64_pvt *pvt = mci->pvt_info; | ||
38 | u64 value; | ||
39 | |||
40 | value = pvt->ctl_error_info.nbeah; | ||
41 | value <<= 32; | ||
42 | value |= pvt->ctl_error_info.nbeal; | ||
43 | |||
44 | return sprintf(data, "%llx\n", value); | ||
45 | } | ||
46 | |||
47 | /* store the NBSL (MCA NB Status Low (MC4_STATUS)) value user desires */ | ||
48 | static ssize_t amd64_nbsl_store(struct mem_ctl_info *mci, const char *data, | ||
49 | size_t count) | ||
50 | { | ||
51 | struct amd64_pvt *pvt = mci->pvt_info; | ||
52 | unsigned long value; | ||
53 | int ret = 0; | ||
54 | |||
55 | ret = strict_strtoul(data, 16, &value); | ||
56 | if (ret != -EINVAL) { | ||
57 | debugf0("received NBSL= 0x%lx\n", value); | ||
58 | |||
59 | pvt->ctl_error_info.nbsl = (u32) value; | ||
60 | |||
61 | return count; | ||
62 | } | ||
63 | return ret; | ||
64 | } | ||
65 | |||
66 | /* display back what the last NBSL value written */ | ||
67 | static ssize_t amd64_nbsl_show(struct mem_ctl_info *mci, char *data) | ||
68 | { | ||
69 | struct amd64_pvt *pvt = mci->pvt_info; | ||
70 | u32 value; | ||
71 | |||
72 | value = pvt->ctl_error_info.nbsl; | ||
73 | |||
74 | return sprintf(data, "%x\n", value); | ||
75 | } | ||
76 | |||
77 | /* store the NBSH (MCA NB Status High) value user desires */ | ||
78 | static ssize_t amd64_nbsh_store(struct mem_ctl_info *mci, const char *data, | ||
79 | size_t count) | ||
80 | { | ||
81 | struct amd64_pvt *pvt = mci->pvt_info; | ||
82 | unsigned long value; | ||
83 | int ret = 0; | ||
84 | |||
85 | ret = strict_strtoul(data, 16, &value); | ||
86 | if (ret != -EINVAL) { | ||
87 | debugf0("received NBSH= 0x%lx\n", value); | ||
88 | |||
89 | pvt->ctl_error_info.nbsh = (u32) value; | ||
90 | |||
91 | return count; | ||
92 | } | ||
93 | return ret; | ||
94 | } | ||
95 | |||
96 | /* display back what the last NBSH value written */ | ||
97 | static ssize_t amd64_nbsh_show(struct mem_ctl_info *mci, char *data) | ||
98 | { | ||
99 | struct amd64_pvt *pvt = mci->pvt_info; | ||
100 | u32 value; | ||
101 | |||
102 | value = pvt->ctl_error_info.nbsh; | ||
103 | |||
104 | return sprintf(data, "%x\n", value); | ||
105 | } | 8 | } |
106 | 9 | ||
107 | /* accept and store the NBCFG (MCA NB Configuration) value user desires */ | 10 | EDAC_DCT_ATTR_SHOW(dhar); |
108 | static ssize_t amd64_nbcfg_store(struct mem_ctl_info *mci, | 11 | EDAC_DCT_ATTR_SHOW(dbam0); |
109 | const char *data, size_t count) | 12 | EDAC_DCT_ATTR_SHOW(top_mem); |
110 | { | 13 | EDAC_DCT_ATTR_SHOW(top_mem2); |
111 | struct amd64_pvt *pvt = mci->pvt_info; | ||
112 | unsigned long value; | ||
113 | int ret = 0; | ||
114 | |||
115 | ret = strict_strtoul(data, 16, &value); | ||
116 | if (ret != -EINVAL) { | ||
117 | debugf0("received NBCFG= 0x%lx\n", value); | ||
118 | |||
119 | pvt->ctl_error_info.nbcfg = (u32) value; | ||
120 | |||
121 | return count; | ||
122 | } | ||
123 | return ret; | ||
124 | } | ||
125 | |||
126 | /* various show routines for the controls of a MCI */ | ||
127 | static ssize_t amd64_nbcfg_show(struct mem_ctl_info *mci, char *data) | ||
128 | { | ||
129 | struct amd64_pvt *pvt = mci->pvt_info; | ||
130 | |||
131 | return sprintf(data, "%x\n", pvt->ctl_error_info.nbcfg); | ||
132 | } | ||
133 | |||
134 | |||
135 | static ssize_t amd64_dhar_show(struct mem_ctl_info *mci, char *data) | ||
136 | { | ||
137 | struct amd64_pvt *pvt = mci->pvt_info; | ||
138 | |||
139 | return sprintf(data, "%x\n", pvt->dhar); | ||
140 | } | ||
141 | |||
142 | |||
143 | static ssize_t amd64_dbam_show(struct mem_ctl_info *mci, char *data) | ||
144 | { | ||
145 | struct amd64_pvt *pvt = mci->pvt_info; | ||
146 | |||
147 | return sprintf(data, "%x\n", pvt->dbam0); | ||
148 | } | ||
149 | |||
150 | |||
151 | static ssize_t amd64_topmem_show(struct mem_ctl_info *mci, char *data) | ||
152 | { | ||
153 | struct amd64_pvt *pvt = mci->pvt_info; | ||
154 | |||
155 | return sprintf(data, "%llx\n", pvt->top_mem); | ||
156 | } | ||
157 | |||
158 | |||
159 | static ssize_t amd64_topmem2_show(struct mem_ctl_info *mci, char *data) | ||
160 | { | ||
161 | struct amd64_pvt *pvt = mci->pvt_info; | ||
162 | |||
163 | return sprintf(data, "%llx\n", pvt->top_mem2); | ||
164 | } | ||
165 | 14 | ||
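
The EDAC_DCT_ATTR_SHOW() macro on the new side replaces the four hand-written register dumps (the nb*_ctl show/store pairs are dropped outright, which is also why NUM_DBG_ATTRS falls from 9 to 5 further down): ## pasting builds the function name, and the u64 cast lets a single 0x%016llx format serve both u32 and u64 fields. Roughly what the dhar instance expands to, with the kernel types mocked out:

    #include <stdint.h>
    #include <stdio.h>
    #include <sys/types.h>

    struct amd64_pvt { uint32_t dhar; };
    struct mem_ctl_info { void *pvt_info; };

    #define EDAC_DCT_ATTR_SHOW(reg)                                           \
    static ssize_t amd64_##reg##_show(struct mem_ctl_info *mci, char *data)   \
    {                                                                         \
        struct amd64_pvt *pvt = mci->pvt_info;                                \
        return sprintf(data, "0x%016llx\n", (unsigned long long)pvt->reg);    \
    }

    EDAC_DCT_ATTR_SHOW(dhar) /* defines amd64_dhar_show() */

    int main(void)
    {
        struct amd64_pvt pvt = { .dhar = 0xdeadbeef };
        struct mem_ctl_info mci = { .pvt_info = &pvt };
        char buf[32];

        amd64_dhar_show(&mci, buf);
        fputs(buf, stdout); /* 0x00000000deadbeef */
        return 0;
    }
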
166 | static ssize_t amd64_hole_show(struct mem_ctl_info *mci, char *data) | 15 | static ssize_t amd64_hole_show(struct mem_ctl_info *mci, char *data) |
167 | { | 16 | { |
@@ -182,38 +31,6 @@ struct mcidev_sysfs_attribute amd64_dbg_attrs[] = { | |||
182 | 31 | ||
183 | { | 32 | { |
184 | .attr = { | 33 | .attr = { |
185 | .name = "nbea_ctl", | ||
186 | .mode = (S_IRUGO | S_IWUSR) | ||
187 | }, | ||
188 | .show = amd64_nbea_show, | ||
189 | .store = amd64_nbea_store, | ||
190 | }, | ||
191 | { | ||
192 | .attr = { | ||
193 | .name = "nbsl_ctl", | ||
194 | .mode = (S_IRUGO | S_IWUSR) | ||
195 | }, | ||
196 | .show = amd64_nbsl_show, | ||
197 | .store = amd64_nbsl_store, | ||
198 | }, | ||
199 | { | ||
200 | .attr = { | ||
201 | .name = "nbsh_ctl", | ||
202 | .mode = (S_IRUGO | S_IWUSR) | ||
203 | }, | ||
204 | .show = amd64_nbsh_show, | ||
205 | .store = amd64_nbsh_store, | ||
206 | }, | ||
207 | { | ||
208 | .attr = { | ||
209 | .name = "nbcfg_ctl", | ||
210 | .mode = (S_IRUGO | S_IWUSR) | ||
211 | }, | ||
212 | .show = amd64_nbcfg_show, | ||
213 | .store = amd64_nbcfg_store, | ||
214 | }, | ||
215 | { | ||
216 | .attr = { | ||
217 | .name = "dhar", | 34 | .name = "dhar", |
218 | .mode = (S_IRUGO) | 35 | .mode = (S_IRUGO) |
219 | }, | 36 | }, |
@@ -225,7 +42,7 @@ struct mcidev_sysfs_attribute amd64_dbg_attrs[] = { | |||
225 | .name = "dbam", | 42 | .name = "dbam", |
226 | .mode = (S_IRUGO) | 43 | .mode = (S_IRUGO) |
227 | }, | 44 | }, |
228 | .show = amd64_dbam_show, | 45 | .show = amd64_dbam0_show, |
229 | .store = NULL, | 46 | .store = NULL, |
230 | }, | 47 | }, |
231 | { | 48 | { |
@@ -233,7 +50,7 @@ struct mcidev_sysfs_attribute amd64_dbg_attrs[] = { | |||
233 | .name = "topmem", | 50 | .name = "topmem", |
234 | .mode = (S_IRUGO) | 51 | .mode = (S_IRUGO) |
235 | }, | 52 | }, |
236 | .show = amd64_topmem_show, | 53 | .show = amd64_top_mem_show, |
237 | .store = NULL, | 54 | .store = NULL, |
238 | }, | 55 | }, |
239 | { | 56 | { |
@@ -241,7 +58,7 @@ struct mcidev_sysfs_attribute amd64_dbg_attrs[] = { | |||
241 | .name = "topmem2", | 58 | .name = "topmem2", |
242 | .mode = (S_IRUGO) | 59 | .mode = (S_IRUGO) |
243 | }, | 60 | }, |
244 | .show = amd64_topmem2_show, | 61 | .show = amd64_top_mem2_show, |
245 | .store = NULL, | 62 | .store = NULL, |
246 | }, | 63 | }, |
247 | { | 64 | { |
diff --git a/drivers/edac/amd64_edac_inj.c b/drivers/edac/amd64_edac_inj.c index 29f1f7a612d9..303f10e03dda 100644 --- a/drivers/edac/amd64_edac_inj.c +++ b/drivers/edac/amd64_edac_inj.c | |||
@@ -23,9 +23,7 @@ static ssize_t amd64_inject_section_store(struct mem_ctl_info *mci, | |||
23 | if (ret != -EINVAL) { | 23 | if (ret != -EINVAL) { |
24 | 24 | ||
25 | if (value > 3) { | 25 | if (value > 3) { |
26 | amd64_printk(KERN_WARNING, | 26 | amd64_warn("%s: invalid section 0x%lx\n", __func__, value); |
27 | "%s: invalid section 0x%lx\n", | ||
28 | __func__, value); | ||
29 | return -EINVAL; | 27 | return -EINVAL; |
30 | } | 28 | } |
31 | 29 | ||
@@ -58,9 +56,7 @@ static ssize_t amd64_inject_word_store(struct mem_ctl_info *mci, | |||
58 | if (ret != -EINVAL) { | 56 | if (ret != -EINVAL) { |
59 | 57 | ||
60 | if (value > 8) { | 58 | if (value > 8) { |
61 | amd64_printk(KERN_WARNING, | 59 | amd64_warn("%s: invalid word 0x%lx\n", __func__, value); |
62 | "%s: invalid word 0x%lx\n", | ||
63 | __func__, value); | ||
64 | return -EINVAL; | 60 | return -EINVAL; |
65 | } | 61 | } |
66 | 62 | ||
@@ -92,9 +88,8 @@ static ssize_t amd64_inject_ecc_vector_store(struct mem_ctl_info *mci, | |||
92 | if (ret != -EINVAL) { | 88 | if (ret != -EINVAL) { |
93 | 89 | ||
94 | if (value & 0xFFFF0000) { | 90 | if (value & 0xFFFF0000) { |
95 | amd64_printk(KERN_WARNING, | 91 | amd64_warn("%s: invalid EccVector: 0x%lx\n", |
96 | "%s: invalid EccVector: 0x%lx\n", | 92 | __func__, value); |
97 | __func__, value); | ||
98 | return -EINVAL; | 93 | return -EINVAL; |
99 | } | 94 | } |
100 | 95 | ||
@@ -122,15 +117,13 @@ static ssize_t amd64_inject_read_store(struct mem_ctl_info *mci, | |||
122 | /* Form value to choose 16-byte section of cacheline */ | 117 | /* Form value to choose 16-byte section of cacheline */ |
123 | section = F10_NB_ARRAY_DRAM_ECC | | 118 | section = F10_NB_ARRAY_DRAM_ECC | |
124 | SET_NB_ARRAY_ADDRESS(pvt->injection.section); | 119 | SET_NB_ARRAY_ADDRESS(pvt->injection.section); |
125 | pci_write_config_dword(pvt->misc_f3_ctl, | 120 | amd64_write_pci_cfg(pvt->F3, F10_NB_ARRAY_ADDR, section); |
126 | F10_NB_ARRAY_ADDR, section); | ||
127 | 121 | ||
128 | word_bits = SET_NB_DRAM_INJECTION_READ(pvt->injection.word, | 122 | word_bits = SET_NB_DRAM_INJECTION_READ(pvt->injection.word, |
129 | pvt->injection.bit_map); | 123 | pvt->injection.bit_map); |
130 | 124 | ||
131 | /* Issue 'word' and 'bit' along with the READ request */ | 125 | /* Issue 'word' and 'bit' along with the READ request */ |
132 | pci_write_config_dword(pvt->misc_f3_ctl, | 126 | amd64_write_pci_cfg(pvt->F3, F10_NB_ARRAY_DATA, word_bits); |
133 | F10_NB_ARRAY_DATA, word_bits); | ||
134 | 127 | ||
135 | debugf0("section=0x%x word_bits=0x%x\n", section, word_bits); | 128 | debugf0("section=0x%x word_bits=0x%x\n", section, word_bits); |
136 | 129 | ||
@@ -157,15 +150,13 @@ static ssize_t amd64_inject_write_store(struct mem_ctl_info *mci, | |||
157 | /* Form value to choose 16-byte section of cacheline */ | 150 | /* Form value to choose 16-byte section of cacheline */ |
158 | section = F10_NB_ARRAY_DRAM_ECC | | 151 | section = F10_NB_ARRAY_DRAM_ECC | |
159 | SET_NB_ARRAY_ADDRESS(pvt->injection.section); | 152 | SET_NB_ARRAY_ADDRESS(pvt->injection.section); |
160 | pci_write_config_dword(pvt->misc_f3_ctl, | 153 | amd64_write_pci_cfg(pvt->F3, F10_NB_ARRAY_ADDR, section); |
161 | F10_NB_ARRAY_ADDR, section); | ||
162 | 154 | ||
163 | word_bits = SET_NB_DRAM_INJECTION_WRITE(pvt->injection.word, | 155 | word_bits = SET_NB_DRAM_INJECTION_WRITE(pvt->injection.word, |
164 | pvt->injection.bit_map); | 156 | pvt->injection.bit_map); |
165 | 157 | ||
166 | /* Issue 'word' and 'bit' along with the READ request */ | 158 | /* Issue 'word' and 'bit' along with the READ request */ |
167 | pci_write_config_dword(pvt->misc_f3_ctl, | 159 | amd64_write_pci_cfg(pvt->F3, F10_NB_ARRAY_DATA, word_bits); |
168 | F10_NB_ARRAY_DATA, word_bits); | ||
169 | 160 | ||
170 | debugf0("section=0x%x word_bits=0x%x\n", section, word_bits); | 161 | debugf0("section=0x%x word_bits=0x%x\n", section, word_bits); |
171 | 162 | ||
diff --git a/drivers/edac/amd76x_edac.c b/drivers/edac/amd76x_edac.c index cace0a7b707a..e47e73bbbcc5 100644 --- a/drivers/edac/amd76x_edac.c +++ b/drivers/edac/amd76x_edac.c | |||
@@ -19,7 +19,7 @@ | |||
19 | #include <linux/edac.h> | 19 | #include <linux/edac.h> |
20 | #include "edac_core.h" | 20 | #include "edac_core.h" |
21 | 21 | ||
22 | #define AMD76X_REVISION " Ver: 2.0.2 " __DATE__ | 22 | #define AMD76X_REVISION " Ver: 2.0.2" |
23 | #define EDAC_MOD_STR "amd76x_edac" | 23 | #define EDAC_MOD_STR "amd76x_edac" |
24 | 24 | ||
25 | #define amd76x_printk(level, fmt, arg...) \ | 25 | #define amd76x_printk(level, fmt, arg...) \ |
diff --git a/drivers/edac/amd8111_edac.c b/drivers/edac/amd8111_edac.c index 35b78d04bbfa..ddd890052ce2 100644 --- a/drivers/edac/amd8111_edac.c +++ b/drivers/edac/amd8111_edac.c | |||
@@ -33,7 +33,7 @@ | |||
33 | #include "edac_module.h" | 33 | #include "edac_module.h" |
34 | #include "amd8111_edac.h" | 34 | #include "amd8111_edac.h" |
35 | 35 | ||
36 | #define AMD8111_EDAC_REVISION " Ver: 1.0.0 " __DATE__ | 36 | #define AMD8111_EDAC_REVISION " Ver: 1.0.0" |
37 | #define AMD8111_EDAC_MOD_STR "amd8111_edac" | 37 | #define AMD8111_EDAC_MOD_STR "amd8111_edac" |
38 | 38 | ||
39 | #define PCI_DEVICE_ID_AMD_8111_PCI 0x7460 | 39 | #define PCI_DEVICE_ID_AMD_8111_PCI 0x7460 |
diff --git a/drivers/edac/amd8131_edac.c b/drivers/edac/amd8131_edac.c index b432d60c622a..a5c680561c73 100644 --- a/drivers/edac/amd8131_edac.c +++ b/drivers/edac/amd8131_edac.c | |||
@@ -33,7 +33,7 @@ | |||
33 | #include "edac_module.h" | 33 | #include "edac_module.h" |
34 | #include "amd8131_edac.h" | 34 | #include "amd8131_edac.h" |
35 | 35 | ||
36 | #define AMD8131_EDAC_REVISION " Ver: 1.0.0 " __DATE__ | 36 | #define AMD8131_EDAC_REVISION " Ver: 1.0.0" |
37 | #define AMD8131_EDAC_MOD_STR "amd8131_edac" | 37 | #define AMD8131_EDAC_MOD_STR "amd8131_edac" |
38 | 38 | ||
39 | /* Wrapper functions for accessing PCI configuration space */ | 39 | /* Wrapper functions for accessing PCI configuration space */ |
diff --git a/drivers/edac/amd8131_edac.h b/drivers/edac/amd8131_edac.h index 60e0d1c72dee..6f8b07131ec4 100644 --- a/drivers/edac/amd8131_edac.h +++ b/drivers/edac/amd8131_edac.h | |||
@@ -99,7 +99,7 @@ struct amd8131_dev_info { | |||
99 | 99 | ||
100 | /* | 100 | /* |
101 | * AMD8131 chipset has two pairs of PCIX Bridge and related IOAPIC | 101 | * AMD8131 chipset has two pairs of PCIX Bridge and related IOAPIC |
102 | * Controler, and ATCA-6101 has two AMD8131 chipsets, so there are | 102 | * Controller, and ATCA-6101 has two AMD8131 chipsets, so there are |
103 | * four PCIX Bridges on ATCA-6101 altogether. | 103 | * four PCIX Bridges on ATCA-6101 altogether. |
104 | * | 104 | * |
105 | * These PCIX Bridges share the same PCI Device ID and are all of | 105 | * These PCIX Bridges share the same PCI Device ID and are all of |
diff --git a/drivers/edac/cell_edac.c b/drivers/edac/cell_edac.c index c973004c002c..db1df59ae2b6 100644 --- a/drivers/edac/cell_edac.c +++ b/drivers/edac/cell_edac.c | |||
@@ -47,7 +47,7 @@ static void cell_edac_count_ce(struct mem_ctl_info *mci, int chan, u64 ar) | |||
47 | offset = address & ~PAGE_MASK; | 47 | offset = address & ~PAGE_MASK; |
48 | syndrome = (ar & 0x000000001fe00000ul) >> 21; | 48 | syndrome = (ar & 0x000000001fe00000ul) >> 21; |
49 | 49 | ||
50 | /* TODO: Decoding of the error addresss */ | 50 | /* TODO: Decoding of the error address */ |
51 | edac_mc_handle_ce(mci, csrow->first_page + pfn, offset, | 51 | edac_mc_handle_ce(mci, csrow->first_page + pfn, offset, |
52 | syndrome, 0, chan, ""); | 52 | syndrome, 0, chan, ""); |
53 | } | 53 | } |
@@ -68,7 +68,7 @@ static void cell_edac_count_ue(struct mem_ctl_info *mci, int chan, u64 ar) | |||
68 | pfn = address >> PAGE_SHIFT; | 68 | pfn = address >> PAGE_SHIFT; |
69 | offset = address & ~PAGE_MASK; | 69 | offset = address & ~PAGE_MASK; |
70 | 70 | ||
71 | /* TODO: Decoding of the error addresss */ | 71 | /* TODO: Decoding of the error address */ |
72 | edac_mc_handle_ue(mci, csrow->first_page + pfn, offset, 0, ""); | 72 | edac_mc_handle_ue(mci, csrow->first_page + pfn, offset, 0, ""); |
73 | } | 73 | } |
74 | 74 | ||
diff --git a/drivers/edac/cpc925_edac.c b/drivers/edac/cpc925_edac.c index 1609a19df495..a687a0d16962 100644 --- a/drivers/edac/cpc925_edac.c +++ b/drivers/edac/cpc925_edac.c | |||
@@ -30,7 +30,7 @@ | |||
30 | #include "edac_core.h" | 30 | #include "edac_core.h" |
31 | #include "edac_module.h" | 31 | #include "edac_module.h" |
32 | 32 | ||
33 | #define CPC925_EDAC_REVISION " Ver: 1.0.0 " __DATE__ | 33 | #define CPC925_EDAC_REVISION " Ver: 1.0.0" |
34 | #define CPC925_EDAC_MOD_STR "cpc925_edac" | 34 | #define CPC925_EDAC_MOD_STR "cpc925_edac" |
35 | 35 | ||
36 | #define cpc925_printk(level, fmt, arg...) \ | 36 | #define cpc925_printk(level, fmt, arg...) \ |
@@ -817,10 +817,11 @@ static void cpc925_del_edac_devices(void) | |||
817 | } | 817 | } |
818 | } | 818 | } |
819 | 819 | ||
820 | /* Convert current back-ground scrub rate into byte/sec bandwith */ | 820 | /* Convert current back-ground scrub rate into byte/sec bandwidth */ |
821 | static int cpc925_get_sdram_scrub_rate(struct mem_ctl_info *mci, u32 *bw) | 821 | static int cpc925_get_sdram_scrub_rate(struct mem_ctl_info *mci) |
822 | { | 822 | { |
823 | struct cpc925_mc_pdata *pdata = mci->pvt_info; | 823 | struct cpc925_mc_pdata *pdata = mci->pvt_info; |
824 | int bw; | ||
824 | u32 mscr; | 825 | u32 mscr; |
825 | u8 si; | 826 | u8 si; |
826 | 827 | ||
@@ -832,11 +833,11 @@ static int cpc925_get_sdram_scrub_rate(struct mem_ctl_info *mci, u32 *bw) | |||
832 | if (((mscr & MSCR_SCRUB_MOD_MASK) != MSCR_BACKGR_SCRUB) || | 833 | if (((mscr & MSCR_SCRUB_MOD_MASK) != MSCR_BACKGR_SCRUB) || |
833 | (si == 0)) { | 834 | (si == 0)) { |
834 | cpc925_mc_printk(mci, KERN_INFO, "Scrub mode not enabled\n"); | 835 | cpc925_mc_printk(mci, KERN_INFO, "Scrub mode not enabled\n"); |
835 | *bw = 0; | 836 | bw = 0; |
836 | } else | 837 | } else |
837 | *bw = CPC925_SCRUB_BLOCK_SIZE * 0xFA67 / si; | 838 | bw = CPC925_SCRUB_BLOCK_SIZE * 0xFA67 / si; |
838 | 839 | ||
839 | return 0; | 840 | return bw; |
840 | } | 841 | } |
841 | 842 | ||
842 | /* Return 0 for single channel; 1 for dual channel */ | 843 | /* Return 0 for single channel; 1 for dual channel */ |
diff --git a/drivers/edac/e752x_edac.c b/drivers/edac/e752x_edac.c index 073f5a06d238..1af531a11d21 100644 --- a/drivers/edac/e752x_edac.c +++ b/drivers/edac/e752x_edac.c | |||
@@ -24,7 +24,7 @@ | |||
24 | #include <linux/edac.h> | 24 | #include <linux/edac.h> |
25 | #include "edac_core.h" | 25 | #include "edac_core.h" |
26 | 26 | ||
27 | #define E752X_REVISION " Ver: 2.0.2 " __DATE__ | 27 | #define E752X_REVISION " Ver: 2.0.2" |
28 | #define EDAC_MOD_STR "e752x_edac" | 28 | #define EDAC_MOD_STR "e752x_edac" |
29 | 29 | ||
30 | static int report_non_memory_errors; | 30 | static int report_non_memory_errors; |
@@ -983,11 +983,11 @@ static int set_sdram_scrub_rate(struct mem_ctl_info *mci, u32 new_bw) | |||
983 | 983 | ||
984 | pci_write_config_word(pdev, E752X_MCHSCRB, scrubrates[i].scrubval); | 984 | pci_write_config_word(pdev, E752X_MCHSCRB, scrubrates[i].scrubval); |
985 | 985 | ||
986 | return 0; | 986 | return scrubrates[i].bandwidth; |
987 | } | 987 | } |
988 | 988 | ||
989 | /* Convert current scrub rate value into byte/sec bandwidth */ | 989 | /* Convert current scrub rate value into byte/sec bandwidth */ |
990 | static int get_sdram_scrub_rate(struct mem_ctl_info *mci, u32 *bw) | 990 | static int get_sdram_scrub_rate(struct mem_ctl_info *mci) |
991 | { | 991 | { |
992 | const struct scrubrate *scrubrates; | 992 | const struct scrubrate *scrubrates; |
993 | struct e752x_pvt *pvt = (struct e752x_pvt *) mci->pvt_info; | 993 | struct e752x_pvt *pvt = (struct e752x_pvt *) mci->pvt_info; |
@@ -1013,10 +1013,8 @@ static int get_sdram_scrub_rate(struct mem_ctl_info *mci, u32 *bw) | |||
1013 | "Invalid sdram scrub control value: 0x%x\n", scrubval); | 1013 | "Invalid sdram scrub control value: 0x%x\n", scrubval); |
1014 | return -1; | 1014 | return -1; |
1015 | } | 1015 | } |
1016 | return scrubrates[i].bandwidth; | ||
1016 | 1017 | ||
1017 | *bw = scrubrates[i].bandwidth; | ||
1018 | |||
1019 | return 0; | ||
1020 | } | 1018 | } |
1021 | 1019 | ||
1022 | /* Return 1 if dual channel mode is active. Else return 0. */ | 1020 | /* Return 1 if dual channel mode is active. Else return 0. */ |
diff --git a/drivers/edac/e7xxx_edac.c b/drivers/edac/e7xxx_edac.c index 1731d7245816..6ffb6d23281f 100644 --- a/drivers/edac/e7xxx_edac.c +++ b/drivers/edac/e7xxx_edac.c | |||
@@ -29,7 +29,7 @@ | |||
29 | #include <linux/edac.h> | 29 | #include <linux/edac.h> |
30 | #include "edac_core.h" | 30 | #include "edac_core.h" |
31 | 31 | ||
32 | #define E7XXX_REVISION " Ver: 2.0.2 " __DATE__ | 32 | #define E7XXX_REVISION " Ver: 2.0.2" |
33 | #define EDAC_MOD_STR "e7xxx_edac" | 33 | #define EDAC_MOD_STR "e7xxx_edac" |
34 | 34 | ||
35 | #define e7xxx_printk(level, fmt, arg...) \ | 35 | #define e7xxx_printk(level, fmt, arg...) \ |
diff --git a/drivers/edac/edac_core.h b/drivers/edac/edac_core.h index ce7146677e9b..55b8278bb172 100644 --- a/drivers/edac/edac_core.h +++ b/drivers/edac/edac_core.h | |||
@@ -41,9 +41,11 @@ | |||
41 | #define MC_PROC_NAME_MAX_LEN 7 | 41 | #define MC_PROC_NAME_MAX_LEN 7 |
42 | 42 | ||
43 | #if PAGE_SHIFT < 20 | 43 | #if PAGE_SHIFT < 20 |
44 | #define PAGES_TO_MiB( pages ) ( ( pages ) >> ( 20 - PAGE_SHIFT ) ) | 44 | #define PAGES_TO_MiB(pages) ((pages) >> (20 - PAGE_SHIFT)) |
45 | #define MiB_TO_PAGES(mb) ((mb) << (20 - PAGE_SHIFT)) | ||
45 | #else /* PAGE_SHIFT > 20 */ | 46 | #else /* PAGE_SHIFT > 20 */ |
46 | #define PAGES_TO_MiB( pages ) ( ( pages ) << ( PAGE_SHIFT - 20 ) ) | 47 | #define PAGES_TO_MiB(pages) ((pages) << (PAGE_SHIFT - 20)) |
48 | #define MiB_TO_PAGES(mb) ((mb) >> (PAGE_SHIFT - 20)) | ||
47 | #endif | 49 | #endif |
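
MiB_TO_PAGES() is the new inverse of PAGES_TO_MiB(); both are single shifts by |20 - PAGE_SHIFT|, with the direction flipped when pages are larger than 1 MiB. A quick self-check for the common 4 KiB page size:

    #include <assert.h>

    #define PAGE_SHIFT 12 /* 4 KiB pages */
    #define PAGES_TO_MiB(pages) ((pages) >> (20 - PAGE_SHIFT))
    #define MiB_TO_PAGES(mb)    ((mb) << (20 - PAGE_SHIFT))

    int main(void)
    {
        assert(MiB_TO_PAGES(1) == 256); /* 2^8 pages per MiB */
        assert(PAGES_TO_MiB(MiB_TO_PAGES(512)) == 512);
        return 0;
    }
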
48 | 50 | ||
49 | #define edac_printk(level, prefix, fmt, arg...) \ | 51 | #define edac_printk(level, prefix, fmt, arg...) \ |
@@ -66,9 +68,10 @@ | |||
66 | #define EDAC_PCI "PCI" | 68 | #define EDAC_PCI "PCI" |
67 | #define EDAC_DEBUG "DEBUG" | 69 | #define EDAC_DEBUG "DEBUG" |
68 | 70 | ||
71 | extern const char *edac_mem_types[]; | ||
72 | |||
69 | #ifdef CONFIG_EDAC_DEBUG | 73 | #ifdef CONFIG_EDAC_DEBUG |
70 | extern int edac_debug_level; | 74 | extern int edac_debug_level; |
71 | extern const char *edac_mem_types[]; | ||
72 | 75 | ||
73 | #define edac_debug_printk(level, fmt, arg...) \ | 76 | #define edac_debug_printk(level, fmt, arg...) \ |
74 | do { \ | 77 | do { \ |
@@ -161,7 +164,7 @@ enum mem_type { | |||
161 | /* chipset Error Detection and Correction capabilities and mode */ | 164 | /* chipset Error Detection and Correction capabilities and mode */ |
162 | enum edac_type { | 165 | enum edac_type { |
163 | EDAC_UNKNOWN = 0, /* Unknown if ECC is available */ | 166 | EDAC_UNKNOWN = 0, /* Unknown if ECC is available */ |
164 | EDAC_NONE, /* Doesnt support ECC */ | 167 | EDAC_NONE, /* Doesn't support ECC */ |
165 | EDAC_RESERVED, /* Reserved ECC type */ | 168 | EDAC_RESERVED, /* Reserved ECC type */ |
166 | EDAC_PARITY, /* Detects parity errors */ | 169 | EDAC_PARITY, /* Detects parity errors */ |
167 | EDAC_EC, /* Error Checking - no correction */ | 170 | EDAC_EC, /* Error Checking - no correction */ |
@@ -230,7 +233,7 @@ enum scrub_type { | |||
230 | * of these in parallel provides 64 bits which is common | 233 | * of these in parallel provides 64 bits which is common |
231 | * for a memory stick. | 234 | * for a memory stick. |
232 | * | 235 | * |
233 | * Memory Stick: A printed circuit board that agregates multiple | 236 | * Memory Stick: A printed circuit board that aggregates multiple |
234 | * memory devices in parallel. This is the atomic | 237 | * memory component that is purchasable by Joe consumer |
235 | * memory component that is purchasable by Joe consumer | 238 | * and loaded into a memory socket. |
236 | * and loaded into a memory socket. | 239 | * and loaded into a memory socket. |
@@ -256,7 +259,7 @@ enum scrub_type { | |||
256 | * for single channel are 64 bits, for dual channel 128 | 259 | * for single channel are 64 bits, for dual channel 128 |
257 | * bits. | 260 | * bits. |
258 | * | 261 | * |
259 | * Single-Ranked stick: A Single-ranked stick has 1 chip-select row of memmory. | 262 | * Single-Ranked stick: A Single-ranked stick has 1 chip-select row of memory. |
260 | * Motherboards commonly drive two chip-select pins to | 263 | * Motherboards commonly drive two chip-select pins to |
261 | * a memory stick. A single-ranked stick, will occupy | 264 | * a memory stick. A single-ranked stick, will occupy |
262 | * only one of those rows. The other will be unused. | 265 | * only one of those rows. The other will be unused. |
@@ -328,7 +331,7 @@ struct csrow_info { | |||
328 | 331 | ||
329 | struct mcidev_sysfs_group { | 332 | struct mcidev_sysfs_group { |
330 | const char *name; /* group name */ | 333 | const char *name; /* group name */ |
331 | struct mcidev_sysfs_attribute *mcidev_attr; /* group attributes */ | 334 | const struct mcidev_sysfs_attribute *mcidev_attr; /* group attributes */ |
332 | }; | 335 | }; |
333 | 336 | ||
334 | struct mcidev_sysfs_group_kobj { | 337 | struct mcidev_sysfs_group_kobj { |
@@ -336,7 +339,7 @@ struct mcidev_sysfs_group_kobj { | |||
336 | 339 | ||
337 | struct kobject kobj; /* kobj for the group */ | 340 | struct kobject kobj; /* kobj for the group */ |
338 | 341 | ||
339 | struct mcidev_sysfs_group *grp; /* group description table */ | 342 | const struct mcidev_sysfs_group *grp; /* group description table */ |
340 | struct mem_ctl_info *mci; /* the parent */ | 343 | struct mem_ctl_info *mci; /* the parent */ |
341 | }; | 344 | }; |
342 | 345 | ||
@@ -347,7 +350,7 @@ struct mcidev_sysfs_group_kobj { | |||
347 | struct mcidev_sysfs_attribute { | 350 | struct mcidev_sysfs_attribute { |
348 | /* It should use either attr or grp */ | 351 | /* It should use either attr or grp */ |
349 | struct attribute attr; | 352 | struct attribute attr; |
350 | struct mcidev_sysfs_group *grp; /* Points to a group of attributes */ | 353 | const struct mcidev_sysfs_group *grp; /* Points to a group of attributes */ |
351 | 354 | ||
352 | /* Ops for show/store values at the attribute - not used on group */ | 355 | /* Ops for show/store values at the attribute - not used on group */ |
353 | ssize_t (*show)(struct mem_ctl_info *,char *); | 356 | ssize_t (*show)(struct mem_ctl_info *,char *); |
@@ -382,9 +385,9 @@ struct mem_ctl_info { | |||
382 | 385 | ||
383 | /* Get the current sdram memory scrub rate from the internal | 386 | /* Get the current sdram memory scrub rate from the internal |
384 | representation and converts it to the closest matching | 387 | representation and converts it to the closest matching |
385 | bandwith in bytes/sec. | 388 | bandwidth in bytes/sec. |
386 | */ | 389 | */ |
387 | int (*get_sdram_scrub_rate) (struct mem_ctl_info * mci, u32 * bw); | 390 | int (*get_sdram_scrub_rate) (struct mem_ctl_info * mci); |
388 | 391 | ||
389 | 392 | ||
390 | /* pointer to edac checking routine */ | 393 | /* pointer to edac checking routine */ |
@@ -418,10 +421,6 @@ struct mem_ctl_info { | |||
418 | u32 ce_count; /* Total Correctable Errors for this MC */ | 421 | u32 ce_count; /* Total Correctable Errors for this MC */ |
419 | unsigned long start_time; /* mci load start time (in jiffies) */ | 422 | unsigned long start_time; /* mci load start time (in jiffies) */ |
420 | 423 | ||
421 | /* this stuff is for safe removal of mc devices from global list while | ||
422 | * NMI handlers may be traversing list | ||
423 | */ | ||
424 | struct rcu_head rcu; | ||
425 | struct completion complete; | 424 | struct completion complete; |
426 | 425 | ||
427 | /* edac sysfs device control */ | 426 | /* edac sysfs device control */ |
@@ -440,7 +439,7 @@ struct mem_ctl_info { | |||
440 | * If attributes are desired, then set to array of attributes | 439 | * If attributes are desired, then set to array of attributes |
441 | * If no attributes are desired, leave NULL | 440 | * If no attributes are desired, leave NULL |
442 | */ | 441 | */ |
443 | struct mcidev_sysfs_attribute *mc_driver_sysfs_attributes; | 442 | const struct mcidev_sysfs_attribute *mc_driver_sysfs_attributes; |
444 | 443 | ||
445 | /* work struct for this MC */ | 444 | /* work struct for this MC */ |
446 | struct delayed_work work; | 445 | struct delayed_work work; |
@@ -617,10 +616,6 @@ struct edac_device_ctl_info { | |||
617 | 616 | ||
618 | unsigned long start_time; /* edac_device load start time (jiffies) */ | 617 | unsigned long start_time; /* edac_device load start time (jiffies) */ |
619 | 618 | ||
620 | /* these are for safe removal of mc devices from global list while | ||
621 | * NMI handlers may be traversing list | ||
622 | */ | ||
623 | struct rcu_head rcu; | ||
624 | struct completion removal_complete; | 619 | struct completion removal_complete; |
625 | 620 | ||
626 | /* sysfs top name under 'edac' directory | 621 | /* sysfs top name under 'edac' directory |
@@ -719,10 +714,6 @@ struct edac_pci_ctl_info { | |||
719 | 714 | ||
720 | unsigned long start_time; /* edac_pci load start time (jiffies) */ | 715 | unsigned long start_time; /* edac_pci load start time (jiffies) */ |
721 | 716 | ||
722 | /* these are for safe removal of devices from global list while | ||
723 | * NMI handlers may be traversing list | ||
724 | */ | ||
725 | struct rcu_head rcu; | ||
726 | struct completion complete; | 717 | struct completion complete; |
727 | 718 | ||
728 | /* sysfs top name under 'edac' directory | 719 | /* sysfs top name under 'edac' directory |
@@ -810,6 +801,7 @@ extern struct mem_ctl_info *edac_mc_alloc(unsigned sz_pvt, unsigned nr_csrows, | |||
810 | extern int edac_mc_add_mc(struct mem_ctl_info *mci); | 801 | extern int edac_mc_add_mc(struct mem_ctl_info *mci); |
811 | extern void edac_mc_free(struct mem_ctl_info *mci); | 802 | extern void edac_mc_free(struct mem_ctl_info *mci); |
812 | extern struct mem_ctl_info *edac_mc_find(int idx); | 803 | extern struct mem_ctl_info *edac_mc_find(int idx); |
804 | extern struct mem_ctl_info *find_mci_by_dev(struct device *dev); | ||
813 | extern struct mem_ctl_info *edac_mc_del_mc(struct device *dev); | 805 | extern struct mem_ctl_info *edac_mc_del_mc(struct device *dev); |
814 | extern int edac_mc_find_csrow_by_page(struct mem_ctl_info *mci, | 806 | extern int edac_mc_find_csrow_by_page(struct mem_ctl_info *mci, |
815 | unsigned long page); | 807 | unsigned long page); |
@@ -819,7 +811,7 @@ extern int edac_mc_find_csrow_by_page(struct mem_ctl_info *mci, | |||
819 | * There are a limited number of error logging registers that can | 811 | * be exhausted. When all registers are exhausted and an additional |
820 | * be exhausted. When all registers are exhausted and an additional | 812 | * error occurs then an error overflow register records that an |
821 | * error occurs then an error overflow register records that an | 813 | * error occurs then an error overflow register records that an |
822 | * error occured and the type of error, but doesn't have any | 814 | * error occurred and the type of error, but doesn't have any |
823 | * further information. The ce/ue versions make for cleaner | 815 | * further information. The ce/ue versions make for cleaner |
824 | * reporting logic and function interface - reduces conditional | 816 | * reporting logic and function interface - reduces conditional |
825 | * statement clutter and extra function arguments. | 817 | * statement clutter and extra function arguments. |
diff --git a/drivers/edac/edac_device.c b/drivers/edac/edac_device.c index d5e13c94714f..c3f67437afb6 100644 --- a/drivers/edac/edac_device.c +++ b/drivers/edac/edac_device.c | |||
@@ -346,30 +346,18 @@ fail1: | |||
346 | } | 346 | } |
347 | 347 | ||
348 | /* | 348 | /* |
349 | * complete_edac_device_list_del | ||
350 | * | ||
351 | * callback function when reference count is zero | ||
352 | */ | ||
353 | static void complete_edac_device_list_del(struct rcu_head *head) | ||
354 | { | ||
355 | struct edac_device_ctl_info *edac_dev; | ||
356 | |||
357 | edac_dev = container_of(head, struct edac_device_ctl_info, rcu); | ||
358 | INIT_LIST_HEAD(&edac_dev->link); | ||
359 | } | ||
360 | |||
361 | /* | ||
362 | * del_edac_device_from_global_list | 349 | * del_edac_device_from_global_list |
363 | * | ||
364 | * remove the RCU, setup for a callback call, | ||
365 | * then wait for the callback to occur | ||
366 | */ | 350 | */ |
367 | static void del_edac_device_from_global_list(struct edac_device_ctl_info | 351 | static void del_edac_device_from_global_list(struct edac_device_ctl_info |
368 | *edac_device) | 352 | *edac_device) |
369 | { | 353 | { |
370 | list_del_rcu(&edac_device->link); | 354 | list_del_rcu(&edac_device->link); |
371 | call_rcu(&edac_device->rcu, complete_edac_device_list_del); | 355 | |
372 | rcu_barrier(); | 356 | /* these are for safe removal of devices from global list while |
357 | * NMI handlers may be traversing list | ||
358 | */ | ||
359 | synchronize_rcu(); | ||
360 | INIT_LIST_HEAD(&edac_device->link); | ||
373 | } | 361 | } |
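
The deleted call_rcu() callback plus rcu_barrier() pair and the new synchronize_rcu() give the same guarantee, that no reader (here possibly in NMI context) still holds the unlinked node before its link field is reused, but the synchronous form needs no callback plumbing; the same rewrite appears in edac_mc.c below. The shape of the pattern, with the RCU primitives stubbed so it runs standalone:

    #include <stdio.h>

    struct node { struct node *next, *prev; };

    static void list_del_rcu(struct node *n)
    {
        /* unlink; pre-existing readers may still be traversing n */
        n->prev->next = n->next;
        n->next->prev = n->prev;
    }

    static void synchronize_rcu(void) { /* stub: wait out all readers */ }

    static void INIT_LIST_HEAD(struct node *n) { n->next = n->prev = n; }

    static void del_from_global_list(struct node *n)
    {
        list_del_rcu(n);
        synchronize_rcu();  /* after this, no reader can still see n */
        INIT_LIST_HEAD(n);  /* now safe to reuse the link field */
    }

    int main(void)
    {
        struct node head = { &head, &head }, a = { &head, &head };

        head.next = head.prev = &a;
        del_from_global_list(&a);
        printf("list empty: %d\n", head.next == &head);
        return 0;
    }
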
374 | 362 | ||
375 | /* | 363 | /* |
@@ -672,7 +660,7 @@ void edac_device_handle_ce(struct edac_device_ctl_info *edac_dev, | |||
672 | block->counters.ce_count++; | 660 | block->counters.ce_count++; |
673 | } | 661 | } |
674 | 662 | ||
675 | /* Propogate the count up the 'totals' tree */ | 663 | /* Propagate the count up the 'totals' tree */ |
676 | instance->counters.ce_count++; | 664 | instance->counters.ce_count++; |
677 | edac_dev->counters.ce_count++; | 665 | edac_dev->counters.ce_count++; |
678 | 666 | ||
@@ -718,7 +706,7 @@ void edac_device_handle_ue(struct edac_device_ctl_info *edac_dev, | |||
718 | block->counters.ue_count++; | 706 | block->counters.ue_count++; |
719 | } | 707 | } |
720 | 708 | ||
721 | /* Propogate the count up the 'totals' tree */ | 709 | /* Propagate the count up the 'totals' tree */ |
722 | instance->counters.ue_count++; | 710 | instance->counters.ue_count++; |
723 | edac_dev->counters.ue_count++; | 711 | edac_dev->counters.ue_count++; |
724 | 712 | ||
diff --git a/drivers/edac/edac_device_sysfs.c b/drivers/edac/edac_device_sysfs.c index 070968178a24..86649df00285 100644 --- a/drivers/edac/edac_device_sysfs.c +++ b/drivers/edac/edac_device_sysfs.c | |||
@@ -1,7 +1,7 @@ | |||
1 | /* | 1 | /* |
2 | * file for managing the edac_device class of devices for EDAC | 2 | * file for managing the edac_device class of devices for EDAC |
3 | * | 3 | * |
4 | * (C) 2007 SoftwareBitMaker (http://www.softwarebitmaker.com) | 4 | * (C) 2007 SoftwareBitMaker |
5 | * | 5 | * |
6 | * This file may be distributed under the terms of the | 6 | * This file may be distributed under the terms of the |
7 | * GNU General Public License. | 7 | * GNU General Public License. |
@@ -13,6 +13,7 @@ | |||
13 | #include <linux/ctype.h> | 13 | #include <linux/ctype.h> |
14 | #include <linux/module.h> | 14 | #include <linux/module.h> |
15 | #include <linux/slab.h> | 15 | #include <linux/slab.h> |
16 | #include <linux/edac.h> | ||
16 | 17 | ||
17 | #include "edac_core.h" | 18 | #include "edac_core.h" |
18 | #include "edac_module.h" | 19 | #include "edac_module.h" |
@@ -235,7 +236,7 @@ int edac_device_register_sysfs_main_kobj(struct edac_device_ctl_info *edac_dev) | |||
235 | debugf1("%s()\n", __func__); | 236 | debugf1("%s()\n", __func__); |
236 | 237 | ||
237 | /* get the /sys/devices/system/edac reference */ | 238 | /* get the /sys/devices/system/edac reference */ |
238 | edac_class = edac_get_edac_class(); | 239 | edac_class = edac_get_sysfs_class(); |
239 | if (edac_class == NULL) { | 240 | if (edac_class == NULL) { |
240 | debugf1("%s() no edac_class error\n", __func__); | 241 | debugf1("%s() no edac_class error\n", __func__); |
241 | err = -ENODEV; | 242 | err = -ENODEV; |
@@ -255,7 +256,7 @@ int edac_device_register_sysfs_main_kobj(struct edac_device_ctl_info *edac_dev) | |||
255 | 256 | ||
256 | if (!try_module_get(edac_dev->owner)) { | 257 | if (!try_module_get(edac_dev->owner)) { |
257 | err = -ENODEV; | 258 | err = -ENODEV; |
258 | goto err_out; | 259 | goto err_mod_get; |
259 | } | 260 | } |
260 | 261 | ||
261 | /* register */ | 262 | /* register */ |
@@ -282,6 +283,9 @@ int edac_device_register_sysfs_main_kobj(struct edac_device_ctl_info *edac_dev) | |||
282 | err_kobj_reg: | 283 | err_kobj_reg: |
283 | module_put(edac_dev->owner); | 284 | module_put(edac_dev->owner); |
284 | 285 | ||
286 | err_mod_get: | ||
287 | edac_put_sysfs_class(); | ||
288 | |||
285 | err_out: | 289 | err_out: |
286 | return err; | 290 | return err; |
287 | } | 291 | } |
@@ -290,12 +294,11 @@ err_out: | |||
290 | * edac_device_unregister_sysfs_main_kobj: | 294 | * edac_device_unregister_sysfs_main_kobj: |
291 | * the '..../edac/<name>' kobject | 295 | * the '..../edac/<name>' kobject |
292 | */ | 296 | */ |
293 | void edac_device_unregister_sysfs_main_kobj( | 297 | void edac_device_unregister_sysfs_main_kobj(struct edac_device_ctl_info *dev) |
294 | struct edac_device_ctl_info *edac_dev) | ||
295 | { | 298 | { |
296 | debugf0("%s()\n", __func__); | 299 | debugf0("%s()\n", __func__); |
297 | debugf4("%s() name of kobject is: %s\n", | 300 | debugf4("%s() name of kobject is: %s\n", |
298 | __func__, kobject_name(&edac_dev->kobj)); | 301 | __func__, kobject_name(&dev->kobj)); |
299 | 302 | ||
300 | /* | 303 | /* |
301 | * Unregister the edac device's kobject and | 304 | * Unregister the edac device's kobject and |
@@ -304,7 +307,8 @@ void edac_device_unregister_sysfs_main_kobj( | |||
304 | * a) module_put() this module | 307 | * a) module_put() this module |
305 | * b) 'kfree' the memory | 308 | * b) 'kfree' the memory |
306 | */ | 309 | */ |
307 | kobject_put(&edac_dev->kobj); | 310 | kobject_put(&dev->kobj); |
311 | edac_put_sysfs_class(); | ||
308 | } | 312 | } |
309 | 313 | ||
310 | /* edac_dev -> instance information */ | 314 | /* edac_dev -> instance information */ |
@@ -529,7 +533,7 @@ static int edac_device_create_block(struct edac_device_ctl_info *edac_dev, | |||
529 | memset(&block->kobj, 0, sizeof(struct kobject)); | 533 | memset(&block->kobj, 0, sizeof(struct kobject)); |
530 | 534 | ||
531 | /* bump the main kobject's reference count for this controller | 535 | /* bump the main kobject's reference count for this controller |
532 | * and this instance is dependant on the main | 536 | * and this instance is dependent on the main |
533 | */ | 537 | */ |
534 | main_kobj = kobject_get(&edac_dev->kobj); | 538 | main_kobj = kobject_get(&edac_dev->kobj); |
535 | if (!main_kobj) { | 539 | if (!main_kobj) { |
@@ -631,7 +635,7 @@ static int edac_device_create_instance(struct edac_device_ctl_info *edac_dev, | |||
631 | instance->ctl = edac_dev; | 635 | instance->ctl = edac_dev; |
632 | 636 | ||
633 | /* bump the main kobject's reference count for this controller | 637 | /* bump the main kobject's reference count for this controller |
634 | * and this instance is dependant on the main | 638 | * and this instance is dependent on the main |
635 | */ | 639 | */ |
636 | main_kobj = kobject_get(&edac_dev->kobj); | 640 | main_kobj = kobject_get(&edac_dev->kobj); |
637 | if (!main_kobj) { | 641 | if (!main_kobj) { |
diff --git a/drivers/edac/edac_mc.c b/drivers/edac/edac_mc.c index 6b21e25f7a84..d69144a09043 100644 --- a/drivers/edac/edac_mc.c +++ b/drivers/edac/edac_mc.c | |||
@@ -76,6 +76,8 @@ static void edac_mc_dump_mci(struct mem_ctl_info *mci) | |||
76 | debugf3("\tpvt_info = %p\n\n", mci->pvt_info); | 76 | debugf3("\tpvt_info = %p\n\n", mci->pvt_info); |
77 | } | 77 | } |
78 | 78 | ||
79 | #endif /* CONFIG_EDAC_DEBUG */ | ||
80 | |||
79 | /* | 81 | /* |
80 | * keep those in sync with the enum mem_type | 82 | * keep those in sync with the enum mem_type |
81 | */ | 83 | */ |
@@ -100,8 +102,6 @@ const char *edac_mem_types[] = { | |||
100 | }; | 102 | }; |
101 | EXPORT_SYMBOL_GPL(edac_mem_types); | 103 | EXPORT_SYMBOL_GPL(edac_mem_types); |
102 | 104 | ||
103 | #endif /* CONFIG_EDAC_DEBUG */ | ||
104 | |||
105 | /* 'ptr' points to a possibly unaligned item X such that sizeof(X) is 'size'. | 105 | /* 'ptr' points to a possibly unaligned item X such that sizeof(X) is 'size'. |
106 | * Adjust 'ptr' so that its alignment is at least as stringent as what the | 106 | * Adjust 'ptr' so that its alignment is at least as stringent as what the |
107 | * compiler would provide for X and return the aligned result. | 107 | * compiler would provide for X and return the aligned result. |
@@ -207,6 +207,7 @@ struct mem_ctl_info *edac_mc_alloc(unsigned sz_pvt, unsigned nr_csrows, | |||
207 | } | 207 | } |
208 | 208 | ||
209 | mci->op_state = OP_ALLOC; | 209 | mci->op_state = OP_ALLOC; |
210 | INIT_LIST_HEAD(&mci->grp_kobj_list); | ||
210 | 211 | ||
211 | /* | 212 | /* |
212 | * Initialize the 'root' kobj for the edac_mc controller | 213 | * Initialize the 'root' kobj for the edac_mc controller |
@@ -234,18 +235,24 @@ EXPORT_SYMBOL_GPL(edac_mc_alloc); | |||
234 | */ | 235 | */ |
235 | void edac_mc_free(struct mem_ctl_info *mci) | 236 | void edac_mc_free(struct mem_ctl_info *mci) |
236 | { | 237 | { |
238 | debugf1("%s()\n", __func__); | ||
239 | |||
237 | edac_mc_unregister_sysfs_main_kobj(mci); | 240 | edac_mc_unregister_sysfs_main_kobj(mci); |
241 | |||
242 | /* free the mci instance memory here */ | ||
243 | kfree(mci); | ||
238 | } | 244 | } |
239 | EXPORT_SYMBOL_GPL(edac_mc_free); | 245 | EXPORT_SYMBOL_GPL(edac_mc_free); |
240 | 246 | ||
241 | 247 | ||
242 | /* | 248 | /** |
243 | * find_mci_by_dev | 249 | * find_mci_by_dev |
244 | * | 250 | * |
245 | * scan list of controllers looking for the one that manages | 251 | * scan list of controllers looking for the one that manages |
246 | * the 'dev' device | 252 | * the 'dev' device |
253 | * @dev: pointer to a struct device related with the MCI | ||
247 | */ | 254 | */ |
248 | static struct mem_ctl_info *find_mci_by_dev(struct device *dev) | 255 | struct mem_ctl_info *find_mci_by_dev(struct device *dev) |
249 | { | 256 | { |
250 | struct mem_ctl_info *mci; | 257 | struct mem_ctl_info *mci; |
251 | struct list_head *item; | 258 | struct list_head *item; |
@@ -261,6 +268,7 @@ static struct mem_ctl_info *find_mci_by_dev(struct device *dev) | |||
261 | 268 | ||
262 | return NULL; | 269 | return NULL; |
263 | } | 270 | } |
271 | EXPORT_SYMBOL_GPL(find_mci_by_dev); | ||
264 | 272 | ||
265 | /* | 273 | /* |
266 | * handler for EDAC to check if NMI type handler has asserted interrupt | 274 | * handler for EDAC to check if NMI type handler has asserted interrupt |
@@ -439,20 +447,16 @@ fail1: | |||
439 | return 1; | 447 | return 1; |
440 | } | 448 | } |
441 | 449 | ||
442 | static void complete_mc_list_del(struct rcu_head *head) | ||
443 | { | ||
444 | struct mem_ctl_info *mci; | ||
445 | |||
446 | mci = container_of(head, struct mem_ctl_info, rcu); | ||
447 | INIT_LIST_HEAD(&mci->link); | ||
448 | } | ||
449 | |||
450 | static void del_mc_from_global_list(struct mem_ctl_info *mci) | 450 | static void del_mc_from_global_list(struct mem_ctl_info *mci) |
451 | { | 451 | { |
452 | atomic_dec(&edac_handlers); | 452 | atomic_dec(&edac_handlers); |
453 | list_del_rcu(&mci->link); | 453 | list_del_rcu(&mci->link); |
454 | call_rcu(&mci->rcu, complete_mc_list_del); | 454 | |
455 | rcu_barrier(); | 455 | /* these are for safe removal of devices from global list while |
456 | * NMI handlers may be traversing list | ||
457 | */ | ||
458 | synchronize_rcu(); | ||
459 | INIT_LIST_HEAD(&mci->link); | ||
456 | } | 460 | } |
457 | 461 | ||
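This hunk (and the matching one in edac_pci.c further down) switches to a simpler removal pattern: unlink the node with list_del_rcu(), wait out every RCU read-side traversal, including the NMI-context walkers the new comment mentions, via synchronize_rcu(), and only then reinitialize the node. A minimal sketch of the pattern, with an illustrative structure:

#include <linux/list.h>
#include <linux/rcupdate.h>

struct item {
        struct list_head link;          /* on an RCU-protected global list */
};

static void remove_item(struct item *it)
{
        list_del_rcu(&it->link);        /* readers may still see the node */
        synchronize_rcu();              /* wait for all in-flight readers */
        INIT_LIST_HEAD(&it->link);      /* now safe to reuse or free */
}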
458 | /** | 462 | /** |
@@ -578,14 +582,16 @@ struct mem_ctl_info *edac_mc_del_mc(struct device *dev) | |||
578 | return NULL; | 582 | return NULL; |
579 | } | 583 | } |
580 | 584 | ||
581 | /* marking MCI offline */ | ||
582 | mci->op_state = OP_OFFLINE; | ||
583 | |||
584 | del_mc_from_global_list(mci); | 585 | del_mc_from_global_list(mci); |
585 | mutex_unlock(&mem_ctls_mutex); | 586 | mutex_unlock(&mem_ctls_mutex); |
586 | 587 | ||
587 | /* flush workq processes and remove sysfs */ | 588 | /* flush workq processes */ |
588 | edac_mc_workq_teardown(mci); | 589 | edac_mc_workq_teardown(mci); |
590 | |||
591 | /* marking MCI offline */ | ||
592 | mci->op_state = OP_OFFLINE; | ||
593 | |||
594 | /* remove from sysfs */ | ||
589 | edac_remove_sysfs_mci_device(mci); | 595 | edac_remove_sysfs_mci_device(mci); |
590 | 596 | ||
591 | edac_printk(KERN_INFO, EDAC_MC, | 597 | edac_printk(KERN_INFO, EDAC_MC, |
@@ -714,7 +720,7 @@ void edac_mc_handle_ce(struct mem_ctl_info *mci, | |||
714 | * Some MC's can remap memory so that it is still available | 720 | * Some MC's can remap memory so that it is still available |
715 | * at a different address when PCI devices map into memory. | 721 | * at a different address when PCI devices map into memory. |
716 | * MC's that can't do this lose the memory where PCI devices | 722 | * MC's that can't do this lose the memory where PCI devices |
717 | * are mapped. This mapping is MC dependant and so we call | 723 | * are mapped. This mapping is MC dependent and so we call |
718 | * back into the MC driver for it to map the MC page to | 724 | * back into the MC driver for it to map the MC page to |
719 | * a physical (CPU) page which can then be mapped to a virtual | 725 | * a physical (CPU) page which can then be mapped to a virtual |
720 | * page - which can then be scrubbed. | 726 | * page - which can then be scrubbed. |
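Put differently: before software scrubbing, the core must not assume the MC-reported page number is a CPU-physical page, so it asks the driver to translate it first. A sketch of that callback dance, assuming the ctl_page_to_phys hook with an identity fallback when the MC does no remapping:

/* Sketch: resolve an MC-reported page to a CPU-physical page. */
static unsigned long mc_page_to_phys(struct mem_ctl_info *mci,
                                     unsigned long page)
{
        if (mci->ctl_page_to_phys)
                return mci->ctl_page_to_phys(mci, page);

        return page;                    /* no remapping: MC page == CPU page */
}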
diff --git a/drivers/edac/edac_mc_sysfs.c b/drivers/edac/edac_mc_sysfs.c index 8aad94d10c0c..29ffa350bfbe 100644 --- a/drivers/edac/edac_mc_sysfs.c +++ b/drivers/edac/edac_mc_sysfs.c | |||
@@ -11,6 +11,7 @@ | |||
11 | 11 | ||
12 | #include <linux/ctype.h> | 12 | #include <linux/ctype.h> |
13 | #include <linux/slab.h> | 13 | #include <linux/slab.h> |
14 | #include <linux/edac.h> | ||
14 | #include <linux/bug.h> | 15 | #include <linux/bug.h> |
15 | 16 | ||
16 | #include "edac_core.h" | 17 | #include "edac_core.h" |
@@ -435,56 +436,54 @@ static ssize_t mci_reset_counters_store(struct mem_ctl_info *mci, | |||
435 | return count; | 436 | return count; |
436 | } | 437 | } |
437 | 438 | ||
438 | /* memory scrubbing */ | 439 | /* Memory scrubbing interface: |
440 | * | ||
441 | * A MC driver can limit the scrubbing bandwidth based on the CPU type. | ||
442 | * Therefore, ->set_sdram_scrub_rate should be made to return the actual | ||
443 | * bandwidth that is accepted or 0 when scrubbing is to be disabled. | ||
444 | * | ||
445 | * Negative value still means that an error has occurred while setting | ||
446 | * the scrub rate. | ||
447 | */ | ||
439 | static ssize_t mci_sdram_scrub_rate_store(struct mem_ctl_info *mci, | 448 | static ssize_t mci_sdram_scrub_rate_store(struct mem_ctl_info *mci, |
440 | const char *data, size_t count) | 449 | const char *data, size_t count) |
441 | { | 450 | { |
442 | unsigned long bandwidth = 0; | 451 | unsigned long bandwidth = 0; |
443 | int err; | 452 | int new_bw = 0; |
444 | 453 | ||
445 | if (!mci->set_sdram_scrub_rate) { | 454 | if (!mci->set_sdram_scrub_rate) |
446 | edac_printk(KERN_WARNING, EDAC_MC, | ||
447 | "Memory scrub rate setting not implemented!\n"); | ||
448 | return -EINVAL; | 455 | return -EINVAL; |
449 | } | ||
450 | 456 | ||
451 | if (strict_strtoul(data, 10, &bandwidth) < 0) | 457 | if (strict_strtoul(data, 10, &bandwidth) < 0) |
452 | return -EINVAL; | 458 | return -EINVAL; |
453 | 459 | ||
454 | err = mci->set_sdram_scrub_rate(mci, (u32)bandwidth); | 460 | new_bw = mci->set_sdram_scrub_rate(mci, bandwidth); |
455 | if (err) { | 461 | if (new_bw < 0) { |
456 | edac_printk(KERN_DEBUG, EDAC_MC, | 462 | edac_printk(KERN_WARNING, EDAC_MC, |
457 | "Failed setting scrub rate to %lu\n", bandwidth); | 463 | "Error setting scrub rate to: %lu\n", bandwidth); |
458 | return -EINVAL; | 464 | return -EINVAL; |
459 | } | 465 | } |
460 | else { | 466 | |
461 | edac_printk(KERN_DEBUG, EDAC_MC, | 467 | return count; |
462 | "Scrub rate set to: %lu\n", bandwidth); | ||
463 | return count; | ||
464 | } | ||
465 | } | 468 | } |
466 | 469 | ||
470 | /* | ||
471 | * ->get_sdram_scrub_rate() return value semantics same as above. | ||
472 | */ | ||
467 | static ssize_t mci_sdram_scrub_rate_show(struct mem_ctl_info *mci, char *data) | 473 | static ssize_t mci_sdram_scrub_rate_show(struct mem_ctl_info *mci, char *data) |
468 | { | 474 | { |
469 | u32 bandwidth = 0; | 475 | int bandwidth = 0; |
470 | int err; | ||
471 | 476 | ||
472 | if (!mci->get_sdram_scrub_rate) { | 477 | if (!mci->get_sdram_scrub_rate) |
473 | edac_printk(KERN_WARNING, EDAC_MC, | ||
474 | "Memory scrub rate reading not implemented\n"); | ||
475 | return -EINVAL; | 478 | return -EINVAL; |
476 | } | ||
477 | 479 | ||
478 | err = mci->get_sdram_scrub_rate(mci, &bandwidth); | 480 | bandwidth = mci->get_sdram_scrub_rate(mci); |
479 | if (err) { | 481 | if (bandwidth < 0) { |
480 | edac_printk(KERN_DEBUG, EDAC_MC, "Error reading scrub rate\n"); | 482 | edac_printk(KERN_DEBUG, EDAC_MC, "Error reading scrub rate\n"); |
481 | return err; | 483 | return bandwidth; |
482 | } | ||
483 | else { | ||
484 | edac_printk(KERN_DEBUG, EDAC_MC, | ||
485 | "Read scrub rate: %d\n", bandwidth); | ||
486 | return sprintf(data, "%d\n", bandwidth); | ||
487 | } | 484 | } |
485 | |||
486 | return sprintf(data, "%d\n", bandwidth); | ||
488 | } | 487 | } |
489 | 488 | ||
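Under the revised contract, a driver's ->set_sdram_scrub_rate() returns the bandwidth it actually programmed (0 if scrubbing ends up disabled) and a negative value only on error. A hedged sketch of a conforming driver hook; the minimum rate and the omitted register write are hypothetical:

#define EXAMPLE_MIN_BW  5900000         /* hypothetical hardware floor */

static int example_set_sdram_scrub_rate(struct mem_ctl_info *mci, u32 bw)
{
        if (bw == 0) {
                /* disable scrubbing in hardware here */
                return 0;
        }

        if (bw < EXAMPLE_MIN_BW)
                return -EINVAL;         /* cannot honor the request */

        /* program the closest supported rate, then report it back */
        return EXAMPLE_MIN_BW;
}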
490 | /* default attribute files for the MCI object */ | 489 | /* default attribute files for the MCI object */ |
@@ -630,9 +629,6 @@ static void edac_mci_control_release(struct kobject *kobj) | |||
630 | 629 | ||
631 | /* decrement the module ref count */ | 630 | /* decrement the module ref count */ |
632 | module_put(mci->owner); | 631 | module_put(mci->owner); |
633 | |||
634 | /* free the mci instance memory here */ | ||
635 | kfree(mci); | ||
636 | } | 632 | } |
637 | 633 | ||
638 | static struct kobj_type ktype_mci = { | 634 | static struct kobj_type ktype_mci = { |
@@ -712,6 +708,8 @@ fail_out: | |||
712 | */ | 708 | */ |
713 | void edac_mc_unregister_sysfs_main_kobj(struct mem_ctl_info *mci) | 709 | void edac_mc_unregister_sysfs_main_kobj(struct mem_ctl_info *mci) |
714 | { | 710 | { |
711 | debugf1("%s()\n", __func__); | ||
712 | |||
715 | /* delete the kobj from the mc_kset */ | 713 | /* delete the kobj from the mc_kset */ |
716 | kobject_put(&mci->edac_mci_kobj); | 714 | kobject_put(&mci->edac_mci_kobj); |
717 | } | 715 | } |
@@ -759,8 +757,6 @@ static void edac_inst_grp_release(struct kobject *kobj) | |||
759 | 757 | ||
760 | grp = container_of(kobj, struct mcidev_sysfs_group_kobj, kobj); | 758 | grp = container_of(kobj, struct mcidev_sysfs_group_kobj, kobj); |
761 | mci = grp->mci; | 759 | mci = grp->mci; |
762 | |||
763 | kobject_put(&mci->edac_mci_kobj); | ||
764 | } | 760 | } |
765 | 761 | ||
766 | /* Intermediate show/store table */ | 762 | /* Intermediate show/store table */ |
@@ -783,14 +779,15 @@ static struct kobj_type ktype_inst_grp = { | |||
783 | * object tree. | 779 | * object tree. |
784 | */ | 780 | */ |
785 | static int edac_create_mci_instance_attributes(struct mem_ctl_info *mci, | 781 | static int edac_create_mci_instance_attributes(struct mem_ctl_info *mci, |
786 | struct mcidev_sysfs_attribute *sysfs_attrib, | 782 | const struct mcidev_sysfs_attribute *sysfs_attrib, |
787 | struct kobject *kobj) | 783 | struct kobject *kobj) |
788 | { | 784 | { |
789 | int err; | 785 | int err; |
790 | 786 | ||
791 | debugf1("%s()\n", __func__); | 787 | debugf4("%s()\n", __func__); |
792 | 788 | ||
793 | while (sysfs_attrib) { | 789 | while (sysfs_attrib) { |
790 | debugf4("%s() sysfs_attrib = %p\n", __func__, sysfs_attrib); | ||
794 | if (sysfs_attrib->grp) { | 791 | if (sysfs_attrib->grp) { |
795 | struct mcidev_sysfs_group_kobj *grp_kobj; | 792 | struct mcidev_sysfs_group_kobj *grp_kobj; |
796 | 793 | ||
@@ -798,10 +795,9 @@ static int edac_create_mci_instance_attributes(struct mem_ctl_info *mci, | |||
798 | if (!grp_kobj) | 795 | if (!grp_kobj) |
799 | return -ENOMEM; | 796 | return -ENOMEM; |
800 | 797 | ||
801 | list_add_tail(&grp_kobj->list, &mci->grp_kobj_list); | ||
802 | |||
803 | grp_kobj->grp = sysfs_attrib->grp; | 798 | grp_kobj->grp = sysfs_attrib->grp; |
804 | grp_kobj->mci = mci; | 799 | grp_kobj->mci = mci; |
800 | list_add_tail(&grp_kobj->list, &mci->grp_kobj_list); | ||
805 | 801 | ||
806 | debugf0("%s() grp %s, mci %p\n", __func__, | 802 | debugf0("%s() grp %s, mci %p\n", __func__, |
807 | sysfs_attrib->grp->name, mci); | 803 | sysfs_attrib->grp->name, mci); |
@@ -810,26 +806,28 @@ static int edac_create_mci_instance_attributes(struct mem_ctl_info *mci, | |||
810 | &ktype_inst_grp, | 806 | &ktype_inst_grp, |
811 | &mci->edac_mci_kobj, | 807 | &mci->edac_mci_kobj, |
812 | sysfs_attrib->grp->name); | 808 | sysfs_attrib->grp->name); |
813 | if (err) | 809 | if (err < 0) { |
810 | printk(KERN_ERR "kobject_init_and_add failed: %d\n", err); | ||
814 | return err; | 811 | return err; |
815 | 812 | } | |
816 | err = edac_create_mci_instance_attributes(mci, | 813 | err = edac_create_mci_instance_attributes(mci, |
817 | grp_kobj->grp->mcidev_attr, | 814 | grp_kobj->grp->mcidev_attr, |
818 | &grp_kobj->kobj); | 815 | &grp_kobj->kobj); |
819 | 816 | ||
820 | if (err) | 817 | if (err < 0) |
821 | return err; | 818 | return err; |
822 | } else if (sysfs_attrib->attr.name) { | 819 | } else if (sysfs_attrib->attr.name) { |
823 | debugf0("%s() file %s\n", __func__, | 820 | debugf4("%s() file %s\n", __func__, |
824 | sysfs_attrib->attr.name); | 821 | sysfs_attrib->attr.name); |
825 | 822 | ||
826 | err = sysfs_create_file(kobj, &sysfs_attrib->attr); | 823 | err = sysfs_create_file(kobj, &sysfs_attrib->attr); |
824 | if (err < 0) { | ||
825 | printk(KERN_ERR "sysfs_create_file failed: %d\n", err); | ||
826 | return err; | ||
827 | } | ||
827 | } else | 828 | } else |
828 | break; | 829 | break; |
829 | 830 | ||
830 | if (err) { | ||
831 | return err; | ||
832 | } | ||
833 | sysfs_attrib++; | 831 | sysfs_attrib++; |
834 | } | 832 | } |
835 | 833 | ||
@@ -842,7 +840,7 @@ static int edac_create_mci_instance_attributes(struct mem_ctl_info *mci, | |||
842 | * directory of this mci instance. | 840 | * directory of this mci instance. |
843 | */ | 841 | */ |
844 | static void edac_remove_mci_instance_attributes(struct mem_ctl_info *mci, | 842 | static void edac_remove_mci_instance_attributes(struct mem_ctl_info *mci, |
845 | struct mcidev_sysfs_attribute *sysfs_attrib, | 843 | const struct mcidev_sysfs_attribute *sysfs_attrib, |
846 | struct kobject *kobj, int count) | 844 | struct kobject *kobj, int count) |
847 | { | 845 | { |
848 | struct mcidev_sysfs_group_kobj *grp_kobj, *tmp; | 846 | struct mcidev_sysfs_group_kobj *grp_kobj, *tmp; |
@@ -851,18 +849,29 @@ static void edac_remove_mci_instance_attributes(struct mem_ctl_info *mci, | |||
851 | 849 | ||
852 | /* | 850 | /* |
853 | * loop if there are attributes and until we hit a NULL entry | 851 | * loop if there are attributes and until we hit a NULL entry |
854 | * Remove first all the atributes | 852 | * Remove first all the attributes |
855 | */ | 853 | */ |
856 | while (sysfs_attrib) { | 854 | while (sysfs_attrib) { |
855 | debugf4("%s() sysfs_attrib = %p\n", __func__, sysfs_attrib); | ||
857 | if (sysfs_attrib->grp) { | 856 | if (sysfs_attrib->grp) { |
858 | list_for_each_entry(grp_kobj, &mci->grp_kobj_list, | 857 | debugf4("%s() seeking for group %s\n", |
859 | list) | 858 | __func__, sysfs_attrib->grp->name); |
860 | if (grp_kobj->grp == sysfs_attrib->grp) | 859 | list_for_each_entry(grp_kobj, |
860 | &mci->grp_kobj_list, list) { | ||
861 | debugf4("%s() grp_kobj->grp = %p\n", __func__, grp_kobj->grp); | ||
862 | if (grp_kobj->grp == sysfs_attrib->grp) { | ||
861 | edac_remove_mci_instance_attributes(mci, | 863 | edac_remove_mci_instance_attributes(mci, |
862 | grp_kobj->grp->mcidev_attr, | 864 | grp_kobj->grp->mcidev_attr, |
863 | &grp_kobj->kobj, count + 1); | 865 | &grp_kobj->kobj, count + 1); |
866 | debugf4("%s() group %s\n", __func__, | ||
867 | sysfs_attrib->grp->name); | ||
868 | kobject_put(&grp_kobj->kobj); | ||
869 | } | ||
870 | } | ||
871 | debugf4("%s() end of seeking for group %s\n", | ||
872 | __func__, sysfs_attrib->grp->name); | ||
864 | } else if (sysfs_attrib->attr.name) { | 873 | } else if (sysfs_attrib->attr.name) { |
865 | debugf0("%s() file %s\n", __func__, | 874 | debugf4("%s() file %s\n", __func__, |
866 | sysfs_attrib->attr.name); | 875 | sysfs_attrib->attr.name); |
867 | sysfs_remove_file(kobj, &sysfs_attrib->attr); | 876 | sysfs_remove_file(kobj, &sysfs_attrib->attr); |
868 | } else | 877 | } else |
@@ -870,15 +879,14 @@ static void edac_remove_mci_instance_attributes(struct mem_ctl_info *mci, | |||
870 | sysfs_attrib++; | 879 | sysfs_attrib++; |
871 | } | 880 | } |
872 | 881 | ||
873 | /* | 882 | /* Remove the group objects */ |
874 | * Now that all attributes got removed, it is safe to remove all groups | 883 | if (count) |
875 | */ | 884 | return; |
876 | if (!count) | 885 | list_for_each_entry_safe(grp_kobj, tmp, |
877 | list_for_each_entry_safe(grp_kobj, tmp, &mci->grp_kobj_list, | 886 | &mci->grp_kobj_list, list) { |
878 | list) { | 887 | list_del(&grp_kobj->list); |
879 | debugf0("%s() grp %s\n", __func__, grp_kobj->grp->name); | 888 | kfree(grp_kobj); |
880 | kobject_put(&grp_kobj->kobj); | 889 | } |
881 | } | ||
882 | } | 890 | } |
883 | 891 | ||
884 | 892 | ||
@@ -970,6 +978,7 @@ void edac_remove_sysfs_mci_device(struct mem_ctl_info *mci) | |||
970 | debugf0("%s()\n", __func__); | 978 | debugf0("%s()\n", __func__); |
971 | 979 | ||
972 | /* remove all csrow kobjects */ | 980 | /* remove all csrow kobjects */ |
981 | debugf4("%s() unregister this mci kobj\n", __func__); | ||
973 | for (i = 0; i < mci->nr_csrows; i++) { | 982 | for (i = 0; i < mci->nr_csrows; i++) { |
974 | if (mci->csrows[i].nr_pages > 0) { | 983 | if (mci->csrows[i].nr_pages > 0) { |
975 | debugf0("%s() unreg csrow-%d\n", __func__, i); | 984 | debugf0("%s() unreg csrow-%d\n", __func__, i); |
@@ -977,20 +986,20 @@ void edac_remove_sysfs_mci_device(struct mem_ctl_info *mci) | |||
977 | } | 986 | } |
978 | } | 987 | } |
979 | 988 | ||
980 | debugf0("%s() remove_link\n", __func__); | 989 | /* remove this mci instance's attributes */ |
990 | if (mci->mc_driver_sysfs_attributes) { | ||
991 | debugf4("%s() unregister mci private attributes\n", __func__); | ||
992 | edac_remove_mci_instance_attributes(mci, | ||
993 | mci->mc_driver_sysfs_attributes, | ||
994 | &mci->edac_mci_kobj, 0); | ||
995 | } | ||
981 | 996 | ||
982 | /* remove the symlink */ | 997 | /* remove the symlink */ |
998 | debugf4("%s() remove_link\n", __func__); | ||
983 | sysfs_remove_link(&mci->edac_mci_kobj, EDAC_DEVICE_SYMLINK); | 999 | sysfs_remove_link(&mci->edac_mci_kobj, EDAC_DEVICE_SYMLINK); |
984 | 1000 | ||
985 | debugf0("%s() remove_mci_instance\n", __func__); | ||
986 | |||
987 | /* remove this mci instance's attributes */ | ||
988 | edac_remove_mci_instance_attributes(mci, | ||
989 | mci->mc_driver_sysfs_attributes, | ||
990 | &mci->edac_mci_kobj, 0); | ||
991 | debugf0("%s() unregister this mci kobj\n", __func__); | ||
992 | |||
993 | /* unregister this instance's kobject */ | 1001 | /* unregister this instance's kobject */ |
1002 | debugf4("%s() remove_mci_instance\n", __func__); | ||
994 | kobject_put(&mci->edac_mci_kobj); | 1003 | kobject_put(&mci->edac_mci_kobj); |
995 | } | 1004 | } |
996 | 1005 | ||
@@ -1011,13 +1020,13 @@ void edac_remove_sysfs_mci_device(struct mem_ctl_info *mci) | |||
1011 | */ | 1020 | */ |
1012 | int edac_sysfs_setup_mc_kset(void) | 1021 | int edac_sysfs_setup_mc_kset(void) |
1013 | { | 1022 | { |
1014 | int err = 0; | 1023 | int err = -EINVAL; |
1015 | struct sysdev_class *edac_class; | 1024 | struct sysdev_class *edac_class; |
1016 | 1025 | ||
1017 | debugf1("%s()\n", __func__); | 1026 | debugf1("%s()\n", __func__); |
1018 | 1027 | ||
1019 | /* get the /sys/devices/system/edac class reference */ | 1028 | /* get the /sys/devices/system/edac class reference */ |
1020 | edac_class = edac_get_edac_class(); | 1029 | edac_class = edac_get_sysfs_class(); |
1021 | if (edac_class == NULL) { | 1030 | if (edac_class == NULL) { |
1022 | debugf1("%s() no edac_class error=%d\n", __func__, err); | 1031 | debugf1("%s() no edac_class error=%d\n", __func__, err); |
1023 | goto fail_out; | 1032 | goto fail_out; |
@@ -1028,15 +1037,16 @@ int edac_sysfs_setup_mc_kset(void) | |||
1028 | if (!mc_kset) { | 1037 | if (!mc_kset) { |
1029 | err = -ENOMEM; | 1038 | err = -ENOMEM; |
1030 | debugf1("%s() Failed to register '.../edac/mc'\n", __func__); | 1039 | debugf1("%s() Failed to register '.../edac/mc'\n", __func__); |
1031 | goto fail_out; | 1040 | goto fail_kset; |
1032 | } | 1041 | } |
1033 | 1042 | ||
1034 | debugf1("%s() Registered '.../edac/mc' kobject\n", __func__); | 1043 | debugf1("%s() Registered '.../edac/mc' kobject\n", __func__); |
1035 | 1044 | ||
1036 | return 0; | 1045 | return 0; |
1037 | 1046 | ||
1047 | fail_kset: | ||
1048 | edac_put_sysfs_class(); | ||
1038 | 1049 | ||
1039 | /* error unwind stack */ | ||
1040 | fail_out: | 1050 | fail_out: |
1041 | return err; | 1051 | return err; |
1042 | } | 1052 | } |
@@ -1049,5 +1059,6 @@ fail_out: | |||
1049 | void edac_sysfs_teardown_mc_kset(void) | 1059 | void edac_sysfs_teardown_mc_kset(void) |
1050 | { | 1060 | { |
1051 | kset_unregister(mc_kset); | 1061 | kset_unregister(mc_kset); |
1062 | edac_put_sysfs_class(); | ||
1052 | } | 1063 | } |
1053 | 1064 | ||
diff --git a/drivers/edac/edac_mce_amd.c b/drivers/edac/edac_mce_amd.c deleted file mode 100644 index 9014df6f605d..000000000000 --- a/drivers/edac/edac_mce_amd.c +++ /dev/null | |||
@@ -1,452 +0,0 @@ | |||
1 | #include <linux/module.h> | ||
2 | #include "edac_mce_amd.h" | ||
3 | |||
4 | static bool report_gart_errors; | ||
5 | static void (*nb_bus_decoder)(int node_id, struct err_regs *regs); | ||
6 | |||
7 | void amd_report_gart_errors(bool v) | ||
8 | { | ||
9 | report_gart_errors = v; | ||
10 | } | ||
11 | EXPORT_SYMBOL_GPL(amd_report_gart_errors); | ||
12 | |||
13 | void amd_register_ecc_decoder(void (*f)(int, struct err_regs *)) | ||
14 | { | ||
15 | nb_bus_decoder = f; | ||
16 | } | ||
17 | EXPORT_SYMBOL_GPL(amd_register_ecc_decoder); | ||
18 | |||
19 | void amd_unregister_ecc_decoder(void (*f)(int, struct err_regs *)) | ||
20 | { | ||
21 | if (nb_bus_decoder) { | ||
22 | WARN_ON(nb_bus_decoder != f); | ||
23 | |||
24 | nb_bus_decoder = NULL; | ||
25 | } | ||
26 | } | ||
27 | EXPORT_SYMBOL_GPL(amd_unregister_ecc_decoder); | ||
28 | |||
29 | /* | ||
30 | * string representation for the different MCA reported error types, see F3x48 | ||
31 | * or MSR0000_0411. | ||
32 | */ | ||
33 | const char *tt_msgs[] = { /* transaction type */ | ||
34 | "instruction", | ||
35 | "data", | ||
36 | "generic", | ||
37 | "reserved" | ||
38 | }; | ||
39 | EXPORT_SYMBOL_GPL(tt_msgs); | ||
40 | |||
41 | const char *ll_msgs[] = { /* cache level */ | ||
42 | "L0", | ||
43 | "L1", | ||
44 | "L2", | ||
45 | "L3/generic" | ||
46 | }; | ||
47 | EXPORT_SYMBOL_GPL(ll_msgs); | ||
48 | |||
49 | const char *rrrr_msgs[] = { | ||
50 | "generic", | ||
51 | "generic read", | ||
52 | "generic write", | ||
53 | "data read", | ||
54 | "data write", | ||
55 | "inst fetch", | ||
56 | "prefetch", | ||
57 | "evict", | ||
58 | "snoop", | ||
59 | "reserved RRRR= 9", | ||
60 | "reserved RRRR= 10", | ||
61 | "reserved RRRR= 11", | ||
62 | "reserved RRRR= 12", | ||
63 | "reserved RRRR= 13", | ||
64 | "reserved RRRR= 14", | ||
65 | "reserved RRRR= 15" | ||
66 | }; | ||
67 | EXPORT_SYMBOL_GPL(rrrr_msgs); | ||
68 | |||
69 | const char *pp_msgs[] = { /* participating processor */ | ||
70 | "local node originated (SRC)", | ||
71 | "local node responded to request (RES)", | ||
72 | "local node observed as 3rd party (OBS)", | ||
73 | "generic" | ||
74 | }; | ||
75 | EXPORT_SYMBOL_GPL(pp_msgs); | ||
76 | |||
77 | const char *to_msgs[] = { | ||
78 | "no timeout", | ||
79 | "timed out" | ||
80 | }; | ||
81 | EXPORT_SYMBOL_GPL(to_msgs); | ||
82 | |||
83 | const char *ii_msgs[] = { /* memory or i/o */ | ||
84 | "mem access", | ||
85 | "reserved", | ||
86 | "i/o access", | ||
87 | "generic" | ||
88 | }; | ||
89 | EXPORT_SYMBOL_GPL(ii_msgs); | ||
90 | |||
91 | /* | ||
92 | * Map the 4 or 5 (family-specific) bits of Extended Error code to the | ||
93 | * string table. | ||
94 | */ | ||
95 | const char *ext_msgs[] = { | ||
96 | "K8 ECC error", /* 0_0000b */ | ||
97 | "CRC error on link", /* 0_0001b */ | ||
98 | "Sync error packets on link", /* 0_0010b */ | ||
99 | "Master Abort during link operation", /* 0_0011b */ | ||
100 | "Target Abort during link operation", /* 0_0100b */ | ||
101 | "Invalid GART PTE entry during table walk", /* 0_0101b */ | ||
102 | "Unsupported atomic RMW command received", /* 0_0110b */ | ||
103 | "WDT error: NB transaction timeout", /* 0_0111b */ | ||
104 | "ECC/ChipKill ECC error", /* 0_1000b */ | ||
105 | "SVM DEV Error", /* 0_1001b */ | ||
106 | "Link Data error", /* 0_1010b */ | ||
107 | "Link/L3/Probe Filter Protocol error", /* 0_1011b */ | ||
108 | "NB Internal Arrays Parity error", /* 0_1100b */ | ||
109 | "DRAM Address/Control Parity error", /* 0_1101b */ | ||
110 | "Link Transmission error", /* 0_1110b */ | ||
111 | "GART/DEV Table Walk Data error" /* 0_1111b */ | ||
112 | "Res 0x100 error", /* 1_0000b */ | ||
113 | "Res 0x101 error", /* 1_0001b */ | ||
114 | "Res 0x102 error", /* 1_0010b */ | ||
115 | "Res 0x103 error", /* 1_0011b */ | ||
116 | "Res 0x104 error", /* 1_0100b */ | ||
117 | "Res 0x105 error", /* 1_0101b */ | ||
118 | "Res 0x106 error", /* 1_0110b */ | ||
119 | "Res 0x107 error", /* 1_0111b */ | ||
120 | "Res 0x108 error", /* 1_1000b */ | ||
121 | "Res 0x109 error", /* 1_1001b */ | ||
122 | "Res 0x10A error", /* 1_1010b */ | ||
123 | "Res 0x10B error", /* 1_1011b */ | ||
124 | "ECC error in L3 Cache Data", /* 1_1100b */ | ||
125 | "L3 Cache Tag error", /* 1_1101b */ | ||
126 | "L3 Cache LRU Parity error", /* 1_1110b */ | ||
127 | "Probe Filter error" /* 1_1111b */ | ||
128 | }; | ||
129 | EXPORT_SYMBOL_GPL(ext_msgs); | ||
130 | |||
131 | static void amd_decode_dc_mce(u64 mc0_status) | ||
132 | { | ||
133 | u32 ec = mc0_status & 0xffff; | ||
134 | u32 xec = (mc0_status >> 16) & 0xf; | ||
135 | |||
136 | pr_emerg("Data Cache Error"); | ||
137 | |||
138 | if (xec == 1 && TLB_ERROR(ec)) | ||
139 | pr_cont(": %s TLB multimatch.\n", LL_MSG(ec)); | ||
140 | else if (xec == 0) { | ||
141 | if (mc0_status & (1ULL << 40)) | ||
142 | pr_cont(" during Data Scrub.\n"); | ||
143 | else if (TLB_ERROR(ec)) | ||
144 | pr_cont(": %s TLB parity error.\n", LL_MSG(ec)); | ||
145 | else if (MEM_ERROR(ec)) { | ||
146 | u8 ll = ec & 0x3; | ||
147 | u8 tt = (ec >> 2) & 0x3; | ||
148 | u8 rrrr = (ec >> 4) & 0xf; | ||
149 | |||
150 | /* see F10h BKDG (31116), Table 92. */ | ||
151 | if (ll == 0x1) { | ||
152 | if (tt != 0x1) | ||
153 | goto wrong_dc_mce; | ||
154 | |||
155 | pr_cont(": Data/Tag %s error.\n", RRRR_MSG(ec)); | ||
156 | |||
157 | } else if (ll == 0x2 && rrrr == 0x3) | ||
158 | pr_cont(" during L1 linefill from L2.\n"); | ||
159 | else | ||
160 | goto wrong_dc_mce; | ||
161 | } else if (BUS_ERROR(ec) && boot_cpu_data.x86 == 0xf) | ||
162 | pr_cont(" during system linefill.\n"); | ||
163 | else | ||
164 | goto wrong_dc_mce; | ||
165 | } else | ||
166 | goto wrong_dc_mce; | ||
167 | |||
168 | return; | ||
169 | |||
170 | wrong_dc_mce: | ||
171 | pr_warning("Corrupted DC MCE info?\n"); | ||
172 | } | ||
173 | |||
174 | static void amd_decode_ic_mce(u64 mc1_status) | ||
175 | { | ||
176 | u32 ec = mc1_status & 0xffff; | ||
177 | u32 xec = (mc1_status >> 16) & 0xf; | ||
178 | |||
179 | pr_emerg("Instruction Cache Error"); | ||
180 | |||
181 | if (xec == 1 && TLB_ERROR(ec)) | ||
182 | pr_cont(": %s TLB multimatch.\n", LL_MSG(ec)); | ||
183 | else if (xec == 0) { | ||
184 | if (TLB_ERROR(ec)) | ||
185 | pr_cont(": %s TLB Parity error.\n", LL_MSG(ec)); | ||
186 | else if (BUS_ERROR(ec)) { | ||
187 | if (boot_cpu_data.x86 == 0xf && | ||
188 | (mc1_status & (1ULL << 58))) | ||
189 | pr_cont(" during system linefill.\n"); | ||
190 | else | ||
191 | pr_cont(" during attempted NB data read.\n"); | ||
192 | } else if (MEM_ERROR(ec)) { | ||
193 | u8 ll = ec & 0x3; | ||
194 | u8 rrrr = (ec >> 4) & 0xf; | ||
195 | |||
196 | if (ll == 0x2) | ||
197 | pr_cont(" during a linefill from L2.\n"); | ||
198 | else if (ll == 0x1) { | ||
199 | |||
200 | switch (rrrr) { | ||
201 | case 0x5: | ||
202 | pr_cont(": Parity error during " | ||
203 | "data load.\n"); | ||
204 | break; | ||
205 | |||
206 | case 0x7: | ||
207 | pr_cont(": Copyback Parity/Victim" | ||
208 | " error.\n"); | ||
209 | break; | ||
210 | |||
211 | case 0x8: | ||
212 | pr_cont(": Tag Snoop error.\n"); | ||
213 | break; | ||
214 | |||
215 | default: | ||
216 | goto wrong_ic_mce; | ||
217 | break; | ||
218 | } | ||
219 | } | ||
220 | } else | ||
221 | goto wrong_ic_mce; | ||
222 | } else | ||
223 | goto wrong_ic_mce; | ||
224 | |||
225 | return; | ||
226 | |||
227 | wrong_ic_mce: | ||
228 | pr_warning("Corrupted IC MCE info?\n"); | ||
229 | } | ||
230 | |||
231 | static void amd_decode_bu_mce(u64 mc2_status) | ||
232 | { | ||
233 | u32 ec = mc2_status & 0xffff; | ||
234 | u32 xec = (mc2_status >> 16) & 0xf; | ||
235 | |||
236 | pr_emerg("Bus Unit Error"); | ||
237 | |||
238 | if (xec == 0x1) | ||
239 | pr_cont(" in the write data buffers.\n"); | ||
240 | else if (xec == 0x3) | ||
241 | pr_cont(" in the victim data buffers.\n"); | ||
242 | else if (xec == 0x2 && MEM_ERROR(ec)) | ||
243 | pr_cont(": %s error in the L2 cache tags.\n", RRRR_MSG(ec)); | ||
244 | else if (xec == 0x0) { | ||
245 | if (TLB_ERROR(ec)) | ||
246 | pr_cont(": %s error in a Page Descriptor Cache or " | ||
247 | "Guest TLB.\n", TT_MSG(ec)); | ||
248 | else if (BUS_ERROR(ec)) | ||
249 | pr_cont(": %s/ECC error in data read from NB: %s.\n", | ||
250 | RRRR_MSG(ec), PP_MSG(ec)); | ||
251 | else if (MEM_ERROR(ec)) { | ||
252 | u8 rrrr = (ec >> 4) & 0xf; | ||
253 | |||
254 | if (rrrr >= 0x7) | ||
255 | pr_cont(": %s error during data copyback.\n", | ||
256 | RRRR_MSG(ec)); | ||
257 | else if (rrrr <= 0x1) | ||
258 | pr_cont(": %s parity/ECC error during data " | ||
259 | "access from L2.\n", RRRR_MSG(ec)); | ||
260 | else | ||
261 | goto wrong_bu_mce; | ||
262 | } else | ||
263 | goto wrong_bu_mce; | ||
264 | } else | ||
265 | goto wrong_bu_mce; | ||
266 | |||
267 | return; | ||
268 | |||
269 | wrong_bu_mce: | ||
270 | pr_warning("Corrupted BU MCE info?\n"); | ||
271 | } | ||
272 | |||
273 | static void amd_decode_ls_mce(u64 mc3_status) | ||
274 | { | ||
275 | u32 ec = mc3_status & 0xffff; | ||
276 | u32 xec = (mc3_status >> 16) & 0xf; | ||
277 | |||
278 | pr_emerg("Load Store Error"); | ||
279 | |||
280 | if (xec == 0x0) { | ||
281 | u8 rrrr = (ec >> 4) & 0xf; | ||
282 | |||
283 | if (!BUS_ERROR(ec) || (rrrr != 0x3 && rrrr != 0x4)) | ||
284 | goto wrong_ls_mce; | ||
285 | |||
286 | pr_cont(" during %s.\n", RRRR_MSG(ec)); | ||
287 | } | ||
288 | return; | ||
289 | |||
290 | wrong_ls_mce: | ||
291 | pr_warning("Corrupted LS MCE info?\n"); | ||
292 | } | ||
293 | |||
294 | void amd_decode_nb_mce(int node_id, struct err_regs *regs, int handle_errors) | ||
295 | { | ||
296 | u32 ec = ERROR_CODE(regs->nbsl); | ||
297 | |||
298 | if (!handle_errors) | ||
299 | return; | ||
300 | |||
301 | /* | ||
302 | * GART TLB error reporting is disabled by default. Bail out early. | ||
303 | */ | ||
304 | if (TLB_ERROR(ec) && !report_gart_errors) | ||
305 | return; | ||
306 | |||
307 | pr_emerg("Northbridge Error, node %d", node_id); | ||
308 | |||
309 | /* | ||
310 | * F10h, revD can disable ErrCpu[3:0] so check that first and also the | ||
311 | * value encoding has changed so interpret those differently | ||
312 | */ | ||
313 | if ((boot_cpu_data.x86 == 0x10) && | ||
314 | (boot_cpu_data.x86_model > 7)) { | ||
315 | if (regs->nbsh & K8_NBSH_ERR_CPU_VAL) | ||
316 | pr_cont(", core: %u\n", (u8)(regs->nbsh & 0xf)); | ||
317 | } else { | ||
318 | u8 assoc_cpus = regs->nbsh & 0xf; | ||
319 | |||
320 | if (assoc_cpus > 0) | ||
321 | pr_cont(", core: %d", fls(assoc_cpus) - 1); | ||
322 | |||
323 | pr_cont("\n"); | ||
324 | } | ||
325 | |||
326 | pr_emerg("%s.\n", EXT_ERR_MSG(regs->nbsl)); | ||
327 | |||
328 | if (BUS_ERROR(ec) && nb_bus_decoder) | ||
329 | nb_bus_decoder(node_id, regs); | ||
330 | } | ||
331 | EXPORT_SYMBOL_GPL(amd_decode_nb_mce); | ||
332 | |||
333 | static void amd_decode_fr_mce(u64 mc5_status) | ||
334 | { | ||
335 | /* we have only one error signature so match all fields at once. */ | ||
336 | if ((mc5_status & 0xffff) == 0x0f0f) | ||
337 | pr_emerg(" FR Error: CPU Watchdog timer expire.\n"); | ||
338 | else | ||
339 | pr_warning("Corrupted FR MCE info?\n"); | ||
340 | } | ||
341 | |||
342 | static inline void amd_decode_err_code(unsigned int ec) | ||
343 | { | ||
344 | if (TLB_ERROR(ec)) { | ||
345 | pr_emerg("Transaction: %s, Cache Level %s\n", | ||
346 | TT_MSG(ec), LL_MSG(ec)); | ||
347 | } else if (MEM_ERROR(ec)) { | ||
348 | pr_emerg("Transaction: %s, Type: %s, Cache Level: %s", | ||
349 | RRRR_MSG(ec), TT_MSG(ec), LL_MSG(ec)); | ||
350 | } else if (BUS_ERROR(ec)) { | ||
351 | pr_emerg("Transaction type: %s(%s), %s, Cache Level: %s, " | ||
352 | "Participating Processor: %s\n", | ||
353 | RRRR_MSG(ec), II_MSG(ec), TO_MSG(ec), LL_MSG(ec), | ||
354 | PP_MSG(ec)); | ||
355 | } else | ||
356 | pr_warning("Huh? Unknown MCE error 0x%x\n", ec); | ||
357 | } | ||
358 | |||
359 | static int amd_decode_mce(struct notifier_block *nb, unsigned long val, | ||
360 | void *data) | ||
361 | { | ||
362 | struct mce *m = (struct mce *)data; | ||
363 | struct err_regs regs; | ||
364 | int node, ecc; | ||
365 | |||
366 | pr_emerg("MC%d_STATUS: ", m->bank); | ||
367 | |||
368 | pr_cont("%sorrected error, other errors lost: %s, " | ||
369 | "CPU context corrupt: %s", | ||
370 | ((m->status & MCI_STATUS_UC) ? "Unc" : "C"), | ||
371 | ((m->status & MCI_STATUS_OVER) ? "yes" : "no"), | ||
372 | ((m->status & MCI_STATUS_PCC) ? "yes" : "no")); | ||
373 | |||
374 | /* do the two bits[14:13] together */ | ||
375 | ecc = (m->status >> 45) & 0x3; | ||
376 | if (ecc) | ||
377 | pr_cont(", %sECC Error", ((ecc == 2) ? "C" : "U")); | ||
378 | |||
379 | pr_cont("\n"); | ||
380 | |||
381 | switch (m->bank) { | ||
382 | case 0: | ||
383 | amd_decode_dc_mce(m->status); | ||
384 | break; | ||
385 | |||
386 | case 1: | ||
387 | amd_decode_ic_mce(m->status); | ||
388 | break; | ||
389 | |||
390 | case 2: | ||
391 | amd_decode_bu_mce(m->status); | ||
392 | break; | ||
393 | |||
394 | case 3: | ||
395 | amd_decode_ls_mce(m->status); | ||
396 | break; | ||
397 | |||
398 | case 4: | ||
399 | regs.nbsl = (u32) m->status; | ||
400 | regs.nbsh = (u32)(m->status >> 32); | ||
401 | regs.nbeal = (u32) m->addr; | ||
402 | regs.nbeah = (u32)(m->addr >> 32); | ||
403 | node = amd_get_nb_id(m->extcpu); | ||
404 | |||
405 | amd_decode_nb_mce(node, ®s, 1); | ||
406 | break; | ||
407 | |||
408 | case 5: | ||
409 | amd_decode_fr_mce(m->status); | ||
410 | break; | ||
411 | |||
412 | default: | ||
413 | break; | ||
414 | } | ||
415 | |||
416 | amd_decode_err_code(m->status & 0xffff); | ||
417 | |||
418 | return NOTIFY_STOP; | ||
419 | } | ||
420 | |||
421 | static struct notifier_block amd_mce_dec_nb = { | ||
422 | .notifier_call = amd_decode_mce, | ||
423 | }; | ||
424 | |||
425 | static int __init mce_amd_init(void) | ||
426 | { | ||
427 | /* | ||
428 | * We can decode MCEs for K8, F10h and F11h CPUs: | ||
429 | */ | ||
430 | if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD) | ||
431 | return 0; | ||
432 | |||
433 | if (boot_cpu_data.x86 < 0xf || boot_cpu_data.x86 > 0x11) | ||
434 | return 0; | ||
435 | |||
436 | atomic_notifier_chain_register(&x86_mce_decoder_chain, &amd_mce_dec_nb); | ||
437 | |||
438 | return 0; | ||
439 | } | ||
440 | early_initcall(mce_amd_init); | ||
441 | |||
442 | #ifdef MODULE | ||
443 | static void __exit mce_amd_exit(void) | ||
444 | { | ||
445 | atomic_notifier_chain_unregister(&x86_mce_decoder_chain, &amd_mce_dec_nb); | ||
446 | } | ||
447 | |||
448 | MODULE_DESCRIPTION("AMD MCE decoder"); | ||
449 | MODULE_ALIAS("edac-mce-amd"); | ||
450 | MODULE_LICENSE("GPL"); | ||
451 | module_exit(mce_amd_exit); | ||
452 | #endif | ||
diff --git a/drivers/edac/edac_mce_amd.h b/drivers/edac/edac_mce_amd.h deleted file mode 100644 index df23ee065f79..000000000000 --- a/drivers/edac/edac_mce_amd.h +++ /dev/null | |||
@@ -1,69 +0,0 @@ | |||
1 | #ifndef _EDAC_MCE_AMD_H | ||
2 | #define _EDAC_MCE_AMD_H | ||
3 | |||
4 | #include <asm/mce.h> | ||
5 | |||
6 | #define ERROR_CODE(x) ((x) & 0xffff) | ||
7 | #define EXT_ERROR_CODE(x) (((x) >> 16) & 0x1f) | ||
8 | #define EXT_ERR_MSG(x) ext_msgs[EXT_ERROR_CODE(x)] | ||
9 | |||
10 | #define LOW_SYNDROME(x) (((x) >> 15) & 0xff) | ||
11 | #define HIGH_SYNDROME(x) (((x) >> 24) & 0xff) | ||
12 | |||
13 | #define TLB_ERROR(x) (((x) & 0xFFF0) == 0x0010) | ||
14 | #define MEM_ERROR(x) (((x) & 0xFF00) == 0x0100) | ||
15 | #define BUS_ERROR(x) (((x) & 0xF800) == 0x0800) | ||
16 | |||
17 | #define TT(x) (((x) >> 2) & 0x3) | ||
18 | #define TT_MSG(x) tt_msgs[TT(x)] | ||
19 | #define II(x) (((x) >> 2) & 0x3) | ||
20 | #define II_MSG(x) ii_msgs[II(x)] | ||
21 | #define LL(x) (((x) >> 0) & 0x3) | ||
22 | #define LL_MSG(x) ll_msgs[LL(x)] | ||
23 | #define RRRR(x) (((x) >> 4) & 0xf) | ||
24 | #define RRRR_MSG(x) rrrr_msgs[RRRR(x)] | ||
25 | #define TO(x) (((x) >> 8) & 0x1) | ||
26 | #define TO_MSG(x) to_msgs[TO(x)] | ||
27 | #define PP(x) (((x) >> 9) & 0x3) | ||
28 | #define PP_MSG(x) pp_msgs[PP(x)] | ||
29 | |||
30 | #define K8_NBSH 0x4C | ||
31 | |||
32 | #define K8_NBSH_VALID_BIT BIT(31) | ||
33 | #define K8_NBSH_OVERFLOW BIT(30) | ||
34 | #define K8_NBSH_UC_ERR BIT(29) | ||
35 | #define K8_NBSH_ERR_EN BIT(28) | ||
36 | #define K8_NBSH_MISCV BIT(27) | ||
37 | #define K8_NBSH_VALID_ERROR_ADDR BIT(26) | ||
38 | #define K8_NBSH_PCC BIT(25) | ||
39 | #define K8_NBSH_ERR_CPU_VAL BIT(24) | ||
40 | #define K8_NBSH_CECC BIT(14) | ||
41 | #define K8_NBSH_UECC BIT(13) | ||
42 | #define K8_NBSH_ERR_SCRUBER BIT(8) | ||
43 | |||
44 | extern const char *tt_msgs[]; | ||
45 | extern const char *ll_msgs[]; | ||
46 | extern const char *rrrr_msgs[]; | ||
47 | extern const char *pp_msgs[]; | ||
48 | extern const char *to_msgs[]; | ||
49 | extern const char *ii_msgs[]; | ||
50 | extern const char *ext_msgs[]; | ||
51 | |||
52 | /* | ||
53 | * relevant NB regs | ||
54 | */ | ||
55 | struct err_regs { | ||
56 | u32 nbcfg; | ||
57 | u32 nbsh; | ||
58 | u32 nbsl; | ||
59 | u32 nbeah; | ||
60 | u32 nbeal; | ||
61 | }; | ||
62 | |||
63 | |||
64 | void amd_report_gart_errors(bool); | ||
65 | void amd_register_ecc_decoder(void (*f)(int, struct err_regs *)); | ||
66 | void amd_unregister_ecc_decoder(void (*f)(int, struct err_regs *)); | ||
67 | void amd_decode_nb_mce(int, struct err_regs *, int); | ||
68 | |||
69 | #endif /* _EDAC_MCE_AMD_H */ | ||
diff --git a/drivers/edac/edac_module.c b/drivers/edac/edac_module.c index 7e1374afd967..5ddaa86d6a6e 100644 --- a/drivers/edac/edac_module.c +++ b/drivers/edac/edac_module.c | |||
@@ -15,7 +15,7 @@ | |||
15 | #include "edac_core.h" | 15 | #include "edac_core.h" |
16 | #include "edac_module.h" | 16 | #include "edac_module.h" |
17 | 17 | ||
18 | #define EDAC_VERSION "Ver: 2.1.0 " __DATE__ | 18 | #define EDAC_VERSION "Ver: 2.1.0" |
19 | 19 | ||
20 | #ifdef CONFIG_EDAC_DEBUG | 20 | #ifdef CONFIG_EDAC_DEBUG |
21 | /* Values of 0 to 4 will generate output */ | 21 | /* Values of 0 to 4 will generate output */ |
@@ -27,15 +27,6 @@ EXPORT_SYMBOL_GPL(edac_debug_level); | |||
27 | struct workqueue_struct *edac_workqueue; | 27 | struct workqueue_struct *edac_workqueue; |
28 | 28 | ||
29 | /* | 29 | /* |
30 | * sysfs object: /sys/devices/system/edac | ||
31 | * need to export to other files in this modules | ||
32 | */ | ||
33 | static struct sysdev_class edac_class = { | ||
34 | .name = "edac", | ||
35 | }; | ||
36 | static int edac_class_valid; | ||
37 | |||
38 | /* | ||
39 | * edac_op_state_to_string() | 30 | * edac_op_state_to_string() |
40 | */ | 31 | */ |
41 | char *edac_op_state_to_string(int opstate) | 32 | char *edac_op_state_to_string(int opstate) |
@@ -55,60 +46,6 @@ char *edac_op_state_to_string(int opstate) | |||
55 | } | 46 | } |
56 | 47 | ||
57 | /* | 48 | /* |
58 | * edac_get_edac_class() | ||
59 | * | ||
60 | * return pointer to the edac class of 'edac' | ||
61 | */ | ||
62 | struct sysdev_class *edac_get_edac_class(void) | ||
63 | { | ||
64 | struct sysdev_class *classptr = NULL; | ||
65 | |||
66 | if (edac_class_valid) | ||
67 | classptr = &edac_class; | ||
68 | |||
69 | return classptr; | ||
70 | } | ||
71 | |||
72 | /* | ||
73 | * edac_register_sysfs_edac_name() | ||
74 | * | ||
75 | * register the 'edac' into /sys/devices/system | ||
76 | * | ||
77 | * return: | ||
78 | * 0 success | ||
79 | * !0 error | ||
80 | */ | ||
81 | static int edac_register_sysfs_edac_name(void) | ||
82 | { | ||
83 | int err; | ||
84 | |||
85 | /* create the /sys/devices/system/edac directory */ | ||
86 | err = sysdev_class_register(&edac_class); | ||
87 | |||
88 | if (err) { | ||
89 | debugf1("%s() error=%d\n", __func__, err); | ||
90 | return err; | ||
91 | } | ||
92 | |||
93 | edac_class_valid = 1; | ||
94 | return 0; | ||
95 | } | ||
96 | |||
97 | /* | ||
98 | * sysdev_class_unregister() | ||
99 | * | ||
100 | * unregister the 'edac' from /sys/devices/system | ||
101 | */ | ||
102 | static void edac_unregister_sysfs_edac_name(void) | ||
103 | { | ||
104 | /* only if currently registered, then unregister it */ | ||
105 | if (edac_class_valid) | ||
106 | sysdev_class_unregister(&edac_class); | ||
107 | |||
108 | edac_class_valid = 0; | ||
109 | } | ||
110 | |||
111 | /* | ||
112 | * edac_workqueue_setup | 49 | * edac_workqueue_setup |
113 | * initialize the edac work queue for polling operations | 50 | * initialize the edac work queue for polling operations |
114 | */ | 51 | */ |
@@ -154,21 +91,11 @@ static int __init edac_init(void) | |||
154 | edac_pci_clear_parity_errors(); | 91 | edac_pci_clear_parity_errors(); |
155 | 92 | ||
156 | /* | 93 | /* |
157 | * perform the registration of the /sys/devices/system/edac class object | ||
158 | */ | ||
159 | if (edac_register_sysfs_edac_name()) { | ||
160 | edac_printk(KERN_ERR, EDAC_MC, | ||
161 | "Error initializing 'edac' kobject\n"); | ||
162 | err = -ENODEV; | ||
163 | goto error; | ||
164 | } | ||
165 | |||
166 | /* | ||
167 | * now set up the mc_kset under the edac class object | 94 | * now set up the mc_kset under the edac class object |
168 | */ | 95 | */ |
169 | err = edac_sysfs_setup_mc_kset(); | 96 | err = edac_sysfs_setup_mc_kset(); |
170 | if (err) | 97 | if (err) |
171 | goto sysfs_setup_fail; | 98 | goto error; |
172 | 99 | ||
173 | /* Setup/Initialize the workq for this core */ | 100 | /* Setup/Initialize the workq for this core */ |
174 | err = edac_workqueue_setup(); | 101 | err = edac_workqueue_setup(); |
@@ -183,9 +110,6 @@ static int __init edac_init(void) | |||
183 | workq_fail: | 110 | workq_fail: |
184 | edac_sysfs_teardown_mc_kset(); | 111 | edac_sysfs_teardown_mc_kset(); |
185 | 112 | ||
186 | sysfs_setup_fail: | ||
187 | edac_unregister_sysfs_edac_name(); | ||
188 | |||
189 | error: | 113 | error: |
190 | return err; | 114 | return err; |
191 | } | 115 | } |
@@ -201,7 +125,6 @@ static void __exit edac_exit(void) | |||
201 | /* tear down the various subsystems */ | 125 | /* tear down the various subsystems */ |
202 | edac_workqueue_teardown(); | 126 | edac_workqueue_teardown(); |
203 | edac_sysfs_teardown_mc_kset(); | 127 | edac_sysfs_teardown_mc_kset(); |
204 | edac_unregister_sysfs_edac_name(); | ||
205 | } | 128 | } |
206 | 129 | ||
207 | /* | 130 | /* |
diff --git a/drivers/edac/edac_module.h b/drivers/edac/edac_module.h index 233d4798c3aa..17aabb7b90ec 100644 --- a/drivers/edac/edac_module.h +++ b/drivers/edac/edac_module.h | |||
@@ -42,7 +42,6 @@ extern void edac_device_unregister_sysfs_main_kobj( | |||
42 | struct edac_device_ctl_info *edac_dev); | 42 | struct edac_device_ctl_info *edac_dev); |
43 | extern int edac_device_create_sysfs(struct edac_device_ctl_info *edac_dev); | 43 | extern int edac_device_create_sysfs(struct edac_device_ctl_info *edac_dev); |
44 | extern void edac_device_remove_sysfs(struct edac_device_ctl_info *edac_dev); | 44 | extern void edac_device_remove_sysfs(struct edac_device_ctl_info *edac_dev); |
45 | extern struct sysdev_class *edac_get_edac_class(void); | ||
46 | 45 | ||
47 | /* edac core workqueue: single CPU mode */ | 46 | /* edac core workqueue: single CPU mode */ |
48 | extern struct workqueue_struct *edac_workqueue; | 47 | extern struct workqueue_struct *edac_workqueue; |
diff --git a/drivers/edac/edac_pci.c b/drivers/edac/edac_pci.c index efb5d5650783..2b378207d571 100644 --- a/drivers/edac/edac_pci.c +++ b/drivers/edac/edac_pci.c | |||
@@ -164,19 +164,6 @@ fail1: | |||
164 | } | 164 | } |
165 | 165 | ||
166 | /* | 166 | /* |
167 | * complete_edac_pci_list_del | ||
168 | * | ||
169 | * RCU completion callback to indicate item is deleted | ||
170 | */ | ||
171 | static void complete_edac_pci_list_del(struct rcu_head *head) | ||
172 | { | ||
173 | struct edac_pci_ctl_info *pci; | ||
174 | |||
175 | pci = container_of(head, struct edac_pci_ctl_info, rcu); | ||
176 | INIT_LIST_HEAD(&pci->link); | ||
177 | } | ||
178 | |||
179 | /* | ||
180 | * del_edac_pci_from_global_list | 167 | * del_edac_pci_from_global_list |
181 | * | 168 | * |
182 | * remove the PCI control struct from the global list | 169 | * remove the PCI control struct from the global list |
@@ -184,8 +171,12 @@ static void complete_edac_pci_list_del(struct rcu_head *head) | |||
184 | static void del_edac_pci_from_global_list(struct edac_pci_ctl_info *pci) | 171 | static void del_edac_pci_from_global_list(struct edac_pci_ctl_info *pci) |
185 | { | 172 | { |
186 | list_del_rcu(&pci->link); | 173 | list_del_rcu(&pci->link); |
187 | call_rcu(&pci->rcu, complete_edac_pci_list_del); | 174 | |
188 | rcu_barrier(); | 175 | /* these are for safe removal of devices from global list while |
176 | * NMI handlers may be traversing list | ||
177 | */ | ||
178 | synchronize_rcu(); | ||
179 | INIT_LIST_HEAD(&pci->link); | ||
189 | } | 180 | } |
190 | 181 | ||
191 | #if 0 | 182 | #if 0 |
diff --git a/drivers/edac/edac_pci_sysfs.c b/drivers/edac/edac_pci_sysfs.c index c39697df9cb4..495198ad059c 100644 --- a/drivers/edac/edac_pci_sysfs.c +++ b/drivers/edac/edac_pci_sysfs.c | |||
@@ -7,7 +7,7 @@ | |||
7 | * | 7 | * |
8 | */ | 8 | */ |
9 | #include <linux/module.h> | 9 | #include <linux/module.h> |
10 | #include <linux/sysdev.h> | 10 | #include <linux/edac.h> |
11 | #include <linux/slab.h> | 11 | #include <linux/slab.h> |
12 | #include <linux/ctype.h> | 12 | #include <linux/ctype.h> |
13 | 13 | ||
@@ -352,9 +352,9 @@ static int edac_pci_main_kobj_setup(void) | |||
352 | return 0; | 352 | return 0; |
353 | 353 | ||
354 | /* First time, so create the main kobject and its | 354 | /* First time, so create the main kobject and its |
355 | * controls and atributes | 355 | * controls and attributes |
356 | */ | 356 | */ |
357 | edac_class = edac_get_edac_class(); | 357 | edac_class = edac_get_sysfs_class(); |
358 | if (edac_class == NULL) { | 358 | if (edac_class == NULL) { |
359 | debugf1("%s() no edac_class\n", __func__); | 359 | debugf1("%s() no edac_class\n", __func__); |
360 | err = -ENODEV; | 360 | err = -ENODEV; |
@@ -368,7 +368,7 @@ static int edac_pci_main_kobj_setup(void) | |||
368 | if (!try_module_get(THIS_MODULE)) { | 368 | if (!try_module_get(THIS_MODULE)) { |
369 | debugf1("%s() try_module_get() failed\n", __func__); | 369 | debugf1("%s() try_module_get() failed\n", __func__); |
370 | err = -ENODEV; | 370 | err = -ENODEV; |
371 | goto decrement_count_fail; | 371 | goto mod_get_fail; |
372 | } | 372 | } |
373 | 373 | ||
374 | edac_pci_top_main_kobj = kzalloc(sizeof(struct kobject), GFP_KERNEL); | 374 | edac_pci_top_main_kobj = kzalloc(sizeof(struct kobject), GFP_KERNEL); |
@@ -403,6 +403,9 @@ kobject_init_and_add_fail: | |||
403 | kzalloc_fail: | 403 | kzalloc_fail: |
404 | module_put(THIS_MODULE); | 404 | module_put(THIS_MODULE); |
405 | 405 | ||
406 | mod_get_fail: | ||
407 | edac_put_sysfs_class(); | ||
408 | |||
406 | decrement_count_fail: | 409 | decrement_count_fail: |
407 | /* if we are on this error exit, nothing to tear down */ | 410 | /* if we are on this error exit, nothing to tear down */ |
408 | atomic_dec(&edac_pci_sysfs_refcount); | 411 | atomic_dec(&edac_pci_sysfs_refcount); |
@@ -429,6 +432,7 @@ static void edac_pci_main_kobj_teardown(void) | |||
429 | __func__); | 432 | __func__); |
430 | kobject_put(edac_pci_top_main_kobj); | 433 | kobject_put(edac_pci_top_main_kobj); |
431 | } | 434 | } |
435 | edac_put_sysfs_class(); | ||
432 | } | 436 | } |
433 | 437 | ||
434 | /* | 438 | /* |
@@ -547,7 +551,7 @@ static void edac_pci_dev_parity_clear(struct pci_dev *dev) | |||
547 | /* | 551 | /* |
548 | * PCI Parity polling | 552 | * PCI Parity polling |
549 | * | 553 | * |
550 | * Fucntion to retrieve the current parity status | 554 | * Function to retrieve the current parity status |
551 | * and decode it | 555 | * and decode it |
552 | * | 556 | * |
553 | */ | 557 | */ |
diff --git a/drivers/edac/edac_stub.c b/drivers/edac/edac_stub.c index 20b428aa155e..aab970760b75 100644 --- a/drivers/edac/edac_stub.c +++ b/drivers/edac/edac_stub.c | |||
@@ -3,10 +3,13 @@ | |||
3 | * | 3 | * |
4 | * Author: Dave Jiang <djiang@mvista.com> | 4 | * Author: Dave Jiang <djiang@mvista.com> |
5 | * | 5 | * |
6 | * 2007 (c) MontaVista Software, Inc. This file is licensed under | 6 | * 2007 (c) MontaVista Software, Inc. |
7 | * the terms of the GNU General Public License version 2. This program | 7 | * 2010 (c) Advanced Micro Devices Inc. |
8 | * is licensed "as is" without any warranty of any kind, whether express | 8 | * Borislav Petkov <borislav.petkov@amd.com> |
9 | * or implied. | 9 | * |
10 | * This file is licensed under the terms of the GNU General Public | ||
11 | * License version 2. This program is licensed "as is" without any | ||
12 | * warranty of any kind, whether express or implied. | ||
10 | * | 13 | * |
11 | */ | 14 | */ |
12 | #include <linux/module.h> | 15 | #include <linux/module.h> |
@@ -23,6 +26,8 @@ EXPORT_SYMBOL_GPL(edac_handlers); | |||
23 | int edac_err_assert = 0; | 26 | int edac_err_assert = 0; |
24 | EXPORT_SYMBOL_GPL(edac_err_assert); | 27 | EXPORT_SYMBOL_GPL(edac_err_assert); |
25 | 28 | ||
29 | static atomic_t edac_class_valid = ATOMIC_INIT(0); | ||
30 | |||
26 | /* | 31 | /* |
27 | * called to determine if there is an EDAC driver interested in | 32 | * called to determine if there is an EDAC driver interested in |
28 | * knowing an event (such as NMI) occurred | 33 | * knowing an event (such as NMI) occurred |
@@ -44,3 +49,41 @@ void edac_atomic_assert_error(void) | |||
44 | edac_err_assert++; | 49 | edac_err_assert++; |
45 | } | 50 | } |
46 | EXPORT_SYMBOL_GPL(edac_atomic_assert_error); | 51 | EXPORT_SYMBOL_GPL(edac_atomic_assert_error); |
52 | |||
53 | /* | ||
54 | * sysfs object: /sys/devices/system/edac | ||
55 | * need to export to other files | ||
56 | */ | ||
57 | struct sysdev_class edac_class = { | ||
58 | .name = "edac", | ||
59 | }; | ||
60 | EXPORT_SYMBOL_GPL(edac_class); | ||
61 | |||
62 | /* return pointer to the 'edac' node in sysfs */ | ||
63 | struct sysdev_class *edac_get_sysfs_class(void) | ||
64 | { | ||
65 | int err = 0; | ||
66 | |||
67 | if (atomic_read(&edac_class_valid)) | ||
68 | goto out; | ||
69 | |||
70 | /* create the /sys/devices/system/edac directory */ | ||
71 | err = sysdev_class_register(&edac_class); | ||
72 | if (err) { | ||
73 | printk(KERN_ERR "Error registering toplevel EDAC sysfs dir\n"); | ||
74 | return NULL; | ||
75 | } | ||
76 | |||
77 | out: | ||
78 | atomic_inc(&edac_class_valid); | ||
79 | return &edac_class; | ||
80 | } | ||
81 | EXPORT_SYMBOL_GPL(edac_get_sysfs_class); | ||
82 | |||
83 | void edac_put_sysfs_class(void) | ||
84 | { | ||
85 | /* last user unregisters it */ | ||
86 | if (atomic_dec_and_test(&edac_class_valid)) | ||
87 | sysdev_class_unregister(&edac_class); | ||
88 | } | ||
89 | EXPORT_SYMBOL_GPL(edac_put_sysfs_class); | ||
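The two helpers above give every EDAC sub-module refcounted access to the shared /sys/devices/system/edac node: the first edac_get_sysfs_class() call registers the sysdev class, later callers only bump the count, and the last edac_put_sysfs_class() unregisters it. A usage sketch with illustrative function names:

static int example_sysfs_setup(void)
{
        struct sysdev_class *edac_cls = edac_get_sysfs_class();

        if (!edac_cls)
                return -ENODEV;

        /* ... create objects under /sys/devices/system/edac ... */
        return 0;
}

static void example_sysfs_teardown(void)
{
        /* ... remove the objects ... */
        edac_put_sysfs_class();         /* last user drops the class */
}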
diff --git a/drivers/edac/i3200_edac.c b/drivers/edac/i3200_edac.c index d41f9002da45..aa08497a075a 100644 --- a/drivers/edac/i3200_edac.c +++ b/drivers/edac/i3200_edac.c | |||
@@ -101,6 +101,19 @@ struct i3200_priv { | |||
101 | 101 | ||
102 | static int nr_channels; | 102 | static int nr_channels; |
103 | 103 | ||
104 | #ifndef readq | ||
105 | static inline __u64 readq(const volatile void __iomem *addr) | ||
106 | { | ||
107 | const volatile u32 __iomem *p = addr; | ||
108 | u32 low, high; | ||
109 | |||
110 | low = readl(p); | ||
111 | high = readl(p + 1); | ||
112 | |||
113 | return low + ((u64)high << 32); | ||
114 | } | ||
115 | #endif | ||
116 | |||
104 | static int how_many_channels(struct pci_dev *pdev) | 117 | static int how_many_channels(struct pci_dev *pdev) |
105 | { | 118 | { |
106 | unsigned char capid0_8b; /* 8th byte of CAPID0 */ | 119 | unsigned char capid0_8b; /* 8th byte of CAPID0 */ |
diff --git a/drivers/edac/i5000_edac.c b/drivers/edac/i5000_edac.c index a5cefab8d65d..4dc3ac25a422 100644 --- a/drivers/edac/i5000_edac.c +++ b/drivers/edac/i5000_edac.c | |||
@@ -27,7 +27,7 @@ | |||
27 | /* | 27 | /* |
28 | * Alter this version for the I5000 module when modifications are made | 28 | * Alter this version for the I5000 module when modifications are made |
29 | */ | 29 | */ |
30 | #define I5000_REVISION " Ver: 2.0.12 " __DATE__ | 30 | #define I5000_REVISION " Ver: 2.0.12" |
31 | #define EDAC_MOD_STR "i5000_edac" | 31 | #define EDAC_MOD_STR "i5000_edac" |
32 | 32 | ||
33 | #define i5000_printk(level, fmt, arg...) \ | 33 | #define i5000_printk(level, fmt, arg...) \ |
@@ -1372,7 +1372,7 @@ static int i5000_probe1(struct pci_dev *pdev, int dev_idx) | |||
1372 | * actual number of slots/dimms per channel, we thus utilize the | 1372 | * actual number of slots/dimms per channel, we thus utilize the |
1373 | * resource as specified by the chipset. Thus, we might have | 1373 | * resource as specified by the chipset. Thus, we might have |
1374 | * more DIMMs per channel than actually on the mobo, but this | 1374 | * more DIMMs per channel than actually on the mobo, but this |
1375 | * allows the driver to support upto the chipset max, without | 1375 | * allows the driver to support up to the chipset max, without |
1376 | * some fancy mobo determination. | 1376 | * some fancy mobo determination. |
1377 | */ | 1377 | */ |
1378 | i5000_get_dimm_and_channel_counts(pdev, &num_dimms_per_channel, | 1378 | i5000_get_dimm_and_channel_counts(pdev, &num_dimms_per_channel, |
diff --git a/drivers/edac/i5100_edac.c b/drivers/edac/i5100_edac.c index f459a6c0886b..bcbdeeca48b8 100644 --- a/drivers/edac/i5100_edac.c +++ b/drivers/edac/i5100_edac.c | |||
@@ -11,7 +11,7 @@ | |||
11 | * | 11 | * |
12 | * The intel 5100 has two independent channels. EDAC core currently | 12 | * The intel 5100 has two independent channels. EDAC core currently |
13 | * can not reflect this configuration so instead the chip-select | 13 | * can not reflect this configuration so instead the chip-select |
14 | * rows for each respective channel are layed out one after another, | 14 | * rows for each respective channel are laid out one after another, |
15 | * the first half belonging to channel 0, the second half belonging | 15 | * the first half belonging to channel 0, the second half belonging |
16 | * to channel 1. | 16 | * to channel 1. |
17 | */ | 17 | */ |
@@ -611,20 +611,17 @@ static int i5100_set_scrub_rate(struct mem_ctl_info *mci, u32 bandwidth) | |||
611 | 611 | ||
612 | bandwidth = 5900000 * i5100_mc_scrben(dw); | 612 | bandwidth = 5900000 * i5100_mc_scrben(dw); |
613 | 613 | ||
614 | return 0; | 614 | return bandwidth; |
615 | } | 615 | } |
616 | 616 | ||
617 | static int i5100_get_scrub_rate(struct mem_ctl_info *mci, | 617 | static int i5100_get_scrub_rate(struct mem_ctl_info *mci) |
618 | u32 *bandwidth) | ||
619 | { | 618 | { |
620 | struct i5100_priv *priv = mci->pvt_info; | 619 | struct i5100_priv *priv = mci->pvt_info; |
621 | u32 dw; | 620 | u32 dw; |
622 | 621 | ||
623 | pci_read_config_dword(priv->mc, I5100_MC, &dw); | 622 | pci_read_config_dword(priv->mc, I5100_MC, &dw); |
624 | 623 | ||
625 | *bandwidth = 5900000 * i5100_mc_scrben(dw); | 624 | return 5900000 * i5100_mc_scrben(dw); |
626 | |||
627 | return 0; | ||
628 | } | 625 | } |
629 | 626 | ||
630 | static struct pci_dev *pci_get_device_func(unsigned vendor, | 627 | static struct pci_dev *pci_get_device_func(unsigned vendor, |
diff --git a/drivers/edac/i5400_edac.c b/drivers/edac/i5400_edac.c index 38a9be9e1c7c..74d6ec342afb 100644 --- a/drivers/edac/i5400_edac.c +++ b/drivers/edac/i5400_edac.c | |||
@@ -33,7 +33,7 @@ | |||
33 | /* | 33 | /* |
34 | * Alter this version for the I5400 module when modifications are made | 34 | * Alter this version for the I5400 module when modifications are made |
35 | */ | 35 | */ |
36 | #define I5400_REVISION " Ver: 1.0.0 " __DATE__ | 36 | #define I5400_REVISION " Ver: 1.0.0" |
37 | 37 | ||
38 | #define EDAC_MOD_STR "i5400_edac" | 38 | #define EDAC_MOD_STR "i5400_edac" |
39 | 39 | ||
@@ -648,7 +648,7 @@ static void i5400_process_nonfatal_error_info(struct mem_ctl_info *mci, | |||
648 | return; | 648 | return; |
649 | } | 649 | } |
650 | 650 | ||
651 | /* Miscelaneous errors */ | 651 | /* Miscellaneous errors */ |
652 | errnum = find_first_bit(&allErrors, ARRAY_SIZE(error_name)); | 652 | errnum = find_first_bit(&allErrors, ARRAY_SIZE(error_name)); |
653 | 653 | ||
654 | branch = extract_fbdchan_indx(info->ferr_nf_fbd); | 654 | branch = extract_fbdchan_indx(info->ferr_nf_fbd); |
@@ -1240,7 +1240,7 @@ static int i5400_probe1(struct pci_dev *pdev, int dev_idx) | |||
1240 | * actual number of slots/dimms per channel, we thus utilize the | 1240 | * actual number of slots/dimms per channel, we thus utilize the |
1241 | * resource as specified by the chipset. Thus, we might have | 1241 | * resource as specified by the chipset. Thus, we might have |
1242 | * more DIMMs per channel than actually on the mobo, but this | 1242 | * more DIMMs per channel than actually on the mobo, but this |
1243 | * allows the driver to support upto the chipset max, without | 1243 | * allows the driver to support up to the chipset max, without |
1244 | * some fancy mobo determination. | 1244 | * some fancy mobo determination. |
1245 | */ | 1245 | */ |
1246 | num_dimms_per_channel = MAX_DIMMS_PER_CHANNEL; | 1246 | num_dimms_per_channel = MAX_DIMMS_PER_CHANNEL; |
diff --git a/drivers/edac/i7300_edac.c b/drivers/edac/i7300_edac.c new file mode 100644 index 000000000000..a76fe8366b68 --- /dev/null +++ b/drivers/edac/i7300_edac.c | |||
@@ -0,0 +1,1247 @@ | |||
1 | /* | ||
2 | * Intel 7300 class Memory Controllers kernel module (Clarksboro) | ||
3 | * | ||
4 | * This file may be distributed under the terms of the | ||
5 | * GNU General Public License version 2 only. | ||
6 | * | ||
7 | * Copyright (c) 2010 by: | ||
8 | * Mauro Carvalho Chehab <mchehab@redhat.com> | ||
9 | * | ||
10 | * Red Hat Inc. http://www.redhat.com | ||
11 | * | ||
12 | * Intel 7300 Chipset Memory Controller Hub (MCH) - Datasheet | ||
13 | * http://www.intel.com/Assets/PDF/datasheet/318082.pdf | ||
14 | * | ||
15 | * TODO: The chipset also allows checking for PCI Express errors. Currently, | ||
16 | * the driver covers only memory errors | ||
17 | * | ||
18 | * This driver uses the "csrows" EDAC attribute to represent the DIMM slot# | ||
19 | */ | ||
20 | |||
21 | #include <linux/module.h> | ||
22 | #include <linux/init.h> | ||
23 | #include <linux/pci.h> | ||
24 | #include <linux/pci_ids.h> | ||
25 | #include <linux/slab.h> | ||
26 | #include <linux/edac.h> | ||
27 | #include <linux/mmzone.h> | ||
28 | |||
29 | #include "edac_core.h" | ||
30 | |||
31 | /* | ||
32 | * Alter this version for the I7300 module when modifications are made | ||
33 | */ | ||
34 | #define I7300_REVISION " Ver: 1.0.0" | ||
35 | |||
36 | #define EDAC_MOD_STR "i7300_edac" | ||
37 | |||
38 | #define i7300_printk(level, fmt, arg...) \ | ||
39 | edac_printk(level, "i7300", fmt, ##arg) | ||
40 | |||
41 | #define i7300_mc_printk(mci, level, fmt, arg...) \ | ||
42 | edac_mc_chipset_printk(mci, level, "i7300", fmt, ##arg) | ||
43 | |||
44 | /*********************************************** | ||
45 | * i7300 Limit constants Structs and static vars | ||
46 | ***********************************************/ | ||
47 | |||
48 | /* | ||
49 | * Memory topology is organized as: | ||
50 | * Branch 0 - 2 channels: channels 0 and 1 (FDB0 PCI dev 21.0) | ||
51 | * Branch 1 - 2 channels: channels 2 and 3 (FDB1 PCI dev 22.0) | ||
52 | * Each channel can have up to 8 DIMM sets (called SLOTS) | ||
53 | * Slots should generally be filled in pairs, | ||
54 | * except in single-channel mode of operation, | ||
55 | * where only slot 0 of channel 0 is filled | ||
56 | * In normal operation mode, the two channels on a branch should be | ||
57 | * filled together for the same SLOT# | ||
58 | * When in mirrored mode, Branch 1 replicates the memory at Branch 0, so the four | ||
59 | * channels on both branches should be filled | ||
60 | */ | ||
61 | |||
62 | /* Limits for i7300 */ | ||
63 | #define MAX_SLOTS 8 | ||
64 | #define MAX_BRANCHES 2 | ||
65 | #define MAX_CH_PER_BRANCH 2 | ||
66 | #define MAX_CHANNELS (MAX_CH_PER_BRANCH * MAX_BRANCHES) | ||
67 | #define MAX_MIR 3 | ||
68 | |||
69 | #define to_channel(ch, branch) ((((branch)) << 1) | (ch)) | ||
70 | |||
71 | #define to_csrow(slot, ch, branch) \ | ||
72 | (to_channel(ch, branch) | ((slot) << 2)) | ||
73 | |||
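A quick worked example of the two mapping macros just defined, following the topology comment above: to_channel() packs the branch and channel into the low two bits, and to_csrow() adds the slot above them. This is a standalone sketch, not part of the driver:

    #include <stdio.h>

    /* The two mapping macros from above, exercised on sample values. */
    #define to_channel(ch, branch) ((((branch)) << 1) | (ch))
    #define to_csrow(slot, ch, branch) (to_channel(ch, branch) | ((slot) << 2))

    int main(void)
    {
        /* e.g. slot 3 on channel 1 of branch 1 */
        printf("channel = %d\n", to_channel(1, 1));   /* (1<<1)|1 = 3    */
        printf("csrow   = %d\n", to_csrow(3, 1, 1));  /* 3 | (3<<2) = 15 */
        return 0;
    }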
74 | /* Device name and register DID (Device ID) */ | ||
75 | struct i7300_dev_info { | ||
76 | const char *ctl_name; /* name for this device */ | ||
77 | u16 fsb_mapping_errors; /* DID for the branchmap,control */ | ||
78 | }; | ||
79 | |||
80 | /* Table of devices attributes supported by this driver */ | ||
81 | static const struct i7300_dev_info i7300_devs[] = { | ||
82 | { | ||
83 | .ctl_name = "I7300", | ||
84 | .fsb_mapping_errors = PCI_DEVICE_ID_INTEL_I7300_MCH_ERR, | ||
85 | }, | ||
86 | }; | ||
87 | |||
88 | struct i7300_dimm_info { | ||
89 | int megabytes; /* size, 0 means not present */ | ||
90 | }; | ||
91 | |||
92 | /* driver private data structure */ | ||
93 | struct i7300_pvt { | ||
94 | struct pci_dev *pci_dev_16_0_fsb_ctlr; /* 16.0 */ | ||
95 | struct pci_dev *pci_dev_16_1_fsb_addr_map; /* 16.1 */ | ||
96 | struct pci_dev *pci_dev_16_2_fsb_err_regs; /* 16.2 */ | ||
97 | struct pci_dev *pci_dev_2x_0_fbd_branch[MAX_BRANCHES]; /* 21.0 and 22.0 */ | ||
98 | |||
99 | u16 tolm; /* top of low memory */ | ||
100 | u64 ambase; /* AMB BAR */ | ||
101 | |||
102 | u32 mc_settings; /* Report several settings */ | ||
103 | u32 mc_settings_a; | ||
104 | |||
105 | u16 mir[MAX_MIR]; /* Memory Interleave Reg*/ | ||
106 | |||
107 | u16 mtr[MAX_SLOTS][MAX_BRANCHES]; /* Memory Technology Reg */ | ||
108 | u16 ambpresent[MAX_CHANNELS]; /* AMB present regs */ | ||
109 | |||
110 | /* DIMM information matrix, allocating architecture maximums */ | ||
111 | struct i7300_dimm_info dimm_info[MAX_SLOTS][MAX_CHANNELS]; | ||
112 | |||
113 | /* Temporary buffer for use when preparing error messages */ | ||
114 | char *tmp_prt_buffer; | ||
115 | }; | ||
116 | |||
117 | /* FIXME: Why do we need to have this static? */ | ||
118 | static struct edac_pci_ctl_info *i7300_pci; | ||
119 | |||
120 | /*************************************************** | ||
121 | * i7300 Register definitions for memory enumeration | ||
122 | ***************************************************/ | ||
123 | |||
124 | /* | ||
125 | * Device 16, | ||
126 | * Function 0: System Address (not documented) | ||
127 | * Function 1: Memory Branch Map, Control, Errors Register | ||
128 | */ | ||
129 | |||
130 | /* OFFSETS for Function 0 */ | ||
131 | #define AMBASE 0x48 /* AMB Mem Mapped Reg Region Base */ | ||
132 | #define MAXCH 0x56 /* Max Channel Number */ | ||
133 | #define MAXDIMMPERCH 0x57 /* Max DIMM PER Channel Number */ | ||
134 | |||
135 | /* OFFSETS for Function 1 */ | ||
136 | #define MC_SETTINGS 0x40 | ||
137 | #define IS_MIRRORED(mc) ((mc) & (1 << 16)) | ||
138 | #define IS_ECC_ENABLED(mc) ((mc) & (1 << 5)) | ||
139 | #define IS_RETRY_ENABLED(mc) ((mc) & (1 << 31)) | ||
140 | #define IS_SCRBALGO_ENHANCED(mc) ((mc) & (1 << 8)) | ||
141 | |||
142 | #define MC_SETTINGS_A 0x58 | ||
143 | #define IS_SINGLE_MODE(mca) ((mca) & (1 << 14)) | ||
144 | |||
145 | #define TOLM 0x6C | ||
146 | |||
147 | #define MIR0 0x80 | ||
148 | #define MIR1 0x84 | ||
149 | #define MIR2 0x88 | ||
150 | |||
151 | /* | ||
152 | * Note: Other Intel EDAC drivers use AMBPRESENT to identify the available | ||
153 | * memory. From datasheet item 7.3.1 (FB-DIMM technology & organization), it | ||
154 | * seems that we cannot use this information directly for the same purpose. | ||
155 | * Each memory slot may have up to 2 AMB interfaces, one incoming and another | ||
156 | * outgoing to the next slot. | ||
157 | * For now, the driver just stores the AMB present registers, but relies only | ||
158 | * on the MTR info to detect memory. | ||
159 | * The datasheet is also not clear about how to map each AMBPRESENT register | ||
160 | * to one of the 4 available channels. | ||
161 | */ | ||
162 | #define AMBPRESENT_0 0x64 | ||
163 | #define AMBPRESENT_1 0x66 | ||
164 | |||
165 | static const u16 mtr_regs[MAX_SLOTS] = { | ||
166 | 0x80, 0x84, 0x88, 0x8c, | ||
167 | 0x82, 0x86, 0x8a, 0x8e | ||
168 | }; | ||
169 | |||
170 | /* | ||
171 | * Defines to extract the various fields from the | ||
172 | * MTRx - Memory Technology Registers | ||
173 | */ | ||
174 | #define MTR_DIMMS_PRESENT(mtr) ((mtr) & (1 << 8)) | ||
175 | #define MTR_DIMMS_ETHROTTLE(mtr) ((mtr) & (1 << 7)) | ||
176 | #define MTR_DRAM_WIDTH(mtr) (((mtr) & (1 << 6)) ? 8 : 4) | ||
177 | #define MTR_DRAM_BANKS(mtr) (((mtr) & (1 << 5)) ? 8 : 4) | ||
178 | #define MTR_DIMM_RANKS(mtr) (((mtr) & (1 << 4)) ? 1 : 0) | ||
179 | #define MTR_DIMM_ROWS(mtr) (((mtr) >> 2) & 0x3) | ||
180 | #define MTR_DRAM_BANKS_ADDR_BITS 2 | ||
181 | #define MTR_DIMM_ROWS_ADDR_BITS(mtr) (MTR_DIMM_ROWS(mtr) + 13) | ||
182 | #define MTR_DIMM_COLS(mtr) ((mtr) & 0x3) | ||
183 | #define MTR_DIMM_COLS_ADDR_BITS(mtr) (MTR_DIMM_COLS(mtr) + 10) | ||
184 | |||
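To see how these extractors turn one 16-bit MTR into a DIMM geometry, here is a small standalone sketch applying the same macros to a hypothetical register value (0x179 is made up for illustration):

    #include <stdio.h>

    /* MTR field extractors from above, applied to a sample value. */
    #define MTR_DIMMS_PRESENT(mtr)          ((mtr) & (1 << 8))
    #define MTR_DRAM_WIDTH(mtr)             (((mtr) & (1 << 6)) ? 8 : 4)
    #define MTR_DRAM_BANKS(mtr)             (((mtr) & (1 << 5)) ? 8 : 4)
    #define MTR_DIMM_RANKS(mtr)             (((mtr) & (1 << 4)) ? 1 : 0)
    #define MTR_DIMM_ROWS(mtr)              (((mtr) >> 2) & 0x3)
    #define MTR_DIMM_ROWS_ADDR_BITS(mtr)    (MTR_DIMM_ROWS(mtr) + 13)
    #define MTR_DIMM_COLS(mtr)              ((mtr) & 0x3)
    #define MTR_DIMM_COLS_ADDR_BITS(mtr)    (MTR_DIMM_COLS(mtr) + 10)

    int main(void)
    {
        unsigned mtr = 0x179;   /* hypothetical register value */

        if (MTR_DIMMS_PRESENT(mtr))
            printf("x%d, %d banks, %s rank, %d row bits, %d col bits\n",
                   MTR_DRAM_WIDTH(mtr), MTR_DRAM_BANKS(mtr),
                   MTR_DIMM_RANKS(mtr) ? "double" : "single",
                   MTR_DIMM_ROWS_ADDR_BITS(mtr),
                   MTR_DIMM_COLS_ADDR_BITS(mtr));
        /* prints: x8, 8 banks, double rank, 15 row bits, 11 col bits */
        return 0;
    }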
185 | #ifdef CONFIG_EDAC_DEBUG | ||
186 | /* MTR NUMROW */ | ||
187 | static const char *numrow_toString[] = { | ||
188 | "8,192 - 13 rows", | ||
189 | "16,384 - 14 rows", | ||
190 | "32,768 - 15 rows", | ||
191 | "65,536 - 16 rows" | ||
192 | }; | ||
193 | |||
194 | /* MTR NUMCOL */ | ||
195 | static const char *numcol_toString[] = { | ||
196 | "1,024 - 10 columns", | ||
197 | "2,048 - 11 columns", | ||
198 | "4,096 - 12 columns", | ||
199 | "reserved" | ||
200 | }; | ||
201 | #endif | ||
202 | |||
203 | /************************************************ | ||
204 | * i7300 Register definitions for error detection | ||
205 | ************************************************/ | ||
206 | |||
207 | /* | ||
208 | * Device 16.1: FBD Error Registers | ||
209 | */ | ||
210 | #define FERR_FAT_FBD 0x98 | ||
211 | static const char *ferr_fat_fbd_name[] = { | ||
212 | [22] = "Non-Redundant Fast Reset Timeout", | ||
213 | [2] = ">Tmid Thermal event with intelligent throttling disabled", | ||
214 | [1] = "Memory or FBD configuration CRC read error", | ||
215 | [0] = "Memory Write error on non-redundant retry or " | ||
216 | "FBD configuration Write error on retry", | ||
217 | }; | ||
218 | #define GET_FBD_FAT_IDX(fbderr) (fbderr & (3 << 28)) | ||
219 | #define FERR_FAT_FBD_ERR_MASK ((1 << 0) | (1 << 1) | (1 << 2) | (1 << 3)) | ||
220 | |||
221 | #define FERR_NF_FBD 0xa0 | ||
222 | static const char *ferr_nf_fbd_name[] = { | ||
223 | [24] = "DIMM-Spare Copy Completed", | ||
224 | [23] = "DIMM-Spare Copy Initiated", | ||
225 | [22] = "Redundant Fast Reset Timeout", | ||
226 | [21] = "Memory Write error on redundant retry", | ||
227 | [18] = "SPD protocol Error", | ||
228 | [17] = "FBD Northbound parity error on FBD Sync Status", | ||
229 | [16] = "Correctable Patrol Data ECC", | ||
230 | [15] = "Correctable Resilver- or Spare-Copy Data ECC", | ||
231 | [14] = "Correctable Mirrored Demand Data ECC", | ||
232 | [13] = "Correctable Non-Mirrored Demand Data ECC", | ||
233 | [11] = "Memory or FBD configuration CRC read error", | ||
234 | [10] = "FBD Configuration Write error on first attempt", | ||
235 | [9] = "Memory Write error on first attempt", | ||
236 | [8] = "Non-Aliased Uncorrectable Patrol Data ECC", | ||
237 | [7] = "Non-Aliased Uncorrectable Resilver- or Spare-Copy Data ECC", | ||
238 | [6] = "Non-Aliased Uncorrectable Mirrored Demand Data ECC", | ||
239 | [5] = "Non-Aliased Uncorrectable Non-Mirrored Demand Data ECC", | ||
240 | [4] = "Aliased Uncorrectable Patrol Data ECC", | ||
241 | [3] = "Aliased Uncorrectable Resilver- or Spare-Copy Data ECC", | ||
242 | [2] = "Aliased Uncorrectable Mirrored Demand Data ECC", | ||
243 | [1] = "Aliased Uncorrectable Non-Mirrored Demand Data ECC", | ||
244 | [0] = "Uncorrectable Data ECC on Replay", | ||
245 | }; | ||
246 | #define GET_FBD_NF_IDX(fbderr) (fbderr & (3 << 28)) | ||
247 | #define FERR_NF_FBD_ERR_MASK ((1 << 24) | (1 << 23) | (1 << 22) | (1 << 21) |\ | ||
248 | (1 << 18) | (1 << 17) | (1 << 16) | (1 << 15) |\ | ||
249 | (1 << 14) | (1 << 13) | (1 << 11) | (1 << 10) |\ | ||
250 | (1 << 9) | (1 << 8) | (1 << 7) | (1 << 6) |\ | ||
251 | (1 << 5) | (1 << 4) | (1 << 3) | (1 << 2) |\ | ||
252 | (1 << 1) | (1 << 0)) | ||
253 | |||
254 | #define EMASK_FBD 0xa8 | ||
255 | #define EMASK_FBD_ERR_MASK ((1 << 27) | (1 << 26) | (1 << 25) | (1 << 24) |\ | ||
256 | (1 << 22) | (1 << 21) | (1 << 20) | (1 << 19) |\ | ||
257 | (1 << 18) | (1 << 17) | (1 << 16) | (1 << 14) |\ | ||
258 | (1 << 13) | (1 << 12) | (1 << 11) | (1 << 10) |\ | ||
259 | (1 << 9) | (1 << 8) | (1 << 7) | (1 << 6) |\ | ||
260 | (1 << 5) | (1 << 4) | (1 << 3) | (1 << 2) |\ | ||
261 | (1 << 1) | (1 << 0)) | ||
262 | |||
263 | /* | ||
264 | * Device 16.2: Global Error Registers | ||
265 | */ | ||
266 | |||
267 | #define FERR_GLOBAL_HI 0x48 | ||
268 | static const char *ferr_global_hi_name[] = { | ||
269 | [3] = "FSB 3 Fatal Error", | ||
270 | [2] = "FSB 2 Fatal Error", | ||
271 | [1] = "FSB 1 Fatal Error", | ||
272 | [0] = "FSB 0 Fatal Error", | ||
273 | }; | ||
274 | #define ferr_global_hi_is_fatal(errno) 1 | ||
275 | |||
276 | #define FERR_GLOBAL_LO 0x40 | ||
277 | static const char *ferr_global_lo_name[] = { | ||
278 | [31] = "Internal MCH Fatal Error", | ||
279 | [30] = "Intel QuickData Technology Device Fatal Error", | ||
280 | [29] = "FSB1 Fatal Error", | ||
281 | [28] = "FSB0 Fatal Error", | ||
282 | [27] = "FBD Channel 3 Fatal Error", | ||
283 | [26] = "FBD Channel 2 Fatal Error", | ||
284 | [25] = "FBD Channel 1 Fatal Error", | ||
285 | [24] = "FBD Channel 0 Fatal Error", | ||
286 | [23] = "PCI Express Device 7 Fatal Error", | ||
287 | [22] = "PCI Express Device 6 Fatal Error", | ||
288 | [21] = "PCI Express Device 5 Fatal Error", | ||
289 | [20] = "PCI Express Device 4 Fatal Error", | ||
290 | [19] = "PCI Express Device 3 Fatal Error", | ||
291 | [18] = "PCI Express Device 2 Fatal Error", | ||
292 | [17] = "PCI Express Device 1 Fatal Error", | ||
293 | [16] = "ESI Fatal Error", | ||
294 | [15] = "Internal MCH Non-Fatal Error", | ||
295 | [14] = "Intel QuickData Technology Device Non Fatal Error", | ||
296 | [13] = "FSB1 Non-Fatal Error", | ||
297 | [12] = "FSB 0 Non-Fatal Error", | ||
298 | [11] = "FBD Channel 3 Non-Fatal Error", | ||
299 | [10] = "FBD Channel 2 Non-Fatal Error", | ||
300 | [9] = "FBD Channel 1 Non-Fatal Error", | ||
301 | [8] = "FBD Channel 0 Non-Fatal Error", | ||
302 | [7] = "PCI Express Device 7 Non-Fatal Error", | ||
303 | [6] = "PCI Express Device 6 Non-Fatal Error", | ||
304 | [5] = "PCI Express Device 5 Non-Fatal Error", | ||
305 | [4] = "PCI Express Device 4 Non-Fatal Error", | ||
306 | [3] = "PCI Express Device 3 Non-Fatal Error", | ||
307 | [2] = "PCI Express Device 2 Non-Fatal Error", | ||
308 | [1] = "PCI Express Device 1 Non-Fatal Error", | ||
309 | [0] = "ESI Non-Fatal Error", | ||
310 | }; | ||
311 | #define ferr_global_lo_is_fatal(errno) ((errno < 16) ? 0 : 1) | ||
312 | |||
313 | #define NRECMEMA 0xbe | ||
314 | #define NRECMEMA_BANK(v) (((v) >> 12) & 7) | ||
315 | #define NRECMEMA_RANK(v) (((v) >> 8) & 15) | ||
316 | |||
317 | #define NRECMEMB 0xc0 | ||
318 | #define NRECMEMB_IS_WR(v) ((v) & (1 << 31)) | ||
319 | #define NRECMEMB_CAS(v) (((v) >> 16) & 0x1fff) | ||
320 | #define NRECMEMB_RAS(v) ((v) & 0xffff) | ||
321 | |||
322 | #define REDMEMA 0xdc | ||
323 | |||
324 | #define REDMEMB 0x7c | ||
325 | #define IS_SECOND_CH(v) ((v) & (1 << 17)) /* bit 17: second channel */ | ||
326 | |||
327 | #define RECMEMA 0xe0 | ||
328 | #define RECMEMA_BANK(v) (((v) >> 12) & 7) | ||
329 | #define RECMEMA_RANK(v) (((v) >> 8) & 15) | ||
330 | |||
331 | #define RECMEMB 0xe4 | ||
332 | #define RECMEMB_IS_WR(v) ((v) & (1 << 31)) | ||
333 | #define RECMEMB_CAS(v) (((v) >> 16) & 0x1fff) | ||
334 | #define RECMEMB_RAS(v) ((v) & 0xffff) | ||
335 | |||
336 | /******************************************** | ||
337 | * i7300 Functions related to error detection | ||
338 | ********************************************/ | ||
339 | |||
340 | /** | ||
341 | * get_err_from_table() - Gets the error message from a table | ||
342 | * @table: table name (array of char *) | ||
343 | * @size: number of elements at the table | ||
344 | * @pos: position of the element to be returned | ||
345 | * | ||
346 | * This is a small routine that gets the pos-th element of a table. If the | ||
347 | * element doesn't exist (or it is empty), it returns "reserved". | ||
348 | * Instead of calling it directly, it is better to use the | ||
349 | * GET_ERR_FROM_TABLE() macro, which automatically checks the table size | ||
350 | * via the ARRAY_SIZE() macro | ||
351 | */ | ||
352 | static const char *get_err_from_table(const char *table[], int size, int pos) | ||
353 | { | ||
354 | if (unlikely(pos >= size)) | ||
355 | return "Reserved"; | ||
356 | |||
357 | if (unlikely(!table[pos])) | ||
358 | return "Reserved"; | ||
359 | |||
360 | return table[pos]; | ||
361 | } | ||
362 | |||
363 | #define GET_ERR_FROM_TABLE(table, pos) \ | ||
364 | get_err_from_table(table, ARRAY_SIZE(table), pos) | ||
365 | |||
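The same bounds-and-NULL-safe lookup pattern, reduced to a standalone sketch: a sparse table built with designated initializers, where both holes and out-of-range indexes decode as "Reserved". Names are illustrative:

    #include <stdio.h>
    #include <stddef.h>

    #define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

    /* Sparse demo table; index 1 and 2 are holes. */
    static const char *demo_names[] = {
        [3] = "FSB 3 Fatal Error",
        [0] = "FSB 0 Fatal Error",
    };

    static const char *lookup(const char *table[], int size, int pos)
    {
        if (pos >= size || !table[pos])
            return "Reserved";
        return table[pos];
    }

    int main(void)
    {
        printf("%s\n", lookup(demo_names, ARRAY_SIZE(demo_names), 2)); /* hole */
        printf("%s\n", lookup(demo_names, ARRAY_SIZE(demo_names), 9)); /* OOB  */
        return 0;
    }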
366 | /** | ||
367 | * i7300_process_error_global() - Retrieves the hardware error information from | ||
368 | * the global error registers and | ||
369 | * sends it to dmesg | ||
370 | * @mci: struct mem_ctl_info pointer | ||
371 | */ | ||
372 | static void i7300_process_error_global(struct mem_ctl_info *mci) | ||
373 | { | ||
374 | struct i7300_pvt *pvt; | ||
375 | u32 errnum, value; | ||
376 | unsigned long errors; | ||
377 | const char *specific; | ||
378 | bool is_fatal; | ||
379 | |||
380 | pvt = mci->pvt_info; | ||
381 | |||
382 | /* read in the 1st FATAL error register */ | ||
383 | pci_read_config_dword(pvt->pci_dev_16_2_fsb_err_regs, | ||
384 | FERR_GLOBAL_HI, &value); | ||
385 | if (unlikely(value)) { | ||
386 | errors = value; | ||
387 | errnum = find_first_bit(&errors, | ||
388 | ARRAY_SIZE(ferr_global_hi_name)); | ||
389 | specific = GET_ERR_FROM_TABLE(ferr_global_hi_name, errnum); | ||
390 | is_fatal = ferr_global_hi_is_fatal(errnum); | ||
391 | |||
392 | /* Clear the error bit */ | ||
393 | pci_write_config_dword(pvt->pci_dev_16_2_fsb_err_regs, | ||
394 | FERR_GLOBAL_HI, value); | ||
395 | |||
396 | goto error_global; | ||
397 | } | ||
398 | |||
399 | pci_read_config_dword(pvt->pci_dev_16_2_fsb_err_regs, | ||
400 | FERR_GLOBAL_LO, &value); | ||
401 | if (unlikely(value)) { | ||
402 | errors = value; | ||
403 | errnum = find_first_bit(&errors, | ||
404 | ARRAY_SIZE(ferr_global_lo_name)); | ||
405 | specific = GET_ERR_FROM_TABLE(ferr_global_lo_name, errnum); | ||
406 | is_fatal = ferr_global_lo_is_fatal(errnum); | ||
407 | |||
408 | /* Clear the error bit */ | ||
409 | pci_write_config_dword(pvt->pci_dev_16_2_fsb_err_regs, | ||
410 | FERR_GLOBAL_LO, value); | ||
411 | |||
412 | goto error_global; | ||
413 | } | ||
414 | return; | ||
415 | |||
416 | error_global: | ||
417 | i7300_mc_printk(mci, KERN_EMERG, "%s misc error: %s\n", | ||
418 | is_fatal ? "Fatal" : "NOT fatal", specific); | ||
419 | } | ||
420 | |||
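The decode step above picks the lowest set bit of the latched register as the error to report. A standalone model of that step (ffs() stands in for the kernel's find_first_bit(); the table is a made-up subset):

    #include <stdio.h>
    #include <strings.h>    /* ffs(), standing in for find_first_bit() */

    /* Made-up subset of an error-name table, sparse like the ones above. */
    static const char *names[] = {
        [1] = "FSB 1 Fatal Error",
        [0] = "FSB 0 Fatal Error",
    };

    int main(void)
    {
        unsigned value = 0x2;           /* pretend bit 1 is latched */
        int errnum = ffs(value) - 1;    /* lowest set bit, 0-based  */

        printf("%s\n", errnum >= 0 ? names[errnum] : "none");
        return 0;
    }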
421 | /** | ||
422 | * i7300_process_fbd_error() - Retrieves the hardware error information from | ||
423 | * the FBD error registers and reports it via | ||
424 | * EDAC error API calls | ||
425 | * @mci: struct mem_ctl_info pointer | ||
426 | */ | ||
427 | static void i7300_process_fbd_error(struct mem_ctl_info *mci) | ||
428 | { | ||
429 | struct i7300_pvt *pvt; | ||
430 | u32 errnum, value; | ||
431 | u16 val16; | ||
432 | unsigned branch, channel, bank, rank, cas, ras; | ||
433 | u32 syndrome; | ||
434 | |||
435 | unsigned long errors; | ||
436 | const char *specific; | ||
437 | bool is_wr; | ||
438 | |||
439 | pvt = mci->pvt_info; | ||
440 | |||
441 | /* read in the 1st FATAL error register */ | ||
442 | pci_read_config_dword(pvt->pci_dev_16_1_fsb_addr_map, | ||
443 | FERR_FAT_FBD, &value); | ||
444 | if (unlikely(value & FERR_FAT_FBD_ERR_MASK)) { | ||
445 | errors = value & FERR_FAT_FBD_ERR_MASK; | ||
446 | errnum = find_first_bit(&errors, | ||
447 | ARRAY_SIZE(ferr_fat_fbd_name)); | ||
448 | specific = GET_ERR_FROM_TABLE(ferr_fat_fbd_name, errnum); | ||
449 | |||
450 | branch = (GET_FBD_FAT_IDX(value) == 2) ? 1 : 0; | ||
451 | pci_read_config_word(pvt->pci_dev_16_1_fsb_addr_map, | ||
452 | NRECMEMA, &val16); | ||
453 | bank = NRECMEMA_BANK(val16); | ||
454 | rank = NRECMEMA_RANK(val16); | ||
455 | |||
456 | pci_read_config_dword(pvt->pci_dev_16_1_fsb_addr_map, | ||
457 | NRECMEMB, &value); | ||
458 | |||
459 | is_wr = NRECMEMB_IS_WR(value); | ||
460 | cas = NRECMEMB_CAS(value); | ||
461 | ras = NRECMEMB_RAS(value); | ||
462 | |||
463 | snprintf(pvt->tmp_prt_buffer, PAGE_SIZE, | ||
464 | "FATAL (Branch=%d DRAM-Bank=%d %s " | ||
465 | "RAS=%d CAS=%d Err=0x%lx (%s))", | ||
466 | branch, bank, | ||
467 | is_wr ? "RDWR" : "RD", | ||
468 | ras, cas, | ||
469 | errors, specific); | ||
470 | |||
471 | /* Call the helper to output message */ | ||
472 | edac_mc_handle_fbd_ue(mci, rank, branch << 1, | ||
473 | (branch << 1) + 1, | ||
474 | pvt->tmp_prt_buffer); | ||
475 | } | ||
476 | |||
477 | /* read in the 1st NON-FATAL error register */ | ||
478 | pci_read_config_dword(pvt->pci_dev_16_1_fsb_addr_map, | ||
479 | FERR_NF_FBD, &value); | ||
480 | if (unlikely(value & FERR_NF_FBD_ERR_MASK)) { | ||
481 | errors = value & FERR_NF_FBD_ERR_MASK; | ||
482 | errnum = find_first_bit(&errors, | ||
483 | ARRAY_SIZE(ferr_nf_fbd_name)); | ||
484 | specific = GET_ERR_FROM_TABLE(ferr_nf_fbd_name, errnum); | ||
485 | |||
486 | /* Clear the error bit */ | ||
487 | pci_write_config_dword(pvt->pci_dev_16_2_fsb_err_regs, | ||
488 | FERR_GLOBAL_LO, value); | ||
489 | |||
490 | pci_read_config_dword(pvt->pci_dev_16_1_fsb_addr_map, | ||
491 | REDMEMA, &syndrome); | ||
492 | |||
493 | branch = (GET_FBD_FAT_IDX(value) == 2) ? 1 : 0; | ||
494 | pci_read_config_word(pvt->pci_dev_16_1_fsb_addr_map, | ||
495 | RECMEMA, &val16); | ||
496 | bank = RECMEMA_BANK(val16); | ||
497 | rank = RECMEMA_RANK(val16); | ||
498 | |||
499 | pci_read_config_dword(pvt->pci_dev_16_1_fsb_addr_map, | ||
500 | RECMEMB, &value); | ||
501 | |||
502 | is_wr = RECMEMB_IS_WR(value); | ||
503 | cas = RECMEMB_CAS(value); | ||
504 | ras = RECMEMB_RAS(value); | ||
505 | |||
506 | pci_read_config_dword(pvt->pci_dev_16_1_fsb_addr_map, | ||
507 | REDMEMB, &value); | ||
508 | |||
509 | channel = (branch << 1); | ||
510 | if (IS_SECOND_CH(value)) | ||
511 | channel++; | ||
512 | |||
513 | /* Form the output message */ | ||
514 | snprintf(pvt->tmp_prt_buffer, PAGE_SIZE, | ||
515 | "Corrected error (Branch=%d, Channel %d), " | ||
516 | " DRAM-Bank=%d %s " | ||
517 | "RAS=%d CAS=%d, CE Err=0x%lx, Syndrome=0x%08x (%s)", | ||
518 | branch, channel, | ||
519 | bank, | ||
520 | is_wr ? "RDWR" : "RD", | ||
521 | ras, cas, | ||
522 | errors, syndrome, specific); | ||
523 | |||
524 | /* | ||
525 | * Call the helper to output message | ||
526 | * NOTE: Errors are reported per-branch, and not per-channel | ||
527 | * Currently, we don't know how to identify the right | ||
528 | * channel. | ||
529 | */ | ||
530 | edac_mc_handle_fbd_ce(mci, rank, channel, | ||
531 | pvt->tmp_prt_buffer); | ||
532 | } | ||
533 | return; | ||
534 | } | ||
535 | |||
536 | /** | ||
537 | * i7300_check_error() - Calls the error checking subroutines | ||
538 | * @mci: struct mem_ctl_info pointer | ||
539 | */ | ||
540 | static void i7300_check_error(struct mem_ctl_info *mci) | ||
541 | { | ||
542 | i7300_process_error_global(mci); | ||
543 | i7300_process_fbd_error(mci); | ||
544 | } | ||
545 | |||
546 | /** | ||
547 | * i7300_clear_error() - Clears the error registers | ||
548 | * @mci: struct mem_ctl_info pointer | ||
549 | */ | ||
550 | static void i7300_clear_error(struct mem_ctl_info *mci) | ||
551 | { | ||
552 | struct i7300_pvt *pvt = mci->pvt_info; | ||
553 | u32 value; | ||
554 | /* | ||
555 | * All error values are RWC - we need to read and write 1 back to the | ||
556 | * bits that we want to clear | ||
557 | */ | ||
558 | |||
559 | /* Clear global error registers */ | ||
560 | pci_read_config_dword(pvt->pci_dev_16_2_fsb_err_regs, | ||
561 | FERR_GLOBAL_HI, &value); | ||
562 | pci_write_config_dword(pvt->pci_dev_16_2_fsb_err_regs, | ||
563 | FERR_GLOBAL_HI, value); | ||
564 | |||
565 | pci_read_config_dword(pvt->pci_dev_16_2_fsb_err_regs, | ||
566 | FERR_GLOBAL_LO, &value); | ||
567 | pci_write_config_dword(pvt->pci_dev_16_2_fsb_err_regs, | ||
568 | FERR_GLOBAL_LO, value); | ||
569 | |||
570 | /* Clear FBD error registers */ | ||
571 | pci_read_config_dword(pvt->pci_dev_16_1_fsb_addr_map, | ||
572 | FERR_FAT_FBD, &value); | ||
573 | pci_write_config_dword(pvt->pci_dev_16_1_fsb_addr_map, | ||
574 | FERR_FAT_FBD, value); | ||
575 | |||
576 | pci_read_config_dword(pvt->pci_dev_16_1_fsb_addr_map, | ||
577 | FERR_NF_FBD, &value); | ||
578 | pci_write_config_dword(pvt->pci_dev_16_1_fsb_addr_map, | ||
579 | FERR_NF_FBD, value); | ||
580 | } | ||
581 | |||
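A userspace model of the read/write-back idiom i7300_clear_error() relies on: for RWC (write-1-to-clear) registers, writing back the value just read clears exactly the latched bits without touching anything else. The register here is simulated:

    #include <stdio.h>

    /* Simulated RWC register: a write clears the bits that are 1s in it. */
    static unsigned reg = 0x5;          /* bits 0 and 2 latched by hardware */

    static void rwc_write(unsigned mask)
    {
        reg &= ~mask;                   /* write-1-to-clear semantics */
    }

    int main(void)
    {
        unsigned value = reg;           /* read the latched bits...   */
        rwc_write(value);               /* ...and write them back     */
        printf("after clear: 0x%x\n", reg);     /* prints 0x0 */
        return 0;
    }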
582 | /** | ||
583 | * i7300_enable_error_reporting() - Enables the memory error reporting logic | ||
584 | * in the hardware | ||
585 | * @mci: struct mem_ctl_info pointer | ||
586 | */ | ||
587 | static void i7300_enable_error_reporting(struct mem_ctl_info *mci) | ||
588 | { | ||
589 | struct i7300_pvt *pvt = mci->pvt_info; | ||
590 | u32 fbd_error_mask; | ||
591 | |||
592 | /* Read the FBD Error Mask Register */ | ||
593 | pci_read_config_dword(pvt->pci_dev_16_1_fsb_addr_map, | ||
594 | EMASK_FBD, &fbd_error_mask); | ||
595 | |||
596 | /* Enable with a '0' */ | ||
597 | fbd_error_mask &= ~(EMASK_FBD_ERR_MASK); | ||
598 | |||
599 | pci_write_config_dword(pvt->pci_dev_16_1_fsb_addr_map, | ||
600 | EMASK_FBD, fbd_error_mask); | ||
601 | } | ||
602 | |||
603 | /************************************************ | ||
604 | * i7300 Functions related to memory enumeration | ||
605 | ************************************************/ | ||
606 | |||
607 | /** | ||
608 | * decode_mtr() - Decodes the MTR descriptor, filling the edac structs | ||
609 | * @pvt: pointer to the private data struct used by i7300 driver | ||
610 | * @slot: DIMM slot (0 to 7) | ||
611 | * @ch: Channel number within the branch (0 or 1) | ||
612 | * @branch: Branch number (0 or 1) | ||
613 | * @dinfo: Pointer to DIMM info where dimm size is stored | ||
614 | * @p_csrow: Pointer to the struct csrow_info that corresponds to that element | ||
615 | */ | ||
616 | static int decode_mtr(struct i7300_pvt *pvt, | ||
617 | int slot, int ch, int branch, | ||
618 | struct i7300_dimm_info *dinfo, | ||
619 | struct csrow_info *p_csrow, | ||
620 | u32 *nr_pages) | ||
621 | { | ||
622 | int mtr, ans, addrBits, channel; | ||
623 | |||
624 | channel = to_channel(ch, branch); | ||
625 | |||
626 | mtr = pvt->mtr[slot][branch]; | ||
627 | ans = MTR_DIMMS_PRESENT(mtr) ? 1 : 0; | ||
628 | |||
629 | debugf2("\tMTR%d CH%d: DIMMs are %s (mtr)\n", | ||
630 | slot, channel, | ||
631 | ans ? "Present" : "NOT Present"); | ||
632 | |||
633 | /* Determine if there is a DIMM present in this DIMM slot */ | ||
634 | if (!ans) | ||
635 | return 0; | ||
636 | |||
637 | /* Start with the number of bits for a Bank | ||
638 | * on the DRAM */ | ||
639 | addrBits = MTR_DRAM_BANKS_ADDR_BITS; | ||
640 | /* Add the number of ROW bits */ | ||
641 | addrBits += MTR_DIMM_ROWS_ADDR_BITS(mtr); | ||
642 | /* add the number of COLUMN bits */ | ||
643 | addrBits += MTR_DIMM_COLS_ADDR_BITS(mtr); | ||
644 | /* add the number of RANK bits */ | ||
645 | addrBits += MTR_DIMM_RANKS(mtr); | ||
646 | |||
647 | addrBits += 6; /* add 64 bits (2^6) of data width per address */ | ||
648 | addrBits -= 20; /* divide by 2^20: bytes to megabytes */ | ||
649 | addrBits -= 3; /* 8 bits per byte */ | ||
650 | |||
651 | dinfo->megabytes = 1 << addrBits; | ||
652 | *nr_pages = dinfo->megabytes << 8; | ||
653 | |||
654 | debugf2("\t\tWIDTH: x%d\n", MTR_DRAM_WIDTH(mtr)); | ||
655 | |||
656 | debugf2("\t\tELECTRICAL THROTTLING is %s\n", | ||
657 | MTR_DIMMS_ETHROTTLE(mtr) ? "enabled" : "disabled"); | ||
658 | |||
659 | debugf2("\t\tNUMBANK: %d bank(s)\n", MTR_DRAM_BANKS(mtr)); | ||
660 | debugf2("\t\tNUMRANK: %s\n", MTR_DIMM_RANKS(mtr) ? "double" : "single"); | ||
661 | debugf2("\t\tNUMROW: %s\n", numrow_toString[MTR_DIMM_ROWS(mtr)]); | ||
662 | debugf2("\t\tNUMCOL: %s\n", numcol_toString[MTR_DIMM_COLS(mtr)]); | ||
663 | debugf2("\t\tSIZE: %d MB\n", dinfo->megabytes); | ||
664 | |||
665 | p_csrow->grain = 8; | ||
666 | p_csrow->mtype = MEM_FB_DDR2; | ||
667 | p_csrow->csrow_idx = slot; | ||
668 | p_csrow->page_mask = 0; | ||
669 | |||
670 | /* | ||
671 | * The type of error detection actually depends on the | ||
672 | * mode of operation. When there is just one single memory chip, at | ||
673 | * socket 0, channel 0, it uses 8-byte-over-32-byte SECDED+ code. | ||
674 | * In normal or mirrored mode, it uses Lockstep mode, | ||
675 | * with the possibility of using an extended algorithm for x8 memories | ||
676 | * See datasheet Sections 7.3.6 to 7.3.8 | ||
677 | */ | ||
678 | |||
679 | if (IS_SINGLE_MODE(pvt->mc_settings_a)) { | ||
680 | p_csrow->edac_mode = EDAC_SECDED; | ||
681 | debugf2("\t\tECC code is 8-byte-over-32-byte SECDED+ code\n"); | ||
682 | } else { | ||
683 | debugf2("\t\tECC code is on Lockstep mode\n"); | ||
684 | if (MTR_DRAM_WIDTH(mtr) == 8) | ||
685 | p_csrow->edac_mode = EDAC_S8ECD8ED; | ||
686 | else | ||
687 | p_csrow->edac_mode = EDAC_S4ECD4ED; | ||
688 | } | ||
689 | |||
690 | /* ask what device type on this row */ | ||
691 | if (MTR_DRAM_WIDTH(mtr) == 8) { | ||
692 | debugf2("\t\tScrub algorithm for x8 is on %s mode\n", | ||
693 | IS_SCRBALGO_ENHANCED(pvt->mc_settings) ? | ||
694 | "enhanced" : "normal"); | ||
695 | |||
696 | p_csrow->dtype = DEV_X8; | ||
697 | } else | ||
698 | p_csrow->dtype = DEV_X4; | ||
699 | |||
700 | return mtr; | ||
701 | } | ||
702 | |||
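The size computation in decode_mtr() is easier to check as plain arithmetic: size = 2^(bank + row + col + rank + 6 - 20 - 3) MB, where +6 is the 64-bit data width, -3 converts bits to bytes and -20 bytes to megabytes. A standalone sketch with hypothetical field values:

    #include <stdio.h>

    int main(void)
    {
        /* Hypothetical geometry such a DIMM's MTR could encode. */
        int bank_bits = 2;      /* MTR_DRAM_BANKS_ADDR_BITS          */
        int row_bits  = 14;     /* MTR_DIMM_ROWS(mtr) == 1 -> 13 + 1 */
        int col_bits  = 10;     /* MTR_DIMM_COLS(mtr) == 0 -> 10 + 0 */
        int rank_bits = 1;      /* double-rank                       */

        int addr_bits = bank_bits + row_bits + col_bits + rank_bits
                        + 6 - 20 - 3;
        printf("%d MB\n", 1 << addr_bits);      /* prints 1024 MB */
        return 0;
    }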
703 | /** | ||
704 | * print_dimm_size() - Prints a dump of the memory organization | ||
705 | * @pvt: pointer to the private data struct used by i7300 driver | ||
706 | * | ||
707 | * Useful for debugging. If debug is disabled, this routine does nothing | ||
708 | */ | ||
709 | static void print_dimm_size(struct i7300_pvt *pvt) | ||
710 | { | ||
711 | #ifdef CONFIG_EDAC_DEBUG | ||
712 | struct i7300_dimm_info *dinfo; | ||
713 | char *p; | ||
714 | int space, n; | ||
715 | int channel, slot; | ||
716 | |||
717 | space = PAGE_SIZE; | ||
718 | p = pvt->tmp_prt_buffer; | ||
719 | |||
720 | n = snprintf(p, space, " "); | ||
721 | p += n; | ||
722 | space -= n; | ||
723 | for (channel = 0; channel < MAX_CHANNELS; channel++) { | ||
724 | n = snprintf(p, space, "channel %d | ", channel); | ||
725 | p += n; | ||
726 | space -= n; | ||
727 | } | ||
728 | debugf2("%s\n", pvt->tmp_prt_buffer); | ||
729 | p = pvt->tmp_prt_buffer; | ||
730 | space = PAGE_SIZE; | ||
731 | n = snprintf(p, space, "-------------------------------" | ||
732 | "------------------------------"); | ||
733 | p += n; | ||
734 | space -= n; | ||
735 | debugf2("%s\n", pvt->tmp_prt_buffer); | ||
736 | p = pvt->tmp_prt_buffer; | ||
737 | space = PAGE_SIZE; | ||
738 | |||
739 | for (slot = 0; slot < MAX_SLOTS; slot++) { | ||
740 | n = snprintf(p, space, "csrow/SLOT %d ", slot); | ||
741 | p += n; | ||
742 | space -= n; | ||
743 | |||
744 | for (channel = 0; channel < MAX_CHANNELS; channel++) { | ||
745 | dinfo = &pvt->dimm_info[slot][channel]; | ||
746 | n = snprintf(p, space, "%4d MB | ", dinfo->megabytes); | ||
747 | p += n; | ||
748 | space -= n; | ||
749 | } | ||
750 | |||
751 | debugf2("%s\n", pvt->tmp_prt_buffer); | ||
752 | p = pvt->tmp_prt_buffer; | ||
753 | space = PAGE_SIZE; | ||
754 | } | ||
755 | |||
756 | n = snprintf(p, space, "-------------------------------" | ||
757 | "------------------------------"); | ||
758 | p += n; | ||
759 | space -= n; | ||
760 | debugf2("%s\n", pvt->tmp_prt_buffer); | ||
761 | p = pvt->tmp_prt_buffer; | ||
762 | space = PAGE_SIZE; | ||
763 | #endif | ||
764 | } | ||
765 | |||
766 | /** | ||
767 | * i7300_init_csrows() - Initialize the 'csrows' table within | ||
768 | * the mci control structure with the | ||
769 | * addressing of memory. | ||
770 | * @mci: struct mem_ctl_info pointer | ||
771 | */ | ||
772 | static int i7300_init_csrows(struct mem_ctl_info *mci) | ||
773 | { | ||
774 | struct i7300_pvt *pvt; | ||
775 | struct i7300_dimm_info *dinfo; | ||
776 | struct csrow_info *p_csrow; | ||
777 | int rc = -ENODEV; | ||
778 | int mtr; | ||
779 | int ch, branch, slot, channel; | ||
780 | u32 last_page = 0, nr_pages; | ||
781 | |||
782 | pvt = mci->pvt_info; | ||
783 | |||
784 | debugf2("Memory Technology Registers:\n"); | ||
785 | |||
786 | /* Get the AMB present registers for the four channels */ | ||
787 | for (branch = 0; branch < MAX_BRANCHES; branch++) { | ||
788 | /* Read and dump this branch's AMB present registers */ | ||
789 | channel = to_channel(0, branch); | ||
790 | pci_read_config_word(pvt->pci_dev_2x_0_fbd_branch[branch], | ||
791 | AMBPRESENT_0, | ||
792 | &pvt->ambpresent[channel]); | ||
793 | debugf2("\t\tAMB-present CH%d = 0x%x:\n", | ||
794 | channel, pvt->ambpresent[channel]); | ||
795 | |||
796 | channel = to_channel(1, branch); | ||
797 | pci_read_config_word(pvt->pci_dev_2x_0_fbd_branch[branch], | ||
798 | AMBPRESENT_1, | ||
799 | &pvt->ambpresent[channel]); | ||
800 | debugf2("\t\tAMB-present CH%d = 0x%x:\n", | ||
801 | channel, pvt->ambpresent[channel]); | ||
802 | } | ||
803 | |||
804 | /* Get the set of MTR[0-7] regs by each branch */ | ||
805 | for (slot = 0; slot < MAX_SLOTS; slot++) { | ||
806 | int where = mtr_regs[slot]; | ||
807 | for (branch = 0; branch < MAX_BRANCHES; branch++) { | ||
808 | pci_read_config_word(pvt->pci_dev_2x_0_fbd_branch[branch], | ||
809 | where, | ||
810 | &pvt->mtr[slot][branch]); | ||
811 | for (ch = 0; ch < MAX_BRANCHES; ch++) { | ||
812 | int channel = to_channel(ch, branch); | ||
813 | |||
814 | dinfo = &pvt->dimm_info[slot][channel]; | ||
815 | p_csrow = &mci->csrows[slot]; | ||
816 | |||
817 | mtr = decode_mtr(pvt, slot, ch, branch, | ||
818 | dinfo, p_csrow, &nr_pages); | ||
819 | /* if no DIMMs on this row, continue */ | ||
820 | if (!MTR_DIMMS_PRESENT(mtr)) | ||
821 | continue; | ||
822 | |||
823 | /* Update per_csrow memory count */ | ||
824 | p_csrow->nr_pages += nr_pages; | ||
825 | p_csrow->first_page = last_page; | ||
826 | last_page += nr_pages; | ||
827 | p_csrow->last_page = last_page; | ||
828 | |||
829 | rc = 0; | ||
830 | } | ||
831 | } | ||
832 | } | ||
833 | |||
834 | return rc; | ||
835 | } | ||
836 | |||
837 | /** | ||
838 | * decode_mir() - Decodes Memory Interleave Register (MIR) info | ||
839 | * @mir_no: number of the MIR register to decode | ||
840 | * @mir: array with the MIR data cached on the driver | ||
841 | */ | ||
842 | static void decode_mir(int mir_no, u16 mir[MAX_MIR]) | ||
843 | { | ||
844 | if (mir[mir_no] & 3) | ||
845 | debugf2("MIR%d: limit= 0x%x Branch(es) that participate:" | ||
846 | " %s %s\n", | ||
847 | mir_no, | ||
848 | (mir[mir_no] >> 4) & 0xfff, | ||
849 | (mir[mir_no] & 1) ? "B0" : "", | ||
850 | (mir[mir_no] & 2) ? "B1" : ""); | ||
851 | } | ||
852 | |||
853 | /** | ||
854 | * i7300_get_mc_regs() - Get the contents of the MC enumeration registers | ||
855 | * @mci: struct mem_ctl_info pointer | ||
856 | * | ||
857 | * The data read is cached internally for later use | ||
858 | */ | ||
859 | static int i7300_get_mc_regs(struct mem_ctl_info *mci) | ||
860 | { | ||
861 | struct i7300_pvt *pvt; | ||
862 | u32 actual_tolm; | ||
863 | int i, rc; | ||
864 | |||
865 | pvt = mci->pvt_info; | ||
866 | |||
867 | pci_read_config_dword(pvt->pci_dev_16_0_fsb_ctlr, AMBASE, | ||
868 | (u32 *) &pvt->ambase); | ||
869 | |||
870 | debugf2("AMBASE= 0x%lx\n", (long unsigned int)pvt->ambase); | ||
871 | |||
872 | /* Get the Branch Map regs */ | ||
873 | pci_read_config_word(pvt->pci_dev_16_1_fsb_addr_map, TOLM, &pvt->tolm); | ||
874 | pvt->tolm >>= 12; | ||
875 | debugf2("TOLM (number of 256M regions) =%u (0x%x)\n", pvt->tolm, | ||
876 | pvt->tolm); | ||
877 | |||
878 | actual_tolm = (u32) ((1000l * pvt->tolm) >> (30 - 28)); | ||
879 | debugf2("Actual TOLM byte addr=%u.%03u GB (0x%x)\n", | ||
880 | actual_tolm/1000, actual_tolm % 1000, pvt->tolm << 28); | ||
881 | |||
882 | /* Get memory controller settings */ | ||
883 | pci_read_config_dword(pvt->pci_dev_16_1_fsb_addr_map, MC_SETTINGS, | ||
884 | &pvt->mc_settings); | ||
885 | pci_read_config_dword(pvt->pci_dev_16_1_fsb_addr_map, MC_SETTINGS_A, | ||
886 | &pvt->mc_settings_a); | ||
887 | |||
888 | if (IS_SINGLE_MODE(pvt->mc_settings_a)) | ||
889 | debugf0("Memory controller operating in single mode\n"); | ||
890 | else | ||
891 | debugf0("Memory controller operating in %s mode\n", | ||
892 | IS_MIRRORED(pvt->mc_settings) ? "mirrored" : "non-mirrored"); | ||
893 | |||
894 | debugf0("Error detection is %s\n", | ||
895 | IS_ECC_ENABLED(pvt->mc_settings) ? "enabled" : "disabled"); | ||
896 | debugf0("Retry is %s\n", | ||
897 | IS_RETRY_ENABLED(pvt->mc_settings) ? "enabled" : "disabled"); | ||
898 | |||
899 | /* Get Memory Interleave Range registers */ | ||
900 | pci_read_config_word(pvt->pci_dev_16_1_fsb_addr_map, MIR0, | ||
901 | &pvt->mir[0]); | ||
902 | pci_read_config_word(pvt->pci_dev_16_1_fsb_addr_map, MIR1, | ||
903 | &pvt->mir[1]); | ||
904 | pci_read_config_word(pvt->pci_dev_16_1_fsb_addr_map, MIR2, | ||
905 | &pvt->mir[2]); | ||
906 | |||
907 | /* Decode the MIR regs */ | ||
908 | for (i = 0; i < MAX_MIR; i++) | ||
909 | decode_mir(i, pvt->mir); | ||
910 | |||
911 | rc = i7300_init_csrows(mci); | ||
912 | if (rc < 0) | ||
913 | return rc; | ||
914 | |||
915 | /* Go and determine the size of each DIMM and place in an | ||
916 | * orderly matrix */ | ||
917 | print_dimm_size(pvt); | ||
918 | |||
919 | return 0; | ||
920 | } | ||
921 | |||
922 | /************************************************* | ||
923 | * i7300 Functions related to device probe/release | ||
924 | *************************************************/ | ||
925 | |||
926 | /** | ||
927 | * i7300_put_devices() - Release the PCI devices | ||
928 | * @mci: struct mem_ctl_info pointer | ||
929 | */ | ||
930 | static void i7300_put_devices(struct mem_ctl_info *mci) | ||
931 | { | ||
932 | struct i7300_pvt *pvt; | ||
933 | int branch; | ||
934 | |||
935 | pvt = mci->pvt_info; | ||
936 | |||
937 | /* Decrement usage count for devices */ | ||
938 | for (branch = 0; branch < MAX_BRANCHES; branch++) | ||
939 | pci_dev_put(pvt->pci_dev_2x_0_fbd_branch[branch]); | ||
940 | pci_dev_put(pvt->pci_dev_16_2_fsb_err_regs); | ||
941 | pci_dev_put(pvt->pci_dev_16_1_fsb_addr_map); | ||
942 | } | ||
943 | |||
944 | /** | ||
945 | * i7300_get_devices() - Find and perform 'get' operation on the MCH's | ||
946 | * device/functions we want to reference for this driver | ||
947 | * @mci: struct mem_ctl_info pointer | ||
948 | * | ||
949 | * Access and prepare the several devices for use: | ||
950 | * I7300 devices used by this driver: | ||
951 | * Device 16, functions 0,1 and 2: PCI_DEVICE_ID_INTEL_I7300_MCH_ERR | ||
952 | * Device 21 function 0: PCI_DEVICE_ID_INTEL_I7300_MCH_FB0 | ||
953 | * Device 22 function 0: PCI_DEVICE_ID_INTEL_I7300_MCH_FB1 | ||
954 | */ | ||
955 | static int __devinit i7300_get_devices(struct mem_ctl_info *mci) | ||
956 | { | ||
957 | struct i7300_pvt *pvt; | ||
958 | struct pci_dev *pdev; | ||
959 | |||
960 | pvt = mci->pvt_info; | ||
961 | |||
962 | /* Attempt to 'get' the MCH register we want */ | ||
963 | pdev = NULL; | ||
964 | while (!pvt->pci_dev_16_1_fsb_addr_map || | ||
965 | !pvt->pci_dev_16_2_fsb_err_regs) { | ||
966 | pdev = pci_get_device(PCI_VENDOR_ID_INTEL, | ||
967 | PCI_DEVICE_ID_INTEL_I7300_MCH_ERR, pdev); | ||
968 | if (!pdev) { | ||
969 | /* End of list, leave */ | ||
970 | i7300_printk(KERN_ERR, | ||
971 | "'system address,Process Bus' " | ||
972 | "device not found:" | ||
973 | "vendor 0x%x device 0x%x ERR funcs " | ||
974 | "(broken BIOS?)\n", | ||
975 | PCI_VENDOR_ID_INTEL, | ||
976 | PCI_DEVICE_ID_INTEL_I7300_MCH_ERR); | ||
977 | goto error; | ||
978 | } | ||
979 | |||
980 | /* Store device 16 funcs 1 and 2 */ | ||
981 | switch (PCI_FUNC(pdev->devfn)) { | ||
982 | case 1: | ||
983 | pvt->pci_dev_16_1_fsb_addr_map = pdev; | ||
984 | break; | ||
985 | case 2: | ||
986 | pvt->pci_dev_16_2_fsb_err_regs = pdev; | ||
987 | break; | ||
988 | } | ||
989 | } | ||
990 | |||
991 | debugf1("System Address, processor bus- PCI Bus ID: %s %x:%x\n", | ||
992 | pci_name(pvt->pci_dev_16_0_fsb_ctlr), | ||
993 | pvt->pci_dev_16_0_fsb_ctlr->vendor, | ||
994 | pvt->pci_dev_16_0_fsb_ctlr->device); | ||
995 | debugf1("Branchmap, control and errors - PCI Bus ID: %s %x:%x\n", | ||
996 | pci_name(pvt->pci_dev_16_1_fsb_addr_map), | ||
997 | pvt->pci_dev_16_1_fsb_addr_map->vendor, | ||
998 | pvt->pci_dev_16_1_fsb_addr_map->device); | ||
999 | debugf1("FSB Error Regs - PCI Bus ID: %s %x:%x\n", | ||
1000 | pci_name(pvt->pci_dev_16_2_fsb_err_regs), | ||
1001 | pvt->pci_dev_16_2_fsb_err_regs->vendor, | ||
1002 | pvt->pci_dev_16_2_fsb_err_regs->device); | ||
1003 | |||
1004 | pvt->pci_dev_2x_0_fbd_branch[0] = pci_get_device(PCI_VENDOR_ID_INTEL, | ||
1005 | PCI_DEVICE_ID_INTEL_I7300_MCH_FB0, | ||
1006 | NULL); | ||
1007 | if (!pvt->pci_dev_2x_0_fbd_branch[0]) { | ||
1008 | i7300_printk(KERN_ERR, | ||
1009 | "MC: 'BRANCH 0' device not found:" | ||
1010 | "vendor 0x%x device 0x%x Func 0 (broken BIOS?)\n", | ||
1011 | PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_I7300_MCH_FB0); | ||
1012 | goto error; | ||
1013 | } | ||
1014 | |||
1015 | pvt->pci_dev_2x_0_fbd_branch[1] = pci_get_device(PCI_VENDOR_ID_INTEL, | ||
1016 | PCI_DEVICE_ID_INTEL_I7300_MCH_FB1, | ||
1017 | NULL); | ||
1018 | if (!pvt->pci_dev_2x_0_fbd_branch[1]) { | ||
1019 | i7300_printk(KERN_ERR, | ||
1020 | "MC: 'BRANCH 1' device not found:" | ||
1021 | "vendor 0x%x device 0x%x Func 0 " | ||
1022 | "(broken BIOS?)\n", | ||
1023 | PCI_VENDOR_ID_INTEL, | ||
1024 | PCI_DEVICE_ID_INTEL_I7300_MCH_FB1); | ||
1025 | goto error; | ||
1026 | } | ||
1027 | |||
1028 | return 0; | ||
1029 | |||
1030 | error: | ||
1031 | i7300_put_devices(mci); | ||
1032 | return -ENODEV; | ||
1033 | } | ||
1034 | |||
1035 | /** | ||
1036 | * i7300_init_one() - Probe for one instance of the device | ||
1037 | * @pdev: struct pci_dev pointer | ||
1038 | * @id: struct pci_device_id pointer - currently unused | ||
1039 | */ | ||
1040 | static int __devinit i7300_init_one(struct pci_dev *pdev, | ||
1041 | const struct pci_device_id *id) | ||
1042 | { | ||
1043 | struct mem_ctl_info *mci; | ||
1044 | struct i7300_pvt *pvt; | ||
1045 | int num_channels; | ||
1046 | int num_dimms_per_channel; | ||
1047 | int num_csrows; | ||
1048 | int rc; | ||
1049 | |||
1050 | /* wake up device */ | ||
1051 | rc = pci_enable_device(pdev); | ||
1052 | if (rc == -EIO) | ||
1053 | return rc; | ||
1054 | |||
1055 | debugf0("MC: " __FILE__ ": %s(), pdev bus %u dev=0x%x fn=0x%x\n", | ||
1056 | __func__, | ||
1057 | pdev->bus->number, | ||
1058 | PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn)); | ||
1059 | |||
1060 | /* We are only looking for func 0 of the set */ | ||
1061 | if (PCI_FUNC(pdev->devfn) != 0) | ||
1062 | return -ENODEV; | ||
1063 | |||
1064 | /* As we don't have a motherboard identification routine to determine | ||
1065 | * actual number of slots/dimms per channel, we thus utilize the | ||
1066 | * resource as specified by the chipset. Thus, we might have | ||
1067 | * more DIMMs per channel than actually on the mobo, but this | ||
1068 | * allows the driver to support up to the chipset max, without | ||
1069 | * some fancy mobo determination. | ||
1070 | */ | ||
1071 | num_dimms_per_channel = MAX_SLOTS; | ||
1072 | num_channels = MAX_CHANNELS; | ||
1073 | num_csrows = MAX_SLOTS * MAX_CHANNELS; | ||
1074 | |||
1075 | debugf0("MC: %s(): Number of - Channels= %d DIMMS= %d CSROWS= %d\n", | ||
1076 | __func__, num_channels, num_dimms_per_channel, num_csrows); | ||
1077 | |||
1078 | /* allocate a new MC control structure */ | ||
1079 | mci = edac_mc_alloc(sizeof(*pvt), num_csrows, num_channels, 0); | ||
1080 | |||
1081 | if (mci == NULL) | ||
1082 | return -ENOMEM; | ||
1083 | |||
1084 | debugf0("MC: " __FILE__ ": %s(): mci = %p\n", __func__, mci); | ||
1085 | |||
1086 | mci->dev = &pdev->dev; /* record ptr to the generic device */ | ||
1087 | |||
1088 | pvt = mci->pvt_info; | ||
1089 | pvt->pci_dev_16_0_fsb_ctlr = pdev; /* Record this device in our private data */ | ||
1090 | |||
1091 | pvt->tmp_prt_buffer = kmalloc(PAGE_SIZE, GFP_KERNEL); | ||
1092 | if (!pvt->tmp_prt_buffer) { | ||
1093 | edac_mc_free(mci); | ||
1094 | return -ENOMEM; | ||
1095 | } | ||
1096 | |||
1097 | /* 'get' the pci devices we want to reserve for our use */ | ||
1098 | if (i7300_get_devices(mci)) | ||
1099 | goto fail0; | ||
1100 | |||
1101 | mci->mc_idx = 0; | ||
1102 | mci->mtype_cap = MEM_FLAG_FB_DDR2; | ||
1103 | mci->edac_ctl_cap = EDAC_FLAG_NONE; | ||
1104 | mci->edac_cap = EDAC_FLAG_NONE; | ||
1105 | mci->mod_name = "i7300_edac.c"; | ||
1106 | mci->mod_ver = I7300_REVISION; | ||
1107 | mci->ctl_name = i7300_devs[0].ctl_name; | ||
1108 | mci->dev_name = pci_name(pdev); | ||
1109 | mci->ctl_page_to_phys = NULL; | ||
1110 | |||
1111 | /* Set the function pointer to an actual operation function */ | ||
1112 | mci->edac_check = i7300_check_error; | ||
1113 | |||
1114 | /* initialize the MC control structure 'csrows' table | ||
1115 | * with the mapping and control information */ | ||
1116 | if (i7300_get_mc_regs(mci)) { | ||
1117 | debugf0("MC: Setting mci->edac_cap to EDAC_FLAG_NONE\n" | ||
1118 | " because i7300_init_csrows() returned nonzero " | ||
1119 | "value\n"); | ||
1120 | mci->edac_cap = EDAC_FLAG_NONE; /* no csrows found */ | ||
1121 | } else { | ||
1122 | debugf1("MC: Enable error reporting now\n"); | ||
1123 | i7300_enable_error_reporting(mci); | ||
1124 | } | ||
1125 | |||
1126 | /* add this new MC control structure to EDAC's list of MCs */ | ||
1127 | if (edac_mc_add_mc(mci)) { | ||
1128 | debugf0("MC: " __FILE__ | ||
1129 | ": %s(): failed edac_mc_add_mc()\n", __func__); | ||
1130 | /* FIXME: perhaps some code should go here that disables error | ||
1131 | * reporting if we just enabled it | ||
1132 | */ | ||
1133 | goto fail1; | ||
1134 | } | ||
1135 | |||
1136 | i7300_clear_error(mci); | ||
1137 | |||
1138 | /* allocating generic PCI control info */ | ||
1139 | i7300_pci = edac_pci_create_generic_ctl(&pdev->dev, EDAC_MOD_STR); | ||
1140 | if (!i7300_pci) { | ||
1141 | printk(KERN_WARNING | ||
1142 | "%s(): Unable to create PCI control\n", | ||
1143 | __func__); | ||
1144 | printk(KERN_WARNING | ||
1145 | "%s(): PCI error report via EDAC not setup\n", | ||
1146 | __func__); | ||
1147 | } | ||
1148 | |||
1149 | return 0; | ||
1150 | |||
1151 | /* Error exit unwinding stack */ | ||
1152 | fail1: | ||
1153 | |||
1154 | i7300_put_devices(mci); | ||
1155 | |||
1156 | fail0: | ||
1157 | kfree(pvt->tmp_prt_buffer); | ||
1158 | edac_mc_free(mci); | ||
1159 | return -ENODEV; | ||
1160 | } | ||
1161 | |||
1162 | /** | ||
1163 | * i7300_remove_one() - Remove the driver | ||
1164 | * @pdev: struct pci_dev pointer | ||
1165 | */ | ||
1166 | static void __devexit i7300_remove_one(struct pci_dev *pdev) | ||
1167 | { | ||
1168 | struct mem_ctl_info *mci; | ||
1169 | char *tmp; | ||
1170 | |||
1171 | debugf0(__FILE__ ": %s()\n", __func__); | ||
1172 | |||
1173 | if (i7300_pci) | ||
1174 | edac_pci_release_generic_ctl(i7300_pci); | ||
1175 | |||
1176 | mci = edac_mc_del_mc(&pdev->dev); | ||
1177 | if (!mci) | ||
1178 | return; | ||
1179 | |||
1180 | tmp = ((struct i7300_pvt *)mci->pvt_info)->tmp_prt_buffer; | ||
1181 | |||
1182 | /* retrieve references to resources, and free those resources */ | ||
1183 | i7300_put_devices(mci); | ||
1184 | |||
1185 | kfree(tmp); | ||
1186 | edac_mc_free(mci); | ||
1187 | } | ||
1188 | |||
1189 | /* | ||
1190 | * pci_device_id: table for which devices we are looking for | ||
1191 | * | ||
1192 | * Has only the 8086:360c PCI ID | ||
1193 | */ | ||
1194 | static const struct pci_device_id i7300_pci_tbl[] __devinitdata = { | ||
1195 | {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_I7300_MCH_ERR)}, | ||
1196 | {0,} /* 0 terminated list. */ | ||
1197 | }; | ||
1198 | |||
1199 | MODULE_DEVICE_TABLE(pci, i7300_pci_tbl); | ||
1200 | |||
1201 | /* | ||
1202 | * i7300_driver: pci_driver structure for this module | ||
1203 | */ | ||
1204 | static struct pci_driver i7300_driver = { | ||
1205 | .name = "i7300_edac", | ||
1206 | .probe = i7300_init_one, | ||
1207 | .remove = __devexit_p(i7300_remove_one), | ||
1208 | .id_table = i7300_pci_tbl, | ||
1209 | }; | ||
1210 | |||
1211 | /** | ||
1212 | * i7300_init() - Registers the driver | ||
1213 | */ | ||
1214 | static int __init i7300_init(void) | ||
1215 | { | ||
1216 | int pci_rc; | ||
1217 | |||
1218 | debugf2("MC: " __FILE__ ": %s()\n", __func__); | ||
1219 | |||
1220 | /* Ensure that the OPSTATE is set correctly for POLL or NMI */ | ||
1221 | opstate_init(); | ||
1222 | |||
1223 | pci_rc = pci_register_driver(&i7300_driver); | ||
1224 | |||
1225 | return (pci_rc < 0) ? pci_rc : 0; | ||
1226 | } | ||
1227 | |||
1228 | /** | ||
1229 | * i7300_exit() - Unregisters the driver | ||
1230 | */ | ||
1231 | static void __exit i7300_exit(void) | ||
1232 | { | ||
1233 | debugf2("MC: " __FILE__ ": %s()\n", __func__); | ||
1234 | pci_unregister_driver(&i7300_driver); | ||
1235 | } | ||
1236 | |||
1237 | module_init(i7300_init); | ||
1238 | module_exit(i7300_exit); | ||
1239 | |||
1240 | MODULE_LICENSE("GPL"); | ||
1241 | MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>"); | ||
1242 | MODULE_AUTHOR("Red Hat Inc. (http://www.redhat.com)"); | ||
1243 | MODULE_DESCRIPTION("MC Driver for Intel I7300 memory controllers - " | ||
1244 | I7300_REVISION); | ||
1245 | |||
1246 | module_param(edac_op_state, int, 0444); | ||
1247 | MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI"); | ||
diff --git a/drivers/edac/i7core_edac.c b/drivers/edac/i7core_edac.c index 0fd5b85a0f75..04f1e7ce02b1 100644 --- a/drivers/edac/i7core_edac.c +++ b/drivers/edac/i7core_edac.c | |||
@@ -1,6 +1,6 @@ | |||
1 | /* Intel i7 core/Nehalem Memory Controller kernel module | 1 | /* Intel i7 core/Nehalem Memory Controller kernel module |
2 | * | 2 | * |
3 | * This driver supports yhe memory controllers found on the Intel | 3 | * This driver supports the memory controllers found on the Intel |
4 | * processor families i7core, i7core 7xx/8xx, i5core, Xeon 35xx, | 4 | * processor families i7core, i7core 7xx/8xx, i5core, Xeon 35xx, |
5 | * Xeon 55xx and Xeon 56xx also known as Nehalem, Nehalem-EP, Lynnfield | 5 | * Xeon 55xx and Xeon 56xx also known as Nehalem, Nehalem-EP, Lynnfield |
6 | * and Westmere-EP. | 6 | * and Westmere-EP. |
@@ -39,6 +39,14 @@ | |||
39 | 39 | ||
40 | #include "edac_core.h" | 40 | #include "edac_core.h" |
41 | 41 | ||
42 | /* Static vars */ | ||
43 | static LIST_HEAD(i7core_edac_list); | ||
44 | static DEFINE_MUTEX(i7core_edac_lock); | ||
45 | static int probed; | ||
46 | |||
47 | static int use_pci_fixup; | ||
48 | module_param(use_pci_fixup, int, 0444); | ||
49 | MODULE_PARM_DESC(use_pci_fixup, "Enable PCI fixup to seek for hidden devices"); | ||
42 | /* | 50 | /* |
43 | * This is used for Nehalem-EP and Nehalem-EX devices, where the non-core | 51 | * This is used for Nehalem-EP and Nehalem-EX devices, where the non-core |
44 | * registers start at bus 255, and are not reported by BIOS. | 52 | * registers start at bus 255, and are not reported by BIOS. |
@@ -51,7 +59,7 @@ | |||
51 | /* | 59 | /* |
52 | * Alter this version for the module when modifications are made | 60 | * Alter this version for the module when modifications are made |
53 | */ | 61 | */ |
54 | #define I7CORE_REVISION " Ver: 1.0.0 " __DATE__ | 62 | #define I7CORE_REVISION " Ver: 1.0.0" |
55 | #define EDAC_MOD_STR "i7core_edac" | 63 | #define EDAC_MOD_STR "i7core_edac" |
56 | 64 | ||
57 | /* | 65 | /* |
@@ -212,8 +220,8 @@ struct pci_id_descr { | |||
212 | }; | 220 | }; |
213 | 221 | ||
214 | struct pci_id_table { | 222 | struct pci_id_table { |
215 | struct pci_id_descr *descr; | 223 | const struct pci_id_descr *descr; |
216 | int n_devs; | 224 | int n_devs; |
217 | }; | 225 | }; |
218 | 226 | ||
219 | struct i7core_dev { | 227 | struct i7core_dev { |
@@ -235,8 +243,6 @@ struct i7core_pvt { | |||
235 | struct i7core_inject inject; | 243 | struct i7core_inject inject; |
236 | struct i7core_channel channel[NUM_CHANS]; | 244 | struct i7core_channel channel[NUM_CHANS]; |
237 | 245 | ||
238 | int channels; /* Number of active channels */ | ||
239 | |||
240 | int ce_count_available; | 246 | int ce_count_available; |
241 | int csrow_map[NUM_CHANS][MAX_DIMMS]; | 247 | int csrow_map[NUM_CHANS][MAX_DIMMS]; |
242 | 248 | ||
@@ -261,22 +267,22 @@ struct i7core_pvt { | |||
261 | 267 | ||
262 | /* Count indicator to show errors that were not fetched */ | 268 | /* Count indicator to show errors that were not fetched */ |
263 | unsigned mce_overrun; | 269 | unsigned mce_overrun; |
264 | }; | ||
265 | 270 | ||
266 | /* Static vars */ | 271 | /* Struct to control EDAC polling */ |
267 | static LIST_HEAD(i7core_edac_list); | 272 | struct edac_pci_ctl_info *i7core_pci; |
268 | static DEFINE_MUTEX(i7core_edac_lock); | 273 | }; |
269 | 274 | ||
270 | #define PCI_DESCR(device, function, device_id) \ | 275 | #define PCI_DESCR(device, function, device_id) \ |
271 | .dev = (device), \ | 276 | .dev = (device), \ |
272 | .func = (function), \ | 277 | .func = (function), \ |
273 | .dev_id = (device_id) | 278 | .dev_id = (device_id) |
274 | 279 | ||
275 | struct pci_id_descr pci_dev_descr_i7core_nehalem[] = { | 280 | static const struct pci_id_descr pci_dev_descr_i7core_nehalem[] = { |
276 | /* Memory controller */ | 281 | /* Memory controller */ |
277 | { PCI_DESCR(3, 0, PCI_DEVICE_ID_INTEL_I7_MCR) }, | 282 | { PCI_DESCR(3, 0, PCI_DEVICE_ID_INTEL_I7_MCR) }, |
278 | { PCI_DESCR(3, 1, PCI_DEVICE_ID_INTEL_I7_MC_TAD) }, | 283 | { PCI_DESCR(3, 1, PCI_DEVICE_ID_INTEL_I7_MC_TAD) }, |
279 | /* Exists only for RDIMM */ | 284 | |
285 | /* Exists only for RDIMM */ | ||
280 | { PCI_DESCR(3, 2, PCI_DEVICE_ID_INTEL_I7_MC_RAS), .optional = 1 }, | 286 | { PCI_DESCR(3, 2, PCI_DEVICE_ID_INTEL_I7_MC_RAS), .optional = 1 }, |
281 | { PCI_DESCR(3, 4, PCI_DEVICE_ID_INTEL_I7_MC_TEST) }, | 287 | { PCI_DESCR(3, 4, PCI_DEVICE_ID_INTEL_I7_MC_TEST) }, |
282 | 288 | ||
@@ -297,19 +303,9 @@ struct pci_id_descr pci_dev_descr_i7core_nehalem[] = { | |||
297 | { PCI_DESCR(6, 1, PCI_DEVICE_ID_INTEL_I7_MC_CH2_ADDR) }, | 303 | { PCI_DESCR(6, 1, PCI_DEVICE_ID_INTEL_I7_MC_CH2_ADDR) }, |
298 | { PCI_DESCR(6, 2, PCI_DEVICE_ID_INTEL_I7_MC_CH2_RANK) }, | 304 | { PCI_DESCR(6, 2, PCI_DEVICE_ID_INTEL_I7_MC_CH2_RANK) }, |
299 | { PCI_DESCR(6, 3, PCI_DEVICE_ID_INTEL_I7_MC_CH2_TC) }, | 305 | { PCI_DESCR(6, 3, PCI_DEVICE_ID_INTEL_I7_MC_CH2_TC) }, |
300 | |||
301 | /* Generic Non-core registers */ | ||
302 | /* | ||
303 | * This is the PCI device on i7core and on Xeon 35xx (8086:2c41) | ||
304 | * On Xeon 55xx, however, it has a different id (8086:2c40). So, | ||
305 | * the probing code needs to test for the other address in case of | ||
306 | * failure of this one | ||
307 | */ | ||
308 | { PCI_DESCR(0, 0, PCI_DEVICE_ID_INTEL_I7_NONCORE) }, | ||
309 | |||
310 | }; | 306 | }; |
311 | 307 | ||
312 | struct pci_id_descr pci_dev_descr_lynnfield[] = { | 308 | static const struct pci_id_descr pci_dev_descr_lynnfield[] = { |
313 | { PCI_DESCR( 3, 0, PCI_DEVICE_ID_INTEL_LYNNFIELD_MCR) }, | 309 | { PCI_DESCR( 3, 0, PCI_DEVICE_ID_INTEL_LYNNFIELD_MCR) }, |
314 | { PCI_DESCR( 3, 1, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_TAD) }, | 310 | { PCI_DESCR( 3, 1, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_TAD) }, |
315 | { PCI_DESCR( 3, 4, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_TEST) }, | 311 | { PCI_DESCR( 3, 4, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_TEST) }, |
@@ -323,15 +319,9 @@ struct pci_id_descr pci_dev_descr_lynnfield[] = { | |||
323 | { PCI_DESCR( 5, 1, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH1_ADDR) }, | 319 | { PCI_DESCR( 5, 1, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH1_ADDR) }, |
324 | { PCI_DESCR( 5, 2, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH1_RANK) }, | 320 | { PCI_DESCR( 5, 2, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH1_RANK) }, |
325 | { PCI_DESCR( 5, 3, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH1_TC) }, | 321 | { PCI_DESCR( 5, 3, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH1_TC) }, |
326 | |||
327 | /* | ||
328 | * This is the PCI device has an alternate address on some | ||
329 | * processors like Core i7 860 | ||
330 | */ | ||
331 | { PCI_DESCR( 0, 0, PCI_DEVICE_ID_INTEL_LYNNFIELD_NONCORE) }, | ||
332 | }; | 322 | }; |
333 | 323 | ||
334 | struct pci_id_descr pci_dev_descr_i7core_westmere[] = { | 324 | static const struct pci_id_descr pci_dev_descr_i7core_westmere[] = { |
335 | /* Memory controller */ | 325 | /* Memory controller */ |
336 | { PCI_DESCR(3, 0, PCI_DEVICE_ID_INTEL_LYNNFIELD_MCR_REV2) }, | 326 | { PCI_DESCR(3, 0, PCI_DEVICE_ID_INTEL_LYNNFIELD_MCR_REV2) }, |
337 | { PCI_DESCR(3, 1, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_TAD_REV2) }, | 327 | { PCI_DESCR(3, 1, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_TAD_REV2) }, |
@@ -356,17 +346,14 @@ struct pci_id_descr pci_dev_descr_i7core_westmere[] = { | |||
356 | { PCI_DESCR(6, 1, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH2_ADDR_REV2) }, | 346 | { PCI_DESCR(6, 1, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH2_ADDR_REV2) }, |
357 | { PCI_DESCR(6, 2, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH2_RANK_REV2) }, | 347 | { PCI_DESCR(6, 2, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH2_RANK_REV2) }, |
358 | { PCI_DESCR(6, 3, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH2_TC_REV2) }, | 348 | { PCI_DESCR(6, 3, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH2_TC_REV2) }, |
359 | |||
360 | /* Generic Non-core registers */ | ||
361 | { PCI_DESCR(0, 0, PCI_DEVICE_ID_INTEL_LYNNFIELD_NONCORE_REV2) }, | ||
362 | |||
363 | }; | 349 | }; |
364 | 350 | ||
365 | #define PCI_ID_TABLE_ENTRY(A) { A, ARRAY_SIZE(A) } | 351 | #define PCI_ID_TABLE_ENTRY(A) { .descr=A, .n_devs = ARRAY_SIZE(A) } |
366 | struct pci_id_table pci_dev_table[] = { | 352 | static const struct pci_id_table pci_dev_table[] = { |
367 | PCI_ID_TABLE_ENTRY(pci_dev_descr_i7core_nehalem), | 353 | PCI_ID_TABLE_ENTRY(pci_dev_descr_i7core_nehalem), |
368 | PCI_ID_TABLE_ENTRY(pci_dev_descr_lynnfield), | 354 | PCI_ID_TABLE_ENTRY(pci_dev_descr_lynnfield), |
369 | PCI_ID_TABLE_ENTRY(pci_dev_descr_i7core_westmere), | 355 | PCI_ID_TABLE_ENTRY(pci_dev_descr_i7core_westmere), |
356 | {0,} /* 0 terminated list. */ | ||
370 | }; | 357 | }; |
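
The rewritten PCI_ID_TABLE_ENTRY macro fills .n_devs at compile time via ARRAY_SIZE(), and the added {0,} sentinel lets callers walk pci_dev_table without passing a separate length. A minimal userspace sketch of the same pattern (struct layouts and device IDs here are placeholders, not the driver's real ones):

    #include <stdio.h>

    #define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

    struct pci_id_descr { int dev, func; unsigned dev_id; };
    struct pci_id_table { const struct pci_id_descr *descr; unsigned n_devs; };

    #define PCI_ID_TABLE_ENTRY(A) { .descr = A, .n_devs = ARRAY_SIZE(A) }

    static const struct pci_id_descr descr_a[] = { { 3, 0, 0x2c18 }, { 3, 1, 0x2c19 } };

    static const struct pci_id_table table[] = {
        PCI_ID_TABLE_ENTRY(descr_a),
        { NULL, 0 }                     /* sentinel, like the {0,} above */
    };

    int main(void)
    {
        const struct pci_id_table *t;

        for (t = table; t->descr; t++)  /* the sentinel's NULL .descr stops the walk */
            printf("%u devices\n", t->n_devs);
        return 0;
    }
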
371 | 358 | ||
372 | /* | 359 | /* |
@@ -378,8 +365,6 @@ static const struct pci_device_id i7core_pci_tbl[] __devinitdata = { | |||
378 | {0,} /* 0 terminated list. */ | 365 | {0,} /* 0 terminated list. */ |
379 | }; | 366 | }; |
380 | 367 | ||
381 | static struct edac_pci_ctl_info *i7core_pci; | ||
382 | |||
383 | /**************************************************************************** | 368 | /**************************************************************************** |
384 | Anciliary status routines | 369 | Anciliary status routines |
385 | ****************************************************************************/ | 370 | ****************************************************************************/ |
@@ -442,6 +427,36 @@ static struct i7core_dev *get_i7core_dev(u8 socket) | |||
442 | return NULL; | 427 | return NULL; |
443 | } | 428 | } |
444 | 429 | ||
430 | static struct i7core_dev *alloc_i7core_dev(u8 socket, | ||
431 | const struct pci_id_table *table) | ||
432 | { | ||
433 | struct i7core_dev *i7core_dev; | ||
434 | |||
435 | i7core_dev = kzalloc(sizeof(*i7core_dev), GFP_KERNEL); | ||
436 | if (!i7core_dev) | ||
437 | return NULL; | ||
438 | |||
439 | i7core_dev->pdev = kzalloc(sizeof(*i7core_dev->pdev) * table->n_devs, | ||
440 | GFP_KERNEL); | ||
441 | if (!i7core_dev->pdev) { | ||
442 | kfree(i7core_dev); | ||
443 | return NULL; | ||
444 | } | ||
445 | |||
446 | i7core_dev->socket = socket; | ||
447 | i7core_dev->n_devs = table->n_devs; | ||
448 | list_add_tail(&i7core_dev->list, &i7core_edac_list); | ||
449 | |||
450 | return i7core_dev; | ||
451 | } | ||
452 | |||
453 | static void free_i7core_dev(struct i7core_dev *i7core_dev) | ||
454 | { | ||
455 | list_del(&i7core_dev->list); | ||
456 | kfree(i7core_dev->pdev); | ||
457 | kfree(i7core_dev); | ||
458 | } | ||
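
alloc_i7core_dev()/free_i7core_dev() pair the two allocations with a single teardown path, so every error branch frees both or neither. The same ownership shape in plain C (calloc standing in for kzalloc, list bookkeeping omitted):

    #include <stdlib.h>

    struct dev_stub {
        void **pdev;            /* array of n_devs device pointers */
        unsigned n_devs;
        unsigned char socket;
    };

    static struct dev_stub *alloc_dev(unsigned char socket, unsigned n_devs)
    {
        struct dev_stub *d = calloc(1, sizeof(*d));

        if (!d)
            return NULL;
        d->pdev = calloc(n_devs, sizeof(*d->pdev));   /* zeroed, overflow-checked */
        if (!d->pdev) {
            free(d);            /* undo the first allocation on failure */
            return NULL;
        }
        d->socket = socket;
        d->n_devs = n_devs;
        return d;
    }

    static void free_dev(struct dev_stub *d)
    {
        free(d->pdev);          /* reverse order of allocation */
        free(d);
    }
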
459 | |||
445 | /**************************************************************************** | 460 | /**************************************************************************** |
446 | Memory check routines | 461 | Memory check routines |
447 | ****************************************************************************/ | 462 | ****************************************************************************/ |
@@ -484,7 +499,7 @@ static struct pci_dev *get_pdev_slot_func(u8 socket, unsigned slot, | |||
484 | * to add a fake description for csrows. | 499 | * to add a fake description for csrows. |
485 | * So, this driver is attributing one DIMM memory for one csrow. | 500 | * So, this driver is attributing one DIMM memory for one csrow. |
486 | */ | 501 | */ |
487 | static int i7core_get_active_channels(u8 socket, unsigned *channels, | 502 | static int i7core_get_active_channels(const u8 socket, unsigned *channels, |
488 | unsigned *csrows) | 503 | unsigned *csrows) |
489 | { | 504 | { |
490 | struct pci_dev *pdev = NULL; | 505 | struct pci_dev *pdev = NULL; |
@@ -545,12 +560,13 @@ static int i7core_get_active_channels(u8 socket, unsigned *channels, | |||
545 | return 0; | 560 | return 0; |
546 | } | 561 | } |
547 | 562 | ||
548 | static int get_dimm_config(struct mem_ctl_info *mci, int *csrow) | 563 | static int get_dimm_config(const struct mem_ctl_info *mci) |
549 | { | 564 | { |
550 | struct i7core_pvt *pvt = mci->pvt_info; | 565 | struct i7core_pvt *pvt = mci->pvt_info; |
551 | struct csrow_info *csr; | 566 | struct csrow_info *csr; |
552 | struct pci_dev *pdev; | 567 | struct pci_dev *pdev; |
553 | int i, j; | 568 | int i, j; |
569 | int csrow = 0; | ||
554 | unsigned long last_page = 0; | 570 | unsigned long last_page = 0; |
555 | enum edac_type mode; | 571 | enum edac_type mode; |
556 | enum mem_type mtype; | 572 | enum mem_type mtype; |
@@ -664,13 +680,9 @@ static int get_dimm_config(struct mem_ctl_info *mci, int *csrow) | |||
664 | RANKOFFSET(dimm_dod[j]), | 680 | RANKOFFSET(dimm_dod[j]), |
665 | banks, ranks, rows, cols); | 681 | banks, ranks, rows, cols); |
666 | 682 | ||
667 | #if PAGE_SHIFT > 20 | 683 | npages = MiB_TO_PAGES(size); |
668 | npages = size >> (PAGE_SHIFT - 20); | ||
669 | #else | ||
670 | npages = size << (20 - PAGE_SHIFT); | ||
671 | #endif | ||
672 | 684 | ||
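
MiB_TO_PAGES() replaces the open-coded #if ladder; for the usual PAGE_SHIFT <= 20 case the conversion is just a left shift. A small standalone check of the arithmetic (PAGE_SHIFT is hard-coded here, so this only illustrates the math, not the kernel macro itself):

    #include <stdio.h>

    #define PAGE_SHIFT 12                                 /* 4 KiB pages */
    #define MiB_TO_PAGES(mb) ((mb) << (20 - PAGE_SHIFT))  /* 1 MiB = 2^20 bytes */

    int main(void)
    {
        unsigned long size_mib = 2048;   /* e.g. a 2 GiB DIMM */

        printf("%lu pages\n", MiB_TO_PAGES(size_mib));    /* prints 524288 */
        return 0;
    }
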
673 | csr = &mci->csrows[*csrow]; | 685 | csr = &mci->csrows[csrow]; |
674 | csr->first_page = last_page + 1; | 686 | csr->first_page = last_page + 1; |
675 | last_page += npages; | 687 | last_page += npages; |
676 | csr->last_page = last_page; | 688 | csr->last_page = last_page; |
@@ -678,13 +690,13 @@ static int get_dimm_config(struct mem_ctl_info *mci, int *csrow) | |||
678 | 690 | ||
679 | csr->page_mask = 0; | 691 | csr->page_mask = 0; |
680 | csr->grain = 8; | 692 | csr->grain = 8; |
681 | csr->csrow_idx = *csrow; | 693 | csr->csrow_idx = csrow; |
682 | csr->nr_channels = 1; | 694 | csr->nr_channels = 1; |
683 | 695 | ||
684 | csr->channels[0].chan_idx = i; | 696 | csr->channels[0].chan_idx = i; |
685 | csr->channels[0].ce_count = 0; | 697 | csr->channels[0].ce_count = 0; |
686 | 698 | ||
687 | pvt->csrow_map[i][j] = *csrow; | 699 | pvt->csrow_map[i][j] = csrow; |
688 | 700 | ||
689 | switch (banks) { | 701 | switch (banks) { |
690 | case 4: | 702 | case 4: |
@@ -703,7 +715,7 @@ static int get_dimm_config(struct mem_ctl_info *mci, int *csrow) | |||
703 | csr->edac_mode = mode; | 715 | csr->edac_mode = mode; |
704 | csr->mtype = mtype; | 716 | csr->mtype = mtype; |
705 | 717 | ||
706 | (*csrow)++; | 718 | csrow++; |
707 | } | 719 | } |
708 | 720 | ||
709 | pci_read_config_dword(pdev, MC_SAG_CH_0, &value[0]); | 721 | pci_read_config_dword(pdev, MC_SAG_CH_0, &value[0]); |
@@ -736,7 +748,7 @@ static int get_dimm_config(struct mem_ctl_info *mci, int *csrow) | |||
736 | we're disabling error injection on all write calls to the sysfs nodes that | 748 | we're disabling error injection on all write calls to the sysfs nodes that |
737 | controls the error code injection. | 749 | controls the error code injection. |
738 | */ | 750 | */ |
739 | static int disable_inject(struct mem_ctl_info *mci) | 751 | static int disable_inject(const struct mem_ctl_info *mci) |
740 | { | 752 | { |
741 | struct i7core_pvt *pvt = mci->pvt_info; | 753 | struct i7core_pvt *pvt = mci->pvt_info; |
742 | 754 | ||
@@ -921,7 +933,7 @@ DECLARE_ADDR_MATCH(bank, 32); | |||
921 | DECLARE_ADDR_MATCH(page, 0x10000); | 933 | DECLARE_ADDR_MATCH(page, 0x10000); |
922 | DECLARE_ADDR_MATCH(col, 0x4000); | 934 | DECLARE_ADDR_MATCH(col, 0x4000); |
923 | 935 | ||
924 | static int write_and_test(struct pci_dev *dev, int where, u32 val) | 936 | static int write_and_test(struct pci_dev *dev, const int where, const u32 val) |
925 | { | 937 | { |
926 | u32 read; | 938 | u32 read; |
927 | int count; | 939 | int count; |
@@ -1120,35 +1132,34 @@ DECLARE_COUNTER(2); | |||
1120 | * Sysfs struct | 1132 | * Sysfs struct |
1121 | */ | 1133 | */ |
1122 | 1134 | ||
1123 | 1135 | static const struct mcidev_sysfs_attribute i7core_addrmatch_attrs[] = { | |
1124 | static struct mcidev_sysfs_attribute i7core_addrmatch_attrs[] = { | ||
1125 | ATTR_ADDR_MATCH(channel), | 1136 | ATTR_ADDR_MATCH(channel), |
1126 | ATTR_ADDR_MATCH(dimm), | 1137 | ATTR_ADDR_MATCH(dimm), |
1127 | ATTR_ADDR_MATCH(rank), | 1138 | ATTR_ADDR_MATCH(rank), |
1128 | ATTR_ADDR_MATCH(bank), | 1139 | ATTR_ADDR_MATCH(bank), |
1129 | ATTR_ADDR_MATCH(page), | 1140 | ATTR_ADDR_MATCH(page), |
1130 | ATTR_ADDR_MATCH(col), | 1141 | ATTR_ADDR_MATCH(col), |
1131 | { .attr = { .name = NULL } } | 1142 | { } /* End of list */ |
1132 | }; | 1143 | }; |
1133 | 1144 | ||
1134 | static struct mcidev_sysfs_group i7core_inject_addrmatch = { | 1145 | static const struct mcidev_sysfs_group i7core_inject_addrmatch = { |
1135 | .name = "inject_addrmatch", | 1146 | .name = "inject_addrmatch", |
1136 | .mcidev_attr = i7core_addrmatch_attrs, | 1147 | .mcidev_attr = i7core_addrmatch_attrs, |
1137 | }; | 1148 | }; |
1138 | 1149 | ||
1139 | static struct mcidev_sysfs_attribute i7core_udimm_counters_attrs[] = { | 1150 | static const struct mcidev_sysfs_attribute i7core_udimm_counters_attrs[] = { |
1140 | ATTR_COUNTER(0), | 1151 | ATTR_COUNTER(0), |
1141 | ATTR_COUNTER(1), | 1152 | ATTR_COUNTER(1), |
1142 | ATTR_COUNTER(2), | 1153 | ATTR_COUNTER(2), |
1143 | { .attr = { .name = NULL } } | 1154 | { .attr = { .name = NULL } } |
1144 | }; | 1155 | }; |
1145 | 1156 | ||
1146 | static struct mcidev_sysfs_group i7core_udimm_counters = { | 1157 | static const struct mcidev_sysfs_group i7core_udimm_counters = { |
1147 | .name = "all_channel_counts", | 1158 | .name = "all_channel_counts", |
1148 | .mcidev_attr = i7core_udimm_counters_attrs, | 1159 | .mcidev_attr = i7core_udimm_counters_attrs, |
1149 | }; | 1160 | }; |
1150 | 1161 | ||
1151 | static struct mcidev_sysfs_attribute i7core_sysfs_attrs[] = { | 1162 | static const struct mcidev_sysfs_attribute i7core_sysfs_rdimm_attrs[] = { |
1152 | { | 1163 | { |
1153 | .attr = { | 1164 | .attr = { |
1154 | .name = "inject_section", | 1165 | .name = "inject_section", |
@@ -1180,8 +1191,44 @@ static struct mcidev_sysfs_attribute i7core_sysfs_attrs[] = { | |||
1180 | .show = i7core_inject_enable_show, | 1191 | .show = i7core_inject_enable_show, |
1181 | .store = i7core_inject_enable_store, | 1192 | .store = i7core_inject_enable_store, |
1182 | }, | 1193 | }, |
1183 | { .attr = { .name = NULL } }, /* Reserved for udimm counters */ | 1194 | { } /* End of list */ |
1184 | { .attr = { .name = NULL } } | 1195 | }; |
1196 | |||
1197 | static const struct mcidev_sysfs_attribute i7core_sysfs_udimm_attrs[] = { | ||
1198 | { | ||
1199 | .attr = { | ||
1200 | .name = "inject_section", | ||
1201 | .mode = (S_IRUGO | S_IWUSR) | ||
1202 | }, | ||
1203 | .show = i7core_inject_section_show, | ||
1204 | .store = i7core_inject_section_store, | ||
1205 | }, { | ||
1206 | .attr = { | ||
1207 | .name = "inject_type", | ||
1208 | .mode = (S_IRUGO | S_IWUSR) | ||
1209 | }, | ||
1210 | .show = i7core_inject_type_show, | ||
1211 | .store = i7core_inject_type_store, | ||
1212 | }, { | ||
1213 | .attr = { | ||
1214 | .name = "inject_eccmask", | ||
1215 | .mode = (S_IRUGO | S_IWUSR) | ||
1216 | }, | ||
1217 | .show = i7core_inject_eccmask_show, | ||
1218 | .store = i7core_inject_eccmask_store, | ||
1219 | }, { | ||
1220 | .grp = &i7core_inject_addrmatch, | ||
1221 | }, { | ||
1222 | .attr = { | ||
1223 | .name = "inject_enable", | ||
1224 | .mode = (S_IRUGO | S_IWUSR) | ||
1225 | }, | ||
1226 | .show = i7core_inject_enable_show, | ||
1227 | .store = i7core_inject_enable_store, | ||
1228 | }, { | ||
1229 | .grp = &i7core_udimm_counters, | ||
1230 | }, | ||
1231 | { } /* End of list */ | ||
1185 | }; | 1232 | }; |
1186 | 1233 | ||
1187 | /**************************************************************************** | 1234 | /**************************************************************************** |
@@ -1189,7 +1236,7 @@ static struct mcidev_sysfs_attribute i7core_sysfs_attrs[] = { | |||
1189 | ****************************************************************************/ | 1236 | ****************************************************************************/ |
1190 | 1237 | ||
1191 | /* | 1238 | /* |
1192 | * i7core_put_devices 'put' all the devices that we have | 1239 | * i7core_put_all_devices 'put' all the devices that we have |
1193 | * reserved via 'get' | 1240 | * reserved via 'get' |
1194 | */ | 1241 | */ |
1195 | static void i7core_put_devices(struct i7core_dev *i7core_dev) | 1242 | static void i7core_put_devices(struct i7core_dev *i7core_dev) |
@@ -1206,25 +1253,25 @@ static void i7core_put_devices(struct i7core_dev *i7core_dev) | |||
1206 | PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn)); | 1253 | PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn)); |
1207 | pci_dev_put(pdev); | 1254 | pci_dev_put(pdev); |
1208 | } | 1255 | } |
1209 | kfree(i7core_dev->pdev); | ||
1210 | list_del(&i7core_dev->list); | ||
1211 | kfree(i7core_dev); | ||
1212 | } | 1256 | } |
1213 | 1257 | ||
1214 | static void i7core_put_all_devices(void) | 1258 | static void i7core_put_all_devices(void) |
1215 | { | 1259 | { |
1216 | struct i7core_dev *i7core_dev, *tmp; | 1260 | struct i7core_dev *i7core_dev, *tmp; |
1217 | 1261 | ||
1218 | list_for_each_entry_safe(i7core_dev, tmp, &i7core_edac_list, list) | 1262 | list_for_each_entry_safe(i7core_dev, tmp, &i7core_edac_list, list) { |
1219 | i7core_put_devices(i7core_dev); | 1263 | i7core_put_devices(i7core_dev); |
1264 | free_i7core_dev(i7core_dev); | ||
1265 | } | ||
1220 | } | 1266 | } |
1221 | 1267 | ||
1222 | static void __init i7core_xeon_pci_fixup(struct pci_id_table *table) | 1268 | static void __init i7core_xeon_pci_fixup(const struct pci_id_table *table) |
1223 | { | 1269 | { |
1224 | struct pci_dev *pdev = NULL; | 1270 | struct pci_dev *pdev = NULL; |
1225 | int i; | 1271 | int i; |
1272 | |||
1226 | /* | 1273 | /* |
1227 | * On Xeon 55xx, the Intel Quckpath Arch Generic Non-core pci buses | 1274 | * On Xeon 55xx, the Intel Quick Path Arch Generic Non-core pci buses |
1228 | * aren't announced by acpi. So, we need to use a legacy scan probing | 1275 | * aren't announced by acpi. So, we need to use a legacy scan probing |
1229 | * to detect them | 1276 | * to detect them |
1230 | */ | 1277 | */ |
@@ -1257,16 +1304,18 @@ static unsigned i7core_pci_lastbus(void) | |||
1257 | } | 1304 | } |
1258 | 1305 | ||
1259 | /* | 1306 | /* |
1260 | * i7core_get_devices Find and perform 'get' operation on the MCH's | 1307 | * i7core_get_all_devices Find and perform 'get' operation on the MCH's |
1261 | * device/functions we want to reference for this driver | 1308 | * device/functions we want to reference for this driver |
1262 | * | 1309 | * |
1263 | * Need to 'get' device 16 func 1 and func 2 | 1310 | * Need to 'get' device 16 func 1 and func 2 |
1264 | */ | 1311 | */ |
1265 | int i7core_get_onedevice(struct pci_dev **prev, int devno, | 1312 | static int i7core_get_onedevice(struct pci_dev **prev, |
1266 | struct pci_id_descr *dev_descr, unsigned n_devs, | 1313 | const struct pci_id_table *table, |
1267 | unsigned last_bus) | 1314 | const unsigned devno, |
1315 | const unsigned last_bus) | ||
1268 | { | 1316 | { |
1269 | struct i7core_dev *i7core_dev; | 1317 | struct i7core_dev *i7core_dev; |
1318 | const struct pci_id_descr *dev_descr = &table->descr[devno]; | ||
1270 | 1319 | ||
1271 | struct pci_dev *pdev = NULL; | 1320 | struct pci_dev *pdev = NULL; |
1272 | u8 bus = 0; | 1321 | u8 bus = 0; |
@@ -1275,20 +1324,6 @@ int i7core_get_onedevice(struct pci_dev **prev, int devno, | |||
1275 | pdev = pci_get_device(PCI_VENDOR_ID_INTEL, | 1324 | pdev = pci_get_device(PCI_VENDOR_ID_INTEL, |
1276 | dev_descr->dev_id, *prev); | 1325 | dev_descr->dev_id, *prev); |
1277 | 1326 | ||
1278 | /* | ||
1279 | * On Xeon 55xx, the Intel Quckpath Arch Generic Non-core regs | ||
1280 | * is at addr 8086:2c40, instead of 8086:2c41. So, we need | ||
1281 | * to probe for the alternate address in case of failure | ||
1282 | */ | ||
1283 | if (dev_descr->dev_id == PCI_DEVICE_ID_INTEL_I7_NONCORE && !pdev) | ||
1284 | pdev = pci_get_device(PCI_VENDOR_ID_INTEL, | ||
1285 | PCI_DEVICE_ID_INTEL_I7_NONCORE_ALT, *prev); | ||
1286 | |||
1287 | if (dev_descr->dev_id == PCI_DEVICE_ID_INTEL_LYNNFIELD_NONCORE && !pdev) | ||
1288 | pdev = pci_get_device(PCI_VENDOR_ID_INTEL, | ||
1289 | PCI_DEVICE_ID_INTEL_LYNNFIELD_NONCORE_ALT, | ||
1290 | *prev); | ||
1291 | |||
1292 | if (!pdev) { | 1327 | if (!pdev) { |
1293 | if (*prev) { | 1328 | if (*prev) { |
1294 | *prev = pdev; | 1329 | *prev = pdev; |
@@ -1315,18 +1350,11 @@ int i7core_get_onedevice(struct pci_dev **prev, int devno, | |||
1315 | 1350 | ||
1316 | i7core_dev = get_i7core_dev(socket); | 1351 | i7core_dev = get_i7core_dev(socket); |
1317 | if (!i7core_dev) { | 1352 | if (!i7core_dev) { |
1318 | i7core_dev = kzalloc(sizeof(*i7core_dev), GFP_KERNEL); | 1353 | i7core_dev = alloc_i7core_dev(socket, table); |
1319 | if (!i7core_dev) | 1354 | if (!i7core_dev) { |
1320 | return -ENOMEM; | 1355 | pci_dev_put(pdev); |
1321 | i7core_dev->pdev = kzalloc(sizeof(*i7core_dev->pdev) * n_devs, | ||
1322 | GFP_KERNEL); | ||
1323 | if (!i7core_dev->pdev) { | ||
1324 | kfree(i7core_dev); | ||
1325 | return -ENOMEM; | 1356 | return -ENOMEM; |
1326 | } | 1357 | } |
1327 | i7core_dev->socket = socket; | ||
1328 | i7core_dev->n_devs = n_devs; | ||
1329 | list_add_tail(&i7core_dev->list, &i7core_edac_list); | ||
1330 | } | 1358 | } |
1331 | 1359 | ||
1332 | if (i7core_dev->pdev[devno]) { | 1360 | if (i7core_dev->pdev[devno]) { |
@@ -1368,27 +1396,31 @@ int i7core_get_onedevice(struct pci_dev **prev, int devno, | |||
1368 | dev_descr->func, | 1396 | dev_descr->func, |
1369 | PCI_VENDOR_ID_INTEL, dev_descr->dev_id); | 1397 | PCI_VENDOR_ID_INTEL, dev_descr->dev_id); |
1370 | 1398 | ||
1399 | /* | ||
1400 | * As stated in drivers/pci/search.c, the reference count for | ||
1401 | * @from is always decremented if it is not %NULL. So, as we need | ||
1402 | * to get all devices up to NULL, we need to do a get for the device | ||
1403 | */ | ||
1404 | pci_dev_get(pdev); | ||
1405 | |||
1371 | *prev = pdev; | 1406 | *prev = pdev; |
1372 | 1407 | ||
1373 | return 0; | 1408 | return 0; |
1374 | } | 1409 | } |
1375 | 1410 | ||
1376 | static int i7core_get_devices(struct pci_id_table *table) | 1411 | static int i7core_get_all_devices(void) |
1377 | { | 1412 | { |
1378 | int i, rc, last_bus; | 1413 | int i, rc, last_bus; |
1379 | struct pci_dev *pdev = NULL; | 1414 | struct pci_dev *pdev = NULL; |
1380 | struct pci_id_descr *dev_descr; | 1415 | const struct pci_id_table *table = pci_dev_table; |
1381 | 1416 | ||
1382 | last_bus = i7core_pci_lastbus(); | 1417 | last_bus = i7core_pci_lastbus(); |
1383 | 1418 | ||
1384 | while (table && table->descr) { | 1419 | while (table && table->descr) { |
1385 | dev_descr = table->descr; | ||
1386 | for (i = 0; i < table->n_devs; i++) { | 1420 | for (i = 0; i < table->n_devs; i++) { |
1387 | pdev = NULL; | 1421 | pdev = NULL; |
1388 | do { | 1422 | do { |
1389 | rc = i7core_get_onedevice(&pdev, i, | 1423 | rc = i7core_get_onedevice(&pdev, table, i, |
1390 | &dev_descr[i], | ||
1391 | table->n_devs, | ||
1392 | last_bus); | 1424 | last_bus); |
1393 | if (rc < 0) { | 1425 | if (rc < 0) { |
1394 | if (i == 0) { | 1426 | if (i == 0) { |
@@ -1404,7 +1436,6 @@ static int i7core_get_devices(struct pci_id_table *table) | |||
1404 | } | 1436 | } |
1405 | 1437 | ||
1406 | return 0; | 1438 | return 0; |
1407 | return 0; | ||
1408 | } | 1439 | } |
1409 | 1440 | ||
1410 | static int mci_bind_devs(struct mem_ctl_info *mci, | 1441 | static int mci_bind_devs(struct mem_ctl_info *mci, |
@@ -1414,10 +1445,6 @@ static int mci_bind_devs(struct mem_ctl_info *mci, | |||
1414 | struct pci_dev *pdev; | 1445 | struct pci_dev *pdev; |
1415 | int i, func, slot; | 1446 | int i, func, slot; |
1416 | 1447 | ||
1417 | /* Associates i7core_dev and mci for future usage */ | ||
1418 | pvt->i7core_dev = i7core_dev; | ||
1419 | i7core_dev->mci = mci; | ||
1420 | |||
1421 | pvt->is_registered = 0; | 1448 | pvt->is_registered = 0; |
1422 | for (i = 0; i < i7core_dev->n_devs; i++) { | 1449 | for (i = 0; i < i7core_dev->n_devs; i++) { |
1423 | pdev = i7core_dev->pdev[i]; | 1450 | pdev = i7core_dev->pdev[i]; |
@@ -1448,15 +1475,6 @@ static int mci_bind_devs(struct mem_ctl_info *mci, | |||
1448 | pvt->is_registered = 1; | 1475 | pvt->is_registered = 1; |
1449 | } | 1476 | } |
1450 | 1477 | ||
1451 | /* | ||
1452 | * Add extra nodes to count errors on udimm | ||
1453 | * For registered memory, this is not needed, since the counters | ||
1454 | * are already displayed at the standard locations | ||
1455 | */ | ||
1456 | if (!pvt->is_registered) | ||
1457 | i7core_sysfs_attrs[ARRAY_SIZE(i7core_sysfs_attrs)-2].grp = | ||
1458 | &i7core_udimm_counters; | ||
1459 | |||
1460 | return 0; | 1478 | return 0; |
1461 | 1479 | ||
1462 | error: | 1480 | error: |
@@ -1470,7 +1488,9 @@ error: | |||
1470 | Error check routines | 1488 | Error check routines |
1471 | ****************************************************************************/ | 1489 | ****************************************************************************/ |
1472 | static void i7core_rdimm_update_csrow(struct mem_ctl_info *mci, | 1490 | static void i7core_rdimm_update_csrow(struct mem_ctl_info *mci, |
1473 | int chan, int dimm, int add) | 1491 | const int chan, |
1492 | const int dimm, | ||
1493 | const int add) | ||
1474 | { | 1494 | { |
1475 | char *msg; | 1495 | char *msg; |
1476 | struct i7core_pvt *pvt = mci->pvt_info; | 1496 | struct i7core_pvt *pvt = mci->pvt_info; |
@@ -1487,7 +1507,10 @@ static void i7core_rdimm_update_csrow(struct mem_ctl_info *mci, | |||
1487 | } | 1507 | } |
1488 | 1508 | ||
1489 | static void i7core_rdimm_update_ce_count(struct mem_ctl_info *mci, | 1509 | static void i7core_rdimm_update_ce_count(struct mem_ctl_info *mci, |
1490 | int chan, int new0, int new1, int new2) | 1510 | const int chan, |
1511 | const int new0, | ||
1512 | const int new1, | ||
1513 | const int new2) | ||
1491 | { | 1514 | { |
1492 | struct i7core_pvt *pvt = mci->pvt_info; | 1515 | struct i7core_pvt *pvt = mci->pvt_info; |
1493 | int add0 = 0, add1 = 0, add2 = 0; | 1516 | int add0 = 0, add1 = 0, add2 = 0; |
@@ -1641,7 +1664,7 @@ static void i7core_udimm_check_mc_ecc_err(struct mem_ctl_info *mci) | |||
1641 | * fields | 1664 | * fields |
1642 | */ | 1665 | */ |
1643 | static void i7core_mce_output_error(struct mem_ctl_info *mci, | 1666 | static void i7core_mce_output_error(struct mem_ctl_info *mci, |
1644 | struct mce *m) | 1667 | const struct mce *m) |
1645 | { | 1668 | { |
1646 | struct i7core_pvt *pvt = mci->pvt_info; | 1669 | struct i7core_pvt *pvt = mci->pvt_info; |
1647 | char *type, *optype, *err, *msg; | 1670 | char *type, *optype, *err, *msg; |
@@ -1749,7 +1772,7 @@ static void i7core_check_error(struct mem_ctl_info *mci) | |||
1749 | /* | 1772 | /* |
1750 | * MCE first step: Copy all mce errors into a temporary buffer | 1773 | * MCE first step: Copy all mce errors into a temporary buffer |
1751 | * We use a double buffering here, to reduce the risk of | 1774 | * We use a double buffering here, to reduce the risk of |
1752 | * loosing an error. | 1775 | * losing an error. |
1753 | */ | 1776 | */ |
1754 | smp_rmb(); | 1777 | smp_rmb(); |
1755 | count = (pvt->mce_out + MCE_LOG_LEN - pvt->mce_in) | 1778 | count = (pvt->mce_out + MCE_LOG_LEN - pvt->mce_in) |
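
The count computed here is the standard occupancy formula for a wrap-around buffer: adding MCE_LOG_LEN before the modulo keeps the subtraction non-negative when the leading index has wrapped past the trailing one. The arithmetic in isolation:

    #include <stdio.h>

    #define MCE_LOG_LEN 32

    static unsigned queued(unsigned head, unsigned tail)
    {
        /* entries between tail and head, wrap handled by the +LEN */
        return (head + MCE_LOG_LEN - tail) % MCE_LOG_LEN;
    }

    int main(void)
    {
        printf("%u\n", queued(10, 4));   /* no wrap: 6 entries */
        printf("%u\n", queued(3, 30));   /* wrapped:  5 entries */
        return 0;
    }
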
@@ -1841,32 +1864,89 @@ static int i7core_mce_check_error(void *priv, struct mce *mce) | |||
1841 | if (mce->mcgstatus & 1) | 1864 | if (mce->mcgstatus & 1) |
1842 | i7core_check_error(mci); | 1865 | i7core_check_error(mci); |
1843 | 1866 | ||
1844 | /* Advice mcelog that the error were handled */ | 1867 | /* Advise mcelog that the errors were handled */ |
1845 | return 1; | 1868 | return 1; |
1846 | } | 1869 | } |
1847 | 1870 | ||
1848 | static int i7core_register_mci(struct i7core_dev *i7core_dev, | 1871 | static void i7core_pci_ctl_create(struct i7core_pvt *pvt) |
1849 | int num_channels, int num_csrows) | 1872 | { |
1873 | pvt->i7core_pci = edac_pci_create_generic_ctl( | ||
1874 | &pvt->i7core_dev->pdev[0]->dev, | ||
1875 | EDAC_MOD_STR); | ||
1876 | if (unlikely(!pvt->i7core_pci)) | ||
1877 | pr_warn("Unable to setup PCI error report via EDAC\n"); | ||
1878 | } | ||
1879 | |||
1880 | static void i7core_pci_ctl_release(struct i7core_pvt *pvt) | ||
1881 | { | ||
1882 | if (likely(pvt->i7core_pci)) | ||
1883 | edac_pci_release_generic_ctl(pvt->i7core_pci); | ||
1884 | else | ||
1885 | i7core_printk(KERN_ERR, | ||
1886 | "Couldn't find mem_ctl_info for socket %d\n", | ||
1887 | pvt->i7core_dev->socket); | ||
1888 | pvt->i7core_pci = NULL; | ||
1889 | } | ||
1890 | |||
1891 | static void i7core_unregister_mci(struct i7core_dev *i7core_dev) | ||
1892 | { | ||
1893 | struct mem_ctl_info *mci = i7core_dev->mci; | ||
1894 | struct i7core_pvt *pvt; | ||
1895 | |||
1896 | if (unlikely(!mci || !mci->pvt_info)) { | ||
1897 | debugf0("MC: " __FILE__ ": %s(): dev = %p\n", | ||
1898 | __func__, &i7core_dev->pdev[0]->dev); | ||
1899 | |||
1900 | i7core_printk(KERN_ERR, "Couldn't find mci handler\n"); | ||
1901 | return; | ||
1902 | } | ||
1903 | |||
1904 | pvt = mci->pvt_info; | ||
1905 | |||
1906 | debugf0("MC: " __FILE__ ": %s(): mci = %p, dev = %p\n", | ||
1907 | __func__, mci, &i7core_dev->pdev[0]->dev); | ||
1908 | |||
1909 | /* Disable MCE NMI handler */ | ||
1910 | edac_mce_unregister(&pvt->edac_mce); | ||
1911 | |||
1912 | /* Disable EDAC polling */ | ||
1913 | i7core_pci_ctl_release(pvt); | ||
1914 | |||
1915 | /* Remove MC sysfs nodes */ | ||
1916 | edac_mc_del_mc(mci->dev); | ||
1917 | |||
1918 | debugf1("%s: free mci struct\n", mci->ctl_name); | ||
1919 | kfree(mci->ctl_name); | ||
1920 | edac_mc_free(mci); | ||
1921 | i7core_dev->mci = NULL; | ||
1922 | } | ||
1923 | |||
1924 | static int i7core_register_mci(struct i7core_dev *i7core_dev) | ||
1850 | { | 1925 | { |
1851 | struct mem_ctl_info *mci; | 1926 | struct mem_ctl_info *mci; |
1852 | struct i7core_pvt *pvt; | 1927 | struct i7core_pvt *pvt; |
1853 | int csrow = 0; | 1928 | int rc, channels, csrows; |
1854 | int rc; | 1929 | |
1930 | /* Check the number of active and not disabled channels */ | ||
1931 | rc = i7core_get_active_channels(i7core_dev->socket, &channels, &csrows); | ||
1932 | if (unlikely(rc < 0)) | ||
1933 | return rc; | ||
1855 | 1934 | ||
1856 | /* allocate a new MC control structure */ | 1935 | /* allocate a new MC control structure */ |
1857 | mci = edac_mc_alloc(sizeof(*pvt), num_csrows, num_channels, | 1936 | mci = edac_mc_alloc(sizeof(*pvt), csrows, channels, i7core_dev->socket); |
1858 | i7core_dev->socket); | ||
1859 | if (unlikely(!mci)) | 1937 | if (unlikely(!mci)) |
1860 | return -ENOMEM; | 1938 | return -ENOMEM; |
1861 | 1939 | ||
1862 | debugf0("MC: " __FILE__ ": %s(): mci = %p\n", __func__, mci); | 1940 | debugf0("MC: " __FILE__ ": %s(): mci = %p, dev = %p\n", |
1863 | 1941 | __func__, mci, &i7core_dev->pdev[0]->dev); | |
1864 | /* record ptr to the generic device */ | ||
1865 | mci->dev = &i7core_dev->pdev[0]->dev; | ||
1866 | 1942 | ||
1867 | pvt = mci->pvt_info; | 1943 | pvt = mci->pvt_info; |
1868 | memset(pvt, 0, sizeof(*pvt)); | 1944 | memset(pvt, 0, sizeof(*pvt)); |
1869 | 1945 | ||
1946 | /* Associates i7core_dev and mci for future usage */ | ||
1947 | pvt->i7core_dev = i7core_dev; | ||
1948 | i7core_dev->mci = mci; | ||
1949 | |||
1870 | /* | 1950 | /* |
1871 | * FIXME: how to handle RDDR3 at MCI level? It is possible to have | 1951 | * FIXME: how to handle RDDR3 at MCI level? It is possible to have |
1872 | * Mixed RDDR3/UDDR3 with Nehalem, provided that they are on different | 1952 | * Mixed RDDR3/UDDR3 with Nehalem, provided that they are on different |
@@ -1881,17 +1961,23 @@ static int i7core_register_mci(struct i7core_dev *i7core_dev, | |||
1881 | i7core_dev->socket); | 1961 | i7core_dev->socket); |
1882 | mci->dev_name = pci_name(i7core_dev->pdev[0]); | 1962 | mci->dev_name = pci_name(i7core_dev->pdev[0]); |
1883 | mci->ctl_page_to_phys = NULL; | 1963 | mci->ctl_page_to_phys = NULL; |
1884 | mci->mc_driver_sysfs_attributes = i7core_sysfs_attrs; | ||
1885 | /* Set the function pointer to an actual operation function */ | ||
1886 | mci->edac_check = i7core_check_error; | ||
1887 | 1964 | ||
1888 | /* Store pci devices at mci for faster access */ | 1965 | /* Store pci devices at mci for faster access */ |
1889 | rc = mci_bind_devs(mci, i7core_dev); | 1966 | rc = mci_bind_devs(mci, i7core_dev); |
1890 | if (unlikely(rc < 0)) | 1967 | if (unlikely(rc < 0)) |
1891 | goto fail; | 1968 | goto fail0; |
1969 | |||
1970 | if (pvt->is_registered) | ||
1971 | mci->mc_driver_sysfs_attributes = i7core_sysfs_rdimm_attrs; | ||
1972 | else | ||
1973 | mci->mc_driver_sysfs_attributes = i7core_sysfs_udimm_attrs; | ||
1892 | 1974 | ||
1893 | /* Get dimm basic config */ | 1975 | /* Get dimm basic config */ |
1894 | get_dimm_config(mci, &csrow); | 1976 | get_dimm_config(mci); |
1977 | /* record ptr to the generic device */ | ||
1978 | mci->dev = &i7core_dev->pdev[0]->dev; | ||
1979 | /* Set the function pointer to an actual operation function */ | ||
1980 | mci->edac_check = i7core_check_error; | ||
1895 | 1981 | ||
1896 | /* add this new MC control structure to EDAC's list of MCs */ | 1982 | /* add this new MC control structure to EDAC's list of MCs */ |
1897 | if (unlikely(edac_mc_add_mc(mci))) { | 1983 | if (unlikely(edac_mc_add_mc(mci))) { |
@@ -1902,19 +1988,7 @@ static int i7core_register_mci(struct i7core_dev *i7core_dev, | |||
1902 | */ | 1988 | */ |
1903 | 1989 | ||
1904 | rc = -EINVAL; | 1990 | rc = -EINVAL; |
1905 | goto fail; | 1991 | goto fail0; |
1906 | } | ||
1907 | |||
1908 | /* allocating generic PCI control info */ | ||
1909 | i7core_pci = edac_pci_create_generic_ctl(&i7core_dev->pdev[0]->dev, | ||
1910 | EDAC_MOD_STR); | ||
1911 | if (unlikely(!i7core_pci)) { | ||
1912 | printk(KERN_WARNING | ||
1913 | "%s(): Unable to create PCI control\n", | ||
1914 | __func__); | ||
1915 | printk(KERN_WARNING | ||
1916 | "%s(): PCI error report via EDAC not setup\n", | ||
1917 | __func__); | ||
1918 | } | 1992 | } |
1919 | 1993 | ||
1920 | /* Default error mask is any memory */ | 1994 | /* Default error mask is any memory */ |
@@ -1925,19 +1999,28 @@ static int i7core_register_mci(struct i7core_dev *i7core_dev, | |||
1925 | pvt->inject.page = -1; | 1999 | pvt->inject.page = -1; |
1926 | pvt->inject.col = -1; | 2000 | pvt->inject.col = -1; |
1927 | 2001 | ||
2002 | /* allocating generic PCI control info */ | ||
2003 | i7core_pci_ctl_create(pvt); | ||
2004 | |||
1928 | /* Registers on edac_mce in order to receive memory errors */ | 2005 | /* Registers on edac_mce in order to receive memory errors */ |
1929 | pvt->edac_mce.priv = mci; | 2006 | pvt->edac_mce.priv = mci; |
1930 | pvt->edac_mce.check_error = i7core_mce_check_error; | 2007 | pvt->edac_mce.check_error = i7core_mce_check_error; |
1931 | |||
1932 | rc = edac_mce_register(&pvt->edac_mce); | 2008 | rc = edac_mce_register(&pvt->edac_mce); |
1933 | if (unlikely(rc < 0)) { | 2009 | if (unlikely(rc < 0)) { |
1934 | debugf0("MC: " __FILE__ | 2010 | debugf0("MC: " __FILE__ |
1935 | ": %s(): failed edac_mce_register()\n", __func__); | 2011 | ": %s(): failed edac_mce_register()\n", __func__); |
2012 | goto fail1; | ||
1936 | } | 2013 | } |
1937 | 2014 | ||
1938 | fail: | 2015 | return 0; |
1939 | if (rc < 0) | 2016 | |
1940 | edac_mc_free(mci); | 2017 | fail1: |
2018 | i7core_pci_ctl_release(pvt); | ||
2019 | edac_mc_del_mc(mci->dev); | ||
2020 | fail0: | ||
2021 | kfree(mci->ctl_name); | ||
2022 | edac_mc_free(mci); | ||
2023 | i7core_dev->mci = NULL; | ||
1941 | return rc; | 2024 | return rc; |
1942 | } | 2025 | } |
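
The reworked error path follows the usual kernel unwind idiom: success falls straight through to return 0, and each fail label undoes exactly the state established before the jump to it, in reverse order. A compilable skeleton of the pattern (all step/undo functions are stand-ins):

    static int step_a(void) { return 0; }   /* stand-in setup steps */
    static int step_b(void) { return 0; }
    static int step_c(void) { return -1; }
    static void undo_b(void) { }
    static void undo_a(void) { }

    int setup(void)
    {
        int rc = step_a();

        if (rc < 0)
            return rc;          /* nothing to unwind yet */
        rc = step_b();
        if (rc < 0)
            goto fail_a;
        rc = step_c();
        if (rc < 0)
            goto fail_b;
        return 0;

    fail_b:
        undo_b();               /* falls through: later state first */
    fail_a:
        undo_a();
        return rc;
    }
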
1943 | 2026 | ||
@@ -1949,8 +2032,6 @@ fail: | |||
1949 | * < 0 for error code | 2032 | * < 0 for error code |
1950 | */ | 2033 | */ |
1951 | 2034 | ||
1952 | static int probed = 0; | ||
1953 | |||
1954 | static int __devinit i7core_probe(struct pci_dev *pdev, | 2035 | static int __devinit i7core_probe(struct pci_dev *pdev, |
1955 | const struct pci_device_id *id) | 2036 | const struct pci_device_id *id) |
1956 | { | 2037 | { |
@@ -1965,25 +2046,16 @@ static int __devinit i7core_probe(struct pci_dev *pdev, | |||
1965 | */ | 2046 | */ |
1966 | if (unlikely(probed >= 1)) { | 2047 | if (unlikely(probed >= 1)) { |
1967 | mutex_unlock(&i7core_edac_lock); | 2048 | mutex_unlock(&i7core_edac_lock); |
1968 | return -EINVAL; | 2049 | return -ENODEV; |
1969 | } | 2050 | } |
1970 | probed++; | 2051 | probed++; |
1971 | 2052 | ||
1972 | rc = i7core_get_devices(pci_dev_table); | 2053 | rc = i7core_get_all_devices(); |
1973 | if (unlikely(rc < 0)) | 2054 | if (unlikely(rc < 0)) |
1974 | goto fail0; | 2055 | goto fail0; |
1975 | 2056 | ||
1976 | list_for_each_entry(i7core_dev, &i7core_edac_list, list) { | 2057 | list_for_each_entry(i7core_dev, &i7core_edac_list, list) { |
1977 | int channels; | 2058 | rc = i7core_register_mci(i7core_dev); |
1978 | int csrows; | ||
1979 | |||
1980 | /* Check the number of active and not disabled channels */ | ||
1981 | rc = i7core_get_active_channels(i7core_dev->socket, | ||
1982 | &channels, &csrows); | ||
1983 | if (unlikely(rc < 0)) | ||
1984 | goto fail1; | ||
1985 | |||
1986 | rc = i7core_register_mci(i7core_dev, channels, csrows); | ||
1987 | if (unlikely(rc < 0)) | 2059 | if (unlikely(rc < 0)) |
1988 | goto fail1; | 2060 | goto fail1; |
1989 | } | 2061 | } |
@@ -1994,6 +2066,9 @@ static int __devinit i7core_probe(struct pci_dev *pdev, | |||
1994 | return 0; | 2066 | return 0; |
1995 | 2067 | ||
1996 | fail1: | 2068 | fail1: |
2069 | list_for_each_entry(i7core_dev, &i7core_edac_list, list) | ||
2070 | i7core_unregister_mci(i7core_dev); | ||
2071 | |||
1997 | i7core_put_all_devices(); | 2072 | i7core_put_all_devices(); |
1998 | fail0: | 2073 | fail0: |
1999 | mutex_unlock(&i7core_edac_lock); | 2074 | mutex_unlock(&i7core_edac_lock); |
@@ -2006,14 +2081,10 @@ fail0: | |||
2006 | */ | 2081 | */ |
2007 | static void __devexit i7core_remove(struct pci_dev *pdev) | 2082 | static void __devexit i7core_remove(struct pci_dev *pdev) |
2008 | { | 2083 | { |
2009 | struct mem_ctl_info *mci; | 2084 | struct i7core_dev *i7core_dev; |
2010 | struct i7core_dev *i7core_dev, *tmp; | ||
2011 | 2085 | ||
2012 | debugf0(__FILE__ ": %s()\n", __func__); | 2086 | debugf0(__FILE__ ": %s()\n", __func__); |
2013 | 2087 | ||
2014 | if (i7core_pci) | ||
2015 | edac_pci_release_generic_ctl(i7core_pci); | ||
2016 | |||
2017 | /* | 2088 | /* |
2018 | * we have trouble here: pdev value for removal will be wrong, since | 2089 | * we have trouble here: pdev value for removal will be wrong, since |
2019 | * it will point to the X58 register used to detect that the machine | 2090 | * it will point to the X58 register used to detect that the machine |
@@ -2023,22 +2094,18 @@ static void __devexit i7core_remove(struct pci_dev *pdev) | |||
2023 | */ | 2094 | */ |
2024 | 2095 | ||
2025 | mutex_lock(&i7core_edac_lock); | 2096 | mutex_lock(&i7core_edac_lock); |
2026 | list_for_each_entry_safe(i7core_dev, tmp, &i7core_edac_list, list) { | 2097 | |
2027 | mci = edac_mc_del_mc(&i7core_dev->pdev[0]->dev); | 2098 | if (unlikely(!probed)) { |
2028 | if (mci) { | 2099 | mutex_unlock(&i7core_edac_lock); |
2029 | struct i7core_pvt *pvt = mci->pvt_info; | 2100 | return; |
2030 | |||
2031 | i7core_dev = pvt->i7core_dev; | ||
2032 | edac_mce_unregister(&pvt->edac_mce); | ||
2033 | kfree(mci->ctl_name); | ||
2034 | edac_mc_free(mci); | ||
2035 | i7core_put_devices(i7core_dev); | ||
2036 | } else { | ||
2037 | i7core_printk(KERN_ERR, | ||
2038 | "Couldn't find mci for socket %d\n", | ||
2039 | i7core_dev->socket); | ||
2040 | } | ||
2041 | } | 2101 | } |
2102 | |||
2103 | list_for_each_entry(i7core_dev, &i7core_edac_list, list) | ||
2104 | i7core_unregister_mci(i7core_dev); | ||
2105 | |||
2106 | /* Release PCI resources */ | ||
2107 | i7core_put_all_devices(); | ||
2108 | |||
2042 | probed--; | 2109 | probed--; |
2043 | 2110 | ||
2044 | mutex_unlock(&i7core_edac_lock); | 2111 | mutex_unlock(&i7core_edac_lock); |
@@ -2070,7 +2137,8 @@ static int __init i7core_init(void) | |||
2070 | /* Ensure that the OPSTATE is set correctly for POLL or NMI */ | 2137 | /* Ensure that the OPSTATE is set correctly for POLL or NMI */ |
2071 | opstate_init(); | 2138 | opstate_init(); |
2072 | 2139 | ||
2073 | i7core_xeon_pci_fixup(pci_dev_table); | 2140 | if (use_pci_fixup) |
2141 | i7core_xeon_pci_fixup(pci_dev_table); | ||
2074 | 2142 | ||
2075 | pci_rc = pci_register_driver(&i7core_driver); | 2143 | pci_rc = pci_register_driver(&i7core_driver); |
2076 | 2144 | ||
diff --git a/drivers/edac/i82443bxgx_edac.c b/drivers/edac/i82443bxgx_edac.c index a2fa1feed724..4329d39f902c 100644 --- a/drivers/edac/i82443bxgx_edac.c +++ b/drivers/edac/i82443bxgx_edac.c | |||
@@ -12,7 +12,7 @@ | |||
12 | * 440GX fix by Jason Uhlenkott <juhlenko@akamai.com>. | 12 | * 440GX fix by Jason Uhlenkott <juhlenko@akamai.com>. |
13 | * | 13 | * |
14 | * Written with reference to 82443BX Host Bridge Datasheet: | 14 | * Written with reference to 82443BX Host Bridge Datasheet: |
15 | * http://www.intel.com/design/chipsets/440/documentation.htm | 15 | * http://download.intel.com/design/chipsets/datashts/29063301.pdf |
16 | * references to this document given in []. | 16 | * references to this document given in []. |
17 | * | 17 | * |
18 | * This module doesn't support the 440LX, but it may be possible to | 18 | * This module doesn't support the 440LX, but it may be possible to |
@@ -203,7 +203,7 @@ static void i82443bxgx_init_csrows(struct mem_ctl_info *mci, | |||
203 | row_high_limit = ((u32) drbar << 23); | 203 | row_high_limit = ((u32) drbar << 23); |
204 | /* find the DRAM Chip Select Base address and mask */ | 204 | /* find the DRAM Chip Select Base address and mask */ |
205 | debugf1("MC%d: %s: %s() Row=%d, " | 205 | debugf1("MC%d: %s: %s() Row=%d, " |
206 | "Boundry Address=%#0x, Last = %#0x\n", | 206 | "Boundary Address=%#0x, Last = %#0x\n", |
207 | mci->mc_idx, __FILE__, __func__, index, row_high_limit, | 207 | mci->mc_idx, __FILE__, __func__, index, row_high_limit, |
208 | row_high_limit_last); | 208 | row_high_limit_last); |
209 | 209 | ||
@@ -305,7 +305,7 @@ static int i82443bxgx_edacmc_probe1(struct pci_dev *pdev, int dev_idx) | |||
305 | i82443bxgx_init_csrows(mci, pdev, edac_mode, mtype); | 305 | i82443bxgx_init_csrows(mci, pdev, edac_mode, mtype); |
306 | 306 | ||
307 | /* Many BIOSes don't clear error flags on boot, so do this | 307 | /* Many BIOSes don't clear error flags on boot, so do this |
308 | * here, or we get "phantom" errors occuring at module-load | 308 | * here, or we get "phantom" errors occurring at module-load |
309 | * time. */ | 309 | * time. */ |
310 | pci_write_bits32(pdev, I82443BXGX_EAP, | 310 | pci_write_bits32(pdev, I82443BXGX_EAP, |
311 | (I82443BXGX_EAP_OFFSET_SBE | | 311 | (I82443BXGX_EAP_OFFSET_SBE | |
diff --git a/drivers/edac/i82860_edac.c b/drivers/edac/i82860_edac.c index b8a95cf50718..931a05775049 100644 --- a/drivers/edac/i82860_edac.c +++ b/drivers/edac/i82860_edac.c | |||
@@ -16,7 +16,7 @@ | |||
16 | #include <linux/edac.h> | 16 | #include <linux/edac.h> |
17 | #include "edac_core.h" | 17 | #include "edac_core.h" |
18 | 18 | ||
19 | #define I82860_REVISION " Ver: 2.0.2 " __DATE__ | 19 | #define I82860_REVISION " Ver: 2.0.2" |
20 | #define EDAC_MOD_STR "i82860_edac" | 20 | #define EDAC_MOD_STR "i82860_edac" |
21 | 21 | ||
22 | #define i82860_printk(level, fmt, arg...) \ | 22 | #define i82860_printk(level, fmt, arg...) \ |
diff --git a/drivers/edac/i82875p_edac.c b/drivers/edac/i82875p_edac.c index b2fd1e899142..33864c63c684 100644 --- a/drivers/edac/i82875p_edac.c +++ b/drivers/edac/i82875p_edac.c | |||
@@ -20,7 +20,7 @@ | |||
20 | #include <linux/edac.h> | 20 | #include <linux/edac.h> |
21 | #include "edac_core.h" | 21 | #include "edac_core.h" |
22 | 22 | ||
23 | #define I82875P_REVISION " Ver: 2.0.2 " __DATE__ | 23 | #define I82875P_REVISION " Ver: 2.0.2" |
24 | #define EDAC_MOD_STR "i82875p_edac" | 24 | #define EDAC_MOD_STR "i82875p_edac" |
25 | 25 | ||
26 | #define i82875p_printk(level, fmt, arg...) \ | 26 | #define i82875p_printk(level, fmt, arg...) \ |
diff --git a/drivers/edac/i82975x_edac.c b/drivers/edac/i82975x_edac.c index 3218819b7286..a5da732fe5b2 100644 --- a/drivers/edac/i82975x_edac.c +++ b/drivers/edac/i82975x_edac.c | |||
@@ -16,7 +16,7 @@ | |||
16 | #include <linux/edac.h> | 16 | #include <linux/edac.h> |
17 | #include "edac_core.h" | 17 | #include "edac_core.h" |
18 | 18 | ||
19 | #define I82975X_REVISION " Ver: 1.0.0 " __DATE__ | 19 | #define I82975X_REVISION " Ver: 1.0.0" |
20 | #define EDAC_MOD_STR "i82975x_edac" | 20 | #define EDAC_MOD_STR "i82975x_edac" |
21 | 21 | ||
22 | #define i82975x_printk(level, fmt, arg...) \ | 22 | #define i82975x_printk(level, fmt, arg...) \ |
@@ -160,8 +160,8 @@ NOTE: Only ONE of the three must be enabled | |||
160 | * 3:2 Rank 1 architecture | 160 | * 3:2 Rank 1 architecture |
161 | * 1:0 Rank 0 architecture | 161 | * 1:0 Rank 0 architecture |
162 | * | 162 | * |
163 | * 00 => x16 devices; i.e 4 banks | 163 | * 00 => 4 banks |
164 | * 01 => x8 devices; i.e 8 banks | 164 | * 01 => 8 banks |
165 | */ | 165 | */ |
166 | #define I82975X_C0BNKARC 0x10e | 166 | #define I82975X_C0BNKARC 0x10e |
167 | #define I82975X_C1BNKARC 0x18e | 167 | #define I82975X_C1BNKARC 0x18e |
@@ -278,6 +278,7 @@ static int i82975x_process_error_info(struct mem_ctl_info *mci, | |||
278 | struct i82975x_error_info *info, int handle_errors) | 278 | struct i82975x_error_info *info, int handle_errors) |
279 | { | 279 | { |
280 | int row, multi_chan, chan; | 280 | int row, multi_chan, chan; |
281 | unsigned long offst, page; | ||
281 | 282 | ||
282 | multi_chan = mci->csrows[0].nr_channels - 1; | 283 | multi_chan = mci->csrows[0].nr_channels - 1; |
283 | 284 | ||
@@ -292,17 +293,19 @@ static int i82975x_process_error_info(struct mem_ctl_info *mci, | |||
292 | info->errsts = info->errsts2; | 293 | info->errsts = info->errsts2; |
293 | } | 294 | } |
294 | 295 | ||
295 | chan = info->eap & 1; | 296 | page = (unsigned long) info->eap; |
296 | info->eap >>= 1; | 297 | if (info->xeap & 1) |
297 | if (info->xeap ) | 298 | page |= 0x100000000ul; |
298 | info->eap |= 0x80000000; | 299 | chan = page & 1; |
299 | info->eap >>= PAGE_SHIFT; | 300 | page >>= 1; |
300 | row = edac_mc_find_csrow_by_page(mci, info->eap); | 301 | offst = page & ((1 << PAGE_SHIFT) - 1); |
302 | page >>= PAGE_SHIFT; | ||
303 | row = edac_mc_find_csrow_by_page(mci, page); | ||
301 | 304 | ||
302 | if (info->errsts & 0x0002) | 305 | if (info->errsts & 0x0002) |
303 | edac_mc_handle_ue(mci, info->eap, 0, row, "i82975x UE"); | 306 | edac_mc_handle_ue(mci, page, offst, row, "i82975x UE"); |
304 | else | 307 | else |
305 | edac_mc_handle_ce(mci, info->eap, 0, info->derrsyn, row, | 308 | edac_mc_handle_ce(mci, page, offst, info->derrsyn, row, |
306 | multi_chan ? chan : 0, | 309 | multi_chan ? chan : 0, |
307 | "i82975x CE"); | 310 | "i82975x CE"); |
308 | 311 | ||
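
The replaced block widens the error address before slicing it: bit 0 of XEAP supplies address bit 32, the low bit of the widened address selects the channel, and the remainder splits into page and offset at PAGE_SHIFT. The same bit-slicing on a made-up register value:

    #include <stdio.h>

    #define PAGE_SHIFT 12

    int main(void)
    {
        unsigned long long eap  = 0x12345679;  /* 32-bit error address (made up) */
        unsigned int       xeap = 1;           /* bit 0 extends the address to bit 32 */
        unsigned long long addr = eap;
        unsigned long long page, offst;
        unsigned int chan;

        if (xeap & 1)
            addr |= 1ULL << 32;                      /* as in the new code above */
        chan  = addr & 1;                            /* LSB picks the channel */
        addr >>= 1;
        offst = addr & ((1ULL << PAGE_SHIFT) - 1);   /* offset within the page */
        page  = addr >> PAGE_SHIFT;

        printf("chan=%u page=%#llx offset=%#llx\n", chan, page, offst);
        return 0;
    }
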
@@ -344,11 +347,7 @@ static int dual_channel_active(void __iomem *mch_window) | |||
344 | static enum dev_type i82975x_dram_type(void __iomem *mch_window, int rank) | 347 | static enum dev_type i82975x_dram_type(void __iomem *mch_window, int rank) |
345 | { | 348 | { |
346 | /* | 349 | /* |
347 | * ASUS P5W DH either does not program this register or programs | 350 | * ECC is possible on i82975x ONLY with DEV_X8 |
348 | * it wrong! | ||
349 | * ECC is possible on i92975x ONLY with DEV_X8 which should mean 'val' | ||
350 | * for each rank should be 01b - the LSB of the word should be 0x55; | ||
351 | * but it reads 0! | ||
352 | */ | 351 | */ |
353 | return DEV_X8; | 352 | return DEV_X8; |
354 | } | 353 | } |
@@ -356,11 +355,15 @@ static enum dev_type i82975x_dram_type(void __iomem *mch_window, int rank) | |||
356 | static void i82975x_init_csrows(struct mem_ctl_info *mci, | 355 | static void i82975x_init_csrows(struct mem_ctl_info *mci, |
357 | struct pci_dev *pdev, void __iomem *mch_window) | 356 | struct pci_dev *pdev, void __iomem *mch_window) |
358 | { | 357 | { |
358 | static const char *labels[4] = { | ||
359 | "DIMM A1", "DIMM A2", | ||
360 | "DIMM B1", "DIMM B2" | ||
361 | }; | ||
359 | struct csrow_info *csrow; | 362 | struct csrow_info *csrow; |
360 | unsigned long last_cumul_size; | 363 | unsigned long last_cumul_size; |
361 | u8 value; | 364 | u8 value; |
362 | u32 cumul_size; | 365 | u32 cumul_size; |
363 | int index; | 366 | int index, chan; |
364 | 367 | ||
365 | last_cumul_size = 0; | 368 | last_cumul_size = 0; |
366 | 369 | ||
@@ -369,11 +372,7 @@ static void i82975x_init_csrows(struct mem_ctl_info *mci, | |||
369 | * The dram row boundary (DRB) reg values are boundary address | 372 | * The dram row boundary (DRB) reg values are boundary address |
370 | * for each DRAM row with a granularity of 32 or 64MB (single/dual | 373 | * for each DRAM row with a granularity of 32 or 64MB (single/dual |
371 | * channel operation). DRB regs are cumulative; therefore DRB7 will | 374 | * channel operation). DRB regs are cumulative; therefore DRB7 will |
372 | * contain the total memory contained in all eight rows. | 375 | * contain the total memory contained in all rows. |
373 | * | ||
374 | * FIXME: | ||
375 | * EDAC currently works for Dual-channel Interleaved configuration. | ||
376 | * Other configurations, which the chip supports, need fixing/testing. | ||
377 | * | 376 | * |
378 | */ | 377 | */ |
379 | 378 | ||
@@ -384,8 +383,26 @@ static void i82975x_init_csrows(struct mem_ctl_info *mci, | |||
384 | ((index >= 4) ? 0x80 : 0)); | 383 | ((index >= 4) ? 0x80 : 0)); |
385 | cumul_size = value; | 384 | cumul_size = value; |
386 | cumul_size <<= (I82975X_DRB_SHIFT - PAGE_SHIFT); | 385 | cumul_size <<= (I82975X_DRB_SHIFT - PAGE_SHIFT); |
386 | /* | ||
387 | * Adjust cumul_size w.r.t number of channels | ||
388 | * | ||
389 | */ | ||
390 | if (csrow->nr_channels > 1) | ||
391 | cumul_size <<= 1; | ||
387 | debugf3("%s(): (%d) cumul_size 0x%x\n", __func__, index, | 392 | debugf3("%s(): (%d) cumul_size 0x%x\n", __func__, index, |
388 | cumul_size); | 393 | cumul_size); |
394 | |||
395 | /* | ||
396 | * Initialise dram labels | ||
397 | * index values: | ||
398 | * [0-7] for single-channel; i.e. csrow->nr_channels = 1 | ||
399 | * [0-3] for dual-channel; i.e. csrow->nr_channels = 2 | ||
400 | */ | ||
401 | for (chan = 0; chan < csrow->nr_channels; chan++) | ||
402 | strncpy(csrow->channels[chan].label, | ||
403 | labels[(index >> 1) + (chan * 2)], | ||
404 | EDAC_MC_LABEL_LEN); | ||
405 | |||
389 | if (cumul_size == last_cumul_size) | 406 | if (cumul_size == last_cumul_size) |
390 | continue; /* not populated */ | 407 | continue; /* not populated */ |
391 | 408 | ||
@@ -393,8 +410,8 @@ static void i82975x_init_csrows(struct mem_ctl_info *mci, | |||
393 | csrow->last_page = cumul_size - 1; | 410 | csrow->last_page = cumul_size - 1; |
394 | csrow->nr_pages = cumul_size - last_cumul_size; | 411 | csrow->nr_pages = cumul_size - last_cumul_size; |
395 | last_cumul_size = cumul_size; | 412 | last_cumul_size = cumul_size; |
396 | csrow->grain = 1 << 7; /* I82975X_EAP has 128B resolution */ | 413 | csrow->grain = 1 << 6; /* I82975X_EAP has 64B resolution */ |
397 | csrow->mtype = MEM_DDR; /* i82975x supports only DDR2 */ | 414 | csrow->mtype = MEM_DDR2; /* I82975x supports only DDR2 */ |
398 | csrow->dtype = i82975x_dram_type(mch_window, index); | 415 | csrow->dtype = i82975x_dram_type(mch_window, index); |
399 | csrow->edac_mode = EDAC_SECDED; /* only supported */ | 416 | csrow->edac_mode = EDAC_SECDED; /* only supported */ |
400 | } | 417 | } |
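
The loop above relies on the DRB registers being cumulative: each row's size in pages is the difference between consecutive boundaries, and a repeated boundary marks an unpopulated row (the 'continue' above). With made-up boundary values:

    #include <stdio.h>

    int main(void)
    {
        /* cumulative row boundaries in pages (made-up values) */
        unsigned long drb[4] = { 0x20000, 0x40000, 0x40000, 0x60000 };
        unsigned long last = 0;
        int i;

        for (i = 0; i < 4; i++) {
            if (drb[i] == last)
                continue;                   /* row not populated */
            printf("row %d: %lu pages\n", i, drb[i] - last);
            last = drb[i];
        }
        return 0;
    }
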
@@ -515,18 +532,20 @@ static int i82975x_probe1(struct pci_dev *pdev, int dev_idx) | |||
515 | 532 | ||
516 | debugf3("%s(): init mci\n", __func__); | 533 | debugf3("%s(): init mci\n", __func__); |
517 | mci->dev = &pdev->dev; | 534 | mci->dev = &pdev->dev; |
518 | mci->mtype_cap = MEM_FLAG_DDR; | 535 | mci->mtype_cap = MEM_FLAG_DDR2; |
519 | mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED; | 536 | mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED; |
520 | mci->edac_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED; | 537 | mci->edac_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED; |
521 | mci->mod_name = EDAC_MOD_STR; | 538 | mci->mod_name = EDAC_MOD_STR; |
522 | mci->mod_ver = I82975X_REVISION; | 539 | mci->mod_ver = I82975X_REVISION; |
523 | mci->ctl_name = i82975x_devs[dev_idx].ctl_name; | 540 | mci->ctl_name = i82975x_devs[dev_idx].ctl_name; |
541 | mci->dev_name = pci_name(pdev); | ||
524 | mci->edac_check = i82975x_check; | 542 | mci->edac_check = i82975x_check; |
525 | mci->ctl_page_to_phys = NULL; | 543 | mci->ctl_page_to_phys = NULL; |
526 | debugf3("%s(): init pvt\n", __func__); | 544 | debugf3("%s(): init pvt\n", __func__); |
527 | pvt = (struct i82975x_pvt *) mci->pvt_info; | 545 | pvt = (struct i82975x_pvt *) mci->pvt_info; |
528 | pvt->mch_window = mch_window; | 546 | pvt->mch_window = mch_window; |
529 | i82975x_init_csrows(mci, pdev, mch_window); | 547 | i82975x_init_csrows(mci, pdev, mch_window); |
548 | mci->scrub_mode = SCRUB_HW_SRC; | ||
530 | i82975x_get_error_info(mci, &discard); /* clear counters */ | 549 | i82975x_get_error_info(mci, &discard); /* clear counters */ |
531 | 550 | ||
532 | /* finalize this instance of memory controller with edac core */ | 551 | /* finalize this instance of memory controller with edac core */ |
@@ -664,7 +683,7 @@ module_init(i82975x_init); | |||
664 | module_exit(i82975x_exit); | 683 | module_exit(i82975x_exit); |
665 | 684 | ||
666 | MODULE_LICENSE("GPL"); | 685 | MODULE_LICENSE("GPL"); |
667 | MODULE_AUTHOR("Arvind R. <arvind@acarlab.com>"); | 686 | MODULE_AUTHOR("Arvind R. <arvino55@gmail.com>"); |
668 | MODULE_DESCRIPTION("MC support for Intel 82975 memory hub controllers"); | 687 | MODULE_DESCRIPTION("MC support for Intel 82975 memory hub controllers"); |
669 | 688 | ||
670 | module_param(edac_op_state, int, 0444); | 689 | module_param(edac_op_state, int, 0444); |
diff --git a/drivers/edac/mce_amd.c b/drivers/edac/mce_amd.c new file mode 100644 index 000000000000..795cfbc0bf50 --- /dev/null +++ b/drivers/edac/mce_amd.c | |||
@@ -0,0 +1,920 @@ | |||
1 | #include <linux/module.h> | ||
2 | #include <linux/slab.h> | ||
3 | |||
4 | #include "mce_amd.h" | ||
5 | |||
6 | static struct amd_decoder_ops *fam_ops; | ||
7 | |||
8 | static u8 xec_mask = 0xf; | ||
9 | static u8 nb_err_cpumask = 0xf; | ||
10 | |||
11 | static bool report_gart_errors; | ||
12 | static void (*nb_bus_decoder)(int node_id, struct mce *m, u32 nbcfg); | ||
13 | |||
14 | void amd_report_gart_errors(bool v) | ||
15 | { | ||
16 | report_gart_errors = v; | ||
17 | } | ||
18 | EXPORT_SYMBOL_GPL(amd_report_gart_errors); | ||
19 | |||
20 | void amd_register_ecc_decoder(void (*f)(int, struct mce *, u32)) | ||
21 | { | ||
22 | nb_bus_decoder = f; | ||
23 | } | ||
24 | EXPORT_SYMBOL_GPL(amd_register_ecc_decoder); | ||
25 | |||
26 | void amd_unregister_ecc_decoder(void (*f)(int, struct mce *, u32)) | ||
27 | { | ||
28 | if (nb_bus_decoder) { | ||
29 | WARN_ON(nb_bus_decoder != f); | ||
30 | |||
31 | nb_bus_decoder = NULL; | ||
32 | } | ||
33 | } | ||
34 | EXPORT_SYMBOL_GPL(amd_unregister_ecc_decoder); | ||
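
amd_register_ecc_decoder()/amd_unregister_ecc_decoder() give an EDAC driver a hook into the NB MCE path; the callback signature matches the function pointer above. A sketch of a module wiring itself in (module boilerplate trimmed, decoder body hypothetical):

    #include <linux/module.h>
    #include "mce_amd.h"

    static void my_nb_decoder(int node_id, struct mce *m, u32 nbcfg)
    {
        /* hypothetical: map the NB MCE to a csrow/channel and log it */
    }

    static int __init my_init(void)
    {
        amd_register_ecc_decoder(my_nb_decoder);
        return 0;
    }

    static void __exit my_exit(void)
    {
        amd_unregister_ecc_decoder(my_nb_decoder);
    }
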
35 | |||
36 | /* | ||
37 | * string representation for the different MCA reported error types, see F3x48 | ||
38 | * or MSR0000_0411. | ||
39 | */ | ||
40 | |||
41 | /* transaction type */ | ||
42 | const char *tt_msgs[] = { "INSN", "DATA", "GEN", "RESV" }; | ||
43 | EXPORT_SYMBOL_GPL(tt_msgs); | ||
44 | |||
45 | /* cache level */ | ||
46 | const char *ll_msgs[] = { "RESV", "L1", "L2", "L3/GEN" }; | ||
47 | EXPORT_SYMBOL_GPL(ll_msgs); | ||
48 | |||
49 | /* memory transaction type */ | ||
50 | const char *rrrr_msgs[] = { | ||
51 | "GEN", "RD", "WR", "DRD", "DWR", "IRD", "PRF", "EV", "SNP" | ||
52 | }; | ||
53 | EXPORT_SYMBOL_GPL(rrrr_msgs); | ||
54 | |||
55 | /* participating processor */ | ||
56 | const char *pp_msgs[] = { "SRC", "RES", "OBS", "GEN" }; | ||
57 | EXPORT_SYMBOL_GPL(pp_msgs); | ||
58 | |||
59 | /* request timeout */ | ||
60 | const char *to_msgs[] = { "no timeout", "timed out" }; | ||
61 | EXPORT_SYMBOL_GPL(to_msgs); | ||
62 | |||
63 | /* memory or i/o */ | ||
64 | const char *ii_msgs[] = { "MEM", "RESV", "IO", "GEN" }; | ||
65 | EXPORT_SYMBOL_GPL(ii_msgs); | ||
66 | |||
67 | static const char *f10h_nb_mce_desc[] = { | ||
68 | "HT link data error", | ||
69 | "Protocol error (link, L3, probe filter, etc.)", | ||
70 | "Parity error in NB-internal arrays", | ||
71 | "Link Retry due to IO link transmission error", | ||
72 | "L3 ECC data cache error", | ||
73 | "ECC error in L3 cache tag", | ||
74 | "L3 LRU parity bits error", | ||
75 | "ECC Error in the Probe Filter directory" | ||
76 | }; | ||
77 | |||
78 | static const char * const f15h_ic_mce_desc[] = { | ||
79 | "UC during a demand linefill from L2", | ||
80 | "Parity error during data load from IC", | ||
81 | "Parity error for IC valid bit", | ||
82 | "Main tag parity error", | ||
83 | "Parity error in prediction queue", | ||
84 | "PFB data/address parity error", | ||
85 | "Parity error in the branch status reg", | ||
86 | "PFB promotion address error", | ||
87 | "Tag error during probe/victimization", | ||
88 | "Parity error for IC probe tag valid bit", | ||
89 | "PFB non-cacheable bit parity error", | ||
90 | "PFB valid bit parity error", /* xec = 0xd */ | ||
91 | "patch RAM", /* xec = 010 */ | ||
92 | "uop queue", | ||
93 | "insn buffer", | ||
94 | "predecode buffer", | ||
95 | "fetch address FIFO" | ||
96 | }; | ||
97 | |||
98 | static const char * const f15h_cu_mce_desc[] = { | ||
99 | "Fill ECC error on data fills", /* xec = 0x4 */ | ||
100 | "Fill parity error on insn fills", | ||
101 | "Prefetcher request FIFO parity error", | ||
102 | "PRQ address parity error", | ||
103 | "PRQ data parity error", | ||
104 | "WCC Tag ECC error", | ||
105 | "WCC Data ECC error", | ||
106 | "WCB Data parity error", | ||
107 | "VB Data/ECC error", | ||
108 | "L2 Tag ECC error", /* xec = 0x10 */ | ||
109 | "Hard L2 Tag ECC error", | ||
110 | "Multiple hits on L2 tag", | ||
111 | "XAB parity error", | ||
112 | "PRB address parity error" | ||
113 | }; | ||
114 | |||
115 | static const char * const fr_ex_mce_desc[] = { | ||
116 | "CPU Watchdog timer expire", | ||
117 | "Wakeup array dest tag", | ||
118 | "AG payload array", | ||
119 | "EX payload array", | ||
120 | "IDRF array", | ||
121 | "Retire dispatch queue", | ||
122 | "Mapper checkpoint array", | ||
123 | "Physical register file EX0 port", | ||
124 | "Physical register file EX1 port", | ||
125 | "Physical register file AG0 port", | ||
126 | "Physical register file AG1 port", | ||
127 | "Flag register file", | ||
128 | "DE correctable error could not be corrected" | ||
129 | }; | ||
130 | |||
131 | static bool f12h_dc_mce(u16 ec, u8 xec) | ||
132 | { | ||
133 | bool ret = false; | ||
134 | |||
135 | if (MEM_ERROR(ec)) { | ||
136 | u8 ll = LL(ec); | ||
137 | ret = true; | ||
138 | |||
139 | if (ll == LL_L2) | ||
140 | pr_cont("during L1 linefill from L2.\n"); | ||
141 | else if (ll == LL_L1) | ||
142 | pr_cont("Data/Tag %s error.\n", R4_MSG(ec)); | ||
143 | else | ||
144 | ret = false; | ||
145 | } | ||
146 | return ret; | ||
147 | } | ||
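
f12h_dc_mce() and its siblings key off fields packed into the 16-bit MCA error code: the cache level sits in the low bits with the transaction type above it (the real LL(), TT(), R4() and MEM_ERROR() helpers live in mce_amd.h). A simplified userspace extraction, with the field positions assumed here for illustration:

    #include <stdio.h>

    /* simplified: low two bits = cache level, next two = transaction type */
    #define LL(ec) ((ec) & 0x3)
    #define TT(ec) (((ec) >> 2) & 0x3)

    int main(void)
    {
        unsigned short ec = 0x0116;    /* made-up memory error code */

        printf("LL=%u TT=%u\n", LL(ec), TT(ec));
        return 0;
    }
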
148 | |||
149 | static bool f10h_dc_mce(u16 ec, u8 xec) | ||
150 | { | ||
151 | if (R4(ec) == R4_GEN && LL(ec) == LL_L1) { | ||
152 | pr_cont("during data scrub.\n"); | ||
153 | return true; | ||
154 | } | ||
155 | return f12h_dc_mce(ec, xec); | ||
156 | } | ||
157 | |||
158 | static bool k8_dc_mce(u16 ec, u8 xec) | ||
159 | { | ||
160 | if (BUS_ERROR(ec)) { | ||
161 | pr_cont("during system linefill.\n"); | ||
162 | return true; | ||
163 | } | ||
164 | |||
165 | return f10h_dc_mce(ec, xec); | ||
166 | } | ||
167 | |||
168 | static bool f14h_dc_mce(u16 ec, u8 xec) | ||
169 | { | ||
170 | u8 r4 = R4(ec); | ||
171 | bool ret = true; | ||
172 | |||
173 | if (MEM_ERROR(ec)) { | ||
174 | |||
175 | if (TT(ec) != TT_DATA || LL(ec) != LL_L1) | ||
176 | return false; | ||
177 | |||
178 | switch (r4) { | ||
179 | case R4_DRD: | ||
180 | case R4_DWR: | ||
181 | pr_cont("Data/Tag parity error due to %s.\n", | ||
182 | (r4 == R4_DRD ? "load/hw prf" : "store")); | ||
183 | break; | ||
184 | case R4_EVICT: | ||
185 | pr_cont("Copyback parity error on a tag miss.\n"); | ||
186 | break; | ||
187 | case R4_SNOOP: | ||
188 | pr_cont("Tag parity error during snoop.\n"); | ||
189 | break; | ||
190 | default: | ||
191 | ret = false; | ||
192 | } | ||
193 | } else if (BUS_ERROR(ec)) { | ||
194 | |||
195 | if ((II(ec) != II_MEM && II(ec) != II_IO) || LL(ec) != LL_LG) | ||
196 | return false; | ||
197 | |||
198 | pr_cont("System read data error on a "); | ||
199 | |||
200 | switch (r4) { | ||
201 | case R4_RD: | ||
202 | pr_cont("TLB reload.\n"); | ||
203 | break; | ||
204 | case R4_DWR: | ||
205 | pr_cont("store.\n"); | ||
206 | break; | ||
207 | case R4_DRD: | ||
208 | pr_cont("load.\n"); | ||
209 | break; | ||
210 | default: | ||
211 | ret = false; | ||
212 | } | ||
213 | } else { | ||
214 | ret = false; | ||
215 | } | ||
216 | |||
217 | return ret; | ||
218 | } | ||
219 | |||
220 | static bool f15h_dc_mce(u16 ec, u8 xec) | ||
221 | { | ||
222 | bool ret = true; | ||
223 | |||
224 | if (MEM_ERROR(ec)) { | ||
225 | |||
226 | switch (xec) { | ||
227 | case 0x0: | ||
228 | pr_cont("Data Array access error.\n"); | ||
229 | break; | ||
230 | |||
231 | case 0x1: | ||
232 | pr_cont("UC error during a linefill from L2/NB.\n"); | ||
233 | break; | ||
234 | |||
235 | case 0x2: | ||
236 | case 0x11: | ||
237 | pr_cont("STQ access error.\n"); | ||
238 | break; | ||
239 | |||
240 | case 0x3: | ||
241 | pr_cont("SCB access error.\n"); | ||
242 | break; | ||
243 | |||
244 | case 0x10: | ||
245 | pr_cont("Tag error.\n"); | ||
246 | break; | ||
247 | |||
248 | case 0x12: | ||
249 | pr_cont("LDQ access error.\n"); | ||
250 | break; | ||
251 | |||
252 | default: | ||
253 | ret = false; | ||
254 | } | ||
255 | } else if (BUS_ERROR(ec)) { | ||
256 | |||
257 | if (!xec) | ||
258 | pr_cont("during system linefill.\n"); | ||
259 | else | ||
260 | pr_cont(" Internal %s condition.\n", | ||
261 | ((xec == 1) ? "livelock" : "deadlock")); | ||
262 | } else | ||
263 | ret = false; | ||
264 | |||
265 | return ret; | ||
266 | } | ||
267 | |||
268 | static void amd_decode_dc_mce(struct mce *m) | ||
269 | { | ||
270 | u16 ec = EC(m->status); | ||
271 | u8 xec = XEC(m->status, xec_mask); | ||
272 | |||
273 | pr_emerg(HW_ERR "Data Cache Error: "); | ||
274 | |||
275 | /* TLB error signatures are the same across families */ | ||
276 | if (TLB_ERROR(ec)) { | ||
277 | if (TT(ec) == TT_DATA) { | ||
278 | pr_cont("%s TLB %s.\n", LL_MSG(ec), | ||
279 | ((xec == 2) ? "locked miss" | ||
280 | : (xec ? "multimatch" : "parity"))); | ||
281 | return; | ||
282 | } | ||
283 | } else if (!fam_ops->dc_mce(ec, xec)) { | ||
284 | /* the family-specific handler could not decode it either */ | ||
285 | pr_emerg(HW_ERR "Corrupted DC MCE info?\n"); | ||
286 | } | ||
287 | } | ||
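The TLB branch of amd_decode_dc_mce() above is shared across all families, which is why it sits in front of the per-family dc_mce() dispatch. Since the signature test is pure bit arithmetic, it can be exercised in a rough user-space sketch (macro values copied from mce_amd.h later in this patch; the level-name strings are illustrative, the kernel's ll_msgs[] lives elsewhere in EDAC):

#include <stdio.h>
#include <stdint.h>

#define TLB_ERROR(x) (((x) & 0xFFF0) == 0x0010)
#define TT(x)        (((x) >> 2) & 0x3)
#define LL(x)        ((x) & 0x3)
#define TT_DATA      1

static const char *ll_msgs[] = { "RESV", "L1", "L2", "L3/GEN" };

static int decode_dtlb(uint16_t ec, uint8_t xec)
{
	if (!TLB_ERROR(ec) || TT(ec) != TT_DATA)
		return 0;

	/* xec: 2 = locked miss, other non-zero = multimatch, 0 = parity */
	printf("%s TLB %s.\n", ll_msgs[LL(ec)],
	       xec == 2 ? "locked miss" : (xec ? "multimatch" : "parity"));
	return 1;
}

int main(void)
{
	decode_dtlb(0x0015, 0);	/* prints: L1 TLB parity. */
	return 0;
}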
288 | |||
289 | static bool k8_ic_mce(u16 ec, u8 xec) | ||
290 | { | ||
291 | u8 ll = LL(ec); | ||
292 | bool ret = true; | ||
293 | |||
294 | if (!MEM_ERROR(ec)) | ||
295 | return false; | ||
296 | |||
297 | if (ll == LL_L2) | ||
298 | pr_cont("during a linefill from L2.\n"); | ||
299 | else if (ll == LL_L1) { | ||
300 | switch (R4(ec)) { | ||
301 | case R4_IRD: | ||
302 | pr_cont("Parity error during data load.\n"); | ||
303 | break; | ||
304 | |||
305 | case R4_EVICT: | ||
306 | pr_cont("Copyback Parity/Victim error.\n"); | ||
307 | break; | ||
308 | |||
309 | case R4_SNOOP: | ||
310 | pr_cont("Tag Snoop error.\n"); | ||
311 | break; | ||
312 | |||
313 | default: | ||
314 | ret = false; | ||
315 | break; | ||
316 | } | ||
317 | } else | ||
318 | ret = false; | ||
319 | |||
320 | return ret; | ||
321 | } | ||
322 | |||
323 | static bool f14h_ic_mce(u16 ec, u8 xec) | ||
324 | { | ||
325 | u8 r4 = R4(ec); | ||
326 | bool ret = true; | ||
327 | |||
328 | if (MEM_ERROR(ec)) { | ||
329 | if (TT(ec) != TT_INSTR || LL(ec) != LL_L1) | ||
330 | return false; | ||
331 | |||
332 | if (r4 == R4_IRD) | ||
333 | pr_cont("Data/tag array parity error for a tag hit.\n"); | ||
334 | else if (r4 == R4_SNOOP) | ||
335 | pr_cont("Tag error during snoop/victimization.\n"); | ||
336 | else | ||
337 | ret = false; | ||
338 | } | ||
339 | return ret; | ||
340 | } | ||
341 | |||
342 | static bool f15h_ic_mce(u16 ec, u8 xec) | ||
343 | { | ||
344 | bool ret = true; | ||
345 | |||
346 | if (!MEM_ERROR(ec)) | ||
347 | return false; | ||
348 | |||
349 | switch (xec) { | ||
350 | case 0x0 ... 0xa: | ||
351 | pr_cont("%s.\n", f15h_ic_mce_desc[xec]); | ||
352 | break; | ||
353 | |||
354 | case 0xd: | ||
355 | pr_cont("%s.\n", f15h_ic_mce_desc[xec-2]); | ||
356 | break; | ||
357 | |||
358 | case 0x10 ... 0x14: | ||
359 | pr_cont("Decoder %s parity error.\n", f15h_ic_mce_desc[xec-4]); | ||
360 | break; | ||
361 | |||
362 | default: | ||
363 | ret = false; | ||
364 | } | ||
365 | return ret; | ||
366 | } | ||
367 | |||
368 | static void amd_decode_ic_mce(struct mce *m) | ||
369 | { | ||
370 | u16 ec = EC(m->status); | ||
371 | u8 xec = XEC(m->status, xec_mask); | ||
372 | |||
373 | pr_emerg(HW_ERR "Instruction Cache Error: "); | ||
374 | |||
375 | if (TLB_ERROR(ec)) | ||
376 | pr_cont("%s TLB %s.\n", LL_MSG(ec), | ||
377 | (xec ? "multimatch" : "parity error")); | ||
378 | else if (BUS_ERROR(ec)) { | ||
379 | bool k8 = (boot_cpu_data.x86 == 0xf && (m->status & BIT_64(58))); | ||
380 | |||
381 | pr_cont("during %s.\n", (k8 ? "system linefill" : "NB data read")); | ||
382 | } else if (!fam_ops->ic_mce(ec, xec)) { | ||
383 | /* the family-specific handler could not decode it either */ | ||
384 | pr_emerg(HW_ERR "Corrupted IC MCE info?\n"); | ||
385 | } | ||
386 | } | ||
387 | |||
388 | static void amd_decode_bu_mce(struct mce *m) | ||
389 | { | ||
390 | u16 ec = EC(m->status); | ||
391 | u8 xec = XEC(m->status, xec_mask); | ||
392 | |||
393 | pr_emerg(HW_ERR "Bus Unit Error"); | ||
394 | |||
395 | if (xec == 0x1) | ||
396 | pr_cont(" in the write data buffers.\n"); | ||
397 | else if (xec == 0x3) | ||
398 | pr_cont(" in the victim data buffers.\n"); | ||
399 | else if (xec == 0x2 && MEM_ERROR(ec)) | ||
400 | pr_cont(": %s error in the L2 cache tags.\n", R4_MSG(ec)); | ||
401 | else if (xec == 0x0) { | ||
402 | if (TLB_ERROR(ec)) | ||
403 | pr_cont(": %s error in a Page Descriptor Cache or " | ||
404 | "Guest TLB.\n", TT_MSG(ec)); | ||
405 | else if (BUS_ERROR(ec)) | ||
406 | pr_cont(": %s/ECC error in data read from NB: %s.\n", | ||
407 | R4_MSG(ec), PP_MSG(ec)); | ||
408 | else if (MEM_ERROR(ec)) { | ||
409 | u8 r4 = R4(ec); | ||
410 | |||
411 | if (r4 >= 0x7) | ||
412 | pr_cont(": %s error during data copyback.\n", | ||
413 | R4_MSG(ec)); | ||
414 | else if (r4 <= 0x1) | ||
415 | pr_cont(": %s parity/ECC error during data " | ||
416 | "access from L2.\n", R4_MSG(ec)); | ||
417 | else | ||
418 | goto wrong_bu_mce; | ||
419 | } else | ||
420 | goto wrong_bu_mce; | ||
421 | } else | ||
422 | goto wrong_bu_mce; | ||
423 | |||
424 | return; | ||
425 | |||
426 | wrong_bu_mce: | ||
427 | pr_emerg(HW_ERR "Corrupted BU MCE info?\n"); | ||
428 | } | ||
429 | |||
430 | static void amd_decode_cu_mce(struct mce *m) | ||
431 | { | ||
432 | u16 ec = EC(m->status); | ||
433 | u8 xec = XEC(m->status, xec_mask); | ||
434 | |||
435 | pr_emerg(HW_ERR "Combined Unit Error: "); | ||
436 | |||
437 | if (TLB_ERROR(ec)) { | ||
438 | if (xec == 0x0) | ||
439 | pr_cont("Data parity TLB read error.\n"); | ||
440 | else if (xec == 0x1) | ||
441 | pr_cont("Poison data provided for TLB fill.\n"); | ||
442 | else | ||
443 | goto wrong_cu_mce; | ||
444 | } else if (BUS_ERROR(ec)) { | ||
445 | if (xec > 2) | ||
446 | goto wrong_cu_mce; | ||
447 | |||
448 | pr_cont("Error during attempted NB data read.\n"); | ||
449 | } else if (MEM_ERROR(ec)) { | ||
450 | switch (xec) { | ||
451 | case 0x4 ... 0xc: | ||
452 | pr_cont("%s.\n", f15h_cu_mce_desc[xec - 0x4]); | ||
453 | break; | ||
454 | |||
455 | case 0x10 ... 0x14: | ||
456 | pr_cont("%s.\n", f15h_cu_mce_desc[xec - 0x7]); | ||
457 | break; | ||
458 | |||
459 | default: | ||
460 | goto wrong_cu_mce; | ||
461 | } | ||
462 | } | ||
463 | |||
464 | return; | ||
465 | |||
466 | wrong_cu_mce: | ||
467 | pr_emerg(HW_ERR "Corrupted CU MCE info?\n"); | ||
468 | } | ||
469 | |||
470 | static void amd_decode_ls_mce(struct mce *m) | ||
471 | { | ||
472 | u16 ec = EC(m->status); | ||
473 | u8 xec = XEC(m->status, xec_mask); | ||
474 | |||
475 | if (boot_cpu_data.x86 >= 0x14) { | ||
476 | pr_emerg("You shouldn't be seeing an LS MCE on this cpu family," | ||
477 | " please report on LKML.\n"); | ||
478 | return; | ||
479 | } | ||
480 | |||
481 | pr_emerg(HW_ERR "Load Store Error"); | ||
482 | |||
483 | if (xec == 0x0) { | ||
484 | u8 r4 = R4(ec); | ||
485 | |||
486 | if (!BUS_ERROR(ec) || (r4 != R4_DRD && r4 != R4_DWR)) | ||
487 | goto wrong_ls_mce; | ||
488 | |||
489 | pr_cont(" during %s.\n", R4_MSG(ec)); | ||
490 | } else | ||
491 | goto wrong_ls_mce; | ||
492 | |||
493 | return; | ||
494 | |||
495 | wrong_ls_mce: | ||
496 | pr_emerg(HW_ERR "Corrupted LS MCE info?\n"); | ||
497 | } | ||
498 | |||
499 | static bool k8_nb_mce(u16 ec, u8 xec) | ||
500 | { | ||
501 | bool ret = true; | ||
502 | |||
503 | switch (xec) { | ||
504 | case 0x1: | ||
505 | pr_cont("CRC error detected on HT link.\n"); | ||
506 | break; | ||
507 | |||
508 | case 0x5: | ||
509 | pr_cont("Invalid GART PTE entry during GART table walk.\n"); | ||
510 | break; | ||
511 | |||
512 | case 0x6: | ||
513 | pr_cont("Unsupported atomic RMW received from an IO link.\n"); | ||
514 | break; | ||
515 | |||
516 | case 0x0: | ||
517 | case 0x8: | ||
518 | if (boot_cpu_data.x86 == 0x11) | ||
519 | return false; | ||
520 | |||
521 | pr_cont("DRAM ECC error detected on the NB.\n"); | ||
522 | break; | ||
523 | |||
524 | case 0xd: | ||
525 | pr_cont("Parity error on the DRAM addr/ctl signals.\n"); | ||
526 | break; | ||
527 | |||
528 | default: | ||
529 | ret = false; | ||
530 | break; | ||
531 | } | ||
532 | |||
533 | return ret; | ||
534 | } | ||
535 | |||
536 | static bool f10h_nb_mce(u16 ec, u8 xec) | ||
537 | { | ||
538 | bool ret = true; | ||
539 | u8 offset = 0; | ||
540 | |||
541 | if (k8_nb_mce(ec, xec)) | ||
542 | return true; | ||
543 | |||
544 | switch (xec) { | ||
545 | case 0xa ... 0xc: | ||
546 | offset = 10; | ||
547 | break; | ||
548 | |||
549 | case 0xe: | ||
550 | offset = 11; | ||
551 | break; | ||
552 | |||
553 | case 0xf: | ||
554 | if (TLB_ERROR(ec)) | ||
555 | pr_cont("GART Table Walk data error.\n"); | ||
556 | else if (BUS_ERROR(ec)) | ||
557 | pr_cont("DMA Exclusion Vector Table Walk error.\n"); | ||
558 | else | ||
559 | ret = false; | ||
560 | |||
561 | goto out; | ||
562 | break; | ||
563 | |||
564 | case 0x19: | ||
565 | if (boot_cpu_data.x86 == 0x15) | ||
566 | pr_cont("Compute Unit Data Error.\n"); | ||
567 | else | ||
568 | ret = false; | ||
569 | |||
570 | goto out; | ||
571 | break; | ||
572 | |||
573 | case 0x1c ... 0x1f: | ||
574 | offset = 24; | ||
575 | break; | ||
576 | |||
577 | default: | ||
578 | ret = false; | ||
579 | |||
580 | goto out; | ||
581 | break; | ||
582 | } | ||
583 | |||
584 | pr_cont("%s.\n", f10h_nb_mce_desc[xec - offset]); | ||
585 | |||
586 | out: | ||
587 | return ret; | ||
588 | } | ||
589 | |||
590 | static bool nb_noop_mce(u16 ec, u8 xec) | ||
591 | { | ||
592 | return false; | ||
593 | } | ||
594 | |||
595 | void amd_decode_nb_mce(int node_id, struct mce *m, u32 nbcfg) | ||
596 | { | ||
597 | struct cpuinfo_x86 *c = &boot_cpu_data; | ||
598 | u16 ec = EC(m->status); | ||
599 | u8 xec = XEC(m->status, 0x1f); | ||
600 | u32 nbsh = (u32)(m->status >> 32); | ||
601 | int core = -1; | ||
602 | |||
603 | pr_emerg(HW_ERR "Northbridge Error (node %d", node_id); | ||
604 | |||
605 | /* On F10h revD and later, ErrCpu[3:0] is valid only if ErrCpuVal is set */ | ||
606 | if (c->x86 == 0x10 && c->x86_model > 7) { | ||
607 | if (nbsh & NBSH_ERR_CPU_VAL) | ||
608 | core = nbsh & nb_err_cpumask; | ||
609 | } else { | ||
610 | u8 assoc_cpus = nbsh & nb_err_cpumask; | ||
611 | |||
612 | if (assoc_cpus > 0) | ||
613 | core = fls(assoc_cpus) - 1; | ||
614 | } | ||
615 | |||
616 | if (core >= 0) | ||
617 | pr_cont(", core %d): ", core); | ||
618 | else | ||
619 | pr_cont("): "); | ||
620 | |||
621 | switch (xec) { | ||
622 | case 0x2: | ||
623 | pr_cont("Sync error (sync packets on HT link detected).\n"); | ||
624 | return; | ||
625 | |||
626 | case 0x3: | ||
627 | pr_cont("HT Master abort.\n"); | ||
628 | return; | ||
629 | |||
630 | case 0x4: | ||
631 | pr_cont("HT Target abort.\n"); | ||
632 | return; | ||
633 | |||
634 | case 0x7: | ||
635 | pr_cont("NB Watchdog timeout.\n"); | ||
636 | return; | ||
637 | |||
638 | case 0x9: | ||
639 | pr_cont("SVM DMA Exclusion Vector error.\n"); | ||
640 | return; | ||
641 | |||
642 | default: | ||
643 | break; | ||
644 | } | ||
645 | |||
646 | if (!fam_ops->nb_mce(ec, xec)) | ||
647 | goto wrong_nb_mce; | ||
648 | |||
649 | if (c->x86 == 0xf || c->x86 == 0x10 || c->x86 == 0x15) | ||
650 | if ((xec == 0x8 || xec == 0x0) && nb_bus_decoder) | ||
651 | nb_bus_decoder(node_id, m, nbcfg); | ||
652 | |||
653 | return; | ||
654 | |||
655 | wrong_nb_mce: | ||
656 | pr_emerg(HW_ERR "Corrupted NB MCE info?\n"); | ||
657 | } | ||
658 | EXPORT_SYMBOL_GPL(amd_decode_nb_mce); | ||
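amd_decode_nb_mce() recovers the reporting core from the high half of MCi_STATUS in two ways: newer F10h parts (model > 7) expose an explicit core number gated by ErrCpuVal, while older parts leave a bitmask of associated CPUs from which the highest set bit is taken. A hedged user-space sketch of both paths (fls_u32() stands in for the kernel's fls(); the mask and sample values are illustrative):

#include <stdio.h>
#include <stdint.h>

#define NBSH_ERR_CPU_VAL (1u << 24)

/* Portable stand-in for the kernel's fls(): 1-based index of the MSB. */
static int fls_u32(uint32_t x)
{
	int n = 0;
	while (x) { n++; x >>= 1; }
	return n;
}

static int nb_error_core(uint32_t nbsh, uint32_t cpumask, int newer_f10h)
{
	if (newer_f10h)	/* explicit ErrCpu field, valid only if ErrCpuVal set */
		return (nbsh & NBSH_ERR_CPU_VAL) ? (int)(nbsh & cpumask) : -1;

	/* older scheme: bitmask of associated CPUs, take the highest one */
	return (nbsh & cpumask) ? fls_u32(nbsh & cpumask) - 1 : -1;
}

int main(void)
{
	/* bit 2 set in the associated-CPU mask -> core 2 */
	printf("core %d\n", nb_error_core(0x4, 0xf, 0));
	return 0;
}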
659 | |||
660 | static void amd_decode_fr_mce(struct mce *m) | ||
661 | { | ||
662 | struct cpuinfo_x86 *c = &boot_cpu_data; | ||
663 | u8 xec = XEC(m->status, xec_mask); | ||
664 | |||
665 | if (c->x86 == 0xf || c->x86 == 0x11) | ||
666 | goto wrong_fr_mce; | ||
667 | |||
668 | if (c->x86 != 0x15 && xec != 0x0) | ||
669 | goto wrong_fr_mce; | ||
670 | |||
671 | pr_emerg(HW_ERR "%s Error: ", | ||
672 | (c->x86 == 0x15 ? "Execution Unit" : "FIROB")); | ||
673 | |||
674 | if (xec == 0x0 || xec == 0xc) | ||
675 | pr_cont("%s.\n", fr_ex_mce_desc[xec]); | ||
676 | else if (xec < 0xd) | ||
677 | pr_cont("%s parity error.\n", fr_ex_mce_desc[xec]); | ||
678 | else | ||
679 | goto wrong_fr_mce; | ||
680 | |||
681 | return; | ||
682 | |||
683 | wrong_fr_mce: | ||
684 | pr_emerg(HW_ERR "Corrupted FR MCE info?\n"); | ||
685 | } | ||
686 | |||
687 | static void amd_decode_fp_mce(struct mce *m) | ||
688 | { | ||
689 | u8 xec = XEC(m->status, xec_mask); | ||
690 | |||
691 | pr_emerg(HW_ERR "Floating Point Unit Error: "); | ||
692 | |||
693 | switch (xec) { | ||
694 | case 0x1: | ||
695 | pr_cont("Free List"); | ||
696 | break; | ||
697 | |||
698 | case 0x2: | ||
699 | pr_cont("Physical Register File"); | ||
700 | break; | ||
701 | |||
702 | case 0x3: | ||
703 | pr_cont("Retire Queue"); | ||
704 | break; | ||
705 | |||
706 | case 0x4: | ||
707 | pr_cont("Scheduler table"); | ||
708 | break; | ||
709 | |||
710 | case 0x5: | ||
711 | pr_cont("Status Register File"); | ||
712 | break; | ||
713 | |||
714 | default: | ||
715 | goto wrong_fp_mce; | ||
716 | break; | ||
717 | } | ||
718 | |||
719 | pr_cont(" parity error.\n"); | ||
720 | |||
721 | return; | ||
722 | |||
723 | wrong_fp_mce: | ||
724 | pr_emerg(HW_ERR "Corrupted FP MCE info?\n"); | ||
725 | } | ||
726 | |||
727 | static inline void amd_decode_err_code(u16 ec) | ||
728 | { | ||
729 | |||
730 | pr_emerg(HW_ERR "cache level: %s", LL_MSG(ec)); | ||
731 | |||
732 | if (BUS_ERROR(ec)) | ||
733 | pr_cont(", mem/io: %s", II_MSG(ec)); | ||
734 | else | ||
735 | pr_cont(", tx: %s", TT_MSG(ec)); | ||
736 | |||
737 | if (MEM_ERROR(ec) || BUS_ERROR(ec)) { | ||
738 | pr_cont(", mem-tx: %s", R4_MSG(ec)); | ||
739 | |||
740 | if (BUS_ERROR(ec)) | ||
741 | pr_cont(", part-proc: %s (%s)", PP_MSG(ec), TO_MSG(ec)); | ||
742 | } | ||
743 | |||
744 | pr_cont("\n"); | ||
745 | } | ||
746 | |||
747 | /* | ||
748 | * Filter out unwanted MCE signatures here. | ||
749 | */ | ||
750 | static bool amd_filter_mce(struct mce *m) | ||
751 | { | ||
752 | u8 xec = (m->status >> 16) & 0x1f; | ||
753 | |||
754 | /* | ||
755 | * NB GART TLB error reporting is disabled by default. | ||
756 | */ | ||
757 | if (m->bank == 4 && xec == 0x5 && !report_gart_errors) | ||
758 | return true; | ||
759 | |||
760 | return false; | ||
761 | } | ||
762 | |||
763 | int amd_decode_mce(struct notifier_block *nb, unsigned long val, void *data) | ||
764 | { | ||
765 | struct mce *m = (struct mce *)data; | ||
766 | struct cpuinfo_x86 *c = &boot_cpu_data; | ||
767 | int node, ecc; | ||
768 | |||
769 | if (amd_filter_mce(m)) | ||
770 | return NOTIFY_STOP; | ||
771 | |||
772 | pr_emerg(HW_ERR "MC%d_STATUS[%s|%s|%s|%s|%s", | ||
773 | m->bank, | ||
774 | ((m->status & MCI_STATUS_OVER) ? "Over" : "-"), | ||
775 | ((m->status & MCI_STATUS_UC) ? "UE" : "CE"), | ||
776 | ((m->status & MCI_STATUS_MISCV) ? "MiscV" : "-"), | ||
777 | ((m->status & MCI_STATUS_PCC) ? "PCC" : "-"), | ||
778 | ((m->status & MCI_STATUS_ADDRV) ? "AddrV" : "-")); | ||
779 | |||
780 | if (c->x86 == 0x15) | ||
781 | pr_cont("|%s|%s", | ||
782 | ((m->status & BIT_64(44)) ? "Deferred" : "-"), | ||
783 | ((m->status & BIT_64(43)) ? "Poison" : "-")); | ||
784 | |||
785 | /* the two ECC bits [46:45] of MCi_STATUS ([14:13] of the high half) */ | ||
786 | ecc = (m->status >> 45) & 0x3; | ||
787 | if (ecc) | ||
788 | pr_cont("|%sECC", ((ecc == 2) ? "C" : "U")); | ||
789 | |||
790 | pr_cont("]: 0x%016llx\n", m->status); | ||
791 | |||
792 | |||
793 | switch (m->bank) { | ||
794 | case 0: | ||
795 | amd_decode_dc_mce(m); | ||
796 | break; | ||
797 | |||
798 | case 1: | ||
799 | amd_decode_ic_mce(m); | ||
800 | break; | ||
801 | |||
802 | case 2: | ||
803 | if (c->x86 == 0x15) | ||
804 | amd_decode_cu_mce(m); | ||
805 | else | ||
806 | amd_decode_bu_mce(m); | ||
807 | break; | ||
808 | |||
809 | case 3: | ||
810 | amd_decode_ls_mce(m); | ||
811 | break; | ||
812 | |||
813 | case 4: | ||
814 | node = amd_get_nb_id(m->extcpu); | ||
815 | amd_decode_nb_mce(node, m, 0); | ||
816 | break; | ||
817 | |||
818 | case 5: | ||
819 | amd_decode_fr_mce(m); | ||
820 | break; | ||
821 | |||
822 | case 6: | ||
823 | amd_decode_fp_mce(m); | ||
824 | break; | ||
825 | |||
826 | default: | ||
827 | break; | ||
828 | } | ||
829 | |||
830 | amd_decode_err_code(m->status & 0xffff); | ||
831 | |||
832 | return NOTIFY_STOP; | ||
833 | } | ||
834 | EXPORT_SYMBOL_GPL(amd_decode_mce); | ||
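The flag string amd_decode_mce() prints is built directly from the architectural MCi_STATUS bits (Val=63, Over=62, UC=61, MiscV=59, AddrV=58, PCC=57) plus AMD's ECC pair in bits [46:45]. A self-contained sketch of the same formatting, fed a made-up sample status word:

#include <stdio.h>
#include <stdint.h>

#define BIT_64(n) (1ULL << (n))

static void print_status_flags(uint64_t status)
{
	unsigned int ecc = (status >> 45) & 0x3;	/* bits [46:45] */

	printf("[%s|%s|%s|%s|%s",
	       (status & BIT_64(62)) ? "Over"  : "-",	/* overflow */
	       (status & BIT_64(61)) ? "UE"    : "CE",	/* uncorrected? */
	       (status & BIT_64(59)) ? "MiscV" : "-",	/* MCi_MISC valid */
	       (status & BIT_64(57)) ? "PCC"   : "-",	/* context corrupt */
	       (status & BIT_64(58)) ? "AddrV" : "-");	/* MCi_ADDR valid */
	if (ecc)
		printf("|%sECC", (ecc == 2) ? "C" : "U");
	printf("]: 0x%016llx\n", (unsigned long long)status);
}

int main(void)
{
	/* made-up correctable sample: prints [-|CE|-|-|AddrV|CECC]: ... */
	print_status_flags(0x9400400000000151ULL);
	return 0;
}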
835 | |||
836 | static struct notifier_block amd_mce_dec_nb = { | ||
837 | .notifier_call = amd_decode_mce, | ||
838 | }; | ||
839 | |||
840 | static int __init mce_amd_init(void) | ||
841 | { | ||
842 | struct cpuinfo_x86 *c = &boot_cpu_data; | ||
843 | |||
844 | if (c->x86_vendor != X86_VENDOR_AMD) | ||
845 | return 0; | ||
846 | |||
847 | if ((c->x86 < 0xf || c->x86 > 0x12) && | ||
848 | (c->x86 != 0x14 || c->x86_model > 0xf) && | ||
849 | (c->x86 != 0x15 || c->x86_model > 0xf)) | ||
850 | return 0; | ||
851 | |||
852 | fam_ops = kzalloc(sizeof(struct amd_decoder_ops), GFP_KERNEL); | ||
853 | if (!fam_ops) | ||
854 | return -ENOMEM; | ||
855 | |||
856 | switch (c->x86) { | ||
857 | case 0xf: | ||
858 | fam_ops->dc_mce = k8_dc_mce; | ||
859 | fam_ops->ic_mce = k8_ic_mce; | ||
860 | fam_ops->nb_mce = k8_nb_mce; | ||
861 | break; | ||
862 | |||
863 | case 0x10: | ||
864 | fam_ops->dc_mce = f10h_dc_mce; | ||
865 | fam_ops->ic_mce = k8_ic_mce; | ||
866 | fam_ops->nb_mce = f10h_nb_mce; | ||
867 | break; | ||
868 | |||
869 | case 0x11: | ||
870 | fam_ops->dc_mce = k8_dc_mce; | ||
871 | fam_ops->ic_mce = k8_ic_mce; | ||
872 | fam_ops->nb_mce = f10h_nb_mce; | ||
873 | break; | ||
874 | |||
875 | case 0x12: | ||
876 | fam_ops->dc_mce = f12h_dc_mce; | ||
877 | fam_ops->ic_mce = k8_ic_mce; | ||
878 | fam_ops->nb_mce = nb_noop_mce; | ||
879 | break; | ||
880 | |||
881 | case 0x14: | ||
882 | nb_err_cpumask = 0x3; | ||
883 | fam_ops->dc_mce = f14h_dc_mce; | ||
884 | fam_ops->ic_mce = f14h_ic_mce; | ||
885 | fam_ops->nb_mce = nb_noop_mce; | ||
886 | break; | ||
887 | |||
888 | case 0x15: | ||
889 | xec_mask = 0x1f; | ||
890 | fam_ops->dc_mce = f15h_dc_mce; | ||
891 | fam_ops->ic_mce = f15h_ic_mce; | ||
892 | fam_ops->nb_mce = f10h_nb_mce; | ||
893 | break; | ||
894 | |||
895 | default: | ||
896 | printk(KERN_WARNING "Huh? What family is that: 0x%x?!\n", c->x86); | ||
897 | kfree(fam_ops); | ||
898 | return -EINVAL; | ||
899 | } | ||
900 | |||
901 | pr_info("MCE: In-kernel MCE decoding enabled.\n"); | ||
902 | |||
903 | atomic_notifier_chain_register(&x86_mce_decoder_chain, &amd_mce_dec_nb); | ||
904 | |||
905 | return 0; | ||
906 | } | ||
907 | early_initcall(mce_amd_init); | ||
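mce_amd_init() fills a single amd_decoder_ops instance once at boot, so the hot decode paths can dispatch through fam_ops without re-checking the CPU family on every MCE. A stripped-down sketch of the same ops-table pattern (family values and handler bodies are illustrative):

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

struct decoder_ops {
	bool (*dc_mce)(uint16_t ec, uint8_t xec);
};

static bool k8_dc(uint16_t ec, uint8_t xec)   { printf("K8 DC decode\n");   return true; }
static bool f15h_dc(uint16_t ec, uint8_t xec) { printf("F15h DC decode\n"); return true; }

static struct decoder_ops ops;

/* Bind the per-family handlers exactly once, at init time. */
static int init_ops(unsigned int family)
{
	switch (family) {
	case 0x0f: ops.dc_mce = k8_dc;   break;
	case 0x15: ops.dc_mce = f15h_dc; break;
	default:   return -1;	/* unsupported family */
	}
	return 0;
}

int main(void)
{
	if (init_ops(0x15) == 0)
		ops.dc_mce(0x0151, 0);	/* dispatch, no family checks here */
	return 0;
}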
908 | |||
909 | #ifdef MODULE | ||
910 | static void __exit mce_amd_exit(void) | ||
911 | { | ||
912 | atomic_notifier_chain_unregister(&x86_mce_decoder_chain, &amd_mce_dec_nb); | ||
913 | kfree(fam_ops); | ||
914 | } | ||
915 | |||
916 | MODULE_DESCRIPTION("AMD MCE decoder"); | ||
917 | MODULE_ALIAS("edac-mce-amd"); | ||
918 | MODULE_LICENSE("GPL"); | ||
919 | module_exit(mce_amd_exit); | ||
920 | #endif | ||
diff --git a/drivers/edac/mce_amd.h b/drivers/edac/mce_amd.h new file mode 100644 index 000000000000..795a3206acf5 --- /dev/null +++ b/drivers/edac/mce_amd.h | |||
@@ -0,0 +1,94 @@ | |||
1 | #ifndef _EDAC_MCE_AMD_H | ||
2 | #define _EDAC_MCE_AMD_H | ||
3 | |||
4 | #include <linux/notifier.h> | ||
5 | |||
6 | #include <asm/mce.h> | ||
7 | |||
8 | #define BIT_64(n) (U64_C(1) << (n)) | ||
9 | |||
10 | #define EC(x) ((x) & 0xffff) | ||
11 | #define XEC(x, mask) (((x) >> 16) & (mask)) | ||
12 | |||
13 | #define LOW_SYNDROME(x) (((x) >> 15) & 0xff) | ||
14 | #define HIGH_SYNDROME(x) (((x) >> 24) & 0xff) | ||
15 | |||
16 | #define TLB_ERROR(x) (((x) & 0xFFF0) == 0x0010) | ||
17 | #define MEM_ERROR(x) (((x) & 0xFF00) == 0x0100) | ||
18 | #define BUS_ERROR(x) (((x) & 0xF800) == 0x0800) | ||
19 | |||
20 | #define TT(x) (((x) >> 2) & 0x3) | ||
21 | #define TT_MSG(x) tt_msgs[TT(x)] | ||
22 | #define II(x) (((x) >> 2) & 0x3) | ||
23 | #define II_MSG(x) ii_msgs[II(x)] | ||
24 | #define LL(x) ((x) & 0x3) | ||
25 | #define LL_MSG(x) ll_msgs[LL(x)] | ||
26 | #define TO(x) (((x) >> 8) & 0x1) | ||
27 | #define TO_MSG(x) to_msgs[TO(x)] | ||
28 | #define PP(x) (((x) >> 9) & 0x3) | ||
29 | #define PP_MSG(x) pp_msgs[PP(x)] | ||
30 | |||
31 | #define R4(x) (((x) >> 4) & 0xf) | ||
32 | #define R4_MSG(x) ((R4(x) < 9) ? rrrr_msgs[R4(x)] : "Wrong R4!") | ||
33 | |||
34 | /* | ||
35 | * F3x4C bits (MCi_STATUS' high half) | ||
36 | */ | ||
37 | #define NBSH_ERR_CPU_VAL BIT(24) | ||
38 | |||
39 | enum tt_ids { | ||
40 | TT_INSTR = 0, | ||
41 | TT_DATA, | ||
42 | TT_GEN, | ||
43 | TT_RESV, | ||
44 | }; | ||
45 | |||
46 | enum ll_ids { | ||
47 | LL_RESV = 0, | ||
48 | LL_L1, | ||
49 | LL_L2, | ||
50 | LL_LG, | ||
51 | }; | ||
52 | |||
53 | enum ii_ids { | ||
54 | II_MEM = 0, | ||
55 | II_RESV, | ||
56 | II_IO, | ||
57 | II_GEN, | ||
58 | }; | ||
59 | |||
60 | enum rrrr_ids { | ||
61 | R4_GEN = 0, | ||
62 | R4_RD, | ||
63 | R4_WR, | ||
64 | R4_DRD, | ||
65 | R4_DWR, | ||
66 | R4_IRD, | ||
67 | R4_PREF, | ||
68 | R4_EVICT, | ||
69 | R4_SNOOP, | ||
70 | }; | ||
71 | |||
72 | extern const char *tt_msgs[]; | ||
73 | extern const char *ll_msgs[]; | ||
74 | extern const char *rrrr_msgs[]; | ||
75 | extern const char *pp_msgs[]; | ||
76 | extern const char *to_msgs[]; | ||
77 | extern const char *ii_msgs[]; | ||
78 | |||
79 | /* | ||
80 | * per-family decoder ops | ||
81 | */ | ||
82 | struct amd_decoder_ops { | ||
83 | bool (*dc_mce)(u16, u8); | ||
84 | bool (*ic_mce)(u16, u8); | ||
85 | bool (*nb_mce)(u16, u8); | ||
86 | }; | ||
87 | |||
88 | void amd_report_gart_errors(bool); | ||
89 | void amd_register_ecc_decoder(void (*f)(int, struct mce *, u32)); | ||
90 | void amd_unregister_ecc_decoder(void (*f)(int, struct mce *, u32)); | ||
91 | void amd_decode_nb_mce(int, struct mce *, u32); | ||
92 | int amd_decode_mce(struct notifier_block *nb, unsigned long val, void *data); | ||
93 | |||
94 | #endif /* _EDAC_MCE_AMD_H */ | ||
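All of the extractors above are plain bit manipulation on the low 32 bits of MCi_STATUS, so they can be tested outside the kernel. A minimal user-space sketch with the macro definitions copied from the header and a made-up status word:

#include <stdio.h>
#include <stdint.h>

#define EC(x)        ((x) & 0xffff)
#define XEC(x, mask) (((x) >> 16) & (mask))
#define TLB_ERROR(x) (((x) & 0xFFF0) == 0x0010)
#define MEM_ERROR(x) (((x) & 0xFF00) == 0x0100)
#define BUS_ERROR(x) (((x) & 0xF800) == 0x0800)
#define LL(x)        ((x) & 0x3)
#define R4(x)        (((x) >> 4) & 0xf)

int main(void)
{
	uint64_t status = 0x9400000000010151ULL;	/* made-up sample */
	uint16_t ec  = EC(status);
	uint8_t  xec = XEC(status, 0x1f);

	/* prints: ec=0x0151 xec=0x01 ll=1 r4=5 (a memory error, L1, IRD) */
	printf("ec=0x%04x xec=0x%02x ll=%d r4=%d\n", ec, xec, LL(ec), R4(ec));
	printf("tlb=%d mem=%d bus=%d\n",
	       (int)TLB_ERROR(ec), (int)MEM_ERROR(ec), (int)BUS_ERROR(ec));
	return 0;
}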
diff --git a/drivers/edac/mce_amd_inj.c b/drivers/edac/mce_amd_inj.c new file mode 100644 index 000000000000..a4987e03f59e --- /dev/null +++ b/drivers/edac/mce_amd_inj.c | |||
@@ -0,0 +1,172 @@ | |||
1 | /* | ||
2 | * A simple MCE injection facility for testing the MCE decoding code. This | ||
3 | * driver should be built as module so that it can be loaded on production | ||
4 | * kernels for testing purposes. | ||
5 | * | ||
6 | * This file may be distributed under the terms of the GNU General Public | ||
7 | * License version 2. | ||
8 | * | ||
9 | * Copyright (c) 2010: Borislav Petkov <borislav.petkov@amd.com> | ||
10 | * Advanced Micro Devices Inc. | ||
11 | */ | ||
12 | |||
13 | #include <linux/kobject.h> | ||
14 | #include <linux/sysdev.h> | ||
15 | #include <linux/edac.h> | ||
16 | #include <asm/mce.h> | ||
17 | |||
18 | #include "mce_amd.h" | ||
19 | |||
20 | struct edac_mce_attr { | ||
21 | struct attribute attr; | ||
22 | ssize_t (*show) (struct kobject *kobj, struct edac_mce_attr *attr, char *buf); | ||
23 | ssize_t (*store)(struct kobject *kobj, struct edac_mce_attr *attr, | ||
24 | const char *buf, size_t count); | ||
25 | }; | ||
26 | |||
27 | #define EDAC_MCE_ATTR(_name, _mode, _show, _store) \ | ||
28 | static struct edac_mce_attr mce_attr_##_name = __ATTR(_name, _mode, _show, _store) | ||
29 | |||
30 | static struct kobject *mce_kobj; | ||
31 | |||
32 | /* | ||
33 | * Collect all the MCi_XXX settings | ||
34 | */ | ||
35 | static struct mce i_mce; | ||
36 | |||
37 | #define MCE_INJECT_STORE(reg) \ | ||
38 | static ssize_t edac_inject_##reg##_store(struct kobject *kobj, \ | ||
39 | struct edac_mce_attr *attr, \ | ||
40 | const char *data, size_t count)\ | ||
41 | { \ | ||
42 | int ret = 0; \ | ||
43 | unsigned long value; \ | ||
44 | \ | ||
45 | ret = strict_strtoul(data, 16, &value); \ | ||
46 | if (ret < 0) { \ | ||
47 | printk(KERN_ERR "Error writing MCE " #reg " field.\n"); \ | ||
48 | return ret; \ | ||
49 | } \ | ||
50 | i_mce.reg = value; \ | ||
51 | return count; \ | ||
52 | }
53 | |||
54 | MCE_INJECT_STORE(status); | ||
55 | MCE_INJECT_STORE(misc); | ||
56 | MCE_INJECT_STORE(addr); | ||
57 | |||
58 | #define MCE_INJECT_SHOW(reg) \ | ||
59 | static ssize_t edac_inject_##reg##_show(struct kobject *kobj, \ | ||
60 | struct edac_mce_attr *attr, \ | ||
61 | char *buf) \ | ||
62 | { \ | ||
63 | return sprintf(buf, "0x%016llx\n", i_mce.reg); \ | ||
64 | } | ||
65 | |||
66 | MCE_INJECT_SHOW(status); | ||
67 | MCE_INJECT_SHOW(misc); | ||
68 | MCE_INJECT_SHOW(addr); | ||
69 | |||
70 | EDAC_MCE_ATTR(status, 0644, edac_inject_status_show, edac_inject_status_store); | ||
71 | EDAC_MCE_ATTR(misc, 0644, edac_inject_misc_show, edac_inject_misc_store); | ||
72 | EDAC_MCE_ATTR(addr, 0644, edac_inject_addr_show, edac_inject_addr_store); | ||
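MCE_INJECT_STORE/SHOW use token pasting so that each struct mce field costs exactly one macro invocation per accessor, and EDAC_MCE_ATTR then wires the resulting pair into a sysfs attribute. The generation trick itself, reduced to a plain user-space sketch (the demo struct and hex parsing are illustrative):

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

struct regs {
	uint64_t status;
	uint64_t addr;
};

static struct regs r;

/* One macro invocation stamps out a store/show pair for one field. */
#define GEN_ACCESSORS(field)						\
static void field##_store(const char *s)				\
{									\
	r.field = strtoull(s, NULL, 16);				\
}									\
static void field##_show(void)						\
{									\
	printf(#field " = 0x%016llx\n", (unsigned long long)r.field);	\
}

GEN_ACCESSORS(status)
GEN_ACCESSORS(addr)

int main(void)
{
	status_store("dead0000beef");
	status_show();	/* prints: status = 0x0000dead0000beef */
	addr_store("1000");
	addr_show();
	return 0;
}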
73 | |||
74 | /* | ||
75 | * Writing this file selects the bank to inject into and, at | ||
76 | * the same time, triggers the injection. | ||
77 | */ | ||
78 | static ssize_t edac_inject_bank_store(struct kobject *kobj, | ||
79 | struct edac_mce_attr *attr, | ||
80 | const char *data, size_t count) | ||
81 | { | ||
82 | int ret = 0; | ||
83 | unsigned long value; | ||
84 | |||
85 | ret = strict_strtoul(data, 10, &value); | ||
86 | if (ret < 0) { | ||
87 | printk(KERN_ERR "Invalid bank value!\n"); | ||
88 | return -EINVAL; | ||
89 | } | ||
90 | |||
91 | if (value > 5 && | ||
92 | (boot_cpu_data.x86 != 0x15 || value > 6)) { | ||
93 | printk(KERN_ERR "Non-existent MCE bank: %lu\n", value); | ||
94 | return -EINVAL; | ||
95 | } | ||
96 | |||
97 | i_mce.bank = value; | ||
98 | |||
99 | amd_decode_mce(NULL, 0, &i_mce); | ||
100 | |||
101 | return count; | ||
102 | } | ||
103 | |||
104 | static ssize_t edac_inject_bank_show(struct kobject *kobj, | ||
105 | struct edac_mce_attr *attr, char *buf) | ||
106 | { | ||
107 | return sprintf(buf, "%d\n", i_mce.bank); | ||
108 | } | ||
109 | |||
110 | EDAC_MCE_ATTR(bank, 0644, edac_inject_bank_show, edac_inject_bank_store); | ||
111 | |||
112 | static struct edac_mce_attr *sysfs_attrs[] = { &mce_attr_status, &mce_attr_misc, | ||
113 | &mce_attr_addr, &mce_attr_bank | ||
114 | }; | ||
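From user space the injector is driven by writing the register files first and the bank file last, since the bank write triggers the decode. A hedged sketch of a driver program (the sysfs path is an assumption based on where the EDAC class typically lives on kernels of this vintage; the status value is made up):

#include <stdio.h>

/* Assumed location of the "mce" kobject under the EDAC sysfs class. */
#define INJ_DIR "/sys/devices/system/edac/mce"

static int write_attr(const char *name, const char *val)
{
	char path[128];
	FILE *f;

	snprintf(path, sizeof(path), INJ_DIR "/%s", name);
	f = fopen(path, "w");
	if (!f) {
		perror(path);
		return -1;
	}
	fprintf(f, "%s\n", val);
	return fclose(f);
}

int main(void)
{
	/* A made-up correctable DC error signature. */
	write_attr("status", "9400000000000151");
	write_attr("addr", "1000");
	write_attr("misc", "0");
	/* Writing the bank last triggers the decode. */
	return write_attr("bank", "0") ? 1 : 0;
}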
115 | |||
116 | static int __init edac_init_mce_inject(void) | ||
117 | { | ||
118 | struct sysdev_class *edac_class = NULL; | ||
119 | int i, err = 0; | ||
120 | |||
121 | edac_class = edac_get_sysfs_class(); | ||
122 | if (!edac_class) | ||
123 | return -EINVAL; | ||
124 | |||
125 | mce_kobj = kobject_create_and_add("mce", &edac_class->kset.kobj); | ||
126 | if (!mce_kobj) { | ||
127 | printk(KERN_ERR "Error creating a mce kset.\n"); | ||
128 | err = -ENOMEM; | ||
129 | goto err_mce_kobj; | ||
130 | } | ||
131 | |||
132 | for (i = 0; i < ARRAY_SIZE(sysfs_attrs); i++) { | ||
133 | err = sysfs_create_file(mce_kobj, &sysfs_attrs[i]->attr); | ||
134 | if (err) { | ||
135 | printk(KERN_ERR "Error creating %s in sysfs.\n", | ||
136 | sysfs_attrs[i]->attr.name); | ||
137 | goto err_sysfs_create; | ||
138 | } | ||
139 | } | ||
140 | return 0; | ||
141 | |||
142 | err_sysfs_create: | ||
143 | while (--i >= 0) | ||
144 | sysfs_remove_file(mce_kobj, &sysfs_attrs[i]->attr); | ||
145 | |||
146 | kobject_del(mce_kobj); | ||
147 | |||
148 | err_mce_kobj: | ||
149 | edac_put_sysfs_class(); | ||
150 | |||
151 | return err; | ||
152 | } | ||
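The error path above is the standard partial-failure unwind: remember how far the loop got, then walk backwards and undo only the attributes that were actually created. The idiom in a self-contained sketch:

#include <stdio.h>

#define N 4

static int create(int i)   { return i == 2 ? -1 : 0; }	/* fail on the 3rd */
static void destroy(int i) { printf("undo %d\n", i); }

int main(void)
{
	int i, err = 0;

	for (i = 0; i < N; i++) {
		err = create(i);
		if (err)
			goto unwind;
	}
	return 0;

unwind:
	while (--i >= 0)	/* undo 1, then 0; skip the one that failed */
		destroy(i);
	return 1;
}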
153 | |||
154 | static void __exit edac_exit_mce_inject(void) | ||
155 | { | ||
156 | int i; | ||
157 | |||
158 | for (i = 0; i < ARRAY_SIZE(sysfs_attrs); i++) | ||
159 | sysfs_remove_file(mce_kobj, &sysfs_attrs[i]->attr); | ||
160 | |||
161 | kobject_del(mce_kobj); | ||
162 | |||
163 | edac_put_sysfs_class(); | ||
164 | } | ||
165 | |||
166 | module_init(edac_init_mce_inject); | ||
167 | module_exit(edac_exit_mce_inject); | ||
168 | |||
169 | MODULE_LICENSE("GPL"); | ||
170 | MODULE_AUTHOR("Borislav Petkov <borislav.petkov@amd.com>"); | ||
171 | MODULE_AUTHOR("AMD Inc."); | ||
172 | MODULE_DESCRIPTION("MCE injection facility for testing MCE decoding"); | ||
diff --git a/drivers/edac/mpc85xx_edac.c b/drivers/edac/mpc85xx_edac.c index b123bb308a4a..38ab8e2cd7f4 100644 --- a/drivers/edac/mpc85xx_edac.c +++ b/drivers/edac/mpc85xx_edac.c | |||
@@ -200,8 +200,7 @@ static irqreturn_t mpc85xx_pci_isr(int irq, void *dev_id) | |||
200 | return IRQ_HANDLED; | 200 | return IRQ_HANDLED; |
201 | } | 201 | } |
202 | 202 | ||
203 | static int __devinit mpc85xx_pci_err_probe(struct platform_device *op, | 203 | static int __devinit mpc85xx_pci_err_probe(struct platform_device *op) |
204 | const struct of_device_id *match) | ||
205 | { | 204 | { |
206 | struct edac_pci_ctl_info *pci; | 205 | struct edac_pci_ctl_info *pci; |
207 | struct mpc85xx_pci_pdata *pdata; | 206 | struct mpc85xx_pci_pdata *pdata; |
@@ -338,7 +337,7 @@ static struct of_device_id mpc85xx_pci_err_of_match[] = { | |||
338 | }; | 337 | }; |
339 | MODULE_DEVICE_TABLE(of, mpc85xx_pci_err_of_match); | 338 | MODULE_DEVICE_TABLE(of, mpc85xx_pci_err_of_match); |
340 | 339 | ||
341 | static struct of_platform_driver mpc85xx_pci_err_driver = { | 340 | static struct platform_driver mpc85xx_pci_err_driver = { |
342 | .probe = mpc85xx_pci_err_probe, | 341 | .probe = mpc85xx_pci_err_probe, |
343 | .remove = __devexit_p(mpc85xx_pci_err_remove), | 342 | .remove = __devexit_p(mpc85xx_pci_err_remove), |
344 | .driver = { | 343 | .driver = { |
@@ -503,8 +502,7 @@ static irqreturn_t mpc85xx_l2_isr(int irq, void *dev_id) | |||
503 | return IRQ_HANDLED; | 502 | return IRQ_HANDLED; |
504 | } | 503 | } |
505 | 504 | ||
506 | static int __devinit mpc85xx_l2_err_probe(struct platform_device *op, | 505 | static int __devinit mpc85xx_l2_err_probe(struct platform_device *op) |
507 | const struct of_device_id *match) | ||
508 | { | 506 | { |
509 | struct edac_device_ctl_info *edac_dev; | 507 | struct edac_device_ctl_info *edac_dev; |
510 | struct mpc85xx_l2_pdata *pdata; | 508 | struct mpc85xx_l2_pdata *pdata; |
@@ -656,7 +654,7 @@ static struct of_device_id mpc85xx_l2_err_of_match[] = { | |||
656 | }; | 654 | }; |
657 | MODULE_DEVICE_TABLE(of, mpc85xx_l2_err_of_match); | 655 | MODULE_DEVICE_TABLE(of, mpc85xx_l2_err_of_match); |
658 | 656 | ||
659 | static struct of_platform_driver mpc85xx_l2_err_driver = { | 657 | static struct platform_driver mpc85xx_l2_err_driver = { |
660 | .probe = mpc85xx_l2_err_probe, | 658 | .probe = mpc85xx_l2_err_probe, |
661 | .remove = mpc85xx_l2_err_remove, | 659 | .remove = mpc85xx_l2_err_remove, |
662 | .driver = { | 660 | .driver = { |
@@ -956,8 +954,7 @@ static void __devinit mpc85xx_init_csrows(struct mem_ctl_info *mci) | |||
956 | } | 954 | } |
957 | } | 955 | } |
958 | 956 | ||
959 | static int __devinit mpc85xx_mc_err_probe(struct platform_device *op, | 957 | static int __devinit mpc85xx_mc_err_probe(struct platform_device *op) |
960 | const struct of_device_id *match) | ||
961 | { | 958 | { |
962 | struct mem_ctl_info *mci; | 959 | struct mem_ctl_info *mci; |
963 | struct mpc85xx_mc_pdata *pdata; | 960 | struct mpc85xx_mc_pdata *pdata; |
@@ -1136,7 +1133,7 @@ static struct of_device_id mpc85xx_mc_err_of_match[] = { | |||
1136 | }; | 1133 | }; |
1137 | MODULE_DEVICE_TABLE(of, mpc85xx_mc_err_of_match); | 1134 | MODULE_DEVICE_TABLE(of, mpc85xx_mc_err_of_match); |
1138 | 1135 | ||
1139 | static struct of_platform_driver mpc85xx_mc_err_driver = { | 1136 | static struct platform_driver mpc85xx_mc_err_driver = { |
1140 | .probe = mpc85xx_mc_err_probe, | 1137 | .probe = mpc85xx_mc_err_probe, |
1141 | .remove = mpc85xx_mc_err_remove, | 1138 | .remove = mpc85xx_mc_err_remove, |
1142 | .driver = { | 1139 | .driver = { |
@@ -1150,13 +1147,14 @@ static struct of_platform_driver mpc85xx_mc_err_driver = { | |||
1150 | static void __init mpc85xx_mc_clear_rfxe(void *data) | 1147 | static void __init mpc85xx_mc_clear_rfxe(void *data) |
1151 | { | 1148 | { |
1152 | orig_hid1[smp_processor_id()] = mfspr(SPRN_HID1); | 1149 | orig_hid1[smp_processor_id()] = mfspr(SPRN_HID1); |
1153 | mtspr(SPRN_HID1, (orig_hid1[smp_processor_id()] & ~0x20000)); | 1150 | mtspr(SPRN_HID1, (orig_hid1[smp_processor_id()] & ~HID1_RFXE)); |
1154 | } | 1151 | } |
1155 | #endif | 1152 | #endif |
1156 | 1153 | ||
1157 | static int __init mpc85xx_mc_init(void) | 1154 | static int __init mpc85xx_mc_init(void) |
1158 | { | 1155 | { |
1159 | int res = 0; | 1156 | int res = 0; |
1157 | u32 pvr = 0; | ||
1160 | 1158 | ||
1161 | printk(KERN_INFO "Freescale(R) MPC85xx EDAC driver, " | 1159 | printk(KERN_INFO "Freescale(R) MPC85xx EDAC driver, " |
1162 | "(C) 2006 Montavista Software\n"); | 1160 | "(C) 2006 Montavista Software\n"); |
@@ -1171,27 +1169,32 @@ static int __init mpc85xx_mc_init(void) | |||
1171 | break; | 1169 | break; |
1172 | } | 1170 | } |
1173 | 1171 | ||
1174 | res = of_register_platform_driver(&mpc85xx_mc_err_driver); | 1172 | res = platform_driver_register(&mpc85xx_mc_err_driver); |
1175 | if (res) | 1173 | if (res) |
1176 | printk(KERN_WARNING EDAC_MOD_STR "MC fails to register\n"); | 1174 | printk(KERN_WARNING EDAC_MOD_STR "MC fails to register\n"); |
1177 | 1175 | ||
1178 | res = of_register_platform_driver(&mpc85xx_l2_err_driver); | 1176 | res = platform_driver_register(&mpc85xx_l2_err_driver); |
1179 | if (res) | 1177 | if (res) |
1180 | printk(KERN_WARNING EDAC_MOD_STR "L2 fails to register\n"); | 1178 | printk(KERN_WARNING EDAC_MOD_STR "L2 fails to register\n"); |
1181 | 1179 | ||
1182 | #ifdef CONFIG_PCI | 1180 | #ifdef CONFIG_PCI |
1183 | res = of_register_platform_driver(&mpc85xx_pci_err_driver); | 1181 | res = platform_driver_register(&mpc85xx_pci_err_driver); |
1184 | if (res) | 1182 | if (res) |
1185 | printk(KERN_WARNING EDAC_MOD_STR "PCI fails to register\n"); | 1183 | printk(KERN_WARNING EDAC_MOD_STR "PCI fails to register\n"); |
1186 | #endif | 1184 | #endif |
1187 | 1185 | ||
1188 | #ifdef CONFIG_FSL_SOC_BOOKE | 1186 | #ifdef CONFIG_FSL_SOC_BOOKE |
1189 | /* | 1187 | pvr = mfspr(SPRN_PVR); |
1190 | * need to clear HID1[RFXE] to disable machine check int | 1188 | |
1191 | * so we can catch it | 1189 | if ((PVR_VER(pvr) == PVR_VER_E500V1) || |
1192 | */ | 1190 | (PVR_VER(pvr) == PVR_VER_E500V2)) { |
1193 | if (edac_op_state == EDAC_OPSTATE_INT) | 1191 | /* |
1194 | on_each_cpu(mpc85xx_mc_clear_rfxe, NULL, 0); | 1192 | * need to clear HID1[RFXE] to disable machine check int |
1193 | * so we can catch it | ||
1194 | */ | ||
1195 | if (edac_op_state == EDAC_OPSTATE_INT) | ||
1196 | on_each_cpu(mpc85xx_mc_clear_rfxe, NULL, 0); | ||
1197 | } | ||
1195 | #endif | 1198 | #endif |
1196 | 1199 | ||
1197 | return 0; | 1200 | return 0; |
@@ -1209,13 +1212,18 @@ static void __exit mpc85xx_mc_restore_hid1(void *data) | |||
1209 | static void __exit mpc85xx_mc_exit(void) | 1212 | static void __exit mpc85xx_mc_exit(void) |
1210 | { | 1213 | { |
1211 | #ifdef CONFIG_FSL_SOC_BOOKE | 1214 | #ifdef CONFIG_FSL_SOC_BOOKE |
1212 | on_each_cpu(mpc85xx_mc_restore_hid1, NULL, 0); | 1215 | u32 pvr = mfspr(SPRN_PVR); |
1216 | |||
1217 | if ((PVR_VER(pvr) == PVR_VER_E500V1) || | ||
1218 | (PVR_VER(pvr) == PVR_VER_E500V2)) { | ||
1219 | on_each_cpu(mpc85xx_mc_restore_hid1, NULL, 0); | ||
1220 | } | ||
1213 | #endif | 1221 | #endif |
1214 | #ifdef CONFIG_PCI | 1222 | #ifdef CONFIG_PCI |
1215 | of_unregister_platform_driver(&mpc85xx_pci_err_driver); | 1223 | platform_driver_unregister(&mpc85xx_pci_err_driver); |
1216 | #endif | 1224 | #endif |
1217 | of_unregister_platform_driver(&mpc85xx_l2_err_driver); | 1225 | platform_driver_unregister(&mpc85xx_l2_err_driver); |
1218 | of_unregister_platform_driver(&mpc85xx_mc_err_driver); | 1226 | platform_driver_unregister(&mpc85xx_mc_err_driver); |
1219 | } | 1227 | } |
1220 | 1228 | ||
1221 | module_exit(mpc85xx_mc_exit); | 1229 | module_exit(mpc85xx_mc_exit); |
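These hunks are part of the tree-wide retirement of of_platform_driver: probe() loses its of_device_id argument (drivers that still need the match data call of_match_device() themselves, as ppc4xx_edac does below) and registration goes through the regular platform bus. A minimal sketch of the resulting driver shape, with illustrative names rather than anything from this driver:

#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/of.h>
#include <linux/of_device.h>

static struct of_device_id demo_of_match[] = {
	{ .compatible = "acme,demo-edac" },	/* illustrative string */
	{ }
};
MODULE_DEVICE_TABLE(of, demo_of_match);

/* New-style probe: no of_device_id parameter anymore. */
static int demo_probe(struct platform_device *op)
{
	if (!of_match_device(demo_of_match, &op->dev))
		return -EINVAL;
	return 0;
}

static int demo_remove(struct platform_device *op)
{
	return 0;
}

static struct platform_driver demo_driver = {
	.probe	= demo_probe,
	.remove	= demo_remove,
	.driver	= {
		.name = "demo-edac",
		.owner = THIS_MODULE,
		.of_match_table = demo_of_match,
	},
};

static int __init demo_init(void)
{
	/* was: of_register_platform_driver(&demo_driver) */
	return platform_driver_register(&demo_driver);
}

static void __exit demo_exit(void)
{
	platform_driver_unregister(&demo_driver);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");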
diff --git a/drivers/edac/mpc85xx_edac.h b/drivers/edac/mpc85xx_edac.h index cb24df839460..932016f2cf06 100644 --- a/drivers/edac/mpc85xx_edac.h +++ b/drivers/edac/mpc85xx_edac.h | |||
@@ -11,7 +11,7 @@ | |||
11 | #ifndef _MPC85XX_EDAC_H_ | 11 | #ifndef _MPC85XX_EDAC_H_ |
12 | #define _MPC85XX_EDAC_H_ | 12 | #define _MPC85XX_EDAC_H_ |
13 | 13 | ||
14 | #define MPC85XX_REVISION " Ver: 2.0.0 " __DATE__ | 14 | #define MPC85XX_REVISION " Ver: 2.0.0" |
15 | #define EDAC_MOD_STR "MPC85xx_edac" | 15 | #define EDAC_MOD_STR "MPC85xx_edac" |
16 | 16 | ||
17 | #define mpc85xx_printk(level, fmt, arg...) \ | 17 | #define mpc85xx_printk(level, fmt, arg...) \ |
diff --git a/drivers/edac/mv64x60_edac.h b/drivers/edac/mv64x60_edac.h index e042e2daa8f4..c7f209c92a1a 100644 --- a/drivers/edac/mv64x60_edac.h +++ b/drivers/edac/mv64x60_edac.h | |||
@@ -12,7 +12,7 @@ | |||
12 | #ifndef _MV64X60_EDAC_H_ | 12 | #ifndef _MV64X60_EDAC_H_ |
13 | #define _MV64X60_EDAC_H_ | 13 | #define _MV64X60_EDAC_H_ |
14 | 14 | ||
15 | #define MV64x60_REVISION " Ver: 2.0.0 " __DATE__ | 15 | #define MV64x60_REVISION " Ver: 2.0.0" |
16 | #define EDAC_MOD_STR "MV64x60_edac" | 16 | #define EDAC_MOD_STR "MV64x60_edac" |
17 | 17 | ||
18 | #define mv64x60_printk(level, fmt, arg...) \ | 18 | #define mv64x60_printk(level, fmt, arg...) \ |
diff --git a/drivers/edac/ppc4xx_edac.c b/drivers/edac/ppc4xx_edac.c index 070cea41b661..0de7d8770891 100644 --- a/drivers/edac/ppc4xx_edac.c +++ b/drivers/edac/ppc4xx_edac.c | |||
@@ -113,7 +113,7 @@ | |||
113 | #define EDAC_OPSTATE_UNKNOWN_STR "unknown" | 113 | #define EDAC_OPSTATE_UNKNOWN_STR "unknown" |
114 | 114 | ||
115 | #define PPC4XX_EDAC_MODULE_NAME "ppc4xx_edac" | 115 | #define PPC4XX_EDAC_MODULE_NAME "ppc4xx_edac" |
116 | #define PPC4XX_EDAC_MODULE_REVISION "v1.0.0 " __DATE__ | 116 | #define PPC4XX_EDAC_MODULE_REVISION "v1.0.0" |
117 | 117 | ||
118 | #define PPC4XX_EDAC_MESSAGE_SIZE 256 | 118 | #define PPC4XX_EDAC_MESSAGE_SIZE 256 |
119 | 119 | ||
@@ -184,8 +184,7 @@ struct ppc4xx_ecc_status { | |||
184 | 184 | ||
185 | /* Function Prototypes */ | 185 | /* Function Prototypes */ |
186 | 186 | ||
187 | static int ppc4xx_edac_probe(struct platform_device *device, | 187 | static int ppc4xx_edac_probe(struct platform_device *device) |
188 | const struct of_device_id *device_id); | ||
189 | static int ppc4xx_edac_remove(struct platform_device *device); | 188 | static int ppc4xx_edac_remove(struct platform_device *device); |
190 | 189 | ||
191 | /* Global Variables */ | 190 | /* Global Variables */ |
@@ -201,7 +200,7 @@ static struct of_device_id ppc4xx_edac_match[] = { | |||
201 | { } | 200 | { } |
202 | }; | 201 | }; |
203 | 202 | ||
204 | static struct of_platform_driver ppc4xx_edac_driver = { | 203 | static struct platform_driver ppc4xx_edac_driver = { |
205 | .probe = ppc4xx_edac_probe, | 204 | .probe = ppc4xx_edac_probe, |
206 | .remove = ppc4xx_edac_remove, | 205 | .remove = ppc4xx_edac_remove, |
207 | .driver = { | 206 | .driver = { |
@@ -873,7 +872,7 @@ ppc4xx_edac_get_mtype(u32 mcopt1) | |||
873 | } | 872 | } |
874 | 873 | ||
875 | /** | 874 | /** |
876 | * ppc4xx_edac_init_csrows - intialize driver instance rows | 875 | * ppc4xx_edac_init_csrows - initialize driver instance rows |
877 | * @mci: A pointer to the EDAC memory controller instance | 876 | * @mci: A pointer to the EDAC memory controller instance |
878 | * associated with the ibm,sdram-4xx-ddr2 controller for which | 877 | * associated with the ibm,sdram-4xx-ddr2 controller for which |
879 | * the csrows (i.e. banks/ranks) are being initialized. | 878 | * the csrows (i.e. banks/ranks) are being initialized. |
@@ -881,7 +880,7 @@ ppc4xx_edac_get_mtype(u32 mcopt1) | |||
881 | * currently set for the controller, from which bank width | 880 | * currently set for the controller, from which bank width |
882 | * and memory typ information is derived. | 881 | * and memory typ information is derived. |
883 | * | 882 | * |
884 | * This routine intializes the virtual "chip select rows" associated | 883 | * This routine initializes the virtual "chip select rows" associated |
885 | * with the EDAC memory controller instance. An ibm,sdram-4xx-ddr2 | 884 | * with the EDAC memory controller instance. An ibm,sdram-4xx-ddr2 |
886 | * controller bank/rank is mapped to a row. | 885 | * controller bank/rank is mapped to a row. |
887 | * | 886 | * |
@@ -992,14 +991,11 @@ ppc4xx_edac_init_csrows(struct mem_ctl_info *mci, u32 mcopt1) | |||
992 | } | 991 | } |
993 | 992 | ||
994 | /** | 993 | /** |
995 | * ppc4xx_edac_mc_init - intialize driver instance | 994 | * ppc4xx_edac_mc_init - initialize driver instance |
996 | * @mci: A pointer to the EDAC memory controller instance being | 995 | * @mci: A pointer to the EDAC memory controller instance being |
997 | * initialized. | 996 | * initialized. |
998 | * @op: A pointer to the OpenFirmware device tree node associated | 997 | * @op: A pointer to the OpenFirmware device tree node associated |
999 | * with the controller this EDAC instance is bound to. | 998 | * with the controller this EDAC instance is bound to. |
1000 | * @match: A pointer to the OpenFirmware device tree match | ||
1001 | * information associated with the controller this EDAC instance | ||
1002 | * is bound to. | ||
1003 | * @dcr_host: A pointer to the DCR data containing the DCR mapping | 999 | * @dcr_host: A pointer to the DCR data containing the DCR mapping |
1004 | * for this controller instance. | 1000 | * for this controller instance. |
1005 | * @mcopt1: The 32-bit Memory Controller Option 1 register value | 1001 | * @mcopt1: The 32-bit Memory Controller Option 1 register value |
@@ -1015,7 +1011,6 @@ ppc4xx_edac_init_csrows(struct mem_ctl_info *mci, u32 mcopt1) | |||
1015 | static int __devinit | 1011 | static int __devinit |
1016 | ppc4xx_edac_mc_init(struct mem_ctl_info *mci, | 1012 | ppc4xx_edac_mc_init(struct mem_ctl_info *mci, |
1017 | struct platform_device *op, | 1013 | struct platform_device *op, |
1018 | const struct of_device_id *match, | ||
1019 | const dcr_host_t *dcr_host, | 1014 | const dcr_host_t *dcr_host, |
1020 | u32 mcopt1) | 1015 | u32 mcopt1) |
1021 | { | 1016 | { |
@@ -1024,7 +1019,7 @@ ppc4xx_edac_mc_init(struct mem_ctl_info *mci, | |||
1024 | struct ppc4xx_edac_pdata *pdata = NULL; | 1019 | struct ppc4xx_edac_pdata *pdata = NULL; |
1025 | const struct device_node *np = op->dev.of_node; | 1020 | const struct device_node *np = op->dev.of_node; |
1026 | 1021 | ||
1027 | if (match == NULL) | 1022 | if (of_match_device(ppc4xx_edac_match, &op->dev) == NULL) |
1028 | return -EINVAL; | 1023 | return -EINVAL; |
1029 | 1024 | ||
1030 | /* Initial driver pointers and private data */ | 1025 | /* Initial driver pointers and private data */ |
@@ -1227,9 +1222,6 @@ ppc4xx_edac_map_dcrs(const struct device_node *np, dcr_host_t *dcr_host) | |||
1227 | * ppc4xx_edac_probe - check controller and bind driver | 1222 | * ppc4xx_edac_probe - check controller and bind driver |
1228 | * @op: A pointer to the OpenFirmware device tree node associated | 1223 | * @op: A pointer to the OpenFirmware device tree node associated |
1229 | * with the controller being probed for driver binding. | 1224 | * with the controller being probed for driver binding. |
1230 | * @match: A pointer to the OpenFirmware device tree match | ||
1231 | * information associated with the controller being probed | ||
1232 | * for driver binding. | ||
1233 | * | 1225 | * |
1234 | * This routine probes a specific ibm,sdram-4xx-ddr2 controller | 1226 | * This routine probes a specific ibm,sdram-4xx-ddr2 controller |
1235 | * instance for binding with the driver. | 1227 | * instance for binding with the driver. |
@@ -1237,8 +1229,7 @@ ppc4xx_edac_map_dcrs(const struct device_node *np, dcr_host_t *dcr_host) | |||
1237 | * Returns 0 if the controller instance was successfully bound to the | 1229 | * Returns 0 if the controller instance was successfully bound to the |
1238 | * driver; otherwise, < 0 on error. | 1230 | * driver; otherwise, < 0 on error. |
1239 | */ | 1231 | */ |
1240 | static int __devinit | 1232 | static int __devinit ppc4xx_edac_probe(struct platform_device *op) |
1241 | ppc4xx_edac_probe(struct platform_device *op, const struct of_device_id *match) | ||
1242 | { | 1233 | { |
1243 | int status = 0; | 1234 | int status = 0; |
1244 | u32 mcopt1, memcheck; | 1235 | u32 mcopt1, memcheck; |
@@ -1304,7 +1295,7 @@ ppc4xx_edac_probe(struct platform_device *op, const struct of_device_id *match) | |||
1304 | goto done; | 1295 | goto done; |
1305 | } | 1296 | } |
1306 | 1297 | ||
1307 | status = ppc4xx_edac_mc_init(mci, op, match, &dcr_host, mcopt1); | 1298 | status = ppc4xx_edac_mc_init(mci, op, &dcr_host, mcopt1); |
1308 | 1299 | ||
1309 | if (status) { | 1300 | if (status) { |
1310 | ppc4xx_edac_mc_printk(KERN_ERR, mci, | 1301 | ppc4xx_edac_mc_printk(KERN_ERR, mci, |
@@ -1421,7 +1412,7 @@ ppc4xx_edac_init(void) | |||
1421 | 1412 | ||
1422 | ppc4xx_edac_opstate_init(); | 1413 | ppc4xx_edac_opstate_init(); |
1423 | 1414 | ||
1424 | return of_register_platform_driver(&ppc4xx_edac_driver); | 1415 | return platform_driver_register(&ppc4xx_edac_driver); |
1425 | } | 1416 | } |
1426 | 1417 | ||
1427 | /** | 1418 | /** |
@@ -1434,7 +1425,7 @@ ppc4xx_edac_init(void) | |||
1434 | static void __exit | 1425 | static void __exit |
1435 | ppc4xx_edac_exit(void) | 1426 | ppc4xx_edac_exit(void) |
1436 | { | 1427 | { |
1437 | of_unregister_platform_driver(&ppc4xx_edac_driver); | 1428 | platform_driver_unregister(&ppc4xx_edac_driver); |
1438 | } | 1429 | } |
1439 | 1430 | ||
1440 | module_init(ppc4xx_edac_init); | 1431 | module_init(ppc4xx_edac_init); |
diff --git a/drivers/edac/r82600_edac.c b/drivers/edac/r82600_edac.c index 6a822c631ef5..b153674431f1 100644 --- a/drivers/edac/r82600_edac.c +++ b/drivers/edac/r82600_edac.c | |||
@@ -22,7 +22,7 @@ | |||
22 | #include <linux/edac.h> | 22 | #include <linux/edac.h> |
23 | #include "edac_core.h" | 23 | #include "edac_core.h" |
24 | 24 | ||
25 | #define R82600_REVISION " Ver: 2.0.2 " __DATE__ | 25 | #define R82600_REVISION " Ver: 2.0.2" |
26 | #define EDAC_MOD_STR "r82600_edac" | 26 | #define EDAC_MOD_STR "r82600_edac" |
27 | 27 | ||
28 | #define r82600_printk(level, fmt, arg...) \ | 28 | #define r82600_printk(level, fmt, arg...) \ |
@@ -120,7 +120,7 @@ | |||
120 | * write 0=NOP | 120 | * write 0=NOP |
121 | */ | 121 | */ |
122 | 122 | ||
123 | #define R82600_DRBA 0x60 /* + 0x60..0x63 SDRAM Row Boundry Address | 123 | #define R82600_DRBA 0x60 /* + 0x60..0x63 SDRAM Row Boundary Address |
124 | * Registers | 124 | * Registers |
125 | * | 125 | * |
126 | * 7:0 Address lines 30:24 - upper limit of | 126 | * 7:0 Address lines 30:24 - upper limit of |
@@ -217,7 +217,7 @@ static void r82600_init_csrows(struct mem_ctl_info *mci, struct pci_dev *pdev, | |||
217 | { | 217 | { |
218 | struct csrow_info *csrow; | 218 | struct csrow_info *csrow; |
219 | int index; | 219 | int index; |
220 | u8 drbar; /* SDRAM Row Boundry Address Register */ | 220 | u8 drbar; /* SDRAM Row Boundary Address Register */ |
221 | u32 row_high_limit, row_high_limit_last; | 221 | u32 row_high_limit, row_high_limit_last; |
222 | u32 reg_sdram, ecc_on, row_base; | 222 | u32 reg_sdram, ecc_on, row_base; |
223 | 223 | ||
@@ -236,7 +236,7 @@ static void r82600_init_csrows(struct mem_ctl_info *mci, struct pci_dev *pdev, | |||
236 | row_high_limit = ((u32) drbar << 24); | 236 | row_high_limit = ((u32) drbar << 24); |
237 | /* row_high_limit = ((u32)drbar << 24) | 0xffffffUL; */ | 237 | /* row_high_limit = ((u32)drbar << 24) | 0xffffffUL; */ |
238 | 238 | ||
239 | debugf1("%s() Row=%d, Boundry Address=%#0x, Last = %#0x\n", | 239 | debugf1("%s() Row=%d, Boundary Address=%#0x, Last = %#0x\n", |
240 | __func__, index, row_high_limit, row_high_limit_last); | 240 | __func__, index, row_high_limit, row_high_limit_last); |
241 | 241 | ||
242 | /* Empty row [p.57] */ | 242 | /* Empty row [p.57] */ |
diff --git a/drivers/edac/tile_edac.c b/drivers/edac/tile_edac.c new file mode 100644 index 000000000000..1d5cf06f6c6b --- /dev/null +++ b/drivers/edac/tile_edac.c | |||
@@ -0,0 +1,254 @@ | |||
1 | /* | ||
2 | * Copyright 2011 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | * Tilera-specific EDAC driver. | ||
14 | * | ||
15 | * This source code is derived from the following driver: | ||
16 | * | ||
17 | * Cell MIC driver for ECC counting | ||
18 | * | ||
19 | * Copyright 2007 Benjamin Herrenschmidt, IBM Corp. | ||
20 | * <benh@kernel.crashing.org> | ||
21 | * | ||
22 | */ | ||
23 | |||
24 | #include <linux/module.h> | ||
25 | #include <linux/init.h> | ||
26 | #include <linux/platform_device.h> | ||
27 | #include <linux/io.h> | ||
28 | #include <linux/uaccess.h> | ||
29 | #include <linux/edac.h> | ||
30 | #include <hv/hypervisor.h> | ||
31 | #include <hv/drv_mshim_intf.h> | ||
32 | |||
33 | #include "edac_core.h" | ||
34 | |||
35 | #define DRV_NAME "tile-edac" | ||
36 | |||
37 | /* Number of cs_rows needed per memory controller on TILEPro. */ | ||
38 | #define TILE_EDAC_NR_CSROWS 1 | ||
39 | |||
40 | /* Number of channels per memory controller on TILEPro. */ | ||
41 | #define TILE_EDAC_NR_CHANS 1 | ||
42 | |||
43 | /* Granularity of reported error in bytes on TILEPro. */ | ||
44 | #define TILE_EDAC_ERROR_GRAIN 8 | ||
45 | |||
46 | /* TILE processor has multiple independent memory controllers. */ | ||
47 | struct platform_device *mshim_pdev[TILE_MAX_MSHIMS]; | ||
48 | |||
49 | struct tile_edac_priv { | ||
50 | int hv_devhdl; /* Hypervisor device handle. */ | ||
51 | int node; /* Memory controller instance #. */ | ||
52 | unsigned int ce_count; /* | ||
53 | * Correctable-error counter | ||
54 | * kept by the driver. | ||
55 | */ | ||
56 | }; | ||
57 | |||
58 | static void tile_edac_check(struct mem_ctl_info *mci) | ||
59 | { | ||
60 | struct tile_edac_priv *priv = mci->pvt_info; | ||
61 | struct mshim_mem_error mem_error; | ||
62 | |||
63 | if (hv_dev_pread(priv->hv_devhdl, 0, (HV_VirtAddr)&mem_error, | ||
64 | sizeof(struct mshim_mem_error), MSHIM_MEM_ERROR_OFF) != | ||
65 | sizeof(struct mshim_mem_error)) { | ||
66 | pr_err(DRV_NAME ": MSHIM_MEM_ERROR_OFF pread failure.\n"); | ||
67 | return; | ||
68 | } | ||
69 | |||
70 | /* Check if the current error count is different from the saved one. */ | ||
71 | if (mem_error.sbe_count != priv->ce_count) { | ||
72 | dev_dbg(mci->dev, "ECC CE err on node %d\n", priv->node); | ||
73 | priv->ce_count = mem_error.sbe_count; | ||
74 | edac_mc_handle_ce(mci, 0, 0, 0, 0, 0, mci->ctl_name); | ||
75 | } | ||
76 | } | ||
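tile_edac_check() runs under EDAC's poll mode and compares the hypervisor's running single-bit-error total against the last value the driver saw, reporting only when it moved. The counter-delta pattern in a self-contained sketch (read_hw_count() stands in for the hv_dev_pread() call; note the real driver logs one CE event per poll rather than the numeric delta):

#include <stdio.h>

static unsigned int saved_count;

/* Stand-in for the hypervisor counter read; returns a running total. */
static unsigned int read_hw_count(void)
{
	static unsigned int hw = 0;
	return hw += 2;	/* pretend two new CEs arrive per poll */
}

static void poll_once(void)
{
	unsigned int now = read_hw_count();

	if (now != saved_count) {
		printf("report %u new correctable error(s)\n",
		       now - saved_count);
		saved_count = now;	/* remember for the next poll */
	}
}

int main(void)
{
	poll_once();
	poll_once();
	return 0;
}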
77 | |||
78 | /* | ||
79 | * Initialize the 'csrows' table within the mci control structure with the | ||
80 | * addressing of memory. | ||
81 | */ | ||
82 | static int __devinit tile_edac_init_csrows(struct mem_ctl_info *mci) | ||
83 | { | ||
84 | struct csrow_info *csrow = &mci->csrows[0]; | ||
85 | struct tile_edac_priv *priv = mci->pvt_info; | ||
86 | struct mshim_mem_info mem_info; | ||
87 | |||
88 | if (hv_dev_pread(priv->hv_devhdl, 0, (HV_VirtAddr)&mem_info, | ||
89 | sizeof(struct mshim_mem_info), MSHIM_MEM_INFO_OFF) != | ||
90 | sizeof(struct mshim_mem_info)) { | ||
91 | pr_err(DRV_NAME ": MSHIM_MEM_INFO_OFF pread failure.\n"); | ||
92 | return -1; | ||
93 | } | ||
94 | |||
95 | if (mem_info.mem_ecc) | ||
96 | csrow->edac_mode = EDAC_SECDED; | ||
97 | else | ||
98 | csrow->edac_mode = EDAC_NONE; | ||
99 | switch (mem_info.mem_type) { | ||
100 | case DDR2: | ||
101 | csrow->mtype = MEM_DDR2; | ||
102 | break; | ||
103 | |||
104 | case DDR3: | ||
105 | csrow->mtype = MEM_DDR3; | ||
106 | break; | ||
107 | |||
108 | default: | ||
109 | return -1; | ||
110 | } | ||
111 | |||
112 | csrow->first_page = 0; | ||
113 | csrow->nr_pages = mem_info.mem_size >> PAGE_SHIFT; | ||
114 | csrow->last_page = csrow->first_page + csrow->nr_pages - 1; | ||
115 | csrow->grain = TILE_EDAC_ERROR_GRAIN; | ||
116 | csrow->dtype = DEV_UNKNOWN; | ||
117 | |||
118 | return 0; | ||
119 | } | ||
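With one chip-select row and one channel, the only real arithmetic in tile_edac_init_csrows() is turning the byte size reported by the hypervisor into a page range. A small sketch of that conversion, assuming 4 KiB pages for illustration (TILEPro's actual PAGE_SHIFT may differ):

#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT 12	/* assumed 4 KiB pages, for illustration only */

int main(void)
{
	uint64_t mem_size   = 4ULL << 30;	/* e.g. a 4 GiB controller */
	uint64_t first_page = 0;
	uint64_t nr_pages   = mem_size >> PAGE_SHIFT;
	uint64_t last_page  = first_page + nr_pages - 1;

	printf("nr_pages=%llu last_page=%llu\n",
	       (unsigned long long)nr_pages, (unsigned long long)last_page);
	return 0;
}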
120 | |||
121 | static int __devinit tile_edac_mc_probe(struct platform_device *pdev) | ||
122 | { | ||
123 | char hv_file[32]; | ||
124 | int hv_devhdl; | ||
125 | struct mem_ctl_info *mci; | ||
126 | struct tile_edac_priv *priv; | ||
127 | int rc; | ||
128 | |||
129 | sprintf(hv_file, "mshim/%d", pdev->id); | ||
130 | hv_devhdl = hv_dev_open((HV_VirtAddr)hv_file, 0); | ||
131 | if (hv_devhdl < 0) | ||
132 | return -EINVAL; | ||
133 | |||
134 | /* A TILE MC has a single channel and one chip-select row. */ | ||
135 | mci = edac_mc_alloc(sizeof(struct tile_edac_priv), | ||
136 | TILE_EDAC_NR_CSROWS, TILE_EDAC_NR_CHANS, pdev->id); | ||
137 | if (mci == NULL) | ||
138 | return -ENOMEM; | ||
139 | priv = mci->pvt_info; | ||
140 | priv->node = pdev->id; | ||
141 | priv->hv_devhdl = hv_devhdl; | ||
142 | |||
143 | mci->dev = &pdev->dev; | ||
144 | mci->mtype_cap = MEM_FLAG_DDR2; | ||
145 | mci->edac_ctl_cap = EDAC_FLAG_SECDED; | ||
146 | |||
147 | mci->mod_name = DRV_NAME; | ||
148 | mci->ctl_name = "TILEPro_Memory_Controller"; | ||
149 | mci->dev_name = dev_name(&pdev->dev); | ||
150 | mci->edac_check = tile_edac_check; | ||
151 | |||
152 | /* | ||
153 | * Initialize the MC control structure 'csrows' table | ||
154 | * with the mapping and control information. | ||
155 | */ | ||
156 | if (tile_edac_init_csrows(mci)) { | ||
157 | /* No csrows found. */ | ||
158 | mci->edac_cap = EDAC_FLAG_NONE; | ||
159 | } else { | ||
160 | mci->edac_cap = EDAC_FLAG_SECDED; | ||
161 | } | ||
162 | |||
163 | platform_set_drvdata(pdev, mci); | ||
164 | |||
165 | /* Register with EDAC core */ | ||
166 | rc = edac_mc_add_mc(mci); | ||
167 | if (rc) { | ||
168 | dev_err(&pdev->dev, "failed to register with EDAC core\n"); | ||
169 | edac_mc_free(mci); | ||
170 | return rc; | ||
171 | } | ||
172 | |||
173 | return 0; | ||
174 | } | ||
175 | |||
176 | static int __devexit tile_edac_mc_remove(struct platform_device *pdev) | ||
177 | { | ||
178 | struct mem_ctl_info *mci = platform_get_drvdata(pdev); | ||
179 | |||
180 | edac_mc_del_mc(&pdev->dev); | ||
181 | if (mci) | ||
182 | edac_mc_free(mci); | ||
183 | return 0; | ||
184 | } | ||
185 | |||
186 | static struct platform_driver tile_edac_mc_driver = { | ||
187 | .driver = { | ||
188 | .name = DRV_NAME, | ||
189 | .owner = THIS_MODULE, | ||
190 | }, | ||
191 | .probe = tile_edac_mc_probe, | ||
192 | .remove = __devexit_p(tile_edac_mc_remove), | ||
193 | }; | ||
194 | |||
195 | /* | ||
196 | * Driver init routine. | ||
197 | */ | ||
198 | static int __init tile_edac_init(void) | ||
199 | { | ||
200 | char hv_file[32]; | ||
201 | struct platform_device *pdev; | ||
202 | int i, err, num = 0; | ||
203 | |||
204 | /* Only support POLL mode. */ | ||
205 | edac_op_state = EDAC_OPSTATE_POLL; | ||
206 | |||
207 | err = platform_driver_register(&tile_edac_mc_driver); | ||
208 | if (err) | ||
209 | return err; | ||
210 | |||
211 | for (i = 0; i < TILE_MAX_MSHIMS; i++) { | ||
212 | /* | ||
213 | * Not all memory controllers are configured, e.g. on a | ||
214 | * simulator, so register only those mshims that the | ||
215 | * hypervisor has configured. | ||
216 | */ | ||
217 | sprintf(hv_file, "mshim/%d", i); | ||
218 | if (hv_dev_open((HV_VirtAddr)hv_file, 0) < 0) | ||
219 | continue; | ||
220 | |||
221 | pdev = platform_device_register_simple(DRV_NAME, i, NULL, 0); | ||
222 | if (IS_ERR(pdev)) | ||
223 | continue; | ||
224 | mshim_pdev[i] = pdev; | ||
225 | num++; | ||
226 | } | ||
227 | |||
228 | if (num == 0) { | ||
229 | platform_driver_unregister(&tile_edac_mc_driver); | ||
230 | return -ENODEV; | ||
231 | } | ||
232 | return 0; | ||
233 | } | ||
234 | |||
235 | /* | ||
236 | * Driver cleanup routine. | ||
237 | */ | ||
238 | static void __exit tile_edac_exit(void) | ||
239 | { | ||
240 | int i; | ||
241 | |||
242 | for (i = 0; i < TILE_MAX_MSHIMS; i++) { | ||
243 | struct platform_device *pdev = mshim_pdev[i]; | ||
244 | if (!pdev) | ||
245 | continue; | ||
246 | |||
247 | platform_set_drvdata(pdev, NULL); | ||
248 | platform_device_unregister(pdev); | ||
249 | } | ||
250 | platform_driver_unregister(&tile_edac_mc_driver); | ||
251 | } | ||
252 | |||
253 | module_init(tile_edac_init); | ||
254 | module_exit(tile_edac_exit); | ||