author	Linus Torvalds <torvalds@linux-foundation.org>	2009-06-11 13:33:36 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2009-06-11 13:33:36 -0400
commit	c29f5ec022451546be1e0b24c330a0368e63e4a7 (patch)
tree	af3c2fc0ba3236fd4c1c2d1a4303fb5a3dc396ab
parent	d3d07d941fd80c173b6d690ded00ee5fb8302e06 (diff)
parent	c476c23b45a41eb4e3ea63af786cc4d74762fe11 (diff)
Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/bp/bp
* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/bp/bp: (26 commits)
  amd64_edac: add MAINTAINERS entry
  EDAC: do not enable modules by default
  amd64_edac: do not enable module by default
  amd64_edac: add module registration routines
  amd64_edac: add ECC reporting initializers
  amd64_edac: add EDAC core-related initializers
  amd64_edac: add error decoding logic
  amd64_edac: add ECC chipkill syndrome mapping table
  amd64_edac: add per-family descriptors
  amd64_edac: add F10h-and-later methods-p3
  amd64_edac: add F10h-and-later methods-p2
  amd64_edac: add F10h-and-later methods-p1
  amd64_edac: add k8-specific methods
  amd64_edac: assign DRAM chip select base and mask in a family-specific way
  amd64_edac: add helper to dump relevant registers
  amd64_edac: add DRAM address type conversion facilities
  amd64_edac: add functionality to compute the DRAM hole
  amd64_edac: add sys addr to memory controller mapping helpers
  amd64_edac: add memory scrubber interface
  amd64_edac: add MCA error types
  ...
-rw-r--r--	MAINTAINERS                         |   10
-rw-r--r--	arch/x86/include/asm/msr.h          |   23
-rw-r--r--	arch/x86/lib/Makefile               |    2
-rw-r--r--	arch/x86/lib/msr-on-cpu.c           |   97
-rw-r--r--	arch/x86/lib/msr.c                  |  183
-rw-r--r--	drivers/edac/Kconfig                |   26
-rw-r--r--	drivers/edac/Makefile               |    7
-rw-r--r--	drivers/edac/amd64_edac.c           | 3354
-rw-r--r--	drivers/edac/amd64_edac.h           |  644
-rw-r--r--	drivers/edac/amd64_edac_dbg.c       |  255
-rw-r--r--	drivers/edac/amd64_edac_err_types.c |  161
-rw-r--r--	drivers/edac/amd64_edac_inj.c       |  185
-rw-r--r--	drivers/edac/edac_core.h            |    9
13 files changed, 4853 insertions, 103 deletions
diff --git a/MAINTAINERS b/MAINTAINERS
index 84285b5ba359..ccdb57524e3c 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -1979,6 +1979,16 @@ F: Documentation/edac.txt
 F:	drivers/edac/edac_*
 F:	include/linux/edac.h
 
+EDAC-AMD64
+P:	Doug Thompson
+M:	dougthompson@xmission.com
+P:	Borislav Petkov
+M:	borislav.petkov@amd.com
+L:	bluesmoke-devel@lists.sourceforge.net (moderated for non-subscribers)
+W:	bluesmoke.sourceforge.net
+S:	Supported
+F:	drivers/edac/amd64_edac*
+
 EDAC-E752X
 P:	Mark Gross
 M:	mark.gross@intel.com
diff --git a/arch/x86/include/asm/msr.h b/arch/x86/include/asm/msr.h
index 638bf6241807..22603764e7db 100644
--- a/arch/x86/include/asm/msr.h
+++ b/arch/x86/include/asm/msr.h
@@ -12,6 +12,17 @@
 
 #include <asm/asm.h>
 #include <asm/errno.h>
+#include <asm/cpumask.h>
+
+struct msr {
+	union {
+		struct {
+			u32 l;
+			u32 h;
+		};
+		u64 q;
+	};
+};
 
 static inline unsigned long long native_read_tscp(unsigned int *aux)
 {
@@ -216,6 +227,8 @@ do { \
 #ifdef CONFIG_SMP
 int rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h);
 int wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h);
+void rdmsr_on_cpus(const cpumask_t *mask, u32 msr_no, struct msr *msrs);
+void wrmsr_on_cpus(const cpumask_t *mask, u32 msr_no, struct msr *msrs);
 int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h);
 int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h);
 #else  /* CONFIG_SMP */
@@ -229,6 +242,16 @@ static inline int wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
 	wrmsr(msr_no, l, h);
 	return 0;
 }
+static inline void rdmsr_on_cpus(const cpumask_t *m, u32 msr_no,
+				struct msr *msrs)
+{
+	rdmsr_on_cpu(0, msr_no, &(msrs[0].l), &(msrs[0].h));
+}
+static inline void wrmsr_on_cpus(const cpumask_t *m, u32 msr_no,
+				struct msr *msrs)
+{
+	wrmsr_on_cpu(0, msr_no, msrs[0].l, msrs[0].h);
+}
 static inline int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no,
 				    u32 *l, u32 *h)
 {
diff --git a/arch/x86/lib/Makefile b/arch/x86/lib/Makefile
index 55e11aa6d66c..f9d35632666b 100644
--- a/arch/x86/lib/Makefile
+++ b/arch/x86/lib/Makefile
@@ -2,7 +2,7 @@
 # Makefile for x86 specific library files.
 #
 
-obj-$(CONFIG_SMP) := msr-on-cpu.o
+obj-$(CONFIG_SMP) := msr.o
 
 lib-y := delay.o
 lib-y += thunk_$(BITS).o
diff --git a/arch/x86/lib/msr-on-cpu.c b/arch/x86/lib/msr-on-cpu.c
deleted file mode 100644
index 321cf720dbb6..000000000000
--- a/arch/x86/lib/msr-on-cpu.c
+++ /dev/null
@@ -1,97 +0,0 @@
1#include <linux/module.h>
2#include <linux/preempt.h>
3#include <linux/smp.h>
4#include <asm/msr.h>
5
6struct msr_info {
7 u32 msr_no;
8 u32 l, h;
9 int err;
10};
11
12static void __rdmsr_on_cpu(void *info)
13{
14 struct msr_info *rv = info;
15
16 rdmsr(rv->msr_no, rv->l, rv->h);
17}
18
19static void __wrmsr_on_cpu(void *info)
20{
21 struct msr_info *rv = info;
22
23 wrmsr(rv->msr_no, rv->l, rv->h);
24}
25
26int rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h)
27{
28 int err;
29 struct msr_info rv;
30
31 rv.msr_no = msr_no;
32 err = smp_call_function_single(cpu, __rdmsr_on_cpu, &rv, 1);
33 *l = rv.l;
34 *h = rv.h;
35
36 return err;
37}
38
39int wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
40{
41 int err;
42 struct msr_info rv;
43
44 rv.msr_no = msr_no;
45 rv.l = l;
46 rv.h = h;
47 err = smp_call_function_single(cpu, __wrmsr_on_cpu, &rv, 1);
48
49 return err;
50}
51
52/* These "safe" variants are slower and should be used when the target MSR
53 may not actually exist. */
54static void __rdmsr_safe_on_cpu(void *info)
55{
56 struct msr_info *rv = info;
57
58 rv->err = rdmsr_safe(rv->msr_no, &rv->l, &rv->h);
59}
60
61static void __wrmsr_safe_on_cpu(void *info)
62{
63 struct msr_info *rv = info;
64
65 rv->err = wrmsr_safe(rv->msr_no, rv->l, rv->h);
66}
67
68int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h)
69{
70 int err;
71 struct msr_info rv;
72
73 rv.msr_no = msr_no;
74 err = smp_call_function_single(cpu, __rdmsr_safe_on_cpu, &rv, 1);
75 *l = rv.l;
76 *h = rv.h;
77
78 return err ? err : rv.err;
79}
80
81int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
82{
83 int err;
84 struct msr_info rv;
85
86 rv.msr_no = msr_no;
87 rv.l = l;
88 rv.h = h;
89 err = smp_call_function_single(cpu, __wrmsr_safe_on_cpu, &rv, 1);
90
91 return err ? err : rv.err;
92}
93
94EXPORT_SYMBOL(rdmsr_on_cpu);
95EXPORT_SYMBOL(wrmsr_on_cpu);
96EXPORT_SYMBOL(rdmsr_safe_on_cpu);
97EXPORT_SYMBOL(wrmsr_safe_on_cpu);
diff --git a/arch/x86/lib/msr.c b/arch/x86/lib/msr.c
new file mode 100644
index 000000000000..1440b9c0547e
--- /dev/null
+++ b/arch/x86/lib/msr.c
@@ -0,0 +1,183 @@
1#include <linux/module.h>
2#include <linux/preempt.h>
3#include <linux/smp.h>
4#include <asm/msr.h>
5
6struct msr_info {
7 u32 msr_no;
8 struct msr reg;
9 struct msr *msrs;
10 int off;
11 int err;
12};
13
14static void __rdmsr_on_cpu(void *info)
15{
16 struct msr_info *rv = info;
17 struct msr *reg;
18 int this_cpu = raw_smp_processor_id();
19
20 if (rv->msrs)
21 reg = &rv->msrs[this_cpu - rv->off];
22 else
23 reg = &rv->reg;
24
25 rdmsr(rv->msr_no, reg->l, reg->h);
26}
27
28static void __wrmsr_on_cpu(void *info)
29{
30 struct msr_info *rv = info;
31 struct msr *reg;
32 int this_cpu = raw_smp_processor_id();
33
34 if (rv->msrs)
35 reg = &rv->msrs[this_cpu - rv->off];
36 else
37 reg = &rv->reg;
38
39 wrmsr(rv->msr_no, reg->l, reg->h);
40}
41
42int rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h)
43{
44 int err;
45 struct msr_info rv;
46
47 memset(&rv, 0, sizeof(rv));
48
49 rv.msr_no = msr_no;
50 err = smp_call_function_single(cpu, __rdmsr_on_cpu, &rv, 1);
51 *l = rv.reg.l;
52 *h = rv.reg.h;
53
54 return err;
55}
56EXPORT_SYMBOL(rdmsr_on_cpu);
57
58int wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
59{
60 int err;
61 struct msr_info rv;
62
63 memset(&rv, 0, sizeof(rv));
64
65 rv.msr_no = msr_no;
66 rv.reg.l = l;
67 rv.reg.h = h;
68 err = smp_call_function_single(cpu, __wrmsr_on_cpu, &rv, 1);
69
70 return err;
71}
72EXPORT_SYMBOL(wrmsr_on_cpu);
73
74/* rdmsr on a bunch of CPUs
75 *
76 * @mask: which CPUs
77 * @msr_no: which MSR
78 * @msrs: array of MSR values
79 *
80 */
81void rdmsr_on_cpus(const cpumask_t *mask, u32 msr_no, struct msr *msrs)
82{
83 struct msr_info rv;
84 int this_cpu;
85
86 memset(&rv, 0, sizeof(rv));
87
88 rv.off = cpumask_first(mask);
89 rv.msrs = msrs;
90 rv.msr_no = msr_no;
91
92 preempt_disable();
93 /*
94 * FIXME: handle the CPU we're executing on separately for now until
95 * smp_call_function_many has been fixed to not skip it.
96 */
97 this_cpu = raw_smp_processor_id();
98 smp_call_function_single(this_cpu, __rdmsr_on_cpu, &rv, 1);
99
100 smp_call_function_many(mask, __rdmsr_on_cpu, &rv, 1);
101 preempt_enable();
102}
103EXPORT_SYMBOL(rdmsr_on_cpus);
104
105/*
106 * wrmsr on a bunch of CPUs
107 *
108 * @mask: which CPUs
109 * @msr_no: which MSR
110 * @msrs: array of MSR values
111 *
112 */
113void wrmsr_on_cpus(const cpumask_t *mask, u32 msr_no, struct msr *msrs)
114{
115 struct msr_info rv;
116 int this_cpu;
117
118 memset(&rv, 0, sizeof(rv));
119
120 rv.off = cpumask_first(mask);
121 rv.msrs = msrs;
122 rv.msr_no = msr_no;
123
124 preempt_disable();
125 /*
126 * FIXME: handle the CPU we're executing on separately for now until
127 * smp_call_function_many has been fixed to not skip it.
128 */
129 this_cpu = raw_smp_processor_id();
130 smp_call_function_single(this_cpu, __wrmsr_on_cpu, &rv, 1);
131
132 smp_call_function_many(mask, __wrmsr_on_cpu, &rv, 1);
133 preempt_enable();
134}
135EXPORT_SYMBOL(wrmsr_on_cpus);
136
137/* These "safe" variants are slower and should be used when the target MSR
138 may not actually exist. */
139static void __rdmsr_safe_on_cpu(void *info)
140{
141 struct msr_info *rv = info;
142
143 rv->err = rdmsr_safe(rv->msr_no, &rv->reg.l, &rv->reg.h);
144}
145
146static void __wrmsr_safe_on_cpu(void *info)
147{
148 struct msr_info *rv = info;
149
150 rv->err = wrmsr_safe(rv->msr_no, rv->reg.l, rv->reg.h);
151}
152
153int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h)
154{
155 int err;
156 struct msr_info rv;
157
158 memset(&rv, 0, sizeof(rv));
159
160 rv.msr_no = msr_no;
161 err = smp_call_function_single(cpu, __rdmsr_safe_on_cpu, &rv, 1);
162 *l = rv.reg.l;
163 *h = rv.reg.h;
164
165 return err ? err : rv.err;
166}
167EXPORT_SYMBOL(rdmsr_safe_on_cpu);
168
169int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
170{
171 int err;
172 struct msr_info rv;
173
174 memset(&rv, 0, sizeof(rv));
175
176 rv.msr_no = msr_no;
177 rv.reg.l = l;
178 rv.reg.h = h;
179 err = smp_call_function_single(cpu, __wrmsr_safe_on_cpu, &rv, 1);
180
181 return err ? err : rv.err;
182}
183EXPORT_SYMBOL(wrmsr_safe_on_cpu);
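Similarly, a short sketch (not from the patch) of the intended use of the "safe" variants: probing an MSR that may not be implemented on the target CPU; the helper name and the error handling are hypothetical:

#include <linux/kernel.h>
#include <asm/msr.h>

/* hypothetical probe: returns 0 and fills *val only if the MSR exists on @cpu */
static int probe_msr_on_cpu(unsigned int cpu, u32 msr_no, u64 *val)
{
	u32 lo, hi;
	int err;

	err = rdmsr_safe_on_cpu(cpu, msr_no, &lo, &hi);
	if (err) {
		pr_debug("MSR 0x%x not readable on cpu%u (%d)\n", msr_no, cpu, err);
		return err;
	}

	*val = ((u64)hi << 32) | lo;
	return 0;
}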
diff --git a/drivers/edac/Kconfig b/drivers/edac/Kconfig
index 956982f8739b..ab4f3592a11c 100644
--- a/drivers/edac/Kconfig
+++ b/drivers/edac/Kconfig
@@ -49,7 +49,6 @@ config EDAC_DEBUG_VERBOSE
 
 config EDAC_MM_EDAC
 	tristate "Main Memory EDAC (Error Detection And Correction) reporting"
-	default y
 	help
 	  Some systems are able to detect and correct errors in main
 	  memory.  EDAC can report statistics on memory error
@@ -58,6 +57,31 @@ config EDAC_MM_EDAC
 	  occurred so that a particular failing memory module can be
 	  replaced.  If unsure, select 'Y'.
 
+config EDAC_AMD64
+	tristate "AMD64 (Opteron, Athlon64) K8, F10h, F11h"
+	depends on EDAC_MM_EDAC && K8_NB && X86_64 && PCI
+	help
+	  Support for error detection and correction on the AMD 64
+	  Families of Memory Controllers (K8, F10h and F11h)
+
+config EDAC_AMD64_ERROR_INJECTION
+	bool "Sysfs Error Injection facilities"
+	depends on EDAC_AMD64
+	help
+	  Recent Opterons (Family 10h and later) provide for Memory Error
+	  Injection into the ECC detection circuits. The amd64_edac module
+	  allows the operator/user to inject Uncorrectable and Correctable
+	  errors into DRAM.
+
+	  When enabled, in each of the respective memory controller directories
+	  (/sys/devices/system/edac/mc/mcX), there are 3 input files:
+
+	  - inject_section (0..3, 16-byte section of 64-byte cacheline),
+	  - inject_word (0..8, 16-bit word of 16-byte section),
+	  - inject_ecc_vector (hex ecc vector: select bits of inject word)
+
+	  In addition, there are two control files, inject_read and inject_write,
+	  which trigger the DRAM ECC Read and Write respectively.
 
 config EDAC_AMD76X
 	tristate "AMD 76x (760, 762, 768)"
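For illustration only, a small user-space sketch of how an operator might drive the injection files described in the help text above, assuming the option is enabled and a controller is present as mc0; the helper and the written values are made up:

#include <stdio.h>
#include <stdlib.h>

/* write one value into an EDAC injection sysfs file under mc0 */
static void write_sysfs(const char *file, const char *val)
{
	char path[256];
	FILE *f;

	snprintf(path, sizeof(path), "/sys/devices/system/edac/mc/mc0/%s", file);
	f = fopen(path, "w");
	if (!f) {
		perror(path);
		exit(1);
	}
	fprintf(f, "%s\n", val);
	fclose(f);
}

int main(void)
{
	write_sysfs("inject_section", "2");		/* 16-byte section of the 64-byte cacheline */
	write_sysfs("inject_word", "0");		/* 16-bit word within that section */
	write_sysfs("inject_ecc_vector", "0x1");	/* hex vector selecting bits of the inject word */
	write_sysfs("inject_write", "1");		/* assumed here: a write triggers the DRAM ECC Write */
	return 0;
}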
diff --git a/drivers/edac/Makefile b/drivers/edac/Makefile
index 59076819135d..633dc5604ee3 100644
--- a/drivers/edac/Makefile
+++ b/drivers/edac/Makefile
@@ -30,6 +30,13 @@ obj-$(CONFIG_EDAC_I3000) += i3000_edac.o
 obj-$(CONFIG_EDAC_X38)			+= x38_edac.o
 obj-$(CONFIG_EDAC_I82860)		+= i82860_edac.o
 obj-$(CONFIG_EDAC_R82600)		+= r82600_edac.o
+
+amd64_edac_mod-y := amd64_edac_err_types.o amd64_edac.o
+amd64_edac_mod-$(CONFIG_EDAC_DEBUG) += amd64_edac_dbg.o
+amd64_edac_mod-$(CONFIG_EDAC_AMD64_ERROR_INJECTION) += amd64_edac_inj.o
+
+obj-$(CONFIG_EDAC_AMD64) += amd64_edac_mod.o
+
 obj-$(CONFIG_EDAC_PASEMI)		+= pasemi_edac.o
 obj-$(CONFIG_EDAC_MPC85XX)		+= mpc85xx_edac.o
 obj-$(CONFIG_EDAC_MV64X60)		+= mv64x60_edac.o
diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c
new file mode 100644
index 000000000000..c36bf40568cf
--- /dev/null
+++ b/drivers/edac/amd64_edac.c
@@ -0,0 +1,3354 @@
1#include "amd64_edac.h"
2#include <asm/k8.h>
3
4static struct edac_pci_ctl_info *amd64_ctl_pci;
5
6static int report_gart_errors;
7module_param(report_gart_errors, int, 0644);
8
9/*
10 * Set by command line parameter. If BIOS has enabled the ECC, this override is
11 * cleared to prevent re-enabling the hardware by this driver.
12 */
13static int ecc_enable_override;
14module_param(ecc_enable_override, int, 0644);
15
16/* Lookup table for all possible MC control instances */
17struct amd64_pvt;
18static struct mem_ctl_info *mci_lookup[MAX_NUMNODES];
19static struct amd64_pvt *pvt_lookup[MAX_NUMNODES];
20
21/*
22 * Memory scrubber control interface. For K8, memory scrubbing is handled by
23 * hardware and can involve L2 cache, dcache as well as the main memory. With
24 * F10, this is extended to L3 cache scrubbing on CPU models sporting that
25 * functionality.
26 *
27 * This causes the "units" for the scrubbing speed to vary from 64 byte blocks
28 * (dram) over to cache lines. This is nasty, so we will use bandwidth in
29 * bytes/sec for the setting.
30 *
31 * Currently, we only do dram scrubbing. If the scrubbing is done in software on
32 * other archs, we might not have access to the caches directly.
33 */
34
35/*
36 * scan the scrub rate mapping table for a close or matching bandwidth value to
37 * issue. If requested is too big, then use last maximum value found.
38 */
39static int amd64_search_set_scrub_rate(struct pci_dev *ctl, u32 new_bw,
40 u32 min_scrubrate)
41{
42 u32 scrubval;
43 int i;
44
45 /*
46 * map the configured rate (new_bw) to a value specific to the AMD64
47 * memory controller and apply to register. Search for the first
48 * bandwidth entry that is greater or equal than the setting requested
49 * and program that. If at last entry, turn off DRAM scrubbing.
50 */
51 for (i = 0; i < ARRAY_SIZE(scrubrates); i++) {
52 /*
53 * skip scrub rates which aren't recommended
54 * (see F10 BKDG, F3x58)
55 */
56 if (scrubrates[i].scrubval < min_scrubrate)
57 continue;
58
59 if (scrubrates[i].bandwidth <= new_bw)
60 break;
61
62 /*
63 * if no suitable bandwidth found, turn off DRAM scrubbing
64 * entirely by falling back to the last element in the
65 * scrubrates array.
66 */
67 }
68
69 scrubval = scrubrates[i].scrubval;
70 if (scrubval)
71 edac_printk(KERN_DEBUG, EDAC_MC,
72 "Setting scrub rate bandwidth: %u\n",
73 scrubrates[i].bandwidth);
74 else
75 edac_printk(KERN_DEBUG, EDAC_MC, "Turning scrubbing off.\n");
76
77 pci_write_bits32(ctl, K8_SCRCTRL, scrubval, 0x001F);
78
79 return 0;
80}
81
82static int amd64_set_scrub_rate(struct mem_ctl_info *mci, u32 *bandwidth)
83{
84 struct amd64_pvt *pvt = mci->pvt_info;
85 u32 min_scrubrate = 0x0;
86
87 switch (boot_cpu_data.x86) {
88 case 0xf:
89 min_scrubrate = K8_MIN_SCRUB_RATE_BITS;
90 break;
91 case 0x10:
92 min_scrubrate = F10_MIN_SCRUB_RATE_BITS;
93 break;
94 case 0x11:
95 min_scrubrate = F11_MIN_SCRUB_RATE_BITS;
96 break;
97
98 default:
99 amd64_printk(KERN_ERR, "Unsupported family!\n");
100 break;
101 }
102 return amd64_search_set_scrub_rate(pvt->misc_f3_ctl, *bandwidth,
103 min_scrubrate);
104}
105
106static int amd64_get_scrub_rate(struct mem_ctl_info *mci, u32 *bw)
107{
108 struct amd64_pvt *pvt = mci->pvt_info;
109 u32 scrubval = 0;
110 int status = -1, i, ret = 0;
111
112 ret = pci_read_config_dword(pvt->misc_f3_ctl, K8_SCRCTRL, &scrubval);
113 if (ret)
114 debugf0("Reading K8_SCRCTRL failed\n");
115
116 scrubval = scrubval & 0x001F;
117
118 edac_printk(KERN_DEBUG, EDAC_MC,
119 "pci-read, sdram scrub control value: %d \n", scrubval);
120
121	for (i = 0; i < ARRAY_SIZE(scrubrates); i++) {
122 if (scrubrates[i].scrubval == scrubval) {
123 *bw = scrubrates[i].bandwidth;
124 status = 0;
125 break;
126 }
127 }
128
129 return status;
130}
131
132/* Map from a CSROW entry to the mask entry that operates on it */
133static inline u32 amd64_map_to_dcs_mask(struct amd64_pvt *pvt, int csrow)
134{
135 return csrow >> (pvt->num_dcsm >> 3);
136}
137
138/* Return the 'base' address for the i'th CS entry of the 'dct' DRAM controller */
139static u32 amd64_get_dct_base(struct amd64_pvt *pvt, int dct, int csrow)
140{
141 if (dct == 0)
142 return pvt->dcsb0[csrow];
143 else
144 return pvt->dcsb1[csrow];
145}
146
147/*
148 * Return the 'mask' address for the i'th CS entry. This function is needed
149 * because the number of DCSM registers differs between Rev E and prior and
150 * Rev F and later.
151 */
152static u32 amd64_get_dct_mask(struct amd64_pvt *pvt, int dct, int csrow)
153{
154 if (dct == 0)
155 return pvt->dcsm0[amd64_map_to_dcs_mask(pvt, csrow)];
156 else
157 return pvt->dcsm1[amd64_map_to_dcs_mask(pvt, csrow)];
158}
159
160
161/*
162 * In *base and *limit, pass back the full 40-bit base and limit physical
163 * addresses for the node given by node_id. This information is obtained from
164 * DRAM Base (section 3.4.4.1) and DRAM Limit (section 3.4.4.2) registers. The
165 * base and limit addresses are of type SysAddr, as defined at the start of
166 * section 3.4.4 (p. 70). They are the lowest and highest physical addresses
167 * in the address range they represent.
168 */
169static void amd64_get_base_and_limit(struct amd64_pvt *pvt, int node_id,
170 u64 *base, u64 *limit)
171{
172 *base = pvt->dram_base[node_id];
173 *limit = pvt->dram_limit[node_id];
174}
175
176/*
177 * Return 1 if the SysAddr given by sys_addr matches the base/limit associated
178 * with node_id
179 */
180static int amd64_base_limit_match(struct amd64_pvt *pvt,
181 u64 sys_addr, int node_id)
182{
183 u64 base, limit, addr;
184
185 amd64_get_base_and_limit(pvt, node_id, &base, &limit);
186
187 /* The K8 treats this as a 40-bit value. However, bits 63-40 will be
188 * all ones if the most significant implemented address bit is 1.
189 * Here we discard bits 63-40. See section 3.4.2 of AMD publication
190 * 24592: AMD x86-64 Architecture Programmer's Manual Volume 1
191 * Application Programming.
192 */
193 addr = sys_addr & 0x000000ffffffffffull;
194
195 return (addr >= base) && (addr <= limit);
196}
197
198/*
199 * Attempt to map a SysAddr to a node. On success, return a pointer to the
200 * mem_ctl_info structure for the node that the SysAddr maps to.
201 *
202 * On failure, return NULL.
203 */
204static struct mem_ctl_info *find_mc_by_sys_addr(struct mem_ctl_info *mci,
205 u64 sys_addr)
206{
207 struct amd64_pvt *pvt;
208 int node_id;
209 u32 intlv_en, bits;
210
211 /*
212 * Here we use the DRAM Base (section 3.4.4.1) and DRAM Limit (section
213 * 3.4.4.2) registers to map the SysAddr to a node ID.
214 */
215 pvt = mci->pvt_info;
216
217 /*
218 * The value of this field should be the same for all DRAM Base
219 * registers. Therefore we arbitrarily choose to read it from the
220 * register for node 0.
221 */
222 intlv_en = pvt->dram_IntlvEn[0];
223
224 if (intlv_en == 0) {
225 for (node_id = 0; ; ) {
226 if (amd64_base_limit_match(pvt, sys_addr, node_id))
227 break;
228
229 if (++node_id >= DRAM_REG_COUNT)
230 goto err_no_match;
231 }
232 goto found;
233 }
234
235 if (unlikely((intlv_en != (0x01 << 8)) &&
236 (intlv_en != (0x03 << 8)) &&
237 (intlv_en != (0x07 << 8)))) {
238 amd64_printk(KERN_WARNING, "junk value of 0x%x extracted from "
239 "IntlvEn field of DRAM Base Register for node 0: "
240 "This probably indicates a BIOS bug.\n", intlv_en);
241 return NULL;
242 }
243
244 bits = (((u32) sys_addr) >> 12) & intlv_en;
245
246 for (node_id = 0; ; ) {
247 if ((pvt->dram_limit[node_id] & intlv_en) == bits)
248 break; /* intlv_sel field matches */
249
250 if (++node_id >= DRAM_REG_COUNT)
251 goto err_no_match;
252 }
253
254 /* sanity test for sys_addr */
255 if (unlikely(!amd64_base_limit_match(pvt, sys_addr, node_id))) {
256 amd64_printk(KERN_WARNING,
257 "%s(): sys_addr 0x%lx falls outside base/limit "
258 "address range for node %d with node interleaving "
259 "enabled.\n", __func__, (unsigned long)sys_addr,
260 node_id);
261 return NULL;
262 }
263
264found:
265 return edac_mc_find(node_id);
266
267err_no_match:
268 debugf2("sys_addr 0x%lx doesn't match any node\n",
269 (unsigned long)sys_addr);
270
271 return NULL;
272}
273
274/*
275 * Extract the DRAM CS base address from selected csrow register.
276 */
277static u64 base_from_dct_base(struct amd64_pvt *pvt, int csrow)
278{
279 return ((u64) (amd64_get_dct_base(pvt, 0, csrow) & pvt->dcsb_base)) <<
280 pvt->dcs_shift;
281}
282
283/*
284 * Extract the mask from the dcsb0[csrow] entry in a CPU revision-specific way.
285 */
286static u64 mask_from_dct_mask(struct amd64_pvt *pvt, int csrow)
287{
288 u64 dcsm_bits, other_bits;
289 u64 mask;
290
291 /* Extract bits from DRAM CS Mask. */
292 dcsm_bits = amd64_get_dct_mask(pvt, 0, csrow) & pvt->dcsm_mask;
293
294 other_bits = pvt->dcsm_mask;
295 other_bits = ~(other_bits << pvt->dcs_shift);
296
297 /*
298 * The extracted bits from DCSM belong in the spaces represented by
299 * the cleared bits in other_bits.
300 */
301 mask = (dcsm_bits << pvt->dcs_shift) | other_bits;
302
303 return mask;
304}
305
306/*
307 * @input_addr is an InputAddr associated with the node given by mci. Return the
308 * csrow that input_addr maps to, or -1 on failure (no csrow claims input_addr).
309 */
310static int input_addr_to_csrow(struct mem_ctl_info *mci, u64 input_addr)
311{
312 struct amd64_pvt *pvt;
313 int csrow;
314 u64 base, mask;
315
316 pvt = mci->pvt_info;
317
318 /*
319 * Here we use the DRAM CS Base and DRAM CS Mask registers. For each CS
320 * base/mask register pair, test the condition shown near the start of
321 * section 3.5.4 (p. 84, BKDG #26094, K8, revA-E).
322 */
323 for (csrow = 0; csrow < CHIPSELECT_COUNT; csrow++) {
324
325 /* This DRAM chip select is disabled on this node */
326 if ((pvt->dcsb0[csrow] & K8_DCSB_CS_ENABLE) == 0)
327 continue;
328
329 base = base_from_dct_base(pvt, csrow);
330 mask = ~mask_from_dct_mask(pvt, csrow);
331
332 if ((input_addr & mask) == (base & mask)) {
333 debugf2("InputAddr 0x%lx matches csrow %d (node %d)\n",
334 (unsigned long)input_addr, csrow,
335 pvt->mc_node_id);
336
337 return csrow;
338 }
339 }
340
341 debugf2("no matching csrow for InputAddr 0x%lx (MC node %d)\n",
342 (unsigned long)input_addr, pvt->mc_node_id);
343
344 return -1;
345}
346
347/*
348 * Return the base value defined by the DRAM Base register for the node
349 * represented by mci. This function returns the full 40-bit value despite the
350 * fact that the register only stores bits 39-24 of the value. See section
351 * 3.4.4.1 (BKDG #26094, K8, revA-E)
352 */
353static inline u64 get_dram_base(struct mem_ctl_info *mci)
354{
355 struct amd64_pvt *pvt = mci->pvt_info;
356
357 return pvt->dram_base[pvt->mc_node_id];
358}
359
360/*
361 * Obtain info from the DRAM Hole Address Register (section 3.4.8, pub #26094)
362 * for the node represented by mci. Info is passed back in *hole_base,
363 * *hole_offset, and *hole_size. Function returns 0 if info is valid or 1 if
364 * info is invalid. Info may be invalid for either of the following reasons:
365 *
366 * - The revision of the node is not E or greater. In this case, the DRAM Hole
367 * Address Register does not exist.
368 *
369 * - The DramHoleValid bit is cleared in the DRAM Hole Address Register,
370 * indicating that its contents are not valid.
371 *
372 * The values passed back in *hole_base, *hole_offset, and *hole_size are
373 * complete 32-bit values despite the fact that the bitfields in the DHAR
374 * only represent bits 31-24 of the base and offset values.
375 */
376int amd64_get_dram_hole_info(struct mem_ctl_info *mci, u64 *hole_base,
377 u64 *hole_offset, u64 *hole_size)
378{
379 struct amd64_pvt *pvt = mci->pvt_info;
380 u64 base;
381
382 /* only revE and later have the DRAM Hole Address Register */
383 if (boot_cpu_data.x86 == 0xf && pvt->ext_model < OPTERON_CPU_REV_E) {
384 debugf1(" revision %d for node %d does not support DHAR\n",
385 pvt->ext_model, pvt->mc_node_id);
386 return 1;
387 }
388
389 /* only valid for Fam10h */
390 if (boot_cpu_data.x86 == 0x10 &&
391 (pvt->dhar & F10_DRAM_MEM_HOIST_VALID) == 0) {
392 debugf1(" Dram Memory Hoisting is DISABLED on this system\n");
393 return 1;
394 }
395
396 if ((pvt->dhar & DHAR_VALID) == 0) {
397 debugf1(" Dram Memory Hoisting is DISABLED on this node %d\n",
398 pvt->mc_node_id);
399 return 1;
400 }
401
402 /* This node has Memory Hoisting */
403
404 /* +------------------+--------------------+--------------------+-----
405 * | memory | DRAM hole | relocated |
406 * | [0, (x - 1)] | [x, 0xffffffff] | addresses from |
407 * | | | DRAM hole |
408 * | | | [0x100000000, |
409 * | | | (0x100000000+ |
410 * | | | (0xffffffff-x))] |
411 * +------------------+--------------------+--------------------+-----
412 *
413 * Above is a diagram of physical memory showing the DRAM hole and the
414 * relocated addresses from the DRAM hole. As shown, the DRAM hole
415 * starts at address x (the base address) and extends through address
416 * 0xffffffff. The DRAM Hole Address Register (DHAR) relocates the
417 * addresses in the hole so that they start at 0x100000000.
418 */
419
420 base = dhar_base(pvt->dhar);
421
422 *hole_base = base;
423 *hole_size = (0x1ull << 32) - base;
424
425 if (boot_cpu_data.x86 > 0xf)
426 *hole_offset = f10_dhar_offset(pvt->dhar);
427 else
428 *hole_offset = k8_dhar_offset(pvt->dhar);
429
430 debugf1(" DHAR info for node %d base 0x%lx offset 0x%lx size 0x%lx\n",
431 pvt->mc_node_id, (unsigned long)*hole_base,
432 (unsigned long)*hole_offset, (unsigned long)*hole_size);
433
434 return 0;
435}
436EXPORT_SYMBOL_GPL(amd64_get_dram_hole_info);
437
438/*
439 * Return the DramAddr that the SysAddr given by @sys_addr maps to. It is
440 * assumed that sys_addr maps to the node given by mci.
441 *
442 * The first part of section 3.4.4 (p. 70) shows how the DRAM Base (section
443 * 3.4.4.1) and DRAM Limit (section 3.4.4.2) registers are used to translate a
444 * SysAddr to a DramAddr. If the DRAM Hole Address Register (DHAR) is enabled,
445 * then it is also involved in translating a SysAddr to a DramAddr. Sections
446 * 3.4.8 and 3.5.8.2 describe the DHAR and how it is used for memory hoisting.
447 * These parts of the documentation are unclear. I interpret them as follows:
448 *
449 * When node n receives a SysAddr, it processes the SysAddr as follows:
450 *
451 * 1. It extracts the DRAMBase and DRAMLimit values from the DRAM Base and DRAM
452 * Limit registers for node n. If the SysAddr is not within the range
453 * specified by the base and limit values, then node n ignores the Sysaddr
454 * (since it does not map to node n). Otherwise continue to step 2 below.
455 *
456 * 2. If the DramHoleValid bit of the DHAR for node n is clear, the DHAR is
457 * disabled so skip to step 3 below. Otherwise see if the SysAddr is within
458 * the range of relocated addresses (starting at 0x100000000) from the DRAM
459 * hole. If not, skip to step 3 below. Else get the value of the
460 * DramHoleOffset field from the DHAR. To obtain the DramAddr, subtract the
461 * offset defined by this value from the SysAddr.
462 *
463 * 3. Obtain the base address for node n from the DRAMBase field of the DRAM
464 * Base register for node n. To obtain the DramAddr, subtract the base
465 * address from the SysAddr, as shown near the start of section 3.4.4 (p.70).
466 */
467static u64 sys_addr_to_dram_addr(struct mem_ctl_info *mci, u64 sys_addr)
468{
469 u64 dram_base, hole_base, hole_offset, hole_size, dram_addr;
470 int ret = 0;
471
472 dram_base = get_dram_base(mci);
473
474 ret = amd64_get_dram_hole_info(mci, &hole_base, &hole_offset,
475 &hole_size);
476 if (!ret) {
477 if ((sys_addr >= (1ull << 32)) &&
478 (sys_addr < ((1ull << 32) + hole_size))) {
479 /* use DHAR to translate SysAddr to DramAddr */
480 dram_addr = sys_addr - hole_offset;
481
482 debugf2("using DHAR to translate SysAddr 0x%lx to "
483 "DramAddr 0x%lx\n",
484 (unsigned long)sys_addr,
485 (unsigned long)dram_addr);
486
487 return dram_addr;
488 }
489 }
490
491 /*
492 * Translate the SysAddr to a DramAddr as shown near the start of
493 * section 3.4.4 (p. 70). Although sys_addr is a 64-bit value, the k8
494 * only deals with 40-bit values. Therefore we discard bits 63-40 of
495 * sys_addr below. If bit 39 of sys_addr is 1 then the bits we
496 * discard are all 1s. Otherwise the bits we discard are all 0s. See
497 * section 3.4.2 of AMD publication 24592: AMD x86-64 Architecture
498 * Programmer's Manual Volume 1 Application Programming.
499 */
500 dram_addr = (sys_addr & 0xffffffffffull) - dram_base;
501
502 debugf2("using DRAM Base register to translate SysAddr 0x%lx to "
503 "DramAddr 0x%lx\n", (unsigned long)sys_addr,
504 (unsigned long)dram_addr);
505 return dram_addr;
506}
507
508/*
509 * @intlv_en is the value of the IntlvEn field from a DRAM Base register
510 * (section 3.4.4.1). Return the number of bits from a SysAddr that are used
511 * for node interleaving.
512 */
513static int num_node_interleave_bits(unsigned intlv_en)
514{
515 static const int intlv_shift_table[] = { 0, 1, 0, 2, 0, 0, 0, 3 };
516 int n;
517
518 BUG_ON(intlv_en > 7);
519 n = intlv_shift_table[intlv_en];
520 return n;
521}
522
523/* Translate the DramAddr given by @dram_addr to an InputAddr. */
524static u64 dram_addr_to_input_addr(struct mem_ctl_info *mci, u64 dram_addr)
525{
526 struct amd64_pvt *pvt;
527 int intlv_shift;
528 u64 input_addr;
529
530 pvt = mci->pvt_info;
531
532 /*
533 * See the start of section 3.4.4 (p. 70, BKDG #26094, K8, revA-E)
534 * concerning translating a DramAddr to an InputAddr.
535 */
536 intlv_shift = num_node_interleave_bits(pvt->dram_IntlvEn[0]);
537 input_addr = ((dram_addr >> intlv_shift) & 0xffffff000ull) +
538 (dram_addr & 0xfff);
539
540 debugf2(" Intlv Shift=%d DramAddr=0x%lx maps to InputAddr=0x%lx\n",
541 intlv_shift, (unsigned long)dram_addr,
542 (unsigned long)input_addr);
543
544 return input_addr;
545}
546
547/*
548 * Translate the SysAddr represented by @sys_addr to an InputAddr. It is
549 * assumed that @sys_addr maps to the node given by mci.
550 */
551static u64 sys_addr_to_input_addr(struct mem_ctl_info *mci, u64 sys_addr)
552{
553 u64 input_addr;
554
555 input_addr =
556 dram_addr_to_input_addr(mci, sys_addr_to_dram_addr(mci, sys_addr));
557
558	debugf2("SysAddr 0x%lx translates to InputAddr 0x%lx\n",
559 (unsigned long)sys_addr, (unsigned long)input_addr);
560
561 return input_addr;
562}
563
564
565/*
566 * @input_addr is an InputAddr associated with the node represented by mci.
567 * Translate @input_addr to a DramAddr and return the result.
568 */
569static u64 input_addr_to_dram_addr(struct mem_ctl_info *mci, u64 input_addr)
570{
571 struct amd64_pvt *pvt;
572 int node_id, intlv_shift;
573 u64 bits, dram_addr;
574 u32 intlv_sel;
575
576 /*
577 * Near the start of section 3.4.4 (p. 70, BKDG #26094, K8, revA-E)
578 * shows how to translate a DramAddr to an InputAddr. Here we reverse
579 * this procedure. When translating from a DramAddr to an InputAddr, the
580 * bits used for node interleaving are discarded. Here we recover these
581 * bits from the IntlvSel field of the DRAM Limit register (section
582 * 3.4.4.2) for the node that input_addr is associated with.
583 */
584 pvt = mci->pvt_info;
585 node_id = pvt->mc_node_id;
586 BUG_ON((node_id < 0) || (node_id > 7));
587
588 intlv_shift = num_node_interleave_bits(pvt->dram_IntlvEn[0]);
589
590 if (intlv_shift == 0) {
591 debugf1(" InputAddr 0x%lx translates to DramAddr of "
592 "same value\n", (unsigned long)input_addr);
593
594 return input_addr;
595 }
596
597 bits = ((input_addr & 0xffffff000ull) << intlv_shift) +
598 (input_addr & 0xfff);
599
600 intlv_sel = pvt->dram_IntlvSel[node_id] & ((1 << intlv_shift) - 1);
601 dram_addr = bits + (intlv_sel << 12);
602
603 debugf1("InputAddr 0x%lx translates to DramAddr 0x%lx "
604 "(%d node interleave bits)\n", (unsigned long)input_addr,
605 (unsigned long)dram_addr, intlv_shift);
606
607 return dram_addr;
608}
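To make the interleaving arithmetic above concrete, here is a small stand-alone sketch (made-up address and shift) that mirrors the two conversions; note that in the driver the interleave selector comes from the node's DRAM Limit register, while here it is derived from the made-up address so the round trip closes:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	int intlv_shift = 1;			/* two nodes interleaved on address bit 12 */
	uint64_t dram_addr = 0x12345000ULL;	/* made-up DramAddr */
	uint64_t intlv_sel = (dram_addr >> 12) & ((1 << intlv_shift) - 1);

	/* DramAddr -> InputAddr: drop the interleave bit(s), keep the 4K page offset */
	uint64_t input_addr = ((dram_addr >> intlv_shift) & 0xffffff000ULL) +
			      (dram_addr & 0xfff);

	/* InputAddr -> DramAddr: shift back up and re-insert the interleave selector */
	uint64_t back = (((input_addr & 0xffffff000ULL) << intlv_shift) +
			 (input_addr & 0xfff)) + (intlv_sel << 12);

	printf("dram=0x%llx input=0x%llx back=0x%llx\n",
	       (unsigned long long)dram_addr,
	       (unsigned long long)input_addr,
	       (unsigned long long)back);
	assert(back == dram_addr);
	return 0;
}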
609
610/*
611 * @dram_addr is a DramAddr that maps to the node represented by mci. Convert
612 * @dram_addr to a SysAddr.
613 */
614static u64 dram_addr_to_sys_addr(struct mem_ctl_info *mci, u64 dram_addr)
615{
616 struct amd64_pvt *pvt = mci->pvt_info;
617 u64 hole_base, hole_offset, hole_size, base, limit, sys_addr;
618 int ret = 0;
619
620 ret = amd64_get_dram_hole_info(mci, &hole_base, &hole_offset,
621 &hole_size);
622 if (!ret) {
623 if ((dram_addr >= hole_base) &&
624 (dram_addr < (hole_base + hole_size))) {
625 sys_addr = dram_addr + hole_offset;
626
627 debugf1("using DHAR to translate DramAddr 0x%lx to "
628 "SysAddr 0x%lx\n", (unsigned long)dram_addr,
629 (unsigned long)sys_addr);
630
631 return sys_addr;
632 }
633 }
634
635 amd64_get_base_and_limit(pvt, pvt->mc_node_id, &base, &limit);
636 sys_addr = dram_addr + base;
637
638 /*
639 * The sys_addr we have computed up to this point is a 40-bit value
640 * because the k8 deals with 40-bit values. However, the value we are
641 * supposed to return is a full 64-bit physical address. The AMD
642 * x86-64 architecture specifies that the most significant implemented
643 * address bit through bit 63 of a physical address must be either all
644 * 0s or all 1s. Therefore we sign-extend the 40-bit sys_addr to a
645 * 64-bit value below. See section 3.4.2 of AMD publication 24592:
646 * AMD x86-64 Architecture Programmer's Manual Volume 1 Application
647 * Programming.
648 */
649 sys_addr |= ~((sys_addr & (1ull << 39)) - 1);
650
651 debugf1(" Node %d, DramAddr 0x%lx to SysAddr 0x%lx\n",
652 pvt->mc_node_id, (unsigned long)dram_addr,
653 (unsigned long)sys_addr);
654
655 return sys_addr;
656}
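The sign-extension step above is compact enough to deserve a worked example; a tiny stand-alone sketch (made-up addresses) showing what sys_addr |= ~((sys_addr & (1ull << 39)) - 1) does when bit 39 is clear versus set:

#include <stdint.h>
#include <stdio.h>

static uint64_t sign_extend_40(uint64_t sys_addr)
{
	/* if bit 39 is set this ORs in all-ones above bit 39; otherwise it ORs in 0 */
	sys_addr |= ~((sys_addr & (1ULL << 39)) - 1);
	return sys_addr;
}

int main(void)
{
	printf("0x%016llx\n", (unsigned long long)sign_extend_40(0x0012345678ULL));
	/* -> 0x0000000012345678 (bit 39 clear: unchanged) */
	printf("0x%016llx\n", (unsigned long long)sign_extend_40(0x8012345678ULL));
	/* -> 0xffffff8012345678 (bit 39 set: bits 63..40 become 1) */
	return 0;
}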
657
658/*
659 * @input_addr is an InputAddr associated with the node given by mci. Translate
660 * @input_addr to a SysAddr.
661 */
662static inline u64 input_addr_to_sys_addr(struct mem_ctl_info *mci,
663 u64 input_addr)
664{
665 return dram_addr_to_sys_addr(mci,
666 input_addr_to_dram_addr(mci, input_addr));
667}
668
669/*
670 * Find the minimum and maximum InputAddr values that map to the given @csrow.
671 * Pass back these values in *input_addr_min and *input_addr_max.
672 */
673static void find_csrow_limits(struct mem_ctl_info *mci, int csrow,
674 u64 *input_addr_min, u64 *input_addr_max)
675{
676 struct amd64_pvt *pvt;
677 u64 base, mask;
678
679 pvt = mci->pvt_info;
680 BUG_ON((csrow < 0) || (csrow >= CHIPSELECT_COUNT));
681
682 base = base_from_dct_base(pvt, csrow);
683 mask = mask_from_dct_mask(pvt, csrow);
684
685 *input_addr_min = base & ~mask;
686 *input_addr_max = base | mask | pvt->dcs_mask_notused;
687}
688
689/*
690 * Extract error address from MCA NB Address Low (section 3.6.4.5) and MCA NB
691 * Address High (section 3.6.4.6) register values and return the result. Address
692 * is located in the info structure (nbeah and nbeal), the encoding is device
693 * specific.
694 */
695static u64 extract_error_address(struct mem_ctl_info *mci,
696 struct amd64_error_info_regs *info)
697{
698 struct amd64_pvt *pvt = mci->pvt_info;
699
700 return pvt->ops->get_error_address(mci, info);
701}
702
703
704/* Map the Error address to a PAGE and PAGE OFFSET. */
705static inline void error_address_to_page_and_offset(u64 error_address,
706 u32 *page, u32 *offset)
707{
708 *page = (u32) (error_address >> PAGE_SHIFT);
709 *offset = ((u32) error_address) & ~PAGE_MASK;
710}
711
712/*
713 * @sys_addr is an error address (a SysAddr) extracted from the MCA NB Address
714 * Low (section 3.6.4.5) and MCA NB Address High (section 3.6.4.6) registers
715 * of a node that detected an ECC memory error. mci represents the node that
716 * the error address maps to (possibly different from the node that detected
717 * the error). Return the number of the csrow that sys_addr maps to, or -1 on
718 * error.
719 */
720static int sys_addr_to_csrow(struct mem_ctl_info *mci, u64 sys_addr)
721{
722 int csrow;
723
724 csrow = input_addr_to_csrow(mci, sys_addr_to_input_addr(mci, sys_addr));
725
726 if (csrow == -1)
727 amd64_mc_printk(mci, KERN_ERR,
728 "Failed to translate InputAddr to csrow for "
729 "address 0x%lx\n", (unsigned long)sys_addr);
730 return csrow;
731}
732
733static int get_channel_from_ecc_syndrome(unsigned short syndrome);
734
735static void amd64_cpu_display_info(struct amd64_pvt *pvt)
736{
737 if (boot_cpu_data.x86 == 0x11)
738 edac_printk(KERN_DEBUG, EDAC_MC, "F11h CPU detected\n");
739 else if (boot_cpu_data.x86 == 0x10)
740 edac_printk(KERN_DEBUG, EDAC_MC, "F10h CPU detected\n");
741 else if (boot_cpu_data.x86 == 0xf)
742 edac_printk(KERN_DEBUG, EDAC_MC, "%s detected\n",
743 (pvt->ext_model >= OPTERON_CPU_REV_F) ?
744 "Rev F or later" : "Rev E or earlier");
745 else
746		/* we'll hardly ever get here */
747 edac_printk(KERN_ERR, EDAC_MC, "Unknown cpu!\n");
748}
749
750/*
751 * Determine if the DIMMs have ECC enabled. ECC is enabled ONLY if all the DIMMs
752 * are ECC capable.
753 */
754static enum edac_type amd64_determine_edac_cap(struct amd64_pvt *pvt)
755{
756 int bit;
757 enum dev_type edac_cap = EDAC_NONE;
758
759 bit = (boot_cpu_data.x86 > 0xf || pvt->ext_model >= OPTERON_CPU_REV_F)
760 ? 19
761 : 17;
762
763	if (pvt->dclr0 & BIT(bit))
764 edac_cap = EDAC_FLAG_SECDED;
765
766 return edac_cap;
767}
768
769
770static void f10_debug_display_dimm_sizes(int ctrl, struct amd64_pvt *pvt,
771 int ganged);
772
773/* Display and decode various NB registers for debug purposes. */
774static void amd64_dump_misc_regs(struct amd64_pvt *pvt)
775{
776 int ganged;
777
778 debugf1(" nbcap:0x%8.08x DctDualCap=%s DualNode=%s 8-Node=%s\n",
779 pvt->nbcap,
780 (pvt->nbcap & K8_NBCAP_DCT_DUAL) ? "True" : "False",
781 (pvt->nbcap & K8_NBCAP_DUAL_NODE) ? "True" : "False",
782 (pvt->nbcap & K8_NBCAP_8_NODE) ? "True" : "False");
783 debugf1(" ECC Capable=%s ChipKill Capable=%s\n",
784 (pvt->nbcap & K8_NBCAP_SECDED) ? "True" : "False",
785 (pvt->nbcap & K8_NBCAP_CHIPKILL) ? "True" : "False");
786 debugf1(" DramCfg0-low=0x%08x DIMM-ECC=%s Parity=%s Width=%s\n",
787 pvt->dclr0,
788 (pvt->dclr0 & BIT(19)) ? "Enabled" : "Disabled",
789 (pvt->dclr0 & BIT(8)) ? "Enabled" : "Disabled",
790 (pvt->dclr0 & BIT(11)) ? "128b" : "64b");
791 debugf1(" DIMM x4 Present: L0=%s L1=%s L2=%s L3=%s DIMM Type=%s\n",
792 (pvt->dclr0 & BIT(12)) ? "Y" : "N",
793 (pvt->dclr0 & BIT(13)) ? "Y" : "N",
794 (pvt->dclr0 & BIT(14)) ? "Y" : "N",
795 (pvt->dclr0 & BIT(15)) ? "Y" : "N",
796 (pvt->dclr0 & BIT(16)) ? "UN-Buffered" : "Buffered");
797
798
799 debugf1(" online-spare: 0x%8.08x\n", pvt->online_spare);
800
801 if (boot_cpu_data.x86 == 0xf) {
802 debugf1(" dhar: 0x%8.08x Base=0x%08x Offset=0x%08x\n",
803 pvt->dhar, dhar_base(pvt->dhar),
804 k8_dhar_offset(pvt->dhar));
805 debugf1(" DramHoleValid=%s\n",
806 (pvt->dhar & DHAR_VALID) ? "True" : "False");
807
808 debugf1(" dbam-dkt: 0x%8.08x\n", pvt->dbam0);
809
810 /* everything below this point is Fam10h and above */
811 return;
812
813 } else {
814 debugf1(" dhar: 0x%8.08x Base=0x%08x Offset=0x%08x\n",
815 pvt->dhar, dhar_base(pvt->dhar),
816 f10_dhar_offset(pvt->dhar));
817 debugf1(" DramMemHoistValid=%s DramHoleValid=%s\n",
818 (pvt->dhar & F10_DRAM_MEM_HOIST_VALID) ?
819 "True" : "False",
820 (pvt->dhar & DHAR_VALID) ?
821 "True" : "False");
822 }
823
824 /* Only if NOT ganged does dcl1 have valid info */
825 if (!dct_ganging_enabled(pvt)) {
826 debugf1(" DramCfg1-low=0x%08x DIMM-ECC=%s Parity=%s "
827 "Width=%s\n", pvt->dclr1,
828 (pvt->dclr1 & BIT(19)) ? "Enabled" : "Disabled",
829 (pvt->dclr1 & BIT(8)) ? "Enabled" : "Disabled",
830 (pvt->dclr1 & BIT(11)) ? "128b" : "64b");
831 debugf1(" DIMM x4 Present: L0=%s L1=%s L2=%s L3=%s "
832 "DIMM Type=%s\n",
833 (pvt->dclr1 & BIT(12)) ? "Y" : "N",
834 (pvt->dclr1 & BIT(13)) ? "Y" : "N",
835 (pvt->dclr1 & BIT(14)) ? "Y" : "N",
836 (pvt->dclr1 & BIT(15)) ? "Y" : "N",
837 (pvt->dclr1 & BIT(16)) ? "UN-Buffered" : "Buffered");
838 }
839
840 /*
841 * Determine if ganged and then dump memory sizes for first controller,
842 * and if NOT ganged dump info for 2nd controller.
843 */
844 ganged = dct_ganging_enabled(pvt);
845
846 f10_debug_display_dimm_sizes(0, pvt, ganged);
847
848 if (!ganged)
849 f10_debug_display_dimm_sizes(1, pvt, ganged);
850}
851
852/* Read in both of DBAM registers */
853static void amd64_read_dbam_reg(struct amd64_pvt *pvt)
854{
855 int err = 0;
856 unsigned int reg;
857
858 reg = DBAM0;
859 err = pci_read_config_dword(pvt->dram_f2_ctl, reg, &pvt->dbam0);
860 if (err)
861 goto err_reg;
862
863 if (boot_cpu_data.x86 >= 0x10) {
864 reg = DBAM1;
865 err = pci_read_config_dword(pvt->dram_f2_ctl, reg, &pvt->dbam1);
866
867 if (err)
868 goto err_reg;
869 }
870
871err_reg:
872 debugf0("Error reading F2x%03x.\n", reg);
873}
874
875/*
876 * NOTE: CPU Revision Dependent code: Rev E and Rev F
877 *
878 * Set the DCSB and DCSM mask values depending on the CPU revision value. Also
879 * set the shift factor for the DCSB and DCSM values.
880 *
881 * ->dcs_mask_notused, RevE:
882 *
883 * To find the max InputAddr for the csrow, start with the base address and set
884 * all bits that are "don't care" bits in the test at the start of section
885 * 3.5.4 (p. 84).
886 *
887 * The "don't care" bits are all set bits in the mask and all bits in the gaps
888 * between bit ranges [35:25] and [19:13]. The value REV_E_DCS_NOTUSED_BITS
889 * represents bits [24:20] and [12:0], which are all bits in the above-mentioned
890 * gaps.
891 *
892 * ->dcs_mask_notused, RevF and later:
893 *
894 * To find the max InputAddr for the csrow, start with the base address and set
895 * all bits that are "don't care" bits in the test at the start of NPT section
896 * 4.5.4 (p. 87).
897 *
898 * The "don't care" bits are all set bits in the mask and all bits in the gaps
899 * between bit ranges [36:27] and [21:13].
900 *
901 * The value REV_F_F1Xh_DCS_NOTUSED_BITS represents bits [26:22] and [12:0],
902 * which are all bits in the above-mentioned gaps.
903 */
904static void amd64_set_dct_base_and_mask(struct amd64_pvt *pvt)
905{
906 if (pvt->ext_model >= OPTERON_CPU_REV_F) {
907 pvt->dcsb_base = REV_F_F1Xh_DCSB_BASE_BITS;
908 pvt->dcsm_mask = REV_F_F1Xh_DCSM_MASK_BITS;
909 pvt->dcs_mask_notused = REV_F_F1Xh_DCS_NOTUSED_BITS;
910 pvt->dcs_shift = REV_F_F1Xh_DCS_SHIFT;
911
912 switch (boot_cpu_data.x86) {
913 case 0xf:
914 pvt->num_dcsm = REV_F_DCSM_COUNT;
915 break;
916
917 case 0x10:
918 pvt->num_dcsm = F10_DCSM_COUNT;
919 break;
920
921 case 0x11:
922 pvt->num_dcsm = F11_DCSM_COUNT;
923 break;
924
925 default:
926 amd64_printk(KERN_ERR, "Unsupported family!\n");
927 break;
928 }
929 } else {
930 pvt->dcsb_base = REV_E_DCSB_BASE_BITS;
931 pvt->dcsm_mask = REV_E_DCSM_MASK_BITS;
932 pvt->dcs_mask_notused = REV_E_DCS_NOTUSED_BITS;
933 pvt->dcs_shift = REV_E_DCS_SHIFT;
934 pvt->num_dcsm = REV_E_DCSM_COUNT;
935 }
936}
937
938/*
939 * Function 2 Offset F10_DCSB0; read in the DCS Base and DCS Mask hw registers
940 */
941static void amd64_read_dct_base_mask(struct amd64_pvt *pvt)
942{
943 int cs, reg, err = 0;
944
945 amd64_set_dct_base_and_mask(pvt);
946
947 for (cs = 0; cs < CHIPSELECT_COUNT; cs++) {
948 reg = K8_DCSB0 + (cs * 4);
949 err = pci_read_config_dword(pvt->dram_f2_ctl, reg,
950 &pvt->dcsb0[cs]);
951 if (unlikely(err))
952 debugf0("Reading K8_DCSB0[%d] failed\n", cs);
953 else
954 debugf0(" DCSB0[%d]=0x%08x reg: F2x%x\n",
955 cs, pvt->dcsb0[cs], reg);
956
957 /* If DCT are NOT ganged, then read in DCT1's base */
958 if (boot_cpu_data.x86 >= 0x10 && !dct_ganging_enabled(pvt)) {
959 reg = F10_DCSB1 + (cs * 4);
960 err = pci_read_config_dword(pvt->dram_f2_ctl, reg,
961 &pvt->dcsb1[cs]);
962 if (unlikely(err))
963 debugf0("Reading F10_DCSB1[%d] failed\n", cs);
964 else
965 debugf0(" DCSB1[%d]=0x%08x reg: F2x%x\n",
966 cs, pvt->dcsb1[cs], reg);
967 } else {
968 pvt->dcsb1[cs] = 0;
969 }
970 }
971
972 for (cs = 0; cs < pvt->num_dcsm; cs++) {
973		reg = K8_DCSM0 + (cs * 4);
974 err = pci_read_config_dword(pvt->dram_f2_ctl, reg,
975 &pvt->dcsm0[cs]);
976 if (unlikely(err))
977 debugf0("Reading K8_DCSM0 failed\n");
978 else
979 debugf0(" DCSM0[%d]=0x%08x reg: F2x%x\n",
980 cs, pvt->dcsm0[cs], reg);
981
982 /* If DCT are NOT ganged, then read in DCT1's mask */
983 if (boot_cpu_data.x86 >= 0x10 && !dct_ganging_enabled(pvt)) {
984 reg = F10_DCSM1 + (cs * 4);
985 err = pci_read_config_dword(pvt->dram_f2_ctl, reg,
986 &pvt->dcsm1[cs]);
987 if (unlikely(err))
988 debugf0("Reading F10_DCSM1[%d] failed\n", cs);
989 else
990 debugf0(" DCSM1[%d]=0x%08x reg: F2x%x\n",
991 cs, pvt->dcsm1[cs], reg);
992 } else
993 pvt->dcsm1[cs] = 0;
994 }
995}
996
997static enum mem_type amd64_determine_memory_type(struct amd64_pvt *pvt)
998{
999 enum mem_type type;
1000
1001 if (boot_cpu_data.x86 >= 0x10 || pvt->ext_model >= OPTERON_CPU_REV_F) {
1002 /* Rev F and later */
1003 type = (pvt->dclr0 & BIT(16)) ? MEM_DDR2 : MEM_RDDR2;
1004 } else {
1005 /* Rev E and earlier */
1006 type = (pvt->dclr0 & BIT(18)) ? MEM_DDR : MEM_RDDR;
1007 }
1008
1009 debugf1(" Memory type is: %s\n",
1010 (type == MEM_DDR2) ? "MEM_DDR2" :
1011 (type == MEM_RDDR2) ? "MEM_RDDR2" :
1012 (type == MEM_DDR) ? "MEM_DDR" : "MEM_RDDR");
1013
1014 return type;
1015}
1016
1017/*
1018 * Read the DRAM Configuration Low register. It differs between CG, D & E revs
1019 * and the later RevF memory controllers (DDR vs DDR2)
1020 *
1021 * Return:
1022 * number of memory channels in operation
1023 * Pass back:
1024 * contents of the DCL0_LOW register
1025 */
1026static int k8_early_channel_count(struct amd64_pvt *pvt)
1027{
1028 int flag, err = 0;
1029
1030 err = pci_read_config_dword(pvt->dram_f2_ctl, F10_DCLR_0, &pvt->dclr0);
1031 if (err)
1032 return err;
1033
1034 if ((boot_cpu_data.x86_model >> 4) >= OPTERON_CPU_REV_F) {
1035 /* RevF (NPT) and later */
1036 flag = pvt->dclr0 & F10_WIDTH_128;
1037 } else {
1038 /* RevE and earlier */
1039 flag = pvt->dclr0 & REVE_WIDTH_128;
1040 }
1041
1042 /* not used */
1043 pvt->dclr1 = 0;
1044
1045 return (flag) ? 2 : 1;
1046}
1047
1048/* extract the ERROR ADDRESS for the K8 CPUs */
1049static u64 k8_get_error_address(struct mem_ctl_info *mci,
1050 struct amd64_error_info_regs *info)
1051{
1052 return (((u64) (info->nbeah & 0xff)) << 32) +
1053 (info->nbeal & ~0x03);
1054}
1055
1056/*
1057 * Read the Base and Limit registers for K8 based Memory controllers; extract
1058 * fields from the 'raw' reg into separate data fields
1059 *
1060 * Isolates: BASE, LIMIT, IntlvEn, IntlvSel, RW_EN
1061 */
1062static void k8_read_dram_base_limit(struct amd64_pvt *pvt, int dram)
1063{
1064 u32 low;
1065 u32 off = dram << 3; /* 8 bytes between DRAM entries */
1066 int err;
1067
1068 err = pci_read_config_dword(pvt->addr_f1_ctl,
1069 K8_DRAM_BASE_LOW + off, &low);
1070 if (err)
1071 debugf0("Reading K8_DRAM_BASE_LOW failed\n");
1072
1073 /* Extract parts into separate data entries */
1074 pvt->dram_base[dram] = ((u64) low & 0xFFFF0000) << 8;
1075 pvt->dram_IntlvEn[dram] = (low >> 8) & 0x7;
1076 pvt->dram_rw_en[dram] = (low & 0x3);
1077
1078 err = pci_read_config_dword(pvt->addr_f1_ctl,
1079 K8_DRAM_LIMIT_LOW + off, &low);
1080 if (err)
1081 debugf0("Reading K8_DRAM_LIMIT_LOW failed\n");
1082
1083 /*
1084 * Extract parts into separate data entries. Limit is the HIGHEST memory
1085 * location of the region, so lower 24 bits need to be all ones
1086 */
1087 pvt->dram_limit[dram] = (((u64) low & 0xFFFF0000) << 8) | 0x00FFFFFF;
1088 pvt->dram_IntlvSel[dram] = (low >> 8) & 0x7;
1089 pvt->dram_DstNode[dram] = (low & 0x7);
1090}
1091
1092static void k8_map_sysaddr_to_csrow(struct mem_ctl_info *mci,
1093 struct amd64_error_info_regs *info,
1094 u64 SystemAddress)
1095{
1096 struct mem_ctl_info *src_mci;
1097 unsigned short syndrome;
1098 int channel, csrow;
1099 u32 page, offset;
1100
1101 /* Extract the syndrome parts and form a 16-bit syndrome */
1102 syndrome = EXTRACT_HIGH_SYNDROME(info->nbsl) << 8;
1103 syndrome |= EXTRACT_LOW_SYNDROME(info->nbsh);
1104
1105 /* CHIPKILL enabled */
1106 if (info->nbcfg & K8_NBCFG_CHIPKILL) {
1107 channel = get_channel_from_ecc_syndrome(syndrome);
1108 if (channel < 0) {
1109 /*
1110 * Syndrome didn't map, so we don't know which of the
1111 * 2 DIMMs is in error. So we need to ID 'both' of them
1112 * as suspect.
1113 */
1114 amd64_mc_printk(mci, KERN_WARNING,
1115 "unknown syndrome 0x%x - possible error "
1116 "reporting race\n", syndrome);
1117 edac_mc_handle_ce_no_info(mci, EDAC_MOD_STR);
1118 return;
1119 }
1120 } else {
1121 /*
1122 * non-chipkill ecc mode
1123 *
1124 * The k8 documentation is unclear about how to determine the
1125 * channel number when using non-chipkill memory. This method
1126 * was obtained from email communication with someone at AMD.
1127 * (Wish the email was placed in this comment - norsk)
1128 */
1129 channel = ((SystemAddress & BIT(3)) != 0);
1130 }
1131
1132 /*
1133 * Find out which node the error address belongs to. This may be
1134 * different from the node that detected the error.
1135 */
1136 src_mci = find_mc_by_sys_addr(mci, SystemAddress);
1137	if (!src_mci) {
1138 amd64_mc_printk(mci, KERN_ERR,
1139 "failed to map error address 0x%lx to a node\n",
1140 (unsigned long)SystemAddress);
1141 edac_mc_handle_ce_no_info(mci, EDAC_MOD_STR);
1142 return;
1143 }
1144
1145 /* Now map the SystemAddress to a CSROW */
1146 csrow = sys_addr_to_csrow(src_mci, SystemAddress);
1147 if (csrow < 0) {
1148 edac_mc_handle_ce_no_info(src_mci, EDAC_MOD_STR);
1149 } else {
1150 error_address_to_page_and_offset(SystemAddress, &page, &offset);
1151
1152 edac_mc_handle_ce(src_mci, page, offset, syndrome, csrow,
1153 channel, EDAC_MOD_STR);
1154 }
1155}
1156
1157/*
1158 * Determine the number of PAGES for this DIMM's size based on its DRAM
1159 * Address Mapping.
1160 *
1161 * First step is to calc the number of bits to shift a value of 1 left to
1162 * indicate how many pages. Start with the DBAM value as the starting bits,
1163 * then proceed to adjust those shift bits, based on CPU rev and the table.
1164 * See BKDG on the DBAM
1165 */
1166static int k8_dbam_map_to_pages(struct amd64_pvt *pvt, int dram_map)
1167{
1168 int nr_pages;
1169
1170 if (pvt->ext_model >= OPTERON_CPU_REV_F) {
1171 nr_pages = 1 << (revf_quad_ddr2_shift[dram_map] - PAGE_SHIFT);
1172 } else {
1173 /*
1174 * RevE and less section; this line is tricky. It collapses the
1175 * table used by RevD and later to one that matches revisions CG
1176 * and earlier.
1177 */
1178 dram_map -= (pvt->ext_model >= OPTERON_CPU_REV_D) ?
1179 (dram_map > 8 ? 4 : (dram_map > 5 ?
1180 3 : (dram_map > 2 ? 1 : 0))) : 0;
1181
1182 /* 25 shift is 32MiB minimum DIMM size in RevE and prior */
1183 nr_pages = 1 << (dram_map + 25 - PAGE_SHIFT);
1184 }
1185
1186 return nr_pages;
1187}
1188
1189/*
1190 * Get the number of DCT channels in use.
1191 *
1192 * Return:
1193 * number of Memory Channels in operation
1194 * Pass back:
1195 * contents of the DCL0_LOW register
1196 */
1197static int f10_early_channel_count(struct amd64_pvt *pvt)
1198{
1199 int err = 0, channels = 0;
1200 u32 dbam;
1201
1202 err = pci_read_config_dword(pvt->dram_f2_ctl, F10_DCLR_0, &pvt->dclr0);
1203 if (err)
1204 goto err_reg;
1205
1206 err = pci_read_config_dword(pvt->dram_f2_ctl, F10_DCLR_1, &pvt->dclr1);
1207 if (err)
1208 goto err_reg;
1209
1210 /* If we are in 128 bit mode, then we are using 2 channels */
1211 if (pvt->dclr0 & F10_WIDTH_128) {
1212 debugf0("Data WIDTH is 128 bits - 2 channels\n");
1213 channels = 2;
1214 return channels;
1215 }
1216
1217 /*
1218 * Need to check if in UN-ganged mode: In such, there are 2 channels,
1219 * but they are NOT in 128 bit mode and thus the above 'dcl0' status bit
1220 * will be OFF.
1221 *
1222 * Need to check DCT0[0] and DCT1[0] to see if only one of them has
1223 * their CSEnable bit on. If so, then SINGLE DIMM case.
1224 */
1225 debugf0("Data WIDTH is NOT 128 bits - need more decoding\n");
1226
1227 /*
1228 * Check DRAM Bank Address Mapping values for each DIMM to see if there
1229 * is more than just one DIMM present in unganged mode. Need to check
1230 * both controllers since DIMMs can be placed in either one.
1231 */
1232 channels = 0;
1233 err = pci_read_config_dword(pvt->dram_f2_ctl, DBAM0, &dbam);
1234 if (err)
1235 goto err_reg;
1236
1237 if (DBAM_DIMM(0, dbam) > 0)
1238 channels++;
1239 if (DBAM_DIMM(1, dbam) > 0)
1240 channels++;
1241 if (DBAM_DIMM(2, dbam) > 0)
1242 channels++;
1243 if (DBAM_DIMM(3, dbam) > 0)
1244 channels++;
1245
1246 /* If more than 2 DIMMs are present, then we have 2 channels */
1247 if (channels > 2)
1248 channels = 2;
1249 else if (channels == 0) {
1250 /* No DIMMs on DCT0, so look at DCT1 */
1251 err = pci_read_config_dword(pvt->dram_f2_ctl, DBAM1, &dbam);
1252 if (err)
1253 goto err_reg;
1254
1255 if (DBAM_DIMM(0, dbam) > 0)
1256 channels++;
1257 if (DBAM_DIMM(1, dbam) > 0)
1258 channels++;
1259 if (DBAM_DIMM(2, dbam) > 0)
1260 channels++;
1261 if (DBAM_DIMM(3, dbam) > 0)
1262 channels++;
1263
1264 if (channels > 2)
1265 channels = 2;
1266 }
1267
1268 /* If we found ALL 0 values, then assume just ONE DIMM-ONE Channel */
1269 if (channels == 0)
1270 channels = 1;
1271
1272 debugf0("DIMM count= %d\n", channels);
1273
1274 return channels;
1275
1276err_reg:
1277 return -1;
1278
1279}
1280
1281static int f10_dbam_map_to_pages(struct amd64_pvt *pvt, int dram_map)
1282{
1283 return 1 << (revf_quad_ddr2_shift[dram_map] - PAGE_SHIFT);
1284}
1285
1286/* Enable extended configuration access via 0xCF8 feature */
1287static void amd64_setup(struct amd64_pvt *pvt)
1288{
1289 u32 reg;
1290
1291 pci_read_config_dword(pvt->misc_f3_ctl, F10_NB_CFG_HIGH, &reg);
1292
1293 pvt->flags.cf8_extcfg = !!(reg & F10_NB_CFG_LOW_ENABLE_EXT_CFG);
1294 reg |= F10_NB_CFG_LOW_ENABLE_EXT_CFG;
1295 pci_write_config_dword(pvt->misc_f3_ctl, F10_NB_CFG_HIGH, reg);
1296}
1297
1298/* Restore the extended configuration access via 0xCF8 feature */
1299static void amd64_teardown(struct amd64_pvt *pvt)
1300{
1301 u32 reg;
1302
1303 pci_read_config_dword(pvt->misc_f3_ctl, F10_NB_CFG_HIGH, &reg);
1304
1305 reg &= ~F10_NB_CFG_LOW_ENABLE_EXT_CFG;
1306 if (pvt->flags.cf8_extcfg)
1307 reg |= F10_NB_CFG_LOW_ENABLE_EXT_CFG;
1308 pci_write_config_dword(pvt->misc_f3_ctl, F10_NB_CFG_HIGH, reg);
1309}
1310
1311static u64 f10_get_error_address(struct mem_ctl_info *mci,
1312 struct amd64_error_info_regs *info)
1313{
1314 return (((u64) (info->nbeah & 0xffff)) << 32) +
1315 (info->nbeal & ~0x01);
1316}
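
/*
 * Worked example (illustrative register values): with nbeah == 0x0001 and
 * nbeal == 0x23456789, the reconstructed error address is
 * (0x0001ULL << 32) + (0x23456789 & ~0x01) = 0x123456788; bit 0 is always
 * cleared by the '& ~0x01' above.
 */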
1317
1318/*
1319 * Read the Base and Limit registers for F10 based Memory controllers. Extract
1320 * fields from the 'raw' reg into separate data fields.
1321 *
1322 * Isolates: BASE, LIMIT, IntlvEn, IntlvSel, RW_EN.
1323 */
1324static void f10_read_dram_base_limit(struct amd64_pvt *pvt, int dram)
1325{
1326 u32 high_offset, low_offset, high_base, low_base, high_limit, low_limit;
1327
1328 low_offset = K8_DRAM_BASE_LOW + (dram << 3);
1329 high_offset = F10_DRAM_BASE_HIGH + (dram << 3);
1330
1331 /* read the 'raw' DRAM BASE Address register */
1332 pci_read_config_dword(pvt->addr_f1_ctl, low_offset, &low_base);
1333
1334 /* Read from the ECS data register */
1335 pci_read_config_dword(pvt->addr_f1_ctl, high_offset, &high_base);
1336
1337 /* Extract parts into separate data entries */
1338 pvt->dram_rw_en[dram] = (low_base & 0x3);
1339
1340 if (pvt->dram_rw_en[dram] == 0)
1341 return;
1342
1343 pvt->dram_IntlvEn[dram] = (low_base >> 8) & 0x7;
1344
1345 pvt->dram_base[dram] = (((((u64) high_base & 0x000000FF) << 32) |
1346 ((u64) low_base & 0xFFFF0000))) << 8;
1347
1348 low_offset = K8_DRAM_LIMIT_LOW + (dram << 3);
1349 high_offset = F10_DRAM_LIMIT_HIGH + (dram << 3);
1350
1351 /* read the 'raw' LIMIT registers */
1352 pci_read_config_dword(pvt->addr_f1_ctl, low_offset, &low_limit);
1353
1354 /* Read from the ECS data register for the HIGH portion */
1355 pci_read_config_dword(pvt->addr_f1_ctl, high_offset, &high_limit);
1356
1357 debugf0(" HW Regs: BASE=0x%08x-%08x LIMIT= 0x%08x-%08x\n",
1358 high_base, low_base, high_limit, low_limit);
1359
1360 pvt->dram_DstNode[dram] = (low_limit & 0x7);
1361 pvt->dram_IntlvSel[dram] = (low_limit >> 8) & 0x7;
1362
1363 /*
1364 * Extract address values and form a LIMIT address. Limit is the HIGHEST
1365 * memory location of the region, so low 24 bits need to be all ones.
1366 */
1367 low_limit |= 0x0000FFFF;
1368 pvt->dram_limit[dram] =
1369 ((((u64) high_limit << 32) + (u64) low_limit) << 8) | (0xFF);
1370}
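
/*
 * Worked example (illustrative register values): a raw low_base of
 * 0x00400003 means RW_EN == 0x3 and base bits [31:16] == 0x0040, so with
 * high_base == 0 the computed dram_base is 0x00400000 << 8 = 0x40000000
 * (1 GB). A raw low_limit of 0x007F0000 is OR-ed with 0xFFFF and shifted,
 * giving dram_limit = (0x007FFFFF << 8) | 0xFF = 0x7FFFFFFF, i.e. the last
 * byte below 2 GB.
 */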
1371
1372static void f10_read_dram_ctl_register(struct amd64_pvt *pvt)
1373{
1374 int err = 0;
1375
1376 err = pci_read_config_dword(pvt->dram_f2_ctl, F10_DCTL_SEL_LOW,
1377 &pvt->dram_ctl_select_low);
1378 if (err) {
1379 debugf0("Reading F10_DCTL_SEL_LOW failed\n");
1380 } else {
1381 debugf0("DRAM_DCTL_SEL_LOW=0x%x DctSelBaseAddr=0x%x\n",
1382 pvt->dram_ctl_select_low, dct_sel_baseaddr(pvt));
1383
1384 debugf0(" DRAM DCTs are=%s DRAM Is=%s DRAM-Ctl-"
1385 "sel-hi-range=%s\n",
1386 (dct_ganging_enabled(pvt) ? "GANGED" : "NOT GANGED"),
1387 (dct_dram_enabled(pvt) ? "Enabled" : "Disabled"),
1388 (dct_high_range_enabled(pvt) ? "Enabled" : "Disabled"));
1389
1390 debugf0(" DctDatIntLv=%s MemCleared=%s DctSelIntLvAddr=0x%x\n",
1391 (dct_data_intlv_enabled(pvt) ? "Enabled" : "Disabled"),
1392 (dct_memory_cleared(pvt) ? "True " : "False "),
1393 dct_sel_interleave_addr(pvt));
1394 }
1395
1396 err = pci_read_config_dword(pvt->dram_f2_ctl, F10_DCTL_SEL_HIGH,
1397 &pvt->dram_ctl_select_high);
1398 if (err)
1399 debugf0("Reading F10_DCTL_SEL_HIGH failed\n");
1400}
1401
1402/*
1403 * determine channel based on the interleaving mode: F10h BKDG, 2.8.9 Memory
1404 * Interleaving Modes.
1405 */
1406static u32 f10_determine_channel(struct amd64_pvt *pvt, u64 sys_addr,
1407 int hi_range_sel, u32 intlv_en)
1408{
1409 u32 cs, temp, dct_sel_high = (pvt->dram_ctl_select_low >> 1) & 1;
1410
1411 if (dct_ganging_enabled(pvt))
1412 cs = 0;
1413 else if (hi_range_sel)
1414 cs = dct_sel_high;
1415 else if (dct_interleave_enabled(pvt)) {
1416 /*
1417 * see F2x110[DctSelIntLvAddr] - channel interleave mode
1418 */
1419 if (dct_sel_interleave_addr(pvt) == 0)
1420 cs = sys_addr >> 6 & 1;
1421 else if ((dct_sel_interleave_addr(pvt) >> 1) & 1) {
1422 temp = hweight_long((u32) ((sys_addr >> 16) & 0x1F)) % 2;
1423
1424 if (dct_sel_interleave_addr(pvt) & 1)
1425 cs = (sys_addr >> 9 & 1) ^ temp;
1426 else
1427 cs = (sys_addr >> 6 & 1) ^ temp;
1428 } else if (intlv_en & 4)
1429 cs = sys_addr >> 15 & 1;
1430 else if (intlv_en & 2)
1431 cs = sys_addr >> 14 & 1;
1432 else if (intlv_en & 1)
1433 cs = sys_addr >> 13 & 1;
1434 else
1435 cs = sys_addr >> 12 & 1;
1436 } else if (dct_high_range_enabled(pvt) && !dct_ganging_enabled(pvt))
1437 cs = ~dct_sel_high & 1;
1438 else
1439 cs = 0;
1440
1441 return cs;
1442}
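
/*
 * Worked example (illustrative, non-ganged, DctSelIntLvAddr == 0): the
 * channel is simply bit 6 of the system address, so sys_addr 0x...40
 * selects channel 1 and sys_addr 0x...80 selects channel 0. In the hashed
 * modes above, that bit (or bit 9) is additionally XOR-ed with the parity
 * of sys_addr[20:16].
 */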
1443
1444static inline u32 f10_map_intlv_en_to_shift(u32 intlv_en)
1445{
1446 if (intlv_en == 1)
1447 return 1;
1448 else if (intlv_en == 3)
1449 return 2;
1450 else if (intlv_en == 7)
1451 return 3;
1452
1453 return 0;
1454}
1455
1456/* See F10h BKDG, 2.8.10.2 DctSelBaseOffset Programming */
1457static inline u64 f10_get_base_addr_offset(u64 sys_addr, int hi_range_sel,
1458 u32 dct_sel_base_addr,
1459 u64 dct_sel_base_off,
1460 u32 hole_valid, u32 hole_off,
1461 u64 dram_base)
1462{
1463 u64 chan_off;
1464
1465 if (hi_range_sel) {
1466 if (!(dct_sel_base_addr & 0xFFFFF800) &&
1467 hole_valid && (sys_addr >= 0x100000000ULL))
1468 chan_off = hole_off << 16;
1469 else
1470 chan_off = dct_sel_base_off;
1471 } else {
1472 if (hole_valid && (sys_addr >= 0x100000000ULL))
1473 chan_off = hole_off << 16;
1474 else
1475 chan_off = dram_base & 0xFFFFF8000000ULL;
1476 }
1477
1478 return (sys_addr & 0x0000FFFFFFFFFFC0ULL) -
1479 (chan_off & 0x0000FFFFFF800000ULL);
1480}
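
/*
 * Worked example (illustrative, low range, no DRAM hole): with
 * dram_base == 0x40000000 and sys_addr == 0x40001240, chan_off becomes
 * 0x40000000 (dram_base & 0xFFFFF8000000) and the returned channel
 * address is 0x40001240 - 0x40000000 = 0x1240.
 */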
1481
1482/* Hack for the time being - Can we get this from BIOS?? */
1483#define CH0SPARE_RANK 0
1484#define CH1SPARE_RANK 1
1485
1486/*
1487 * checks if the csrow passed in is marked as SPARED; if so, returns the new
1488 * spare row
1489 */
1490static inline int f10_process_possible_spare(int csrow,
1491 u32 cs, struct amd64_pvt *pvt)
1492{
1493 u32 swap_done;
1494 u32 bad_dram_cs;
1495
1496 /* Depending on channel, isolate respective SPARING info */
1497 if (cs) {
1498 swap_done = F10_ONLINE_SPARE_SWAPDONE1(pvt->online_spare);
1499 bad_dram_cs = F10_ONLINE_SPARE_BADDRAM_CS1(pvt->online_spare);
1500 if (swap_done && (csrow == bad_dram_cs))
1501 csrow = CH1SPARE_RANK;
1502 } else {
1503 swap_done = F10_ONLINE_SPARE_SWAPDONE0(pvt->online_spare);
1504 bad_dram_cs = F10_ONLINE_SPARE_BADDRAM_CS0(pvt->online_spare);
1505 if (swap_done && (csrow == bad_dram_cs))
1506 csrow = CH0SPARE_RANK;
1507 }
1508 return csrow;
1509}
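
/*
 * Example (hedged on the exact F10_ONLINE_SPARE_* macro semantics): if the
 * online-spare register reports SwapDone for channel 1 with BadDramCs == 2,
 * a lookup that resolved to csrow 2 on channel 1 is redirected to
 * CH1SPARE_RANK; all other csrows pass through unchanged.
 */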
1510
1511/*
1512 * Iterate over the DRAM DCT "base" and "mask" registers looking for a
1513 * SystemAddr match on the specified 'ChannelSelect' and 'NodeID'
1514 *
1515 * Return:
1516 * -EINVAL: NOT FOUND
1517 * 0..csrow = Chip-Select Row
1518 */
1519static int f10_lookup_addr_in_dct(u32 in_addr, u32 nid, u32 cs)
1520{
1521 struct mem_ctl_info *mci;
1522 struct amd64_pvt *pvt;
1523 u32 cs_base, cs_mask;
1524 int cs_found = -EINVAL;
1525 int csrow;
1526
1527 mci = mci_lookup[nid];
1528 if (!mci)
1529 return cs_found;
1530
1531 pvt = mci->pvt_info;
1532
1533 debugf1("InputAddr=0x%x channelselect=%d\n", in_addr, cs);
1534
1535 for (csrow = 0; csrow < CHIPSELECT_COUNT; csrow++) {
1536
1537 cs_base = amd64_get_dct_base(pvt, cs, csrow);
1538 if (!(cs_base & K8_DCSB_CS_ENABLE))
1539 continue;
1540
1541 /*
1542 * We have an ENABLED CSROW, Isolate just the MASK bits of the
1543 * target: [28:19] and [13:5], which map to [36:27] and [21:13]
1544 * of the actual address.
1545 */
1546 cs_base &= REV_F_F1Xh_DCSB_BASE_BITS;
1547
1548 /*
1549 * Get the DCT Mask, and ENABLE the reserved bits: [18:16] and
1550 * [4:0] to become ON. Then mask off bits [28:0] ([36:8])
1551 */
1552 cs_mask = amd64_get_dct_mask(pvt, cs, csrow);
1553
1554 debugf1(" CSROW=%d CSBase=0x%x RAW CSMask=0x%x\n",
1555 csrow, cs_base, cs_mask);
1556
1557 cs_mask = (cs_mask | 0x0007C01F) & 0x1FFFFFFF;
1558
1559 debugf1(" Final CSMask=0x%x\n", cs_mask);
1560 debugf1(" (InputAddr & ~CSMask)=0x%x "
1561 "(CSBase & ~CSMask)=0x%x\n",
1562 (in_addr & ~cs_mask), (cs_base & ~cs_mask));
1563
1564 if ((in_addr & ~cs_mask) == (cs_base & ~cs_mask)) {
1565 cs_found = f10_process_possible_spare(csrow, cs, pvt);
1566
1567 debugf1(" MATCH csrow=%d\n", cs_found);
1568 break;
1569 }
1570 }
1571 return cs_found;
1572}
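
/*
 * Worked mask example (hypothetical raw mask value): a raw cs_mask of
 * 0x00083FE0 becomes (0x00083FE0 | 0x0007C01F) & 0x1FFFFFFF = 0x000FFFFF,
 * so only bits [31:20] of InputAddr and CSBase take part in the comparison
 * above; the reserved mask bits are forced to 1 so they never cause a
 * mismatch.
 */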
1573
1574/* For a given @dram_range, check if @sys_addr falls within it. */
1575static int f10_match_to_this_node(struct amd64_pvt *pvt, int dram_range,
1576 u64 sys_addr, int *nid, int *chan_sel)
1577{
1578 int node_id, cs_found = -EINVAL, high_range = 0;
1579 u32 intlv_en, intlv_sel, intlv_shift, hole_off;
1580 u32 hole_valid, tmp, dct_sel_base, channel;
1581 u64 dram_base, chan_addr, dct_sel_base_off;
1582
1583 dram_base = pvt->dram_base[dram_range];
1584 intlv_en = pvt->dram_IntlvEn[dram_range];
1585
1586 node_id = pvt->dram_DstNode[dram_range];
1587 intlv_sel = pvt->dram_IntlvSel[dram_range];
1588
1589 debugf1("(dram=%d) Base=0x%llx SystemAddr= 0x%llx Limit=0x%llx\n",
1590 dram_range, dram_base, sys_addr, pvt->dram_limit[dram_range]);
1591
1592 /*
1593 * This assumes that one node's DHAR is the same as all the other
1594 * nodes' DHAR.
1595 */
1596 hole_off = (pvt->dhar & 0x0000FF80);
1597 hole_valid = (pvt->dhar & 0x1);
1598 dct_sel_base_off = (pvt->dram_ctl_select_high & 0xFFFFFC00) << 16;
1599
1600 debugf1(" HoleOffset=0x%x HoleValid=0x%x IntlvSel=0x%x\n",
1601 hole_off, hole_valid, intlv_sel);
1602
1603 	if (intlv_en &&
1604 (intlv_sel != ((sys_addr >> 12) & intlv_en)))
1605 return -EINVAL;
1606
1607 dct_sel_base = dct_sel_baseaddr(pvt);
1608
1609 /*
1610 * check whether addresses >= DctSelBaseAddr[47:27] are to be used to
1611 * select between DCT0 and DCT1.
1612 */
1613 if (dct_high_range_enabled(pvt) &&
1614 !dct_ganging_enabled(pvt) &&
1615 ((sys_addr >> 27) >= (dct_sel_base >> 11)))
1616 high_range = 1;
1617
1618 channel = f10_determine_channel(pvt, sys_addr, high_range, intlv_en);
1619
1620 chan_addr = f10_get_base_addr_offset(sys_addr, high_range, dct_sel_base,
1621 dct_sel_base_off, hole_valid,
1622 hole_off, dram_base);
1623
1624 intlv_shift = f10_map_intlv_en_to_shift(intlv_en);
1625
1626 /* remove Node ID (in case of memory interleaving) */
1627 tmp = chan_addr & 0xFC0;
1628
1629 chan_addr = ((chan_addr >> intlv_shift) & 0xFFFFFFFFF000ULL) | tmp;
1630
1631 /* remove channel interleave and hash */
1632 if (dct_interleave_enabled(pvt) &&
1633 !dct_high_range_enabled(pvt) &&
1634 !dct_ganging_enabled(pvt)) {
1635 if (dct_sel_interleave_addr(pvt) != 1)
1636 chan_addr = (chan_addr >> 1) & 0xFFFFFFFFFFFFFFC0ULL;
1637 else {
1638 tmp = chan_addr & 0xFC0;
1639 chan_addr = ((chan_addr & 0xFFFFFFFFFFFFC000ULL) >> 1)
1640 | tmp;
1641 }
1642 }
1643
1644 debugf1(" (ChannelAddrLong=0x%llx) >> 8 becomes InputAddr=0x%x\n",
1645 chan_addr, (u32)(chan_addr >> 8));
1646
1647 cs_found = f10_lookup_addr_in_dct(chan_addr >> 8, node_id, channel);
1648
1649 if (cs_found >= 0) {
1650 *nid = node_id;
1651 *chan_sel = channel;
1652 }
1653 return cs_found;
1654}
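
/*
 * Worked example of the node-interleave removal above (illustrative): with
 * two interleaved nodes intlv_shift == 1, so a chan_addr of 0x12340 keeps
 * tmp = 0x340 (bits [11:6]) and becomes ((0x12340 >> 1) & ~0xFFFULL) | 0x340
 * = 0x9340, i.e. the node-select bit is squeezed out while the low-order
 * bits are preserved.
 */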
1655
1656static int f10_translate_sysaddr_to_cs(struct amd64_pvt *pvt, u64 sys_addr,
1657 int *node, int *chan_sel)
1658{
1659 int dram_range, cs_found = -EINVAL;
1660 u64 dram_base, dram_limit;
1661
1662 for (dram_range = 0; dram_range < DRAM_REG_COUNT; dram_range++) {
1663
1664 if (!pvt->dram_rw_en[dram_range])
1665 continue;
1666
1667 dram_base = pvt->dram_base[dram_range];
1668 dram_limit = pvt->dram_limit[dram_range];
1669
1670 if ((dram_base <= sys_addr) && (sys_addr <= dram_limit)) {
1671
1672 cs_found = f10_match_to_this_node(pvt, dram_range,
1673 sys_addr, node,
1674 chan_sel);
1675 if (cs_found >= 0)
1676 break;
1677 }
1678 }
1679 return cs_found;
1680}
1681
1682/*
1683 * This is the F10h reference code from AMD to map a @sys_addr to NodeID,
1684 * CSROW, Channel.
1685 *
1686 * The @sys_addr is usually an error address received from the hardware.
1687 */
1688static void f10_map_sysaddr_to_csrow(struct mem_ctl_info *mci,
1689 struct amd64_error_info_regs *info,
1690 u64 sys_addr)
1691{
1692 struct amd64_pvt *pvt = mci->pvt_info;
1693 u32 page, offset;
1694 unsigned short syndrome;
1695 int nid, csrow, chan = 0;
1696
1697 csrow = f10_translate_sysaddr_to_cs(pvt, sys_addr, &nid, &chan);
1698
1699 if (csrow >= 0) {
1700 error_address_to_page_and_offset(sys_addr, &page, &offset);
1701
1702 syndrome = EXTRACT_HIGH_SYNDROME(info->nbsl) << 8;
1703 syndrome |= EXTRACT_LOW_SYNDROME(info->nbsh);
1704
1705 /*
1706 * Is CHIPKILL on? If so, then we can attempt to use the
1707 * syndrome to isolate which channel the error was on.
1708 */
1709 if (pvt->nbcfg & K8_NBCFG_CHIPKILL)
1710 chan = get_channel_from_ecc_syndrome(syndrome);
1711
1712 if (chan >= 0) {
1713 edac_mc_handle_ce(mci, page, offset, syndrome,
1714 csrow, chan, EDAC_MOD_STR);
1715 } else {
1716 /*
1717 * Channel unknown, report all channels on this
1718 * CSROW as failed.
1719 */
1720 for (chan = 0; chan < mci->csrows[csrow].nr_channels;
1721 chan++) {
1722 edac_mc_handle_ce(mci, page, offset,
1723 syndrome,
1724 csrow, chan,
1725 EDAC_MOD_STR);
1726 }
1727 }
1728
1729 } else {
1730 edac_mc_handle_ce_no_info(mci, EDAC_MOD_STR);
1731 }
1732}
1733
1734/*
1735 * Input (@index) is the DBAM DIMM value (1 of 4) used as an index into a shift
1736 * table (revf_quad_ddr2_shift) which starts at 128MB DIMM size. Index of 0
1737 * indicates an empty DIMM slot, as reported by Hardware on empty slots.
1738 *
1739 * Normalize to 128MB by subtracting the 27 bit shift.
1740 */
1741static int map_dbam_to_csrow_size(int index)
1742{
1743 int mega_bytes = 0;
1744
1745 if (index > 0 && index <= DBAM_MAX_VALUE)
1746 mega_bytes = ((128 << (revf_quad_ddr2_shift[index]-27)));
1747
1748 return mega_bytes;
1749}
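
/*
 * Example (hypothetical shift values; the real ones live in
 * revf_quad_ddr2_shift): a table entry of 28 would give
 * 128 << (28 - 27) = 256MB, while an entry of 27 maps to the minimum
 * 128MB DIMM size mentioned above.
 */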
1750
1751/*
1752 * debug routine to display the memory sizes of a DIMM (ganged or not) and its
1753 * CSROWs as well
1754 */
1755static void f10_debug_display_dimm_sizes(int ctrl, struct amd64_pvt *pvt,
1756 int ganged)
1757{
1758 int dimm, size0, size1;
1759 u32 dbam;
1760 u32 *dcsb;
1761
1762 debugf1(" dbam%d: 0x%8.08x CSROW is %s\n", ctrl,
1763 ctrl ? pvt->dbam1 : pvt->dbam0,
1764 ganged ? "GANGED - dbam1 not used" : "NON-GANGED");
1765
1766 dbam = ctrl ? pvt->dbam1 : pvt->dbam0;
1767 dcsb = ctrl ? pvt->dcsb1 : pvt->dcsb0;
1768
1769 /* Dump memory sizes for DIMM and its CSROWs */
1770 for (dimm = 0; dimm < 4; dimm++) {
1771
1772 size0 = 0;
1773 if (dcsb[dimm*2] & K8_DCSB_CS_ENABLE)
1774 size0 = map_dbam_to_csrow_size(DBAM_DIMM(dimm, dbam));
1775
1776 size1 = 0;
1777 if (dcsb[dimm*2 + 1] & K8_DCSB_CS_ENABLE)
1778 size1 = map_dbam_to_csrow_size(DBAM_DIMM(dimm, dbam));
1779
1780 debugf1(" CTRL-%d DIMM-%d=%5dMB CSROW-%d=%5dMB "
1781 "CSROW-%d=%5dMB\n",
1782 ctrl,
1783 dimm,
1784 size0 + size1,
1785 dimm * 2,
1786 size0,
1787 dimm * 2 + 1,
1788 size1);
1789 }
1790}
1791
1792/*
1793 * Very early hardware probe on pci_probe thread to determine if this module
1794 * supports the hardware.
1795 *
1796 * Return:
1797 * 0 for OK
1798 * 1 for error
1799 */
1800static int f10_probe_valid_hardware(struct amd64_pvt *pvt)
1801{
1802 int ret = 0;
1803
1804 /*
1805 * If we are on a DDR3 machine, we don't know yet if
1806 * we support that properly at this time
1807 */
1808 if ((pvt->dchr0 & F10_DCHR_Ddr3Mode) ||
1809 (pvt->dchr1 & F10_DCHR_Ddr3Mode)) {
1810
1811 amd64_printk(KERN_WARNING,
1812 "%s() This machine is running with DDR3 memory. "
1813 "This is not currently supported. "
1814 "DCHR0=0x%x DCHR1=0x%x\n",
1815 __func__, pvt->dchr0, pvt->dchr1);
1816
1817 amd64_printk(KERN_WARNING,
1818 " Contact '%s' module MAINTAINER to help add"
1819 " support.\n",
1820 EDAC_MOD_STR);
1821
1822 ret = 1;
1823
1824 }
1825 return ret;
1826}
1827
1828/*
1829 * There currently are 3 types of MC devices for AMD Athlon/Opterons
1830 * (as per PCI DEVICE_IDs):
1831 *
1832 * Family K8: That is the Athlon64 and Opteron CPUs. They all have the same PCI
1833 * DEVICE ID, even though there are differences between the different Revisions
1834 * (CG,D,E,F).
1835 *
1836 * Family F10h and F11h.
1837 *
1838 */
1839static struct amd64_family_type amd64_family_types[] = {
1840 [K8_CPUS] = {
1841 .ctl_name = "RevF",
1842 .addr_f1_ctl = PCI_DEVICE_ID_AMD_K8_NB_ADDRMAP,
1843 .misc_f3_ctl = PCI_DEVICE_ID_AMD_K8_NB_MISC,
1844 .ops = {
1845 .early_channel_count = k8_early_channel_count,
1846 .get_error_address = k8_get_error_address,
1847 .read_dram_base_limit = k8_read_dram_base_limit,
1848 .map_sysaddr_to_csrow = k8_map_sysaddr_to_csrow,
1849 .dbam_map_to_pages = k8_dbam_map_to_pages,
1850 }
1851 },
1852 [F10_CPUS] = {
1853 .ctl_name = "Family 10h",
1854 .addr_f1_ctl = PCI_DEVICE_ID_AMD_10H_NB_MAP,
1855 .misc_f3_ctl = PCI_DEVICE_ID_AMD_10H_NB_MISC,
1856 .ops = {
1857 .probe_valid_hardware = f10_probe_valid_hardware,
1858 .early_channel_count = f10_early_channel_count,
1859 .get_error_address = f10_get_error_address,
1860 .read_dram_base_limit = f10_read_dram_base_limit,
1861 .read_dram_ctl_register = f10_read_dram_ctl_register,
1862 .map_sysaddr_to_csrow = f10_map_sysaddr_to_csrow,
1863 .dbam_map_to_pages = f10_dbam_map_to_pages,
1864 }
1865 },
1866 [F11_CPUS] = {
1867 .ctl_name = "Family 11h",
1868 .addr_f1_ctl = PCI_DEVICE_ID_AMD_11H_NB_MAP,
1869 .misc_f3_ctl = PCI_DEVICE_ID_AMD_11H_NB_MISC,
1870 .ops = {
1871 .probe_valid_hardware = f10_probe_valid_hardware,
1872 .early_channel_count = f10_early_channel_count,
1873 .get_error_address = f10_get_error_address,
1874 .read_dram_base_limit = f10_read_dram_base_limit,
1875 .read_dram_ctl_register = f10_read_dram_ctl_register,
1876 .map_sysaddr_to_csrow = f10_map_sysaddr_to_csrow,
1877 .dbam_map_to_pages = f10_dbam_map_to_pages,
1878 }
1879 },
1880};
1881
1882static struct pci_dev *pci_get_related_function(unsigned int vendor,
1883 unsigned int device,
1884 struct pci_dev *related)
1885{
1886 struct pci_dev *dev = NULL;
1887
1888 dev = pci_get_device(vendor, device, dev);
1889 while (dev) {
1890 if ((dev->bus->number == related->bus->number) &&
1891 (PCI_SLOT(dev->devfn) == PCI_SLOT(related->devfn)))
1892 break;
1893 dev = pci_get_device(vendor, device, dev);
1894 }
1895
1896 return dev;
1897}
1898
1899/*
1900 * syndrome mapping table for ECC ChipKill devices
1901 *
1902 * The comment in each row is the token (nibble) number that is in error.
1903 * The least significant nibble of the syndrome is the mask for the bits
1904 * that are in error (need to be toggled) for the particular nibble.
1905 *
1906 * Each row contains 16 entries.
1907 * The first entry (0th) is the channel number for that row of syndromes.
1908 * The remaining 15 entries are the syndromes for the respective Error
1909 * bit mask index.
1910 *
1911 * 1st index entry is 0x0001 mask, indicating that the rightmost bit is the
1912 * bit in error.
1913 * The 2nd index entry is the 0x0010 mask, indicating the second bit is damaged.
1914 * The 3rd index entry is the 0x0011 mask, indicating that the rightmost 2 bits
1915 * are damaged.
1916 * And so on until index 15, 0x1111, whose entry holds the syndrome
1917 * indicating that all 4 bits are damaged.
1918 *
1919 * A search is performed on this table looking for a given syndrome.
1920 *
1921 * See the AMD documentation for ECC syndromes. This ECC table is valid
1922 * across all the versions of the AMD64 processors.
1923 *
1924 * A fast lookup is to use the LAST four bits of the 16-bit syndrome as a
1925 * COLUMN index, then search all ROWS of that column, looking for a match
1926 * with the input syndrome. The ROW value will be the token number.
1927 *
1928 * The 0'th entry on that row can be returned as the CHANNEL (0 or 1) of this
1929 * error.
1930 */
1931#define NUMBER_ECC_ROWS 36
1932static const unsigned short ecc_chipkill_syndromes[NUMBER_ECC_ROWS][16] = {
1933 /* Channel 0 syndromes */
1934 {/*0*/ 0, 0xe821, 0x7c32, 0x9413, 0xbb44, 0x5365, 0xc776, 0x2f57,
1935 0xdd88, 0x35a9, 0xa1ba, 0x499b, 0x66cc, 0x8eed, 0x1afe, 0xf2df },
1936 {/*1*/ 0, 0x5d31, 0xa612, 0xfb23, 0x9584, 0xc8b5, 0x3396, 0x6ea7,
1937 0xeac8, 0xb7f9, 0x4cda, 0x11eb, 0x7f4c, 0x227d, 0xd95e, 0x846f },
1938 {/*2*/ 0, 0x0001, 0x0002, 0x0003, 0x0004, 0x0005, 0x0006, 0x0007,
1939 0x0008, 0x0009, 0x000a, 0x000b, 0x000c, 0x000d, 0x000e, 0x000f },
1940 {/*3*/ 0, 0x2021, 0x3032, 0x1013, 0x4044, 0x6065, 0x7076, 0x5057,
1941 0x8088, 0xa0a9, 0xb0ba, 0x909b, 0xc0cc, 0xe0ed, 0xf0fe, 0xd0df },
1942 {/*4*/ 0, 0x5041, 0xa082, 0xf0c3, 0x9054, 0xc015, 0x30d6, 0x6097,
1943 0xe0a8, 0xb0e9, 0x402a, 0x106b, 0x70fc, 0x20bd, 0xd07e, 0x803f },
1944 {/*5*/ 0, 0xbe21, 0xd732, 0x6913, 0x2144, 0x9f65, 0xf676, 0x4857,
1945 0x3288, 0x8ca9, 0xe5ba, 0x5b9b, 0x13cc, 0xaded, 0xc4fe, 0x7adf },
1946 {/*6*/ 0, 0x4951, 0x8ea2, 0xc7f3, 0x5394, 0x1ac5, 0xdd36, 0x9467,
1947 0xa1e8, 0xe8b9, 0x2f4a, 0x661b, 0xf27c, 0xbb2d, 0x7cde, 0x358f },
1948 {/*7*/ 0, 0x74e1, 0x9872, 0xec93, 0xd6b4, 0xa255, 0x4ec6, 0x3a27,
1949 0x6bd8, 0x1f39, 0xf3aa, 0x874b, 0xbd6c, 0xc98d, 0x251e, 0x51ff },
1950 {/*8*/ 0, 0x15c1, 0x2a42, 0x3f83, 0xcef4, 0xdb35, 0xe4b6, 0xf177,
1951 0x4758, 0x5299, 0x6d1a, 0x78db, 0x89ac, 0x9c6d, 0xa3ee, 0xb62f },
1952 {/*9*/ 0, 0x3d01, 0x1602, 0x2b03, 0x8504, 0xb805, 0x9306, 0xae07,
1953 0xca08, 0xf709, 0xdc0a, 0xe10b, 0x4f0c, 0x720d, 0x590e, 0x640f },
1954 {/*a*/ 0, 0x9801, 0xec02, 0x7403, 0x6b04, 0xf305, 0x8706, 0x1f07,
1955 0xbd08, 0x2509, 0x510a, 0xc90b, 0xd60c, 0x4e0d, 0x3a0e, 0xa20f },
1956 {/*b*/ 0, 0xd131, 0x6212, 0xb323, 0x3884, 0xe9b5, 0x5a96, 0x8ba7,
1957 0x1cc8, 0xcdf9, 0x7eda, 0xafeb, 0x244c, 0xf57d, 0x465e, 0x976f },
1958 {/*c*/ 0, 0xe1d1, 0x7262, 0x93b3, 0xb834, 0x59e5, 0xca56, 0x2b87,
1959 0xdc18, 0x3dc9, 0xae7a, 0x4fab, 0x542c, 0x85fd, 0x164e, 0xf79f },
1960 {/*d*/ 0, 0x6051, 0xb0a2, 0xd0f3, 0x1094, 0x70c5, 0xa036, 0xc067,
1961 0x20e8, 0x40b9, 0x904a, 0x601b, 0x307c, 0x502d, 0x80de, 0xe08f },
1962 {/*e*/ 0, 0xa4c1, 0xf842, 0x5c83, 0xe6f4, 0x4235, 0x1eb6, 0xba77,
1963 0x7b58, 0xdf99, 0x831a, 0x27db, 0x9dac, 0x396d, 0x65ee, 0xc12f },
1964 {/*f*/ 0, 0x11c1, 0x2242, 0x3383, 0xc8f4, 0xd935, 0xeab6, 0xfb77,
1965 0x4c58, 0x5d99, 0x6e1a, 0x7fdb, 0x84ac, 0x956d, 0xa6ee, 0xb72f },
1966
1967 /* Channel 1 syndromes */
1968 {/*10*/ 1, 0x45d1, 0x8a62, 0xcfb3, 0x5e34, 0x1be5, 0xd456, 0x9187,
1969 0xa718, 0xe2c9, 0x2d7a, 0x68ab, 0xf92c, 0xbcfd, 0x734e, 0x369f },
1970 {/*11*/ 1, 0x63e1, 0xb172, 0xd293, 0x14b4, 0x7755, 0xa5c6, 0xc627,
1971 0x28d8, 0x4b39, 0x99aa, 0xfa4b, 0x3c6c, 0x5f8d, 0x8d1e, 0xeeff },
1972 {/*12*/ 1, 0xb741, 0xd982, 0x6ec3, 0x2254, 0x9515, 0xfbd6, 0x4c97,
1973 0x33a8, 0x84e9, 0xea2a, 0x5d6b, 0x11fc, 0xa6bd, 0xc87e, 0x7f3f },
1974 {/*13*/ 1, 0xdd41, 0x6682, 0xbbc3, 0x3554, 0xe815, 0x53d6, 0xce97,
1975 0x1aa8, 0xc7e9, 0x7c2a, 0xa1fb, 0x2ffc, 0xf2bd, 0x497e, 0x943f },
1976 {/*14*/ 1, 0x2bd1, 0x3d62, 0x16b3, 0x4f34, 0x64e5, 0x7256, 0x5987,
1977 0x8518, 0xaec9, 0xb87a, 0x93ab, 0xca2c, 0xe1fd, 0xf74e, 0xdc9f },
1978 {/*15*/ 1, 0x83c1, 0xc142, 0x4283, 0xa4f4, 0x2735, 0x65b6, 0xe677,
1979 0xf858, 0x7b99, 0x391a, 0xbadb, 0x5cac, 0xdf6d, 0x9dee, 0x1e2f },
1980 {/*16*/ 1, 0x8fd1, 0xc562, 0x4ab3, 0xa934, 0x26e5, 0x6c56, 0xe387,
1981 0xfe18, 0x71c9, 0x3b7a, 0xb4ab, 0x572c, 0xd8fd, 0x924e, 0x1d9f },
1982 {/*17*/ 1, 0x4791, 0x89e2, 0xce73, 0x5264, 0x15f5, 0xdb86, 0x9c17,
1983 0xa3b8, 0xe429, 0x2a5a, 0x6dcb, 0xf1dc, 0xb64d, 0x783e, 0x3faf },
1984 {/*18*/ 1, 0x5781, 0xa9c2, 0xfe43, 0x92a4, 0xc525, 0x3b66, 0x6ce7,
1985 0xe3f8, 0xb479, 0x4a3a, 0x1dbb, 0x715c, 0x26dd, 0xd89e, 0x8f1f },
1986 {/*19*/ 1, 0xbf41, 0xd582, 0x6ac3, 0x2954, 0x9615, 0xfcd6, 0x4397,
1987 0x3ea8, 0x81e9, 0xeb2a, 0x546b, 0x17fc, 0xa8bd, 0xc27e, 0x7d3f },
1988 {/*1a*/ 1, 0x9891, 0xe1e2, 0x7273, 0x6464, 0xf7f5, 0x8586, 0x1617,
1989 0xb8b8, 0x2b29, 0x595a, 0xcacb, 0xdcdc, 0x4f4d, 0x3d3e, 0xaeaf },
1990 {/*1b*/ 1, 0xcce1, 0x4472, 0x8893, 0xfdb4, 0x3f55, 0xb9c6, 0x7527,
1991 0x56d8, 0x9a39, 0x12aa, 0xde4b, 0xab6c, 0x678d, 0xef1e, 0x23ff },
1992 {/*1c*/ 1, 0xa761, 0xf9b2, 0x5ed3, 0xe214, 0x4575, 0x1ba6, 0xbcc7,
1993 0x7328, 0xd449, 0x8a9a, 0x2dfb, 0x913c, 0x365d, 0x688e, 0xcfef },
1994 {/*1d*/ 1, 0xff61, 0x55b2, 0xaad3, 0x7914, 0x8675, 0x2ca6, 0xd3c7,
1995 0x9e28, 0x6149, 0xcb9a, 0x34fb, 0xe73c, 0x185d, 0xb28e, 0x4def },
1996 {/*1e*/ 1, 0x5451, 0xa8a2, 0xfcf3, 0x9694, 0xc2c5, 0x3e36, 0x6a67,
1997 0xebe8, 0xbfb9, 0x434a, 0x171b, 0x7d7c, 0x292d, 0xd5de, 0x818f },
1998 {/*1f*/ 1, 0x6fc1, 0xb542, 0xda83, 0x19f4, 0x7635, 0xacb6, 0xc377,
1999 0x2e58, 0x4199, 0x9b1a, 0xf4db, 0x37ac, 0x586d, 0x82ee, 0xed2f },
2000
2001 /* ECC bits are also in the set of tokens and they too can go bad
2002 * first 2 cover channel 0, while the second 2 cover channel 1
2003 */
2004 {/*20*/ 0, 0xbe01, 0xd702, 0x6903, 0x2104, 0x9f05, 0xf606, 0x4807,
2005 0x3208, 0x8c09, 0xe50a, 0x5b0b, 0x130c, 0xad0d, 0xc40e, 0x7a0f },
2006 {/*21*/ 0, 0x4101, 0x8202, 0xc303, 0x5804, 0x1905, 0xda06, 0x9b07,
2007 0xac08, 0xed09, 0x2e0a, 0x6f0b, 0x640c, 0xb50d, 0x760e, 0x370f },
2008 {/*22*/ 1, 0xc441, 0x4882, 0x8cc3, 0xf654, 0x3215, 0xbed6, 0x7a97,
2009 0x5ba8, 0x9fe9, 0x132a, 0xd76b, 0xadfc, 0x69bd, 0xe57e, 0x213f },
2010 {/*23*/ 1, 0x7621, 0x9b32, 0xed13, 0xda44, 0xac65, 0x4176, 0x3757,
2011 0x6f88, 0x19a9, 0xf4ba, 0x829b, 0xb5cc, 0xc3ed, 0x2efe, 0x58df }
2012};
2013
2014/*
2015 * Given the syndrome argument, scan each of the channel tables for a syndrome
2016 * match. Depending on which table it is found, return the channel number.
2017 */
2018static int get_channel_from_ecc_syndrome(unsigned short syndrome)
2019{
2020 int row;
2021 int column;
2022
2023 /* Determine column to scan */
2024 column = syndrome & 0xF;
2025
2026 /* Scan all rows, looking for syndrome, or end of table */
2027 for (row = 0; row < NUMBER_ECC_ROWS; row++) {
2028 if (ecc_chipkill_syndromes[row][column] == syndrome)
2029 return ecc_chipkill_syndromes[row][0];
2030 }
2031
2032 debugf0("syndrome(%x) not found\n", syndrome);
2033 return -1;
2034}
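
/*
 * Worked lookup example using the table above: syndrome 0xe821 has column
 * index 0xe821 & 0xF = 1 and matches row 0, whose 0th entry is 0, so the
 * error is attributed to channel 0; syndrome 0x45d1 matches row 0x10 and
 * yields channel 1.
 */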
2035
2036/*
2037 * Check for a valid error in the NB Status High register. If so, proceed to read
2038 * NB Status Low, NB Address Low and NB Address High registers and store data
2039 * into error structure.
2040 *
2041 * Returns:
2042 * - 1: if hardware regs contains valid error info
2043 * - 0: if no valid error is indicated
2044 */
2045static int amd64_get_error_info_regs(struct mem_ctl_info *mci,
2046 struct amd64_error_info_regs *regs)
2047{
2048 struct amd64_pvt *pvt;
2049 struct pci_dev *misc_f3_ctl;
2050 int err = 0;
2051
2052 pvt = mci->pvt_info;
2053 misc_f3_ctl = pvt->misc_f3_ctl;
2054
2055 err = pci_read_config_dword(misc_f3_ctl, K8_NBSH, &regs->nbsh);
2056 if (err)
2057 goto err_reg;
2058
2059 if (!(regs->nbsh & K8_NBSH_VALID_BIT))
2060 return 0;
2061
2062 /* valid error, read remaining error information registers */
2063 err = pci_read_config_dword(misc_f3_ctl, K8_NBSL, &regs->nbsl);
2064 if (err)
2065 goto err_reg;
2066
2067 err = pci_read_config_dword(misc_f3_ctl, K8_NBEAL, &regs->nbeal);
2068 if (err)
2069 goto err_reg;
2070
2071 err = pci_read_config_dword(misc_f3_ctl, K8_NBEAH, &regs->nbeah);
2072 if (err)
2073 goto err_reg;
2074
2075 err = pci_read_config_dword(misc_f3_ctl, K8_NBCFG, &regs->nbcfg);
2076 if (err)
2077 goto err_reg;
2078
2079 return 1;
2080
2081err_reg:
2082 debugf0("Reading error info register failed\n");
2083 return 0;
2084}
2085
2086/*
2087 * This function is called to retrieve the error data from hardware and store it
2088 * in the info structure.
2089 *
2090 * Returns:
2091 * - 1: if a valid error is found
2092 * - 0: if no error is found
2093 */
2094static int amd64_get_error_info(struct mem_ctl_info *mci,
2095 struct amd64_error_info_regs *info)
2096{
2097 struct amd64_pvt *pvt;
2098 struct amd64_error_info_regs regs;
2099
2100 pvt = mci->pvt_info;
2101
2102 if (!amd64_get_error_info_regs(mci, info))
2103 return 0;
2104
2105 /*
2106 * Here's the problem with the K8's EDAC reporting: There are four
2107 * registers which report pieces of error information. They are shared
2108 * between CEs and UEs. Furthermore, contrary to what is stated in the
2109 * BKDG, the overflow bit is never used! Every error always updates the
2110 * reporting registers.
2111 *
2112 * Can you see the race condition? All four error reporting registers
2113 * must be read before a new error updates them! There is no way to read
2114 * all four registers atomically. The best that can be done is to detect
2115 * that a race has occurred and then report the error without any kind of
2116 * precision.
2117 *
2118 * What is still positive is that errors are still reported and thus
2119 * problems can still be detected - just not localized because the
2120 * syndrome and address are spread out across registers.
2121 *
2122 * Grrrrr!!!!! Here's hoping that AMD fixes this in some future K8 rev.
2123 * UEs and CEs should have separate register sets with proper overflow
2124 * bits that are used! At very least the problem can be fixed by
2125 * honoring the ErrValid bit in 'nbsh' and not updating registers - just
2126 * set the overflow bit - unless the current error is CE and the new
2127 * error is UE which would be the only situation for overwriting the
2128 * current values.
2129 */
2130
2131 regs = *info;
2132
2133 /* Use info from the second read - most current */
2134 if (unlikely(!amd64_get_error_info_regs(mci, info)))
2135 return 0;
2136
2137 /* clear the error bits in hardware */
2138 pci_write_bits32(pvt->misc_f3_ctl, K8_NBSH, 0, K8_NBSH_VALID_BIT);
2139
2140 /* Check for the possible race condition */
2141 if ((regs.nbsh != info->nbsh) ||
2142 (regs.nbsl != info->nbsl) ||
2143 (regs.nbeah != info->nbeah) ||
2144 (regs.nbeal != info->nbeal)) {
2145 amd64_mc_printk(mci, KERN_WARNING,
2146 "hardware STATUS read access race condition "
2147 "detected!\n");
2148 return 0;
2149 }
2150 return 1;
2151}
2152
2153static inline void amd64_decode_gart_tlb_error(struct mem_ctl_info *mci,
2154 struct amd64_error_info_regs *info)
2155{
2156 u32 err_code;
2157 u32 ec_tt; /* error code transaction type (2b) */
2158 u32 ec_ll; /* error code cache level (2b) */
2159
2160 err_code = EXTRACT_ERROR_CODE(info->nbsl);
2161 ec_ll = EXTRACT_LL_CODE(err_code);
2162 ec_tt = EXTRACT_TT_CODE(err_code);
2163
2164 amd64_mc_printk(mci, KERN_ERR,
2165 "GART TLB event: transaction type(%s), "
2166 "cache level(%s)\n", tt_msgs[ec_tt], ll_msgs[ec_ll]);
2167}
2168
2169static inline void amd64_decode_mem_cache_error(struct mem_ctl_info *mci,
2170 struct amd64_error_info_regs *info)
2171{
2172 u32 err_code;
2173 u32 ec_rrrr; /* error code memory transaction (4b) */
2174 u32 ec_tt; /* error code transaction type (2b) */
2175 u32 ec_ll; /* error code cache level (2b) */
2176
2177 err_code = EXTRACT_ERROR_CODE(info->nbsl);
2178 ec_ll = EXTRACT_LL_CODE(err_code);
2179 ec_tt = EXTRACT_TT_CODE(err_code);
2180 ec_rrrr = EXTRACT_RRRR_CODE(err_code);
2181
2182 amd64_mc_printk(mci, KERN_ERR,
2183 "cache hierarchy error: memory transaction type(%s), "
2184 "transaction type(%s), cache level(%s)\n",
2185 rrrr_msgs[ec_rrrr], tt_msgs[ec_tt], ll_msgs[ec_ll]);
2186}
2187
2188
2189/*
2190 * Handle any Correctable Errors (CEs) that have occurred. Check for valid ERROR
2191 * ADDRESS and process.
2192 */
2193static void amd64_handle_ce(struct mem_ctl_info *mci,
2194 struct amd64_error_info_regs *info)
2195{
2196 struct amd64_pvt *pvt = mci->pvt_info;
2197 u64 SystemAddress;
2198
2199 /* Ensure that the Error Address is VALID */
2200 if ((info->nbsh & K8_NBSH_VALID_ERROR_ADDR) == 0) {
2201 amd64_mc_printk(mci, KERN_ERR,
2202 "HW has no ERROR_ADDRESS available\n");
2203 edac_mc_handle_ce_no_info(mci, EDAC_MOD_STR);
2204 return;
2205 }
2206
2207 SystemAddress = extract_error_address(mci, info);
2208
2209 amd64_mc_printk(mci, KERN_ERR,
2210 "CE ERROR_ADDRESS= 0x%llx\n", SystemAddress);
2211
2212 pvt->ops->map_sysaddr_to_csrow(mci, info, SystemAddress);
2213}
2214
2215/* Handle any Un-correctable Errors (UEs) */
2216static void amd64_handle_ue(struct mem_ctl_info *mci,
2217 struct amd64_error_info_regs *info)
2218{
2219 int csrow;
2220 u64 SystemAddress;
2221 u32 page, offset;
2222 struct mem_ctl_info *log_mci, *src_mci = NULL;
2223
2224 log_mci = mci;
2225
2226 if ((info->nbsh & K8_NBSH_VALID_ERROR_ADDR) == 0) {
2227 amd64_mc_printk(mci, KERN_CRIT,
2228 "HW has no ERROR_ADDRESS available\n");
2229 edac_mc_handle_ue_no_info(log_mci, EDAC_MOD_STR);
2230 return;
2231 }
2232
2233 SystemAddress = extract_error_address(mci, info);
2234
2235 /*
2236 * Find out which node the error address belongs to. This may be
2237 * different from the node that detected the error.
2238 */
2239 src_mci = find_mc_by_sys_addr(mci, SystemAddress);
2240 if (!src_mci) {
2241 amd64_mc_printk(mci, KERN_CRIT,
2242 "ERROR ADDRESS (0x%lx) value NOT mapped to a MC\n",
2243 (unsigned long)SystemAddress);
2244 edac_mc_handle_ue_no_info(log_mci, EDAC_MOD_STR);
2245 return;
2246 }
2247
2248 log_mci = src_mci;
2249
2250 csrow = sys_addr_to_csrow(log_mci, SystemAddress);
2251 if (csrow < 0) {
2252 amd64_mc_printk(mci, KERN_CRIT,
2253 "ERROR_ADDRESS (0x%lx) value NOT mapped to 'csrow'\n",
2254 (unsigned long)SystemAddress);
2255 edac_mc_handle_ue_no_info(log_mci, EDAC_MOD_STR);
2256 } else {
2257 error_address_to_page_and_offset(SystemAddress, &page, &offset);
2258 edac_mc_handle_ue(log_mci, page, offset, csrow, EDAC_MOD_STR);
2259 }
2260}
2261
2262static void amd64_decode_bus_error(struct mem_ctl_info *mci,
2263 struct amd64_error_info_regs *info)
2264{
2265 u32 err_code, ext_ec;
2266 u32 ec_pp; /* error code participating processor (2p) */
2267 u32 ec_to; /* error code timed out (1b) */
2268 u32 ec_rrrr; /* error code memory transaction (4b) */
2269 u32 ec_ii; /* error code memory or I/O (2b) */
2270 u32 ec_ll; /* error code cache level (2b) */
2271
2272 ext_ec = EXTRACT_EXT_ERROR_CODE(info->nbsl);
2273 err_code = EXTRACT_ERROR_CODE(info->nbsl);
2274
2275 ec_ll = EXTRACT_LL_CODE(err_code);
2276 ec_ii = EXTRACT_II_CODE(err_code);
2277 ec_rrrr = EXTRACT_RRRR_CODE(err_code);
2278 ec_to = EXTRACT_TO_CODE(err_code);
2279 ec_pp = EXTRACT_PP_CODE(err_code);
2280
2281 amd64_mc_printk(mci, KERN_ERR,
2282 "BUS ERROR:\n"
2283 " time-out(%s) mem or i/o(%s)\n"
2284 " participating processor(%s)\n"
2285 " memory transaction type(%s)\n"
2286 " cache level(%s) Error Found by: %s\n",
2287 to_msgs[ec_to],
2288 ii_msgs[ec_ii],
2289 pp_msgs[ec_pp],
2290 rrrr_msgs[ec_rrrr],
2291 ll_msgs[ec_ll],
2292 (info->nbsh & K8_NBSH_ERR_SCRUBER) ?
2293 "Scrubber" : "Normal Operation");
2294
2295 /* If this was an 'observed' error, early out */
2296 if (ec_pp == K8_NBSL_PP_OBS)
2297 return; /* We aren't the node involved */
2298
2299 /* Parse out the extended error code for ECC events */
2300 switch (ext_ec) {
2301 /* F10 changed to one Extended ECC error code */
2302 case F10_NBSL_EXT_ERR_RES: /* Reserved field */
2303 case F10_NBSL_EXT_ERR_ECC: /* F10 ECC ext err code */
2304 break;
2305
2306 default:
2307 amd64_mc_printk(mci, KERN_ERR, "NOT ECC: no special error "
2308 "handling for this error\n");
2309 return;
2310 }
2311
2312 if (info->nbsh & K8_NBSH_CECC)
2313 amd64_handle_ce(mci, info);
2314 else if (info->nbsh & K8_NBSH_UECC)
2315 amd64_handle_ue(mci, info);
2316
2317 /*
2318 * If main error is CE then overflow must be CE. If main error is UE
2319 * then overflow is unknown. We'll call the overflow a CE - if
2320 * panic_on_ue is set then we're already panic'ed and won't arrive
2321 * here. Otherwise, apparently someone doesn't think that UEs are
2322 * catastrophic.
2323 */
2324 if (info->nbsh & K8_NBSH_OVERFLOW)
2325 edac_mc_handle_ce_no_info(mci, EDAC_MOD_STR
2326 "Error Overflow set");
2327}
2328
2329int amd64_process_error_info(struct mem_ctl_info *mci,
2330 struct amd64_error_info_regs *info,
2331 int handle_errors)
2332{
2333 struct amd64_pvt *pvt;
2334 struct amd64_error_info_regs *regs;
2335 u32 err_code, ext_ec;
2336 int gart_tlb_error = 0;
2337
2338 pvt = mci->pvt_info;
2339
2340 /* If caller doesn't want us to process the error, return */
2341 if (!handle_errors)
2342 return 1;
2343
2344 regs = info;
2345
2346 debugf1("NorthBridge ERROR: mci(0x%p)\n", mci);
2347 debugf1(" MC node(%d) Error-Address(0x%.8x-%.8x)\n",
2348 pvt->mc_node_id, regs->nbeah, regs->nbeal);
2349 debugf1(" nbsh(0x%.8x) nbsl(0x%.8x)\n",
2350 regs->nbsh, regs->nbsl);
2351 debugf1(" Valid Error=%s Overflow=%s\n",
2352 (regs->nbsh & K8_NBSH_VALID_BIT) ? "True" : "False",
2353 (regs->nbsh & K8_NBSH_OVERFLOW) ? "True" : "False");
2354 debugf1(" Err Uncorrected=%s MCA Error Reporting=%s\n",
2355 (regs->nbsh & K8_NBSH_UNCORRECTED_ERR) ?
2356 "True" : "False",
2357 (regs->nbsh & K8_NBSH_ERR_ENABLE) ?
2358 "True" : "False");
2359 debugf1(" MiscErr Valid=%s ErrAddr Valid=%s PCC=%s\n",
2360 (regs->nbsh & K8_NBSH_MISC_ERR_VALID) ?
2361 "True" : "False",
2362 (regs->nbsh & K8_NBSH_VALID_ERROR_ADDR) ?
2363 "True" : "False",
2364 (regs->nbsh & K8_NBSH_PCC) ?
2365 "True" : "False");
2366 	debugf1("	CECC=%s UECC=%s Found by Scrubber=%s\n",
2367 (regs->nbsh & K8_NBSH_CECC) ?
2368 "True" : "False",
2369 (regs->nbsh & K8_NBSH_UECC) ?
2370 "True" : "False",
2371 (regs->nbsh & K8_NBSH_ERR_SCRUBER) ?
2372 "True" : "False");
2373 debugf1(" CORE0=%s CORE1=%s CORE2=%s CORE3=%s\n",
2374 (regs->nbsh & K8_NBSH_CORE0) ? "True" : "False",
2375 (regs->nbsh & K8_NBSH_CORE1) ? "True" : "False",
2376 (regs->nbsh & K8_NBSH_CORE2) ? "True" : "False",
2377 (regs->nbsh & K8_NBSH_CORE3) ? "True" : "False");
2378
2380 err_code = EXTRACT_ERROR_CODE(regs->nbsl);
2381
2382 /* Determine which error type:
2383 * 1) GART errors - non-fatal, developmental events
2384 * 2) MEMORY errors
2385 * 3) BUS errors
2386 * 4) Unknown error
2387 */
2388 if (TEST_TLB_ERROR(err_code)) {
2389 /*
2390 * GART errors are intended to help graphics driver developers
2391 * to detect bad GART PTEs. It is recommended by AMD to disable
2392 * GART table walk error reporting by default[1] (currently
2393 * being disabled in mce_cpu_quirks()) and according to the
2394 * comment in mce_cpu_quirks(), such GART errors can be
2395 * incorrectly triggered. We may see these errors anyway and
2396 * unless requested by the user, they won't be reported.
2397 *
2398 * [1] section 13.10.1 on BIOS and Kernel Developers Guide for
2399 * AMD NPT family 0Fh processors
2400 */
2401 if (report_gart_errors == 0)
2402 return 1;
2403
2404 /*
2405 * Only if GART error reporting is requested should we generate
2406 * any logs.
2407 */
2408 gart_tlb_error = 1;
2409
2410 debugf1("GART TLB error\n");
2411 amd64_decode_gart_tlb_error(mci, info);
2412 } else if (TEST_MEM_ERROR(err_code)) {
2413 debugf1("Memory/Cache error\n");
2414 amd64_decode_mem_cache_error(mci, info);
2415 } else if (TEST_BUS_ERROR(err_code)) {
2416 debugf1("Bus (Link/DRAM) error\n");
2417 amd64_decode_bus_error(mci, info);
2418 } else {
2419 /* shouldn't reach here! */
2420 amd64_mc_printk(mci, KERN_WARNING,
2421 "%s(): unknown MCE error 0x%x\n", __func__,
2422 err_code);
2423 }
2424
2425 ext_ec = EXTRACT_EXT_ERROR_CODE(regs->nbsl);
2426 amd64_mc_printk(mci, KERN_ERR,
2427 "ExtErr=(0x%x) %s\n", ext_ec, ext_msgs[ext_ec]);
2428
2429 if (((ext_ec >= F10_NBSL_EXT_ERR_CRC &&
2430 ext_ec <= F10_NBSL_EXT_ERR_TGT) ||
2431 (ext_ec == F10_NBSL_EXT_ERR_RMW)) &&
2432 EXTRACT_LDT_LINK(info->nbsh)) {
2433
2434 amd64_mc_printk(mci, KERN_ERR,
2435 "Error on hypertransport link: %s\n",
2436 htlink_msgs[
2437 EXTRACT_LDT_LINK(info->nbsh)]);
2438 }
2439
2440 /*
2441 * Check the UE bit of the NB status high register; if set, generate some
2442 * logs. If NOT a GART error, then process the event as a NO-INFO event.
2443 * If it was a GART error, skip that process.
2444 */
2445 if (regs->nbsh & K8_NBSH_UNCORRECTED_ERR) {
2446 amd64_mc_printk(mci, KERN_CRIT, "uncorrected error\n");
2447 if (!gart_tlb_error)
2448 edac_mc_handle_ue_no_info(mci, "UE bit is set\n");
2449 }
2450
2451 if (regs->nbsh & K8_NBSH_PCC)
2452 amd64_mc_printk(mci, KERN_CRIT,
2453 "PCC (processor context corrupt) set\n");
2454
2455 return 1;
2456}
2457EXPORT_SYMBOL_GPL(amd64_process_error_info);
2458
2459/*
2460 * The main polling 'check' function, called FROM the edac core to perform the
2461 * error checking and if an error is encountered, error processing.
2462 */
2463static void amd64_check(struct mem_ctl_info *mci)
2464{
2465 struct amd64_error_info_regs info;
2466
2467 if (amd64_get_error_info(mci, &info))
2468 amd64_process_error_info(mci, &info, 1);
2469}
2470
2471/*
2472 * Input:
2473 * 1) struct amd64_pvt which contains pvt->dram_f2_ctl pointer
2474 * 2) AMD Family index value
2475 *
2476 * Output:
2477 * Upon return of 0, the following are filled in:
2478 *
2479 * struct pvt->addr_f1_ctl
2480 * struct pvt->misc_f3_ctl
2481 *
2482 * Filled in with the related device functions of 'dram_f2_ctl'.
2483 * These devices are "reserved" via pci_get_device().
2484 *
2485 * Upon return of 1 (error status):
2486 *
2487 * Nothing reserved
2488 */
2489static int amd64_reserve_mc_sibling_devices(struct amd64_pvt *pvt, int mc_idx)
2490{
2491 const struct amd64_family_type *amd64_dev = &amd64_family_types[mc_idx];
2492
2493 /* Reserve the ADDRESS MAP Device */
2494 pvt->addr_f1_ctl = pci_get_related_function(pvt->dram_f2_ctl->vendor,
2495 amd64_dev->addr_f1_ctl,
2496 pvt->dram_f2_ctl);
2497
2498 if (!pvt->addr_f1_ctl) {
2499 amd64_printk(KERN_ERR, "error address map device not found: "
2500 "vendor %x device 0x%x (broken BIOS?)\n",
2501 PCI_VENDOR_ID_AMD, amd64_dev->addr_f1_ctl);
2502 return 1;
2503 }
2504
2505 /* Reserve the MISC Device */
2506 pvt->misc_f3_ctl = pci_get_related_function(pvt->dram_f2_ctl->vendor,
2507 amd64_dev->misc_f3_ctl,
2508 pvt->dram_f2_ctl);
2509
2510 if (!pvt->misc_f3_ctl) {
2511 pci_dev_put(pvt->addr_f1_ctl);
2512 pvt->addr_f1_ctl = NULL;
2513
2514 amd64_printk(KERN_ERR, "error miscellaneous device not found: "
2515 "vendor %x device 0x%x (broken BIOS?)\n",
2516 PCI_VENDOR_ID_AMD, amd64_dev->misc_f3_ctl);
2517 return 1;
2518 }
2519
2520 debugf1(" Addr Map device PCI Bus ID:\t%s\n",
2521 pci_name(pvt->addr_f1_ctl));
2522 debugf1(" DRAM MEM-CTL PCI Bus ID:\t%s\n",
2523 pci_name(pvt->dram_f2_ctl));
2524 debugf1(" Misc device PCI Bus ID:\t%s\n",
2525 pci_name(pvt->misc_f3_ctl));
2526
2527 return 0;
2528}
2529
2530static void amd64_free_mc_sibling_devices(struct amd64_pvt *pvt)
2531{
2532 pci_dev_put(pvt->addr_f1_ctl);
2533 pci_dev_put(pvt->misc_f3_ctl);
2534}
2535
2536/*
2537 * Retrieve the hardware registers of the memory controller (this includes the
2538 * 'Address Map' and 'Misc' device regs)
2539 */
2540static void amd64_read_mc_registers(struct amd64_pvt *pvt)
2541{
2542 u64 msr_val;
2543 int dram, err = 0;
2544
2545 /*
2546 * Retrieve TOP_MEM and TOP_MEM2; no masking off of reserved bits since
2547 * those are Read-As-Zero
2548 */
2549 rdmsrl(MSR_K8_TOP_MEM1, msr_val);
2550 pvt->top_mem = msr_val >> 23;
2551 debugf0(" TOP_MEM=0x%08llx\n", pvt->top_mem);
2552
2553 /* check first whether TOP_MEM2 is enabled */
2554 rdmsrl(MSR_K8_SYSCFG, msr_val);
2555 if (msr_val & (1U << 21)) {
2556 rdmsrl(MSR_K8_TOP_MEM2, msr_val);
2557 pvt->top_mem2 = msr_val >> 23;
2558 debugf0(" TOP_MEM2=0x%08llx\n", pvt->top_mem2);
2559 } else
2560 debugf0(" TOP_MEM2 disabled.\n");
2561
2562 amd64_cpu_display_info(pvt);
2563
2564 err = pci_read_config_dword(pvt->misc_f3_ctl, K8_NBCAP, &pvt->nbcap);
2565 if (err)
2566 goto err_reg;
2567
2568 if (pvt->ops->read_dram_ctl_register)
2569 pvt->ops->read_dram_ctl_register(pvt);
2570
2571 for (dram = 0; dram < DRAM_REG_COUNT; dram++) {
2572 /*
2573 * Call CPU specific READ function to get the DRAM Base and
2574 * Limit values from the DCT.
2575 */
2576 pvt->ops->read_dram_base_limit(pvt, dram);
2577
2578 /*
2579 * Only print out debug info on rows with both R and W Enabled.
2580 		 * During normal processing the compiler should optimize this
2581 		 * whole 'if' debug output block away.
2582 */
2583 if (pvt->dram_rw_en[dram] != 0) {
2584 debugf1(" DRAM_BASE[%d]: 0x%8.08x-%8.08x "
2585 "DRAM_LIMIT: 0x%8.08x-%8.08x\n",
2586 dram,
2587 (u32)(pvt->dram_base[dram] >> 32),
2588 (u32)(pvt->dram_base[dram] & 0xFFFFFFFF),
2589 (u32)(pvt->dram_limit[dram] >> 32),
2590 (u32)(pvt->dram_limit[dram] & 0xFFFFFFFF));
2591 debugf1(" IntlvEn=%s %s %s "
2592 "IntlvSel=%d DstNode=%d\n",
2593 pvt->dram_IntlvEn[dram] ?
2594 "Enabled" : "Disabled",
2595 (pvt->dram_rw_en[dram] & 0x2) ? "W" : "!W",
2596 (pvt->dram_rw_en[dram] & 0x1) ? "R" : "!R",
2597 pvt->dram_IntlvSel[dram],
2598 pvt->dram_DstNode[dram]);
2599 }
2600 }
2601
2602 amd64_read_dct_base_mask(pvt);
2603
2604 err = pci_read_config_dword(pvt->addr_f1_ctl, K8_DHAR, &pvt->dhar);
2605 if (err)
2606 goto err_reg;
2607
2608 amd64_read_dbam_reg(pvt);
2609
2610 err = pci_read_config_dword(pvt->misc_f3_ctl,
2611 F10_ONLINE_SPARE, &pvt->online_spare);
2612 if (err)
2613 goto err_reg;
2614
2615 err = pci_read_config_dword(pvt->dram_f2_ctl, F10_DCLR_0, &pvt->dclr0);
2616 if (err)
2617 goto err_reg;
2618
2619 err = pci_read_config_dword(pvt->dram_f2_ctl, F10_DCHR_0, &pvt->dchr0);
2620 if (err)
2621 goto err_reg;
2622
2623 if (!dct_ganging_enabled(pvt)) {
2624 err = pci_read_config_dword(pvt->dram_f2_ctl, F10_DCLR_1,
2625 &pvt->dclr1);
2626 if (err)
2627 goto err_reg;
2628
2629 err = pci_read_config_dword(pvt->dram_f2_ctl, F10_DCHR_1,
2630 &pvt->dchr1);
2631 if (err)
2632 goto err_reg;
2633 }
2634
2635 amd64_dump_misc_regs(pvt);
2636
	return;	/* don't fall into the error message below on success */

2637err_reg:
2638 debugf0("Reading an MC register failed\n");
2639
2640}
2641
2642/*
2643 * NOTE: CPU Revision Dependent code
2644 *
2645 * Input:
2646 * @csrow_nr ChipSelect Row Number (0..CHIPSELECT_COUNT-1)
2647 * k8 private pointer to -->
2648 * DRAM Bank Address mapping register
2649 * node_id
2650 * DCL register where dual_channel_active is
2651 *
2652 * The DBAM register consists of 4 sets of 4 bits each definitions:
2653 *
2654 * Bits: CSROWs
2655 * 0-3 CSROWs 0 and 1
2656 * 4-7 CSROWs 2 and 3
2657 * 8-11 CSROWs 4 and 5
2658 * 12-15 CSROWs 6 and 7
2659 *
2660 * Values range from: 0 to 15
2661 * The meaning of the values depends on CPU revision and dual-channel state,
2662 * see the relevant BKDG for more info.
2663 *
2664 * The memory controller provides for a total of only 8 CSROWs in its current
2665 * architecture. Each "pair" of CSROWs normally represents just one DIMM in
2666 * single channel or two (2) DIMMs in dual channel mode.
2667 *
2668 * The following code logic collapses the various tables for CSROW based on CPU
2669 * revision.
2670 *
2671 * Returns:
2672 * The number of PAGE_SIZE pages on the specified CSROW number it
2673 * encompasses
2674 *
2675 */
2676static u32 amd64_csrow_nr_pages(int csrow_nr, struct amd64_pvt *pvt)
2677{
2678 u32 dram_map, nr_pages;
2679
2680 /*
2681 * The math on this doesn't look right on the surface because x/2*4 can
2682 * be simplified to x*2 but this expression makes use of the fact that
2683 * it is integral math where 1/2=0. This intermediate value becomes the
2684 * number of bits to shift the DBAM register to extract the proper CSROW
2685 * field.
2686 */
2687 dram_map = (pvt->dbam0 >> ((csrow_nr / 2) * 4)) & 0xF;
2688
2689 nr_pages = pvt->ops->dbam_map_to_pages(pvt, dram_map);
2690
2691 /*
2692 * If dual channel then double the memory size of single channel.
2693 * Channel count is 1 or 2
2694 */
2695 nr_pages <<= (pvt->channel_count - 1);
2696
2697 debugf0(" (csrow=%d) DBAM map index= %d\n", csrow_nr, dram_map);
2698 debugf0(" nr_pages= %u channel-count = %d\n",
2699 nr_pages, pvt->channel_count);
2700
2701 return nr_pages;
2702}
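
/*
 * Worked example (illustrative): for csrow_nr == 5 the shift is
 * (5 / 2) * 4 = 8, so DBAM bits [11:8] select the size index for csrows
 * 4 and 5; the resulting page count is then doubled when
 * pvt->channel_count == 2.
 */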
2703
2704/*
2705 * Initialize the array of csrow attribute instances, based on the values
2706 * from pci config hardware registers.
2707 */
2708static int amd64_init_csrows(struct mem_ctl_info *mci)
2709{
2710 struct csrow_info *csrow;
2711 struct amd64_pvt *pvt;
2712 u64 input_addr_min, input_addr_max, sys_addr;
2713 int i, err = 0, empty = 1;
2714
2715 pvt = mci->pvt_info;
2716
2717 err = pci_read_config_dword(pvt->misc_f3_ctl, K8_NBCFG, &pvt->nbcfg);
2718 if (err)
2719 debugf0("Reading K8_NBCFG failed\n");
2720
2721 debugf0("NBCFG= 0x%x CHIPKILL= %s DRAM ECC= %s\n", pvt->nbcfg,
2722 (pvt->nbcfg & K8_NBCFG_CHIPKILL) ? "Enabled" : "Disabled",
2723 (pvt->nbcfg & K8_NBCFG_ECC_ENABLE) ? "Enabled" : "Disabled"
2724 );
2725
2726 for (i = 0; i < CHIPSELECT_COUNT; i++) {
2727 csrow = &mci->csrows[i];
2728
2729 if ((pvt->dcsb0[i] & K8_DCSB_CS_ENABLE) == 0) {
2730 debugf1("----CSROW %d EMPTY for node %d\n", i,
2731 pvt->mc_node_id);
2732 continue;
2733 }
2734
2735 debugf1("----CSROW %d VALID for MC node %d\n",
2736 i, pvt->mc_node_id);
2737
2738 empty = 0;
2739 csrow->nr_pages = amd64_csrow_nr_pages(i, pvt);
2740 find_csrow_limits(mci, i, &input_addr_min, &input_addr_max);
2741 sys_addr = input_addr_to_sys_addr(mci, input_addr_min);
2742 csrow->first_page = (u32) (sys_addr >> PAGE_SHIFT);
2743 sys_addr = input_addr_to_sys_addr(mci, input_addr_max);
2744 csrow->last_page = (u32) (sys_addr >> PAGE_SHIFT);
2745 csrow->page_mask = ~mask_from_dct_mask(pvt, i);
2746 /* 8 bytes of resolution */
2747
2748 csrow->mtype = amd64_determine_memory_type(pvt);
2749
2750 debugf1(" for MC node %d csrow %d:\n", pvt->mc_node_id, i);
2751 debugf1(" input_addr_min: 0x%lx input_addr_max: 0x%lx\n",
2752 (unsigned long)input_addr_min,
2753 (unsigned long)input_addr_max);
2754 debugf1(" sys_addr: 0x%lx page_mask: 0x%lx\n",
2755 (unsigned long)sys_addr, csrow->page_mask);
2756 debugf1(" nr_pages: %u first_page: 0x%lx "
2757 "last_page: 0x%lx\n",
2758 (unsigned)csrow->nr_pages,
2759 csrow->first_page, csrow->last_page);
2760
2761 /*
2762 * determine whether CHIPKILL or JUST ECC or NO ECC is operating
2763 */
2764 if (pvt->nbcfg & K8_NBCFG_ECC_ENABLE)
2765 csrow->edac_mode =
2766 (pvt->nbcfg & K8_NBCFG_CHIPKILL) ?
2767 EDAC_S4ECD4ED : EDAC_SECDED;
2768 else
2769 csrow->edac_mode = EDAC_NONE;
2770 }
2771
2772 return empty;
2773}
2774
2775/*
2776 * Only if 'ecc_enable_override' is set AND BIOS had ECC disabled, do "we"
2777 * enable it.
2778 */
2779static void amd64_enable_ecc_error_reporting(struct mem_ctl_info *mci)
2780{
2781 struct amd64_pvt *pvt = mci->pvt_info;
2782 const cpumask_t *cpumask = cpumask_of_node(pvt->mc_node_id);
2783 int cpu, idx = 0, err = 0;
2784 struct msr msrs[cpumask_weight(cpumask)];
2785 u32 value;
2786 u32 mask = K8_NBCTL_CECCEn | K8_NBCTL_UECCEn;
2787
2788 if (!ecc_enable_override)
2789 return;
2790
2791 memset(msrs, 0, sizeof(msrs));
2792
2793 amd64_printk(KERN_WARNING,
2794 "'ecc_enable_override' parameter is active, "
2795 "Enabling AMD ECC hardware now: CAUTION\n");
2796
2797 err = pci_read_config_dword(pvt->misc_f3_ctl, K8_NBCTL, &value);
2798 if (err)
2799 debugf0("Reading K8_NBCTL failed\n");
2800
2801 /* turn on UECCn and CECCEn bits */
2802 pvt->old_nbctl = value & mask;
2803 pvt->nbctl_mcgctl_saved = 1;
2804
2805 value |= mask;
2806 pci_write_config_dword(pvt->misc_f3_ctl, K8_NBCTL, value);
2807
2808 rdmsr_on_cpus(cpumask, K8_MSR_MCGCTL, msrs);
2809
2810 for_each_cpu(cpu, cpumask) {
2811 if (msrs[idx].l & K8_MSR_MCGCTL_NBE)
2812 set_bit(idx, &pvt->old_mcgctl);
2813
2814 msrs[idx].l |= K8_MSR_MCGCTL_NBE;
2815 idx++;
2816 }
2817 wrmsr_on_cpus(cpumask, K8_MSR_MCGCTL, msrs);
2818
2819 err = pci_read_config_dword(pvt->misc_f3_ctl, K8_NBCFG, &value);
2820 if (err)
2821 debugf0("Reading K8_NBCFG failed\n");
2822
2823 debugf0("NBCFG(1)= 0x%x CHIPKILL= %s ECC_ENABLE= %s\n", value,
2824 (value & K8_NBCFG_CHIPKILL) ? "Enabled" : "Disabled",
2825 (value & K8_NBCFG_ECC_ENABLE) ? "Enabled" : "Disabled");
2826
2827 if (!(value & K8_NBCFG_ECC_ENABLE)) {
2828 amd64_printk(KERN_WARNING,
2829 "This node reports that DRAM ECC is "
2830 "currently Disabled; ENABLING now\n");
2831
2832 /* Attempt to turn on DRAM ECC Enable */
2833 value |= K8_NBCFG_ECC_ENABLE;
2834 pci_write_config_dword(pvt->misc_f3_ctl, K8_NBCFG, value);
2835
2836 err = pci_read_config_dword(pvt->misc_f3_ctl, K8_NBCFG, &value);
2837 if (err)
2838 debugf0("Reading K8_NBCFG failed\n");
2839
2840 if (!(value & K8_NBCFG_ECC_ENABLE)) {
2841 amd64_printk(KERN_WARNING,
2842 "Hardware rejects Enabling DRAM ECC checking\n"
2843 "Check memory DIMM configuration\n");
2844 } else {
2845 amd64_printk(KERN_DEBUG,
2846 "Hardware accepted DRAM ECC Enable\n");
2847 }
2848 }
2849 debugf0("NBCFG(2)= 0x%x CHIPKILL= %s ECC_ENABLE= %s\n", value,
2850 (value & K8_NBCFG_CHIPKILL) ? "Enabled" : "Disabled",
2851 (value & K8_NBCFG_ECC_ENABLE) ? "Enabled" : "Disabled");
2852
2853 pvt->ctl_error_info.nbcfg = value;
2854}
2855
2856static void amd64_restore_ecc_error_reporting(struct amd64_pvt *pvt)
2857{
2858 const cpumask_t *cpumask = cpumask_of_node(pvt->mc_node_id);
2859 int cpu, idx = 0, err = 0;
2860 struct msr msrs[cpumask_weight(cpumask)];
2861 u32 value;
2862 u32 mask = K8_NBCTL_CECCEn | K8_NBCTL_UECCEn;
2863
2864 if (!pvt->nbctl_mcgctl_saved)
2865 return;
2866
2867 memset(msrs, 0, sizeof(msrs));
2868
2869 err = pci_read_config_dword(pvt->misc_f3_ctl, K8_NBCTL, &value);
2870 if (err)
2871 debugf0("Reading K8_NBCTL failed\n");
2872 value &= ~mask;
2873 value |= pvt->old_nbctl;
2874
2875 /* restore the NB Enable MCGCTL bit */
2876 pci_write_config_dword(pvt->misc_f3_ctl, K8_NBCTL, value);
2877
2878 rdmsr_on_cpus(cpumask, K8_MSR_MCGCTL, msrs);
2879
2880 for_each_cpu(cpu, cpumask) {
2881 msrs[idx].l &= ~K8_MSR_MCGCTL_NBE;
2882 msrs[idx].l |=
2883 test_bit(idx, &pvt->old_mcgctl) << K8_MSR_MCGCTL_NBE;
2884 idx++;
2885 }
2886
2887 wrmsr_on_cpus(cpumask, K8_MSR_MCGCTL, msrs);
2888}
2889
2890static void check_mcg_ctl(void *ret)
2891{
2892 u64 msr_val = 0;
2893 u8 nbe;
2894
2895 rdmsrl(MSR_IA32_MCG_CTL, msr_val);
2896 nbe = msr_val & K8_MSR_MCGCTL_NBE;
2897
2898 debugf0("core: %u, MCG_CTL: 0x%llx, NB MSR is %s\n",
2899 raw_smp_processor_id(), msr_val,
2900 (nbe ? "enabled" : "disabled"));
2901
2902 if (!nbe)
2903 *(int *)ret = 0;
2904}
2905
2906/* check MCG_CTL on all the cpus on this node */
2907static int amd64_mcg_ctl_enabled_on_cpus(const cpumask_t *mask)
2908{
2909 int ret = 1;
2910 preempt_disable();
2911 smp_call_function_many(mask, check_mcg_ctl, &ret, 1);
2912 preempt_enable();
2913
2914 return ret;
2915}
2916
2917/*
2918 * EDAC requires that the BIOS have ECC enabled before taking over the
2919 * processing of ECC errors. This is because the BIOS can properly initialize
2920 * the memory system completely. A command line option allows force-enabling
2921 * hardware ECC later in amd64_enable_ecc_error_reporting().
2922 */
2923static int amd64_check_ecc_enabled(struct amd64_pvt *pvt)
2924{
2925 u32 value;
2926 int err = 0, ret = 0;
2927 u8 ecc_enabled = 0;
2928
2929 err = pci_read_config_dword(pvt->misc_f3_ctl, K8_NBCFG, &value);
2930 if (err)
2931 debugf0("Reading K8_NBCTL failed\n");
2932
2933 ecc_enabled = !!(value & K8_NBCFG_ECC_ENABLE);
2934
2935 ret = amd64_mcg_ctl_enabled_on_cpus(cpumask_of_node(pvt->mc_node_id));
2936
2937 debugf0("K8_NBCFG=0x%x, DRAM ECC is %s\n", value,
2938 (value & K8_NBCFG_ECC_ENABLE ? "enabled" : "disabled"));
2939
2940 if (!ecc_enabled || !ret) {
2941 if (!ecc_enabled) {
2942 amd64_printk(KERN_WARNING, "This node reports that "
2943 "Memory ECC is currently "
2944 "disabled.\n");
2945
2946 amd64_printk(KERN_WARNING, "bit 0x%lx in register "
2947 "F3x%x of the MISC_CONTROL device (%s) "
2948 "should be enabled\n", K8_NBCFG_ECC_ENABLE,
2949 K8_NBCFG, pci_name(pvt->misc_f3_ctl));
2950 }
2951 if (!ret) {
2952 amd64_printk(KERN_WARNING, "bit 0x%016lx in MSR 0x%08x "
2953 "of node %d should be enabled\n",
2954 K8_MSR_MCGCTL_NBE, MSR_IA32_MCG_CTL,
2955 pvt->mc_node_id);
2956 }
2957 if (!ecc_enable_override) {
2958 amd64_printk(KERN_WARNING, "WARNING: ECC is NOT "
2959 "currently enabled by the BIOS. Module "
2960 "will NOT be loaded.\n"
2961 " Either Enable ECC in the BIOS, "
2962 "or use the 'ecc_enable_override' "
2963 "parameter.\n"
2964 " Might be a BIOS bug, if BIOS says "
2965 "ECC is enabled\n"
2966 " Use of the override can cause "
2967 "unknown side effects.\n");
2968 ret = -ENODEV;
2969 }
2970 } else {
2971 amd64_printk(KERN_INFO,
2972 "ECC is enabled by BIOS, Proceeding "
2973 "with EDAC module initialization\n");
2974
2975 /* CLEAR the override, since BIOS controlled it */
2976 ecc_enable_override = 0;
2977 }
2978
2979 return ret;
2980}
2981
2982struct mcidev_sysfs_attribute sysfs_attrs[ARRAY_SIZE(amd64_dbg_attrs) +
2983 ARRAY_SIZE(amd64_inj_attrs) +
2984 1];
2985
2986struct mcidev_sysfs_attribute terminator = { .attr = { .name = NULL } };
2987
2988static void amd64_set_mc_sysfs_attributes(struct mem_ctl_info *mci)
2989{
2990 unsigned int i = 0, j = 0;
2991
2992 for (; i < ARRAY_SIZE(amd64_dbg_attrs); i++)
2993 sysfs_attrs[i] = amd64_dbg_attrs[i];
2994
2995 for (j = 0; j < ARRAY_SIZE(amd64_inj_attrs); j++, i++)
2996 sysfs_attrs[i] = amd64_inj_attrs[j];
2997
2998 sysfs_attrs[i] = terminator;
2999
3000 mci->mc_driver_sysfs_attributes = sysfs_attrs;
3001}
3002
3003static void amd64_setup_mci_misc_attributes(struct mem_ctl_info *mci)
3004{
3005 struct amd64_pvt *pvt = mci->pvt_info;
3006
3007 mci->mtype_cap = MEM_FLAG_DDR2 | MEM_FLAG_RDDR2;
3008 mci->edac_ctl_cap = EDAC_FLAG_NONE;
3009 mci->edac_cap = EDAC_FLAG_NONE;
3010
3011 if (pvt->nbcap & K8_NBCAP_SECDED)
3012 mci->edac_ctl_cap |= EDAC_FLAG_SECDED;
3013
3014 if (pvt->nbcap & K8_NBCAP_CHIPKILL)
3015 mci->edac_ctl_cap |= EDAC_FLAG_S4ECD4ED;
3016
3017 mci->edac_cap = amd64_determine_edac_cap(pvt);
3018 mci->mod_name = EDAC_MOD_STR;
3019 mci->mod_ver = EDAC_AMD64_VERSION;
3020 mci->ctl_name = get_amd_family_name(pvt->mc_type_index);
3021 mci->dev_name = pci_name(pvt->dram_f2_ctl);
3022 mci->ctl_page_to_phys = NULL;
3023
3024 /* IMPORTANT: Set the polling 'check' function in this module */
3025 mci->edac_check = amd64_check;
3026
3027 /* memory scrubber interface */
3028 mci->set_sdram_scrub_rate = amd64_set_scrub_rate;
3029 mci->get_sdram_scrub_rate = amd64_get_scrub_rate;
3030}
3031
3032/*
3033 * Init stuff for this DRAM Controller device.
3034 *
3035 * Due to a hardware feature on Fam10h CPUs, the Enable Extended Configuration
3036 * Space feature MUST be enabled on ALL Processors prior to actually reading
3037 * from the ECS registers, because the module can be loaded on any core and
3038 * a core does not 'see' the other processors' ECS data unless ECS is enabled
3039 * on them. Our solution is to first enable ECS access in this routine on all
3040 * processors, gather some data into an amd64_pvt structure and later come
3041 * back in a finish-setup function to perform the final initialization. See
3042 * amd64_init_2nd_stage() for that.
3043 */
3044static int amd64_probe_one_instance(struct pci_dev *dram_f2_ctl,
3045 int mc_type_index)
3046{
3047 struct amd64_pvt *pvt = NULL;
3048 int err = 0, ret;
3049
3050 ret = -ENOMEM;
3051 pvt = kzalloc(sizeof(struct amd64_pvt), GFP_KERNEL);
3052 if (!pvt)
3053 goto err_exit;
3054
3055 pvt->mc_node_id = get_mc_node_id_from_pdev(dram_f2_ctl);
3056
3057 pvt->dram_f2_ctl = dram_f2_ctl;
3058 pvt->ext_model = boot_cpu_data.x86_model >> 4;
3059 pvt->mc_type_index = mc_type_index;
3060 pvt->ops = family_ops(mc_type_index);
3061 pvt->old_mcgctl = 0;
3062
3063 /*
3064 * We have the dram_f2_ctl device as an argument, now go reserve its
3065 * sibling devices from the PCI system.
3066 */
3067 ret = -ENODEV;
3068 err = amd64_reserve_mc_sibling_devices(pvt, mc_type_index);
3069 if (err)
3070 goto err_free;
3071
3072 ret = -EINVAL;
3073 err = amd64_check_ecc_enabled(pvt);
3074 if (err)
3075 goto err_put;
3076
3077 /*
3078 * Key operation here: setup of HW prior to performing ops on it. Some
3079 * setup is required to access ECS data. After this is performed, the
3080 * 'teardown' function must be called upon error and normal exit paths.
3081 */
3082 if (boot_cpu_data.x86 >= 0x10)
3083 amd64_setup(pvt);
3084
3085 /*
3086 * Save the pointer to the private data for use in 2nd initialization
3087 * stage
3088 */
3089 pvt_lookup[pvt->mc_node_id] = pvt;
3090
3091 return 0;
3092
3093err_put:
3094 amd64_free_mc_sibling_devices(pvt);
3095
3096err_free:
3097 kfree(pvt);
3098
3099err_exit:
3100 return ret;
3101}
3102
3103/*
3104 * This is the finishing stage of the init code. It needs to be performed
3105 * after all MCs' hardware has been prepped for accessing extended config space.
3106 */
3107static int amd64_init_2nd_stage(struct amd64_pvt *pvt)
3108{
3109 int node_id = pvt->mc_node_id;
3110 struct mem_ctl_info *mci;
3111 int ret, err = 0;
3112
3113 amd64_read_mc_registers(pvt);
3114
3115 ret = -ENODEV;
3116 if (pvt->ops->probe_valid_hardware) {
3117 err = pvt->ops->probe_valid_hardware(pvt);
3118 if (err)
3119 goto err_exit;
3120 }
3121
3122 /*
3123 * We need to determine how many memory channels there are. Then use
3124 * that information for calculating the size of the dynamic instance
3125 * tables in the 'mci' structure
3126 */
3127 pvt->channel_count = pvt->ops->early_channel_count(pvt);
3128 if (pvt->channel_count < 0)
3129 goto err_exit;
3130
3131 ret = -ENOMEM;
3132 mci = edac_mc_alloc(0, CHIPSELECT_COUNT, pvt->channel_count, node_id);
3133 if (!mci)
3134 goto err_exit;
3135
3136 mci->pvt_info = pvt;
3137
3138 mci->dev = &pvt->dram_f2_ctl->dev;
3139 amd64_setup_mci_misc_attributes(mci);
3140
3141 if (amd64_init_csrows(mci))
3142 mci->edac_cap = EDAC_FLAG_NONE;
3143
3144 amd64_enable_ecc_error_reporting(mci);
3145 amd64_set_mc_sysfs_attributes(mci);
3146
3147 ret = -ENODEV;
3148 if (edac_mc_add_mc(mci)) {
3149 debugf1("failed edac_mc_add_mc()\n");
3150 goto err_add_mc;
3151 }
3152
3153 mci_lookup[node_id] = mci;
3154 pvt_lookup[node_id] = NULL;
3155 return 0;
3156
3157err_add_mc:
3158 edac_mc_free(mci);
3159
3160err_exit:
3161 debugf0("failure to init 2nd stage: ret=%d\n", ret);
3162
3163 amd64_restore_ecc_error_reporting(pvt);
3164
3165 if (boot_cpu_data.x86 > 0xf)
3166 amd64_teardown(pvt);
3167
3168 amd64_free_mc_sibling_devices(pvt);
3169
3170 kfree(pvt_lookup[pvt->mc_node_id]);
3171 pvt_lookup[node_id] = NULL;
3172
3173 return ret;
3174}
3175
3176
3177static int __devinit amd64_init_one_instance(struct pci_dev *pdev,
3178 const struct pci_device_id *mc_type)
3179{
3180 int ret = 0;
3181
3182 debugf0("(MC node=%d,mc_type='%s')\n",
3183 get_mc_node_id_from_pdev(pdev),
3184 get_amd_family_name(mc_type->driver_data));
3185
3186 ret = pci_enable_device(pdev);
3187 if (ret < 0)
3188 ret = -EIO;
3189 else
3190 ret = amd64_probe_one_instance(pdev, mc_type->driver_data);
3191
3192 if (ret < 0)
3193 debugf0("ret=%d\n", ret);
3194
3195 return ret;
3196}
3197
3198static void __devexit amd64_remove_one_instance(struct pci_dev *pdev)
3199{
3200 struct mem_ctl_info *mci;
3201 struct amd64_pvt *pvt;
3202
3203 /* Remove from EDAC CORE tracking list */
3204 mci = edac_mc_del_mc(&pdev->dev);
3205 if (!mci)
3206 return;
3207
3208 pvt = mci->pvt_info;
3209
3210 amd64_restore_ecc_error_reporting(pvt);
3211
3212 if (boot_cpu_data.x86 > 0xf)
3213 amd64_teardown(pvt);
3214
3215 amd64_free_mc_sibling_devices(pvt);
3216
3217	mci_lookup[pvt->mc_node_id] = NULL;
3218
3219	kfree(pvt);
3220	mci->pvt_info = NULL;
3221
3222 /* Free the EDAC CORE resources */
3223 edac_mc_free(mci);
3224}
3225
3226/*
3227 * This table is part of the interface for loading drivers for PCI devices. The
3228 * PCI core identifies what devices are on a system during boot and then
3229 * consults this table to see whether this driver handles a given device found.
3230 */
3231static const struct pci_device_id amd64_pci_table[] __devinitdata = {
3232 {
3233 .vendor = PCI_VENDOR_ID_AMD,
3234 .device = PCI_DEVICE_ID_AMD_K8_NB_MEMCTL,
3235 .subvendor = PCI_ANY_ID,
3236 .subdevice = PCI_ANY_ID,
3237 .class = 0,
3238 .class_mask = 0,
3239 .driver_data = K8_CPUS
3240 },
3241 {
3242 .vendor = PCI_VENDOR_ID_AMD,
3243 .device = PCI_DEVICE_ID_AMD_10H_NB_DRAM,
3244 .subvendor = PCI_ANY_ID,
3245 .subdevice = PCI_ANY_ID,
3246 .class = 0,
3247 .class_mask = 0,
3248 .driver_data = F10_CPUS
3249 },
3250 {
3251 .vendor = PCI_VENDOR_ID_AMD,
3252 .device = PCI_DEVICE_ID_AMD_11H_NB_DRAM,
3253 .subvendor = PCI_ANY_ID,
3254 .subdevice = PCI_ANY_ID,
3255 .class = 0,
3256 .class_mask = 0,
3257 .driver_data = F11_CPUS
3258 },
3259 {0, }
3260};
3261MODULE_DEVICE_TABLE(pci, amd64_pci_table);
3262
3263static struct pci_driver amd64_pci_driver = {
3264 .name = EDAC_MOD_STR,
3265 .probe = amd64_init_one_instance,
3266 .remove = __devexit_p(amd64_remove_one_instance),
3267 .id_table = amd64_pci_table,
3268};
3269
3270static void amd64_setup_pci_device(void)
3271{
3272 struct mem_ctl_info *mci;
3273 struct amd64_pvt *pvt;
3274
3275 if (amd64_ctl_pci)
3276 return;
3277
3278 mci = mci_lookup[0];
3279 if (mci) {
3280
3281 pvt = mci->pvt_info;
3282 amd64_ctl_pci =
3283 edac_pci_create_generic_ctl(&pvt->dram_f2_ctl->dev,
3284 EDAC_MOD_STR);
3285
3286 if (!amd64_ctl_pci) {
3287 pr_warning("%s(): Unable to create PCI control\n",
3288 __func__);
3289
3290 pr_warning("%s(): PCI error report via EDAC not set\n",
3291 __func__);
3292 }
3293 }
3294}
3295
3296static int __init amd64_edac_init(void)
3297{
3298 int nb, err = -ENODEV;
3299
3300 edac_printk(KERN_INFO, EDAC_MOD_STR, EDAC_AMD64_VERSION "\n");
3301
3302 opstate_init();
3303
3304 if (cache_k8_northbridges() < 0)
3305		return err;
3306
3307 err = pci_register_driver(&amd64_pci_driver);
3308 if (err)
3309 return err;
3310
3311 /*
3312 * At this point, the array 'pvt_lookup[]' contains pointers to alloc'd
3313 * amd64_pvt structs. These will be used in the 2nd stage init function
3314 * to finish initialization of the MC instances.
3315 */
3316 for (nb = 0; nb < num_k8_northbridges; nb++) {
3317 if (!pvt_lookup[nb])
3318 continue;
3319
3320 err = amd64_init_2nd_stage(pvt_lookup[nb]);
3321 if (err)
3322 goto err_exit;
3323 }
3324
3325 amd64_setup_pci_device();
3326
3327 return 0;
3328
3329err_exit:
3330 debugf0("'finish_setup' stage failed\n");
3331 pci_unregister_driver(&amd64_pci_driver);
3332
3333 return err;
3334}
3335
3336static void __exit amd64_edac_exit(void)
3337{
3338 if (amd64_ctl_pci)
3339 edac_pci_release_generic_ctl(amd64_ctl_pci);
3340
3341 pci_unregister_driver(&amd64_pci_driver);
3342}
3343
3344module_init(amd64_edac_init);
3345module_exit(amd64_edac_exit);
3346
3347MODULE_LICENSE("GPL");
3348MODULE_AUTHOR("SoftwareBitMaker: Doug Thompson, "
3349 "Dave Peterson, Thayne Harbaugh");
3350MODULE_DESCRIPTION("MC support for AMD64 memory controllers - "
3351 EDAC_AMD64_VERSION);
3352
3353module_param(edac_op_state, int, 0444);
3354MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");
diff --git a/drivers/edac/amd64_edac.h b/drivers/edac/amd64_edac.h
new file mode 100644
index 000000000000..a159957e167b
--- /dev/null
+++ b/drivers/edac/amd64_edac.h
@@ -0,0 +1,644 @@
1/*
2 * AMD64 class Memory Controller kernel module
3 *
4 * Copyright (c) 2009 SoftwareBitMaker.
5 * Copyright (c) 2009 Advanced Micro Devices, Inc.
6 *
7 * This file may be distributed under the terms of the
8 * GNU General Public License.
9 *
10 * Originally Written by Thayne Harbaugh
11 *
12 * Changes by Douglas "norsk" Thompson <dougthompson@xmission.com>:
13 * - K8 CPU Revision D and greater support
14 *
15 * Changes by Dave Peterson <dsp@llnl.gov> <dave_peterson@pobox.com>:
16 * - Module largely rewritten, with new (and hopefully correct)
17 * code for dealing with node and chip select interleaving,
18 * various code cleanup, and bug fixes
19 * - Added support for memory hoisting using DRAM hole address
20 * register
21 *
22 * Changes by Douglas "norsk" Thompson <dougthompson@xmission.com>:
23 * -K8 Rev (1207) revision support added, required Revision
24 * specific mini-driver code to support Rev F as well as
25 * prior revisions
26 *
27 * Changes by Douglas "norsk" Thompson <dougthompson@xmission.com>:
28 * -Family 10h revision support added. New PCI Device IDs,
29 *			indicating new changes. The actual register changes
30 *			were slight, smaller than in the Rev E to Rev F
31 *			transition, but changing the PCI Device ID was the
32 *			proper thing to do, as it provides for almost automatic
33 *			family detection. The mods for Rev F required more family
34 * information detection.
35 *
36 * Changes/Fixes by Borislav Petkov <borislav.petkov@amd.com>:
37 * - misc fixes and code cleanups
38 *
39 * This module is based on the following documents
40 * (available from http://www.amd.com/):
41 *
42 * Title: BIOS and Kernel Developer's Guide for AMD Athlon 64 and AMD
43 * Opteron Processors
44 * AMD publication #: 26094
45 *	Revision: 3.26
46 *
47 * Title: BIOS and Kernel Developer's Guide for AMD NPT Family 0Fh
48 * Processors
49 * AMD publication #: 32559
50 * Revision: 3.00
51 * Issue Date: May 2006
52 *
53 * Title: BIOS and Kernel Developer's Guide (BKDG) For AMD Family 10h
54 * Processors
55 * AMD publication #: 31116
56 * Revision: 3.00
57 * Issue Date: September 07, 2007
58 *
59 * Sections in the first 2 documents are no longer in sync with each other.
60 * The Family 10h BKDG was totally re-written from scratch with a new
61 * presentation model.
62 * Therefore, comments that refer to a Document section might be off.
63 */
64
65#include <linux/module.h>
66#include <linux/ctype.h>
67#include <linux/init.h>
68#include <linux/pci.h>
69#include <linux/pci_ids.h>
70#include <linux/slab.h>
71#include <linux/mmzone.h>
72#include <linux/edac.h>
73#include <asm/msr.h>
74#include "edac_core.h"
75
76#define amd64_printk(level, fmt, arg...) \
77 edac_printk(level, "amd64", fmt, ##arg)
78
79#define amd64_mc_printk(mci, level, fmt, arg...) \
80 edac_mc_chipset_printk(mci, level, "amd64", fmt, ##arg)
81
82/*
83 * Throughout the comments in this code, the following terms are used:
84 *
85 * SysAddr, DramAddr, and InputAddr
86 *
87 * These terms come directly from the amd64 documentation
88 * (AMD publication #26094). They are defined as follows:
89 *
90 * SysAddr:
91 * This is a physical address generated by a CPU core or a device
92 * doing DMA. If generated by a CPU core, a SysAddr is the result of
93 * a virtual to physical address translation by the CPU core's address
94 * translation mechanism (MMU).
95 *
96 * DramAddr:
97 * A DramAddr is derived from a SysAddr by subtracting an offset that
98 * depends on which node the SysAddr maps to and whether the SysAddr
99 * is within a range affected by memory hoisting. The DRAM Base
100 * (section 3.4.4.1) and DRAM Limit (section 3.4.4.2) registers
101 * determine which node a SysAddr maps to.
102 *
103 * If the DRAM Hole Address Register (DHAR) is enabled and the SysAddr
104 * is within the range of addresses specified by this register, then
105 * a value x from the DHAR is subtracted from the SysAddr to produce a
106 * DramAddr. Here, x represents the base address for the node that
107 * the SysAddr maps to plus an offset due to memory hoisting. See
108 * section 3.4.8 and the comments in amd64_get_dram_hole_info() and
109 * sys_addr_to_dram_addr() below for more information.
110 *
111 * If the SysAddr is not affected by the DHAR then a value y is
112 * subtracted from the SysAddr to produce a DramAddr. Here, y is the
113 * base address for the node that the SysAddr maps to. See section
114 * 3.4.4 and the comments in sys_addr_to_dram_addr() below for more
115 * information.
116 *
117 * InputAddr:
118 * A DramAddr is translated to an InputAddr before being passed to the
119 * memory controller for the node that the DramAddr is associated
120 * with. The memory controller then maps the InputAddr to a csrow.
121 * If node interleaving is not in use, then the InputAddr has the same
122 * value as the DramAddr. Otherwise, the InputAddr is produced by
123 * discarding the bits used for node interleaving from the DramAddr.
124 * See section 3.4.4 for more information.
125 *
126 * The memory controller for a given node uses its DRAM CS Base and
127 * DRAM CS Mask registers to map an InputAddr to a csrow. See
128 * sections 3.5.4 and 3.5.5 for more information.
129 */
130
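
To make the SysAddr to DramAddr step above concrete, here is a minimal sketch (not the driver's sys_addr_to_dram_addr() helper); the DHAR range decision is reduced to a caller-supplied flag so no particular range check is assumed:

static u64 sketch_sys_addr_to_dram_addr(u64 sys_addr, u64 node_base,
					u64 dhar_x, bool in_dhar_range)
{
	/* SysAddr affected by memory hoisting: subtract the DHAR value 'x' */
	if (in_dhar_range)
		return sys_addr - dhar_x;

	/* otherwise subtract 'y', the base address of the owning node */
	return sys_addr - node_base;
}
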
131#define EDAC_AMD64_VERSION " Ver: 3.2.0 " __DATE__
132#define EDAC_MOD_STR "amd64_edac"
133
134/* Extended Model from CPUID, for CPU Revision numbers */
135#define OPTERON_CPU_LE_REV_C 0
136#define OPTERON_CPU_REV_D 1
137#define OPTERON_CPU_REV_E 2
138
139/* NPT processors have the following Extended Models */
140#define OPTERON_CPU_REV_F 4
141#define OPTERON_CPU_REV_FA 5
142
143/* Hardware limit on ChipSelect rows per MC and processors per system */
144#define CHIPSELECT_COUNT 8
145#define DRAM_REG_COUNT 8
146
147
148/*
149 * PCI-defined configuration space registers
150 */
151
152
153/*
154 * Function 1 - Address Map
155 */
156#define K8_DRAM_BASE_LOW 0x40
157#define K8_DRAM_LIMIT_LOW 0x44
158#define K8_DHAR 0xf0
159
160#define DHAR_VALID BIT(0)
161#define F10_DRAM_MEM_HOIST_VALID BIT(1)
162
163#define DHAR_BASE_MASK 0xff000000
164#define dhar_base(dhar) (dhar & DHAR_BASE_MASK)
165
166#define K8_DHAR_OFFSET_MASK 0x0000ff00
167#define k8_dhar_offset(dhar) ((dhar & K8_DHAR_OFFSET_MASK) << 16)
168
169#define F10_DHAR_OFFSET_MASK 0x0000ff80
170 /* NOTE: Extra mask bit vs K8 */
171#define f10_dhar_offset(dhar) ((dhar & F10_DHAR_OFFSET_MASK) << 16)
172
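
A short sketch of how these DHAR macros can combine into the hole geometry that amd64_get_dram_hole_info() reports; the family split shown here is an assumption for illustration only:

static inline void sketch_dram_hole(u32 dhar, bool fam10h_or_later,
				    u64 *hole_base, u64 *hole_offset,
				    u64 *hole_size)
{
	*hole_base   = dhar_base(dhar);			/* hole starts here   */
	*hole_size   = (1ULL << 32) - *hole_base;	/* and runs up to 4GB */
	*hole_offset = fam10h_or_later ? f10_dhar_offset(dhar)
				       : k8_dhar_offset(dhar);
}
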
173
174/* F10 High BASE/LIMIT registers */
175#define F10_DRAM_BASE_HIGH 0x140
176#define F10_DRAM_LIMIT_HIGH 0x144
177
178
179/*
180 * Function 2 - DRAM controller
181 */
182#define K8_DCSB0 0x40
183#define F10_DCSB1 0x140
184
185#define K8_DCSB_CS_ENABLE BIT(0)
186#define K8_DCSB_NPT_SPARE BIT(1)
187#define K8_DCSB_NPT_TESTFAIL BIT(2)
188
189/*
190 * REV E: select [31:21] and [15:9] from DCSB and the shift amount to form
191 * the address
192 */
193#define REV_E_DCSB_BASE_BITS (0xFFE0FE00ULL)
194#define REV_E_DCS_SHIFT 4
195#define REV_E_DCSM_COUNT 8
196
197#define REV_F_F1Xh_DCSB_BASE_BITS (0x1FF83FE0ULL)
198#define REV_F_F1Xh_DCS_SHIFT 8
199
200/*
201 * REV F and later: selects [28:19] and [13:5] from DCSB and the shift amount
202 * to form the address
203 */
204#define REV_F_DCSB_BASE_BITS (0x1FF83FE0ULL)
205#define REV_F_DCS_SHIFT 8
206#define REV_F_DCSM_COUNT 4
207#define F10_DCSM_COUNT 4
208#define F11_DCSM_COUNT 2
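
As a hedged illustration of the mask/shift pairs above (the driver caches the per-revision values in pvt->dcsb_base and pvt->dcs_shift once the CPU revision is known), a chip select base address would be rebuilt roughly like this:

static inline u64 sketch_cs_base(u32 dcsb, u64 base_bits, int dcs_shift)
{
	/* e.g. Rev E: (dcsb & REV_E_DCSB_BASE_BITS) << REV_E_DCS_SHIFT */
	return ((u64)(dcsb & base_bits)) << dcs_shift;
}
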
209
210/* DRAM CS Mask Registers */
211#define K8_DCSM0 0x60
212#define F10_DCSM1 0x160
213
214/* REV E: select [29:21] and [15:9] from DCSM */
215#define REV_E_DCSM_MASK_BITS 0x3FE0FE00
216
217/* unused bits [24:20] and [12:0] */
218#define REV_E_DCS_NOTUSED_BITS 0x01F01FFF
219
220/* REV F and later: select [28:19] and [13:5] from DCSM */
221#define REV_F_F1Xh_DCSM_MASK_BITS 0x1FF83FE0
222
223/* unused bits [26:22] and [12:0] */
224#define REV_F_F1Xh_DCS_NOTUSED_BITS 0x07C01FFF
225
226#define DBAM0 0x80
227#define DBAM1 0x180
228
229/* Extract the DIMM 'type' on the i'th DIMM from the DBAM reg value passed */
230#define DBAM_DIMM(i, reg) ((((reg) >> (4*i))) & 0xF)
231
232#define DBAM_MAX_VALUE 11
233
234
235#define F10_DCLR_0 0x90
236#define F10_DCLR_1 0x190
237#define REVE_WIDTH_128 BIT(16)
238#define F10_WIDTH_128 BIT(11)
239
240
241#define F10_DCHR_0 0x94
242#define F10_DCHR_1 0x194
243
244#define F10_DCHR_FOUR_RANK_DIMM BIT(18)
245#define F10_DCHR_Ddr3Mode BIT(8)
246#define F10_DCHR_MblMode BIT(6)
247
248
249#define F10_DCTL_SEL_LOW 0x110
250
251#define dct_sel_baseaddr(pvt) \
252 ((pvt->dram_ctl_select_low) & 0xFFFFF800)
253
254#define dct_sel_interleave_addr(pvt) \
255 (((pvt->dram_ctl_select_low) >> 6) & 0x3)
256
257enum {
258 F10_DCTL_SEL_LOW_DctSelHiRngEn = BIT(0),
259 F10_DCTL_SEL_LOW_DctSelIntLvEn = BIT(2),
260 F10_DCTL_SEL_LOW_DctGangEn = BIT(4),
261 F10_DCTL_SEL_LOW_DctDatIntLv = BIT(5),
262 F10_DCTL_SEL_LOW_DramEnable = BIT(8),
263 F10_DCTL_SEL_LOW_MemCleared = BIT(10),
264};
265
266#define dct_high_range_enabled(pvt) \
267 (pvt->dram_ctl_select_low & F10_DCTL_SEL_LOW_DctSelHiRngEn)
268
269#define dct_interleave_enabled(pvt) \
270 (pvt->dram_ctl_select_low & F10_DCTL_SEL_LOW_DctSelIntLvEn)
271
272#define dct_ganging_enabled(pvt) \
273 (pvt->dram_ctl_select_low & F10_DCTL_SEL_LOW_DctGangEn)
274
275#define dct_data_intlv_enabled(pvt) \
276 (pvt->dram_ctl_select_low & F10_DCTL_SEL_LOW_DctDatIntLv)
277
278#define dct_dram_enabled(pvt) \
279 (pvt->dram_ctl_select_low & F10_DCTL_SEL_LOW_DramEnable)
280
281#define dct_memory_cleared(pvt) \
282 (pvt->dram_ctl_select_low & F10_DCTL_SEL_LOW_MemCleared)
283
284
285#define F10_DCTL_SEL_HIGH 0x114
286
287
288/*
289 * Function 3 - Misc Control
290 */
291#define K8_NBCTL 0x40
292
293/* Correctable ECC error reporting enable */
294#define K8_NBCTL_CECCEn BIT(0)
295
296/* UnCorrectable ECC error reporting enable */
297#define K8_NBCTL_UECCEn BIT(1)
298
299#define K8_NBCFG 0x44
300#define K8_NBCFG_CHIPKILL BIT(23)
301#define K8_NBCFG_ECC_ENABLE BIT(22)
302
303#define K8_NBSL 0x48
304
305
306#define EXTRACT_HIGH_SYNDROME(x) (((x) >> 24) & 0xff)
307#define EXTRACT_EXT_ERROR_CODE(x) (((x) >> 16) & 0x1f)
308
309/* Family F10h: Normalized Extended Error Codes */
310#define F10_NBSL_EXT_ERR_RES 0x0
311#define F10_NBSL_EXT_ERR_CRC 0x1
312#define F10_NBSL_EXT_ERR_SYNC 0x2
313#define F10_NBSL_EXT_ERR_MST 0x3
314#define F10_NBSL_EXT_ERR_TGT 0x4
315#define F10_NBSL_EXT_ERR_GART 0x5
316#define F10_NBSL_EXT_ERR_RMW 0x6
317#define F10_NBSL_EXT_ERR_WDT 0x7
318#define F10_NBSL_EXT_ERR_ECC 0x8
319#define F10_NBSL_EXT_ERR_DEV 0x9
320#define F10_NBSL_EXT_ERR_LINK_DATA 0xA
321
322/* Next two are overloaded values */
323#define F10_NBSL_EXT_ERR_LINK_PROTO 0xB
324#define F10_NBSL_EXT_ERR_L3_PROTO 0xB
325
326#define F10_NBSL_EXT_ERR_NB_ARRAY 0xC
327#define F10_NBSL_EXT_ERR_DRAM_PARITY 0xD
328#define F10_NBSL_EXT_ERR_LINK_RETRY 0xE
329
330/* Next two are overloaded values */
331#define F10_NBSL_EXT_ERR_GART_WALK 0xF
332#define F10_NBSL_EXT_ERR_DEV_WALK 0xF
333
334/* 0x10 to 0x1B: Reserved */
335#define F10_NBSL_EXT_ERR_L3_DATA 0x1C
336#define F10_NBSL_EXT_ERR_L3_TAG 0x1D
337#define F10_NBSL_EXT_ERR_L3_LRU 0x1E
338
339/* K8: Normalized Extended Error Codes */
340#define K8_NBSL_EXT_ERR_ECC 0x0
341#define K8_NBSL_EXT_ERR_CRC 0x1
342#define K8_NBSL_EXT_ERR_SYNC 0x2
343#define K8_NBSL_EXT_ERR_MST 0x3
344#define K8_NBSL_EXT_ERR_TGT 0x4
345#define K8_NBSL_EXT_ERR_GART 0x5
346#define K8_NBSL_EXT_ERR_RMW 0x6
347#define K8_NBSL_EXT_ERR_WDT 0x7
348#define K8_NBSL_EXT_ERR_CHIPKILL_ECC 0x8
349#define K8_NBSL_EXT_ERR_DRAM_PARITY 0xD
350
351#define EXTRACT_ERROR_CODE(x) ((x) & 0xffff)
352#define TEST_TLB_ERROR(x) (((x) & 0xFFF0) == 0x0010)
353#define TEST_MEM_ERROR(x) (((x) & 0xFF00) == 0x0100)
354#define TEST_BUS_ERROR(x) (((x) & 0xF800) == 0x0800)
355#define EXTRACT_TT_CODE(x) (((x) >> 2) & 0x3)
356#define EXTRACT_II_CODE(x) (((x) >> 2) & 0x3)
357#define EXTRACT_LL_CODE(x) (((x) >> 0) & 0x3)
358#define EXTRACT_RRRR_CODE(x) (((x) >> 4) & 0xf)
359#define EXTRACT_TO_CODE(x) (((x) >> 8) & 0x1)
360#define EXTRACT_PP_CODE(x) (((x) >> 9) & 0x3)
361
362/*
363 * The following are for BUS type errors AFTER values have been normalized by
364 * shifting right
365 */
366#define K8_NBSL_PP_SRC 0x0
367#define K8_NBSL_PP_RES 0x1
368#define K8_NBSL_PP_OBS 0x2
369#define K8_NBSL_PP_GENERIC 0x3
370
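
A minimal sketch of how the extractors above classify the low half of NBSL; this is illustrative only and is not the driver's reporting path (the message strings themselves live in amd64_edac_err_types.c):

static inline void sketch_classify_nbsl(u32 nbsl)
{
	u16 ec = EXTRACT_ERROR_CODE(nbsl);

	if (TEST_MEM_ERROR(ec))		/* memory transaction error */
		pr_debug("MEM: RRRR=%u TT=%u LL=%u\n", EXTRACT_RRRR_CODE(ec),
			 EXTRACT_TT_CODE(ec), EXTRACT_LL_CODE(ec));
	else if (TEST_BUS_ERROR(ec))	/* bus/participation error */
		pr_debug("BUS: PP=%u TO=%u II=%u LL=%u\n", EXTRACT_PP_CODE(ec),
			 EXTRACT_TO_CODE(ec), EXTRACT_II_CODE(ec),
			 EXTRACT_LL_CODE(ec));
	else if (TEST_TLB_ERROR(ec))	/* TLB error */
		pr_debug("TLB: TT=%u LL=%u\n", EXTRACT_TT_CODE(ec),
			 EXTRACT_LL_CODE(ec));
}
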
371
372#define K8_NBSH 0x4C
373
374#define K8_NBSH_VALID_BIT BIT(31)
375#define K8_NBSH_OVERFLOW BIT(30)
376#define K8_NBSH_UNCORRECTED_ERR BIT(29)
377#define K8_NBSH_ERR_ENABLE BIT(28)
378#define K8_NBSH_MISC_ERR_VALID BIT(27)
379#define K8_NBSH_VALID_ERROR_ADDR BIT(26)
380#define K8_NBSH_PCC BIT(25)
381#define K8_NBSH_CECC BIT(14)
382#define K8_NBSH_UECC BIT(13)
383#define K8_NBSH_ERR_SCRUBER BIT(8)
384#define K8_NBSH_CORE3 BIT(3)
385#define K8_NBSH_CORE2 BIT(2)
386#define K8_NBSH_CORE1 BIT(1)
387#define K8_NBSH_CORE0 BIT(0)
388
389#define EXTRACT_LDT_LINK(x) (((x) >> 4) & 0x7)
390#define EXTRACT_ERR_CPU_MAP(x) ((x) & 0xF)
391#define EXTRACT_LOW_SYNDROME(x) (((x) >> 15) & 0xff)
392
393
394#define K8_NBEAL 0x50
395#define K8_NBEAH 0x54
396#define K8_SCRCTRL 0x58
397
398#define F10_NB_CFG_LOW 0x88
399#define F10_NB_CFG_LOW_ENABLE_EXT_CFG BIT(14)
400
401#define F10_NB_CFG_HIGH 0x8C
402
403#define F10_ONLINE_SPARE 0xB0
404#define F10_ONLINE_SPARE_SWAPDONE0(x) ((x) & BIT(1))
405#define F10_ONLINE_SPARE_SWAPDONE1(x) ((x) & BIT(3))
406#define F10_ONLINE_SPARE_BADDRAM_CS0(x) (((x) >> 4) & 0x00000007)
407#define F10_ONLINE_SPARE_BADDRAM_CS1(x) (((x) >> 8) & 0x00000007)
408
409#define F10_NB_ARRAY_ADDR 0xB8
410
411#define F10_NB_ARRAY_DRAM_ECC 0x80000000
412
413/* Bits [2:1] are used to select 16-byte section within a 64-byte cacheline */
414#define SET_NB_ARRAY_ADDRESS(section) (((section) & 0x3) << 1)
415
416#define F10_NB_ARRAY_DATA 0xBC
417
418#define SET_NB_DRAM_INJECTION_WRITE(word, bits) \
419 (BIT(((word) & 0xF) + 20) | \
420 BIT(17) | \
421 ((bits) & 0xF))
422
423#define SET_NB_DRAM_INJECTION_READ(word, bits) \
424 (BIT(((word) & 0xF) + 20) | \
425 BIT(16) | \
426 ((bits) & 0xF))
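
A worked example of the write-injection encoding above, as consumed by amd64_inject_write_store() in amd64_edac_inj.c:

/*
 * SET_NB_DRAM_INJECTION_WRITE(2, 0x5)
 *	== BIT(22) | BIT(17) | 0x5
 * i.e. select 16-bit word 2, set the write-injection trigger bit and supply
 * ECC vector 0x5; the resulting value is written to F10_NB_ARRAY_DATA.
 */
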
427
428#define K8_NBCAP 0xE8
429#define K8_NBCAP_CORES (BIT(12)|BIT(13))
430#define K8_NBCAP_CHIPKILL BIT(4)
431#define K8_NBCAP_SECDED BIT(3)
432#define K8_NBCAP_8_NODE BIT(2)
433#define K8_NBCAP_DUAL_NODE BIT(1)
434#define K8_NBCAP_DCT_DUAL BIT(0)
435
436/*
437 * MSR Regs
438 */
439#define K8_MSR_MCGCTL 0x017b
440#define K8_MSR_MCGCTL_NBE BIT(4)
441
442#define K8_MSR_MC4CTL 0x0410
443#define K8_MSR_MC4STAT 0x0411
444#define K8_MSR_MC4ADDR 0x0412
445
446/* AMD sets the first MC device at device ID 0x18. */
447static inline int get_mc_node_id_from_pdev(struct pci_dev *pdev)
448{
449 return PCI_SLOT(pdev->devfn) - 0x18;
450}
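
For illustration: the node 1 memory controller device sits at PCI slot 0x19, so PCI_SLOT(pdev->devfn) - 0x18 evaluates to 1.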
451
452enum amd64_chipset_families {
453 K8_CPUS = 0,
454 F10_CPUS,
455 F11_CPUS,
456};
457
458/*
459 * Structure to hold:
460 *
461 * 1) dynamically read status and error address HW registers
462 * 2) sysfs entered values
463 * 3) MCE values
464 *
465 * Depends on entry into the modules
466 */
467struct amd64_error_info_regs {
468 u32 nbcfg;
469 u32 nbsh;
470 u32 nbsl;
471 u32 nbeah;
472 u32 nbeal;
473};
474
475/* Error injection control structure */
476struct error_injection {
477 u32 section;
478 u32 word;
479 u32 bit_map;
480};
481
482struct amd64_pvt {
483 /* pci_device handles which we utilize */
484 struct pci_dev *addr_f1_ctl;
485 struct pci_dev *dram_f2_ctl;
486 struct pci_dev *misc_f3_ctl;
487
488 int mc_node_id; /* MC index of this MC node */
489 int ext_model; /* extended model value of this node */
490
491 struct low_ops *ops; /* pointer to per PCI Device ID func table */
492
493 int channel_count;
494
495 /* Raw registers */
496 u32 dclr0; /* DRAM Configuration Low DCT0 reg */
497 u32 dclr1; /* DRAM Configuration Low DCT1 reg */
498 u32 dchr0; /* DRAM Configuration High DCT0 reg */
499 u32 dchr1; /* DRAM Configuration High DCT1 reg */
500 u32 nbcap; /* North Bridge Capabilities */
501 u32 nbcfg; /* F10 North Bridge Configuration */
502 u32 ext_nbcfg; /* Extended F10 North Bridge Configuration */
503 u32 dhar; /* DRAM Hoist reg */
504 u32 dbam0; /* DRAM Base Address Mapping reg for DCT0 */
505 u32 dbam1; /* DRAM Base Address Mapping reg for DCT1 */
506
507 /* DRAM CS Base Address Registers F2x[1,0][5C:40] */
508 u32 dcsb0[CHIPSELECT_COUNT];
509 u32 dcsb1[CHIPSELECT_COUNT];
510
511 /* DRAM CS Mask Registers F2x[1,0][6C:60] */
512 u32 dcsm0[CHIPSELECT_COUNT];
513 u32 dcsm1[CHIPSELECT_COUNT];
514
515 /*
516 * Decoded parts of DRAM BASE and LIMIT Registers
517 * F1x[78,70,68,60,58,50,48,40]
518 */
519 u64 dram_base[DRAM_REG_COUNT];
520 u64 dram_limit[DRAM_REG_COUNT];
521 u8 dram_IntlvSel[DRAM_REG_COUNT];
522 u8 dram_IntlvEn[DRAM_REG_COUNT];
523 u8 dram_DstNode[DRAM_REG_COUNT];
524 u8 dram_rw_en[DRAM_REG_COUNT];
525
526 /*
527 * The following fields are set at (load) run time, after CPU revision
528 * has been determined, since the dct_base and dct_mask registers vary
529 * based on revision
530 */
531 u32 dcsb_base; /* DCSB base bits */
532 u32 dcsm_mask; /* DCSM mask bits */
533 u32 num_dcsm; /* Number of DCSM registers */
534 u32 dcs_mask_notused; /* DCSM notused mask bits */
535 u32 dcs_shift; /* DCSB and DCSM shift value */
536
537 u64 top_mem; /* top of memory below 4GB */
538 u64 top_mem2; /* top of memory above 4GB */
539
540 u32 dram_ctl_select_low; /* DRAM Controller Select Low Reg */
541 u32 dram_ctl_select_high; /* DRAM Controller Select High Reg */
542 u32 online_spare; /* On-Line spare Reg */
543
544 /* temp storage for when input is received from sysfs */
545 struct amd64_error_info_regs ctl_error_info;
546
547 /* place to store error injection parameters prior to issue */
548 struct error_injection injection;
549
550 /* Save old hw registers' values before we modified them */
551 u32 nbctl_mcgctl_saved; /* When true, following 2 are valid */
552 u32 old_nbctl;
553 unsigned long old_mcgctl; /* per core on this node */
554
555 /* MC Type Index value: socket F vs Family 10h */
556 u32 mc_type_index;
557
558 /* misc settings */
559 struct flags {
560 unsigned long cf8_extcfg:1;
561 } flags;
562};
563
564struct scrubrate {
565 u32 scrubval; /* bit pattern for scrub rate */
566 u32 bandwidth; /* bandwidth consumed (bytes/sec) */
567};
568
569extern struct scrubrate scrubrates[23];
570extern u32 revf_quad_ddr2_shift[16];
571extern const char *tt_msgs[4];
572extern const char *ll_msgs[4];
573extern const char *rrrr_msgs[16];
574extern const char *to_msgs[2];
575extern const char *pp_msgs[4];
576extern const char *ii_msgs[4];
577extern const char *ext_msgs[32];
578extern const char *htlink_msgs[8];
579
580#ifdef CONFIG_EDAC_DEBUG
581#define NUM_DBG_ATTRS 9
582#else
583#define NUM_DBG_ATTRS 0
584#endif
585
586#ifdef CONFIG_EDAC_AMD64_ERROR_INJECTION
587#define NUM_INJ_ATTRS 5
588#else
589#define NUM_INJ_ATTRS 0
590#endif
591
592extern struct mcidev_sysfs_attribute amd64_dbg_attrs[NUM_DBG_ATTRS],
593 amd64_inj_attrs[NUM_INJ_ATTRS];
594
595/*
596 * Each of the PCI Device IDs types have their own set of hardware accessor
597 * functions and per device encoding/decoding logic.
598 */
599struct low_ops {
600 int (*probe_valid_hardware)(struct amd64_pvt *pvt);
601 int (*early_channel_count)(struct amd64_pvt *pvt);
602
603 u64 (*get_error_address)(struct mem_ctl_info *mci,
604 struct amd64_error_info_regs *info);
605 void (*read_dram_base_limit)(struct amd64_pvt *pvt, int dram);
606 void (*read_dram_ctl_register)(struct amd64_pvt *pvt);
607 void (*map_sysaddr_to_csrow)(struct mem_ctl_info *mci,
608 struct amd64_error_info_regs *info,
609 u64 SystemAddr);
610 int (*dbam_map_to_pages)(struct amd64_pvt *pvt, int dram_map);
611};
612
613struct amd64_family_type {
614 const char *ctl_name;
615 u16 addr_f1_ctl;
616 u16 misc_f3_ctl;
617 struct low_ops ops;
618};
619
620static struct amd64_family_type amd64_family_types[];
621
622static inline const char *get_amd_family_name(int index)
623{
624 return amd64_family_types[index].ctl_name;
625}
626
627static inline struct low_ops *family_ops(int index)
628{
629 return &amd64_family_types[index].ops;
630}
631
632/*
633 * For future CPU versions, verify the following as new 'slow' rates appear and
634 * modify the necessary skip values for the supported CPU.
635 */
636#define K8_MIN_SCRUB_RATE_BITS 0x0
637#define F10_MIN_SCRUB_RATE_BITS 0x5
638#define F11_MIN_SCRUB_RATE_BITS 0x6
639
640int amd64_process_error_info(struct mem_ctl_info *mci,
641 struct amd64_error_info_regs *info,
642 int handle_errors);
643int amd64_get_dram_hole_info(struct mem_ctl_info *mci, u64 *hole_base,
644 u64 *hole_offset, u64 *hole_size);
diff --git a/drivers/edac/amd64_edac_dbg.c b/drivers/edac/amd64_edac_dbg.c
new file mode 100644
index 000000000000..0a41b248a4ad
--- /dev/null
+++ b/drivers/edac/amd64_edac_dbg.c
@@ -0,0 +1,255 @@
1#include "amd64_edac.h"
2
3/*
4 * accept a hex value and store it into the virtual error register file, field:
5 * nbeal and nbeah. Assume virtual error values have already been set for: NBSL,
6 * NBSH and NBCFG. Then proceed to map the error values to a MC, CSROW and
7 * CHANNEL
8 */
9static ssize_t amd64_nbea_store(struct mem_ctl_info *mci, const char *data,
10 size_t count)
11{
12 struct amd64_pvt *pvt = mci->pvt_info;
13 unsigned long long value;
14 int ret = 0;
15
16 ret = strict_strtoull(data, 16, &value);
17 if (ret != -EINVAL) {
18 debugf0("received NBEA= 0x%llx\n", value);
19
20 /* place the value into the virtual error packet */
21 pvt->ctl_error_info.nbeal = (u32) value;
22 value >>= 32;
23 pvt->ctl_error_info.nbeah = (u32) value;
24
25 /* Process the Mapping request */
26 /* TODO: Add race prevention */
27 amd64_process_error_info(mci, &pvt->ctl_error_info, 1);
28
29 return count;
30 }
31 return ret;
32}
33
34/* display the last NBEA (MCA NB Address (MC4_ADDR)) value written */
35static ssize_t amd64_nbea_show(struct mem_ctl_info *mci, char *data)
36{
37 struct amd64_pvt *pvt = mci->pvt_info;
38 u64 value;
39
40 value = pvt->ctl_error_info.nbeah;
41 value <<= 32;
42 value |= pvt->ctl_error_info.nbeal;
43
44 return sprintf(data, "%llx\n", value);
45}
46
47/* store the NBSL (MCA NB Status Low (MC4_STATUS)) value user desires */
48static ssize_t amd64_nbsl_store(struct mem_ctl_info *mci, const char *data,
49 size_t count)
50{
51 struct amd64_pvt *pvt = mci->pvt_info;
52 unsigned long value;
53 int ret = 0;
54
55 ret = strict_strtoul(data, 16, &value);
56 if (ret != -EINVAL) {
57 debugf0("received NBSL= 0x%lx\n", value);
58
59 pvt->ctl_error_info.nbsl = (u32) value;
60
61 return count;
62 }
63 return ret;
64}
65
66/* display the last NBSL value written */
67static ssize_t amd64_nbsl_show(struct mem_ctl_info *mci, char *data)
68{
69 struct amd64_pvt *pvt = mci->pvt_info;
70 u32 value;
71
72 value = pvt->ctl_error_info.nbsl;
73
74 return sprintf(data, "%x\n", value);
75}
76
77/* store the NBSH (MCA NB Status High) value user desires */
78static ssize_t amd64_nbsh_store(struct mem_ctl_info *mci, const char *data,
79 size_t count)
80{
81 struct amd64_pvt *pvt = mci->pvt_info;
82 unsigned long value;
83 int ret = 0;
84
85 ret = strict_strtoul(data, 16, &value);
86 if (ret != -EINVAL) {
87 debugf0("received NBSH= 0x%lx\n", value);
88
89 pvt->ctl_error_info.nbsh = (u32) value;
90
91 return count;
92 }
93 return ret;
94}
95
96/* display the last NBSH value written */
97static ssize_t amd64_nbsh_show(struct mem_ctl_info *mci, char *data)
98{
99 struct amd64_pvt *pvt = mci->pvt_info;
100 u32 value;
101
102 value = pvt->ctl_error_info.nbsh;
103
104 return sprintf(data, "%x\n", value);
105}
106
107/* accept and store the NBCFG (MCA NB Configuration) value user desires */
108static ssize_t amd64_nbcfg_store(struct mem_ctl_info *mci,
109 const char *data, size_t count)
110{
111 struct amd64_pvt *pvt = mci->pvt_info;
112 unsigned long value;
113 int ret = 0;
114
115 ret = strict_strtoul(data, 16, &value);
116 if (ret != -EINVAL) {
117 debugf0("received NBCFG= 0x%lx\n", value);
118
119 pvt->ctl_error_info.nbcfg = (u32) value;
120
121 return count;
122 }
123 return ret;
124}
125
126/* various show routines for the controls of a MCI */
127static ssize_t amd64_nbcfg_show(struct mem_ctl_info *mci, char *data)
128{
129 struct amd64_pvt *pvt = mci->pvt_info;
130
131 return sprintf(data, "%x\n", pvt->ctl_error_info.nbcfg);
132}
133
134
135static ssize_t amd64_dhar_show(struct mem_ctl_info *mci, char *data)
136{
137 struct amd64_pvt *pvt = mci->pvt_info;
138
139 return sprintf(data, "%x\n", pvt->dhar);
140}
141
142
143static ssize_t amd64_dbam_show(struct mem_ctl_info *mci, char *data)
144{
145 struct amd64_pvt *pvt = mci->pvt_info;
146
147 return sprintf(data, "%x\n", pvt->dbam0);
148}
149
150
151static ssize_t amd64_topmem_show(struct mem_ctl_info *mci, char *data)
152{
153 struct amd64_pvt *pvt = mci->pvt_info;
154
155 return sprintf(data, "%llx\n", pvt->top_mem);
156}
157
158
159static ssize_t amd64_topmem2_show(struct mem_ctl_info *mci, char *data)
160{
161 struct amd64_pvt *pvt = mci->pvt_info;
162
163 return sprintf(data, "%llx\n", pvt->top_mem2);
164}
165
166static ssize_t amd64_hole_show(struct mem_ctl_info *mci, char *data)
167{
168 u64 hole_base = 0;
169 u64 hole_offset = 0;
170 u64 hole_size = 0;
171
172 amd64_get_dram_hole_info(mci, &hole_base, &hole_offset, &hole_size);
173
174 return sprintf(data, "%llx %llx %llx\n", hole_base, hole_offset,
175 hole_size);
176}
177
178/*
179 * update NUM_DBG_ATTRS in case you add new members
180 */
181struct mcidev_sysfs_attribute amd64_dbg_attrs[] = {
182
183 {
184 .attr = {
185 .name = "nbea_ctl",
186 .mode = (S_IRUGO | S_IWUSR)
187 },
188 .show = amd64_nbea_show,
189 .store = amd64_nbea_store,
190 },
191 {
192 .attr = {
193 .name = "nbsl_ctl",
194 .mode = (S_IRUGO | S_IWUSR)
195 },
196 .show = amd64_nbsl_show,
197 .store = amd64_nbsl_store,
198 },
199 {
200 .attr = {
201 .name = "nbsh_ctl",
202 .mode = (S_IRUGO | S_IWUSR)
203 },
204 .show = amd64_nbsh_show,
205 .store = amd64_nbsh_store,
206 },
207 {
208 .attr = {
209 .name = "nbcfg_ctl",
210 .mode = (S_IRUGO | S_IWUSR)
211 },
212 .show = amd64_nbcfg_show,
213 .store = amd64_nbcfg_store,
214 },
215 {
216 .attr = {
217 .name = "dhar",
218 .mode = (S_IRUGO)
219 },
220 .show = amd64_dhar_show,
221 .store = NULL,
222 },
223 {
224 .attr = {
225 .name = "dbam",
226 .mode = (S_IRUGO)
227 },
228 .show = amd64_dbam_show,
229 .store = NULL,
230 },
231 {
232 .attr = {
233 .name = "topmem",
234 .mode = (S_IRUGO)
235 },
236 .show = amd64_topmem_show,
237 .store = NULL,
238 },
239 {
240 .attr = {
241 .name = "topmem2",
242 .mode = (S_IRUGO)
243 },
244 .show = amd64_topmem2_show,
245 .store = NULL,
246 },
247 {
248 .attr = {
249 .name = "dram_hole",
250 .mode = (S_IRUGO)
251 },
252 .show = amd64_hole_show,
253 .store = NULL,
254 },
255};
diff --git a/drivers/edac/amd64_edac_err_types.c b/drivers/edac/amd64_edac_err_types.c
new file mode 100644
index 000000000000..f212ff12a9d8
--- /dev/null
+++ b/drivers/edac/amd64_edac_err_types.c
@@ -0,0 +1,161 @@
1#include "amd64_edac.h"
2
3/*
4 * See F2x80 for K8 and F2x[1,0]80 for Fam10 and later. The table below is only
5 * for DDR2 DRAM mapping.
6 */
7u32 revf_quad_ddr2_shift[] = {
8 0, /* 0000b NULL DIMM (128mb) */
9 28, /* 0001b 256mb */
10 29, /* 0010b 512mb */
11 29, /* 0011b 512mb */
12 29, /* 0100b 512mb */
13 30, /* 0101b 1gb */
14 30, /* 0110b 1gb */
15 31, /* 0111b 2gb */
16 31, /* 1000b 2gb */
17 32, /* 1001b 4gb */
18 32, /* 1010b 4gb */
19 33, /* 1011b 8gb */
20 0, /* 1100b future */
21 0, /* 1101b future */
22 0, /* 1110b future */
23 0 /* 1111b future */
24};
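
A brief sketch tying this table to the DBAM_DIMM() extractor from amd64_edac.h; treating the zero entries as "not populated / future" is an assumption for illustration:

static inline u64 sketch_cs_size_bytes(u32 dbam, int i)
{
	u32 shift = revf_quad_ddr2_shift[DBAM_DIMM(i, dbam)];

	/* e.g. a DBAM nibble of 0101b gives 1ULL << 30 == 1GB */
	return shift ? (1ULL << shift) : 0;
}
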
25
26/*
27 * Valid scrub rates for the K8 hardware memory scrubber. We map the scrubbing
28 * bandwidth to a valid bit pattern. The 'set' operation finds the matching or
29 * next higher value (see the sketch after the table below).
30 *
31 * FIXME: Produce a better mapping/linearisation.
32 */
33
34struct scrubrate scrubrates[] = {
35 { 0x01, 1600000000UL},
36 { 0x02, 800000000UL},
37 { 0x03, 400000000UL},
38 { 0x04, 200000000UL},
39 { 0x05, 100000000UL},
40 { 0x06, 50000000UL},
41 { 0x07, 25000000UL},
42 { 0x08, 12284069UL},
43 { 0x09, 6274509UL},
44 { 0x0A, 3121951UL},
45 { 0x0B, 1560975UL},
46 { 0x0C, 781440UL},
47 { 0x0D, 390720UL},
48 { 0x0E, 195300UL},
49 { 0x0F, 97650UL},
50 { 0x10, 48854UL},
51 { 0x11, 24427UL},
52 { 0x12, 12213UL},
53 { 0x13, 6101UL},
54 { 0x14, 3051UL},
55 { 0x15, 1523UL},
56 { 0x16, 761UL},
57 { 0x00, 0UL}, /* scrubbing off */
58};
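
The "matching or next higher value" selection could look roughly like the following sketch: scan the table in order (bandwidths are descending) and take the first entry whose bandwidth does not exceed the requested one. The per-family minimum scrub value honoured by the real setter is omitted here, so this is only an approximation of the lookup.

static u32 sketch_pick_scrubval(u32 requested_bw)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(scrubrates) - 1; i++)
		if (scrubrates[i].bandwidth <= requested_bw)
			break;

	return scrubrates[i].scrubval;	/* last entry (0x00) disables scrubbing */
}
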
59
60/*
61 * string representation for the different MCA reported error types, see F3x48
62 * or MSR0000_0411.
63 */
64const char *tt_msgs[] = { /* transaction type */
65 "instruction",
66 "data",
67 "generic",
68 "reserved"
69};
70
71const char *ll_msgs[] = { /* cache level */
72 "L0",
73 "L1",
74 "L2",
75 "L3/generic"
76};
77
78const char *rrrr_msgs[] = {
79 "generic",
80 "generic read",
81 "generic write",
82 "data read",
83 "data write",
84 "inst fetch",
85 "prefetch",
86 "evict",
87 "snoop",
88 "reserved RRRR= 9",
89 "reserved RRRR= 10",
90 "reserved RRRR= 11",
91 "reserved RRRR= 12",
92 "reserved RRRR= 13",
93 "reserved RRRR= 14",
94 "reserved RRRR= 15"
95};
96
97const char *pp_msgs[] = { /* participating processor */
98 "local node originated (SRC)",
99 "local node responded to request (RES)",
100 "local node observed as 3rd party (OBS)",
101 "generic"
102};
103
104const char *to_msgs[] = {
105 "no timeout",
106 "timed out"
107};
108
109const char *ii_msgs[] = { /* memory or i/o */
110 "mem access",
111 "reserved",
112 "i/o access",
113 "generic"
114};
115
116/* Map the 5 bits of Extended Error code to the string table. */
117const char *ext_msgs[] = { /* extended error */
118 "K8 ECC error/F10 reserved", /* 0_0000b */
119 "CRC error", /* 0_0001b */
120 "sync error", /* 0_0010b */
121 "mst abort", /* 0_0011b */
122 "tgt abort", /* 0_0100b */
123 "GART error", /* 0_0101b */
124 "RMW error", /* 0_0110b */
125 "Wdog timer error", /* 0_0111b */
126 "F10-ECC/K8-Chipkill error", /* 0_1000b */
127 "DEV Error", /* 0_1001b */
128 "Link Data error", /* 0_1010b */
129 "Link or L3 Protocol error", /* 0_1011b */
130 "NB Array error", /* 0_1100b */
131 "DRAM Parity error", /* 0_1101b */
132 "Link Retry/GART Table Walk/DEV Table Walk error", /* 0_1110b */
133 "Res 0x0ff error", /* 0_1111b */
134 "Res 0x100 error", /* 1_0000b */
135 "Res 0x101 error", /* 1_0001b */
136 "Res 0x102 error", /* 1_0010b */
137 "Res 0x103 error", /* 1_0011b */
138 "Res 0x104 error", /* 1_0100b */
139 "Res 0x105 error", /* 1_0101b */
140 "Res 0x106 error", /* 1_0110b */
141 "Res 0x107 error", /* 1_0111b */
142 "Res 0x108 error", /* 1_1000b */
143 "Res 0x109 error", /* 1_1001b */
144 "Res 0x10A error", /* 1_1010b */
145 "Res 0x10B error", /* 1_1011b */
146 "L3 Cache Data error", /* 1_1100b */
147 "L3 CacheTag error", /* 1_1101b */
148 "L3 Cache LRU error", /* 1_1110b */
149 "Res 0x1FF error" /* 1_1111b */
150};
151
152const char *htlink_msgs[] = {
153 "none",
154 "1",
155 "2",
156 "1 2",
157 "3",
158 "1 3",
159 "2 3",
160 "1 2 3"
161};
diff --git a/drivers/edac/amd64_edac_inj.c b/drivers/edac/amd64_edac_inj.c
new file mode 100644
index 000000000000..d3675b76b3a7
--- /dev/null
+++ b/drivers/edac/amd64_edac_inj.c
@@ -0,0 +1,185 @@
1#include "amd64_edac.h"
2
3/*
4 * store error injection section value which refers to one of 4 16-byte sections
5 * within a 64-byte cacheline
6 *
7 * range: 0..3
8 */
9static ssize_t amd64_inject_section_store(struct mem_ctl_info *mci,
10 const char *data, size_t count)
11{
12 struct amd64_pvt *pvt = mci->pvt_info;
13 unsigned long value;
14 int ret = 0;
15
16 ret = strict_strtoul(data, 10, &value);
17 if (ret != -EINVAL) {
18 pvt->injection.section = (u32) value;
19 return count;
20 }
21 return ret;
22}
23
24/*
25 * store error injection word value which refers to one of the 9 16-bit words of the
26 * 16-byte (128-bit + ECC bits) section
27 *
28 * range: 0..8
29 */
30static ssize_t amd64_inject_word_store(struct mem_ctl_info *mci,
31 const char *data, size_t count)
32{
33 struct amd64_pvt *pvt = mci->pvt_info;
34 unsigned long value;
35 int ret = 0;
36
37 ret = strict_strtoul(data, 10, &value);
38 if (ret != -EINVAL) {
39
40 value = (value <= 8) ? value : 0;
41 pvt->injection.word = (u32) value;
42
43 return count;
44 }
45 return ret;
46}
47
48/*
49 * store 16 bit error injection vector which enables injecting errors to the
50 * corresponding bit within the error injection word above. When used during a
51 * DRAM ECC read, it holds the contents of the DRAM ECC bits.
52 */
53static ssize_t amd64_inject_ecc_vector_store(struct mem_ctl_info *mci,
54 const char *data, size_t count)
55{
56 struct amd64_pvt *pvt = mci->pvt_info;
57 unsigned long value;
58 int ret = 0;
59
60 ret = strict_strtoul(data, 16, &value);
61 if (ret != -EINVAL) {
62
63 pvt->injection.bit_map = (u32) value & 0xFFFF;
64
65 return count;
66 }
67 return ret;
68}
69
70/*
71 * Do a DRAM ECC read. Assemble staged values in the pvt area, format into
72 * fields needed by the injection registers and read the NB Array Data Port.
73 */
74static ssize_t amd64_inject_read_store(struct mem_ctl_info *mci,
75 const char *data, size_t count)
76{
77 struct amd64_pvt *pvt = mci->pvt_info;
78 unsigned long value;
79 u32 section, word_bits;
80 int ret = 0;
81
82 ret = strict_strtoul(data, 10, &value);
83 if (ret != -EINVAL) {
84
85 /* Form value to choose 16-byte section of cacheline */
86 section = F10_NB_ARRAY_DRAM_ECC |
87 SET_NB_ARRAY_ADDRESS(pvt->injection.section);
88 pci_write_config_dword(pvt->misc_f3_ctl,
89 F10_NB_ARRAY_ADDR, section);
90
91 word_bits = SET_NB_DRAM_INJECTION_READ(pvt->injection.word,
92 pvt->injection.bit_map);
93
94 /* Issue 'word' and 'bit' along with the READ request */
95 pci_write_config_dword(pvt->misc_f3_ctl,
96 F10_NB_ARRAY_DATA, word_bits);
97
98 debugf0("section=0x%x word_bits=0x%x\n", section, word_bits);
99
100 return count;
101 }
102 return ret;
103}
104
105/*
106 * Do a DRAM ECC write. Assemble staged values in the pvt area and format into
107 * fields needed by the injection registers.
108 */
109static ssize_t amd64_inject_write_store(struct mem_ctl_info *mci,
110 const char *data, size_t count)
111{
112 struct amd64_pvt *pvt = mci->pvt_info;
113 unsigned long value;
114 u32 section, word_bits;
115 int ret = 0;
116
117 ret = strict_strtoul(data, 10, &value);
118 if (ret != -EINVAL) {
119
120 /* Form value to choose 16-byte section of cacheline */
121 section = F10_NB_ARRAY_DRAM_ECC |
122 SET_NB_ARRAY_ADDRESS(pvt->injection.section);
123 pci_write_config_dword(pvt->misc_f3_ctl,
124 F10_NB_ARRAY_ADDR, section);
125
126 word_bits = SET_NB_DRAM_INJECTION_WRITE(pvt->injection.word,
127 pvt->injection.bit_map);
128
129		/* Issue 'word' and 'bit' along with the WRITE request */
130 pci_write_config_dword(pvt->misc_f3_ctl,
131 F10_NB_ARRAY_DATA, word_bits);
132
133 debugf0("section=0x%x word_bits=0x%x\n", section, word_bits);
134
135 return count;
136 }
137 return ret;
138}
139
140/*
141 * update NUM_INJ_ATTRS in case you add new members
142 */
143struct mcidev_sysfs_attribute amd64_inj_attrs[] = {
144
145 {
146 .attr = {
147 .name = "inject_section",
148 .mode = (S_IRUGO | S_IWUSR)
149 },
150 .show = NULL,
151 .store = amd64_inject_section_store,
152 },
153 {
154 .attr = {
155 .name = "inject_word",
156 .mode = (S_IRUGO | S_IWUSR)
157 },
158 .show = NULL,
159 .store = amd64_inject_word_store,
160 },
161 {
162 .attr = {
163 .name = "inject_ecc_vector",
164 .mode = (S_IRUGO | S_IWUSR)
165 },
166 .show = NULL,
167 .store = amd64_inject_ecc_vector_store,
168 },
169 {
170 .attr = {
171 .name = "inject_write",
172 .mode = (S_IRUGO | S_IWUSR)
173 },
174 .show = NULL,
175 .store = amd64_inject_write_store,
176 },
177 {
178 .attr = {
179 .name = "inject_read",
180 .mode = (S_IRUGO | S_IWUSR)
181 },
182 .show = NULL,
183 .store = amd64_inject_read_store,
184 },
185};
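
A minimal userspace usage sketch for these attributes; the sysfs directory shown (/sys/devices/system/edac/mc/mc0/) and the small helper below are assumptions for illustration and are not part of the patch:

#include <stdio.h>

/* hypothetical helper: write one value to an mc0 attribute (path assumed) */
static void write_attr(const char *attr, const char *val)
{
	char path[256];
	FILE *f;

	snprintf(path, sizeof(path),
		 "/sys/devices/system/edac/mc/mc0/%s", attr);
	f = fopen(path, "w");
	if (f) {
		fputs(val, f);
		fclose(f);
	}
}

int main(void)
{
	write_attr("inject_section", "2");	/* 16-byte section, 0..3   */
	write_attr("inject_word", "3");		/* 16-bit word, 0..8       */
	write_attr("inject_ecc_vector", "1");	/* flip ECC bit 0          */
	write_attr("inject_write", "1");	/* trigger the injection   */
	return 0;
}
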
diff --git a/drivers/edac/edac_core.h b/drivers/edac/edac_core.h
index 6ad95c8d6363..48d3b1409834 100644
--- a/drivers/edac/edac_core.h
+++ b/drivers/edac/edac_core.h
@@ -76,10 +76,11 @@
76extern int edac_debug_level; 76extern int edac_debug_level;
77 77
78#ifndef CONFIG_EDAC_DEBUG_VERBOSE 78#ifndef CONFIG_EDAC_DEBUG_VERBOSE
79#define edac_debug_printk(level, fmt, arg...) \ 79#define edac_debug_printk(level, fmt, arg...) \
80 do { \ 80 do { \
81 if (level <= edac_debug_level) \ 81 if (level <= edac_debug_level) \
82 edac_printk(KERN_DEBUG, EDAC_DEBUG, fmt, ##arg); \ 82 edac_printk(KERN_DEBUG, EDAC_DEBUG, \
83 "%s: " fmt, __func__, ##arg); \
83 } while (0) 84 } while (0)
84#else /* CONFIG_EDAC_DEBUG_VERBOSE */ 85#else /* CONFIG_EDAC_DEBUG_VERBOSE */
85#define edac_debug_printk(level, fmt, arg...) \ 86#define edac_debug_printk(level, fmt, arg...) \