 Documentation/x86/x86_64/boot-options.txt  |   5
 arch/x86/include/asm/acpi.h                |   2
 arch/x86/include/asm/mce.h                 |  16
 arch/x86/kernel/acpi/boot.c                |   5
 arch/x86/kernel/amd_nb.c                   |  13
 arch/x86/kernel/cpu/mcheck/mce-internal.h  |   3
 arch/x86/kernel/cpu/mcheck/mce.c           |  28
 arch/x86/kernel/cpu/mcheck/mce_intel.c     |  42
 drivers/acpi/apei/erst.c                   |  51
 drivers/acpi/apei/ghes.c                   |  38
 drivers/acpi/apei/hest.c                   |  39
 drivers/edac/amd64_edac.c                  | 334
 drivers/edac/amd64_edac.h                  |  60
 drivers/edac/cpc925_edac.c                 |   2
 drivers/edac/edac_mc_sysfs.c               |   6
 drivers/edac/i3200_edac.c                  |   3
 drivers/edac/x38_edac.c                    |   3
 include/linux/mm.h                         |   1
 include/linux/pci_ids.h                    |   2
 mm/memory-failure.c                        |   5
 20 files changed, 523 insertions(+), 135 deletions(-)
diff --git a/Documentation/x86/x86_64/boot-options.txt b/Documentation/x86/x86_64/boot-options.txt
index e9e8ddbbf376..1228b22e142b 100644
--- a/Documentation/x86/x86_64/boot-options.txt
+++ b/Documentation/x86/x86_64/boot-options.txt
@@ -176,6 +176,11 @@ ACPI
 
   acpi=noirq	Don't route interrupts
 
+  acpi=nocmcff	Disable firmware first mode for corrected errors. This
+		disables parsing the HEST CMC error source to check if
+		firmware has set the FF flag. This may result in
+		duplicate corrected error reports.
+
 PCI
 
   pci=off	Don't use PCI
diff --git a/arch/x86/include/asm/acpi.h b/arch/x86/include/asm/acpi.h
index 2dfac58f3b11..b1977bad5435 100644
--- a/arch/x86/include/asm/acpi.h
+++ b/arch/x86/include/asm/acpi.h
@@ -86,6 +86,7 @@ extern int acpi_pci_disabled;
 extern int acpi_skip_timer_override;
 extern int acpi_use_timer_override;
 extern int acpi_fix_pin2_polarity;
+extern int acpi_disable_cmcff;
 
 extern u8 acpi_sci_flags;
 extern int acpi_sci_override_gsi;
@@ -168,6 +169,7 @@ static inline void arch_acpi_set_pdc_bits(u32 *buf)
 
 #define acpi_lapic 0
 #define acpi_ioapic 0
+#define acpi_disable_cmcff 0
 static inline void acpi_noirq_set(void) { }
 static inline void acpi_disable_pci(void) { }
 static inline void disable_acpi(void) { }
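A side note on the !CONFIG_ACPI stub above: defining acpi_disable_cmcff to a
literal 0 lets callers test the flag without any #ifdef of their own, and the
compiler folds the dead branch away. A minimal standalone sketch of the idiom
(the kernel names are reused purely for illustration):

#include <stdio.h>

/* CONFIG_ACPI intentionally left undefined for this sketch */
#ifdef CONFIG_ACPI
extern int acpi_disable_cmcff;	/* real flag, set by acpi=nocmcff */
#else
#define acpi_disable_cmcff 0	/* constant: the check compiles away */
#endif

int main(void)
{
	if (!acpi_disable_cmcff)
		printf("would parse HEST for firmware-first banks\n");
	return 0;
}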
diff --git a/arch/x86/include/asm/mce.h b/arch/x86/include/asm/mce.h
index 29e3093bbd21..cbe6b9e404ce 100644
--- a/arch/x86/include/asm/mce.h
+++ b/arch/x86/include/asm/mce.h
@@ -32,11 +32,20 @@
 #define MCI_STATUS_PCC	 (1ULL<<57)  /* processor context corrupt */
 #define MCI_STATUS_S	 (1ULL<<56)  /* Signaled machine check */
 #define MCI_STATUS_AR	 (1ULL<<55)  /* Action required */
-#define MCACOD		  0xffff     /* MCA Error Code */
+
+/*
+ * Note that the full MCACOD field of IA32_MCi_STATUS MSR is
+ * bits 15:0.  But bit 12 is the 'F' bit, defined for corrected
+ * errors to indicate that errors are being filtered by hardware.
+ * We should mask out bit 12 when looking for specific signatures
+ * of uncorrected errors - so the F bit is deliberately skipped
+ * in this #define.
+ */
+#define MCACOD		  0xefff     /* MCA Error Code */
 
 /* Architecturally defined codes from SDM Vol. 3B Chapter 15 */
 #define MCACOD_SCRUB	0x00C0	/* 0xC0-0xCF Memory Scrubbing */
-#define MCACOD_SCRUBMSK	0xfff0
+#define MCACOD_SCRUBMSK	0xeff0	/* Skip bit 12 ('F' bit) */
 #define MCACOD_L3WB	0x017A	/* L3 Explicit Writeback */
 #define MCACOD_DATA	0x0134	/* Data Load */
 #define MCACOD_INSTR	0x0150	/* Instruction Fetch */
@@ -188,6 +197,9 @@ extern void register_mce_write_callback(ssize_t (*)(struct file *filp,
 				    const char __user *ubuf,
 				    size_t usize, loff_t *off));
 
+/* Disable CMCI/polling for MCA bank claimed by firmware */
+extern void mce_disable_bank(int bank);
+
 /*
  * Exception handler
  */
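To see why MCACOD now skips bit 12, consider matching a corrected scrub error
whose 'F' (filtering) bit happens to be set. A standalone sketch using the
masks above (the status value is made up for illustration):

#include <stdio.h>
#include <stdint.h>

#define MCACOD		0xefff	/* MCA error code, 'F' bit (12) skipped */
#define MCACOD_SCRUB	0x00C0	/* 0xC0-0xCF memory scrubbing */
#define MCACOD_SCRUBMSK	0xeff0	/* likewise skips bit 12 */

int main(void)
{
	/* scrub error code 0xC3 with the filtering bit (bit 12) set */
	uint64_t status = 0x10C3;

	if ((status & MCACOD_SCRUBMSK) == MCACOD_SCRUB)
		printf("matches the scrub signature despite the F bit\n");

	/* the old 0xfff0 mask keeps bit 12 and misses the match */
	if ((status & 0xfff0) != MCACOD_SCRUB)
		printf("old mask 0xfff0 would have missed it\n");
	return 0;
}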
diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
index 8e594a489d75..40c76604199f 100644
--- a/arch/x86/kernel/acpi/boot.c
+++ b/arch/x86/kernel/acpi/boot.c
@@ -67,6 +67,7 @@ EXPORT_SYMBOL(acpi_pci_disabled);
 int acpi_lapic;
 int acpi_ioapic;
 int acpi_strict;
+int acpi_disable_cmcff;
 
 u8 acpi_sci_flags __initdata;
 int acpi_sci_override_gsi __initdata;
@@ -1622,6 +1623,10 @@ static int __init parse_acpi(char *arg)
 	/* "acpi=copy_dsdt" copys DSDT */
 	else if (strcmp(arg, "copy_dsdt") == 0) {
 		acpi_gbl_copy_dsdt_locally = 1;
+	}
+	/* "acpi=nocmcff" disables FF mode for corrected errors */
+	else if (strcmp(arg, "nocmcff") == 0) {
+		acpi_disable_cmcff = 1;
 	} else {
 		/* Core will printk when we return error. */
 		return -EINVAL;
diff --git a/arch/x86/kernel/amd_nb.c b/arch/x86/kernel/amd_nb.c
index 3048ded1b598..59554dca96ec 100644
--- a/arch/x86/kernel/amd_nb.c
+++ b/arch/x86/kernel/amd_nb.c
@@ -20,6 +20,7 @@ const struct pci_device_id amd_nb_misc_ids[] = {
 	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_10H_NB_MISC) },
 	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F3) },
 	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_M10H_F3) },
+	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_M30H_NB_F3) },
 	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_NB_F3) },
 	{}
 };
@@ -27,6 +28,7 @@ EXPORT_SYMBOL(amd_nb_misc_ids);
 
 static const struct pci_device_id amd_nb_link_ids[] = {
 	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F4) },
+	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_M30H_NB_F4) },
 	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_NB_F4) },
 	{}
 };
@@ -81,13 +83,20 @@ int amd_cache_northbridges(void)
 			next_northbridge(misc, amd_nb_misc_ids);
 		node_to_amd_nb(i)->link = link =
 			next_northbridge(link, amd_nb_link_ids);
 	}
 
+	/* GART present only on Fam15h upto model 0fh */
 	if (boot_cpu_data.x86 == 0xf || boot_cpu_data.x86 == 0x10 ||
-	    boot_cpu_data.x86 == 0x15)
+	    (boot_cpu_data.x86 == 0x15 && boot_cpu_data.x86_model < 0x10))
 		amd_northbridges.flags |= AMD_NB_GART;
 
 	/*
+	 * Check for L3 cache presence.
+	 */
+	if (!cpuid_edx(0x80000006))
+		return 0;
+
+	/*
 	 * Some CPU families support L3 Cache Index Disable. There are some
 	 * limitations because of E382 and E388 on family 0x10.
 	 */
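The new cpuid_edx(0x80000006) test bails out early when no L3 cache is
present, since the L3-dependent setup that follows would be pointless. On AMD,
CPUID leaf 0x80000006 describes the L3 in EDX (size in 512 KB units in bits
31:18, per the BKDG); a rough user-space probe of the same leaf, for
illustration only:

#include <cpuid.h>
#include <stdio.h>

int main(void)
{
	unsigned int eax, ebx, ecx, edx;

	if (!__get_cpuid(0x80000006, &eax, &ebx, &ecx, &edx))
		return 1;	/* leaf not supported */

	if (!edx)		/* the same zero test the patch uses */
		printf("no L3 cache present\n");
	else
		printf("L3 size: %u KiB\n", (edx >> 18) * 512);
	return 0;
}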
diff --git a/arch/x86/kernel/cpu/mcheck/mce-internal.h b/arch/x86/kernel/cpu/mcheck/mce-internal.h
index 5b7d4fa5d3b7..09edd0b65fef 100644
--- a/arch/x86/kernel/cpu/mcheck/mce-internal.h
+++ b/arch/x86/kernel/cpu/mcheck/mce-internal.h
@@ -25,15 +25,18 @@ int mce_severity(struct mce *a, int tolerant, char **msg);
 struct dentry *mce_get_debugfs_dir(void);
 
 extern struct mce_bank *mce_banks;
+extern mce_banks_t mce_banks_ce_disabled;
 
 #ifdef CONFIG_X86_MCE_INTEL
 unsigned long mce_intel_adjust_timer(unsigned long interval);
 void mce_intel_cmci_poll(void);
 void mce_intel_hcpu_update(unsigned long cpu);
+void cmci_disable_bank(int bank);
 #else
 # define mce_intel_adjust_timer mce_adjust_timer_default
 static inline void mce_intel_cmci_poll(void) { }
 static inline void mce_intel_hcpu_update(unsigned long cpu) { }
+static inline void cmci_disable_bank(int bank) { }
 #endif
 
 void mce_timer_kick(unsigned long interval);
diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
index 87a65c939bcd..b3218cdee95f 100644
--- a/arch/x86/kernel/cpu/mcheck/mce.c
+++ b/arch/x86/kernel/cpu/mcheck/mce.c
@@ -97,6 +97,15 @@ DEFINE_PER_CPU(mce_banks_t, mce_poll_banks) = {
 	[0 ... BITS_TO_LONGS(MAX_NR_BANKS)-1] = ~0UL
 };
 
+/*
+ * MCA banks controlled through firmware first for corrected errors.
+ * This is a global list of banks for which we won't enable CMCI and we
+ * won't poll. Firmware controls these banks and is responsible for
+ * reporting corrected errors through GHES. Uncorrected/recoverable
+ * errors are still notified through a machine check.
+ */
+mce_banks_t mce_banks_ce_disabled;
+
 static DEFINE_PER_CPU(struct work_struct, mce_work);
 
 static void (*quirk_no_way_out)(int bank, struct mce *m, struct pt_regs *regs);
@@ -1935,6 +1944,25 @@ static struct miscdevice mce_chrdev_device = {
 	&mce_chrdev_ops,
 };
 
+static void __mce_disable_bank(void *arg)
+{
+	int bank = *((int *)arg);
+	__clear_bit(bank, __get_cpu_var(mce_poll_banks));
+	cmci_disable_bank(bank);
+}
+
+void mce_disable_bank(int bank)
+{
+	if (bank >= mca_cfg.banks) {
+		pr_warn(FW_BUG
+			"Ignoring request to disable invalid MCA bank %d.\n",
+			bank);
+		return;
+	}
+	set_bit(bank, mce_banks_ce_disabled);
+	on_each_cpu(__mce_disable_bank, &bank, 1);
+}
+
 /*
  * mce=off Disables machine check
  * mce=no_cmci Disables CMCI
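mce_disable_bank() records the bank in the global mce_banks_ce_disabled
bitmap and then uses on_each_cpu() to clear it from every CPU's polling mask
(CMCI teardown follows via cmci_disable_bank()). A rough user-space analogue
of that bookkeeping, with plain arrays standing in for the per-CPU data:

#include <stdio.h>

#define NR_CPUS		4
#define MAX_BANKS	(8 * (int)sizeof(unsigned long))

static unsigned long banks_ce_disabled;	/* mce_banks_ce_disabled */
static unsigned long poll_banks[NR_CPUS] = {	/* per-CPU mce_poll_banks */
	~0UL, ~0UL, ~0UL, ~0UL
};

static void __disable_bank(int cpu, int bank)	/* runs "on each cpu" */
{
	poll_banks[cpu] &= ~(1UL << bank);
}

static void disable_bank(int bank)
{
	if (bank >= MAX_BANKS) {
		fprintf(stderr, "ignoring invalid MCA bank %d\n", bank);
		return;
	}
	banks_ce_disabled |= 1UL << bank;
	for (int cpu = 0; cpu < NR_CPUS; cpu++)
		__disable_bank(cpu, bank);
}

int main(void)
{
	disable_bank(4);
	printf("cpu0 poll mask: %#lx\n", poll_banks[0]);
	return 0;
}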
diff --git a/arch/x86/kernel/cpu/mcheck/mce_intel.c b/arch/x86/kernel/cpu/mcheck/mce_intel.c
index d56405309dc1..4cfe0458ca66 100644
--- a/arch/x86/kernel/cpu/mcheck/mce_intel.c
+++ b/arch/x86/kernel/cpu/mcheck/mce_intel.c
@@ -203,6 +203,10 @@ static void cmci_discover(int banks)
 		if (test_bit(i, owned))
 			continue;
 
+		/* Skip banks in firmware first mode */
+		if (test_bit(i, mce_banks_ce_disabled))
+			continue;
+
 		rdmsrl(MSR_IA32_MCx_CTL2(i), val);
 
 		/* Already owned by someone else? */
@@ -271,6 +275,19 @@ void cmci_recheck(void)
 	local_irq_restore(flags);
 }
 
+/* Caller must hold the lock on cmci_discover_lock */
+static void __cmci_disable_bank(int bank)
+{
+	u64 val;
+
+	if (!test_bit(bank, __get_cpu_var(mce_banks_owned)))
+		return;
+	rdmsrl(MSR_IA32_MCx_CTL2(bank), val);
+	val &= ~MCI_CTL2_CMCI_EN;
+	wrmsrl(MSR_IA32_MCx_CTL2(bank), val);
+	__clear_bit(bank, __get_cpu_var(mce_banks_owned));
+}
+
 /*
  * Disable CMCI on this CPU for all banks it owns when it goes down.
  * This allows other CPUs to claim the banks on rediscovery.
@@ -280,20 +297,12 @@ void cmci_clear(void)
 	unsigned long flags;
 	int i;
 	int banks;
-	u64 val;
 
 	if (!cmci_supported(&banks))
 		return;
 	raw_spin_lock_irqsave(&cmci_discover_lock, flags);
-	for (i = 0; i < banks; i++) {
-		if (!test_bit(i, __get_cpu_var(mce_banks_owned)))
-			continue;
-		/* Disable CMCI */
-		rdmsrl(MSR_IA32_MCx_CTL2(i), val);
-		val &= ~MCI_CTL2_CMCI_EN;
-		wrmsrl(MSR_IA32_MCx_CTL2(i), val);
-		__clear_bit(i, __get_cpu_var(mce_banks_owned));
-	}
+	for (i = 0; i < banks; i++)
+		__cmci_disable_bank(i);
 	raw_spin_unlock_irqrestore(&cmci_discover_lock, flags);
 }
 
@@ -327,6 +336,19 @@ void cmci_reenable(void)
 	cmci_discover(banks);
 }
 
+void cmci_disable_bank(int bank)
+{
+	int banks;
+	unsigned long flags;
+
+	if (!cmci_supported(&banks))
+		return;
+
+	raw_spin_lock_irqsave(&cmci_discover_lock, flags);
+	__cmci_disable_bank(bank);
+	raw_spin_unlock_irqrestore(&cmci_discover_lock, flags);
+}
+
 static void intel_init_cmci(void)
 {
 	int banks;
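The refactor above follows the common kernel shape of a lock-held helper
(double-underscore __cmci_disable_bank()) plus a locking wrapper
(cmci_disable_bank()), so that cmci_clear() can reuse the helper inside its
own critical section without taking the lock twice. A generic pthread sketch
of the same shape, purely illustrative (a bitmask stands in for the MSR
state):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t discover_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned long banks_owned;	/* stands in for mce_banks_owned */

/* Caller must hold discover_lock, like __cmci_disable_bank(). */
static void __disable_bank(int bank)
{
	if (!(banks_owned & (1UL << bank)))
		return;
	banks_owned &= ~(1UL << bank);	/* "clear MCI_CTL2_CMCI_EN" */
}

static void disable_bank(int bank)	/* mirrors cmci_disable_bank() */
{
	pthread_mutex_lock(&discover_lock);
	__disable_bank(bank);
	pthread_mutex_unlock(&discover_lock);
}

static void clear_all(int banks)	/* mirrors cmci_clear() */
{
	pthread_mutex_lock(&discover_lock);
	for (int bank = 0; bank < banks; bank++)
		__disable_bank(bank);
	pthread_mutex_unlock(&discover_lock);
}

int main(void)
{
	banks_owned = 0xff;
	disable_bank(3);
	clear_all(8);
	printf("owned: %#lx\n", banks_owned);
	return 0;
}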
diff --git a/drivers/acpi/apei/erst.c b/drivers/acpi/apei/erst.c
index 822b1ed3b00f..26311f23c824 100644
--- a/drivers/acpi/apei/erst.c
+++ b/drivers/acpi/apei/erst.c
@@ -39,7 +39,8 @@
 
 #include "apei-internal.h"
 
-#define ERST_PFX "ERST: "
+#undef pr_fmt
+#define pr_fmt(fmt) "ERST: " fmt
 
 /* ERST command status */
 #define ERST_STATUS_SUCCESS			0x0
@@ -109,8 +110,7 @@ static inline int erst_errno(int command_status)
 static int erst_timedout(u64 *t, u64 spin_unit)
 {
 	if ((s64)*t < spin_unit) {
-		pr_warning(FW_WARN ERST_PFX
-			   "Firmware does not respond in time\n");
+		pr_warn(FW_WARN "Firmware does not respond in time.\n");
 		return 1;
 	}
 	*t -= spin_unit;
@@ -186,8 +186,8 @@ static int erst_exec_stall(struct apei_exec_context *ctx,
 
 	if (ctx->value > FIRMWARE_MAX_STALL) {
 		if (!in_nmi())
-			pr_warning(FW_WARN ERST_PFX
-				   "Too long stall time for stall instruction: %llx.\n",
+			pr_warn(FW_WARN
+				"Too long stall time for stall instruction: 0x%llx.\n",
 				ctx->value);
 		stall_time = FIRMWARE_MAX_STALL;
 	} else
@@ -206,8 +206,8 @@ static int erst_exec_stall_while_true(struct apei_exec_context *ctx,
 
 	if (ctx->var1 > FIRMWARE_MAX_STALL) {
 		if (!in_nmi())
-			pr_warning(FW_WARN ERST_PFX
-				   "Too long stall time for stall while true instruction: %llx.\n",
+			pr_warn(FW_WARN
+				"Too long stall time for stall while true instruction: 0x%llx.\n",
 				ctx->var1);
 		stall_time = FIRMWARE_MAX_STALL;
 	} else
@@ -271,8 +271,7 @@ static int erst_exec_move_data(struct apei_exec_context *ctx,
 
 	/* ioremap does not work in interrupt context */
 	if (in_interrupt()) {
-		pr_warning(ERST_PFX
-			   "MOVE_DATA can not be used in interrupt context");
+		pr_warn("MOVE_DATA can not be used in interrupt context.\n");
 		return -EBUSY;
 	}
 
@@ -524,8 +523,7 @@ retry:
 				  ERST_RECORD_ID_CACHE_SIZE_MAX);
 	if (new_size <= erst_record_id_cache.size) {
 		if (printk_ratelimit())
-			pr_warning(FW_WARN ERST_PFX
-				   "too many record ID!\n");
+			pr_warn(FW_WARN "too many record IDs!\n");
 		return 0;
 	}
 	alloc_size = new_size * sizeof(entries[0]);
@@ -761,8 +759,7 @@ static int __erst_clear_from_storage(u64 record_id)
 static void pr_unimpl_nvram(void)
 {
 	if (printk_ratelimit())
-		pr_warning(ERST_PFX
-			   "NVRAM ERST Log Address Range is not implemented yet\n");
+		pr_warn("NVRAM ERST Log Address Range not implemented yet.\n");
 }
 
 static int __erst_write_to_nvram(const struct cper_record_header *record)
@@ -1133,7 +1130,7 @@ static int __init erst_init(void)
 		goto err;
 
 	if (erst_disable) {
-		pr_info(ERST_PFX
+		pr_info(
 	"Error Record Serialization Table (ERST) support is disabled.\n");
 		goto err;
 	}
@@ -1144,14 +1141,14 @@ static int __init erst_init(void)
 		goto err;
 	else if (ACPI_FAILURE(status)) {
 		const char *msg = acpi_format_exception(status);
-		pr_err(ERST_PFX "Failed to get table, %s\n", msg);
+		pr_err("Failed to get table, %s\n", msg);
 		rc = -EINVAL;
 		goto err;
 	}
 
 	rc = erst_check_table(erst_tab);
 	if (rc) {
-		pr_err(FW_BUG ERST_PFX "ERST table is invalid\n");
+		pr_err(FW_BUG "ERST table is invalid.\n");
 		goto err;
 	}
 
@@ -1169,21 +1166,19 @@ static int __init erst_init(void)
 	rc = erst_get_erange(&erst_erange);
 	if (rc) {
 		if (rc == -ENODEV)
-			pr_info(ERST_PFX
+			pr_info(
 	"The corresponding hardware device or firmware implementation "
 	"is not available.\n");
 		else
-			pr_err(ERST_PFX
-			       "Failed to get Error Log Address Range.\n");
+			pr_err("Failed to get Error Log Address Range.\n");
 		goto err_unmap_reg;
 	}
 
 	r = request_mem_region(erst_erange.base, erst_erange.size, "APEI ERST");
 	if (!r) {
-		pr_err(ERST_PFX
-		"Can not request iomem region <0x%16llx-0x%16llx> for ERST.\n",
-		(unsigned long long)erst_erange.base,
-		(unsigned long long)erst_erange.base + erst_erange.size);
+		pr_err("Can not request [mem %#010llx-%#010llx] for ERST.\n",
+		       (unsigned long long)erst_erange.base,
+		       (unsigned long long)erst_erange.base + erst_erange.size - 1);
 		rc = -EIO;
 		goto err_unmap_reg;
 	}
@@ -1193,7 +1188,7 @@ static int __init erst_init(void)
 	if (!erst_erange.vaddr)
 		goto err_release_erange;
 
-	pr_info(ERST_PFX
+	pr_info(
 	"Error Record Serialization Table (ERST) support is initialized.\n");
 
 	buf = kmalloc(erst_erange.size, GFP_KERNEL);
@@ -1205,15 +1200,15 @@ static int __init erst_init(void)
 		rc = pstore_register(&erst_info);
 		if (rc) {
 			if (rc != -EPERM)
-				pr_info(ERST_PFX
-				"Could not register with persistent store\n");
+				pr_info(
+				"Could not register with persistent store.\n");
 			erst_info.buf = NULL;
 			erst_info.bufsize = 0;
 			kfree(buf);
 		}
 	} else
-		pr_err(ERST_PFX
-		"Failed to allocate %lld bytes for persistent store error log\n",
+		pr_err(
+		"Failed to allocate %lld bytes for persistent store error log.\n",
 		erst_erange.size);
 
 	return 0;
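The switch from a hand-rolled ERST_PFX to pr_fmt() means every pr_warn(),
pr_err(), and pr_info() in the file picks up the "ERST: " prefix
automatically, which is what lets the call sites above drop it. A tiny
standalone sketch of how pr_fmt() composes (printf stands in for printk):

#include <stdio.h>

#define pr_fmt(fmt)		"ERST: " fmt
#define pr_warn(fmt, ...)	printf(pr_fmt(fmt), ##__VA_ARGS__)

int main(void)
{
	pr_warn("Firmware does not respond in time.\n");
	/* prints: ERST: Firmware does not respond in time. */
	return 0;
}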
diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c
index ec9b57d428a1..8ec37bbdd699 100644
--- a/drivers/acpi/apei/ghes.c
+++ b/drivers/acpi/apei/ghes.c
@@ -409,6 +409,34 @@ static void ghes_clear_estatus(struct ghes *ghes)
 	ghes->flags &= ~GHES_TO_CLEAR;
 }
 
+static void ghes_handle_memory_failure(struct acpi_hest_generic_data *gdata, int sev)
+{
+#ifdef CONFIG_ACPI_APEI_MEMORY_FAILURE
+	unsigned long pfn;
+	int sec_sev = ghes_severity(gdata->error_severity);
+	struct cper_sec_mem_err *mem_err;
+	mem_err = (struct cper_sec_mem_err *)(gdata + 1);
+
+	if (sec_sev == GHES_SEV_CORRECTED &&
+	    (gdata->flags & CPER_SEC_ERROR_THRESHOLD_EXCEEDED) &&
+	    (mem_err->validation_bits & CPER_MEM_VALID_PHYSICAL_ADDRESS)) {
+		pfn = mem_err->physical_addr >> PAGE_SHIFT;
+		if (pfn_valid(pfn))
+			memory_failure_queue(pfn, 0, MF_SOFT_OFFLINE);
+		else if (printk_ratelimit())
+			pr_warn(FW_WARN GHES_PFX
+			"Invalid address in generic error data: %#llx\n",
+			mem_err->physical_addr);
+	}
+	if (sev == GHES_SEV_RECOVERABLE &&
+	    sec_sev == GHES_SEV_RECOVERABLE &&
+	    mem_err->validation_bits & CPER_MEM_VALID_PHYSICAL_ADDRESS) {
+		pfn = mem_err->physical_addr >> PAGE_SHIFT;
+		memory_failure_queue(pfn, 0, 0);
+	}
+#endif
+}
+
 static void ghes_do_proc(struct ghes *ghes,
			 const struct acpi_hest_generic_status *estatus)
 {
@@ -428,15 +456,7 @@ static void ghes_do_proc(struct ghes *ghes,
 			apei_mce_report_mem_error(sev == GHES_SEV_CORRECTED,
 						  mem_err);
 #endif
-#ifdef CONFIG_ACPI_APEI_MEMORY_FAILURE
-			if (sev == GHES_SEV_RECOVERABLE &&
-			    sec_sev == GHES_SEV_RECOVERABLE &&
-			    mem_err->validation_bits & CPER_MEM_VALID_PHYSICAL_ADDRESS) {
-				unsigned long pfn;
-				pfn = mem_err->physical_addr >> PAGE_SHIFT;
-				memory_failure_queue(pfn, 0, 0);
-			}
-#endif
+			ghes_handle_memory_failure(gdata, sev);
 		}
 #ifdef CONFIG_ACPI_APEI_PCIEAER
 		else if (!uuid_le_cmp(*(uuid_le *)gdata->section_type,
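The new helper adds a second policy next to the existing one: pages that are
still only seeing corrected errors but have crossed the firmware's error
threshold get soft-offlined, while genuinely recoverable (uncorrected) errors
keep going through normal memory_failure() handling. A user-space sketch of
that dispatch, with simplified stand-ins for the CPER severities and flags:

#include <stdio.h>

enum sev { SEV_CORRECTED, SEV_RECOVERABLE, SEV_PANIC };

#define THRESHOLD_EXCEEDED	(1u << 0)  /* CPER_SEC_ERROR_THRESHOLD_EXCEEDED */
#define VALID_PHYS_ADDR		(1u << 1)  /* CPER_MEM_VALID_PHYSICAL_ADDRESS */
#define PAGE_SHIFT		12	   /* assumed for this sketch */

static void handle_mem_error(enum sev sev, enum sev sec_sev,
			     unsigned int flags, unsigned int valid,
			     unsigned long long phys_addr)
{
	unsigned long pfn = phys_addr >> PAGE_SHIFT;

	if (sec_sev == SEV_CORRECTED && (flags & THRESHOLD_EXCEEDED) &&
	    (valid & VALID_PHYS_ADDR))
		printf("soft-offline pfn %#lx\n", pfn);

	if (sev == SEV_RECOVERABLE && sec_sev == SEV_RECOVERABLE &&
	    (valid & VALID_PHYS_ADDR))
		printf("memory_failure pfn %#lx\n", pfn);
}

int main(void)
{
	handle_mem_error(SEV_CORRECTED, SEV_CORRECTED,
			 THRESHOLD_EXCEEDED, VALID_PHYS_ADDR, 0x123456000ULL);
	return 0;
}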
diff --git a/drivers/acpi/apei/hest.c b/drivers/acpi/apei/hest.c
index f5ef5d54e4ac..f5e37f32c71f 100644
--- a/drivers/acpi/apei/hest.c
+++ b/drivers/acpi/apei/hest.c
@@ -36,6 +36,7 @@
 #include <linux/io.h>
 #include <linux/platform_device.h>
 #include <acpi/apei.h>
+#include <asm/mce.h>
 
 #include "apei-internal.h"
 
@@ -121,6 +122,41 @@ int apei_hest_parse(apei_hest_func_t func, void *data)
 }
 EXPORT_SYMBOL_GPL(apei_hest_parse);
 
+/*
+ * Check if firmware advertises firmware first mode. We need FF bit to be set
+ * along with a set of MC banks which work in FF mode.
+ */
+static int __init hest_parse_cmc(struct acpi_hest_header *hest_hdr, void *data)
+{
+#ifdef CONFIG_X86_MCE
+	int i;
+	struct acpi_hest_ia_corrected *cmc;
+	struct acpi_hest_ia_error_bank *mc_bank;
+
+	if (hest_hdr->type != ACPI_HEST_TYPE_IA32_CORRECTED_CHECK)
+		return 0;
+
+	cmc = (struct acpi_hest_ia_corrected *)hest_hdr;
+	if (!cmc->enabled)
+		return 0;
+
+	/*
+	 * We expect HEST to provide a list of MC banks that report errors
+	 * in firmware first mode. Otherwise, return non-zero value to
+	 * indicate that we are done parsing HEST.
+	 */
+	if (!(cmc->flags & ACPI_HEST_FIRMWARE_FIRST) || !cmc->num_hardware_banks)
+		return 1;
+
+	pr_info(HEST_PFX "Enabling Firmware First mode for corrected errors.\n");
+
+	mc_bank = (struct acpi_hest_ia_error_bank *)(cmc + 1);
+	for (i = 0; i < cmc->num_hardware_banks; i++, mc_bank++)
+		mce_disable_bank(mc_bank->bank_number);
+#endif
+	return 1;
+}
+
 struct ghes_arr {
 	struct platform_device **ghes_devs;
 	unsigned int count;
@@ -227,6 +263,9 @@ void __init acpi_hest_init(void)
 		goto err;
 	}
 
+	if (!acpi_disable_cmcff)
+		apei_hest_parse(hest_parse_cmc, NULL);
+
 	if (!ghes_disable) {
 		rc = apei_hest_parse(hest_parse_ghes_count, &ghes_count);
 		if (rc)
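hest_parse_cmc() relies on the HEST layout: the IA-32 corrected-machine-check
structure is immediately followed in memory by num_hardware_banks bank
entries, which is what the (cmc + 1) pointer arithmetic walks over. A
standalone sketch of that walk, with trimmed-down stand-ins for the ACPICA
structs (field layout simplified):

#include <stdio.h>
#include <stdint.h>

struct ia_corrected {		/* stands in for acpi_hest_ia_corrected */
	uint8_t enabled;
	uint8_t num_hardware_banks;
};

struct ia_error_bank {		/* stands in for acpi_hest_ia_error_bank */
	uint8_t bank_number;
};

static void parse_cmc(struct ia_corrected *cmc)
{
	/* bank entries start right after the header structure */
	struct ia_error_bank *mc_bank = (struct ia_error_bank *)(cmc + 1);

	for (int i = 0; i < cmc->num_hardware_banks; i++, mc_bank++)
		printf("firmware-first bank %u\n", mc_bank->bank_number);
}

int main(void)
{
	unsigned char buf[sizeof(struct ia_corrected) +
			  2 * sizeof(struct ia_error_bank)];
	struct ia_corrected *cmc = (struct ia_corrected *)buf;
	struct ia_error_bank *banks = (struct ia_error_bank *)(cmc + 1);

	cmc->num_hardware_banks = 2;
	banks[0].bank_number = 4;
	banks[1].bank_number = 5;
	parse_cmc(cmc);
	return 0;
}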
diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c
index 8b6a0343c220..3c9e4e98c651 100644
--- a/drivers/edac/amd64_edac.c
+++ b/drivers/edac/amd64_edac.c
@@ -123,7 +123,7 @@ static void f15h_select_dct(struct amd64_pvt *pvt, u8 dct)
 	u32 reg = 0;
 
 	amd64_read_pci_cfg(pvt->F1, DCT_CFG_SEL, &reg);
-	reg &= 0xfffffffe;
+	reg &= (pvt->model >= 0x30) ? ~3 : ~1;
 	reg |= dct;
 	amd64_write_pci_cfg(pvt->F1, DCT_CFG_SEL, reg);
 }
@@ -133,8 +133,9 @@ static int f15_read_dct_pci_cfg(struct amd64_pvt *pvt, int addr, u32 *val,
 {
 	u8 dct = 0;
 
+	/* For F15 M30h, the second dct is DCT 3, refer to BKDG Section 2.10 */
 	if (addr >= 0x140 && addr <= 0x1a0) {
-		dct = 1;
+		dct = (pvt->model >= 0x30) ? 3 : 1;
 		addr -= 0x100;
 	}
 
@@ -202,11 +203,11 @@ static int amd64_set_scrub_rate(struct mem_ctl_info *mci, u32 bw)
 	struct amd64_pvt *pvt = mci->pvt_info;
 	u32 min_scrubrate = 0x5;
 
-	if (boot_cpu_data.x86 == 0xf)
+	if (pvt->fam == 0xf)
 		min_scrubrate = 0x0;
 
-	/* F15h Erratum #505 */
-	if (boot_cpu_data.x86 == 0x15)
+	/* Erratum #505 */
+	if (pvt->fam == 0x15 && pvt->model < 0x10)
 		f15h_select_dct(pvt, 0);
 
 	return __amd64_set_scrub_rate(pvt->F3, bw, min_scrubrate);
@@ -218,8 +219,8 @@ static int amd64_get_scrub_rate(struct mem_ctl_info *mci)
 	u32 scrubval = 0;
 	int i, retval = -EINVAL;
 
-	/* F15h Erratum #505 */
-	if (boot_cpu_data.x86 == 0x15)
+	/* Erratum #505 */
+	if (pvt->fam == 0x15 && pvt->model < 0x10)
 		f15h_select_dct(pvt, 0);
 
 	amd64_read_pci_cfg(pvt->F3, SCRCTRL, &scrubval);
@@ -335,7 +336,7 @@ static void get_cs_base_and_mask(struct amd64_pvt *pvt, int csrow, u8 dct,
 	u64 csbase, csmask, base_bits, mask_bits;
 	u8 addr_shift;
 
-	if (boot_cpu_data.x86 == 0xf && pvt->ext_model < K8_REV_F) {
+	if (pvt->fam == 0xf && pvt->ext_model < K8_REV_F) {
 		csbase		= pvt->csels[dct].csbases[csrow];
 		csmask		= pvt->csels[dct].csmasks[csrow];
 		base_bits	= GENMASK(21, 31) | GENMASK(9, 15);
@@ -343,10 +344,11 @@ static void get_cs_base_and_mask(struct amd64_pvt *pvt, int csrow, u8 dct,
 		addr_shift	= 4;
 
 	/*
-	 * F16h needs two addr_shift values: 8 for high and 6 for low
-	 * (cf. F16h BKDG).
+	 * F16h and F15h, models 30h and later need two addr_shift values:
+	 * 8 for high and 6 for low (cf. F16h BKDG).
 	 */
-	} else if (boot_cpu_data.x86 == 0x16) {
+	} else if (pvt->fam == 0x16 ||
+		  (pvt->fam == 0x15 && pvt->model >= 0x30)) {
 		csbase		= pvt->csels[dct].csbases[csrow];
 		csmask		= pvt->csels[dct].csmasks[csrow >> 1];
 
@@ -367,7 +369,7 @@ static void get_cs_base_and_mask(struct amd64_pvt *pvt, int csrow, u8 dct,
 	csmask = pvt->csels[dct].csmasks[csrow >> 1];
 	addr_shift = 8;
 
-	if (boot_cpu_data.x86 == 0x15)
+	if (pvt->fam == 0x15)
 		base_bits = mask_bits = GENMASK(19,30) | GENMASK(5,13);
 	else
 		base_bits = mask_bits = GENMASK(19,28) | GENMASK(5,13);
@@ -447,14 +449,14 @@ int amd64_get_dram_hole_info(struct mem_ctl_info *mci, u64 *hole_base,
 	struct amd64_pvt *pvt = mci->pvt_info;
 
 	/* only revE and later have the DRAM Hole Address Register */
-	if (boot_cpu_data.x86 == 0xf && pvt->ext_model < K8_REV_E) {
+	if (pvt->fam == 0xf && pvt->ext_model < K8_REV_E) {
 		edac_dbg(1, "  revision %d for node %d does not support DHAR\n",
			 pvt->ext_model, pvt->mc_node_id);
 		return 1;
 	}
 
 	/* valid for Fam10h and above */
-	if (boot_cpu_data.x86 >= 0x10 && !dhar_mem_hoist_valid(pvt)) {
+	if (pvt->fam >= 0x10 && !dhar_mem_hoist_valid(pvt)) {
 		edac_dbg(1, "  Dram Memory Hoisting is DISABLED on this system\n");
 		return 1;
 	}
@@ -486,10 +488,8 @@ int amd64_get_dram_hole_info(struct mem_ctl_info *mci, u64 *hole_base,
 	*hole_base = dhar_base(pvt);
 	*hole_size = (1ULL << 32) - *hole_base;
 
-	if (boot_cpu_data.x86 > 0xf)
-		*hole_offset = f10_dhar_offset(pvt);
-	else
-		*hole_offset = k8_dhar_offset(pvt);
+	*hole_offset = (pvt->fam > 0xf) ? f10_dhar_offset(pvt)
+					: k8_dhar_offset(pvt);
 
 	edac_dbg(1, "  DHAR info for node %d base 0x%lx offset 0x%lx size 0x%lx\n",
		 pvt->mc_node_id, (unsigned long)*hole_base,
@@ -663,7 +663,7 @@ static unsigned long amd64_determine_edac_cap(struct amd64_pvt *pvt)
 	u8 bit;
 	unsigned long edac_cap = EDAC_FLAG_NONE;
 
-	bit = (boot_cpu_data.x86 > 0xf || pvt->ext_model >= K8_REV_F)
+	bit = (pvt->fam > 0xf || pvt->ext_model >= K8_REV_F)
 		? 19
 		: 17;
 
@@ -675,7 +675,7 @@ static unsigned long amd64_determine_edac_cap(struct amd64_pvt *pvt)
 
 static void amd64_debug_display_dimm_sizes(struct amd64_pvt *, u8);
 
-static void amd64_dump_dramcfg_low(u32 dclr, int chan)
+static void amd64_dump_dramcfg_low(struct amd64_pvt *pvt, u32 dclr, int chan)
 {
 	edac_dbg(1, "F2x%d90 (DRAM Cfg Low): 0x%08x\n", chan, dclr);
 
@@ -686,7 +686,7 @@ static void amd64_dump_dramcfg_low(u32 dclr, int chan)
 	edac_dbg(1, "  PAR/ERR parity: %s\n",
		 (dclr & BIT(8)) ? "enabled" : "disabled");
 
-	if (boot_cpu_data.x86 == 0x10)
+	if (pvt->fam == 0x10)
 		edac_dbg(1, "  DCT 128bit mode width: %s\n",
			 (dclr & BIT(11)) ? "128b" : "64b");
 
@@ -709,21 +709,21 @@ static void dump_misc_regs(struct amd64_pvt *pvt)
	 (pvt->nbcap & NBCAP_SECDED) ? "yes" : "no",
	 (pvt->nbcap & NBCAP_CHIPKILL) ? "yes" : "no");
 
-	amd64_dump_dramcfg_low(pvt->dclr0, 0);
+	amd64_dump_dramcfg_low(pvt, pvt->dclr0, 0);
 
 	edac_dbg(1, "F3xB0 (Online Spare): 0x%08x\n", pvt->online_spare);
 
 	edac_dbg(1, "F1xF0 (DRAM Hole Address): 0x%08x, base: 0x%08x, offset: 0x%08x\n",
		 pvt->dhar, dhar_base(pvt),
-		 (boot_cpu_data.x86 == 0xf) ? k8_dhar_offset(pvt)
-					    : f10_dhar_offset(pvt));
+		 (pvt->fam == 0xf) ? k8_dhar_offset(pvt)
+				   : f10_dhar_offset(pvt));
 
 	edac_dbg(1, "  DramHoleValid: %s\n", dhar_valid(pvt) ? "yes" : "no");
 
 	amd64_debug_display_dimm_sizes(pvt, 0);
 
 	/* everything below this point is Fam10h and above */
-	if (boot_cpu_data.x86 == 0xf)
+	if (pvt->fam == 0xf)
		return;
 
 	amd64_debug_display_dimm_sizes(pvt, 1);
@@ -732,17 +732,20 @@ static void dump_misc_regs(struct amd64_pvt *pvt)
 
 	/* Only if NOT ganged does dclr1 have valid info */
 	if (!dct_ganging_enabled(pvt))
-		amd64_dump_dramcfg_low(pvt->dclr1, 1);
+		amd64_dump_dramcfg_low(pvt, pvt->dclr1, 1);
 }
 
 /*
- * see BKDG, F2x[1,0][5C:40], F2[1,0][6C:60]
+ * See BKDG, F2x[1,0][5C:40], F2[1,0][6C:60]
  */
 static void prep_chip_selects(struct amd64_pvt *pvt)
 {
-	if (boot_cpu_data.x86 == 0xf && pvt->ext_model < K8_REV_F) {
+	if (pvt->fam == 0xf && pvt->ext_model < K8_REV_F) {
 		pvt->csels[0].b_cnt = pvt->csels[1].b_cnt = 8;
 		pvt->csels[0].m_cnt = pvt->csels[1].m_cnt = 8;
+	} else if (pvt->fam == 0x15 && pvt->model >= 0x30) {
+		pvt->csels[0].b_cnt = pvt->csels[1].b_cnt = 4;
+		pvt->csels[0].m_cnt = pvt->csels[1].m_cnt = 2;
 	} else {
 		pvt->csels[0].b_cnt = pvt->csels[1].b_cnt = 8;
 		pvt->csels[0].m_cnt = pvt->csels[1].m_cnt = 4;
@@ -768,7 +771,7 @@ static void read_dct_base_mask(struct amd64_pvt *pvt)
 		edac_dbg(0, "  DCSB0[%d]=0x%08x reg: F2x%x\n",
			 cs, *base0, reg0);
 
-		if (boot_cpu_data.x86 == 0xf || dct_ganging_enabled(pvt))
+		if (pvt->fam == 0xf || dct_ganging_enabled(pvt))
			continue;
 
		if (!amd64_read_dct_pci_cfg(pvt, reg1, base1))
@@ -786,7 +789,7 @@ static void read_dct_base_mask(struct amd64_pvt *pvt)
 		edac_dbg(0, "  DCSM0[%d]=0x%08x reg: F2x%x\n",
			 cs, *mask0, reg0);
 
-		if (boot_cpu_data.x86 == 0xf || dct_ganging_enabled(pvt))
+		if (pvt->fam == 0xf || dct_ganging_enabled(pvt))
			continue;
 
		if (!amd64_read_dct_pci_cfg(pvt, reg1, mask1))
@@ -800,9 +803,9 @@ static enum mem_type amd64_determine_memory_type(struct amd64_pvt *pvt, int cs)
 	enum mem_type type;
 
 	/* F15h supports only DDR3 */
-	if (boot_cpu_data.x86 >= 0x15)
+	if (pvt->fam >= 0x15)
 		type = (pvt->dclr0 & BIT(16)) ? MEM_DDR3 : MEM_RDDR3;
-	else if (boot_cpu_data.x86 == 0x10 || pvt->ext_model >= K8_REV_F) {
+	else if (pvt->fam == 0x10 || pvt->ext_model >= K8_REV_F) {
 		if (pvt->dchr0 & DDR3_MODE)
			type = (pvt->dclr0 & BIT(16)) ? MEM_DDR3 : MEM_RDDR3;
		else
@@ -835,14 +838,13 @@ static int k8_early_channel_count(struct amd64_pvt *pvt)
 }
 
 /* On F10h and later ErrAddr is MC4_ADDR[47:1] */
-static u64 get_error_address(struct mce *m)
+static u64 get_error_address(struct amd64_pvt *pvt, struct mce *m)
 {
-	struct cpuinfo_x86 *c = &boot_cpu_data;
 	u64 addr;
 	u8 start_bit = 1;
 	u8 end_bit   = 47;
 
-	if (c->x86 == 0xf) {
+	if (pvt->fam == 0xf) {
 		start_bit = 3;
 		end_bit   = 39;
 	}
@@ -852,7 +854,7 @@ static u64 get_error_address(struct mce *m)
 	/*
	 * Erratum 637 workaround
	 */
-	if (c->x86 == 0x15) {
+	if (pvt->fam == 0x15) {
 		struct amd64_pvt *pvt;
 		u64 cc6_base, tmp_addr;
 		u32 tmp;
@@ -916,15 +918,15 @@ static struct pci_dev *pci_get_related_function(unsigned int vendor,
 static void read_dram_base_limit_regs(struct amd64_pvt *pvt, unsigned range)
 {
 	struct amd_northbridge *nb;
-	struct pci_dev *misc, *f1 = NULL;
-	struct cpuinfo_x86 *c = &boot_cpu_data;
+	struct pci_dev *f1 = NULL;
+	unsigned int pci_func;
 	int off = range << 3;
 	u32 llim;
 
 	amd64_read_pci_cfg(pvt->F1, DRAM_BASE_LO + off,  &pvt->ranges[range].base.lo);
 	amd64_read_pci_cfg(pvt->F1, DRAM_LIMIT_LO + off, &pvt->ranges[range].lim.lo);
 
-	if (c->x86 == 0xf)
+	if (pvt->fam == 0xf)
		return;
 
	if (!dram_rw(pvt, range))
@@ -934,15 +936,17 @@ static void read_dram_base_limit_regs(struct amd64_pvt *pvt, unsigned range)
 	amd64_read_pci_cfg(pvt->F1, DRAM_LIMIT_HI + off, &pvt->ranges[range].lim.hi);
 
 	/* F15h: factor in CC6 save area by reading dst node's limit reg */
-	if (c->x86 != 0x15)
+	if (pvt->fam != 0x15)
		return;
 
	nb = node_to_amd_nb(dram_dst_node(pvt, range));
	if (WARN_ON(!nb))
		return;
 
-	misc = nb->misc;
-	f1 = pci_get_related_function(misc->vendor, PCI_DEVICE_ID_AMD_15H_NB_F1, misc);
+	pci_func = (pvt->model == 0x30) ? PCI_DEVICE_ID_AMD_15H_M30H_NB_F1
+					: PCI_DEVICE_ID_AMD_15H_NB_F1;
+
+	f1 = pci_get_related_function(nb->misc->vendor, pci_func, nb->misc);
	if (WARN_ON(!f1))
		return;
 
@@ -1089,7 +1093,7 @@ static int f1x_early_channel_count(struct amd64_pvt *pvt)
 	int i, j, channels = 0;
 
 	/* On F10h, if we are in 128 bit mode, then we are using 2 channels */
-	if (boot_cpu_data.x86 == 0x10 && (pvt->dclr0 & WIDTH_128))
+	if (pvt->fam == 0x10 && (pvt->dclr0 & WIDTH_128))
 		return 2;
 
 	/*
@@ -1173,7 +1177,7 @@ static int f15_dbam_to_chip_select(struct amd64_pvt *pvt, u8 dct,
 }
 
 /*
- * F16h has only limited cs_modes
+ * F16h and F15h model 30h have only limited cs_modes.
 */
 static int f16_dbam_to_chip_select(struct amd64_pvt *pvt, u8 dct,
				   unsigned cs_mode)
@@ -1190,7 +1194,7 @@ static int f16_dbam_to_chip_select(struct amd64_pvt *pvt, u8 dct,
 static void read_dram_ctl_register(struct amd64_pvt *pvt)
 {
 
-	if (boot_cpu_data.x86 == 0xf)
+	if (pvt->fam == 0xf)
		return;
 
	if (!amd64_read_dct_pci_cfg(pvt, DCT_SEL_LO, &pvt->dct_sel_lo)) {
@@ -1218,6 +1222,29 @@ static void read_dram_ctl_register(struct amd64_pvt *pvt)
 }
 
 /*
+ * Determine channel (DCT) based on the interleaving mode (see F15h M30h BKDG,
+ * 2.10.12 Memory Interleaving Modes).
+ */
+static u8 f15_m30h_determine_channel(struct amd64_pvt *pvt, u64 sys_addr,
+				     u8 intlv_en, int num_dcts_intlv,
+				     u32 dct_sel)
+{
+	u8 channel = 0;
+	u8 select;
+
+	if (!(intlv_en))
+		return (u8)(dct_sel);
+
+	if (num_dcts_intlv == 2) {
+		select = (sys_addr >> 8) & 0x3;
+		channel = select ? 0x3 : 0;
+	} else if (num_dcts_intlv == 4)
+		channel = (sys_addr >> 8) & 0x7;
+
+	return channel;
+}
+
+/*
 * Determine channel (DCT) based on the interleaving mode: F10h BKDG, 2.8.9 Memory
 * Interleaving Modes.
 */
@@ -1366,6 +1393,10 @@ static int f1x_lookup_addr_in_dct(u64 in_addr, u8 nid, u8 dct)
			 (in_addr & cs_mask), (cs_base & cs_mask));
 
		if ((in_addr & cs_mask) == (cs_base & cs_mask)) {
+			if (pvt->fam == 0x15 && pvt->model >= 0x30) {
+				cs_found = csrow;
+				break;
+			}
			cs_found = f10_process_possible_spare(pvt, dct, csrow);
 
			edac_dbg(1, " MATCH csrow=%d\n", cs_found);
@@ -1384,11 +1415,9 @@ static u64 f1x_swap_interleaved_region(struct amd64_pvt *pvt, u64 sys_addr)
 {
 	u32 swap_reg, swap_base, swap_limit, rgn_size, tmp_addr;
 
-	if (boot_cpu_data.x86 == 0x10) {
+	if (pvt->fam == 0x10) {
 		/* only revC3 and revE have that feature */
-		if (boot_cpu_data.x86_model < 4 ||
-		    (boot_cpu_data.x86_model < 0xa &&
-		     boot_cpu_data.x86_mask < 3))
+		if (pvt->model < 4 || (pvt->model < 0xa && pvt->stepping < 3))
			return sys_addr;
 	}
 
@@ -1492,20 +1521,143 @@ static int f1x_match_to_this_node(struct amd64_pvt *pvt, unsigned range, | |||
1492 | return cs_found; | 1521 | return cs_found; |
1493 | } | 1522 | } |
1494 | 1523 | ||
1495 | static int f1x_translate_sysaddr_to_cs(struct amd64_pvt *pvt, u64 sys_addr, | 1524 | static int f15_m30h_match_to_this_node(struct amd64_pvt *pvt, unsigned range, |
1496 | int *chan_sel) | 1525 | u64 sys_addr, int *chan_sel) |
1526 | { | ||
1527 | int cs_found = -EINVAL; | ||
1528 | int num_dcts_intlv = 0; | ||
1529 | u64 chan_addr, chan_offset; | ||
1530 | u64 dct_base, dct_limit; | ||
1531 | u32 dct_cont_base_reg, dct_cont_limit_reg, tmp; | ||
1532 | u8 channel, alias_channel, leg_mmio_hole, dct_sel, dct_offset_en; | ||
1533 | |||
1534 | u64 dhar_offset = f10_dhar_offset(pvt); | ||
1535 | u8 intlv_addr = dct_sel_interleave_addr(pvt); | ||
1536 | u8 node_id = dram_dst_node(pvt, range); | ||
1537 | u8 intlv_en = dram_intlv_en(pvt, range); | ||
1538 | |||
1539 | amd64_read_pci_cfg(pvt->F1, DRAM_CONT_BASE, &dct_cont_base_reg); | ||
1540 | amd64_read_pci_cfg(pvt->F1, DRAM_CONT_LIMIT, &dct_cont_limit_reg); | ||
1541 | |||
1542 | dct_offset_en = (u8) ((dct_cont_base_reg >> 3) & BIT(0)); | ||
1543 | dct_sel = (u8) ((dct_cont_base_reg >> 4) & 0x7); | ||
1544 | |||
1545 | edac_dbg(1, "(range %d) SystemAddr= 0x%llx Limit=0x%llx\n", | ||
1546 | range, sys_addr, get_dram_limit(pvt, range)); | ||
1547 | |||
1548 | if (!(get_dram_base(pvt, range) <= sys_addr) && | ||
1549 | !(get_dram_limit(pvt, range) >= sys_addr)) | ||
1550 | return -EINVAL; | ||
1551 | |||
1552 | if (dhar_valid(pvt) && | ||
1553 | dhar_base(pvt) <= sys_addr && | ||
1554 | sys_addr < BIT_64(32)) { | ||
1555 | amd64_warn("Huh? Address is in the MMIO hole: 0x%016llx\n", | ||
1556 | sys_addr); | ||
1557 | return -EINVAL; | ||
1558 | } | ||
1559 | |||
1560 | /* Verify sys_addr is within DCT Range. */ | ||
1561 | dct_base = (u64) dct_sel_baseaddr(pvt); | ||
1562 | dct_limit = (dct_cont_limit_reg >> 11) & 0x1FFF; | ||
1563 | |||
1564 | if (!(dct_cont_base_reg & BIT(0)) && | ||
1565 | !(dct_base <= (sys_addr >> 27) && | ||
1566 | dct_limit >= (sys_addr >> 27))) | ||
1567 | return -EINVAL; | ||
1568 | |||
1569 | /* Verify number of dct's that participate in channel interleaving. */ | ||
1570 | num_dcts_intlv = (int) hweight8(intlv_en); | ||
1571 | |||
1572 | if (!(num_dcts_intlv % 2 == 0) || (num_dcts_intlv > 4)) | ||
1573 | return -EINVAL; | ||
1574 | |||
1575 | channel = f15_m30h_determine_channel(pvt, sys_addr, intlv_en, | ||
1576 | num_dcts_intlv, dct_sel); | ||
1577 | |||
1578 | /* Verify we stay within the MAX number of channels allowed */ | ||
1579 | if (channel > 4 || channel < 0) | ||
1580 | return -EINVAL; | ||
1581 | |||
1582 | leg_mmio_hole = (u8) (dct_cont_base_reg >> 1 & BIT(0)); | ||
1583 | |||
1584 | /* Get normalized DCT addr */ | ||
1585 | if (leg_mmio_hole && (sys_addr >= BIT_64(32))) | ||
1586 | chan_offset = dhar_offset; | ||
1587 | else | ||
1588 | chan_offset = dct_base << 27; | ||
1589 | |||
1590 | chan_addr = sys_addr - chan_offset; | ||
1591 | |||
1592 | /* remove channel interleave */ | ||
1593 | if (num_dcts_intlv == 2) { | ||
1594 | if (intlv_addr == 0x4) | ||
1595 | chan_addr = ((chan_addr >> 9) << 8) | | ||
1596 | (chan_addr & 0xff); | ||
1597 | else if (intlv_addr == 0x5) | ||
1598 | chan_addr = ((chan_addr >> 10) << 9) | | ||
1599 | (chan_addr & 0x1ff); | ||
1600 | else | ||
1601 | return -EINVAL; | ||
1602 | |||
1603 | } else if (num_dcts_intlv == 4) { | ||
1604 | if (intlv_addr == 0x4) | ||
1605 | chan_addr = ((chan_addr >> 10) << 8) | | ||
1606 | (chan_addr & 0xff); | ||
1607 | else if (intlv_addr == 0x5) | ||
1608 | chan_addr = ((chan_addr >> 11) << 9) | | ||
1609 | (chan_addr & 0x1ff); | ||
1610 | else | ||
1611 | return -EINVAL; | ||
1612 | } | ||
1613 | |||
1614 | if (dct_offset_en) { | ||
1615 | amd64_read_pci_cfg(pvt->F1, | ||
1616 | DRAM_CONT_HIGH_OFF + (int) channel * 4, | ||
1617 | &tmp); | ||
1618 | chan_addr += (u64) ((tmp >> 11) & 0xfff) << 27; | ||
1619 | } | ||
1620 | |||
1621 | f15h_select_dct(pvt, channel); | ||
1622 | |||
1623 | edac_dbg(1, " Normalized DCT addr: 0x%llx\n", chan_addr); | ||
1624 | |||
1625 | /* | ||
1626 | * Find Chip select: | ||
1627 | * if channel = 3, then alias it to 1. This is because, in F15 M30h, | ||
1628 | * there is support for 4 DCT's, but only 2 are currently functional. | ||
1629 | * They are DCT0 and DCT3. But we have read all registers of DCT3 into | ||
1630 | * pvt->csels[1]. So we need to use '1' here to get correct info. | ||
1631 | * Refer F15 M30h BKDG Section 2.10 and 2.10.3 for clarifications. | ||
1632 | */ | ||
1633 | alias_channel = (channel == 3) ? 1 : channel; | ||
1634 | |||
1635 | cs_found = f1x_lookup_addr_in_dct(chan_addr, node_id, alias_channel); | ||
1636 | |||
1637 | if (cs_found >= 0) | ||
1638 | *chan_sel = alias_channel; | ||
1639 | |||
1640 | return cs_found; | ||
1641 | } | ||
1642 | |||
1643 | static int f1x_translate_sysaddr_to_cs(struct amd64_pvt *pvt, | ||
1644 | u64 sys_addr, | ||
1645 | int *chan_sel) | ||
1497 | { | 1646 | { |
1498 | int cs_found = -EINVAL; | 1647 | int cs_found = -EINVAL; |
1499 | unsigned range; | 1648 | unsigned range; |
1500 | 1649 | ||
1501 | for (range = 0; range < DRAM_RANGES; range++) { | 1650 | for (range = 0; range < DRAM_RANGES; range++) { |
1502 | |||
1503 | if (!dram_rw(pvt, range)) | 1651 | if (!dram_rw(pvt, range)) |
1504 | continue; | 1652 | continue; |
1505 | 1653 | ||
1506 | if ((get_dram_base(pvt, range) <= sys_addr) && | 1654 | if (pvt->fam == 0x15 && pvt->model >= 0x30) |
1507 | (get_dram_limit(pvt, range) >= sys_addr)) { | 1655 | cs_found = f15_m30h_match_to_this_node(pvt, range, |
1656 | sys_addr, | ||
1657 | chan_sel); | ||
1508 | 1658 | ||
1659 | else if ((get_dram_base(pvt, range) <= sys_addr) && | ||
1660 | (get_dram_limit(pvt, range) >= sys_addr)) { | ||
1509 | cs_found = f1x_match_to_this_node(pvt, range, | 1661 | cs_found = f1x_match_to_this_node(pvt, range, |
1510 | sys_addr, chan_sel); | 1662 | sys_addr, chan_sel); |
1511 | if (cs_found >= 0) | 1663 | if (cs_found >= 0) |
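
Flattened out of the side-by-side view, the updated range walk reads roughly as follows (simplified sketch; exact brace and early-exit placement in the driver may differ):

    for (range = 0; range < DRAM_RANGES; range++) {
            if (!dram_rw(pvt, range))       /* range not enabled */
                    continue;

            if (pvt->fam == 0x15 && pvt->model >= 0x30)
                    /* M30h checks its DCT base/limit internally */
                    cs_found = f15_m30h_match_to_this_node(pvt, range,
                                                           sys_addr, chan_sel);
            else if (get_dram_base(pvt, range) <= sys_addr &&
                     get_dram_limit(pvt, range) >= sys_addr)
                    cs_found = f1x_match_to_this_node(pvt, range,
                                                      sys_addr, chan_sel);

            if (cs_found >= 0)
                    break;
    }
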
@@ -1554,7 +1706,7 @@ static void amd64_debug_display_dimm_sizes(struct amd64_pvt *pvt, u8 ctrl) | |||
1554 | u32 *dcsb = ctrl ? pvt->csels[1].csbases : pvt->csels[0].csbases; | 1706 | u32 *dcsb = ctrl ? pvt->csels[1].csbases : pvt->csels[0].csbases; |
1555 | u32 dbam = ctrl ? pvt->dbam1 : pvt->dbam0; | 1707 | u32 dbam = ctrl ? pvt->dbam1 : pvt->dbam0; |
1556 | 1708 | ||
1557 | if (boot_cpu_data.x86 == 0xf) { | 1709 | if (pvt->fam == 0xf) { |
1558 | /* K8 families < revF not supported yet */ | 1710 | /* K8 families < revF not supported yet */ |
1559 | if (pvt->ext_model < K8_REV_F) | 1711 | if (pvt->ext_model < K8_REV_F) |
1560 | return; | 1712 | return; |
@@ -1624,6 +1776,17 @@ static struct amd64_family_type amd64_family_types[] = { | |||
1624 | .read_dct_pci_cfg = f15_read_dct_pci_cfg, | 1776 | .read_dct_pci_cfg = f15_read_dct_pci_cfg, |
1625 | } | 1777 | } |
1626 | }, | 1778 | }, |
1779 | [F15_M30H_CPUS] = { | ||
1780 | .ctl_name = "F15h_M30h", | ||
1781 | .f1_id = PCI_DEVICE_ID_AMD_15H_M30H_NB_F1, | ||
1782 | .f3_id = PCI_DEVICE_ID_AMD_15H_M30H_NB_F3, | ||
1783 | .ops = { | ||
1784 | .early_channel_count = f1x_early_channel_count, | ||
1785 | .map_sysaddr_to_csrow = f1x_map_sysaddr_to_csrow, | ||
1786 | .dbam_to_cs = f16_dbam_to_chip_select, | ||
1787 | .read_dct_pci_cfg = f15_read_dct_pci_cfg, | ||
1788 | } | ||
1789 | }, | ||
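
The new F15_M30H_CPUS entry reuses f16_dbam_to_chip_select, presumably because M30h encodes DBAM the same way F16h does, while keeping the F15h DCT config-space accessor. The table itself is the usual ops-vtable pattern; a hypothetical miniature (names and signatures invented for illustration, not the driver's actual hooks):

    struct fam_ops {
            int (*cs_size)(int cs_mode);    /* DBAM value -> chip-select size */
    };

    static int f16h_style_cs_size(int cs_mode)
    {
            return 64 << cs_mode;           /* made-up encoding */
    }

    static const struct fam_ops f15_m30h_ops = {
            .cs_size = f16h_style_cs_size,  /* hook shared with F16h */
    };

    /* at init:  ops = &f15_m30h_ops;
     * at use:   size_mb = ops->cs_size(dbam_value); */
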
1627 | [F16_CPUS] = { | 1790 | [F16_CPUS] = { |
1628 | .ctl_name = "F16h", | 1791 | .ctl_name = "F16h", |
1629 | .f1_id = PCI_DEVICE_ID_AMD_16H_NB_F1, | 1792 | .f1_id = PCI_DEVICE_ID_AMD_16H_NB_F1, |
@@ -1860,7 +2023,7 @@ static inline void __amd64_decode_bus_error(struct mem_ctl_info *mci, | |||
1860 | 2023 | ||
1861 | memset(&err, 0, sizeof(err)); | 2024 | memset(&err, 0, sizeof(err)); |
1862 | 2025 | ||
1863 | sys_addr = get_error_address(m); | 2026 | sys_addr = get_error_address(pvt, m); |
1864 | 2027 | ||
1865 | if (ecc_type == 2) | 2028 | if (ecc_type == 2) |
1866 | err.syndrome = extract_syndrome(m->status); | 2029 | err.syndrome = extract_syndrome(m->status); |
@@ -1921,10 +2084,9 @@ static void free_mc_sibling_devs(struct amd64_pvt *pvt) | |||
1921 | */ | 2084 | */ |
1922 | static void read_mc_regs(struct amd64_pvt *pvt) | 2085 | static void read_mc_regs(struct amd64_pvt *pvt) |
1923 | { | 2086 | { |
1924 | struct cpuinfo_x86 *c = &boot_cpu_data; | 2087 | unsigned range; |
1925 | u64 msr_val; | 2088 | u64 msr_val; |
1926 | u32 tmp; | 2089 | u32 tmp; |
1927 | unsigned range; | ||
1928 | 2090 | ||
1929 | /* | 2091 | /* |
1930 | * Retrieve TOP_MEM and TOP_MEM2; no masking off of reserved bits since | 2092 | * Retrieve TOP_MEM and TOP_MEM2; no masking off of reserved bits since |
@@ -1985,14 +2147,14 @@ static void read_mc_regs(struct amd64_pvt *pvt) | |||
1985 | 2147 | ||
1986 | pvt->ecc_sym_sz = 4; | 2148 | pvt->ecc_sym_sz = 4; |
1987 | 2149 | ||
1988 | if (c->x86 >= 0x10) { | 2150 | if (pvt->fam >= 0x10) { |
1989 | amd64_read_pci_cfg(pvt->F3, EXT_NB_MCA_CFG, &tmp); | 2151 | amd64_read_pci_cfg(pvt->F3, EXT_NB_MCA_CFG, &tmp); |
1990 | if (c->x86 != 0x16) | 2152 | if (pvt->fam != 0x16) |
1991 | /* F16h has only DCT0 */ | 2153 | /* F16h has only DCT0 */ |
1992 | amd64_read_dct_pci_cfg(pvt, DBAM1, &pvt->dbam1); | 2154 | amd64_read_dct_pci_cfg(pvt, DBAM1, &pvt->dbam1); |
1993 | 2155 | ||
1994 | /* F10h, revD and later can do x8 ECC too */ | 2156 | /* F10h, revD and later can do x8 ECC too */ |
1995 | if ((c->x86 > 0x10 || c->x86_model > 7) && tmp & BIT(25)) | 2157 | if ((pvt->fam > 0x10 || pvt->model > 7) && tmp & BIT(25)) |
1996 | pvt->ecc_sym_sz = 8; | 2158 | pvt->ecc_sym_sz = 8; |
1997 | } | 2159 | } |
1998 | dump_misc_regs(pvt); | 2160 | dump_misc_regs(pvt); |
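
The reworked symbol-size logic keys off the cached pvt->fam/pvt->model instead of boot_cpu_data. The inner condition reads: any family newer than F10h qualifies, as does F10h with model > 7 (revD and later), and in either case bit 25 of EXT_NB_MCA_CFG must also be set. As a standalone predicate (illustrative restatement only):

    #include <stdbool.h>
    #include <stdint.h>

    /* Restates the x8-ECC-symbol gate from the hunk above. */
    static bool ecc_sym_sz_is_8(uint8_t fam, uint8_t model, uint32_t ext_cfg)
    {
            if (fam < 0x10)
                    return false;           /* pre-F10h: x4 symbols only */
            if (fam == 0x10 && model <= 7)
                    return false;           /* F10h before revD: x4 only */
            return ext_cfg & (1u << 25);    /* BIT(25) enables x8 */
    }
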
@@ -2086,7 +2248,7 @@ static int init_csrows(struct mem_ctl_info *mci) | |||
2086 | bool row_dct0 = !!csrow_enabled(i, 0, pvt); | 2248 | bool row_dct0 = !!csrow_enabled(i, 0, pvt); |
2087 | bool row_dct1 = false; | 2249 | bool row_dct1 = false; |
2088 | 2250 | ||
2089 | if (boot_cpu_data.x86 != 0xf) | 2251 | if (pvt->fam != 0xf) |
2090 | row_dct1 = !!csrow_enabled(i, 1, pvt); | 2252 | row_dct1 = !!csrow_enabled(i, 1, pvt); |
2091 | 2253 | ||
2092 | if (!row_dct0 && !row_dct1) | 2254 | if (!row_dct0 && !row_dct1) |
@@ -2104,7 +2266,7 @@ static int init_csrows(struct mem_ctl_info *mci) | |||
2104 | } | 2266 | } |
2105 | 2267 | ||
2106 | /* K8 has only one DCT */ | 2268 | /* K8 has only one DCT */ |
2107 | if (boot_cpu_data.x86 != 0xf && row_dct1) { | 2269 | if (pvt->fam != 0xf && row_dct1) { |
2108 | int row_dct1_pages = amd64_csrow_nr_pages(pvt, 1, i); | 2270 | int row_dct1_pages = amd64_csrow_nr_pages(pvt, 1, i); |
2109 | 2271 | ||
2110 | csrow->channels[1]->dimm->nr_pages = row_dct1_pages; | 2272 | csrow->channels[1]->dimm->nr_pages = row_dct1_pages; |
@@ -2333,13 +2495,14 @@ static bool ecc_enabled(struct pci_dev *F3, u16 nid) | |||
2333 | 2495 | ||
2334 | static int set_mc_sysfs_attrs(struct mem_ctl_info *mci) | 2496 | static int set_mc_sysfs_attrs(struct mem_ctl_info *mci) |
2335 | { | 2497 | { |
2498 | struct amd64_pvt *pvt = mci->pvt_info; | ||
2336 | int rc; | 2499 | int rc; |
2337 | 2500 | ||
2338 | rc = amd64_create_sysfs_dbg_files(mci); | 2501 | rc = amd64_create_sysfs_dbg_files(mci); |
2339 | if (rc < 0) | 2502 | if (rc < 0) |
2340 | return rc; | 2503 | return rc; |
2341 | 2504 | ||
2342 | if (boot_cpu_data.x86 >= 0x10) { | 2505 | if (pvt->fam >= 0x10) { |
2343 | rc = amd64_create_sysfs_inject_files(mci); | 2506 | rc = amd64_create_sysfs_inject_files(mci); |
2344 | if (rc < 0) | 2507 | if (rc < 0) |
2345 | return rc; | 2508 | return rc; |
@@ -2350,9 +2513,11 @@ static int set_mc_sysfs_attrs(struct mem_ctl_info *mci) | |||
2350 | 2513 | ||
2351 | static void del_mc_sysfs_attrs(struct mem_ctl_info *mci) | 2514 | static void del_mc_sysfs_attrs(struct mem_ctl_info *mci) |
2352 | { | 2515 | { |
2516 | struct amd64_pvt *pvt = mci->pvt_info; | ||
2517 | |||
2353 | amd64_remove_sysfs_dbg_files(mci); | 2518 | amd64_remove_sysfs_dbg_files(mci); |
2354 | 2519 | ||
2355 | if (boot_cpu_data.x86 >= 0x10) | 2520 | if (pvt->fam >= 0x10) |
2356 | amd64_remove_sysfs_inject_files(mci); | 2521 | amd64_remove_sysfs_inject_files(mci); |
2357 | } | 2522 | } |
2358 | 2523 | ||
@@ -2387,10 +2552,14 @@ static void setup_mci_misc_attrs(struct mem_ctl_info *mci, | |||
2387 | */ | 2552 | */ |
2388 | static struct amd64_family_type *amd64_per_family_init(struct amd64_pvt *pvt) | 2553 | static struct amd64_family_type *amd64_per_family_init(struct amd64_pvt *pvt) |
2389 | { | 2554 | { |
2390 | u8 fam = boot_cpu_data.x86; | ||
2391 | struct amd64_family_type *fam_type = NULL; | 2555 | struct amd64_family_type *fam_type = NULL; |
2392 | 2556 | ||
2393 | switch (fam) { | 2557 | pvt->ext_model = boot_cpu_data.x86_model >> 4; |
2558 | pvt->stepping = boot_cpu_data.x86_mask; | ||
2559 | pvt->model = boot_cpu_data.x86_model; | ||
2560 | pvt->fam = boot_cpu_data.x86; | ||
2561 | |||
2562 | switch (pvt->fam) { | ||
2394 | case 0xf: | 2563 | case 0xf: |
2395 | fam_type = &amd64_family_types[K8_CPUS]; | 2564 | fam_type = &amd64_family_types[K8_CPUS]; |
2396 | pvt->ops = &amd64_family_types[K8_CPUS].ops; | 2565 | pvt->ops = &amd64_family_types[K8_CPUS].ops; |
@@ -2402,6 +2571,12 @@ static struct amd64_family_type *amd64_per_family_init(struct amd64_pvt *pvt) | |||
2402 | break; | 2571 | break; |
2403 | 2572 | ||
2404 | case 0x15: | 2573 | case 0x15: |
2574 | if (pvt->model == 0x30) { | ||
2575 | fam_type = &amd64_family_types[F15_M30H_CPUS]; | ||
2576 | pvt->ops = &amd64_family_types[F15_M30H_CPUS].ops; | ||
2577 | break; | ||
2578 | } | ||
2579 | |||
2405 | fam_type = &amd64_family_types[F15_CPUS]; | 2580 | fam_type = &amd64_family_types[F15_CPUS]; |
2406 | pvt->ops = &amd64_family_types[F15_CPUS].ops; | 2581 | pvt->ops = &amd64_family_types[F15_CPUS].ops; |
2407 | break; | 2582 | break; |
@@ -2416,10 +2591,8 @@ static struct amd64_family_type *amd64_per_family_init(struct amd64_pvt *pvt) | |||
2416 | return NULL; | 2591 | return NULL; |
2417 | } | 2592 | } |
2418 | 2593 | ||
2419 | pvt->ext_model = boot_cpu_data.x86_model >> 4; | ||
2420 | |||
2421 | amd64_info("%s %sdetected (node %d).\n", fam_type->ctl_name, | 2594 | amd64_info("%s %sdetected (node %d).\n", fam_type->ctl_name, |
2422 | (fam == 0xf ? | 2595 | (pvt->fam == 0xf ? |
2423 | (pvt->ext_model >= K8_REV_F ? "revF or later " | 2596 | (pvt->ext_model >= K8_REV_F ? "revF or later " |
2424 | : "revE or earlier ") | 2597 | : "revE or earlier ") |
2425 | : ""), pvt->mc_node_id); | 2598 | : ""), pvt->mc_node_id); |
@@ -2470,8 +2643,15 @@ static int amd64_init_one_instance(struct pci_dev *F2) | |||
2470 | layers[0].size = pvt->csels[0].b_cnt; | 2643 | layers[0].size = pvt->csels[0].b_cnt; |
2471 | layers[0].is_virt_csrow = true; | 2644 | layers[0].is_virt_csrow = true; |
2472 | layers[1].type = EDAC_MC_LAYER_CHANNEL; | 2645 | layers[1].type = EDAC_MC_LAYER_CHANNEL; |
2473 | layers[1].size = pvt->channel_count; | 2646 | |
2647 | /* | ||
2648 | * Always allocate two channels since we can have setups with DIMMs on | ||
2649 | * only one channel. Also, this simplifies handling later for the price | ||
2650 | * of a couple of KBs tops. | ||
2651 | */ | ||
2652 | layers[1].size = 2; | ||
2474 | layers[1].is_virt_csrow = false; | 2653 | layers[1].is_virt_csrow = false; |
2654 | |||
2475 | mci = edac_mc_alloc(nid, ARRAY_SIZE(layers), layers, 0); | 2655 | mci = edac_mc_alloc(nid, ARRAY_SIZE(layers), layers, 0); |
2476 | if (!mci) | 2656 | if (!mci) |
2477 | goto err_siblings; | 2657 | goto err_siblings; |
@@ -2579,6 +2759,8 @@ static void amd64_remove_one_instance(struct pci_dev *pdev) | |||
2579 | struct ecc_settings *s = ecc_stngs[nid]; | 2759 | struct ecc_settings *s = ecc_stngs[nid]; |
2580 | 2760 | ||
2581 | mci = find_mci_by_dev(&pdev->dev); | 2761 | mci = find_mci_by_dev(&pdev->dev); |
2762 | WARN_ON(!mci); | ||
2763 | |||
2582 | del_mc_sysfs_attrs(mci); | 2764 | del_mc_sysfs_attrs(mci); |
2583 | /* Remove from EDAC CORE tracking list */ | 2765 | /* Remove from EDAC CORE tracking list */ |
2584 | mci = edac_mc_del_mc(&pdev->dev); | 2766 | mci = edac_mc_del_mc(&pdev->dev); |
@@ -2638,6 +2820,14 @@ static DEFINE_PCI_DEVICE_TABLE(amd64_pci_table) = { | |||
2638 | }, | 2820 | }, |
2639 | { | 2821 | { |
2640 | .vendor = PCI_VENDOR_ID_AMD, | 2822 | .vendor = PCI_VENDOR_ID_AMD, |
2823 | .device = PCI_DEVICE_ID_AMD_15H_M30H_NB_F2, | ||
2824 | .subvendor = PCI_ANY_ID, | ||
2825 | .subdevice = PCI_ANY_ID, | ||
2826 | .class = 0, | ||
2827 | .class_mask = 0, | ||
2828 | }, | ||
2829 | { | ||
2830 | .vendor = PCI_VENDOR_ID_AMD, | ||
2641 | .device = PCI_DEVICE_ID_AMD_16H_NB_F2, | 2831 | .device = PCI_DEVICE_ID_AMD_16H_NB_F2, |
2642 | .subvendor = PCI_ANY_ID, | 2832 | .subvendor = PCI_ANY_ID, |
2643 | .subdevice = PCI_ANY_ID, | 2833 | .subdevice = PCI_ANY_ID, |
diff --git a/drivers/edac/amd64_edac.h b/drivers/edac/amd64_edac.h index 2c6f113bae2b..d2443cfa0698 100644 --- a/drivers/edac/amd64_edac.h +++ b/drivers/edac/amd64_edac.h | |||
@@ -170,6 +170,8 @@ | |||
170 | /* | 170 | /* |
171 | * PCI-defined configuration space registers | 171 | * PCI-defined configuration space registers |
172 | */ | 172 | */ |
173 | #define PCI_DEVICE_ID_AMD_15H_M30H_NB_F1 0x141b | ||
174 | #define PCI_DEVICE_ID_AMD_15H_M30H_NB_F2 0x141c | ||
173 | #define PCI_DEVICE_ID_AMD_15H_NB_F1 0x1601 | 175 | #define PCI_DEVICE_ID_AMD_15H_NB_F1 0x1601 |
174 | #define PCI_DEVICE_ID_AMD_15H_NB_F2 0x1602 | 176 | #define PCI_DEVICE_ID_AMD_15H_NB_F2 0x1602 |
175 | #define PCI_DEVICE_ID_AMD_16H_NB_F1 0x1531 | 177 | #define PCI_DEVICE_ID_AMD_16H_NB_F1 0x1531 |
@@ -181,13 +183,22 @@ | |||
181 | #define DRAM_BASE_LO 0x40 | 183 | #define DRAM_BASE_LO 0x40 |
182 | #define DRAM_LIMIT_LO 0x44 | 184 | #define DRAM_LIMIT_LO 0x44 |
183 | 185 | ||
184 | #define dram_intlv_en(pvt, i) ((u8)((pvt->ranges[i].base.lo >> 8) & 0x7)) | 186 | /* |
187 | * F15 M30h D18F1x2[1C:00] | ||
188 | */ | ||
189 | #define DRAM_CONT_BASE 0x200 | ||
190 | #define DRAM_CONT_LIMIT 0x204 | ||
191 | |||
192 | /* | ||
193 | * F15 M30h D18F1x2[4C:40] | ||
194 | */ | ||
195 | #define DRAM_CONT_HIGH_OFF 0x240 | ||
196 | |||
185 | #define dram_rw(pvt, i) ((u8)(pvt->ranges[i].base.lo & 0x3)) | 197 | #define dram_rw(pvt, i) ((u8)(pvt->ranges[i].base.lo & 0x3)) |
186 | #define dram_intlv_sel(pvt, i) ((u8)((pvt->ranges[i].lim.lo >> 8) & 0x7)) | 198 | #define dram_intlv_sel(pvt, i) ((u8)((pvt->ranges[i].lim.lo >> 8) & 0x7)) |
187 | #define dram_dst_node(pvt, i) ((u8)(pvt->ranges[i].lim.lo & 0x7)) | 199 | #define dram_dst_node(pvt, i) ((u8)(pvt->ranges[i].lim.lo & 0x7)) |
188 | 200 | ||
189 | #define DHAR 0xf0 | 201 | #define DHAR 0xf0 |
190 | #define dhar_valid(pvt) ((pvt)->dhar & BIT(0)) | ||
191 | #define dhar_mem_hoist_valid(pvt) ((pvt)->dhar & BIT(1)) | 202 | #define dhar_mem_hoist_valid(pvt) ((pvt)->dhar & BIT(1)) |
192 | #define dhar_base(pvt) ((pvt)->dhar & 0xff000000) | 203 | #define dhar_base(pvt) ((pvt)->dhar & 0xff000000) |
193 | #define k8_dhar_offset(pvt) (((pvt)->dhar & 0x0000ff00) << 16) | 204 | #define k8_dhar_offset(pvt) (((pvt)->dhar & 0x0000ff00) << 16) |
@@ -234,8 +245,6 @@ | |||
234 | #define DDR3_MODE BIT(8) | 245 | #define DDR3_MODE BIT(8) |
235 | 246 | ||
236 | #define DCT_SEL_LO 0x110 | 247 | #define DCT_SEL_LO 0x110 |
237 | #define dct_sel_baseaddr(pvt) ((pvt)->dct_sel_lo & 0xFFFFF800) | ||
238 | #define dct_sel_interleave_addr(pvt) (((pvt)->dct_sel_lo >> 6) & 0x3) | ||
239 | #define dct_high_range_enabled(pvt) ((pvt)->dct_sel_lo & BIT(0)) | 248 | #define dct_high_range_enabled(pvt) ((pvt)->dct_sel_lo & BIT(0)) |
240 | #define dct_interleave_enabled(pvt) ((pvt)->dct_sel_lo & BIT(2)) | 249 | #define dct_interleave_enabled(pvt) ((pvt)->dct_sel_lo & BIT(2)) |
241 | 250 | ||
@@ -297,6 +306,7 @@ enum amd_families { | |||
297 | K8_CPUS = 0, | 306 | K8_CPUS = 0, |
298 | F10_CPUS, | 307 | F10_CPUS, |
299 | F15_CPUS, | 308 | F15_CPUS, |
309 | F15_M30H_CPUS, | ||
300 | F16_CPUS, | 310 | F16_CPUS, |
301 | NUM_FAMILIES, | 311 | NUM_FAMILIES, |
302 | }; | 312 | }; |
@@ -337,6 +347,10 @@ struct amd64_pvt { | |||
337 | struct pci_dev *F1, *F2, *F3; | 347 | struct pci_dev *F1, *F2, *F3; |
338 | 348 | ||
339 | u16 mc_node_id; /* MC index of this MC node */ | 349 | u16 mc_node_id; /* MC index of this MC node */ |
350 | u8 fam; /* CPU family */ | ||
351 | u8 model; /* ... model */ | ||
352 | u8 stepping; /* ... stepping */ | ||
353 | |||
340 | int ext_model; /* extended model value of this node */ | 354 | int ext_model; /* extended model value of this node */ |
341 | int channel_count; | 355 | int channel_count; |
342 | 356 | ||
@@ -414,6 +428,14 @@ static inline u16 extract_syndrome(u64 status) | |||
414 | return ((status >> 47) & 0xff) | ((status >> 16) & 0xff00); | 428 | return ((status >> 47) & 0xff) | ((status >> 16) & 0xff00); |
415 | } | 429 | } |
416 | 430 | ||
431 | static inline u8 dct_sel_interleave_addr(struct amd64_pvt *pvt) | ||
432 | { | ||
433 | if (pvt->fam == 0x15 && pvt->model >= 0x30) | ||
434 | return (((pvt->dct_sel_hi >> 9) & 0x1) << 2) | | ||
435 | ((pvt->dct_sel_lo >> 6) & 0x3); | ||
436 | |||
437 | return (pvt->dct_sel_lo >> 6) & 0x3; | ||
438 | } | ||
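
On F15h M30h the DCT interleave-address field is three bits wide and split across two registers: bit 2 comes from DctSelHi bit 9, bits 1:0 from DctSelLo bits 7:6. A worked example of the assembly (register values invented for illustration):

    uint32_t dct_sel_hi = 1u << 9;  /* field bit 2 set */
    uint32_t dct_sel_lo = 1u << 6;  /* field bits 1:0 = 0b01 */
    uint8_t intlv = (((dct_sel_hi >> 9) & 0x1) << 2) |
                    ((dct_sel_lo >> 6) & 0x3);
    /* intlv == 0x5: the channel select hashes on address bit 9, matching
     * the intlv_addr == 0x5 case in the chan_addr splicing earlier. */
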
417 | /* | 439 | /* |
418 | * per-node ECC settings descriptor | 440 | * per-node ECC settings descriptor |
419 | */ | 441 | */ |
@@ -504,3 +526,33 @@ static inline void enable_caches(void *dummy) | |||
504 | { | 526 | { |
505 | write_cr0(read_cr0() & ~X86_CR0_CD); | 527 | write_cr0(read_cr0() & ~X86_CR0_CD); |
506 | } | 528 | } |
529 | |||
530 | static inline u8 dram_intlv_en(struct amd64_pvt *pvt, unsigned int i) | ||
531 | { | ||
532 | if (pvt->fam == 0x15 && pvt->model >= 0x30) { | ||
533 | u32 tmp; | ||
534 | amd64_read_pci_cfg(pvt->F1, DRAM_CONT_LIMIT, &tmp); | ||
535 | return (u8) tmp & 0xF; | ||
536 | } | ||
537 | return (u8) (pvt->ranges[i].base.lo >> 8) & 0x7; | ||
538 | } | ||
539 | |||
540 | static inline u8 dhar_valid(struct amd64_pvt *pvt) | ||
541 | { | ||
542 | if (pvt->fam == 0x15 && pvt->model >= 0x30) { | ||
543 | u32 tmp; | ||
544 | amd64_read_pci_cfg(pvt->F1, DRAM_CONT_BASE, &tmp); | ||
545 | return (tmp >> 1) & BIT(0); | ||
546 | } | ||
547 | return pvt->dhar & BIT(0); | ||
548 | } | ||
549 | |||
550 | static inline u32 dct_sel_baseaddr(struct amd64_pvt *pvt) | ||
551 | { | ||
552 | if (pvt->fam == 0x15 && pvt->model >= 0x30) { | ||
553 | u32 tmp; | ||
554 | amd64_read_pci_cfg(pvt->F1, DRAM_CONT_BASE, &tmp); | ||
555 | return (tmp >> 11) & 0x1FFF; | ||
556 | } | ||
557 | return pvt->dct_sel_lo & 0xFFFFF800; | ||
558 | } | ||
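
These three helpers replace the old macros because F15h M30h moved the fields into new F1 config-space registers (DRAM_CONT_BASE/DRAM_CONT_LIMIT), so the value must be fetched from hardware at call time on that part. Call sites keep the same shape; for example, together with the DHAR macros that remain:

    /* Usage is unchanged after the macro-to-inline conversion: */
    if (dhar_valid(pvt) && dhar_mem_hoist_valid(pvt))
            base = dhar_base(pvt);

Note that on M30h each of these calls now issues a PCI config read, so hot paths would do well to cache the result rather than re-query.
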
diff --git a/drivers/edac/cpc925_edac.c b/drivers/edac/cpc925_edac.c index 7f3c57113ba1..df6575f1430d 100644 --- a/drivers/edac/cpc925_edac.c +++ b/drivers/edac/cpc925_edac.c | |||
@@ -789,7 +789,7 @@ static struct cpc925_dev_info cpc925_devs[] = { | |||
789 | .exit = cpc925_htlink_exit, | 789 | .exit = cpc925_htlink_exit, |
790 | .check = cpc925_htlink_check, | 790 | .check = cpc925_htlink_check, |
791 | }, | 791 | }, |
792 | {0}, /* Terminated by NULL */ | 792 | { } |
793 | }; | 793 | }; |
794 | 794 | ||
795 | /* | 795 | /* |
diff --git a/drivers/edac/edac_mc_sysfs.c b/drivers/edac/edac_mc_sysfs.c index e7c32c4f7837..9f7e0e609516 100644 --- a/drivers/edac/edac_mc_sysfs.c +++ b/drivers/edac/edac_mc_sysfs.c | |||
@@ -58,8 +58,10 @@ static int edac_set_poll_msec(const char *val, struct kernel_param *kp) | |||
58 | if (!val) | 58 | if (!val) |
59 | return -EINVAL; | 59 | return -EINVAL; |
60 | 60 | ||
61 | ret = strict_strtol(val, 0, &l); | 61 | ret = kstrtol(val, 0, &l); |
62 | if (ret == -EINVAL || ((int)l != l)) | 62 | if (ret) |
63 | return ret; | ||
64 | if ((int)l != l) | ||
63 | return -EINVAL; | 65 | return -EINVAL; |
64 | *((int *)kp->arg) = l; | 66 | *((int *)kp->arg) = l; |
65 | 67 | ||
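
Unlike strict_strtol, kstrtol distinguishes a malformed string (-EINVAL) from an out-of-range value (-ERANGE), so its return code can be propagated as-is; the remaining cast test only narrows long to int. The resulting idiom, sketched standalone (kstrtoint, which folds in the int-range check, would arguably be simpler still):

    long l;
    int ret = kstrtol(val, 0, &l);  /* base 0: auto-detect 0x/0 prefixes */

    if (ret)                        /* -EINVAL or -ERANGE from kstrtol */
            return ret;
    if ((int)l != l)                /* fits in long but not in int */
            return -EINVAL;
    *((int *)kp->arg) = l;
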
diff --git a/drivers/edac/i3200_edac.c b/drivers/edac/i3200_edac.c index aa44c1718f50..be10a74b16ea 100644 --- a/drivers/edac/i3200_edac.c +++ b/drivers/edac/i3200_edac.c | |||
@@ -260,8 +260,7 @@ static void i3200_check(struct mem_ctl_info *mci) | |||
260 | i3200_process_error_info(mci, &info); | 260 | i3200_process_error_info(mci, &info); |
261 | } | 261 | } |
262 | 262 | ||
263 | 263 | static void __iomem *i3200_map_mchbar(struct pci_dev *pdev) | |
264 | void __iomem *i3200_map_mchbar(struct pci_dev *pdev) | ||
265 | { | 264 | { |
266 | union { | 265 | union { |
267 | u64 mchbar; | 266 | u64 mchbar; |
diff --git a/drivers/edac/x38_edac.c b/drivers/edac/x38_edac.c index c9db24d95caa..1a4df82376ba 100644 --- a/drivers/edac/x38_edac.c +++ b/drivers/edac/x38_edac.c | |||
@@ -248,8 +248,7 @@ static void x38_check(struct mem_ctl_info *mci) | |||
248 | x38_process_error_info(mci, &info); | 248 | x38_process_error_info(mci, &info); |
249 | } | 249 | } |
250 | 250 | ||
251 | 251 | static void __iomem *x38_map_mchbar(struct pci_dev *pdev) | |
252 | void __iomem *x38_map_mchbar(struct pci_dev *pdev) | ||
253 | { | 252 | { |
254 | union { | 253 | union { |
255 | u64 mchbar; | 254 | u64 mchbar; |
diff --git a/include/linux/mm.h b/include/linux/mm.h index f0224608d15e..d2d59b4149d0 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h | |||
@@ -1798,6 +1798,7 @@ enum mf_flags { | |||
1798 | MF_COUNT_INCREASED = 1 << 0, | 1798 | MF_COUNT_INCREASED = 1 << 0, |
1799 | MF_ACTION_REQUIRED = 1 << 1, | 1799 | MF_ACTION_REQUIRED = 1 << 1, |
1800 | MF_MUST_KILL = 1 << 2, | 1800 | MF_MUST_KILL = 1 << 2, |
1801 | MF_SOFT_OFFLINE = 1 << 3, | ||
1801 | }; | 1802 | }; |
1802 | extern int memory_failure(unsigned long pfn, int trapno, int flags); | 1803 | extern int memory_failure(unsigned long pfn, int trapno, int flags); |
1803 | extern void memory_failure_queue(unsigned long pfn, int trapno, int flags); | 1804 | extern void memory_failure_queue(unsigned long pfn, int trapno, int flags); |
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h index 6dec3d6abe0b..bc95b2b391bf 100644 --- a/include/linux/pci_ids.h +++ b/include/linux/pci_ids.h | |||
@@ -518,6 +518,8 @@ | |||
518 | #define PCI_DEVICE_ID_AMD_11H_NB_MISC 0x1303 | 518 | #define PCI_DEVICE_ID_AMD_11H_NB_MISC 0x1303 |
519 | #define PCI_DEVICE_ID_AMD_11H_NB_LINK 0x1304 | 519 | #define PCI_DEVICE_ID_AMD_11H_NB_LINK 0x1304 |
520 | #define PCI_DEVICE_ID_AMD_15H_M10H_F3 0x1403 | 520 | #define PCI_DEVICE_ID_AMD_15H_M10H_F3 0x1403 |
521 | #define PCI_DEVICE_ID_AMD_15H_M30H_NB_F3 0x141d | ||
522 | #define PCI_DEVICE_ID_AMD_15H_M30H_NB_F4 0x141e | ||
521 | #define PCI_DEVICE_ID_AMD_15H_NB_F0 0x1600 | 523 | #define PCI_DEVICE_ID_AMD_15H_NB_F0 0x1600 |
522 | #define PCI_DEVICE_ID_AMD_15H_NB_F1 0x1601 | 524 | #define PCI_DEVICE_ID_AMD_15H_NB_F1 0x1601 |
523 | #define PCI_DEVICE_ID_AMD_15H_NB_F2 0x1602 | 525 | #define PCI_DEVICE_ID_AMD_15H_NB_F2 0x1602 |
diff --git a/mm/memory-failure.c b/mm/memory-failure.c index 2c13aa7a0164..55d7c8026ab0 100644 --- a/mm/memory-failure.c +++ b/mm/memory-failure.c | |||
@@ -1286,7 +1286,10 @@ static void memory_failure_work_func(struct work_struct *work) | |||
1286 | spin_unlock_irqrestore(&mf_cpu->lock, proc_flags); | 1286 | spin_unlock_irqrestore(&mf_cpu->lock, proc_flags); |
1287 | if (!gotten) | 1287 | if (!gotten) |
1288 | break; | 1288 | break; |
1289 | memory_failure(entry.pfn, entry.trapno, entry.flags); | 1289 | if (entry.flags & MF_SOFT_OFFLINE) |
1290 | soft_offline_page(pfn_to_page(entry.pfn), entry.flags); | ||
1291 | else | ||
1292 | memory_failure(entry.pfn, entry.trapno, entry.flags); | ||
1290 | } | 1293 | } |
1291 | } | 1294 | } |
1292 | 1295 | ||
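
With MF_SOFT_OFFLINE, code running where soft_offline_page() cannot be called directly (e.g. an error-decoding interrupt path) can defer the work through the existing queue, and the work function above routes on the flag. A sketch of the producer side (caller context assumed, hypothetical):

    /* Ask for a soft offline of 'pfn' from atomic context; trapno is
     * unused on the soft-offline path, per the dispatch above. */
    memory_failure_queue(pfn, 0 /* trapno */, MF_SOFT_OFFLINE);
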