 arch/x86/kernel/cpu/mcheck/mce.c |   1
 drivers/edac/Kconfig             |   8
 drivers/edac/amd64_edac.c        | 213
 drivers/edac/amd64_edac.h        |  48
 drivers/edac/e752x_edac.c        |   4
 drivers/edac/edac_core.h         |  17
 drivers/edac/edac_mc_sysfs.c     |  86
 drivers/edac/edac_mce_amd.c      |  16
 drivers/edac/i5100_edac.c        |   7
 9 files changed, 121 insertions(+), 279 deletions(-)
diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
index 18cc42562250..1970ef911c99 100644
--- a/arch/x86/kernel/cpu/mcheck/mce.c
+++ b/arch/x86/kernel/cpu/mcheck/mce.c
@@ -600,6 +600,7 @@ void machine_check_poll(enum mcp_flags flags, mce_banks_t *b)
 		 */
 		if (!(flags & MCP_DONTLOG) && !mce_dont_log_ce) {
 			mce_log(&m);
+			atomic_notifier_call_chain(&x86_mce_decoder_chain, 0, &m);
 			add_taint(TAINT_MACHINE_CHECK);
 		}
 
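This one-liner is what lets out-of-band decoders see polled corrected errors: every struct mce logged here is now broadcast on x86_mce_decoder_chain, which is how the EDAC-side decoding below gets invoked without a polling callback. A minimal sketch of a chain consumer (callback name and message are illustrative, not this patch's code):

#include <linux/notifier.h>
#include <linux/kernel.h>
#include <asm/mce.h>

/* Hypothetical consumer; the real one lives in drivers/edac. */
static int example_mce_notify(struct notifier_block *nb, unsigned long val,
			      void *data)
{
	struct mce *m = data;

	pr_info("decoding MCE: bank %d, status 0x%016llx\n",
		m->bank, (unsigned long long)m->status);
	return NOTIFY_STOP;
}

static struct notifier_block example_mce_nb = {
	.notifier_call = example_mce_notify,
};

/* registered once at module init:
 *	atomic_notifier_chain_register(&x86_mce_decoder_chain, &example_mce_nb);
 * and torn down at module exit:
 *	atomic_notifier_chain_unregister(&x86_mce_decoder_chain, &example_mce_nb);
 */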
diff --git a/drivers/edac/Kconfig b/drivers/edac/Kconfig
index 0d2f9dbb47e4..70bb350de996 100644
--- a/drivers/edac/Kconfig
+++ b/drivers/edac/Kconfig
@@ -39,14 +39,6 @@ config EDAC_DEBUG
 	  there're four debug levels (x=0,1,2,3 from low to high).
 	  Usually you should select 'N'.
 
-config EDAC_DEBUG_VERBOSE
-	bool "More verbose debugging"
-	depends on EDAC_DEBUG
-	help
-	  This option makes debugging information more verbose.
-	  Source file name and line number where debugging message
-	  printed will be added to debugging message.
-
 config EDAC_DECODE_MCE
 	tristate "Decode MCEs in human-readable form (only on AMD for now)"
 	depends on CPU_SUP_AMD && X86_MCE
diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c
index ac9f7985096d..670239ab7511 100644
--- a/drivers/edac/amd64_edac.c
+++ b/drivers/edac/amd64_edac.c
@@ -160,7 +160,7 @@ static int amd64_search_set_scrub_rate(struct pci_dev *ctl, u32 new_bw,
 	return 0;
 }
 
-static int amd64_set_scrub_rate(struct mem_ctl_info *mci, u32 *bandwidth)
+static int amd64_set_scrub_rate(struct mem_ctl_info *mci, u32 bandwidth)
 {
 	struct amd64_pvt *pvt = mci->pvt_info;
 	u32 min_scrubrate = 0x0;
@@ -178,10 +178,10 @@ static int amd64_set_scrub_rate(struct mem_ctl_info *mci, u32 *bandwidth)
 
 	default:
 		amd64_printk(KERN_ERR, "Unsupported family!\n");
-		break;
+		return -EINVAL;
 	}
-	return amd64_search_set_scrub_rate(pvt->misc_f3_ctl, *bandwidth,
+	return amd64_search_set_scrub_rate(pvt->misc_f3_ctl, bandwidth,
 					   min_scrubrate);
 }
 
 static int amd64_get_scrub_rate(struct mem_ctl_info *mci, u32 *bw)
@@ -796,6 +796,11 @@ static int sys_addr_to_csrow(struct mem_ctl_info *mci, u64 sys_addr)
 
 static int get_channel_from_ecc_syndrome(struct mem_ctl_info *, u16);
 
+static u16 extract_syndrome(struct err_regs *err)
+{
+	return ((err->nbsh >> 15) & 0xff) | ((err->nbsl >> 16) & 0xff00);
+}
+
 static void amd64_cpu_display_info(struct amd64_pvt *pvt)
 {
 	if (boot_cpu_data.x86 == 0x11)
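extract_syndrome() centralizes the 16-bit syndrome assembly that was previously open-coded with HIGH_SYNDROME()/LOW_SYNDROME() in two places. Per the layout it assumes, syndrome[7:0] lives in NBSH[22:15] and syndrome[15:8] in NBSL[31:24]. A standalone check of the packing (register values made up):

#include <stdint.h>
#include <stdio.h>

static uint16_t extract_syndrome(uint32_t nbsh, uint32_t nbsl)
{
	return ((nbsh >> 15) & 0xff) | ((nbsl >> 16) & 0xff00);
}

int main(void)
{
	/* plant 0x5a in NBSH[22:15] and 0xc3 in NBSL[31:24] */
	uint32_t nbsh = 0x5au << 15;
	uint32_t nbsl = 0xc3u << 24;

	/* prints: syndrome = 0xc35a */
	printf("syndrome = 0x%04x\n", extract_syndrome(nbsh, nbsl));
	return 0;
}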
@@ -888,6 +893,9 @@ static void amd64_dump_misc_regs(struct amd64_pvt *pvt)
 		return;
 	}
 
+	amd64_printk(KERN_INFO, "using %s syndromes.\n",
+		     ((pvt->syn_type == 8) ? "x8" : "x4"));
+
 	/* Only if NOT ganged does dclr1 have valid info */
 	if (!dct_ganging_enabled(pvt))
 		amd64_dump_dramcfg_low(pvt->dclr1, 1);
@@ -1101,20 +1109,17 @@ static void k8_read_dram_base_limit(struct amd64_pvt *pvt, int dram)
 }
 
 static void k8_map_sysaddr_to_csrow(struct mem_ctl_info *mci,
-				    struct err_regs *info,
-				    u64 sys_addr)
+				    struct err_regs *err_info, u64 sys_addr)
 {
 	struct mem_ctl_info *src_mci;
-	unsigned short syndrome;
 	int channel, csrow;
 	u32 page, offset;
+	u16 syndrome;
 
-	/* Extract the syndrome parts and form a 16-bit syndrome */
-	syndrome = HIGH_SYNDROME(info->nbsl) << 8;
-	syndrome |= LOW_SYNDROME(info->nbsh);
+	syndrome = extract_syndrome(err_info);
 
 	/* CHIPKILL enabled */
-	if (info->nbcfg & K8_NBCFG_CHIPKILL) {
+	if (err_info->nbcfg & K8_NBCFG_CHIPKILL) {
 		channel = get_channel_from_ecc_syndrome(mci, syndrome);
 		if (channel < 0) {
 			/*
@@ -1123,8 +1128,8 @@ static void k8_map_sysaddr_to_csrow(struct mem_ctl_info *mci,
 			 * as suspect.
 			 */
 			amd64_mc_printk(mci, KERN_WARNING,
-					"unknown syndrome 0x%x - possible error "
-					"reporting race\n", syndrome);
+					"unknown syndrome 0x%04x - possible "
+					"error reporting race\n", syndrome);
 			edac_mc_handle_ce_no_info(mci, EDAC_MOD_STR);
 			return;
 		}
@@ -1430,7 +1435,7 @@ static inline u64 f10_get_base_addr_offset(u64 sys_addr, int hi_range_sel,
 	u64 chan_off;
 
 	if (hi_range_sel) {
-		if (!(dct_sel_base_addr & 0xFFFFF800) &&
+		if (!(dct_sel_base_addr & 0xFFFF0000) &&
 		    hole_valid && (sys_addr >= 0x100000000ULL))
 			chan_off = hole_off << 16;
 		else
@@ -1654,13 +1659,13 @@ static int f10_translate_sysaddr_to_cs(struct amd64_pvt *pvt, u64 sys_addr,
  * (MCX_ADDR).
  */
 static void f10_map_sysaddr_to_csrow(struct mem_ctl_info *mci,
-				     struct err_regs *info,
+				     struct err_regs *err_info,
 				     u64 sys_addr)
 {
 	struct amd64_pvt *pvt = mci->pvt_info;
 	u32 page, offset;
-	unsigned short syndrome;
 	int nid, csrow, chan = 0;
+	u16 syndrome;
 
 	csrow = f10_translate_sysaddr_to_cs(pvt, sys_addr, &nid, &chan);
 
@@ -1671,15 +1676,14 @@ static void f10_map_sysaddr_to_csrow(struct mem_ctl_info *mci,
 
 	error_address_to_page_and_offset(sys_addr, &page, &offset);
 
-	syndrome = HIGH_SYNDROME(info->nbsl) << 8;
-	syndrome |= LOW_SYNDROME(info->nbsh);
+	syndrome = extract_syndrome(err_info);
 
 	/*
 	 * We need the syndromes for channel detection only when we're
 	 * ganged. Otherwise @chan should already contain the channel at
 	 * this point.
 	 */
-	if (dct_ganging_enabled(pvt) && pvt->nbcfg & K8_NBCFG_CHIPKILL)
+	if (dct_ganging_enabled(pvt) && (pvt->nbcfg & K8_NBCFG_CHIPKILL))
 		chan = get_channel_from_ecc_syndrome(mci, syndrome);
 
 	if (chan >= 0)
@@ -1878,7 +1882,7 @@ static u16 x8_vectors[] = {
 };
 
 static int decode_syndrome(u16 syndrome, u16 *vectors, int num_vecs,
-			   int v_dim)
+				 int v_dim)
 {
 	unsigned int i, err_sym;
 
@@ -1955,124 +1959,23 @@ static int map_err_sym_to_channel(int err_sym, int sym_size)
 static int get_channel_from_ecc_syndrome(struct mem_ctl_info *mci, u16 syndrome)
 {
 	struct amd64_pvt *pvt = mci->pvt_info;
-	u32 value = 0;
-	int err_sym = 0;
-
-	if (boot_cpu_data.x86 == 0x10) {
-
-		amd64_read_pci_cfg(pvt->misc_f3_ctl, 0x180, &value);
-
-		/* F3x180[EccSymbolSize]=1 => x8 symbols */
-		if (boot_cpu_data.x86_model > 7 &&
-		    value & BIT(25)) {
-			err_sym = decode_syndrome(syndrome, x8_vectors,
-						  ARRAY_SIZE(x8_vectors), 8);
-			return map_err_sym_to_channel(err_sym, 8);
-		}
-	}
-	err_sym = decode_syndrome(syndrome, x4_vectors, ARRAY_SIZE(x4_vectors), 4);
-	return map_err_sym_to_channel(err_sym, 4);
-}
-
-/*
- * Check for valid error in the NB Status High register. If so, proceed to read
- * NB Status Low, NB Address Low and NB Address High registers and store data
- * into error structure.
- *
- * Returns:
- *	- 1: if hardware regs contains valid error info
- *	- 0: if no valid error is indicated
- */
-static int amd64_get_error_info_regs(struct mem_ctl_info *mci,
-				     struct err_regs *regs)
-{
-	struct amd64_pvt *pvt;
-	struct pci_dev *misc_f3_ctl;
-
-	pvt = mci->pvt_info;
-	misc_f3_ctl = pvt->misc_f3_ctl;
-
-	if (amd64_read_pci_cfg(misc_f3_ctl, K8_NBSH, &regs->nbsh))
-		return 0;
-
-	if (!(regs->nbsh & K8_NBSH_VALID_BIT))
-		return 0;
-
-	/* valid error, read remaining error information registers */
-	if (amd64_read_pci_cfg(misc_f3_ctl, K8_NBSL, &regs->nbsl) ||
-	    amd64_read_pci_cfg(misc_f3_ctl, K8_NBEAL, &regs->nbeal) ||
-	    amd64_read_pci_cfg(misc_f3_ctl, K8_NBEAH, &regs->nbeah) ||
-	    amd64_read_pci_cfg(misc_f3_ctl, K8_NBCFG, &regs->nbcfg))
-		return 0;
-
-	return 1;
-}
-
-/*
- * This function is called to retrieve the error data from hardware and store it
- * in the info structure.
- *
- * Returns:
- *	- 1: if a valid error is found
- *	- 0: if no error is found
- */
-static int amd64_get_error_info(struct mem_ctl_info *mci,
-				struct err_regs *info)
-{
-	struct amd64_pvt *pvt;
-	struct err_regs regs;
-
-	pvt = mci->pvt_info;
-
-	if (!amd64_get_error_info_regs(mci, info))
-		return 0;
-
-	/*
-	 * Here's the problem with the K8's EDAC reporting: There are four
-	 * registers which report pieces of error information. They are shared
-	 * between CEs and UEs. Furthermore, contrary to what is stated in the
-	 * BKDG, the overflow bit is never used! Every error always updates the
-	 * reporting registers.
-	 *
-	 * Can you see the race condition? All four error reporting registers
-	 * must be read before a new error updates them! There is no way to read
-	 * all four registers atomically. The best than can be done is to detect
-	 * that a race has occured and then report the error without any kind of
-	 * precision.
-	 *
-	 * What is still positive is that errors are still reported and thus
-	 * problems can still be detected - just not localized because the
-	 * syndrome and address are spread out across registers.
-	 *
-	 * Grrrrr!!!!! Here's hoping that AMD fixes this in some future K8 rev.
-	 * UEs and CEs should have separate register sets with proper overflow
-	 * bits that are used! At very least the problem can be fixed by
-	 * honoring the ErrValid bit in 'nbsh' and not updating registers - just
-	 * set the overflow bit - unless the current error is CE and the new
-	 * error is UE which would be the only situation for overwriting the
-	 * current values.
-	 */
-
-	regs = *info;
-
-	/* Use info from the second read - most current */
-	if (unlikely(!amd64_get_error_info_regs(mci, info)))
-		return 0;
+	int err_sym = -1;
+
+	if (pvt->syn_type == 8)
+		err_sym = decode_syndrome(syndrome, x8_vectors,
+					  ARRAY_SIZE(x8_vectors),
+					  pvt->syn_type);
+	else if (pvt->syn_type == 4)
+		err_sym = decode_syndrome(syndrome, x4_vectors,
+					  ARRAY_SIZE(x4_vectors),
+					  pvt->syn_type);
+	else {
+		amd64_printk(KERN_WARNING, "%s: Illegal syndrome type: %u\n",
+			     __func__, pvt->syn_type);
+		return err_sym;
+	}
 
-	/* clear the error bits in hardware */
-	pci_write_bits32(pvt->misc_f3_ctl, K8_NBSH, 0, K8_NBSH_VALID_BIT);
-
-	/* Check for the possible race condition */
-	if ((regs.nbsh != info->nbsh) ||
-	    (regs.nbsl != info->nbsl) ||
-	    (regs.nbeah != info->nbeah) ||
-	    (regs.nbeal != info->nbeal)) {
-		amd64_mc_printk(mci, KERN_WARNING,
-				"hardware STATUS read access race condition "
-				"detected!\n");
-		return 0;
-	}
-	return 1;
+	return map_err_sym_to_channel(err_sym, pvt->syn_type);
 }
 
 /*
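The big comment deleted above explains why polled reads of the four shared NB error registers can never be atomic. The retired amd64_get_error_info() coped by reading everything twice and only trusting matching snapshots; here is that consistency pattern reduced to a standalone sketch (struct and values are hypothetical):

#include <stdio.h>
#include <string.h>

struct err_snapshot {
	unsigned int nbsh, nbsl, nbeah, nbeal;
};

/* Stand-in for the hardware register reads. */
static void read_nb_regs(struct err_snapshot *s)
{
	s->nbsh  = 0x80000042;
	s->nbsl  = 0x1000;
	s->nbeah = 0;
	s->nbeal = 0x0dead000;
}

/* Read twice; trust the data only if both snapshots agree, i.e. no new
 * error overwrote the shared registers between the two reads. */
static int read_consistent(struct err_snapshot *out)
{
	struct err_snapshot first;

	read_nb_regs(&first);
	read_nb_regs(out);

	return memcmp(&first, out, sizeof(first)) == 0;
}

int main(void)
{
	struct err_snapshot s;

	printf("consistent read: %s\n", read_consistent(&s) ? "yes" : "no");
	return 0;
}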
@@ -2177,7 +2080,7 @@ static inline void __amd64_decode_bus_error(struct mem_ctl_info *mci,
 	 * catastrophic.
 	 */
 	if (info->nbsh & K8_NBSH_OVERFLOW)
-		edac_mc_handle_ce_no_info(mci, EDAC_MOD_STR "Error Overflow");
+		edac_mc_handle_ce_no_info(mci, EDAC_MOD_STR " Error Overflow");
 }
 
 void amd64_decode_bus_error(int node_id, struct err_regs *regs)
@@ -2199,20 +2102,6 @@ void amd64_decode_bus_error(int node_id, struct err_regs *regs)
 }
 
 /*
- * The main polling 'check' function, called FROM the edac core to perform the
- * error checking and if an error is encountered, error processing.
- */
-static void amd64_check(struct mem_ctl_info *mci)
-{
-	struct err_regs regs;
-
-	if (amd64_get_error_info(mci, &regs)) {
-		struct amd64_pvt *pvt = mci->pvt_info;
-		amd_decode_nb_mce(pvt->mc_node_id, &regs, 1);
-	}
-}
-
-/*
  * Input:
  *	1) struct amd64_pvt which contains pvt->dram_f2_ctl pointer
  *	2) AMD Family index value
@@ -2284,6 +2173,7 @@ static void amd64_free_mc_sibling_devices(struct amd64_pvt *pvt)
 static void amd64_read_mc_registers(struct amd64_pvt *pvt)
 {
 	u64 msr_val;
+	u32 tmp;
 	int dram;
 
 	/*
@@ -2349,10 +2239,22 @@ static void amd64_read_mc_registers(struct amd64_pvt *pvt)
 	amd64_read_pci_cfg(pvt->dram_f2_ctl, F10_DCLR_0, &pvt->dclr0);
 	amd64_read_pci_cfg(pvt->dram_f2_ctl, F10_DCHR_0, &pvt->dchr0);
 
-	if (!dct_ganging_enabled(pvt) && boot_cpu_data.x86 >= 0x10) {
-		amd64_read_pci_cfg(pvt->dram_f2_ctl, F10_DCLR_1, &pvt->dclr1);
-		amd64_read_pci_cfg(pvt->dram_f2_ctl, F10_DCHR_1, &pvt->dchr1);
+	if (boot_cpu_data.x86 >= 0x10) {
+		if (!dct_ganging_enabled(pvt)) {
+			amd64_read_pci_cfg(pvt->dram_f2_ctl, F10_DCLR_1, &pvt->dclr1);
+			amd64_read_pci_cfg(pvt->dram_f2_ctl, F10_DCHR_1, &pvt->dchr1);
+		}
+		amd64_read_pci_cfg(pvt->misc_f3_ctl, EXT_NB_MCA_CFG, &tmp);
 	}
 
+	if (boot_cpu_data.x86 == 0x10 &&
+	    boot_cpu_data.x86_model > 7 &&
+	    /* F3x180[EccSymbolSize]=1 => x8 symbols */
+	    tmp & BIT(25))
+		pvt->syn_type = 8;
+	else
+		pvt->syn_type = 4;
+
 	amd64_dump_misc_regs(pvt);
 }
 
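With EXT_NB_MCA_CFG (F3x180) read once at init, the ECC symbol size becomes a cached property instead of a per-error PCI config read in the syndrome decoder. The decision itself, lifted out as a pure function (inputs made up for illustration):

#include <stdio.h>

static unsigned int syndrome_symbol_size(unsigned int family,
					 unsigned int model,
					 unsigned int f3x180)
{
	/* F3x180[25] (EccSymbolSize) == 1 selects x8 symbols on F10h, models > 7 */
	if (family == 0x10 && model > 7 && (f3x180 & (1u << 25)))
		return 8;
	return 4;
}

int main(void)
{
	printf("F10h model 8, bit set: x%u\n", syndrome_symbol_size(0x10, 8, 1u << 25));
	printf("F10h model 2:          x%u\n", syndrome_symbol_size(0x10, 2, 1u << 25));
	printf("K8:                    x%u\n", syndrome_symbol_size(0x0f, 0, 0));
	return 0;
}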
@@ -2739,9 +2641,6 @@ static void amd64_setup_mci_misc_attributes(struct mem_ctl_info *mci)
 	mci->dev_name = pci_name(pvt->dram_f2_ctl);
 	mci->ctl_page_to_phys = NULL;
 
-	/* IMPORTANT: Set the polling 'check' function in this module */
-	mci->edac_check = amd64_check;
-
 	/* memory scrubber interface */
 	mci->set_sdram_scrub_rate = amd64_set_scrub_rate;
 	mci->get_sdram_scrub_rate = amd64_get_scrub_rate;
diff --git a/drivers/edac/amd64_edac.h b/drivers/edac/amd64_edac.h
index 0d4bf5638243..613b9381e71a 100644
--- a/drivers/edac/amd64_edac.h
+++ b/drivers/edac/amd64_edac.h
@@ -244,44 +244,17 @@
 
 
 #define F10_DCTL_SEL_LOW		0x110
-
-#define dct_sel_baseaddr(pvt) \
-	((pvt->dram_ctl_select_low) & 0xFFFFF800)
-
-#define dct_sel_interleave_addr(pvt) \
-	(((pvt->dram_ctl_select_low) >> 6) & 0x3)
-
-enum {
-	F10_DCTL_SEL_LOW_DctSelHiRngEn	= BIT(0),
-	F10_DCTL_SEL_LOW_DctSelIntLvEn	= BIT(2),
-	F10_DCTL_SEL_LOW_DctGangEn	= BIT(4),
-	F10_DCTL_SEL_LOW_DctDatIntLv	= BIT(5),
-	F10_DCTL_SEL_LOW_DramEnable	= BIT(8),
-	F10_DCTL_SEL_LOW_MemCleared	= BIT(10),
-};
-
-#define dct_high_range_enabled(pvt) \
-	(pvt->dram_ctl_select_low & F10_DCTL_SEL_LOW_DctSelHiRngEn)
-
-#define dct_interleave_enabled(pvt) \
-	(pvt->dram_ctl_select_low & F10_DCTL_SEL_LOW_DctSelIntLvEn)
-
-#define dct_ganging_enabled(pvt) \
-	(pvt->dram_ctl_select_low & F10_DCTL_SEL_LOW_DctGangEn)
-
-#define dct_data_intlv_enabled(pvt) \
-	(pvt->dram_ctl_select_low & F10_DCTL_SEL_LOW_DctDatIntLv)
-
-#define dct_dram_enabled(pvt) \
-	(pvt->dram_ctl_select_low & F10_DCTL_SEL_LOW_DramEnable)
-
-#define dct_memory_cleared(pvt) \
-	(pvt->dram_ctl_select_low & F10_DCTL_SEL_LOW_MemCleared)
-
+#define dct_sel_baseaddr(pvt)		((pvt->dram_ctl_select_low) & 0xFFFFF800)
+#define dct_sel_interleave_addr(pvt)	(((pvt->dram_ctl_select_low) >> 6) & 0x3)
+#define dct_high_range_enabled(pvt)	(pvt->dram_ctl_select_low & BIT(0))
+#define dct_interleave_enabled(pvt)	(pvt->dram_ctl_select_low & BIT(2))
+#define dct_ganging_enabled(pvt)	(pvt->dram_ctl_select_low & BIT(4))
+#define dct_data_intlv_enabled(pvt)	(pvt->dram_ctl_select_low & BIT(5))
+#define dct_dram_enabled(pvt)		(pvt->dram_ctl_select_low & BIT(8))
+#define dct_memory_cleared(pvt)		(pvt->dram_ctl_select_low & BIT(10))
 
 #define F10_DCTL_SEL_HIGH		0x114
 
-
 /*
  * Function 3 - Misc Control
  */
@@ -382,6 +355,8 @@ enum {
 #define K8_NBCAP_SECDED			BIT(3)
 #define K8_NBCAP_DCT_DUAL		BIT(0)
 
+#define EXT_NB_MCA_CFG			0x180
+
 /* MSRs */
 #define K8_MSR_MCGCTL_NBE		BIT(4)
 
@@ -471,6 +446,9 @@ struct amd64_pvt {
 	u32 dram_ctl_select_high;	/* DRAM Controller Select High Reg */
 	u32 online_spare;		/* On-Line spare Reg */
 
+	/* x4 or x8 syndromes in use */
+	u8 syn_type;
+
 	/* temp storage for when input is received from sysfs */
 	struct err_regs ctl_error_info;
 
diff --git a/drivers/edac/e752x_edac.c b/drivers/edac/e752x_edac.c
index ae3f80c54198..073f5a06d238 100644
--- a/drivers/edac/e752x_edac.c
+++ b/drivers/edac/e752x_edac.c
@@ -958,7 +958,7 @@ static void e752x_check(struct mem_ctl_info *mci)
 }
 
 /* Program byte/sec bandwidth scrub rate to hardware */
-static int set_sdram_scrub_rate(struct mem_ctl_info *mci, u32 *new_bw)
+static int set_sdram_scrub_rate(struct mem_ctl_info *mci, u32 new_bw)
 {
 	const struct scrubrate *scrubrates;
 	struct e752x_pvt *pvt = (struct e752x_pvt *) mci->pvt_info;
@@ -975,7 +975,7 @@ static int set_sdram_scrub_rate(struct mem_ctl_info *mci, u32 *new_bw)
 	 * desired rate and program the cooresponding register value.
 	 */
 	for (i = 0; scrubrates[i].bandwidth != SDRATE_EOT; i++)
-		if (scrubrates[i].bandwidth >= *new_bw)
+		if (scrubrates[i].bandwidth >= new_bw)
 			break;
 
 	if (scrubrates[i].bandwidth == SDRATE_EOT)
diff --git a/drivers/edac/edac_core.h b/drivers/edac/edac_core.h
index efca9343d26a..ce7146677e9b 100644
--- a/drivers/edac/edac_core.h
+++ b/drivers/edac/edac_core.h
@@ -49,21 +49,15 @@
 #define edac_printk(level, prefix, fmt, arg...) \
 	printk(level "EDAC " prefix ": " fmt, ##arg)
 
-#define edac_printk_verbose(level, prefix, fmt, arg...) \
-	printk(level "EDAC " prefix ": " "in %s, line at %d: " fmt, \
-	       __FILE__, __LINE__, ##arg)
-
 #define edac_mc_printk(mci, level, fmt, arg...) \
 	printk(level "EDAC MC%d: " fmt, mci->mc_idx, ##arg)
 
 #define edac_mc_chipset_printk(mci, level, prefix, fmt, arg...) \
 	printk(level "EDAC " prefix " MC%d: " fmt, mci->mc_idx, ##arg)
 
-/* edac_device printk */
 #define edac_device_printk(ctl, level, fmt, arg...) \
 	printk(level "EDAC DEVICE%d: " fmt, ctl->dev_idx, ##arg)
 
-/* edac_pci printk */
 #define edac_pci_printk(ctl, level, fmt, arg...) \
 	printk(level "EDAC PCI%d: " fmt, ctl->pci_idx, ##arg)
 
@@ -76,21 +70,12 @@
 extern int edac_debug_level;
 extern const char *edac_mem_types[];
 
-#ifndef CONFIG_EDAC_DEBUG_VERBOSE
 #define edac_debug_printk(level, fmt, arg...)                    \
 	do {                                                     \
 		if (level <= edac_debug_level)                   \
 			edac_printk(KERN_DEBUG, EDAC_DEBUG,      \
 				    "%s: " fmt, __func__, ##arg); \
 	} while (0)
-#else  /* CONFIG_EDAC_DEBUG_VERBOSE */
-#define edac_debug_printk(level, fmt, arg...)                    \
-	do {                                                     \
-		if (level <= edac_debug_level)                   \
-			edac_printk_verbose(KERN_DEBUG, EDAC_DEBUG, fmt, \
-					    ##arg);              \
-	} while (0)
-#endif
 
 #define debugf0( ... ) edac_debug_printk(0, __VA_ARGS__ )
 #define debugf1( ... ) edac_debug_printk(1, __VA_ARGS__ )
@@ -393,7 +378,7 @@ struct mem_ctl_info {
 	   internal representation and configures whatever else needs
 	   to be configured.
 	 */
-	int (*set_sdram_scrub_rate) (struct mem_ctl_info * mci, u32 * bw);
+	int (*set_sdram_scrub_rate) (struct mem_ctl_info * mci, u32 bw);
 
 	/* Get the current sdram memory scrub rate from the internal
 	   representation and converts it to the closest matching
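Taken together with the driver-side changes, the scrub-rate ops in struct mem_ctl_info now have an asymmetric but simpler contract: set takes the requested byte/s bandwidth by value and returns 0 or a negative errno, while get still reports through a u32 pointer. A sketch of what a driver wires up (the example_* names and the bandwidth ceiling are hypothetical):

#include "edac_core.h"

#define EXAMPLE_MAX_BW	163840000u	/* hypothetical hardware limit, bytes/s */

/* Set: bandwidth arrives by value; failure is a negative errno. */
static int example_set_sdram_scrub_rate(struct mem_ctl_info *mci, u32 bw)
{
	if (bw > EXAMPLE_MAX_BW)
		return -EINVAL;

	/* ... program the chipset scrubber here ... */
	return 0;
}

/* Get: still returns the current rate through *bw. */
static int example_get_sdram_scrub_rate(struct mem_ctl_info *mci, u32 *bw)
{
	*bw = EXAMPLE_MAX_BW;	/* pretend the scrubber runs flat out */
	return 0;
}

/* wired up at probe time, as amd64_edac does:
 *	mci->set_sdram_scrub_rate = example_set_sdram_scrub_rate;
 *	mci->get_sdram_scrub_rate = example_get_sdram_scrub_rate;
 */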
diff --git a/drivers/edac/edac_mc_sysfs.c b/drivers/edac/edac_mc_sysfs.c
index c200c2fd43ea..8aad94d10c0c 100644
--- a/drivers/edac/edac_mc_sysfs.c
+++ b/drivers/edac/edac_mc_sysfs.c
@@ -124,19 +124,6 @@ static const char *edac_caps[] = {
 	[EDAC_S16ECD16ED] = "S16ECD16ED"
 };
 
-
-
-static ssize_t memctrl_int_store(void *ptr, const char *buffer, size_t count)
-{
-	int *value = (int *)ptr;
-
-	if (isdigit(*buffer))
-		*value = simple_strtoul(buffer, NULL, 0);
-
-	return count;
-}
-
-
 /* EDAC sysfs CSROW data structures and methods
  */
 
@@ -450,53 +437,54 @@ static ssize_t mci_reset_counters_store(struct mem_ctl_info *mci,
 
 /* memory scrubbing */
 static ssize_t mci_sdram_scrub_rate_store(struct mem_ctl_info *mci,
 					  const char *data, size_t count)
 {
-	u32 bandwidth = -1;
+	unsigned long bandwidth = 0;
+	int err;
 
-	if (mci->set_sdram_scrub_rate) {
+	if (!mci->set_sdram_scrub_rate) {
+		edac_printk(KERN_WARNING, EDAC_MC,
+			    "Memory scrub rate setting not implemented!\n");
+		return -EINVAL;
+	}
 
-		memctrl_int_store(&bandwidth, data, count);
+	if (strict_strtoul(data, 10, &bandwidth) < 0)
+		return -EINVAL;
 
-		if (!(*mci->set_sdram_scrub_rate) (mci, &bandwidth)) {
-			edac_printk(KERN_DEBUG, EDAC_MC,
-				    "Scrub rate set successfully, applied: %d\n",
-				    bandwidth);
-		} else {
-			/* FIXME: error codes maybe? */
-			edac_printk(KERN_DEBUG, EDAC_MC,
-				    "Scrub rate set FAILED, could not apply: %d\n",
-				    bandwidth);
-		}
-	} else {
-		/* FIXME: produce "not implemented" ERROR for user-side. */
-		edac_printk(KERN_WARNING, EDAC_MC,
-			    "Memory scrubbing 'set'control is not implemented!\n");
+	err = mci->set_sdram_scrub_rate(mci, (u32)bandwidth);
+	if (err) {
+		edac_printk(KERN_DEBUG, EDAC_MC,
+			    "Failed setting scrub rate to %lu\n", bandwidth);
+		return -EINVAL;
+	}
+	else {
+		edac_printk(KERN_DEBUG, EDAC_MC,
+			    "Scrub rate set to: %lu\n", bandwidth);
+		return count;
 	}
-	return count;
 }
 
 static ssize_t mci_sdram_scrub_rate_show(struct mem_ctl_info *mci, char *data)
 {
-	u32 bandwidth = -1;
+	u32 bandwidth = 0;
+	int err;
 
-	if (mci->get_sdram_scrub_rate) {
-		if (!(*mci->get_sdram_scrub_rate) (mci, &bandwidth)) {
-			edac_printk(KERN_DEBUG, EDAC_MC,
-				    "Scrub rate successfully, fetched: %d\n",
-				    bandwidth);
-		} else {
-			/* FIXME: error codes maybe? */
-			edac_printk(KERN_DEBUG, EDAC_MC,
-				    "Scrub rate fetch FAILED, got: %d\n",
-				    bandwidth);
-		}
-	} else {
-		/* FIXME: produce "not implemented" ERROR for user-side. */
+	if (!mci->get_sdram_scrub_rate) {
 		edac_printk(KERN_WARNING, EDAC_MC,
-			    "Memory scrubbing 'get' control is not implemented\n");
+			    "Memory scrub rate reading not implemented\n");
+		return -EINVAL;
+	}
+
+	err = mci->get_sdram_scrub_rate(mci, &bandwidth);
+	if (err) {
+		edac_printk(KERN_DEBUG, EDAC_MC, "Error reading scrub rate\n");
+		return err;
+	}
+	else {
+		edac_printk(KERN_DEBUG, EDAC_MC,
+			    "Read scrub rate: %d\n", bandwidth);
+		return sprintf(data, "%d\n", bandwidth);
 	}
-	return sprintf(data, "%d\n", bandwidth);
 }
 
 /* default attribute files for the MCI object */
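The store path above also swaps memctrl_int_store()'s isdigit()/simple_strtoul() parsing for strict_strtoul(), which fails on trailing garbage and overflow instead of silently accepting a numeric prefix. A userspace analog of the distinction (parse_bandwidth() is illustrative, not kernel code):

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

static int parse_bandwidth(const char *s, unsigned long *out)
{
	char *end;

	errno = 0;
	*out = strtoul(s, &end, 10);
	/* strict: the whole string (bar a trailing newline) must be numeric */
	if (errno || end == s || (*end != '\0' && *end != '\n'))
		return -EINVAL;
	return 0;
}

int main(void)
{
	unsigned long bw;

	printf("\"163840\" -> %d\n", parse_bandwidth("163840", &bw));	/* 0 */
	printf("\"123abc\" -> %d\n", parse_bandwidth("123abc", &bw));	/* -EINVAL */
	return 0;
}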
diff --git a/drivers/edac/edac_mce_amd.c b/drivers/edac/edac_mce_amd.c
index 97e64bcdbc06..bae9351e9473 100644
--- a/drivers/edac/edac_mce_amd.c
+++ b/drivers/edac/edac_mce_amd.c
@@ -133,7 +133,7 @@ static void amd_decode_dc_mce(u64 mc0_status)
 	u32 ec  = mc0_status & 0xffff;
 	u32 xec = (mc0_status >> 16) & 0xf;
 
-	pr_emerg(" Data Cache Error");
+	pr_emerg("Data Cache Error");
 
 	if (xec == 1 && TLB_ERROR(ec))
 		pr_cont(": %s TLB multimatch.\n", LL_MSG(ec));
@@ -176,7 +176,7 @@ static void amd_decode_ic_mce(u64 mc1_status)
 	u32 ec  = mc1_status & 0xffff;
 	u32 xec = (mc1_status >> 16) & 0xf;
 
-	pr_emerg(" Instruction Cache Error");
+	pr_emerg("Instruction Cache Error");
 
 	if (xec == 1 && TLB_ERROR(ec))
 		pr_cont(": %s TLB multimatch.\n", LL_MSG(ec));
@@ -233,7 +233,7 @@ static void amd_decode_bu_mce(u64 mc2_status)
 	u32 ec  = mc2_status & 0xffff;
 	u32 xec = (mc2_status >> 16) & 0xf;
 
-	pr_emerg(" Bus Unit Error");
+	pr_emerg("Bus Unit Error");
 
 	if (xec == 0x1)
 		pr_cont(" in the write data buffers.\n");
@@ -275,7 +275,7 @@ static void amd_decode_ls_mce(u64 mc3_status)
 	u32 ec  = mc3_status & 0xffff;
 	u32 xec = (mc3_status >> 16) & 0xf;
 
-	pr_emerg(" Load Store Error");
+	pr_emerg("Load Store Error");
 
 	if (xec == 0x0) {
 		u8 rrrr = (ec >> 4) & 0xf;
@@ -304,7 +304,7 @@ void amd_decode_nb_mce(int node_id, struct err_regs *regs, int handle_errors)
 	if (TLB_ERROR(ec) && !report_gart_errors)
 		return;
 
-	pr_emerg(" Northbridge Error, node %d", node_id);
+	pr_emerg("Northbridge Error, node %d", node_id);
 
 	/*
 	 * F10h, revD can disable ErrCpu[3:0] so check that first and also the
@@ -342,13 +342,13 @@ static void amd_decode_fr_mce(u64 mc5_status)
 static inline void amd_decode_err_code(unsigned int ec)
 {
 	if (TLB_ERROR(ec)) {
-		pr_emerg(" Transaction: %s, Cache Level %s\n",
+		pr_emerg("Transaction: %s, Cache Level %s\n",
 			 TT_MSG(ec), LL_MSG(ec));
 	} else if (MEM_ERROR(ec)) {
-		pr_emerg(" Transaction: %s, Type: %s, Cache Level: %s",
+		pr_emerg("Transaction: %s, Type: %s, Cache Level: %s",
 			 RRRR_MSG(ec), TT_MSG(ec), LL_MSG(ec));
 	} else if (BUS_ERROR(ec)) {
-		pr_emerg(" Transaction type: %s(%s), %s, Cache Level: %s, "
+		pr_emerg("Transaction type: %s(%s), %s, Cache Level: %s, "
 			 "Participating Processor: %s\n",
 			 RRRR_MSG(ec), II_MSG(ec), TO_MSG(ec), LL_MSG(ec),
 			 PP_MSG(ec));
diff --git a/drivers/edac/i5100_edac.c b/drivers/edac/i5100_edac.c
index ee9753cf362c..f459a6c0886b 100644
--- a/drivers/edac/i5100_edac.c
+++ b/drivers/edac/i5100_edac.c
@@ -589,14 +589,13 @@ static void i5100_refresh_scrubbing(struct work_struct *work)
 /*
  * The bandwidth is based on experimentation, feel free to refine it.
  */
-static int i5100_set_scrub_rate(struct mem_ctl_info *mci,
-				u32 *bandwidth)
+static int i5100_set_scrub_rate(struct mem_ctl_info *mci, u32 bandwidth)
 {
 	struct i5100_priv *priv = mci->pvt_info;
 	u32 dw;
 
 	pci_read_config_dword(priv->mc, I5100_MC, &dw);
-	if (*bandwidth) {
+	if (bandwidth) {
 		priv->scrub_enable = 1;
 		dw |= I5100_MC_SCRBEN_MASK;
 		schedule_delayed_work(&(priv->i5100_scrubbing),
@@ -610,7 +609,7 @@ static int i5100_set_scrub_rate(struct mem_ctl_info *mci,
 
 	pci_read_config_dword(priv->mc, I5100_MC, &dw);
 
-	*bandwidth = 5900000 * i5100_mc_scrben(dw);
+	bandwidth = 5900000 * i5100_mc_scrben(dw);
 
 	return 0;
 }
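One subtlety in the i5100 conversion: with bandwidth now passed by value, the final assignment bandwidth = 5900000 * i5100_mc_scrben(dw); writes to the function's local copy and is invisible to the caller; under the new contract only the return code travels back. The difference in miniature:

#include <stdio.h>

static void set_by_value(unsigned int bw)
{
	bw = 5900000;		/* dead store: the caller never sees this */
}

static void set_by_pointer(unsigned int *bw)
{
	*bw = 5900000;		/* visible to the caller */
}

int main(void)
{
	unsigned int a = 0, b = 0;

	set_by_value(a);
	set_by_pointer(&b);
	printf("by value: %u, by pointer: %u\n", a, b);	/* 0, 5900000 */
	return 0;
}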
