author     Borislav Petkov <borislav.petkov@amd.com>  2010-10-07 12:29:15 -0400
committer  Borislav Petkov <borislav.petkov@amd.com>  2011-01-07 05:33:56 -0500
commit     24f9a7fe3f19f3fd310f556364d01a22911724b3
tree       4fa8aff338bd58d42fc95b930f29a39b91bacedd /drivers/edac
parent     8d5b5d9c7b86e44fda29a367db3ccd2815a52f7c
amd64_edac: Rework printk macros
Add a macro per printk level, shorten up error messages. Add relevant
information to KERN_INFO level. No functional change.
Signed-off-by: Borislav Petkov <borislav.petkov@amd.com>
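In practice the rework boils down to wrapping edac_printk()/edac_mc_chipset_printk() once per severity so that call sites stop passing a KERN_* level by hand. A rough, self-contained user-space sketch of that pattern follows; printf() and the local KERN_* strings are stand-ins for the kernel's printk machinery, not the real definitions, and the actual macros are the ones added to amd64_edac.h in the diff below:

#include <stdio.h>

/* Stand-ins for the kernel's severity prefixes and edac_printk(). */
#define KERN_WARNING "<4>"
#define KERN_ERR     "<3>"
#define edac_printk(level, prefix, fmt, arg...) \
        printf(level "EDAC " prefix ": " fmt, ##arg)

/* One wrapper per severity, as the patch does for "amd64": the level is
 * baked into the macro name, so call sites no longer pass it. */
#define amd64_warn(fmt, arg...) \
        edac_printk(KERN_WARNING, "amd64", fmt, ##arg)
#define amd64_err(fmt, arg...) \
        edac_printk(KERN_ERR, "amd64", fmt, ##arg)

int main(void)
{
        /* before: amd64_printk(KERN_WARNING, "Forcing ECC checking on!\n"); */
        amd64_warn("Forcing ECC on!\n");
        amd64_err("Unsupported family!\n");
        return 0;
}

Each call site shrinks to one line and the severity can no longer drift out of sync with the message text, which is what most of the hunks below amount to.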
Diffstat (limited to 'drivers/edac')
-rw-r--r--  drivers/edac/amd64_edac.c      | 145
-rw-r--r--  drivers/edac/amd64_edac.h      |  29
-rw-r--r--  drivers/edac/amd64_edac_inj.c  |  13
-rw-r--r--  drivers/edac/edac_core.h       |   3
-rw-r--r--  drivers/edac/edac_mc.c         |   4
5 files changed, 87 insertions(+), 107 deletions(-)
diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c
index 26fb0f962605..84c565d4f56b 100644
--- a/drivers/edac/amd64_edac.c
+++ b/drivers/edac/amd64_edac.c
@@ -62,7 +62,7 @@ static int ddr3_dbam[] = { [0] = -1,
                         [5 ... 6] = 1024,
                         [7 ... 8] = 2048,
                         [9 ... 10] = 4096,
                         [11] = 8192,
 };

 /*
@@ -148,11 +148,10 @@ static int __amd64_set_scrub_rate(struct pci_dev *ctl, u32 new_bw, u32 min_rate)

         scrubval = scrubrates[i].scrubval;
         if (scrubval)
-                edac_printk(KERN_DEBUG, EDAC_MC,
-                            "Setting scrub rate bandwidth: %u\n",
-                            scrubrates[i].bandwidth);
+                amd64_info("Setting scrub rate bandwidth: %u\n",
+                           scrubrates[i].bandwidth);
         else
-                edac_printk(KERN_DEBUG, EDAC_MC, "Turning scrubbing off.\n");
+                amd64_info("Turning scrubbing off.\n");

         pci_write_bits32(ctl, K8_SCRCTRL, scrubval, 0x001F);

@@ -176,8 +175,7 @@ static int amd64_get_scrub_rate(struct mem_ctl_info *mci, u32 *bw)

         scrubval = scrubval & 0x001F;

-        edac_printk(KERN_DEBUG, EDAC_MC,
-                    "pci-read, sdram scrub control value: %d \n", scrubval);
+        amd64_debug("pci-read, sdram scrub control value: %d\n", scrubval);

         for (i = 0; i < ARRAY_SIZE(scrubrates); i++) {
                 if (scrubrates[i].scrubval == scrubval) {
@@ -296,9 +294,7 @@ static struct mem_ctl_info *find_mc_by_sys_addr(struct mem_ctl_info *mci,
         if (unlikely((intlv_en != 0x01) &&
                      (intlv_en != 0x03) &&
                      (intlv_en != 0x07))) {
-                amd64_printk(KERN_WARNING, "junk value of 0x%x extracted from "
-                             "IntlvEn field of DRAM Base Register for node 0: "
-                             "this probably indicates a BIOS bug.\n", intlv_en);
+                amd64_warn("DRAM Base[IntlvEn] junk value: 0x%x, BIOS bug?\n", intlv_en);
                 return NULL;
         }

@@ -314,11 +310,9 @@ static struct mem_ctl_info *find_mc_by_sys_addr(struct mem_ctl_info *mci,

         /* sanity test for sys_addr */
         if (unlikely(!amd64_base_limit_match(pvt, sys_addr, node_id))) {
-                amd64_printk(KERN_WARNING,
-                             "%s(): sys_addr 0x%llx falls outside base/limit "
-                             "address range for node %d with node interleaving "
-                             "enabled.\n",
-                             __func__, sys_addr, node_id);
+                amd64_warn("%s: sys_addr 0x%llx falls outside base/limit address"
+                           "range for node %d with node interleaving enabled.\n",
+                           __func__, sys_addr, node_id);
                 return NULL;
         }

@@ -770,9 +764,8 @@ static int sys_addr_to_csrow(struct mem_ctl_info *mci, u64 sys_addr)
         csrow = input_addr_to_csrow(mci, sys_addr_to_input_addr(mci, sys_addr));

         if (csrow == -1)
-                amd64_mc_printk(mci, KERN_ERR,
-                                "Failed to translate InputAddr to csrow for "
-                                "address 0x%lx\n", (unsigned long)sys_addr);
+                amd64_mc_err(mci, "Failed to translate InputAddr to csrow for "
+                                  "address 0x%lx\n", (unsigned long)sys_addr);
         return csrow;
 }

@@ -860,8 +853,7 @@ static void amd64_dump_misc_regs(struct amd64_pvt *pvt)
                 return;
         }

-        amd64_printk(KERN_INFO, "using %s syndromes.\n",
-                     ((pvt->syn_type == 8) ? "x8" : "x4"));
+        amd64_info("using %s syndromes.\n", ((pvt->syn_type == 8) ? "x8" : "x4"));

         /* Only if NOT ganged does dclr1 have valid info */
         if (!dct_ganging_enabled(pvt))
@@ -983,7 +975,7 @@ static void amd64_read_dct_base_mask(struct amd64_pvt *pvt)
         }
 }

-static enum mem_type amd64_determine_memory_type(struct amd64_pvt *pvt)
+static enum mem_type amd64_determine_memory_type(struct amd64_pvt *pvt, int cs)
 {
         enum mem_type type;

@@ -996,7 +988,7 @@ static enum mem_type amd64_determine_memory_type(struct amd64_pvt *pvt)
                 type = (pvt->dclr0 & BIT(18)) ? MEM_DDR : MEM_RDDR;
         }

-        debugf1("  Memory type is: %s\n", edac_mem_types[type]);
+        amd64_info("CS%d: %s\n", cs, edac_mem_types[type]);

         return type;
 }
@@ -1087,9 +1079,8 @@ static void k8_map_sysaddr_to_csrow(struct mem_ctl_info *mci,
                  * 2 DIMMs is in error. So we need to ID 'both' of them
                  * as suspect.
                  */
-                amd64_mc_printk(mci, KERN_WARNING,
-                                "unknown syndrome 0x%04x - possible "
-                                "error reporting race\n", syndrome);
+                amd64_mc_warn(mci, "unknown syndrome 0x%04x - possible "
+                                   "error reporting race\n", syndrome);
                 edac_mc_handle_ce_no_info(mci, EDAC_MOD_STR);
                 return;
         }
@@ -1111,8 +1102,7 @@ static void k8_map_sysaddr_to_csrow(struct mem_ctl_info *mci,
          */
         src_mci = find_mc_by_sys_addr(mci, sys_addr);
         if (!src_mci) {
-                amd64_mc_printk(mci, KERN_ERR,
-                             "failed to map error address 0x%lx to a node\n",
+                amd64_mc_err(mci, "failed to map error addr 0x%lx to a node\n",
                              (unsigned long)sys_addr);
                 edac_mc_handle_ce_no_info(mci, EDAC_MOD_STR);
                 return;
@@ -1194,7 +1184,7 @@ static int f10_early_channel_count(struct amd64_pvt *pvt)
         if (channels > 2)
                 channels = 2;

-        debugf0("MCT channel count: %d\n", channels);
+        amd64_info("MCT channel count: %d\n", channels);

         return channels;

@@ -1698,9 +1688,9 @@ static void amd64_debug_display_dimm_sizes(int ctrl, struct amd64_pvt *pvt)
                 if (dcsb[dimm*2 + 1] & K8_DCSB_CS_ENABLE)
                         size1 = pvt->ops->dbam_to_cs(pvt, DBAM_DIMM(dimm, dbam));

-                edac_printk(KERN_DEBUG, EDAC_MC, " %d: %5dMB %d: %5dMB\n",
+                amd64_info(EDAC_MC ": %d: %5dMB %d: %5dMB\n",
                             dimm * 2, size0 << factor,
                             dimm * 2 + 1, size1 << factor);
         }
 }

@@ -1906,8 +1896,7 @@ static int get_channel_from_ecc_syndrome(struct mem_ctl_info *mci, u16 syndrome)
                                   ARRAY_SIZE(x4_vectors),
                                   pvt->syn_type);
         else {
-                amd64_printk(KERN_WARNING, "%s: Illegal syndrome type: %u\n",
-                             __func__, pvt->syn_type);
+                amd64_warn("Illegal syndrome type: %u\n", pvt->syn_type);
                 return err_sym;
         }

@@ -1925,17 +1914,15 @@ static void amd64_handle_ce(struct mem_ctl_info *mci,
         u64 sys_addr;

         /* Ensure that the Error Address is VALID */
-        if ((info->nbsh & K8_NBSH_VALID_ERROR_ADDR) == 0) {
-                amd64_mc_printk(mci, KERN_ERR,
-                        "HW has no ERROR_ADDRESS available\n");
+        if (!(info->nbsh & K8_NBSH_VALID_ERROR_ADDR)) {
+                amd64_mc_err(mci, "HW has no ERROR_ADDRESS available\n");
                 edac_mc_handle_ce_no_info(mci, EDAC_MOD_STR);
                 return;
         }

         sys_addr = pvt->ops->get_error_address(mci, info);

-        amd64_mc_printk(mci, KERN_ERR,
-                "CE ERROR_ADDRESS= 0x%llx\n", sys_addr);
+        amd64_mc_err(mci, "CE ERROR_ADDRESS= 0x%llx\n", sys_addr);

         pvt->ops->map_sysaddr_to_csrow(mci, info, sys_addr);
 }
@@ -1952,9 +1939,8 @@ static void amd64_handle_ue(struct mem_ctl_info *mci,

         log_mci = mci;

-        if ((info->nbsh & K8_NBSH_VALID_ERROR_ADDR) == 0) {
-                amd64_mc_printk(mci, KERN_CRIT,
-                        "HW has no ERROR_ADDRESS available\n");
+        if (!(info->nbsh & K8_NBSH_VALID_ERROR_ADDR)) {
+                amd64_mc_err(mci, "HW has no ERROR_ADDRESS available\n");
                 edac_mc_handle_ue_no_info(log_mci, EDAC_MOD_STR);
                 return;
         }
@@ -1967,9 +1953,8 @@ static void amd64_handle_ue(struct mem_ctl_info *mci,
          */
         src_mci = find_mc_by_sys_addr(mci, sys_addr);
         if (!src_mci) {
-                amd64_mc_printk(mci, KERN_CRIT,
-                        "ERROR ADDRESS (0x%lx) value NOT mapped to a MC\n",
-                        (unsigned long)sys_addr);
+                amd64_mc_err(mci, "ERROR ADDRESS (0x%lx) NOT mapped to a MC\n",
+                                  (unsigned long)sys_addr);
                 edac_mc_handle_ue_no_info(log_mci, EDAC_MOD_STR);
                 return;
         }
@@ -1978,9 +1963,8 @@ static void amd64_handle_ue(struct mem_ctl_info *mci,

         csrow = sys_addr_to_csrow(log_mci, sys_addr);
         if (csrow < 0) {
-                amd64_mc_printk(mci, KERN_CRIT,
-                        "ERROR_ADDRESS (0x%lx) value NOT mapped to 'csrow'\n",
-                        (unsigned long)sys_addr);
+                amd64_mc_err(mci, "ERROR_ADDRESS (0x%lx) NOT mapped to CS\n",
+                                  (unsigned long)sys_addr);
                 edac_mc_handle_ue_no_info(log_mci, EDAC_MOD_STR);
         } else {
                 error_address_to_page_and_offset(sys_addr, &page, &offset);
@@ -2044,9 +2028,9 @@ static int amd64_reserve_mc_sibling_devices(struct amd64_pvt *pvt, u16 f1_id,
         /* Reserve the ADDRESS MAP Device */
         pvt->F1 = pci_get_related_function(pvt->F2->vendor, f1_id, pvt->F2);
         if (!pvt->F1) {
-                amd64_printk(KERN_ERR, "error address map device not found: "
+                amd64_err("error address map device not found: "
                              "vendor %x device 0x%x (broken BIOS?)\n",
                              PCI_VENDOR_ID_AMD, f1_id);
                 return -ENODEV;
         }

@@ -2056,9 +2040,9 @@ static int amd64_reserve_mc_sibling_devices(struct amd64_pvt *pvt, u16 f1_id,
                 pci_dev_put(pvt->F1);
                 pvt->F1 = NULL;

-                amd64_printk(KERN_ERR, "error F3 device not found: "
+                amd64_err("error F3 device not found: "
                              "vendor %x device 0x%x (broken BIOS?)\n",
                              PCI_VENDOR_ID_AMD, f3_id);

                 return -ENODEV;
         }
@@ -2268,7 +2252,7 @@ static int amd64_init_csrows(struct mem_ctl_info *mci)
                 csrow->page_mask = ~mask_from_dct_mask(pvt, i);
                 /* 8 bytes of resolution */

-                csrow->mtype = amd64_determine_memory_type(pvt);
+                csrow->mtype = amd64_determine_memory_type(pvt, i);

                 debugf1("  for MC node %d csrow %d:\n", pvt->mc_node_id, i);
                 debugf1("    input_addr_min: 0x%lx input_addr_max: 0x%lx\n",
@@ -2313,8 +2297,7 @@ static bool amd64_nb_mce_bank_enabled_on_node(int nid)
         bool ret = false;

         if (!zalloc_cpumask_var(&mask, GFP_KERNEL)) {
-                amd64_printk(KERN_WARNING, "%s: error allocating mask\n",
-                             __func__);
+                amd64_warn("%s: Error allocating mask\n", __func__);
                 return false;
         }

@@ -2346,8 +2329,7 @@ static int amd64_toggle_ecc_err_reporting(struct amd64_pvt *pvt, bool on)
         int cpu;

         if (!zalloc_cpumask_var(&cmask, GFP_KERNEL)) {
-                amd64_printk(KERN_WARNING, "%s: error allocating mask\n",
-                             __func__);
+                amd64_warn("%s: error allocating mask\n", __func__);
                 return false;
         }

@@ -2394,8 +2376,7 @@ static void amd64_enable_ecc_error_reporting(struct mem_ctl_info *mci)
         pci_write_config_dword(pvt->F3, K8_NBCTL, value);

         if (amd64_toggle_ecc_err_reporting(pvt, ON))
-                amd64_printk(KERN_WARNING, "Error enabling ECC reporting over "
-                             "MCGCTL!\n");
+                amd64_warn("Error enabling ECC reporting over MCGCTL!\n");

         amd64_read_pci_cfg(pvt->F3, K8_NBCFG, &value);

@@ -2404,9 +2385,7 @@ static void amd64_enable_ecc_error_reporting(struct mem_ctl_info *mci)
                    (value & K8_NBCFG_ECC_ENABLE) ? "Enabled" : "Disabled");

         if (!(value & K8_NBCFG_ECC_ENABLE)) {
-                amd64_printk(KERN_WARNING,
-                        "This node reports that DRAM ECC is "
-                        "currently Disabled; ENABLING now\n");
+                amd64_warn("DRAM ECC disabled on this node, enabling...\n");

                 pvt->flags.nb_ecc_prev = 0;

@@ -2417,12 +2396,10 @@ static void amd64_enable_ecc_error_reporting(struct mem_ctl_info *mci)
                 amd64_read_pci_cfg(pvt->F3, K8_NBCFG, &value);

                 if (!(value & K8_NBCFG_ECC_ENABLE)) {
-                        amd64_printk(KERN_WARNING,
-                                "Hardware rejects Enabling DRAM ECC checking\n"
-                                "Check memory DIMM configuration\n");
+                        amd64_warn("Hardware rejected DRAM ECC enable,"
+                                   "check memory DIMM configuration.\n");
                 } else {
-                        amd64_printk(KERN_DEBUG,
-                                "Hardware accepted DRAM ECC Enable\n");
+                        amd64_info("Hardware accepted DRAM ECC Enable\n");
                 }
         } else {
                 pvt->flags.nb_ecc_prev = 1;
@@ -2457,7 +2434,7 @@ static void amd64_restore_ecc_error_reporting(struct amd64_pvt *pvt)

         /* restore the NB Enable MCGCTL bit */
         if (amd64_toggle_ecc_err_reporting(pvt, OFF))
-                amd64_printk(KERN_WARNING, "Error restoring NB MCGCTL settings!\n");
+                amd64_warn("Error restoring NB MCGCTL settings!\n");
 }

 /*
@@ -2481,25 +2458,20 @@ static int amd64_check_ecc_enabled(struct amd64_pvt *pvt)
         amd64_read_pci_cfg(pvt->F3, K8_NBCFG, &value);

         ecc_enabled = !!(value & K8_NBCFG_ECC_ENABLE);
-        if (!ecc_enabled)
-                amd64_printk(KERN_NOTICE, "This node reports that Memory ECC "
-                             "is currently disabled, set F3x%x[22] (%s).\n",
-                             K8_NBCFG, pci_name(pvt->F3));
-        else
-                amd64_printk(KERN_INFO, "ECC is enabled by BIOS.\n");
+        amd64_info("DRAM ECC %s.\n", (ecc_enabled ? "enabled" : "disabled"));

         nb_mce_en = amd64_nb_mce_bank_enabled_on_node(pvt->mc_node_id);
         if (!nb_mce_en)
-                amd64_printk(KERN_NOTICE, "NB MCE bank disabled, set MSR "
-                             "0x%08x[4] on node %d to enable.\n",
+                amd64_notice("NB MCE bank disabled, "
+                             "set MSR 0x%08x[4] on node %d to enable.\n",
                              MSR_IA32_MCG_CTL, pvt->mc_node_id);

         if (!ecc_enabled || !nb_mce_en) {
                 if (!ecc_enable_override) {
-                        amd64_printk(KERN_NOTICE, "%s", ecc_msg);
+                        amd64_notice("%s", ecc_msg);
                         return -ENODEV;
                 } else {
-                        amd64_printk(KERN_WARNING, "Forcing ECC checking on!\n");
+                        amd64_warn("Forcing ECC on!\n");
                 }
         }

@@ -2575,17 +2547,17 @@ static struct amd64_family_type *amd64_per_family_init(struct amd64_pvt *pvt)
                 break;

         default:
-                amd64_printk(KERN_ERR, "Unsupported family!\n");
+                amd64_err("Unsupported family!\n");
                 return NULL;
         }

         pvt->ext_model = boot_cpu_data.x86_model >> 4;

-        amd64_printk(KERN_INFO, "%s %s detected.\n", pvt->ctl_name,
+        amd64_info("%s %sdetected (node %d).\n", pvt->ctl_name,
                    (fam == 0xf ?
-                   (pvt->ext_model >= K8_REV_F ? "revF or later"
-                                               : "revE or earlier")
-                   : ""));
+                   (pvt->ext_model >= K8_REV_F ? "revF or later "
+                                               : "revE or earlier ")
+                   : ""), pvt->mc_node_id);
         return fam_type;
 }

@@ -2736,8 +2708,6 @@ static int __devinit amd64_init_one_instance(struct pci_dev *pdev,
 {
         int ret = 0;

-        debugf0("(MC node=%d)\n", get_node_id(pdev));
-
         ret = pci_enable_device(pdev);
         if (ret < 0) {
                 debugf0("ret=%d\n", ret);
@@ -2746,8 +2716,7 @@ static int __devinit amd64_init_one_instance(struct pci_dev *pdev,

         ret = amd64_probe_one_instance(pdev);
         if (ret < 0)
-                amd64_printk(KERN_ERR, "Error probing instance: %d\n",
-                             get_node_id(pdev));
+                amd64_err("Error probing instance: %d\n", get_node_id(pdev));

         return ret;
 }
diff --git a/drivers/edac/amd64_edac.h b/drivers/edac/amd64_edac.h
index 76760a8043ca..f15e2b257e72 100644
--- a/drivers/edac/amd64_edac.h
+++ b/drivers/edac/amd64_edac.h
@@ -74,11 +74,26 @@
 #include "edac_core.h"
 #include "mce_amd.h"

-#define amd64_printk(level, fmt, arg...) \
-        edac_printk(level, "amd64", fmt, ##arg)
+#define amd64_debug(fmt, arg...) \
+        edac_printk(KERN_DEBUG, "amd64", fmt, ##arg)

-#define amd64_mc_printk(mci, level, fmt, arg...) \
-        edac_mc_chipset_printk(mci, level, "amd64", fmt, ##arg)
+#define amd64_info(fmt, arg...) \
+        edac_printk(KERN_INFO, "amd64", fmt, ##arg)
+
+#define amd64_notice(fmt, arg...) \
+        edac_printk(KERN_NOTICE, "amd64", fmt, ##arg)
+
+#define amd64_warn(fmt, arg...) \
+        edac_printk(KERN_WARNING, "amd64", fmt, ##arg)
+
+#define amd64_err(fmt, arg...) \
+        edac_printk(KERN_ERR, "amd64", fmt, ##arg)
+
+#define amd64_mc_warn(mci, fmt, arg...) \
+        edac_mc_chipset_printk(mci, KERN_WARNING, "amd64", fmt, ##arg)
+
+#define amd64_mc_err(mci, fmt, arg...) \
+        edac_mc_chipset_printk(mci, KERN_ERR, "amd64", fmt, ##arg)

 /*
  * Throughout the comments in this code, the following terms are used:
@@ -129,7 +144,7 @@
  * sections 3.5.4 and 3.5.5 for more information.
  */

-#define EDAC_AMD64_VERSION " Ver: 3.3.0 " __DATE__
+#define EDAC_AMD64_VERSION "v3.3.0"
 #define EDAC_MOD_STR "amd64_edac"

 #define EDAC_MAX_NUMNODES 8
@@ -527,8 +542,8 @@ static inline int amd64_read_pci_cfg_dword(struct pci_dev *pdev, int offset,

         err = pci_read_config_dword(pdev, offset, val);
         if (err)
-                amd64_printk(KERN_WARNING, "%s: error reading F%dx%x.\n",
+                amd64_warn("%s: error reading F%dx%x.\n",
                              func, PCI_FUNC(pdev->devfn), offset);

         return err;
 }
diff --git a/drivers/edac/amd64_edac_inj.c b/drivers/edac/amd64_edac_inj.c
index 523ce4a6edc6..688478de1cbd 100644
--- a/drivers/edac/amd64_edac_inj.c
+++ b/drivers/edac/amd64_edac_inj.c
@@ -23,9 +23,7 @@ static ssize_t amd64_inject_section_store(struct mem_ctl_info *mci,
         if (ret != -EINVAL) {

                 if (value > 3) {
-                        amd64_printk(KERN_WARNING,
-                                     "%s: invalid section 0x%lx\n",
-                                     __func__, value);
+                        amd64_warn("%s: invalid section 0x%lx\n", __func__, value);
                         return -EINVAL;
                 }

@@ -58,9 +56,7 @@ static ssize_t amd64_inject_word_store(struct mem_ctl_info *mci,
         if (ret != -EINVAL) {

                 if (value > 8) {
-                        amd64_printk(KERN_WARNING,
-                                     "%s: invalid word 0x%lx\n",
-                                     __func__, value);
+                        amd64_warn("%s: invalid word 0x%lx\n", __func__, value);
                         return -EINVAL;
                 }

@@ -92,9 +88,8 @@ static ssize_t amd64_inject_ecc_vector_store(struct mem_ctl_info *mci,
         if (ret != -EINVAL) {

                 if (value & 0xFFFF0000) {
-                        amd64_printk(KERN_WARNING,
-                                     "%s: invalid EccVector: 0x%lx\n",
-                                     __func__, value);
+                        amd64_warn("%s: invalid EccVector: 0x%lx\n",
+                                   __func__, value);
                         return -EINVAL;
                 }

diff --git a/drivers/edac/edac_core.h b/drivers/edac/edac_core.h
index 251440cd50a3..e8d3dd0b5e9c 100644
--- a/drivers/edac/edac_core.h
+++ b/drivers/edac/edac_core.h
@@ -68,9 +68,10 @@
 #define EDAC_PCI "PCI"
 #define EDAC_DEBUG "DEBUG"

+extern const char *edac_mem_types[];
+
 #ifdef CONFIG_EDAC_DEBUG
 extern int edac_debug_level;
-extern const char *edac_mem_types[];

 #define edac_debug_printk(level, fmt, arg...) \
 do { \
diff --git a/drivers/edac/edac_mc.c b/drivers/edac/edac_mc.c
index 795ea69c4d8f..a4e9db2d6524 100644
--- a/drivers/edac/edac_mc.c
+++ b/drivers/edac/edac_mc.c
@@ -76,6 +76,8 @@ static void edac_mc_dump_mci(struct mem_ctl_info *mci)
         debugf3("\tpvt_info = %p\n\n", mci->pvt_info);
 }

+#endif /* CONFIG_EDAC_DEBUG */
+
 /*
  * keep those in sync with the enum mem_type
  */
@@ -100,8 +102,6 @@ const char *edac_mem_types[] = {
 };
 EXPORT_SYMBOL_GPL(edac_mem_types);

-#endif /* CONFIG_EDAC_DEBUG */
-
 /* 'ptr' points to a possibly unaligned item X such that sizeof(X) is 'size'.
  * Adjust 'ptr' so that its alignment is at least as stringent as what the
  * compiler would provide for X and return the aligned result.