author		Linus Torvalds <torvalds@linux-foundation.org>		2009-12-11 00:55:54 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>		2009-12-11 00:55:54 -0500
commit		4e5df8069b0e4e36c6b528b3be7da298e6f454cd (patch)
tree		19714ceff2e1e25806141e93e603063cd3afb1ee
parent		aa2cf420593b67cc93de7a3f675b2a88eba0505f (diff)
parent		df5b1606bd077401831759171c355dc38cfaa59a (diff)
Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/bp/bp
* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/bp/bp: (21 commits)
amd64_edac: bump driver version
amd64_edac: fix use-uninitialised bug
amd64_edac: correct sys address to chip select mapping
amd64_edac: add a leaner syndrome decoding algorithm
amd64_edac: remove early hw support check
amd64_edac: detect DDR3 memory type
edac: add memory types strings for debugging
edac, mce: update AMD F10h revD check
amd64_edac: remove unneeded extract_error_address wrapper
amd64_edac: rename StinkyIdentifier
amd64_edac: remove superfluous dbg printk
amd64_edac: enhance address to DRAM bank mapping
amd64_edac: cleanup f10_early_channel_count
amd64_edac: dump DIMM sizes on K8 too
amd64_edac: cleanup rest of amd64_dump_misc_regs
amd64_edac: cleanup DRAM cfg low debug output
amd64_edac: wrap-up pci config read error handling
amd64_edac: unify MCGCTL ECC switching
cpumask: use modern cpumask style in drivers/edac/amd64_edac.c
amd64_edac: make DRAM regions output more human-readable
...
-rw-r--r--	drivers/edac/amd64_edac.c	1251
-rw-r--r--	drivers/edac/amd64_edac.h	62
-rw-r--r--	drivers/edac/edac_core.h	1
-rw-r--r--	drivers/edac/edac_mc.c	24
-rw-r--r--	drivers/edac/edac_mce_amd.c	2
5 files changed, 621 insertions(+), 719 deletions(-)
diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c
index a38831c82649..5fdd6daa40ea 100644
--- a/drivers/edac/amd64_edac.c
+++ b/drivers/edac/amd64_edac.c
@@ -19,26 +19,48 @@ static struct mem_ctl_info *mci_lookup[EDAC_MAX_NUMNODES];
 static struct amd64_pvt *pvt_lookup[EDAC_MAX_NUMNODES];
 
 /*
- * See F2x80 for K8 and F2x[1,0]80 for Fam10 and later. The table below is only
- * for DDR2 DRAM mapping.
+ * Address to DRAM bank mapping: see F2x80 for K8 and F2x[1,0]80 for Fam10 and
+ * later.
  */
-u32 revf_quad_ddr2_shift[] = {
-	0,	/* 0000b NULL DIMM (128mb) */
-	28,	/* 0001b 256mb */
-	29,	/* 0010b 512mb */
-	29,	/* 0011b 512mb */
-	29,	/* 0100b 512mb */
-	30,	/* 0101b 1gb */
-	30,	/* 0110b 1gb */
-	31,	/* 0111b 2gb */
-	31,	/* 1000b 2gb */
-	32,	/* 1001b 4gb */
-	32,	/* 1010b 4gb */
-	33,	/* 1011b 8gb */
-	0,	/* 1100b future */
-	0,	/* 1101b future */
-	0,	/* 1110b future */
-	0	/* 1111b future */
+static int ddr2_dbam_revCG[] = {
+			   [0]		= 32,
+			   [1]		= 64,
+			   [2]		= 128,
+			   [3]		= 256,
+			   [4]		= 512,
+			   [5]		= 1024,
+			   [6]		= 2048,
+};
+
+static int ddr2_dbam_revD[] = {
+			   [0]		= 32,
+			   [1]		= 64,
+			   [2 ... 3]	= 128,
+			   [4]		= 256,
+			   [5]		= 512,
+			   [6]		= 256,
+			   [7]		= 512,
+			   [8 ... 9]	= 1024,
+			   [10]		= 2048,
+};
+
+static int ddr2_dbam[] = { [0]		= 128,
+			   [1]		= 256,
+			   [2 ... 4]	= 512,
+			   [5 ... 6]	= 1024,
+			   [7 ... 8]	= 2048,
+			   [9 ... 10]	= 4096,
+			   [11]		= 8192,
+};
+
+static int ddr3_dbam[] = { [0]		= -1,
+			   [1]		= 256,
+			   [2]		= 512,
+			   [3 ... 4]	= -1,
+			   [5 ... 6]	= 1024,
+			   [7 ... 8]	= 2048,
+			   [9 ... 10]	= 4096,
+			   [11]		= 8192,
 };
 
 /*
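A note on what changed semantically in this hunk: the old revf_quad_ddr2_shift[] stored left-shift amounts (the DIMM size as a power of two), while the new per-revision tables store chip-select sizes in MB directly, indexed by the 4-bit DBAM "cs mode" nibble; the -1 entries in ddr3_dbam presumably mark reserved encodings. A minimal user-space sketch of the lookup plus the MB-to-pages conversion the driver eventually needs (the PAGE_SHIFT value and the main() harness are illustrative assumptions, not part of the patch):

    #include <stdio.h>

    #define PAGE_SHIFT 12			/* assumed: 4K pages, as on x86 */

    /* copy of ddr2_dbam_revCG[] from the hunk above */
    static int ddr2_dbam_revCG[] = {
            [0] = 32,  [1] = 64,   [2] = 128, [3] = 256,
            [4] = 512, [5] = 1024, [6] = 2048,
    };

    int main(void)
    {
            int cs_mode = 5;			/* DBAM nibble as read from F2x80 */
            int cs_size = ddr2_dbam_revCG[cs_mode];	/* -> 1024 MB */

            /* MB -> pages: multiply by 2^20 bytes, divide by the page size */
            unsigned long nr_pages = (unsigned long)cs_size << (20 - PAGE_SHIFT);

            printf("cs_mode %d: %d MB = %lu pages\n", cs_mode, cs_size, nr_pages);
            return 0;
    }

Callers do have to treat the -1 entries as "invalid/reserved" before doing any size arithmetic on the returned value.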
@@ -164,11 +186,9 @@ static int amd64_get_scrub_rate(struct mem_ctl_info *mci, u32 *bw)
 {
 	struct amd64_pvt *pvt = mci->pvt_info;
 	u32 scrubval = 0;
-	int status = -1, i, ret = 0;
+	int status = -1, i;
 
-	ret = pci_read_config_dword(pvt->misc_f3_ctl, K8_SCRCTRL, &scrubval);
-	if (ret)
-		debugf0("Reading K8_SCRCTRL failed\n");
+	amd64_read_pci_cfg(pvt->misc_f3_ctl, K8_SCRCTRL, &scrubval);
 
 	scrubval = scrubval & 0x001F;
 
@@ -189,7 +209,7 @@ static int amd64_get_scrub_rate(struct mem_ctl_info *mci, u32 *bw)
 /* Map from a CSROW entry to the mask entry that operates on it */
 static inline u32 amd64_map_to_dcs_mask(struct amd64_pvt *pvt, int csrow)
 {
-	if (boot_cpu_data.x86 == 0xf && pvt->ext_model < OPTERON_CPU_REV_F)
+	if (boot_cpu_data.x86 == 0xf && pvt->ext_model < K8_REV_F)
 		return csrow;
 	else
 		return csrow >> 1;
@@ -437,7 +457,7 @@ int amd64_get_dram_hole_info(struct mem_ctl_info *mci, u64 *hole_base,
 	u64 base;
 
 	/* only revE and later have the DRAM Hole Address Register */
-	if (boot_cpu_data.x86 == 0xf && pvt->ext_model < OPTERON_CPU_REV_E) {
+	if (boot_cpu_data.x86 == 0xf && pvt->ext_model < K8_REV_E) {
 		debugf1("  revision %d for node %d does not support DHAR\n",
 			pvt->ext_model, pvt->mc_node_id);
 		return 1;
@@ -743,21 +763,6 @@ static void find_csrow_limits(struct mem_ctl_info *mci, int csrow,
 	*input_addr_max = base | mask | pvt->dcs_mask_notused;
 }
 
-/*
- * Extract error address from MCA NB Address Low (section 3.6.4.5) and MCA NB
- * Address High (section 3.6.4.6) register values and return the result. Address
- * is located in the info structure (nbeah and nbeal), the encoding is device
- * specific.
- */
-static u64 extract_error_address(struct mem_ctl_info *mci,
-				 struct err_regs *info)
-{
-	struct amd64_pvt *pvt = mci->pvt_info;
-
-	return pvt->ops->get_error_address(mci, info);
-}
-
 
 /* Map the Error address to a PAGE and PAGE OFFSET. */
 static inline void error_address_to_page_and_offset(u64 error_address,
 						    u32 *page, u32 *offset)
@@ -787,7 +792,7 @@ static int sys_addr_to_csrow(struct mem_ctl_info *mci, u64 sys_addr)
 	return csrow;
 }
 
-static int get_channel_from_ecc_syndrome(unsigned short syndrome);
+static int get_channel_from_ecc_syndrome(struct mem_ctl_info *, u16);
 
 static void amd64_cpu_display_info(struct amd64_pvt *pvt)
 {
@@ -797,7 +802,7 @@ static void amd64_cpu_display_info(struct amd64_pvt *pvt)
 		edac_printk(KERN_DEBUG, EDAC_MC, "F10h CPU detected\n");
 	else if (boot_cpu_data.x86 == 0xf)
 		edac_printk(KERN_DEBUG, EDAC_MC, "%s detected\n",
-			    (pvt->ext_model >= OPTERON_CPU_REV_F) ?
+			    (pvt->ext_model >= K8_REV_F) ?
 			    "Rev F or later" : "Rev E or earlier");
 	else
 		/* we'll hardly ever ever get here */
@@ -813,7 +818,7 @@ static enum edac_type amd64_determine_edac_cap(struct amd64_pvt *pvt)
 	int bit;
 	enum dev_type edac_cap = EDAC_FLAG_NONE;
 
-	bit = (boot_cpu_data.x86 > 0xf || pvt->ext_model >= OPTERON_CPU_REV_F)
+	bit = (boot_cpu_data.x86 > 0xf || pvt->ext_model >= K8_REV_F)
 		? 19
 		: 17;
 
@@ -824,111 +829,86 @@ static enum edac_type amd64_determine_edac_cap(struct amd64_pvt *pvt)
 }
 
 
-static void f10_debug_display_dimm_sizes(int ctrl, struct amd64_pvt *pvt,
-					 int ganged);
+static void amd64_debug_display_dimm_sizes(int ctrl, struct amd64_pvt *pvt);
+
+static void amd64_dump_dramcfg_low(u32 dclr, int chan)
+{
+	debugf1("F2x%d90 (DRAM Cfg Low): 0x%08x\n", chan, dclr);
+
+	debugf1("  DIMM type: %sbuffered; all DIMMs support ECC: %s\n",
+		(dclr & BIT(16)) ?  "un" : "",
+		(dclr & BIT(19)) ? "yes" : "no");
+
+	debugf1("  PAR/ERR parity: %s\n",
+		(dclr & BIT(8)) ?  "enabled" : "disabled");
+
+	debugf1("  DCT 128bit mode width: %s\n",
+		(dclr & BIT(11)) ?  "128b" : "64b");
+
+	debugf1("  x4 logical DIMMs present: L0: %s L1: %s L2: %s L3: %s\n",
+		(dclr & BIT(12)) ?  "yes" : "no",
+		(dclr & BIT(13)) ?  "yes" : "no",
+		(dclr & BIT(14)) ?  "yes" : "no",
+		(dclr & BIT(15)) ?  "yes" : "no");
+}
 
 /* Display and decode various NB registers for debug purposes. */
 static void amd64_dump_misc_regs(struct amd64_pvt *pvt)
 {
 	int ganged;
 
-	debugf1("  nbcap:0x%8.08x DctDualCap=%s DualNode=%s 8-Node=%s\n",
-		pvt->nbcap,
-		(pvt->nbcap & K8_NBCAP_DCT_DUAL) ? "True" : "False",
-		(pvt->nbcap & K8_NBCAP_DUAL_NODE) ? "True" : "False",
-		(pvt->nbcap & K8_NBCAP_8_NODE) ? "True" : "False");
-	debugf1("    ECC Capable=%s   ChipKill Capable=%s\n",
-		(pvt->nbcap & K8_NBCAP_SECDED) ? "True" : "False",
-		(pvt->nbcap & K8_NBCAP_CHIPKILL) ? "True" : "False");
-	debugf1("  DramCfg0-low=0x%08x DIMM-ECC=%s Parity=%s Width=%s\n",
-		pvt->dclr0,
-		(pvt->dclr0 & BIT(19)) ?  "Enabled" : "Disabled",
-		(pvt->dclr0 & BIT(8)) ?  "Enabled" : "Disabled",
-		(pvt->dclr0 & BIT(11)) ?  "128b" : "64b");
-	debugf1("  DIMM x4 Present: L0=%s L1=%s L2=%s L3=%s  DIMM Type=%s\n",
-		(pvt->dclr0 & BIT(12)) ?  "Y" : "N",
-		(pvt->dclr0 & BIT(13)) ?  "Y" : "N",
-		(pvt->dclr0 & BIT(14)) ?  "Y" : "N",
-		(pvt->dclr0 & BIT(15)) ?  "Y" : "N",
-		(pvt->dclr0 & BIT(16)) ?  "UN-Buffered" : "Buffered");
-
-
-	debugf1("  online-spare: 0x%8.08x\n", pvt->online_spare);
+	debugf1("F3xE8 (NB Cap): 0x%08x\n", pvt->nbcap);
 
-	if (boot_cpu_data.x86 == 0xf) {
-		debugf1("  dhar: 0x%8.08x Base=0x%08x Offset=0x%08x\n",
-			pvt->dhar, dhar_base(pvt->dhar),
-			k8_dhar_offset(pvt->dhar));
-		debugf1(" DramHoleValid=%s\n",
-			(pvt->dhar & DHAR_VALID) ? "True" : "False");
+	debugf1("  NB two channel DRAM capable: %s\n",
+		(pvt->nbcap & K8_NBCAP_DCT_DUAL) ? "yes" : "no");
 
-		debugf1("  dbam-dkt: 0x%8.08x\n", pvt->dbam0);
+	debugf1("  ECC capable: %s, ChipKill ECC capable: %s\n",
+		(pvt->nbcap & K8_NBCAP_SECDED) ? "yes" : "no",
+		(pvt->nbcap & K8_NBCAP_CHIPKILL) ? "yes" : "no");
 
-		/* everything below this point is Fam10h and above */
-		return;
+	amd64_dump_dramcfg_low(pvt->dclr0, 0);
 
-	} else {
-		debugf1("  dhar: 0x%8.08x Base=0x%08x Offset=0x%08x\n",
-			pvt->dhar, dhar_base(pvt->dhar),
-			f10_dhar_offset(pvt->dhar));
-		debugf1(" DramMemHoistValid=%s DramHoleValid=%s\n",
-			(pvt->dhar & F10_DRAM_MEM_HOIST_VALID) ?
-			"True" : "False",
-			(pvt->dhar & DHAR_VALID) ?
-			"True" : "False");
-	}
+	debugf1("F3xB0 (Online Spare): 0x%08x\n", pvt->online_spare);
 
-	/* Only if NOT ganged does dcl1 have valid info */
-	if (!dct_ganging_enabled(pvt)) {
-		debugf1("  DramCfg1-low=0x%08x DIMM-ECC=%s Parity=%s "
-			"Width=%s\n", pvt->dclr1,
-			(pvt->dclr1 & BIT(19)) ?  "Enabled" : "Disabled",
-			(pvt->dclr1 & BIT(8)) ?  "Enabled" : "Disabled",
-			(pvt->dclr1 & BIT(11)) ?  "128b" : "64b");
-		debugf1("  DIMM x4 Present: L0=%s L1=%s L2=%s L3=%s "
-			"DIMM Type=%s\n",
-			(pvt->dclr1 & BIT(12)) ?  "Y" : "N",
-			(pvt->dclr1 & BIT(13)) ?  "Y" : "N",
-			(pvt->dclr1 & BIT(14)) ?  "Y" : "N",
-			(pvt->dclr1 & BIT(15)) ?  "Y" : "N",
-			(pvt->dclr1 & BIT(16)) ?  "UN-Buffered" : "Buffered");
-	}
+	debugf1("F1xF0 (DRAM Hole Address): 0x%08x, base: 0x%08x, "
+		"offset: 0x%08x\n",
+		pvt->dhar,
+		dhar_base(pvt->dhar),
+		(boot_cpu_data.x86 == 0xf) ? k8_dhar_offset(pvt->dhar)
+					   : f10_dhar_offset(pvt->dhar));
+
+	debugf1("  DramHoleValid: %s\n",
+		(pvt->dhar & DHAR_VALID) ? "yes" : "no");
+
+	/* everything below this point is Fam10h and above */
+	if (boot_cpu_data.x86 == 0xf) {
+		amd64_debug_display_dimm_sizes(0, pvt);
+		return;
+	}
+
+	/* Only if NOT ganged does dclr1 have valid info */
+	if (!dct_ganging_enabled(pvt))
+		amd64_dump_dramcfg_low(pvt->dclr1, 1);
 
 	/*
 	 * Determine if ganged and then dump memory sizes for first controller,
 	 * and if NOT ganged dump info for 2nd controller.
 	 */
 	ganged = dct_ganging_enabled(pvt);
 
-	f10_debug_display_dimm_sizes(0, pvt, ganged);
+	amd64_debug_display_dimm_sizes(0, pvt);
 
 	if (!ganged)
-		f10_debug_display_dimm_sizes(1, pvt, ganged);
+		amd64_debug_display_dimm_sizes(1, pvt);
 }
 
 /* Read in both of DBAM registers */
 static void amd64_read_dbam_reg(struct amd64_pvt *pvt)
 {
-	int err = 0;
-	unsigned int reg;
-
-	reg = DBAM0;
-	err = pci_read_config_dword(pvt->dram_f2_ctl, reg, &pvt->dbam0);
-	if (err)
-		goto err_reg;
+	amd64_read_pci_cfg(pvt->dram_f2_ctl, DBAM0, &pvt->dbam0);
 
-	if (boot_cpu_data.x86 >= 0x10) {
-		reg = DBAM1;
-		err = pci_read_config_dword(pvt->dram_f2_ctl, reg, &pvt->dbam1);
-
-		if (err)
-			goto err_reg;
-	}
-
-	return;
-
-err_reg:
-	debugf0("Error reading F2x%03x.\n", reg);
+	if (boot_cpu_data.x86 >= 0x10)
+		amd64_read_pci_cfg(pvt->dram_f2_ctl, DBAM1, &pvt->dbam1);
 }
 
 /*
@@ -963,7 +943,7 @@ err_reg:
 static void amd64_set_dct_base_and_mask(struct amd64_pvt *pvt)
 {
 
-	if (boot_cpu_data.x86 == 0xf && pvt->ext_model < OPTERON_CPU_REV_F) {
+	if (boot_cpu_data.x86 == 0xf && pvt->ext_model < K8_REV_F) {
 		pvt->dcsb_base		= REV_E_DCSB_BASE_BITS;
 		pvt->dcsm_mask		= REV_E_DCSM_MASK_BITS;
 		pvt->dcs_mask_notused	= REV_E_DCS_NOTUSED_BITS;
@@ -991,28 +971,21 @@ static void amd64_set_dct_base_and_mask(struct amd64_pvt *pvt)
  */
 static void amd64_read_dct_base_mask(struct amd64_pvt *pvt)
 {
-	int cs, reg, err = 0;
+	int cs, reg;
 
 	amd64_set_dct_base_and_mask(pvt);
 
 	for (cs = 0; cs < pvt->cs_count; cs++) {
 		reg = K8_DCSB0 + (cs * 4);
-		err = pci_read_config_dword(pvt->dram_f2_ctl, reg,
-					    &pvt->dcsb0[cs]);
-		if (unlikely(err))
-			debugf0("Reading K8_DCSB0[%d] failed\n", cs);
-		else
+		if (!amd64_read_pci_cfg(pvt->dram_f2_ctl, reg, &pvt->dcsb0[cs]))
 			debugf0("  DCSB0[%d]=0x%08x reg: F2x%x\n",
 				cs, pvt->dcsb0[cs], reg);
 
 		/* If DCT are NOT ganged, then read in DCT1's base */
 		if (boot_cpu_data.x86 >= 0x10 && !dct_ganging_enabled(pvt)) {
 			reg = F10_DCSB1 + (cs * 4);
-			err = pci_read_config_dword(pvt->dram_f2_ctl, reg,
-						    &pvt->dcsb1[cs]);
-			if (unlikely(err))
-				debugf0("Reading F10_DCSB1[%d] failed\n", cs);
-			else
+			if (!amd64_read_pci_cfg(pvt->dram_f2_ctl, reg,
+						&pvt->dcsb1[cs]))
 				debugf0("  DCSB1[%d]=0x%08x reg: F2x%x\n",
 					cs, pvt->dcsb1[cs], reg);
 		} else {
@@ -1022,26 +995,20 @@ static void amd64_read_dct_base_mask(struct amd64_pvt *pvt)
 
 	for (cs = 0; cs < pvt->num_dcsm; cs++) {
 		reg = K8_DCSM0 + (cs * 4);
-		err = pci_read_config_dword(pvt->dram_f2_ctl, reg,
-					    &pvt->dcsm0[cs]);
-		if (unlikely(err))
-			debugf0("Reading K8_DCSM0 failed\n");
-		else
+		if (!amd64_read_pci_cfg(pvt->dram_f2_ctl, reg, &pvt->dcsm0[cs]))
 			debugf0("    DCSM0[%d]=0x%08x reg: F2x%x\n",
 				cs, pvt->dcsm0[cs], reg);
 
 		/* If DCT are NOT ganged, then read in DCT1's mask */
 		if (boot_cpu_data.x86 >= 0x10 && !dct_ganging_enabled(pvt)) {
 			reg = F10_DCSM1 + (cs * 4);
-			err = pci_read_config_dword(pvt->dram_f2_ctl, reg,
-						    &pvt->dcsm1[cs]);
-			if (unlikely(err))
-				debugf0("Reading F10_DCSM1[%d] failed\n", cs);
-			else
+			if (!amd64_read_pci_cfg(pvt->dram_f2_ctl, reg,
+						&pvt->dcsm1[cs]))
 				debugf0("    DCSM1[%d]=0x%08x reg: F2x%x\n",
 					cs, pvt->dcsm1[cs], reg);
-		} else
+		} else {
 			pvt->dcsm1[cs] = 0;
+		}
 	}
 }
 
@@ -1049,18 +1016,16 @@ static enum mem_type amd64_determine_memory_type(struct amd64_pvt *pvt)
 {
 	enum mem_type type;
 
-	if (boot_cpu_data.x86 >= 0x10 || pvt->ext_model >= OPTERON_CPU_REV_F) {
-		/* Rev F and later */
-		type = (pvt->dclr0 & BIT(16)) ? MEM_DDR2 : MEM_RDDR2;
+	if (boot_cpu_data.x86 >= 0x10 || pvt->ext_model >= K8_REV_F) {
+		if (pvt->dchr0 & DDR3_MODE)
+			type = (pvt->dclr0 & BIT(16)) ? MEM_DDR3 : MEM_RDDR3;
+		else
+			type = (pvt->dclr0 & BIT(16)) ? MEM_DDR2 : MEM_RDDR2;
 	} else {
-		/* Rev E and earlier */
 		type = (pvt->dclr0 & BIT(18)) ? MEM_DDR : MEM_RDDR;
 	}
 
-	debugf1("  Memory type is: %s\n",
-		(type == MEM_DDR2) ? "MEM_DDR2" :
-		(type == MEM_RDDR2) ? "MEM_RDDR2" :
-		(type == MEM_DDR) ? "MEM_DDR" : "MEM_RDDR");
+	debugf1("  Memory type is: %s\n", edac_mem_types[type]);
 
 	return type;
 }
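The new memory-type logic hinges on one bit: on revF and Fam10h+ parts the DCHR Ddr3Mode flag (DDR3_MODE) selects the DDR3 pair of types, while DCLR BIT(16) continues to distinguish unbuffered from registered DIMMs (BIT(18) plays that role on pre-revF DDR). A standalone restatement of the decision tree; the constant values below are assumptions standing in for the definitions in amd64_edac.h, not quotes from it:

    #include <stdio.h>

    #define BIT(n)		(1U << (n))
    #define DDR3_MODE	BIT(8)	/* assumed DCHR Ddr3Mode bit position */
    #define K8_REV_F	4	/* assumed ext_model threshold for rev F */

    static const char *mem_type(unsigned int x86, unsigned int ext_model,
                                unsigned int dclr0, unsigned int dchr0)
    {
            if (x86 >= 0x10 || ext_model >= K8_REV_F) {
                    if (dchr0 & DDR3_MODE)	/* BIT(16) = unbuffered */
                            return (dclr0 & BIT(16)) ? "MEM_DDR3" : "MEM_RDDR3";
                    return (dclr0 & BIT(16)) ? "MEM_DDR2" : "MEM_RDDR2";
            }
            /* rev E and earlier: BIT(18) flags unbuffered DDR */
            return (dclr0 & BIT(18)) ? "MEM_DDR" : "MEM_RDDR";
    }

    int main(void)
    {
            /* Fam10h part, Ddr3Mode set, unbuffered DIMMs -> MEM_DDR3 */
            printf("%s\n", mem_type(0x10, 0, BIT(16), DDR3_MODE));
            return 0;
    }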
@@ -1078,11 +1043,11 @@ static int k8_early_channel_count(struct amd64_pvt *pvt)
 {
 	int flag, err = 0;
 
-	err = pci_read_config_dword(pvt->dram_f2_ctl, F10_DCLR_0, &pvt->dclr0);
+	err = amd64_read_pci_cfg(pvt->dram_f2_ctl, F10_DCLR_0, &pvt->dclr0);
 	if (err)
 		return err;
 
-	if ((boot_cpu_data.x86_model >> 4) >= OPTERON_CPU_REV_F) {
+	if ((boot_cpu_data.x86_model >> 4) >= K8_REV_F) {
 		/* RevF (NPT) and later */
 		flag = pvt->dclr0 & F10_WIDTH_128;
 	} else {
@@ -1114,22 +1079,15 @@ static void k8_read_dram_base_limit(struct amd64_pvt *pvt, int dram)
 {
 	u32 low;
 	u32 off = dram << 3;	/* 8 bytes between DRAM entries */
-	int err;
 
-	err = pci_read_config_dword(pvt->addr_f1_ctl,
-				    K8_DRAM_BASE_LOW + off, &low);
-	if (err)
-		debugf0("Reading K8_DRAM_BASE_LOW failed\n");
+	amd64_read_pci_cfg(pvt->addr_f1_ctl, K8_DRAM_BASE_LOW + off, &low);
 
 	/* Extract parts into separate data entries */
 	pvt->dram_base[dram] = ((u64) low & 0xFFFF0000) << 8;
 	pvt->dram_IntlvEn[dram] = (low >> 8) & 0x7;
 	pvt->dram_rw_en[dram] = (low & 0x3);
 
-	err = pci_read_config_dword(pvt->addr_f1_ctl,
-				    K8_DRAM_LIMIT_LOW + off, &low);
-	if (err)
-		debugf0("Reading K8_DRAM_LIMIT_LOW failed\n");
+	amd64_read_pci_cfg(pvt->addr_f1_ctl, K8_DRAM_LIMIT_LOW + off, &low);
 
 	/*
 	 * Extract parts into separate data entries. Limit is the HIGHEST memory
1142 | 1100 | ||
1143 | static void k8_map_sysaddr_to_csrow(struct mem_ctl_info *mci, | 1101 | static void k8_map_sysaddr_to_csrow(struct mem_ctl_info *mci, |
1144 | struct err_regs *info, | 1102 | struct err_regs *info, |
1145 | u64 SystemAddress) | 1103 | u64 sys_addr) |
1146 | { | 1104 | { |
1147 | struct mem_ctl_info *src_mci; | 1105 | struct mem_ctl_info *src_mci; |
1148 | unsigned short syndrome; | 1106 | unsigned short syndrome; |
@@ -1155,7 +1113,7 @@ static void k8_map_sysaddr_to_csrow(struct mem_ctl_info *mci,
 
 	/* CHIPKILL enabled */
 	if (info->nbcfg & K8_NBCFG_CHIPKILL) {
-		channel = get_channel_from_ecc_syndrome(syndrome);
+		channel = get_channel_from_ecc_syndrome(mci, syndrome);
 		if (channel < 0) {
 			/*
 			 * Syndrome didn't map, so we don't know which of the
@@ -1177,64 +1135,46 @@ static void k8_map_sysaddr_to_csrow(struct mem_ctl_info *mci,
 		 * was obtained from email communication with someone at AMD.
 		 * (Wish the email was placed in this comment - norsk)
 		 */
-		channel = ((SystemAddress & BIT(3)) != 0);
+		channel = ((sys_addr & BIT(3)) != 0);
 	}
 
 	/*
 	 * Find out which node the error address belongs to. This may be
 	 * different from the node that detected the error.
 	 */
-	src_mci = find_mc_by_sys_addr(mci, SystemAddress);
+	src_mci = find_mc_by_sys_addr(mci, sys_addr);
 	if (!src_mci) {
 		amd64_mc_printk(mci, KERN_ERR,
 			     "failed to map error address 0x%lx to a node\n",
-			     (unsigned long)SystemAddress);
+			     (unsigned long)sys_addr);
 		edac_mc_handle_ce_no_info(mci, EDAC_MOD_STR);
 		return;
 	}
 
-	/* Now map the SystemAddress to a CSROW */
-	csrow = sys_addr_to_csrow(src_mci, SystemAddress);
+	/* Now map the sys_addr to a CSROW */
+	csrow = sys_addr_to_csrow(src_mci, sys_addr);
 	if (csrow < 0) {
 		edac_mc_handle_ce_no_info(src_mci, EDAC_MOD_STR);
 	} else {
-		error_address_to_page_and_offset(SystemAddress, &page, &offset);
+		error_address_to_page_and_offset(sys_addr, &page, &offset);
 
 		edac_mc_handle_ce(src_mci, page, offset, syndrome, csrow,
 				  channel, EDAC_MOD_STR);
 	}
 }
 
-/*
- * determrine the number of PAGES in for this DIMM's size based on its DRAM
- * Address Mapping.
- *
- * First step is to calc the number of bits to shift a value of 1 left to
- * indicate show many pages. Start with the DBAM value as the starting bits,
- * then proceed to adjust those shift bits, based on CPU rev and the table.
- * See BKDG on the DBAM
- */
-static int k8_dbam_map_to_pages(struct amd64_pvt *pvt, int dram_map)
+static int k8_dbam_to_chip_select(struct amd64_pvt *pvt, int cs_mode)
 {
-	int nr_pages;
+	int *dbam_map;
 
-	if (pvt->ext_model >= OPTERON_CPU_REV_F) {
-		nr_pages = 1 << (revf_quad_ddr2_shift[dram_map] - PAGE_SHIFT);
-	} else {
-		/*
-		 * RevE and less section; this line is tricky. It collapses the
-		 * table used by RevD and later to one that matches revisions CG
-		 * and earlier.
-		 */
-		dram_map -= (pvt->ext_model >= OPTERON_CPU_REV_D) ?
-				(dram_map > 8 ? 4 : (dram_map > 5 ?
-				3 : (dram_map > 2 ? 1 : 0))) : 0;
-
-		/* 25 shift is 32MiB minimum DIMM size in RevE and prior */
-		nr_pages = 1 << (dram_map + 25 - PAGE_SHIFT);
-	}
+	if (pvt->ext_model >= K8_REV_F)
+		dbam_map = ddr2_dbam;
+	else if (pvt->ext_model >= K8_REV_D)
+		dbam_map = ddr2_dbam_revD;
+	else
+		dbam_map = ddr2_dbam_revCG;
 
-	return nr_pages;
+	return dbam_map[cs_mode];
 }
 
 /*
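The cs_mode handed to these dbam_to_cs helpers is one nibble of the DBAM register, one per chip-select pair; the driver's DBAM_DIMM() macro extracts it. A self-contained sketch of extraction plus lookup; the macro body and the example register value are assumptions matching the 4-bits-per-DIMM layout described in the BKDG:

    #include <stdio.h>

    /* assumed to match the driver's DBAM_DIMM(): 4 bits per DIMM slot */
    #define DBAM_DIMM(i, dbam)	(((dbam) >> ((i) * 4)) & 0xF)

    /* ddr2_dbam_revD[] from the first hunk, ranges expanded */
    static int ddr2_dbam_revD[] = {
            [0] = 32,  [1] = 64,  [2] = 128,  [3] = 128,  [4] = 256,
            [5] = 512, [6] = 256, [7] = 512, [8] = 1024, [9] = 1024,
            [10] = 2048,
    };

    int main(void)
    {
            unsigned int dbam = 0x00005521;	/* hypothetical F2x80 value */
            int dimm;

            for (dimm = 0; dimm < 4; dimm++)
                    printf("DIMM %d: cs_mode %u -> %d MB\n", dimm,
                           DBAM_DIMM(dimm, dbam),
                           ddr2_dbam_revD[DBAM_DIMM(dimm, dbam)]);
            return 0;
    }

Note how the same nibble decodes differently per revision: cs_mode 5 means 512 MB with the revD table but 1024 MB with both the revCG and the revF DDR2 tables.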
@@ -1248,34 +1188,24 @@ static int k8_dbam_map_to_pages(struct amd64_pvt *pvt, int dram_map)
 static int f10_early_channel_count(struct amd64_pvt *pvt)
 {
 	int dbams[] = { DBAM0, DBAM1 };
-	int err = 0, channels = 0;
-	int i, j;
+	int i, j, channels = 0;
 	u32 dbam;
 
-	err = pci_read_config_dword(pvt->dram_f2_ctl, F10_DCLR_0, &pvt->dclr0);
-	if (err)
-		goto err_reg;
-
-	err = pci_read_config_dword(pvt->dram_f2_ctl, F10_DCLR_1, &pvt->dclr1);
-	if (err)
-		goto err_reg;
-
 	/* If we are in 128 bit mode, then we are using 2 channels */
 	if (pvt->dclr0 & F10_WIDTH_128) {
-		debugf0("Data WIDTH is 128 bits - 2 channels\n");
 		channels = 2;
 		return channels;
 	}
 
 	/*
-	 * Need to check if in UN-ganged mode: In such, there are 2 channels,
-	 * but they are NOT in 128 bit mode and thus the above 'dcl0' status bit
-	 * will be OFF.
+	 * Need to check if in unganged mode: In such, there are 2 channels,
+	 * but they are not in 128 bit mode and thus the above 'dclr0' status
+	 * bit will be OFF.
 	 *
 	 * Need to check DCT0[0] and DCT1[0] to see if only one of them has
 	 * their CSEnable bit on. If so, then SINGLE DIMM case.
 	 */
-	debugf0("Data WIDTH is NOT 128 bits - need more decoding\n");
+	debugf0("Data width is not 128 bits - need more decoding\n");
 
 	/*
 	 * Check DRAM Bank Address Mapping values for each DIMM to see if there
@@ -1283,8 +1213,7 @@ static int f10_early_channel_count(struct amd64_pvt *pvt)
 	 * both controllers since DIMMs can be placed in either one.
 	 */
 	for (i = 0; i < ARRAY_SIZE(dbams); i++) {
-		err = pci_read_config_dword(pvt->dram_f2_ctl, dbams[i], &dbam);
-		if (err)
+		if (amd64_read_pci_cfg(pvt->dram_f2_ctl, dbams[i], &dbam))
 			goto err_reg;
 
 		for (j = 0; j < 4; j++) {
@@ -1295,6 +1224,9 @@ static int f10_early_channel_count(struct amd64_pvt *pvt)
 		}
 	}
 
+	if (channels > 2)
+		channels = 2;
+
 	debugf0("MCT channel count: %d\n", channels);
 
 	return channels;
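The new clamp exists because the loop above tallies populated DIMM slots (non-zero DBAM nibbles) across both DCTs, which can exceed the two channels the memory controller actually has. A rough user-space rendering of the heuristic; the inner-loop body is paraphrased since the hunk doesn't show it, so treat the counting detail as an assumption:

    #include <stdio.h>

    #define DBAM_DIMM(i, dbam)	(((dbam) >> ((i) * 4)) & 0xF)

    int main(void)
    {
            unsigned int dbam[2] = { 0x0021, 0x0021 };	/* DBAM0, DBAM1 */
            int i, j, channels = 0;

            for (i = 0; i < 2; i++)			/* both DCTs */
                    for (j = 0; j < 4; j++)		/* 4 DIMM slots each */
                            if (DBAM_DIMM(j, dbam[i]))
                                    channels++;	/* here: counts to 4 */

            if (channels > 2)	/* the clamp added by the patch */
                    channels = 2;

            printf("MCT channel count: %d\n", channels);
            return 0;
    }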
@@ -1304,9 +1236,16 @@ err_reg:
 
 }
 
-static int f10_dbam_map_to_pages(struct amd64_pvt *pvt, int dram_map)
+static int f10_dbam_to_chip_select(struct amd64_pvt *pvt, int cs_mode)
 {
-	return 1 << (revf_quad_ddr2_shift[dram_map] - PAGE_SHIFT);
+	int *dbam_map;
+
+	if (pvt->dchr0 & DDR3_MODE || pvt->dchr1 & DDR3_MODE)
+		dbam_map = ddr3_dbam;
+	else
+		dbam_map = ddr2_dbam;
+
+	return dbam_map[cs_mode];
 }
 
 /* Enable extended configuration access via 0xCF8 feature */
@@ -1314,7 +1253,7 @@ static void amd64_setup(struct amd64_pvt *pvt)
 {
 	u32 reg;
 
-	pci_read_config_dword(pvt->misc_f3_ctl, F10_NB_CFG_HIGH, &reg);
+	amd64_read_pci_cfg(pvt->misc_f3_ctl, F10_NB_CFG_HIGH, &reg);
 
 	pvt->flags.cf8_extcfg = !!(reg & F10_NB_CFG_LOW_ENABLE_EXT_CFG);
 	reg |= F10_NB_CFG_LOW_ENABLE_EXT_CFG;
@@ -1326,7 +1265,7 @@ static void amd64_teardown(struct amd64_pvt *pvt)
 {
 	u32 reg;
 
-	pci_read_config_dword(pvt->misc_f3_ctl, F10_NB_CFG_HIGH, &reg);
+	amd64_read_pci_cfg(pvt->misc_f3_ctl, F10_NB_CFG_HIGH, &reg);
 
 	reg &= ~F10_NB_CFG_LOW_ENABLE_EXT_CFG;
 	if (pvt->flags.cf8_extcfg)
@@ -1355,10 +1294,10 @@ static void f10_read_dram_base_limit(struct amd64_pvt *pvt, int dram)
 	high_offset = F10_DRAM_BASE_HIGH + (dram << 3);
 
 	/* read the 'raw' DRAM BASE Address register */
-	pci_read_config_dword(pvt->addr_f1_ctl, low_offset, &low_base);
+	amd64_read_pci_cfg(pvt->addr_f1_ctl, low_offset, &low_base);
 
 	/* Read from the ECS data register */
-	pci_read_config_dword(pvt->addr_f1_ctl, high_offset, &high_base);
+	amd64_read_pci_cfg(pvt->addr_f1_ctl, high_offset, &high_base);
 
 	/* Extract parts into separate data entries */
 	pvt->dram_rw_en[dram] = (low_base & 0x3);
@@ -1375,13 +1314,10 @@ static void f10_read_dram_base_limit(struct amd64_pvt *pvt, int dram)
 	high_offset = F10_DRAM_LIMIT_HIGH + (dram << 3);
 
 	/* read the 'raw' LIMIT registers */
-	pci_read_config_dword(pvt->addr_f1_ctl, low_offset, &low_limit);
+	amd64_read_pci_cfg(pvt->addr_f1_ctl, low_offset, &low_limit);
 
 	/* Read from the ECS data register for the HIGH portion */
-	pci_read_config_dword(pvt->addr_f1_ctl, high_offset, &high_limit);
-
-	debugf0("  HW Regs: BASE=0x%08x-%08x LIMIT=  0x%08x-%08x\n",
-		high_base, low_base, high_limit, low_limit);
+	amd64_read_pci_cfg(pvt->addr_f1_ctl, high_offset, &high_limit);
 
 	pvt->dram_DstNode[dram] = (low_limit & 0x7);
 	pvt->dram_IntlvSel[dram] = (low_limit >> 8) & 0x7;
@@ -1397,32 +1333,35 @@ static void f10_read_dram_base_limit(struct amd64_pvt *pvt, int dram)
 
 static void f10_read_dram_ctl_register(struct amd64_pvt *pvt)
 {
-	int err = 0;
 
-	err = pci_read_config_dword(pvt->dram_f2_ctl, F10_DCTL_SEL_LOW,
-				    &pvt->dram_ctl_select_low);
-	if (err) {
-		debugf0("Reading F10_DCTL_SEL_LOW failed\n");
-	} else {
-		debugf0("DRAM_DCTL_SEL_LOW=0x%x DctSelBaseAddr=0x%x\n",
-			pvt->dram_ctl_select_low, dct_sel_baseaddr(pvt));
-
-		debugf0("  DRAM DCTs are=%s DRAM Is=%s DRAM-Ctl-"
-			"sel-hi-range=%s\n",
-			(dct_ganging_enabled(pvt) ? "GANGED" : "NOT GANGED"),
-			(dct_dram_enabled(pvt) ? "Enabled" : "Disabled"),
-			(dct_high_range_enabled(pvt) ? "Enabled" : "Disabled"));
-
-		debugf0("  DctDatIntLv=%s MemCleared=%s DctSelIntLvAddr=0x%x\n",
-			(dct_data_intlv_enabled(pvt) ? "Enabled" : "Disabled"),
-			(dct_memory_cleared(pvt) ? "True " : "False "),
+	if (!amd64_read_pci_cfg(pvt->dram_f2_ctl, F10_DCTL_SEL_LOW,
+				&pvt->dram_ctl_select_low)) {
+		debugf0("F2x110 (DCTL Sel. Low): 0x%08x, "
+			"High range addresses at: 0x%x\n",
+			pvt->dram_ctl_select_low,
+			dct_sel_baseaddr(pvt));
+
+		debugf0("  DCT mode: %s, All DCTs on: %s\n",
+			(dct_ganging_enabled(pvt) ? "ganged" : "unganged"),
+			(dct_dram_enabled(pvt) ? "yes" : "no"));
+
+		if (!dct_ganging_enabled(pvt))
+			debugf0("  Address range split per DCT: %s\n",
+				(dct_high_range_enabled(pvt) ? "yes" : "no"));
+
+		debugf0("  DCT data interleave for ECC: %s, "
+			"DRAM cleared since last warm reset: %s\n",
+			(dct_data_intlv_enabled(pvt) ? "enabled" : "disabled"),
+			(dct_memory_cleared(pvt) ? "yes" : "no"));
+
+		debugf0("  DCT channel interleave: %s, "
+			"DCT interleave bits selector: 0x%x\n",
+			(dct_interleave_enabled(pvt) ? "enabled" : "disabled"),
 			dct_sel_interleave_addr(pvt));
 	}
 
-	err = pci_read_config_dword(pvt->dram_f2_ctl, F10_DCTL_SEL_HIGH,
-				    &pvt->dram_ctl_select_high);
-	if (err)
-		debugf0("Reading F10_DCTL_SEL_HIGH failed\n");
+	amd64_read_pci_cfg(pvt->dram_f2_ctl, F10_DCTL_SEL_HIGH,
+			   &pvt->dram_ctl_select_high);
 }
 
 /*
@@ -1706,10 +1645,11 @@ static int f10_translate_sysaddr_to_cs(struct amd64_pvt *pvt, u64 sys_addr,
 }
 
 /*
- * This the F10h reference code from AMD to map a @sys_addr to NodeID,
- * CSROW, Channel.
+ * For reference see "2.8.5 Routing DRAM Requests" in F10 BKDG. This code maps
+ * a @sys_addr to NodeID, DCT (channel) and chip select (CSROW).
  *
- * The @sys_addr is usually an error address received from the hardware.
+ * The @sys_addr is usually an error address received from the hardware
+ * (MCX_ADDR).
  */
 static void f10_map_sysaddr_to_csrow(struct mem_ctl_info *mci,
 				     struct err_regs *info,
@@ -1722,133 +1662,76 @@ static void f10_map_sysaddr_to_csrow(struct mem_ctl_info *mci,
 
 	csrow = f10_translate_sysaddr_to_cs(pvt, sys_addr, &nid, &chan);
 
-	if (csrow >= 0) {
-		error_address_to_page_and_offset(sys_addr, &page, &offset);
-
-		syndrome = HIGH_SYNDROME(info->nbsl) << 8;
-		syndrome |= LOW_SYNDROME(info->nbsh);
-
-		/*
-		 * Is CHIPKILL on? If so, then we can attempt to use the
-		 * syndrome to isolate which channel the error was on.
-		 */
-		if (pvt->nbcfg & K8_NBCFG_CHIPKILL)
-			chan = get_channel_from_ecc_syndrome(syndrome);
-
-		if (chan >= 0) {
-			edac_mc_handle_ce(mci, page, offset, syndrome,
-					  csrow, chan, EDAC_MOD_STR);
-		} else {
-			/*
-			 * Channel unknown, report all channels on this
-			 * CSROW as failed.
-			 */
-			for (chan = 0; chan < mci->csrows[csrow].nr_channels;
-								chan++) {
-					edac_mc_handle_ce(mci, page, offset,
-							syndrome,
-							csrow, chan,
-							EDAC_MOD_STR);
-			}
-		}
-
-	} else {
+	if (csrow < 0) {
 		edac_mc_handle_ce_no_info(mci, EDAC_MOD_STR);
+		return;
 	}
-}
 
-/*
- * Input (@index) is the DBAM DIMM value (1 of 4) used as an index into a shift
- * table (revf_quad_ddr2_shift) which starts at 128MB DIMM size. Index of 0
- * indicates an empty DIMM slot, as reported by Hardware on empty slots.
- *
- * Normalize to 128MB by subracting 27 bit shift.
- */
-static int map_dbam_to_csrow_size(int index)
-{
-	int mega_bytes = 0;
+	error_address_to_page_and_offset(sys_addr, &page, &offset);
 
-	if (index > 0 && index <= DBAM_MAX_VALUE)
-		mega_bytes = ((128 << (revf_quad_ddr2_shift[index]-27)));
+	syndrome  = HIGH_SYNDROME(info->nbsl) << 8;
+	syndrome |= LOW_SYNDROME(info->nbsh);
 
-	return mega_bytes;
+	/*
+	 * We need the syndromes for channel detection only when we're
+	 * ganged. Otherwise @chan should already contain the channel at
+	 * this point.
+	 */
+	if (dct_ganging_enabled(pvt) && pvt->nbcfg & K8_NBCFG_CHIPKILL)
+		chan = get_channel_from_ecc_syndrome(mci, syndrome);
+
+	if (chan >= 0)
+		edac_mc_handle_ce(mci, page, offset, syndrome, csrow, chan,
+				  EDAC_MOD_STR);
+	else
+		/*
+		 * Channel unknown, report all channels on this CSROW as failed.
+		 */
+		for (chan = 0; chan < mci->csrows[csrow].nr_channels; chan++)
+			edac_mc_handle_ce(mci, page, offset, syndrome,
+					  csrow, chan, EDAC_MOD_STR);
 }
 
 /*
- * debug routine to display the memory sizes of a DIMM (ganged or not) and it
+ * debug routine to display the memory sizes of all logical DIMMs and its
  * CSROWs as well
  */
-static void f10_debug_display_dimm_sizes(int ctrl, struct amd64_pvt *pvt,
-					 int ganged)
+static void amd64_debug_display_dimm_sizes(int ctrl, struct amd64_pvt *pvt)
 {
 	int dimm, size0, size1;
 	u32 dbam;
 	u32 *dcsb;
 
-	debugf1("  dbam%d: 0x%8.08x  CSROW is %s\n", ctrl,
-		ctrl ? pvt->dbam1 : pvt->dbam0,
-		ganged ? "GANGED - dbam1 not used" : "NON-GANGED");
+	if (boot_cpu_data.x86 == 0xf) {
+		/* K8 families < revF not supported yet */
+		if (pvt->ext_model < K8_REV_F)
+			return;
+		else
+			WARN_ON(ctrl != 0);
+	}
+
+	debugf1("F2x%d80 (DRAM Bank Address Mapping): 0x%08x\n",
+		ctrl, ctrl ? pvt->dbam1 : pvt->dbam0);
 
 	dbam = ctrl ? pvt->dbam1 : pvt->dbam0;
 	dcsb = ctrl ? pvt->dcsb1 : pvt->dcsb0;
 
+	edac_printk(KERN_DEBUG, EDAC_MC, "DCT%d chip selects:\n", ctrl);
+
 	/* Dump memory sizes for DIMM and its CSROWs */
 	for (dimm = 0; dimm < 4; dimm++) {
 
 		size0 = 0;
 		if (dcsb[dimm*2] & K8_DCSB_CS_ENABLE)
-			size0 = map_dbam_to_csrow_size(DBAM_DIMM(dimm, dbam));
+			size0 = pvt->ops->dbam_to_cs(pvt, DBAM_DIMM(dimm, dbam));
 
 		size1 = 0;
 		if (dcsb[dimm*2 + 1] & K8_DCSB_CS_ENABLE)
-			size1 = map_dbam_to_csrow_size(DBAM_DIMM(dimm, dbam));
-
-		debugf1("     CTRL-%d DIMM-%d=%5dMB   CSROW-%d=%5dMB "
-			"CSROW-%d=%5dMB\n",
-			ctrl,
-			dimm,
-			size0 + size1,
-			dimm * 2,
-			size0,
-			dimm * 2 + 1,
-			size1);
-	}
-}
-
-/*
- * Very early hardware probe on pci_probe thread to determine if this module
- * supports the hardware.
- *
- * Return:
- *      0 for OK
- *      1 for error
- */
-static int f10_probe_valid_hardware(struct amd64_pvt *pvt)
-{
-	int ret = 0;
-
-	/*
-	 * If we are on a DDR3 machine, we don't know yet if
-	 * we support that properly at this time
-	 */
-	if ((pvt->dchr0 & F10_DCHR_Ddr3Mode) ||
-	    (pvt->dchr1 & F10_DCHR_Ddr3Mode)) {
-
-		amd64_printk(KERN_WARNING,
-			"%s() This machine is running with DDR3 memory. "
-			"This is not currently supported. "
-			"DCHR0=0x%x DCHR1=0x%x\n",
-			__func__, pvt->dchr0, pvt->dchr1);
-
-		amd64_printk(KERN_WARNING,
-			"    Contact '%s' module MAINTAINER to help add"
-			" support.\n",
-			EDAC_MOD_STR);
-
-		ret = 1;
+			size1 = pvt->ops->dbam_to_cs(pvt, DBAM_DIMM(dimm, dbam));
 
+		edac_printk(KERN_DEBUG, EDAC_MC, " %d: %5dMB %d: %5dMB\n",
+			    dimm * 2, size0, dimm * 2 + 1, size1);
 	}
-	return ret;
 }
 
 /*
@@ -1868,11 +1751,11 @@ static struct amd64_family_type amd64_family_types[] = {
 		.addr_f1_ctl = PCI_DEVICE_ID_AMD_K8_NB_ADDRMAP,
 		.misc_f3_ctl = PCI_DEVICE_ID_AMD_K8_NB_MISC,
 		.ops = {
-			.early_channel_count = k8_early_channel_count,
-			.get_error_address = k8_get_error_address,
-			.read_dram_base_limit = k8_read_dram_base_limit,
-			.map_sysaddr_to_csrow = k8_map_sysaddr_to_csrow,
-			.dbam_map_to_pages = k8_dbam_map_to_pages,
+			.early_channel_count	= k8_early_channel_count,
+			.get_error_address	= k8_get_error_address,
+			.read_dram_base_limit	= k8_read_dram_base_limit,
+			.map_sysaddr_to_csrow	= k8_map_sysaddr_to_csrow,
+			.dbam_to_cs		= k8_dbam_to_chip_select,
 		}
 	},
 	[F10_CPUS] = {
@@ -1880,13 +1763,12 @@ static struct amd64_family_type amd64_family_types[] = {
 		.addr_f1_ctl = PCI_DEVICE_ID_AMD_10H_NB_MAP,
 		.misc_f3_ctl = PCI_DEVICE_ID_AMD_10H_NB_MISC,
 		.ops = {
-			.probe_valid_hardware = f10_probe_valid_hardware,
-			.early_channel_count = f10_early_channel_count,
-			.get_error_address = f10_get_error_address,
-			.read_dram_base_limit = f10_read_dram_base_limit,
-			.read_dram_ctl_register = f10_read_dram_ctl_register,
-			.map_sysaddr_to_csrow = f10_map_sysaddr_to_csrow,
-			.dbam_map_to_pages = f10_dbam_map_to_pages,
+			.early_channel_count	= f10_early_channel_count,
+			.get_error_address	= f10_get_error_address,
+			.read_dram_base_limit	= f10_read_dram_base_limit,
+			.read_dram_ctl_register	= f10_read_dram_ctl_register,
+			.map_sysaddr_to_csrow	= f10_map_sysaddr_to_csrow,
+			.dbam_to_cs		= f10_dbam_to_chip_select,
 		}
 	},
 	[F11_CPUS] = {
@@ -1894,13 +1776,12 @@ static struct amd64_family_type amd64_family_types[] = {
 		.addr_f1_ctl = PCI_DEVICE_ID_AMD_11H_NB_MAP,
 		.misc_f3_ctl = PCI_DEVICE_ID_AMD_11H_NB_MISC,
 		.ops = {
-			.probe_valid_hardware = f10_probe_valid_hardware,
-			.early_channel_count = f10_early_channel_count,
-			.get_error_address = f10_get_error_address,
-			.read_dram_base_limit = f10_read_dram_base_limit,
-			.read_dram_ctl_register = f10_read_dram_ctl_register,
-			.map_sysaddr_to_csrow = f10_map_sysaddr_to_csrow,
-			.dbam_map_to_pages = f10_dbam_map_to_pages,
+			.early_channel_count	= f10_early_channel_count,
+			.get_error_address	= f10_get_error_address,
+			.read_dram_base_limit	= f10_read_dram_base_limit,
+			.read_dram_ctl_register	= f10_read_dram_ctl_register,
+			.map_sysaddr_to_csrow	= f10_map_sysaddr_to_csrow,
+			.dbam_to_cs		= f10_dbam_to_chip_select,
 		}
 	},
 };
@@ -1923,142 +1804,170 @@ static struct pci_dev *pci_get_related_function(unsigned int vendor, | |||
1923 | } | 1804 | } |
1924 | 1805 | ||
1925 | /* | 1806 | /* |
1926 | * syndrome mapping table for ECC ChipKill devices | 1807 | * These are tables of eigenvectors (one per line) which can be used for the |
1927 | * | 1808 | * construction of the syndrome tables. The modified syndrome search algorithm |
1928 | * The comment in each row is the token (nibble) number that is in error. | 1809 | * uses those to find the symbol in error and thus the DIMM. |
1929 | * The least significant nibble of the syndrome is the mask for the bits | ||
1930 | * that are in error (need to be toggled) for the particular nibble. | ||
1931 | * | ||
1932 | * Each row contains 16 entries. | ||
1933 | * The first entry (0th) is the channel number for that row of syndromes. | ||
1934 | * The remaining 15 entries are the syndromes for the respective Error | ||
1935 | * bit mask index. | ||
1936 | * | ||
1937 | * 1st index entry is 0x0001 mask, indicating that the rightmost bit is the | ||
1938 | * bit in error. | ||
1939 | * The 2nd index entry is 0x0010 that the second bit is damaged. | ||
1940 | * The 3rd index entry is 0x0011 indicating that the rightmost 2 bits | ||
1941 | * are damaged. | ||
1942 | * Thus so on until index 15, 0x1111, whose entry has the syndrome | ||
1943 | * indicating that all 4 bits are damaged. | ||
1944 | * | ||
1945 | * A search is performed on this table looking for a given syndrome. | ||
1946 | * | 1810 | * |
1947 | * See the AMD documentation for ECC syndromes. This ECC table is valid | 1811 | * Algorithm courtesy of Ross LaFetra from AMD. |
1948 | * across all the versions of the AMD64 processors. | ||
1949 | * | ||
1950 | * A fast lookup is to use the LAST four bits of the 16-bit syndrome as a | ||
1951 | * COLUMN index, then search all ROWS of that column, looking for a match | ||
1952 | * with the input syndrome. The ROW value will be the token number. | ||
1953 | * | ||
1954 | * The 0'th entry on that row, can be returned as the CHANNEL (0 or 1) of this | ||
1955 | * error. | ||
1956 | */ | 1812 | */ |
1957 | #define NUMBER_ECC_ROWS 36 | 1813 | static u16 x4_vectors[] = { |
1958 | static const unsigned short ecc_chipkill_syndromes[NUMBER_ECC_ROWS][16] = { | 1814 | 0x2f57, 0x1afe, 0x66cc, 0xdd88, |
1959 | /* Channel 0 syndromes */ | 1815 | 0x11eb, 0x3396, 0x7f4c, 0xeac8, |
1960 | {/*0*/ 0, 0xe821, 0x7c32, 0x9413, 0xbb44, 0x5365, 0xc776, 0x2f57, | 1816 | 0x0001, 0x0002, 0x0004, 0x0008, |
1961 | 0xdd88, 0x35a9, 0xa1ba, 0x499b, 0x66cc, 0x8eed, 0x1afe, 0xf2df }, | 1817 | 0x1013, 0x3032, 0x4044, 0x8088, |
1962 | {/*1*/ 0, 0x5d31, 0xa612, 0xfb23, 0x9584, 0xc8b5, 0x3396, 0x6ea7, | 1818 | 0x106b, 0x30d6, 0x70fc, 0xe0a8, |
1963 | 0xeac8, 0xb7f9, 0x4cda, 0x11eb, 0x7f4c, 0x227d, 0xd95e, 0x846f }, | 1819 | 0x4857, 0xc4fe, 0x13cc, 0x3288, |
1964 | {/*2*/ 0, 0x0001, 0x0002, 0x0003, 0x0004, 0x0005, 0x0006, 0x0007, | 1820 | 0x1ac5, 0x2f4a, 0x5394, 0xa1e8, |
1965 | 0x0008, 0x0009, 0x000a, 0x000b, 0x000c, 0x000d, 0x000e, 0x000f }, | 1821 | 0x1f39, 0x251e, 0xbd6c, 0x6bd8, |
1966 | {/*3*/ 0, 0x2021, 0x3032, 0x1013, 0x4044, 0x6065, 0x7076, 0x5057, | 1822 | 0x15c1, 0x2a42, 0x89ac, 0x4758, |
1967 | 0x8088, 0xa0a9, 0xb0ba, 0x909b, 0xc0cc, 0xe0ed, 0xf0fe, 0xd0df }, | 1823 | 0x2b03, 0x1602, 0x4f0c, 0xca08, |
1968 | {/*4*/ 0, 0x5041, 0xa082, 0xf0c3, 0x9054, 0xc015, 0x30d6, 0x6097, | 1824 | 0x1f07, 0x3a0e, 0x6b04, 0xbd08, |
1969 | 0xe0a8, 0xb0e9, 0x402a, 0x106b, 0x70fc, 0x20bd, 0xd07e, 0x803f }, | 1825 | 0x8ba7, 0x465e, 0x244c, 0x1cc8, |
1970 | {/*5*/ 0, 0xbe21, 0xd732, 0x6913, 0x2144, 0x9f65, 0xf676, 0x4857, | 1826 | 0x2b87, 0x164e, 0x642c, 0xdc18, |
1971 | 0x3288, 0x8ca9, 0xe5ba, 0x5b9b, 0x13cc, 0xaded, 0xc4fe, 0x7adf }, | 1827 | 0x40b9, 0x80de, 0x1094, 0x20e8, |
1972 | {/*6*/ 0, 0x4951, 0x8ea2, 0xc7f3, 0x5394, 0x1ac5, 0xdd36, 0x9467, | 1828 | 0x27db, 0x1eb6, 0x9dac, 0x7b58, |
1973 | 0xa1e8, 0xe8b9, 0x2f4a, 0x661b, 0xf27c, 0xbb2d, 0x7cde, 0x358f }, | 1829 | 0x11c1, 0x2242, 0x84ac, 0x4c58, |
1974 | {/*7*/ 0, 0x74e1, 0x9872, 0xec93, 0xd6b4, 0xa255, 0x4ec6, 0x3a27, | 1830 | 0x1be5, 0x2d7a, 0x5e34, 0xa718, |
1975 | 0x6bd8, 0x1f39, 0xf3aa, 0x874b, 0xbd6c, 0xc98d, 0x251e, 0x51ff }, | 1831 | 0x4b39, 0x8d1e, 0x14b4, 0x28d8, |
1976 | {/*8*/ 0, 0x15c1, 0x2a42, 0x3f83, 0xcef4, 0xdb35, 0xe4b6, 0xf177, | 1832 | 0x4c97, 0xc87e, 0x11fc, 0x33a8, |
1977 | 0x4758, 0x5299, 0x6d1a, 0x78db, 0x89ac, 0x9c6d, 0xa3ee, 0xb62f }, | 1833 | 0x8e97, 0x497e, 0x2ffc, 0x1aa8, |
1978 | {/*9*/ 0, 0x3d01, 0x1602, 0x2b03, 0x8504, 0xb805, 0x9306, 0xae07, | 1834 | 0x16b3, 0x3d62, 0x4f34, 0x8518, |
1979 | 0xca08, 0xf709, 0xdc0a, 0xe10b, 0x4f0c, 0x720d, 0x590e, 0x640f }, | 1835 | 0x1e2f, 0x391a, 0x5cac, 0xf858, |
1980 | {/*a*/ 0, 0x9801, 0xec02, 0x7403, 0x6b04, 0xf305, 0x8706, 0x1f07, | 1836 | 0x1d9f, 0x3b7a, 0x572c, 0xfe18, |
1981 | 0xbd08, 0x2509, 0x510a, 0xc90b, 0xd60c, 0x4e0d, 0x3a0e, 0xa20f }, | 1837 | 0x15f5, 0x2a5a, 0x5264, 0xa3b8, |
1982 | {/*b*/ 0, 0xd131, 0x6212, 0xb323, 0x3884, 0xe9b5, 0x5a96, 0x8ba7, | 1838 | 0x1dbb, 0x3b66, 0x715c, 0xe3f8, |
1983 | 0x1cc8, 0xcdf9, 0x7eda, 0xafeb, 0x244c, 0xf57d, 0x465e, 0x976f }, | 1839 | 0x4397, 0xc27e, 0x17fc, 0x3ea8, |
1984 | {/*c*/ 0, 0xe1d1, 0x7262, 0x93b3, 0xb834, 0x59e5, 0xca56, 0x2b87, | 1840 | 0x1617, 0x3d3e, 0x6464, 0xb8b8, |
1985 | 0xdc18, 0x3dc9, 0xae7a, 0x4fab, 0x542c, 0x85fd, 0x164e, 0xf79f }, | 1841 | 0x23ff, 0x12aa, 0xab6c, 0x56d8, |
1986 | {/*d*/ 0, 0x6051, 0xb0a2, 0xd0f3, 0x1094, 0x70c5, 0xa036, 0xc067, | 1842 | 0x2dfb, 0x1ba6, 0x913c, 0x7328, |
1987 | 0x20e8, 0x40b9, 0x904a, 0x601b, 0x307c, 0x502d, 0x80de, 0xe08f }, | 1843 | 0x185d, 0x2ca6, 0x7914, 0x9e28, |
1988 | {/*e*/ 0, 0xa4c1, 0xf842, 0x5c83, 0xe6f4, 0x4235, 0x1eb6, 0xba77, | 1844 | 0x171b, 0x3e36, 0x7d7c, 0xebe8, |
1989 | 0x7b58, 0xdf99, 0x831a, 0x27db, 0x9dac, 0x396d, 0x65ee, 0xc12f }, | 1845 | 0x4199, 0x82ee, 0x19f4, 0x2e58, |
1990 | {/*f*/ 0, 0x11c1, 0x2242, 0x3383, 0xc8f4, 0xd935, 0xeab6, 0xfb77, | 1846 | 0x4807, 0xc40e, 0x130c, 0x3208, |
1991 | 0x4c58, 0x5d99, 0x6e1a, 0x7fdb, 0x84ac, 0x956d, 0xa6ee, 0xb72f }, | 1847 | 0x1905, 0x2e0a, 0x5804, 0xac08, |
1992 | 1848 | 0x213f, 0x132a, 0xadfc, 0x5ba8, | |
1993 | /* Channel 1 syndromes */ | 1849 | 0x19a9, 0x2efe, 0xb5cc, 0x6f88, |
1994 | {/*10*/ 1, 0x45d1, 0x8a62, 0xcfb3, 0x5e34, 0x1be5, 0xd456, 0x9187, | ||
1995 | 0xa718, 0xe2c9, 0x2d7a, 0x68ab, 0xf92c, 0xbcfd, 0x734e, 0x369f }, | ||
1996 | {/*11*/ 1, 0x63e1, 0xb172, 0xd293, 0x14b4, 0x7755, 0xa5c6, 0xc627, | ||
1997 | 0x28d8, 0x4b39, 0x99aa, 0xfa4b, 0x3c6c, 0x5f8d, 0x8d1e, 0xeeff }, | ||
1998 | {/*12*/ 1, 0xb741, 0xd982, 0x6ec3, 0x2254, 0x9515, 0xfbd6, 0x4c97, | ||
1999 | 0x33a8, 0x84e9, 0xea2a, 0x5d6b, 0x11fc, 0xa6bd, 0xc87e, 0x7f3f }, | ||
2000 | {/*13*/ 1, 0xdd41, 0x6682, 0xbbc3, 0x3554, 0xe815, 0x53d6, 0xce97, | ||
2001 | 0x1aa8, 0xc7e9, 0x7c2a, 0xa1fb, 0x2ffc, 0xf2bd, 0x497e, 0x943f }, | ||
2002 | {/*14*/ 1, 0x2bd1, 0x3d62, 0x16b3, 0x4f34, 0x64e5, 0x7256, 0x5987, | ||
2003 | 0x8518, 0xaec9, 0xb87a, 0x93ab, 0xca2c, 0xe1fd, 0xf74e, 0xdc9f }, | ||
2004 | {/*15*/ 1, 0x83c1, 0xc142, 0x4283, 0xa4f4, 0x2735, 0x65b6, 0xe677, | ||
2005 | 0xf858, 0x7b99, 0x391a, 0xbadb, 0x5cac, 0xdf6d, 0x9dee, 0x1e2f }, | ||
2006 | {/*16*/ 1, 0x8fd1, 0xc562, 0x4ab3, 0xa934, 0x26e5, 0x6c56, 0xe387, | ||
2007 | 0xfe18, 0x71c9, 0x3b7a, 0xb4ab, 0x572c, 0xd8fd, 0x924e, 0x1d9f }, | ||
2008 | {/*17*/ 1, 0x4791, 0x89e2, 0xce73, 0x5264, 0x15f5, 0xdb86, 0x9c17, | ||
2009 | 0xa3b8, 0xe429, 0x2a5a, 0x6dcb, 0xf1dc, 0xb64d, 0x783e, 0x3faf }, | ||
2010 | {/*18*/ 1, 0x5781, 0xa9c2, 0xfe43, 0x92a4, 0xc525, 0x3b66, 0x6ce7, | ||
2011 | 0xe3f8, 0xb479, 0x4a3a, 0x1dbb, 0x715c, 0x26dd, 0xd89e, 0x8f1f }, | ||
2012 | {/*19*/ 1, 0xbf41, 0xd582, 0x6ac3, 0x2954, 0x9615, 0xfcd6, 0x4397, | ||
2013 | 0x3ea8, 0x81e9, 0xeb2a, 0x546b, 0x17fc, 0xa8bd, 0xc27e, 0x7d3f }, | ||
2014 | {/*1a*/ 1, 0x9891, 0xe1e2, 0x7273, 0x6464, 0xf7f5, 0x8586, 0x1617, | ||
2015 | 0xb8b8, 0x2b29, 0x595a, 0xcacb, 0xdcdc, 0x4f4d, 0x3d3e, 0xaeaf }, | ||
2016 | {/*1b*/ 1, 0xcce1, 0x4472, 0x8893, 0xfdb4, 0x3f55, 0xb9c6, 0x7527, | ||
2017 | 0x56d8, 0x9a39, 0x12aa, 0xde4b, 0xab6c, 0x678d, 0xef1e, 0x23ff }, | ||
2018 | {/*1c*/ 1, 0xa761, 0xf9b2, 0x5ed3, 0xe214, 0x4575, 0x1ba6, 0xbcc7, | ||
2019 | 0x7328, 0xd449, 0x8a9a, 0x2dfb, 0x913c, 0x365d, 0x688e, 0xcfef }, | ||
2020 | {/*1d*/ 1, 0xff61, 0x55b2, 0xaad3, 0x7914, 0x8675, 0x2ca6, 0xd3c7, | ||
2021 | 0x9e28, 0x6149, 0xcb9a, 0x34fb, 0xe73c, 0x185d, 0xb28e, 0x4def }, | ||
2022 | {/*1e*/ 1, 0x5451, 0xa8a2, 0xfcf3, 0x9694, 0xc2c5, 0x3e36, 0x6a67, | ||
2023 | 0xebe8, 0xbfb9, 0x434a, 0x171b, 0x7d7c, 0x292d, 0xd5de, 0x818f }, | ||
2024 | {/*1f*/ 1, 0x6fc1, 0xb542, 0xda83, 0x19f4, 0x7635, 0xacb6, 0xc377, | ||
2025 | 0x2e58, 0x4199, 0x9b1a, 0xf4db, 0x37ac, 0x586d, 0x82ee, 0xed2f }, | ||
2026 | |||
2027 | /* ECC bits are also in the set of tokens and they too can go bad; | ||
2028 | * the first 2 cover channel 0, while the second 2 cover channel 1 | ||
2029 | */ | ||
2030 | {/*20*/ 0, 0xbe01, 0xd702, 0x6903, 0x2104, 0x9f05, 0xf606, 0x4807, | ||
2031 | 0x3208, 0x8c09, 0xe50a, 0x5b0b, 0x130c, 0xad0d, 0xc40e, 0x7a0f }, | ||
2032 | {/*21*/ 0, 0x4101, 0x8202, 0xc303, 0x5804, 0x1905, 0xda06, 0x9b07, | ||
2033 | 0xac08, 0xed09, 0x2e0a, 0x6f0b, 0x640c, 0xb50d, 0x760e, 0x370f }, | ||
2034 | {/*22*/ 1, 0xc441, 0x4882, 0x8cc3, 0xf654, 0x3215, 0xbed6, 0x7a97, | ||
2035 | 0x5ba8, 0x9fe9, 0x132a, 0xd76b, 0xadfc, 0x69bd, 0xe57e, 0x213f }, | ||
2036 | {/*23*/ 1, 0x7621, 0x9b32, 0xed13, 0xda44, 0xac65, 0x4176, 0x3757, | ||
2037 | 0x6f88, 0x19a9, 0xf4ba, 0x829b, 0xb5cc, 0xc3ed, 0x2efe, 0x58df } | ||
2038 | }; | 1850 | }; |
2039 | 1851 | ||
2040 | /* | 1852 | static u16 x8_vectors[] = { |
2041 | * Given the syndrome argument, scan each of the channel tables for a syndrome | 1853 | 0x0145, 0x028a, 0x2374, 0x43c8, 0xa1f0, 0x0520, 0x0a40, 0x1480, |
2042 | * match. Depending on the table in which it is found, return the channel number. | 1854 | 0x0211, 0x0422, 0x0844, 0x1088, 0x01b0, 0x44e0, 0x23c0, 0xed80,
2043 | */ | 1855 | 0x1011, 0x0116, 0x022c, 0x0458, 0x08b0, 0x8c60, 0x2740, 0x4e80, |
2044 | static int get_channel_from_ecc_syndrome(unsigned short syndrome) | 1856 | 0x0411, 0x0822, 0x1044, 0x0158, 0x02b0, 0x2360, 0x46c0, 0xab80, |
1857 | 0x0811, 0x1022, 0x012c, 0x0258, 0x04b0, 0x4660, 0x8cc0, 0x2780, | ||
1858 | 0x2071, 0x40e2, 0xa0c4, 0x0108, 0x0210, 0x0420, 0x0840, 0x1080, | ||
1859 | 0x4071, 0x80e2, 0x0104, 0x0208, 0x0410, 0x0820, 0x1040, 0x2080, | ||
1860 | 0x8071, 0x0102, 0x0204, 0x0408, 0x0810, 0x1020, 0x2040, 0x4080, | ||
1861 | 0x019d, 0x03d6, 0x136c, 0x2198, 0x50b0, 0xb2e0, 0x0740, 0x0e80, | ||
1862 | 0x0189, 0x03ea, 0x072c, 0x0e58, 0x1cb0, 0x56e0, 0x37c0, 0xf580, | ||
1863 | 0x01fd, 0x0376, 0x06ec, 0x0bb8, 0x1110, 0x2220, 0x4440, 0x8880, | ||
1864 | 0x0163, 0x02c6, 0x1104, 0x0758, 0x0eb0, 0x2be0, 0x6140, 0xc280, | ||
1865 | 0x02fd, 0x01c6, 0x0b5c, 0x1108, 0x07b0, 0x25a0, 0x8840, 0x6180, | ||
1866 | 0x0801, 0x012e, 0x025c, 0x04b8, 0x1370, 0x26e0, 0x57c0, 0xb580, | ||
1867 | 0x0401, 0x0802, 0x015c, 0x02b8, 0x22b0, 0x13e0, 0x7140, 0xe280, | ||
1868 | 0x0201, 0x0402, 0x0804, 0x01b8, 0x11b0, 0x31a0, 0x8040, 0x7180, | ||
1869 | 0x0101, 0x0202, 0x0404, 0x0808, 0x1010, 0x2020, 0x4040, 0x8080, | ||
1870 | 0x0001, 0x0002, 0x0004, 0x0008, 0x0010, 0x0020, 0x0040, 0x0080, | ||
1871 | 0x0100, 0x0200, 0x0400, 0x0800, 0x1000, 0x2000, 0x4000, 0x8000, | ||
1872 | }; | ||
1873 | |||
1874 | static int decode_syndrome(u16 syndrome, u16 *vectors, int num_vecs, | ||
1875 | int v_dim) | ||
2045 | { | 1876 | { |
2046 | int row; | 1877 | unsigned int i, err_sym; |
2047 | int column; | 1878 | |
1879 | for (err_sym = 0; err_sym < num_vecs / v_dim; err_sym++) { | ||
1880 | u16 s = syndrome; | ||
1881 | int v_idx = err_sym * v_dim; | ||
1882 | int v_end = (err_sym + 1) * v_dim; | ||
1883 | |||
1884 | /* walk over all 16 bits of the syndrome */ | ||
1885 | for (i = 1; i < (1U << 16); i <<= 1) { | ||
2048 | 1886 | ||
2049 | /* Determine column to scan */ | 1887 | /* if bit is set in that eigenvector... */ |
2050 | column = syndrome & 0xF; | 1888 | if (v_idx < v_end && vectors[v_idx] & i) { |
1889 | u16 ev_comp = vectors[v_idx++]; | ||
2051 | 1890 | ||
2052 | /* Scan all rows, looking for syndrome, or end of table */ | 1891 | /* ... and bit set in the modified syndrome, */ |
2053 | for (row = 0; row < NUMBER_ECC_ROWS; row++) { | 1892 | if (s & i) { |
2054 | if (ecc_chipkill_syndromes[row][column] == syndrome) | 1893 | /* remove it. */ |
2055 | return ecc_chipkill_syndromes[row][0]; | 1894 | s ^= ev_comp; |
1895 | |||
1896 | if (!s) | ||
1897 | return err_sym; | ||
1898 | } | ||
1899 | |||
1900 | } else if (s & i) | ||
1901 | /* can't get to zero, move to next symbol */ | ||
1902 | break; | ||
1903 | } | ||
2056 | } | 1904 | } |
2057 | 1905 | ||
2058 | debugf0("syndrome(%x) not found\n", syndrome); | 1906 | debugf0("syndrome(%x) not found\n", syndrome); |
2059 | return -1; | 1907 | return -1; |
2060 | } | 1908 | } |
2061 | 1909 | ||
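For reference, the new decoder can be exercised stand-alone. The vectors of each symbol are stored in "echelon" order (within a symbol, each vector's lowest set bit strictly increases, and bits below the pivot are zero), which is what lets the single low-to-high bit walk above cancel the syndrome greedily. A minimal user-space restatement, not part of the patch:

	#include <stdint.h>
	#include <stdio.h>

	/* Standalone sketch of decode_syndrome() above: a correctable error on
	 * symbol k yields a syndrome equal to the XOR of a subset of symbol k's
	 * v_dim vectors; greedy bit-wise elimination recovers k. */
	static int decode_syndrome(uint16_t syndrome, const uint16_t *vectors,
				   int num_vecs, int v_dim)
	{
		for (int err_sym = 0; err_sym < num_vecs / v_dim; err_sym++) {
			uint16_t s = syndrome;
			int v_idx = err_sym * v_dim;
			int v_end = (err_sym + 1) * v_dim;

			for (unsigned int i = 1; i < (1U << 16); i <<= 1) {
				if (v_idx < v_end && (vectors[v_idx] & i)) {
					uint16_t ev = vectors[v_idx++];

					if (s & i) {
						s ^= ev;	/* cancel bit i */
						if (!s)
							return err_sym;
					}
				} else if (s & i)
					break;	/* bit i cannot be cancelled */
			}
		}
		return -1;
	}

	int main(void)
	{
		/* first symbol (8 vectors) of the x8 table above */
		static const uint16_t vecs[] = {
			0x0145, 0x028a, 0x2374, 0x43c8, 0xa1f0, 0x0520, 0x0a40, 0x1480,
		};

		/* 0x0145 ^ 0x2374 == 0x2231 decodes back to symbol 0 */
		printf("err_sym = %d\n", decode_syndrome(0x2231, vecs, 8, 8));
		return 0;
	}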
1910 | static int map_err_sym_to_channel(int err_sym, int sym_size) | ||
1911 | { | ||
1912 | if (sym_size == 4) | ||
1913 | switch (err_sym) { | ||
1914 | case 0x20: | ||
1915 | case 0x21: | ||
1916 | return 0; | ||
1917 | break; | ||
1918 | case 0x22: | ||
1919 | case 0x23: | ||
1920 | return 1; | ||
1921 | break; | ||
1922 | default: | ||
1923 | return err_sym >> 4; | ||
1924 | break; | ||
1925 | } | ||
1926 | /* x8 symbols */ | ||
1927 | else | ||
1928 | switch (err_sym) { | ||
1929 | /* imaginary bits not in a DIMM */ | ||
1930 | case 0x10: | ||
1931 | WARN(1, KERN_ERR "Invalid error symbol: 0x%x\n", | ||
1932 | err_sym); | ||
1933 | return -1; | ||
1934 | break; | ||
1935 | |||
1936 | case 0x11: | ||
1937 | return 0; | ||
1938 | break; | ||
1939 | case 0x12: | ||
1940 | return 1; | ||
1941 | break; | ||
1942 | default: | ||
1943 | return err_sym >> 3; | ||
1944 | break; | ||
1945 | } | ||
1946 | return -1; | ||
1947 | } | ||
1948 | |||
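The x4 branch of the mapping above can be sanity-checked in isolation; x4_sym_to_channel is an illustrative stand-in name, not a driver symbol:

	#include <assert.h>

	/* Compact restatement of the x4 case of map_err_sym_to_channel() */
	static int x4_sym_to_channel(int err_sym)
	{
		switch (err_sym) {
		case 0x20:
		case 0x21:
			return 0;		/* ECC symbols on channel 0 */
		case 0x22:
		case 0x23:
			return 1;		/* ECC symbols on channel 1 */
		default:
			return err_sym >> 4;	/* 0x00-0x0f -> 0, 0x10-0x1f -> 1 */
		}
	}

	int main(void)
	{
		assert(x4_sym_to_channel(0x05) == 0);
		assert(x4_sym_to_channel(0x1a) == 1);
		assert(x4_sym_to_channel(0x21) == 0);
		assert(x4_sym_to_channel(0x23) == 1);
		return 0;
	}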
1949 | static int get_channel_from_ecc_syndrome(struct mem_ctl_info *mci, u16 syndrome) | ||
1950 | { | ||
1951 | struct amd64_pvt *pvt = mci->pvt_info; | ||
1952 | u32 value = 0; | ||
1953 | int err_sym = 0; | ||
1954 | |||
1955 | amd64_read_pci_cfg(pvt->misc_f3_ctl, 0x180, &value); | ||
1956 | |||
1957 | /* F3x180[EccSymbolSize]=1, x8 symbols */ | ||
1958 | if (boot_cpu_data.x86 == 0x10 && | ||
1959 | boot_cpu_data.x86_model > 7 && | ||
1960 | value & BIT(25)) { | ||
1961 | err_sym = decode_syndrome(syndrome, x8_vectors, | ||
1962 | ARRAY_SIZE(x8_vectors), 8); | ||
1963 | return map_err_sym_to_channel(err_sym, 8); | ||
1964 | } else { | ||
1965 | err_sym = decode_syndrome(syndrome, x4_vectors, | ||
1966 | ARRAY_SIZE(x4_vectors), 4); | ||
1967 | return map_err_sym_to_channel(err_sym, 4); | ||
1968 | } | ||
1969 | } | ||
1970 | |||
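The symbol-size decision above keys off F3x180[25] (EccSymbolSize), which is only defined on Fam 0x10 models > 7. Condensed into a hypothetical helper (the driver does this inline; ecc_symbol_size is an illustrative name):

	static int ecc_symbol_size(unsigned int family, unsigned int model,
				   unsigned int f3x180)
	{
		/* F3x180[EccSymbolSize] == 1 selects x8 syndrome symbols */
		if (family == 0x10 && model > 7 && (f3x180 & (1U << 25)))
			return 8;

		return 4;	/* x4 symbols everywhere else, K8 included */
	}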
2062 | /* | 1971 | /* |
2063 | * Check for valid error in the NB Status High register. If so, proceed to read | 1972 | * Check for valid error in the NB Status High register. If so, proceed to read |
2064 | * NB Status Low, NB Address Low and NB Address High registers and store data | 1973 | * NB Status Low, NB Address Low and NB Address High registers and store data |
@@ -2073,40 +1982,24 @@ static int amd64_get_error_info_regs(struct mem_ctl_info *mci, | |||
2073 | { | 1982 | { |
2074 | struct amd64_pvt *pvt; | 1983 | struct amd64_pvt *pvt; |
2075 | struct pci_dev *misc_f3_ctl; | 1984 | struct pci_dev *misc_f3_ctl; |
2076 | int err = 0; | ||
2077 | 1985 | ||
2078 | pvt = mci->pvt_info; | 1986 | pvt = mci->pvt_info; |
2079 | misc_f3_ctl = pvt->misc_f3_ctl; | 1987 | misc_f3_ctl = pvt->misc_f3_ctl; |
2080 | 1988 | ||
2081 | err = pci_read_config_dword(misc_f3_ctl, K8_NBSH, ®s->nbsh); | 1989 | if (amd64_read_pci_cfg(misc_f3_ctl, K8_NBSH, ®s->nbsh)) |
2082 | if (err) | 1990 | return 0; |
2083 | goto err_reg; | ||
2084 | 1991 | ||
2085 | if (!(regs->nbsh & K8_NBSH_VALID_BIT)) | 1992 | if (!(regs->nbsh & K8_NBSH_VALID_BIT)) |
2086 | return 0; | 1993 | return 0; |
2087 | 1994 | ||
2088 | /* valid error, read remaining error information registers */ | 1995 | /* valid error, read remaining error information registers */ |
2089 | err = pci_read_config_dword(misc_f3_ctl, K8_NBSL, ®s->nbsl); | 1996 | if (amd64_read_pci_cfg(misc_f3_ctl, K8_NBSL, ®s->nbsl) || |
2090 | if (err) | 1997 | amd64_read_pci_cfg(misc_f3_ctl, K8_NBEAL, ®s->nbeal) || |
2091 | goto err_reg; | 1998 | amd64_read_pci_cfg(misc_f3_ctl, K8_NBEAH, ®s->nbeah) || |
2092 | 1999 | amd64_read_pci_cfg(misc_f3_ctl, K8_NBCFG, ®s->nbcfg)) | |
2093 | err = pci_read_config_dword(misc_f3_ctl, K8_NBEAL, ®s->nbeal); | 2000 | return 0; |
2094 | if (err) | ||
2095 | goto err_reg; | ||
2096 | |||
2097 | err = pci_read_config_dword(misc_f3_ctl, K8_NBEAH, ®s->nbeah); | ||
2098 | if (err) | ||
2099 | goto err_reg; | ||
2100 | |||
2101 | err = pci_read_config_dword(misc_f3_ctl, K8_NBCFG, ®s->nbcfg); | ||
2102 | if (err) | ||
2103 | goto err_reg; | ||
2104 | 2001 | ||
2105 | return 1; | 2002 | return 1; |
2106 | |||
2107 | err_reg: | ||
2108 | debugf0("Reading error info register failed\n"); | ||
2109 | return 0; | ||
2110 | } | 2003 | } |
2111 | 2004 | ||
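The conversion above leans on amd64_read_pci_cfg() (added to amd64_edac.h further down) returning non-zero on failure and printing its own warning, so the old goto-based bookkeeping collapses into one short-circuit chain. A sketch of the resulting shape; read_nb_err_regs is an illustrative name, not a driver symbol:

	/* Illustrative only: fold the four status/address reads into one test;
	 * any failure has already been logged inside the wrapper. */
	static inline int read_nb_err_regs(struct pci_dev *f3, struct err_regs *regs)
	{
		return amd64_read_pci_cfg(f3, K8_NBSL,  &regs->nbsl)  ||
		       amd64_read_pci_cfg(f3, K8_NBEAL, &regs->nbeal) ||
		       amd64_read_pci_cfg(f3, K8_NBEAH, &regs->nbeah) ||
		       amd64_read_pci_cfg(f3, K8_NBCFG, &regs->nbcfg);
	}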
2112 | /* | 2005 | /* |
@@ -2184,7 +2077,7 @@ static void amd64_handle_ce(struct mem_ctl_info *mci, | |||
2184 | struct err_regs *info) | 2077 | struct err_regs *info) |
2185 | { | 2078 | { |
2186 | struct amd64_pvt *pvt = mci->pvt_info; | 2079 | struct amd64_pvt *pvt = mci->pvt_info; |
2187 | u64 SystemAddress; | 2080 | u64 sys_addr; |
2188 | 2081 | ||
2189 | /* Ensure that the Error Address is VALID */ | 2082 | /* Ensure that the Error Address is VALID */ |
2190 | if ((info->nbsh & K8_NBSH_VALID_ERROR_ADDR) == 0) { | 2083 | if ((info->nbsh & K8_NBSH_VALID_ERROR_ADDR) == 0) { |
@@ -2194,22 +2087,23 @@ static void amd64_handle_ce(struct mem_ctl_info *mci, | |||
2194 | return; | 2087 | return; |
2195 | } | 2088 | } |
2196 | 2089 | ||
2197 | SystemAddress = extract_error_address(mci, info); | 2090 | sys_addr = pvt->ops->get_error_address(mci, info); |
2198 | 2091 | ||
2199 | amd64_mc_printk(mci, KERN_ERR, | 2092 | amd64_mc_printk(mci, KERN_ERR, |
2200 | "CE ERROR_ADDRESS= 0x%llx\n", SystemAddress); | 2093 | "CE ERROR_ADDRESS= 0x%llx\n", sys_addr); |
2201 | 2094 | ||
2202 | pvt->ops->map_sysaddr_to_csrow(mci, info, SystemAddress); | 2095 | pvt->ops->map_sysaddr_to_csrow(mci, info, sys_addr); |
2203 | } | 2096 | } |
2204 | 2097 | ||
2205 | /* Handle any Un-correctable Errors (UEs) */ | 2098 | /* Handle any Un-correctable Errors (UEs) */ |
2206 | static void amd64_handle_ue(struct mem_ctl_info *mci, | 2099 | static void amd64_handle_ue(struct mem_ctl_info *mci, |
2207 | struct err_regs *info) | 2100 | struct err_regs *info) |
2208 | { | 2101 | { |
2102 | struct amd64_pvt *pvt = mci->pvt_info; | ||
2103 | struct mem_ctl_info *log_mci, *src_mci = NULL; | ||
2209 | int csrow; | 2104 | int csrow; |
2210 | u64 SystemAddress; | 2105 | u64 sys_addr; |
2211 | u32 page, offset; | 2106 | u32 page, offset; |
2212 | struct mem_ctl_info *log_mci, *src_mci = NULL; | ||
2213 | 2107 | ||
2214 | log_mci = mci; | 2108 | log_mci = mci; |
2215 | 2109 | ||
@@ -2220,31 +2114,31 @@ static void amd64_handle_ue(struct mem_ctl_info *mci, | |||
2220 | return; | 2114 | return; |
2221 | } | 2115 | } |
2222 | 2116 | ||
2223 | SystemAddress = extract_error_address(mci, info); | 2117 | sys_addr = pvt->ops->get_error_address(mci, info); |
2224 | 2118 | ||
2225 | /* | 2119 | /* |
2226 | * Find out which node the error address belongs to. This may be | 2120 | * Find out which node the error address belongs to. This may be |
2227 | * different from the node that detected the error. | 2121 | * different from the node that detected the error. |
2228 | */ | 2122 | */ |
2229 | src_mci = find_mc_by_sys_addr(mci, SystemAddress); | 2123 | src_mci = find_mc_by_sys_addr(mci, sys_addr); |
2230 | if (!src_mci) { | 2124 | if (!src_mci) { |
2231 | amd64_mc_printk(mci, KERN_CRIT, | 2125 | amd64_mc_printk(mci, KERN_CRIT, |
2232 | "ERROR ADDRESS (0x%lx) value NOT mapped to a MC\n", | 2126 | "ERROR ADDRESS (0x%lx) value NOT mapped to a MC\n", |
2233 | (unsigned long)SystemAddress); | 2127 | (unsigned long)sys_addr); |
2234 | edac_mc_handle_ue_no_info(log_mci, EDAC_MOD_STR); | 2128 | edac_mc_handle_ue_no_info(log_mci, EDAC_MOD_STR); |
2235 | return; | 2129 | return; |
2236 | } | 2130 | } |
2237 | 2131 | ||
2238 | log_mci = src_mci; | 2132 | log_mci = src_mci; |
2239 | 2133 | ||
2240 | csrow = sys_addr_to_csrow(log_mci, SystemAddress); | 2134 | csrow = sys_addr_to_csrow(log_mci, sys_addr); |
2241 | if (csrow < 0) { | 2135 | if (csrow < 0) { |
2242 | amd64_mc_printk(mci, KERN_CRIT, | 2136 | amd64_mc_printk(mci, KERN_CRIT, |
2243 | "ERROR_ADDRESS (0x%lx) value NOT mapped to 'csrow'\n", | 2137 | "ERROR_ADDRESS (0x%lx) value NOT mapped to 'csrow'\n", |
2244 | (unsigned long)SystemAddress); | 2138 | (unsigned long)sys_addr); |
2245 | edac_mc_handle_ue_no_info(log_mci, EDAC_MOD_STR); | 2139 | edac_mc_handle_ue_no_info(log_mci, EDAC_MOD_STR); |
2246 | } else { | 2140 | } else { |
2247 | error_address_to_page_and_offset(SystemAddress, &page, &offset); | 2141 | error_address_to_page_and_offset(sys_addr, &page, &offset); |
2248 | edac_mc_handle_ue(log_mci, page, offset, csrow, EDAC_MOD_STR); | 2142 | edac_mc_handle_ue(log_mci, page, offset, csrow, EDAC_MOD_STR); |
2249 | } | 2143 | } |
2250 | } | 2144 | } |
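error_address_to_page_and_offset() is not shown in this hunk; as far as this driver goes it is the usual split of a system address into an EDAC page number and intra-page offset, roughly along these lines (a hedged sketch, the real helper lives in amd64_edac.h):

	static inline void error_address_to_page_and_offset(u64 sys_addr,
							    u32 *page, u32 *offset)
	{
		*page = (u32)(sys_addr >> PAGE_SHIFT);	/* page number */
		*offset = (u32)sys_addr & ~PAGE_MASK;	/* offset within that page */
	}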
@@ -2384,30 +2278,26 @@ static void amd64_free_mc_sibling_devices(struct amd64_pvt *pvt) | |||
2384 | static void amd64_read_mc_registers(struct amd64_pvt *pvt) | 2278 | static void amd64_read_mc_registers(struct amd64_pvt *pvt) |
2385 | { | 2279 | { |
2386 | u64 msr_val; | 2280 | u64 msr_val; |
2387 | int dram, err = 0; | 2281 | int dram; |
2388 | 2282 | ||
2389 | /* | 2283 | /* |
2390 | * Retrieve TOP_MEM and TOP_MEM2; no masking off of reserved bits since | 2284 | * Retrieve TOP_MEM and TOP_MEM2; no masking off of reserved bits since |
2391 | * those are Read-As-Zero | 2285 | * those are Read-As-Zero |
2392 | */ | 2286 | */ |
2393 | rdmsrl(MSR_K8_TOP_MEM1, msr_val); | 2287 | rdmsrl(MSR_K8_TOP_MEM1, pvt->top_mem); |
2394 | pvt->top_mem = msr_val >> 23; | 2288 | debugf0(" TOP_MEM: 0x%016llx\n", pvt->top_mem); |
2395 | debugf0(" TOP_MEM=0x%08llx\n", pvt->top_mem); | ||
2396 | 2289 | ||
2397 | /* check first whether TOP_MEM2 is enabled */ | 2290 | /* check first whether TOP_MEM2 is enabled */ |
2398 | rdmsrl(MSR_K8_SYSCFG, msr_val); | 2291 | rdmsrl(MSR_K8_SYSCFG, msr_val); |
2399 | if (msr_val & (1U << 21)) { | 2292 | if (msr_val & (1U << 21)) { |
2400 | rdmsrl(MSR_K8_TOP_MEM2, msr_val); | 2293 | rdmsrl(MSR_K8_TOP_MEM2, pvt->top_mem2); |
2401 | pvt->top_mem2 = msr_val >> 23; | 2294 | debugf0(" TOP_MEM2: 0x%016llx\n", pvt->top_mem2); |
2402 | debugf0(" TOP_MEM2=0x%08llx\n", pvt->top_mem2); | ||
2403 | } else | 2295 | } else |
2404 | debugf0(" TOP_MEM2 disabled.\n"); | 2296 | debugf0(" TOP_MEM2 disabled.\n"); |
2405 | 2297 | ||
2406 | amd64_cpu_display_info(pvt); | 2298 | amd64_cpu_display_info(pvt); |
2407 | 2299 | ||
2408 | err = pci_read_config_dword(pvt->misc_f3_ctl, K8_NBCAP, &pvt->nbcap); | 2300 | amd64_read_pci_cfg(pvt->misc_f3_ctl, K8_NBCAP, &pvt->nbcap); |
2409 | if (err) | ||
2410 | goto err_reg; | ||
2411 | 2301 | ||
2412 | if (pvt->ops->read_dram_ctl_register) | 2302 | if (pvt->ops->read_dram_ctl_register) |
2413 | pvt->ops->read_dram_ctl_register(pvt); | 2303 | pvt->ops->read_dram_ctl_register(pvt); |
@@ -2425,13 +2315,12 @@ static void amd64_read_mc_registers(struct amd64_pvt *pvt) | |||
2425 | * debug output block away. | 2315 | * debug output block away. |
2426 | */ | 2316 | */ |
2427 | if (pvt->dram_rw_en[dram] != 0) { | 2317 | if (pvt->dram_rw_en[dram] != 0) { |
2428 | debugf1(" DRAM_BASE[%d]: 0x%8.08x-%8.08x " | 2318 | debugf1(" DRAM-BASE[%d]: 0x%016llx " |
2429 | "DRAM_LIMIT: 0x%8.08x-%8.08x\n", | 2319 | "DRAM-LIMIT: 0x%016llx\n", |
2430 | dram, | 2320 | dram, |
2431 | (u32)(pvt->dram_base[dram] >> 32), | 2321 | pvt->dram_base[dram], |
2432 | (u32)(pvt->dram_base[dram] & 0xFFFFFFFF), | 2322 | pvt->dram_limit[dram]); |
2433 | (u32)(pvt->dram_limit[dram] >> 32), | 2323 | |
2434 | (u32)(pvt->dram_limit[dram] & 0xFFFFFFFF)); | ||
2435 | debugf1(" IntlvEn=%s %s %s " | 2324 | debugf1(" IntlvEn=%s %s %s " |
2436 | "IntlvSel=%d DstNode=%d\n", | 2325 | "IntlvSel=%d DstNode=%d\n", |
2437 | pvt->dram_IntlvEn[dram] ? | 2326 | pvt->dram_IntlvEn[dram] ? |
@@ -2445,44 +2334,20 @@ static void amd64_read_mc_registers(struct amd64_pvt *pvt) | |||
2445 | 2334 | ||
2446 | amd64_read_dct_base_mask(pvt); | 2335 | amd64_read_dct_base_mask(pvt); |
2447 | 2336 | ||
2448 | err = pci_read_config_dword(pvt->addr_f1_ctl, K8_DHAR, &pvt->dhar); | 2337 | amd64_read_pci_cfg(pvt->addr_f1_ctl, K8_DHAR, &pvt->dhar); |
2449 | if (err) | ||
2450 | goto err_reg; | ||
2451 | |||
2452 | amd64_read_dbam_reg(pvt); | 2338 | amd64_read_dbam_reg(pvt); |
2453 | 2339 | ||
2454 | err = pci_read_config_dword(pvt->misc_f3_ctl, | 2340 | amd64_read_pci_cfg(pvt->misc_f3_ctl, |
2455 | F10_ONLINE_SPARE, &pvt->online_spare); | 2341 | F10_ONLINE_SPARE, &pvt->online_spare); |
2456 | if (err) | ||
2457 | goto err_reg; | ||
2458 | |||
2459 | err = pci_read_config_dword(pvt->dram_f2_ctl, F10_DCLR_0, &pvt->dclr0); | ||
2460 | if (err) | ||
2461 | goto err_reg; | ||
2462 | 2342 | ||
2463 | err = pci_read_config_dword(pvt->dram_f2_ctl, F10_DCHR_0, &pvt->dchr0); | 2343 | amd64_read_pci_cfg(pvt->dram_f2_ctl, F10_DCLR_0, &pvt->dclr0); |
2464 | if (err) | 2344 | amd64_read_pci_cfg(pvt->dram_f2_ctl, F10_DCHR_0, &pvt->dchr0); |
2465 | goto err_reg; | ||
2466 | 2345 | ||
2467 | if (!dct_ganging_enabled(pvt)) { | 2346 | if (!dct_ganging_enabled(pvt)) { |
2468 | err = pci_read_config_dword(pvt->dram_f2_ctl, F10_DCLR_1, | 2347 | amd64_read_pci_cfg(pvt->dram_f2_ctl, F10_DCLR_1, &pvt->dclr1); |
2469 | &pvt->dclr1); | 2348 | amd64_read_pci_cfg(pvt->dram_f2_ctl, F10_DCHR_1, &pvt->dchr1); |
2470 | if (err) | ||
2471 | goto err_reg; | ||
2472 | |||
2473 | err = pci_read_config_dword(pvt->dram_f2_ctl, F10_DCHR_1, | ||
2474 | &pvt->dchr1); | ||
2475 | if (err) | ||
2476 | goto err_reg; | ||
2477 | } | 2349 | } |
2478 | |||
2479 | amd64_dump_misc_regs(pvt); | 2350 | amd64_dump_misc_regs(pvt); |
2480 | |||
2481 | return; | ||
2482 | |||
2483 | err_reg: | ||
2484 | debugf0("Reading an MC register failed\n"); | ||
2485 | |||
2486 | } | 2351 | } |
2487 | 2352 | ||
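Worth noting in the TOP_MEM hunk above: the old code stored the MSR value shifted right by 23 bits, while the new code keeps the raw physical address, hence the switch from the 0x%08llx to the wider 0x%016llx debug format. The resulting shape, condensed (SYSCFG bit 21 is MtrrTom2En per the BKDG, an assumption worth checking):

	rdmsrl(MSR_K8_TOP_MEM1, pvt->top_mem);		/* raw address now */

	rdmsrl(MSR_K8_SYSCFG, msr_val);
	if (msr_val & (1U << 21))			/* TOP_MEM2 enabled? */
		rdmsrl(MSR_K8_TOP_MEM2, pvt->top_mem2);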
2488 | /* | 2353 | /* |
@@ -2521,7 +2386,7 @@ err_reg: | |||
2521 | */ | 2386 | */ |
2522 | static u32 amd64_csrow_nr_pages(int csrow_nr, struct amd64_pvt *pvt) | 2387 | static u32 amd64_csrow_nr_pages(int csrow_nr, struct amd64_pvt *pvt) |
2523 | { | 2388 | { |
2524 | u32 dram_map, nr_pages; | 2389 | u32 cs_mode, nr_pages; |
2525 | 2390 | ||
2526 | /* | 2391 | /* |
2527 | * The math on this doesn't look right on the surface because x/2*4 can | 2392 | * The math on this doesn't look right on the surface because x/2*4 can |
@@ -2530,9 +2395,9 @@ static u32 amd64_csrow_nr_pages(int csrow_nr, struct amd64_pvt *pvt) | |||
2530 | * number of bits to shift the DBAM register to extract the proper CSROW | 2395 | * number of bits to shift the DBAM register to extract the proper CSROW |
2531 | * field. | 2396 | * field. |
2532 | */ | 2397 | */ |
2533 | dram_map = (pvt->dbam0 >> ((csrow_nr / 2) * 4)) & 0xF; | 2398 | cs_mode = (pvt->dbam0 >> ((csrow_nr / 2) * 4)) & 0xF; |
2534 | 2399 | ||
2535 | nr_pages = pvt->ops->dbam_map_to_pages(pvt, dram_map); | 2400 | nr_pages = pvt->ops->dbam_to_cs(pvt, cs_mode) << (20 - PAGE_SHIFT); |
2536 | 2401 | ||
2537 | /* | 2402 | /* |
2538 | * If dual channel then double the memory size of single channel. | 2403 | * If dual channel then double the memory size of single channel. |
@@ -2540,7 +2405,7 @@ static u32 amd64_csrow_nr_pages(int csrow_nr, struct amd64_pvt *pvt) | |||
2540 | */ | 2405 | */ |
2541 | nr_pages <<= (pvt->channel_count - 1); | 2406 | nr_pages <<= (pvt->channel_count - 1); |
2542 | 2407 | ||
2543 | debugf0(" (csrow=%d) DBAM map index= %d\n", csrow_nr, dram_map); | 2408 | debugf0(" (csrow=%d) DBAM map index= %d\n", csrow_nr, cs_mode); |
2544 | debugf0(" nr_pages= %u channel-count = %d\n", | 2409 | debugf0(" nr_pages= %u channel-count = %d\n", |
2545 | nr_pages, pvt->channel_count); | 2410 | nr_pages, pvt->channel_count); |
2546 | 2411 | ||
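A worked instance of the arithmetic above, assuming 4 KiB pages: dbam_to_cs() now returns the chip-select size in MiB, and MiB-to-pages is a left shift by (20 - PAGE_SHIFT).

	/*
	 * Worked example, assuming PAGE_SHIFT == 12:
	 *
	 *   dbam_to_cs(pvt, cs_mode) == 512          -> 512 MiB chip select
	 *   512 << (20 - PAGE_SHIFT) == 512 << 8     -> 131072 pages
	 *   dual channel: 131072 << (2 - 1)          -> 262144 pages == 1 GiB
	 */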
@@ -2556,13 +2421,11 @@ static int amd64_init_csrows(struct mem_ctl_info *mci) | |||
2556 | struct csrow_info *csrow; | 2421 | struct csrow_info *csrow; |
2557 | struct amd64_pvt *pvt; | 2422 | struct amd64_pvt *pvt; |
2558 | u64 input_addr_min, input_addr_max, sys_addr; | 2423 | u64 input_addr_min, input_addr_max, sys_addr; |
2559 | int i, err = 0, empty = 1; | 2424 | int i, empty = 1; |
2560 | 2425 | ||
2561 | pvt = mci->pvt_info; | 2426 | pvt = mci->pvt_info; |
2562 | 2427 | ||
2563 | err = pci_read_config_dword(pvt->misc_f3_ctl, K8_NBCFG, &pvt->nbcfg); | 2428 | amd64_read_pci_cfg(pvt->misc_f3_ctl, K8_NBCFG, &pvt->nbcfg); |
2564 | if (err) | ||
2565 | debugf0("Reading K8_NBCFG failed\n"); | ||
2566 | 2429 | ||
2567 | debugf0("NBCFG= 0x%x CHIPKILL= %s DRAM ECC= %s\n", pvt->nbcfg, | 2430 | debugf0("NBCFG= 0x%x CHIPKILL= %s DRAM ECC= %s\n", pvt->nbcfg, |
2568 | (pvt->nbcfg & K8_NBCFG_CHIPKILL) ? "Enabled" : "Disabled", | 2431 | (pvt->nbcfg & K8_NBCFG_CHIPKILL) ? "Enabled" : "Disabled", |
@@ -2618,6 +2481,109 @@ static int amd64_init_csrows(struct mem_ctl_info *mci) | |||
2618 | return empty; | 2481 | return empty; |
2619 | } | 2482 | } |
2620 | 2483 | ||
2484 | /* get all cores on this DCT */ | ||
2485 | static void get_cpus_on_this_dct_cpumask(struct cpumask *mask, int nid) | ||
2486 | { | ||
2487 | int cpu; | ||
2488 | |||
2489 | for_each_online_cpu(cpu) | ||
2490 | if (amd_get_nb_id(cpu) == nid) | ||
2491 | cpumask_set_cpu(cpu, mask); | ||
2492 | } | ||
2493 | |||
2494 | /* check MCG_CTL on all the cpus on this node */ | ||
2495 | static bool amd64_nb_mce_bank_enabled_on_node(int nid) | ||
2496 | { | ||
2497 | cpumask_var_t mask; | ||
2498 | struct msr *msrs; | ||
2499 | int cpu, nbe, idx = 0; | ||
2500 | bool ret = false; | ||
2501 | |||
2502 | if (!zalloc_cpumask_var(&mask, GFP_KERNEL)) { | ||
2503 | amd64_printk(KERN_WARNING, "%s: error allocating mask\n", | ||
2504 | __func__); | ||
2505 | return false; | ||
2506 | } | ||
2507 | |||
2508 | get_cpus_on_this_dct_cpumask(mask, nid); | ||
2509 | |||
2510 | msrs = kzalloc(sizeof(struct msr) * cpumask_weight(mask), GFP_KERNEL); | ||
2511 | if (!msrs) { | ||
2512 | amd64_printk(KERN_WARNING, "%s: error allocating msrs\n", | ||
2513 | __func__); | ||
2514 | free_cpumask_var(mask); | ||
2515 | return false; | ||
2516 | } | ||
2517 | |||
2518 | rdmsr_on_cpus(mask, MSR_IA32_MCG_CTL, msrs); | ||
2519 | |||
2520 | for_each_cpu(cpu, mask) { | ||
2521 | nbe = msrs[idx].l & K8_MSR_MCGCTL_NBE; | ||
2522 | |||
2523 | debugf0("core: %u, MCG_CTL: 0x%llx, NB MSR is %s\n", | ||
2524 | cpu, msrs[idx].q, | ||
2525 | (nbe ? "enabled" : "disabled")); | ||
2526 | |||
2527 | if (!nbe) | ||
2528 | goto out; | ||
2529 | |||
2530 | idx++; | ||
2531 | } | ||
2532 | ret = true; | ||
2533 | |||
2534 | out: | ||
2535 | kfree(msrs); | ||
2536 | free_cpumask_var(mask); | ||
2537 | return ret; | ||
2538 | } | ||
2539 | |||
2540 | static int amd64_toggle_ecc_err_reporting(struct amd64_pvt *pvt, bool on) | ||
2541 | { | ||
2542 | cpumask_var_t cmask; | ||
2543 | struct msr *msrs = NULL; | ||
2544 | int cpu, idx = 0; | ||
2545 | |||
2546 | if (!zalloc_cpumask_var(&cmask, GFP_KERNEL)) { | ||
2547 | amd64_printk(KERN_WARNING, "%s: error allocating mask\n", | ||
2548 | __func__); | ||
2549 | return false; | ||
2550 | } | ||
2551 | |||
2552 | get_cpus_on_this_dct_cpumask(cmask, pvt->mc_node_id); | ||
2553 | |||
2554 | msrs = kzalloc(sizeof(struct msr) * cpumask_weight(cmask), GFP_KERNEL); | ||
2555 | if (!msrs) { | ||
2556 | amd64_printk(KERN_WARNING, "%s: error allocating msrs\n", | ||
2557 | __func__); | ||
2558 | return -ENOMEM; | ||
2559 | } | ||
2560 | |||
2561 | rdmsr_on_cpus(cmask, MSR_IA32_MCG_CTL, msrs); | ||
2562 | |||
2563 | for_each_cpu(cpu, cmask) { | ||
2564 | |||
2565 | if (on) { | ||
2566 | if (msrs[idx].l & K8_MSR_MCGCTL_NBE) | ||
2567 | pvt->flags.ecc_report = 1; | ||
2568 | |||
2569 | msrs[idx].l |= K8_MSR_MCGCTL_NBE; | ||
2570 | } else { | ||
2571 | /* | ||
2572 | * Turn ECC reporting back off only if it had been off before the driver enabled it | ||
2573 | */ | ||
2574 | if (!pvt->flags.ecc_report) | ||
2575 | msrs[idx].l &= ~K8_MSR_MCGCTL_NBE; | ||
2576 | } | ||
2577 | idx++; | ||
2578 | } | ||
2579 | wrmsr_on_cpus(cmask, MSR_IA32_MCG_CTL, msrs); | ||
2580 | |||
2581 | kfree(msrs); | ||
2582 | free_cpumask_var(cmask); | ||
2583 | |||
2584 | return 0; | ||
2585 | } | ||
2586 | |||
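Restating the flag protocol the function above implements, as read from its loop (ON/OFF come from the amd64_edac.h hunk below), together with the call shape used later in this patch:

	/*
	 * toggle(pvt, ON):  if any core on the node already had MCGCTL[NBE]
	 *                   set, remember that in pvt->flags.ecc_report, then
	 *                   set NBE on every core of the node;
	 * toggle(pvt, OFF): clear NBE again only when ecc_report == 0, i.e.
	 *                   only when the driver itself had enabled it.
	 */
	if (amd64_toggle_ecc_err_reporting(pvt, ON))
		amd64_printk(KERN_WARNING,
			     "Error enabling ECC reporting over MCGCTL!\n");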
2621 | /* | 2587 | /* |
2622 | * Only if 'ecc_enable_override' is set AND BIOS had ECC disabled, do "we" | 2588 | * Only if 'ecc_enable_override' is set AND BIOS had ECC disabled, do "we" |
2623 | * enable it. | 2589 | * enable it. |
@@ -2625,24 +2591,16 @@ static int amd64_init_csrows(struct mem_ctl_info *mci) | |||
2625 | static void amd64_enable_ecc_error_reporting(struct mem_ctl_info *mci) | 2591 | static void amd64_enable_ecc_error_reporting(struct mem_ctl_info *mci) |
2626 | { | 2592 | { |
2627 | struct amd64_pvt *pvt = mci->pvt_info; | 2593 | struct amd64_pvt *pvt = mci->pvt_info; |
2628 | const cpumask_t *cpumask = cpumask_of_node(pvt->mc_node_id); | 2594 | u32 value, mask = K8_NBCTL_CECCEn | K8_NBCTL_UECCEn; |
2629 | int cpu, idx = 0, err = 0; | ||
2630 | struct msr msrs[cpumask_weight(cpumask)]; | ||
2631 | u32 value; | ||
2632 | u32 mask = K8_NBCTL_CECCEn | K8_NBCTL_UECCEn; | ||
2633 | 2595 | ||
2634 | if (!ecc_enable_override) | 2596 | if (!ecc_enable_override) |
2635 | return; | 2597 | return; |
2636 | 2598 | ||
2637 | memset(msrs, 0, sizeof(msrs)); | ||
2638 | |||
2639 | amd64_printk(KERN_WARNING, | 2599 | amd64_printk(KERN_WARNING, |
2640 | "'ecc_enable_override' parameter is active, " | 2600 | "'ecc_enable_override' parameter is active, " |
2641 | "Enabling AMD ECC hardware now: CAUTION\n"); | 2601 | "Enabling AMD ECC hardware now: CAUTION\n"); |
2642 | 2602 | ||
2643 | err = pci_read_config_dword(pvt->misc_f3_ctl, K8_NBCTL, &value); | 2603 | amd64_read_pci_cfg(pvt->misc_f3_ctl, K8_NBCTL, &value); |
2644 | if (err) | ||
2645 | debugf0("Reading K8_NBCTL failed\n"); | ||
2646 | 2604 | ||
2647 | /* turn on UECCn and CECCEn bits */ | 2605 | /* turn on UECCn and CECCEn bits */ |
2648 | pvt->old_nbctl = value & mask; | 2606 | pvt->old_nbctl = value & mask; |
@@ -2651,20 +2609,11 @@ static void amd64_enable_ecc_error_reporting(struct mem_ctl_info *mci) | |||
2651 | value |= mask; | 2609 | value |= mask; |
2652 | pci_write_config_dword(pvt->misc_f3_ctl, K8_NBCTL, value); | 2610 | pci_write_config_dword(pvt->misc_f3_ctl, K8_NBCTL, value); |
2653 | 2611 | ||
2654 | rdmsr_on_cpus(cpumask, K8_MSR_MCGCTL, msrs); | 2612 | if (amd64_toggle_ecc_err_reporting(pvt, ON)) |
2655 | 2613 | amd64_printk(KERN_WARNING, "Error enabling ECC reporting over " | |
2656 | for_each_cpu(cpu, cpumask) { | 2614 | "MCGCTL!\n"); |
2657 | if (msrs[idx].l & K8_MSR_MCGCTL_NBE) | ||
2658 | set_bit(idx, &pvt->old_mcgctl); | ||
2659 | 2615 | ||
2660 | msrs[idx].l |= K8_MSR_MCGCTL_NBE; | 2616 | amd64_read_pci_cfg(pvt->misc_f3_ctl, K8_NBCFG, &value); |
2661 | idx++; | ||
2662 | } | ||
2663 | wrmsr_on_cpus(cpumask, K8_MSR_MCGCTL, msrs); | ||
2664 | |||
2665 | err = pci_read_config_dword(pvt->misc_f3_ctl, K8_NBCFG, &value); | ||
2666 | if (err) | ||
2667 | debugf0("Reading K8_NBCFG failed\n"); | ||
2668 | 2617 | ||
2669 | debugf0("NBCFG(1)= 0x%x CHIPKILL= %s ECC_ENABLE= %s\n", value, | 2618 | debugf0("NBCFG(1)= 0x%x CHIPKILL= %s ECC_ENABLE= %s\n", value, |
2670 | (value & K8_NBCFG_CHIPKILL) ? "Enabled" : "Disabled", | 2619 | (value & K8_NBCFG_CHIPKILL) ? "Enabled" : "Disabled", |
@@ -2679,9 +2628,7 @@ static void amd64_enable_ecc_error_reporting(struct mem_ctl_info *mci) | |||
2679 | value |= K8_NBCFG_ECC_ENABLE; | 2628 | value |= K8_NBCFG_ECC_ENABLE; |
2680 | pci_write_config_dword(pvt->misc_f3_ctl, K8_NBCFG, value); | 2629 | pci_write_config_dword(pvt->misc_f3_ctl, K8_NBCFG, value); |
2681 | 2630 | ||
2682 | err = pci_read_config_dword(pvt->misc_f3_ctl, K8_NBCFG, &value); | 2631 | amd64_read_pci_cfg(pvt->misc_f3_ctl, K8_NBCFG, &value); |
2683 | if (err) | ||
2684 | debugf0("Reading K8_NBCFG failed\n"); | ||
2685 | 2632 | ||
2686 | if (!(value & K8_NBCFG_ECC_ENABLE)) { | 2633 | if (!(value & K8_NBCFG_ECC_ENABLE)) { |
2687 | amd64_printk(KERN_WARNING, | 2634 | amd64_printk(KERN_WARNING, |
@@ -2701,86 +2648,21 @@ static void amd64_enable_ecc_error_reporting(struct mem_ctl_info *mci) | |||
2701 | 2648 | ||
2702 | static void amd64_restore_ecc_error_reporting(struct amd64_pvt *pvt) | 2649 | static void amd64_restore_ecc_error_reporting(struct amd64_pvt *pvt) |
2703 | { | 2650 | { |
2704 | const cpumask_t *cpumask = cpumask_of_node(pvt->mc_node_id); | 2651 | u32 value, mask = K8_NBCTL_CECCEn | K8_NBCTL_UECCEn; |
2705 | int cpu, idx = 0, err = 0; | ||
2706 | struct msr msrs[cpumask_weight(cpumask)]; | ||
2707 | u32 value; | ||
2708 | u32 mask = K8_NBCTL_CECCEn | K8_NBCTL_UECCEn; | ||
2709 | 2652 | ||
2710 | if (!pvt->nbctl_mcgctl_saved) | 2653 | if (!pvt->nbctl_mcgctl_saved) |
2711 | return; | 2654 | return; |
2712 | 2655 | ||
2713 | memset(msrs, 0, sizeof(msrs)); | 2656 | amd64_read_pci_cfg(pvt->misc_f3_ctl, K8_NBCTL, &value); |
2714 | |||
2715 | err = pci_read_config_dword(pvt->misc_f3_ctl, K8_NBCTL, &value); | ||
2716 | if (err) | ||
2717 | debugf0("Reading K8_NBCTL failed\n"); | ||
2718 | value &= ~mask; | 2657 | value &= ~mask; |
2719 | value |= pvt->old_nbctl; | 2658 | value |= pvt->old_nbctl; |
2720 | 2659 | ||
2721 | /* restore the NB Enable MCGCTL bit */ | 2660 | /* restore the NB Enable MCGCTL bit */ |
2722 | pci_write_config_dword(pvt->misc_f3_ctl, K8_NBCTL, value); | 2661 | pci_write_config_dword(pvt->misc_f3_ctl, K8_NBCTL, value); |
2723 | 2662 | ||
2724 | rdmsr_on_cpus(cpumask, K8_MSR_MCGCTL, msrs); | 2663 | if (amd64_toggle_ecc_err_reporting(pvt, OFF)) |
2725 | 2664 | amd64_printk(KERN_WARNING, "Error restoring ECC reporting over " | |
2726 | for_each_cpu(cpu, cpumask) { | 2665 | "MCGCTL!\n"); |
2727 | msrs[idx].l &= ~K8_MSR_MCGCTL_NBE; | ||
2728 | msrs[idx].l |= | ||
2729 | test_bit(idx, &pvt->old_mcgctl) << K8_MSR_MCGCTL_NBE; | ||
2730 | idx++; | ||
2731 | } | ||
2732 | |||
2733 | wrmsr_on_cpus(cpumask, K8_MSR_MCGCTL, msrs); | ||
2734 | } | ||
2735 | |||
2736 | /* get all cores on this DCT */ | ||
2737 | static void get_cpus_on_this_dct_cpumask(cpumask_t *mask, int nid) | ||
2738 | { | ||
2739 | int cpu; | ||
2740 | |||
2741 | for_each_online_cpu(cpu) | ||
2742 | if (amd_get_nb_id(cpu) == nid) | ||
2743 | cpumask_set_cpu(cpu, mask); | ||
2744 | } | ||
2745 | |||
2746 | /* check MCG_CTL on all the cpus on this node */ | ||
2747 | static bool amd64_nb_mce_bank_enabled_on_node(int nid) | ||
2748 | { | ||
2749 | cpumask_t mask; | ||
2750 | struct msr *msrs; | ||
2751 | int cpu, nbe, idx = 0; | ||
2752 | bool ret = false; | ||
2753 | |||
2754 | cpumask_clear(&mask); | ||
2755 | |||
2756 | get_cpus_on_this_dct_cpumask(&mask, nid); | ||
2757 | |||
2758 | msrs = kzalloc(sizeof(struct msr) * cpumask_weight(&mask), GFP_KERNEL); | ||
2759 | if (!msrs) { | ||
2760 | amd64_printk(KERN_WARNING, "%s: error allocating msrs\n", | ||
2761 | __func__); | ||
2762 | return false; | ||
2763 | } | ||
2764 | |||
2765 | rdmsr_on_cpus(&mask, MSR_IA32_MCG_CTL, msrs); | ||
2766 | |||
2767 | for_each_cpu(cpu, &mask) { | ||
2768 | nbe = msrs[idx].l & K8_MSR_MCGCTL_NBE; | ||
2769 | |||
2770 | debugf0("core: %u, MCG_CTL: 0x%llx, NB MSR is %s\n", | ||
2771 | cpu, msrs[idx].q, | ||
2772 | (nbe ? "enabled" : "disabled")); | ||
2773 | |||
2774 | if (!nbe) | ||
2775 | goto out; | ||
2776 | |||
2777 | idx++; | ||
2778 | } | ||
2779 | ret = true; | ||
2780 | |||
2781 | out: | ||
2782 | kfree(msrs); | ||
2783 | return ret; | ||
2784 | } | 2666 | } |
2785 | 2667 | ||
2786 | /* | 2668 | /* |
@@ -2797,13 +2679,10 @@ static const char *ecc_warning = | |||
2797 | static int amd64_check_ecc_enabled(struct amd64_pvt *pvt) | 2679 | static int amd64_check_ecc_enabled(struct amd64_pvt *pvt) |
2798 | { | 2680 | { |
2799 | u32 value; | 2681 | u32 value; |
2800 | int err = 0; | ||
2801 | u8 ecc_enabled = 0; | 2682 | u8 ecc_enabled = 0; |
2802 | bool nb_mce_en = false; | 2683 | bool nb_mce_en = false; |
2803 | 2684 | ||
2804 | err = pci_read_config_dword(pvt->misc_f3_ctl, K8_NBCFG, &value); | 2685 | amd64_read_pci_cfg(pvt->misc_f3_ctl, K8_NBCFG, &value); |
2805 | if (err) | ||
2806 | debugf0("Reading K8_NBCTL failed\n"); | ||
2807 | 2686 | ||
2808 | ecc_enabled = !!(value & K8_NBCFG_ECC_ENABLE); | 2687 | ecc_enabled = !!(value & K8_NBCFG_ECC_ENABLE); |
2809 | if (!ecc_enabled) | 2688 | if (!ecc_enabled) |
@@ -2909,7 +2788,6 @@ static int amd64_probe_one_instance(struct pci_dev *dram_f2_ctl, | |||
2909 | pvt->ext_model = boot_cpu_data.x86_model >> 4; | 2788 | pvt->ext_model = boot_cpu_data.x86_model >> 4; |
2910 | pvt->mc_type_index = mc_type_index; | 2789 | pvt->mc_type_index = mc_type_index; |
2911 | pvt->ops = family_ops(mc_type_index); | 2790 | pvt->ops = family_ops(mc_type_index); |
2912 | pvt->old_mcgctl = 0; | ||
2913 | 2791 | ||
2914 | /* | 2792 | /* |
2915 | * We have the dram_f2_ctl device as an argument, now go reserve its | 2793 | * We have the dram_f2_ctl device as an argument, now go reserve its |
@@ -2959,17 +2837,10 @@ static int amd64_init_2nd_stage(struct amd64_pvt *pvt) | |||
2959 | { | 2837 | { |
2960 | int node_id = pvt->mc_node_id; | 2838 | int node_id = pvt->mc_node_id; |
2961 | struct mem_ctl_info *mci; | 2839 | struct mem_ctl_info *mci; |
2962 | int ret, err = 0; | 2840 | int ret = -ENODEV; |
2963 | 2841 | ||
2964 | amd64_read_mc_registers(pvt); | 2842 | amd64_read_mc_registers(pvt); |
2965 | 2843 | ||
2966 | ret = -ENODEV; | ||
2967 | if (pvt->ops->probe_valid_hardware) { | ||
2968 | err = pvt->ops->probe_valid_hardware(pvt); | ||
2969 | if (err) | ||
2970 | goto err_exit; | ||
2971 | } | ||
2972 | |||
2973 | /* | 2844 | /* |
2974 | * We need to determine how many memory channels there are. Then use | 2845 | * We need to determine how many memory channels there are. Then use |
2975 | * that information for calculating the size of the dynamic instance | 2846 | * that information for calculating the size of the dynamic instance |
diff --git a/drivers/edac/amd64_edac.h b/drivers/edac/amd64_edac.h index c6f359a85207..41bc561e5981 100644 --- a/drivers/edac/amd64_edac.h +++ b/drivers/edac/amd64_edac.h | |||
@@ -129,24 +129,22 @@ | |||
129 | * sections 3.5.4 and 3.5.5 for more information. | 129 | * sections 3.5.4 and 3.5.5 for more information. |
130 | */ | 130 | */ |
131 | 131 | ||
132 | #define EDAC_AMD64_VERSION " Ver: 3.2.0 " __DATE__ | 132 | #define EDAC_AMD64_VERSION " Ver: 3.3.0 " __DATE__ |
133 | #define EDAC_MOD_STR "amd64_edac" | 133 | #define EDAC_MOD_STR "amd64_edac" |
134 | 134 | ||
135 | #define EDAC_MAX_NUMNODES 8 | 135 | #define EDAC_MAX_NUMNODES 8 |
136 | 136 | ||
137 | /* Extended Model from CPUID, for CPU Revision numbers */ | 137 | /* Extended Model from CPUID, for CPU Revision numbers */ |
138 | #define OPTERON_CPU_LE_REV_C 0 | 138 | #define K8_REV_D 1 |
139 | #define OPTERON_CPU_REV_D 1 | 139 | #define K8_REV_E 2 |
140 | #define OPTERON_CPU_REV_E 2 | 140 | #define K8_REV_F 4 |
141 | |||
142 | /* NPT processors have the following Extended Models */ | ||
143 | #define OPTERON_CPU_REV_F 4 | ||
144 | #define OPTERON_CPU_REV_FA 5 | ||
145 | 141 | ||
146 | /* Hardware limit on ChipSelect rows per MC and processors per system */ | 142 | /* Hardware limit on ChipSelect rows per MC and processors per system */ |
147 | #define MAX_CS_COUNT 8 | 143 | #define MAX_CS_COUNT 8 |
148 | #define DRAM_REG_COUNT 8 | 144 | #define DRAM_REG_COUNT 8 |
149 | 145 | ||
146 | #define ON true | ||
147 | #define OFF false | ||
150 | 148 | ||
151 | /* | 149 | /* |
152 | * PCI-defined configuration space registers | 150 | * PCI-defined configuration space registers |
@@ -241,7 +239,7 @@ | |||
241 | #define F10_DCHR_1 0x194 | 239 | #define F10_DCHR_1 0x194 |
242 | 240 | ||
243 | #define F10_DCHR_FOUR_RANK_DIMM BIT(18) | 241 | #define F10_DCHR_FOUR_RANK_DIMM BIT(18) |
244 | #define F10_DCHR_Ddr3Mode BIT(8) | 242 | #define DDR3_MODE BIT(8) |
245 | #define F10_DCHR_MblMode BIT(6) | 243 | #define F10_DCHR_MblMode BIT(6) |
246 | 244 | ||
247 | 245 | ||
@@ -382,14 +380,9 @@ enum { | |||
382 | #define K8_NBCAP_CORES (BIT(12)|BIT(13)) | 380 | #define K8_NBCAP_CORES (BIT(12)|BIT(13)) |
383 | #define K8_NBCAP_CHIPKILL BIT(4) | 381 | #define K8_NBCAP_CHIPKILL BIT(4) |
384 | #define K8_NBCAP_SECDED BIT(3) | 382 | #define K8_NBCAP_SECDED BIT(3) |
385 | #define K8_NBCAP_8_NODE BIT(2) | ||
386 | #define K8_NBCAP_DUAL_NODE BIT(1) | ||
387 | #define K8_NBCAP_DCT_DUAL BIT(0) | 383 | #define K8_NBCAP_DCT_DUAL BIT(0) |
388 | 384 | ||
389 | /* | 385 | /* MSRs */ |
390 | * MSR Regs | ||
391 | */ | ||
392 | #define K8_MSR_MCGCTL 0x017b | ||
393 | #define K8_MSR_MCGCTL_NBE BIT(4) | 386 | #define K8_MSR_MCGCTL_NBE BIT(4) |
394 | 387 | ||
395 | #define K8_MSR_MC4CTL 0x0410 | 388 | #define K8_MSR_MC4CTL 0x0410 |
@@ -487,7 +480,6 @@ struct amd64_pvt { | |||
487 | /* Save old hw registers' values before we modified them */ | 480 | /* Save old hw registers' values before we modified them */ |
488 | u32 nbctl_mcgctl_saved; /* When true, following 2 are valid */ | 481 | u32 nbctl_mcgctl_saved; /* When true, following 2 are valid */ |
489 | u32 old_nbctl; | 482 | u32 old_nbctl; |
490 | unsigned long old_mcgctl; /* per core on this node */ | ||
491 | 483 | ||
492 | /* MC Type Index value: socket F vs Family 10h */ | 484 | /* MC Type Index value: socket F vs Family 10h */ |
493 | u32 mc_type_index; | 485 | u32 mc_type_index; |
@@ -495,6 +487,7 @@ struct amd64_pvt { | |||
495 | /* misc settings */ | 487 | /* misc settings */ |
496 | struct flags { | 488 | struct flags { |
497 | unsigned long cf8_extcfg:1; | 489 | unsigned long cf8_extcfg:1; |
490 | unsigned long ecc_report:1; | ||
498 | } flags; | 491 | } flags; |
499 | }; | 492 | }; |
500 | 493 | ||
@@ -504,7 +497,6 @@ struct scrubrate { | |||
504 | }; | 497 | }; |
505 | 498 | ||
506 | extern struct scrubrate scrubrates[23]; | 499 | extern struct scrubrate scrubrates[23]; |
507 | extern u32 revf_quad_ddr2_shift[16]; | ||
508 | extern const char *tt_msgs[4]; | 500 | extern const char *tt_msgs[4]; |
509 | extern const char *ll_msgs[4]; | 501 | extern const char *ll_msgs[4]; |
510 | extern const char *rrrr_msgs[16]; | 502 | extern const char *rrrr_msgs[16]; |
@@ -534,17 +526,15 @@ extern struct mcidev_sysfs_attribute amd64_dbg_attrs[NUM_DBG_ATTRS], | |||
534 | * functions and per device encoding/decoding logic. | 526 | * functions and per device encoding/decoding logic. |
535 | */ | 527 | */ |
536 | struct low_ops { | 528 | struct low_ops { |
537 | int (*probe_valid_hardware)(struct amd64_pvt *pvt); | 529 | int (*early_channel_count) (struct amd64_pvt *pvt); |
538 | int (*early_channel_count)(struct amd64_pvt *pvt); | 530 | |
539 | 531 | u64 (*get_error_address) (struct mem_ctl_info *mci, | |
540 | u64 (*get_error_address)(struct mem_ctl_info *mci, | 532 | struct err_regs *info); |
541 | struct err_regs *info); | 533 | void (*read_dram_base_limit) (struct amd64_pvt *pvt, int dram); |
542 | void (*read_dram_base_limit)(struct amd64_pvt *pvt, int dram); | 534 | void (*read_dram_ctl_register) (struct amd64_pvt *pvt); |
543 | void (*read_dram_ctl_register)(struct amd64_pvt *pvt); | 535 | void (*map_sysaddr_to_csrow) (struct mem_ctl_info *mci, |
544 | void (*map_sysaddr_to_csrow)(struct mem_ctl_info *mci, | 536 | struct err_regs *info, u64 SystemAddr); |
545 | struct err_regs *info, | 537 | int (*dbam_to_cs) (struct amd64_pvt *pvt, int cs_mode); |
546 | u64 SystemAddr); | ||
547 | int (*dbam_map_to_pages)(struct amd64_pvt *pvt, int dram_map); | ||
548 | }; | 538 | }; |
549 | 539 | ||
550 | struct amd64_family_type { | 540 | struct amd64_family_type { |
@@ -566,6 +556,22 @@ static inline struct low_ops *family_ops(int index) | |||
566 | return &amd64_family_types[index].ops; | 556 | return &amd64_family_types[index].ops; |
567 | } | 557 | } |
568 | 558 | ||
559 | static inline int amd64_read_pci_cfg_dword(struct pci_dev *pdev, int offset, | ||
560 | u32 *val, const char *func) | ||
561 | { | ||
562 | int err = 0; | ||
563 | |||
564 | err = pci_read_config_dword(pdev, offset, val); | ||
565 | if (err) | ||
566 | amd64_printk(KERN_WARNING, "%s: error reading F%dx%x.\n", | ||
567 | func, PCI_FUNC(pdev->devfn), offset); | ||
568 | |||
569 | return err; | ||
570 | } | ||
571 | |||
572 | #define amd64_read_pci_cfg(pdev, offset, val) \ | ||
573 | amd64_read_pci_cfg_dword(pdev, offset, val, __func__) | ||
574 | |||
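A usage sketch for the wrapper just defined: __func__ is captured at the call site, so a failed read warns with the caller's name plus the F<function>x<offset> register (e.g. F3x44 for K8_NBCFG), and the non-zero return is what allows the ||-chaining seen in the .c hunks above. The -ENODEV policy below is a hypothetical caller choice:

	u32 value;

	if (amd64_read_pci_cfg(pvt->misc_f3_ctl, K8_NBCFG, &value))
		return -ENODEV;	/* wrapper has already printed the warning */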
569 | /* | 575 | /* |
570 | * For future CPU versions, verify the following as new 'slow' rates appear and | 576 | * For future CPU versions, verify the following as new 'slow' rates appear and |
571 | * modify the necessary skip values for the supported CPU. | 577 | * modify the necessary skip values for the supported CPU. |
diff --git a/drivers/edac/edac_core.h b/drivers/edac/edac_core.h index 12f355cafdbe..001b2e797fb3 100644 --- a/drivers/edac/edac_core.h +++ b/drivers/edac/edac_core.h | |||
@@ -74,6 +74,7 @@ | |||
74 | 74 | ||
75 | #ifdef CONFIG_EDAC_DEBUG | 75 | #ifdef CONFIG_EDAC_DEBUG |
76 | extern int edac_debug_level; | 76 | extern int edac_debug_level; |
77 | extern const char *edac_mem_types[]; | ||
77 | 78 | ||
78 | #ifndef CONFIG_EDAC_DEBUG_VERBOSE | 79 | #ifndef CONFIG_EDAC_DEBUG_VERBOSE |
79 | #define edac_debug_printk(level, fmt, arg...) \ | 80 | #define edac_debug_printk(level, fmt, arg...) \ |
diff --git a/drivers/edac/edac_mc.c b/drivers/edac/edac_mc.c index b629c41756f0..3630308e7b81 100644 --- a/drivers/edac/edac_mc.c +++ b/drivers/edac/edac_mc.c | |||
@@ -76,6 +76,30 @@ static void edac_mc_dump_mci(struct mem_ctl_info *mci) | |||
76 | debugf3("\tpvt_info = %p\n\n", mci->pvt_info); | 76 | debugf3("\tpvt_info = %p\n\n", mci->pvt_info); |
77 | } | 77 | } |
78 | 78 | ||
79 | /* | ||
80 | * keep those in sync with the enum mem_type | ||
81 | */ | ||
82 | const char *edac_mem_types[] = { | ||
83 | "Empty csrow", | ||
84 | "Reserved csrow type", | ||
85 | "Unknown csrow type", | ||
86 | "Fast page mode RAM", | ||
87 | "Extended data out RAM", | ||
88 | "Burst Extended data out RAM", | ||
89 | "Single data rate SDRAM", | ||
90 | "Registered single data rate SDRAM", | ||
91 | "Double data rate SDRAM", | ||
92 | "Registered Double data rate SDRAM", | ||
93 | "Rambus DRAM", | ||
94 | "Unbuffered DDR2 RAM", | ||
95 | "Fully buffered DDR2", | ||
96 | "Registered DDR2 RAM", | ||
97 | "Rambus XDR", | ||
98 | "Unbuffered DDR3 RAM", | ||
99 | "Registered DDR3 RAM", | ||
100 | }; | ||
101 | EXPORT_SYMBOL_GPL(edac_mem_types); | ||
102 | |||
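A hedged usage sketch: the array is meant to be indexed by enum mem_type from the EDAC headers (hence the sync comment above), e.g. when dumping a csrow's DIMM type in debug builds:

	debugf1("  memory type:\t\t%s\n", edac_mem_types[csrow->mtype]);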
79 | #endif /* CONFIG_EDAC_DEBUG */ | 103 | #endif /* CONFIG_EDAC_DEBUG */ |
80 | 104 | ||
81 | /* 'ptr' points to a possibly unaligned item X such that sizeof(X) is 'size'. | 105 | /* 'ptr' points to a possibly unaligned item X such that sizeof(X) is 'size'. |
diff --git a/drivers/edac/edac_mce_amd.c b/drivers/edac/edac_mce_amd.c index 689cc6a6214d..c693fcc2213c 100644 --- a/drivers/edac/edac_mce_amd.c +++ b/drivers/edac/edac_mce_amd.c | |||
@@ -306,7 +306,7 @@ void amd_decode_nb_mce(int node_id, struct err_regs *regs, int handle_errors) | |||
306 | * value encoding has changed so interpret those differently | 306 | * value encoding has changed so interpret those differently |
307 | */ | 307 | */ |
308 | if ((boot_cpu_data.x86 == 0x10) && | 308 | if ((boot_cpu_data.x86 == 0x10) && |
309 | (boot_cpu_data.x86_model > 8)) { | 309 | (boot_cpu_data.x86_model > 7)) { |
310 | if (regs->nbsh & K8_NBSH_ERR_CPU_VAL) | 310 | if (regs->nbsh & K8_NBSH_ERR_CPU_VAL) |
311 | pr_cont(", core: %u\n", (u8)(regs->nbsh & 0xf)); | 311 | pr_cont(", core: %u\n", (u8)(regs->nbsh & 0xf)); |
312 | } else { | 312 | } else { |