-rw-r--r--  drivers/edac/amd76x_edac.c  |  74
-rw-r--r--  drivers/edac/e752x_edac.c   | 322
-rw-r--r--  drivers/edac/e7xxx_edac.c   | 158
-rw-r--r--  drivers/edac/i82860_edac.c  | 102
-rw-r--r--  drivers/edac/i82875p_edac.c | 188
-rw-r--r--  drivers/edac/r82600_edac.c  | 118
6 files changed, 518 insertions(+), 444 deletions(-)
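
All six probe routines get the same treatment: the per-row setup moves out of *_probe1() into a dedicated *_init_csrows() helper (plus a few chipset-specific helpers for e752x and i82875p), and the failure paths drop their rc/NULL bookkeeping in favour of early returns and an unconditional edac_mc_free(). Below is a minimal sketch of the shape the probe functions converge on; the "foo" names, the FOO_* constants and the empty helper body are placeholders for illustration, not code from this patch.

/*
 * Illustrative shape only; not any driver's actual code.
 */
static void foo_init_csrows(struct mem_ctl_info *mci, struct pci_dev *pdev)
{
	/* per-row register reads and csrow_info fill-in go here */
}

static int foo_probe1(struct pci_dev *pdev, int dev_idx)
{
	struct mem_ctl_info *mci;

	/* dev_idx would select the ctl_name table entry; elided here */
	mci = edac_mc_alloc(0, FOO_NR_CSROWS, FOO_NR_CHANS);
	if (mci == NULL)
		return -ENOMEM;	/* was: rc = -ENOMEM; goto fail; */

	/* ... mci field setup ... */
	foo_init_csrows(mci, pdev);

	if (edac_mc_add_mc(mci, 0))
		goto fail;

	return 0;

fail:
	edac_mc_free(mci);	/* was guarded by "if (mci != NULL)" */
	return -ENODEV;
}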
diff --git a/drivers/edac/amd76x_edac.c b/drivers/edac/amd76x_edac.c
index 303cb500b377..702141c5501b 100644
--- a/drivers/edac/amd76x_edac.c
+++ b/drivers/edac/amd76x_edac.c
@@ -182,6 +182,38 @@ static void amd76x_check(struct mem_ctl_info *mci)
182 | amd76x_process_error_info(mci, &info, 1); | 182 | amd76x_process_error_info(mci, &info, 1); |
183 | } | 183 | } |
184 | 184 | ||
185 | static void amd76x_init_csrows(struct mem_ctl_info *mci, struct pci_dev *pdev, | ||
186 | enum edac_type edac_mode) | ||
187 | { | ||
188 | struct csrow_info *csrow; | ||
189 | u32 mba, mba_base, mba_mask, dms; | ||
190 | int index; | ||
191 | |||
192 | for (index = 0; index < mci->nr_csrows; index++) { | ||
193 | csrow = &mci->csrows[index]; | ||
194 | |||
195 | /* find the DRAM Chip Select Base address and mask */ | ||
196 | pci_read_config_dword(pdev, | ||
197 | AMD76X_MEM_BASE_ADDR + (index * 4), | ||
198 | &mba); | ||
199 | |||
200 | if (!(mba & BIT(0))) | ||
201 | continue; | ||
202 | |||
203 | mba_base = mba & 0xff800000UL; | ||
204 | mba_mask = ((mba & 0xff80) << 16) | 0x7fffffUL; | ||
205 | pci_read_config_dword(pdev, AMD76X_DRAM_MODE_STATUS, &dms); | ||
206 | csrow->first_page = mba_base >> PAGE_SHIFT; | ||
207 | csrow->nr_pages = (mba_mask + 1) >> PAGE_SHIFT; | ||
208 | csrow->last_page = csrow->first_page + csrow->nr_pages - 1; | ||
209 | csrow->page_mask = mba_mask >> PAGE_SHIFT; | ||
210 | csrow->grain = csrow->nr_pages << PAGE_SHIFT; | ||
211 | csrow->mtype = MEM_RDDR; | ||
212 | csrow->dtype = ((dms >> index) & 0x1) ? DEV_X4 : DEV_UNKNOWN; | ||
213 | csrow->edac_mode = edac_mode; | ||
214 | } | ||
215 | } | ||
216 | |||
185 | /** | 217 | /** |
186 | * amd76x_probe1 - Perform set up for detected device | 218 | * amd76x_probe1 - Perform set up for detected device |
187 | * @pdev; PCI device detected | 219 | * @pdev; PCI device detected |
@@ -193,15 +225,13 @@ static void amd76x_check(struct mem_ctl_info *mci)
193 | */ | 225 | */ |
194 | static int amd76x_probe1(struct pci_dev *pdev, int dev_idx) | 226 | static int amd76x_probe1(struct pci_dev *pdev, int dev_idx) |
195 | { | 227 | { |
196 | int rc = -ENODEV; | 228 | static const enum edac_type ems_modes[] = { |
197 | int index; | ||
198 | struct mem_ctl_info *mci = NULL; | ||
199 | enum edac_type ems_modes[] = { | ||
200 | EDAC_NONE, | 229 | EDAC_NONE, |
201 | EDAC_EC, | 230 | EDAC_EC, |
202 | EDAC_SECDED, | 231 | EDAC_SECDED, |
203 | EDAC_SECDED | 232 | EDAC_SECDED |
204 | }; | 233 | }; |
234 | struct mem_ctl_info *mci = NULL; | ||
205 | u32 ems; | 235 | u32 ems; |
206 | u32 ems_mode; | 236 | u32 ems_mode; |
207 | struct amd76x_error_info discard; | 237 | struct amd76x_error_info discard; |
@@ -212,8 +242,7 @@ static int amd76x_probe1(struct pci_dev *pdev, int dev_idx)
212 | mci = edac_mc_alloc(0, AMD76X_NR_CSROWS, AMD76X_NR_CHANS); | 242 | mci = edac_mc_alloc(0, AMD76X_NR_CSROWS, AMD76X_NR_CHANS); |
213 | 243 | ||
214 | if (mci == NULL) { | 244 | if (mci == NULL) { |
215 | rc = -ENOMEM; | 245 | return -ENOMEM; |
216 | goto fail; | ||
217 | } | 246 | } |
218 | 247 | ||
219 | debugf0("%s(): mci = %p\n", __func__, mci); | 248 | debugf0("%s(): mci = %p\n", __func__, mci); |
@@ -228,33 +257,7 @@ static int amd76x_probe1(struct pci_dev *pdev, int dev_idx)
228 | mci->edac_check = amd76x_check; | 257 | mci->edac_check = amd76x_check; |
229 | mci->ctl_page_to_phys = NULL; | 258 | mci->ctl_page_to_phys = NULL; |
230 | 259 | ||
231 | for (index = 0; index < mci->nr_csrows; index++) { | 260 | amd76x_init_csrows(mci, pdev, ems_modes[ems_mode]); |
232 | struct csrow_info *csrow = &mci->csrows[index]; | ||
233 | u32 mba; | ||
234 | u32 mba_base; | ||
235 | u32 mba_mask; | ||
236 | u32 dms; | ||
237 | |||
238 | /* find the DRAM Chip Select Base address and mask */ | ||
239 | pci_read_config_dword(pdev, | ||
240 | AMD76X_MEM_BASE_ADDR + (index * 4), &mba); | ||
241 | |||
242 | if (!(mba & BIT(0))) | ||
243 | continue; | ||
244 | |||
245 | mba_base = mba & 0xff800000UL; | ||
246 | mba_mask = ((mba & 0xff80) << 16) | 0x7fffffUL; | ||
247 | pci_read_config_dword(pdev, AMD76X_DRAM_MODE_STATUS, &dms); | ||
248 | csrow->first_page = mba_base >> PAGE_SHIFT; | ||
249 | csrow->nr_pages = (mba_mask + 1) >> PAGE_SHIFT; | ||
250 | csrow->last_page = csrow->first_page + csrow->nr_pages - 1; | ||
251 | csrow->page_mask = mba_mask >> PAGE_SHIFT; | ||
252 | csrow->grain = csrow->nr_pages << PAGE_SHIFT; | ||
253 | csrow->mtype = MEM_RDDR; | ||
254 | csrow->dtype = ((dms >> index) & 0x1) ? DEV_X4 : DEV_UNKNOWN; | ||
255 | csrow->edac_mode = ems_modes[ems_mode]; | ||
256 | } | ||
257 | |||
258 | amd76x_get_error_info(mci, &discard); /* clear counters */ | 261 | amd76x_get_error_info(mci, &discard); /* clear counters */ |
259 | 262 | ||
260 | /* Here we assume that we will never see multiple instances of this | 263 | /* Here we assume that we will never see multiple instances of this |
@@ -270,9 +273,8 @@ static int amd76x_probe1(struct pci_dev *pdev, int dev_idx)
270 | return 0; | 273 | return 0; |
271 | 274 | ||
272 | fail: | 275 | fail: |
273 | if (mci != NULL) | 276 | edac_mc_free(mci); |
274 | edac_mc_free(mci); | 277 | return -ENODEV; |
275 | return rc; | ||
276 | } | 278 | } |
277 | 279 | ||
278 | /* returns count (>= 0), or negative on error */ | 280 | /* returns count (>= 0), or negative on error */ |
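
For reference, the MBA decode performed by the new amd76x_init_csrows() above can be checked in isolation. The snippet below is plain user-space C that repeats the same expressions; the PAGE_SHIFT value and the sample register contents are assumptions for illustration, not values read from hardware.

#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT 12	/* assumed 4 KiB pages */

int main(void)
{
	uint32_t mba = 0x10000001;	/* hypothetical: CS enabled (bit 0), base at 256 MiB */
	uint32_t mba_base = mba & 0xff800000UL;
	uint32_t mba_mask = ((mba & 0xff80) << 16) | 0x7fffffUL;
	unsigned long first_page = mba_base >> PAGE_SHIFT;
	unsigned long nr_pages = (mba_mask + 1UL) >> PAGE_SHIFT;

	printf("first_page=0x%lx nr_pages=%lu (%lu MiB)\n",
	       first_page, nr_pages, nr_pages >> (20 - PAGE_SHIFT));
	return 0;
}

With this sample value the mask field (bits 15:7) is zero, so the decode yields first_page=0x10000 and nr_pages=2048, i.e. an 8 MiB row starting at 256 MiB.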
diff --git a/drivers/edac/e752x_edac.c b/drivers/edac/e752x_edac.c
index 5e773e382e8a..5351a76739e5 100644
--- a/drivers/edac/e752x_edac.c
+++ b/drivers/edac/e752x_edac.c
@@ -765,22 +765,174 @@ static void e752x_check(struct mem_ctl_info *mci)
765 | e752x_process_error_info(mci, &info, 1); | 765 | e752x_process_error_info(mci, &info, 1); |
766 | } | 766 | } |
767 | 767 | ||
768 | static int e752x_probe1(struct pci_dev *pdev, int dev_idx) | 768 | /* Return 1 if dual channel mode is active. Else return 0. */ |
769 | static inline int dual_channel_active(u16 ddrcsr) | ||
770 | { | ||
771 | return (((ddrcsr >> 12) & 3) == 3); | ||
772 | } | ||
773 | |||
774 | static void e752x_init_csrows(struct mem_ctl_info *mci, struct pci_dev *pdev, | ||
775 | u16 ddrcsr) | ||
776 | { | ||
777 | struct csrow_info *csrow; | ||
778 | unsigned long last_cumul_size; | ||
779 | int index, mem_dev, drc_chan; | ||
780 | int drc_drbg; /* DRB granularity 0=64mb, 1=128mb */ | ||
781 | int drc_ddim; /* DRAM Data Integrity Mode 0=none, 2=edac */ | ||
782 | u8 value; | ||
783 | u32 dra, drc, cumul_size; | ||
784 | |||
785 | pci_read_config_dword(pdev, E752X_DRA, &dra); | ||
786 | pci_read_config_dword(pdev, E752X_DRC, &drc); | ||
787 | drc_chan = dual_channel_active(ddrcsr); | ||
788 | drc_drbg = drc_chan + 1; /* 128 in dual mode, 64 in single */ | ||
789 | drc_ddim = (drc >> 20) & 0x3; | ||
790 | |||
791 | /* The dram row boundary (DRB) reg values are boundary address for | ||
792 | * each DRAM row with a granularity of 64 or 128MB (single/dual | ||
793 | * channel operation). DRB regs are cumulative; therefore DRB7 will | ||
794 | * contain the total memory contained in all eight rows. | ||
795 | */ | ||
796 | for (last_cumul_size = index = 0; index < mci->nr_csrows; index++) { | ||
797 | /* mem_dev 0=x8, 1=x4 */ | ||
798 | mem_dev = (dra >> (index * 4 + 2)) & 0x3; | ||
799 | csrow = &mci->csrows[index]; | ||
800 | |||
801 | mem_dev = (mem_dev == 2); | ||
802 | pci_read_config_byte(pdev, E752X_DRB + index, &value); | ||
803 | /* convert a 128 or 64 MiB DRB to a page size. */ | ||
804 | cumul_size = value << (25 + drc_drbg - PAGE_SHIFT); | ||
805 | debugf3("%s(): (%d) cumul_size 0x%x\n", __func__, index, | ||
806 | cumul_size); | ||
807 | if (cumul_size == last_cumul_size) | ||
808 | continue; /* not populated */ | ||
809 | |||
810 | csrow->first_page = last_cumul_size; | ||
811 | csrow->last_page = cumul_size - 1; | ||
812 | csrow->nr_pages = cumul_size - last_cumul_size; | ||
813 | last_cumul_size = cumul_size; | ||
814 | csrow->grain = 1 << 12; /* 4KiB - resolution of CELOG */ | ||
815 | csrow->mtype = MEM_RDDR; /* only one type supported */ | ||
816 | csrow->dtype = mem_dev ? DEV_X4 : DEV_X8; | ||
817 | |||
818 | /* | ||
819 | * if single channel or x8 devices then SECDED | ||
820 | * if dual channel and x4 then S4ECD4ED | ||
821 | */ | ||
822 | if (drc_ddim) { | ||
823 | if (drc_chan && mem_dev) { | ||
824 | csrow->edac_mode = EDAC_S4ECD4ED; | ||
825 | mci->edac_cap |= EDAC_FLAG_S4ECD4ED; | ||
826 | } else { | ||
827 | csrow->edac_mode = EDAC_SECDED; | ||
828 | mci->edac_cap |= EDAC_FLAG_SECDED; | ||
829 | } | ||
830 | } else | ||
831 | csrow->edac_mode = EDAC_NONE; | ||
832 | } | ||
833 | } | ||
834 | |||
835 | static void e752x_init_mem_map_table(struct pci_dev *pdev, | ||
836 | struct e752x_pvt *pvt) | ||
769 | { | 837 | { |
770 | int rc = -ENODEV; | ||
771 | int index; | 838 | int index; |
839 | u8 value, last, row, stat8; | ||
840 | |||
841 | last = 0; | ||
842 | row = 0; | ||
843 | |||
844 | for (index = 0; index < 8; index += 2) { | ||
845 | pci_read_config_byte(pdev, E752X_DRB + index, &value); | ||
846 | /* test if there is a dimm in this slot */ | ||
847 | if (value == last) { | ||
848 | /* no dimm in the slot, so flag it as empty */ | ||
849 | pvt->map[index] = 0xff; | ||
850 | pvt->map[index + 1] = 0xff; | ||
851 | } else { /* there is a dimm in the slot */ | ||
852 | pvt->map[index] = row; | ||
853 | row++; | ||
854 | last = value; | ||
855 | /* test the next value to see if the dimm is double | ||
856 | * sided | ||
857 | */ | ||
858 | pci_read_config_byte(pdev, E752X_DRB + index + 1, | ||
859 | &value); | ||
860 | pvt->map[index + 1] = (value == last) ? | ||
861 | 0xff : /* the dimm is single sided, | ||
862 | so flag as empty */ | ||
863 | row; /* this is a double sided dimm | ||
864 | to save the next row # */ | ||
865 | row++; | ||
866 | last = value; | ||
867 | } | ||
868 | } | ||
869 | |||
870 | /* set the map type. 1 = normal, 0 = reversed */ | ||
871 | pci_read_config_byte(pdev, E752X_DRM, &stat8); | ||
872 | pvt->map_type = ((stat8 & 0x0f) > ((stat8 >> 4) & 0x0f)); | ||
873 | } | ||
874 | |||
875 | /* Return 0 on success or 1 on failure. */ | ||
876 | static int e752x_get_devs(struct pci_dev *pdev, int dev_idx, | ||
877 | struct e752x_pvt *pvt) | ||
878 | { | ||
879 | struct pci_dev *dev; | ||
880 | |||
881 | pvt->bridge_ck = pci_get_device(PCI_VENDOR_ID_INTEL, | ||
882 | pvt->dev_info->err_dev, | ||
883 | pvt->bridge_ck); | ||
884 | |||
885 | if (pvt->bridge_ck == NULL) | ||
886 | pvt->bridge_ck = pci_scan_single_device(pdev->bus, | ||
887 | PCI_DEVFN(0, 1)); | ||
888 | |||
889 | if (pvt->bridge_ck == NULL) { | ||
890 | e752x_printk(KERN_ERR, "error reporting device not found:" | ||
891 | "vendor %x device 0x%x (broken BIOS?)\n", | ||
892 | PCI_VENDOR_ID_INTEL, e752x_devs[dev_idx].err_dev); | ||
893 | return 1; | ||
894 | } | ||
895 | |||
896 | dev = pci_get_device(PCI_VENDOR_ID_INTEL, e752x_devs[dev_idx].ctl_dev, | ||
897 | NULL); | ||
898 | |||
899 | if (dev == NULL) | ||
900 | goto fail; | ||
901 | |||
902 | pvt->dev_d0f0 = dev; | ||
903 | pvt->dev_d0f1 = pci_dev_get(pvt->bridge_ck); | ||
904 | |||
905 | return 0; | ||
906 | |||
907 | fail: | ||
908 | pci_dev_put(pvt->bridge_ck); | ||
909 | return 1; | ||
910 | } | ||
911 | |||
912 | static void e752x_init_error_reporting_regs(struct e752x_pvt *pvt) | ||
913 | { | ||
914 | struct pci_dev *dev; | ||
915 | |||
916 | dev = pvt->dev_d0f1; | ||
917 | /* Turn off error disable & SMI in case the BIOS turned it on */ | ||
918 | pci_write_config_byte(dev, E752X_HI_ERRMASK, 0x00); | ||
919 | pci_write_config_byte(dev, E752X_HI_SMICMD, 0x00); | ||
920 | pci_write_config_word(dev, E752X_SYSBUS_ERRMASK, 0x00); | ||
921 | pci_write_config_word(dev, E752X_SYSBUS_SMICMD, 0x00); | ||
922 | pci_write_config_byte(dev, E752X_BUF_ERRMASK, 0x00); | ||
923 | pci_write_config_byte(dev, E752X_BUF_SMICMD, 0x00); | ||
924 | pci_write_config_byte(dev, E752X_DRAM_ERRMASK, 0x00); | ||
925 | pci_write_config_byte(dev, E752X_DRAM_SMICMD, 0x00); | ||
926 | } | ||
927 | |||
928 | static int e752x_probe1(struct pci_dev *pdev, int dev_idx) | ||
929 | { | ||
772 | u16 pci_data; | 930 | u16 pci_data; |
773 | u8 stat8; | 931 | u8 stat8; |
774 | struct mem_ctl_info *mci = NULL; | 932 | struct mem_ctl_info *mci; |
775 | struct e752x_pvt *pvt = NULL; | 933 | struct e752x_pvt *pvt; |
776 | u16 ddrcsr; | 934 | u16 ddrcsr; |
777 | u32 drc; | ||
778 | int drc_chan; /* Number of channels 0=1chan,1=2chan */ | 935 | int drc_chan; /* Number of channels 0=1chan,1=2chan */ |
779 | int drc_drbg; /* DRB granularity 0=64mb, 1=128mb */ | ||
780 | int drc_ddim; /* DRAM Data Integrity Mode 0=none,2=edac */ | ||
781 | u32 dra; | ||
782 | unsigned long last_cumul_size; | ||
783 | struct pci_dev *dev = NULL; | ||
784 | struct e752x_error_info discard; | 936 | struct e752x_error_info discard; |
785 | 937 | ||
786 | debugf0("%s(): mci\n", __func__); | 938 | debugf0("%s(): mci\n", __func__); |
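
The DIMM-map walk in the new e752x_init_mem_map_table() above is easiest to follow with concrete numbers. The user-space trace below mirrors that loop verbatim over a hypothetical set of cumulative DRB values (the register contents are invented for illustration and are not from real hardware).

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* invented cumulative DRB contents: a double-sided DIMM in slot 0,
	 * slot 1 empty, a single-sided DIMM in slot 2, slot 3 empty */
	uint8_t drb[8] = { 0x04, 0x08, 0x08, 0x08, 0x0c, 0x0c, 0x0c, 0x0c };
	uint8_t map[8];
	uint8_t value, last = 0, row = 0;
	int index;

	for (index = 0; index < 8; index += 2) {
		value = drb[index];
		if (value == last) {		/* no dimm in the slot */
			map[index] = 0xff;
			map[index + 1] = 0xff;
		} else {			/* dimm present */
			map[index] = row;
			row++;
			last = value;
			value = drb[index + 1];	/* is the second side populated? */
			map[index + 1] = (value == last) ? 0xff : row;
			row++;
			last = value;
		}
	}

	for (index = 0; index < 8; index++)
		printf("map[%d] = %#x\n", index, map[index]);
	return 0;
}

For this sample the trace prints rows 0 and 1 for the double-sided DIMM, 0xff for the empty sides and slots, and row 2 for the single-sided DIMM, matching the comments in the driver code.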
@@ -794,25 +946,20 @@ static int e752x_probe1(struct pci_dev *pdev, int dev_idx)
794 | if (!force_function_unhide && !(stat8 & (1 << 5))) { | 946 | if (!force_function_unhide && !(stat8 & (1 << 5))) { |
795 | printk(KERN_INFO "Contact your BIOS vendor to see if the " | 947 | printk(KERN_INFO "Contact your BIOS vendor to see if the " |
796 | "E752x error registers can be safely un-hidden\n"); | 948 | "E752x error registers can be safely un-hidden\n"); |
797 | goto fail; | 949 | return -ENOMEM; |
798 | } | 950 | } |
799 | stat8 |= (1 << 5); | 951 | stat8 |= (1 << 5); |
800 | pci_write_config_byte(pdev, E752X_DEVPRES1, stat8); | 952 | pci_write_config_byte(pdev, E752X_DEVPRES1, stat8); |
801 | 953 | ||
802 | /* need to find out the number of channels */ | ||
803 | pci_read_config_dword(pdev, E752X_DRC, &drc); | ||
804 | pci_read_config_word(pdev, E752X_DDRCSR, &ddrcsr); | 954 | pci_read_config_word(pdev, E752X_DDRCSR, &ddrcsr); |
805 | /* FIXME: should check >>12 or 0xf, true for all? */ | 955 | /* FIXME: should check >>12 or 0xf, true for all? */ |
806 | /* Dual channel = 1, Single channel = 0 */ | 956 | /* Dual channel = 1, Single channel = 0 */ |
807 | drc_chan = (((ddrcsr >> 12) & 3) == 3); | 957 | drc_chan = dual_channel_active(ddrcsr); |
808 | drc_drbg = drc_chan + 1; /* 128 in dual mode, 64 in single */ | ||
809 | drc_ddim = (drc >> 20) & 0x3; | ||
810 | 958 | ||
811 | mci = edac_mc_alloc(sizeof(*pvt), E752X_NR_CSROWS, drc_chan + 1); | 959 | mci = edac_mc_alloc(sizeof(*pvt), E752X_NR_CSROWS, drc_chan + 1); |
812 | 960 | ||
813 | if (mci == NULL) { | 961 | if (mci == NULL) { |
814 | rc = -ENOMEM; | 962 | return -ENOMEM; |
815 | goto fail; | ||
816 | } | 963 | } |
817 | 964 | ||
818 | debugf3("%s(): init mci\n", __func__); | 965 | debugf3("%s(): init mci\n", __func__); |
@@ -827,113 +974,20 @@ static int e752x_probe1(struct pci_dev *pdev, int dev_idx)
827 | debugf3("%s(): init pvt\n", __func__); | 974 | debugf3("%s(): init pvt\n", __func__); |
828 | pvt = (struct e752x_pvt *) mci->pvt_info; | 975 | pvt = (struct e752x_pvt *) mci->pvt_info; |
829 | pvt->dev_info = &e752x_devs[dev_idx]; | 976 | pvt->dev_info = &e752x_devs[dev_idx]; |
830 | pvt->bridge_ck = pci_get_device(PCI_VENDOR_ID_INTEL, | 977 | pvt->mc_symmetric = ((ddrcsr & 0x10) != 0); |
831 | pvt->dev_info->err_dev, | ||
832 | pvt->bridge_ck); | ||
833 | |||
834 | if (pvt->bridge_ck == NULL) | ||
835 | pvt->bridge_ck = pci_scan_single_device(pdev->bus, | ||
836 | PCI_DEVFN(0, 1)); | ||
837 | 978 | ||
838 | if (pvt->bridge_ck == NULL) { | 979 | if (e752x_get_devs(pdev, dev_idx, pvt)) { |
839 | e752x_printk(KERN_ERR, "error reporting device not found:" | 980 | edac_mc_free(mci); |
840 | "vendor %x device 0x%x (broken BIOS?)\n", | 981 | return -ENODEV; |
841 | PCI_VENDOR_ID_INTEL, e752x_devs[dev_idx].err_dev); | ||
842 | goto fail; | ||
843 | } | 982 | } |
844 | 983 | ||
845 | pvt->mc_symmetric = ((ddrcsr & 0x10) != 0); | ||
846 | debugf3("%s(): more mci init\n", __func__); | 984 | debugf3("%s(): more mci init\n", __func__); |
847 | mci->ctl_name = pvt->dev_info->ctl_name; | 985 | mci->ctl_name = pvt->dev_info->ctl_name; |
848 | mci->edac_check = e752x_check; | 986 | mci->edac_check = e752x_check; |
849 | mci->ctl_page_to_phys = ctl_page_to_phys; | 987 | mci->ctl_page_to_phys = ctl_page_to_phys; |
850 | 988 | ||
851 | /* find out the device types */ | 989 | e752x_init_csrows(mci, pdev, ddrcsr); |
852 | pci_read_config_dword(pdev, E752X_DRA, &dra); | 990 | e752x_init_mem_map_table(pdev, pvt); |
853 | |||
854 | /* | ||
855 | * The dram row boundary (DRB) reg values are boundary address for | ||
856 | * each DRAM row with a granularity of 64 or 128MB (single/dual | ||
857 | * channel operation). DRB regs are cumulative; therefore DRB7 will | ||
858 | * contain the total memory contained in all eight rows. | ||
859 | */ | ||
860 | for (last_cumul_size = index = 0; index < mci->nr_csrows; index++) { | ||
861 | u8 value; | ||
862 | u32 cumul_size; | ||
863 | |||
864 | /* mem_dev 0=x8, 1=x4 */ | ||
865 | int mem_dev = (dra >> (index * 4 + 2)) & 0x3; | ||
866 | struct csrow_info *csrow = &mci->csrows[index]; | ||
867 | |||
868 | mem_dev = (mem_dev == 2); | ||
869 | pci_read_config_byte(pdev, E752X_DRB + index, &value); | ||
870 | /* convert a 128 or 64 MiB DRB to a page size. */ | ||
871 | cumul_size = value << (25 + drc_drbg - PAGE_SHIFT); | ||
872 | debugf3("%s(): (%d) cumul_size 0x%x\n", __func__, index, | ||
873 | cumul_size); | ||
874 | |||
875 | if (cumul_size == last_cumul_size) | ||
876 | continue; /* not populated */ | ||
877 | |||
878 | csrow->first_page = last_cumul_size; | ||
879 | csrow->last_page = cumul_size - 1; | ||
880 | csrow->nr_pages = cumul_size - last_cumul_size; | ||
881 | last_cumul_size = cumul_size; | ||
882 | csrow->grain = 1 << 12; /* 4KiB - resolution of CELOG */ | ||
883 | csrow->mtype = MEM_RDDR; /* only one type supported */ | ||
884 | csrow->dtype = mem_dev ? DEV_X4 : DEV_X8; | ||
885 | |||
886 | /* | ||
887 | * if single channel or x8 devices then SECDED | ||
888 | * if dual channel and x4 then S4ECD4ED | ||
889 | */ | ||
890 | if (drc_ddim) { | ||
891 | if (drc_chan && mem_dev) { | ||
892 | csrow->edac_mode = EDAC_S4ECD4ED; | ||
893 | mci->edac_cap |= EDAC_FLAG_S4ECD4ED; | ||
894 | } else { | ||
895 | csrow->edac_mode = EDAC_SECDED; | ||
896 | mci->edac_cap |= EDAC_FLAG_SECDED; | ||
897 | } | ||
898 | } else | ||
899 | csrow->edac_mode = EDAC_NONE; | ||
900 | } | ||
901 | |||
902 | /* Fill in the memory map table */ | ||
903 | { | ||
904 | u8 value; | ||
905 | u8 last = 0; | ||
906 | u8 row = 0; | ||
907 | |||
908 | for (index = 0; index < 8; index += 2) { | ||
909 | pci_read_config_byte(pdev, E752X_DRB + index, &value); | ||
910 | |||
911 | /* test if there is a dimm in this slot */ | ||
912 | if (value == last) { | ||
913 | /* no dimm in the slot, so flag it as empty */ | ||
914 | pvt->map[index] = 0xff; | ||
915 | pvt->map[index + 1] = 0xff; | ||
916 | } else { /* there is a dimm in the slot */ | ||
917 | pvt->map[index] = row; | ||
918 | row++; | ||
919 | last = value; | ||
920 | /* test the next value to see if the dimm is | ||
921 | double sided */ | ||
922 | pci_read_config_byte(pdev, | ||
923 | E752X_DRB + index + 1, | ||
924 | &value); | ||
925 | pvt->map[index + 1] = (value == last) ? | ||
926 | 0xff : /* the dimm is single sided, | ||
927 | * so flag as empty | ||
928 | */ | ||
929 | row; /* this is a double sided dimm | ||
930 | * to save the next row # | ||
931 | */ | ||
932 | row++; | ||
933 | last = value; | ||
934 | } | ||
935 | } | ||
936 | } | ||
937 | 991 | ||
938 | /* set the map type. 1 = normal, 0 = reversed */ | 992 | /* set the map type. 1 = normal, 0 = reversed */ |
939 | pci_read_config_byte(pdev, E752X_DRM, &stat8); | 993 | pci_read_config_byte(pdev, E752X_DRM, &stat8); |
@@ -961,21 +1015,7 @@ static int e752x_probe1(struct pci_dev *pdev, int dev_idx)
961 | goto fail; | 1015 | goto fail; |
962 | } | 1016 | } |
963 | 1017 | ||
964 | dev = pci_get_device(PCI_VENDOR_ID_INTEL, e752x_devs[dev_idx].ctl_dev, | 1018 | e752x_init_error_reporting_regs(pvt); |
965 | NULL); | ||
966 | pvt->dev_d0f0 = dev; | ||
967 | /* find the error reporting device and clear errors */ | ||
968 | dev = pvt->dev_d0f1 = pci_dev_get(pvt->bridge_ck); | ||
969 | /* Turn off error disable & SMI in case the BIOS turned it on */ | ||
970 | pci_write_config_byte(dev, E752X_HI_ERRMASK, 0x00); | ||
971 | pci_write_config_byte(dev, E752X_HI_SMICMD, 0x00); | ||
972 | pci_write_config_word(dev, E752X_SYSBUS_ERRMASK, 0x00); | ||
973 | pci_write_config_word(dev, E752X_SYSBUS_SMICMD, 0x00); | ||
974 | pci_write_config_byte(dev, E752X_BUF_ERRMASK, 0x00); | ||
975 | pci_write_config_byte(dev, E752X_BUF_SMICMD, 0x00); | ||
976 | pci_write_config_byte(dev, E752X_DRAM_ERRMASK, 0x00); | ||
977 | pci_write_config_byte(dev, E752X_DRAM_SMICMD, 0x00); | ||
978 | |||
979 | e752x_get_error_info(mci, &discard); /* clear other MCH errors */ | 1019 | e752x_get_error_info(mci, &discard); /* clear other MCH errors */ |
980 | 1020 | ||
981 | /* get this far and it's successful */ | 1021 | /* get this far and it's successful */ |
@@ -983,20 +1023,12 @@ static int e752x_probe1(struct pci_dev *pdev, int dev_idx)
983 | return 0; | 1023 | return 0; |
984 | 1024 | ||
985 | fail: | 1025 | fail: |
986 | if (mci) { | 1026 | pci_dev_put(pvt->dev_d0f0); |
987 | if (pvt->dev_d0f0) | 1027 | pci_dev_put(pvt->dev_d0f1); |
988 | pci_dev_put(pvt->dev_d0f0); | 1028 | pci_dev_put(pvt->bridge_ck); |
989 | 1029 | edac_mc_free(mci); | |
990 | if (pvt->dev_d0f1) | ||
991 | pci_dev_put(pvt->dev_d0f1); | ||
992 | |||
993 | if (pvt->bridge_ck) | ||
994 | pci_dev_put(pvt->bridge_ck); | ||
995 | |||
996 | edac_mc_free(mci); | ||
997 | } | ||
998 | 1030 | ||
999 | return rc; | 1031 | return -ENODEV; |
1000 | } | 1032 | } |
1001 | 1033 | ||
1002 | /* returns count (>= 0), or negative on error */ | 1034 | /* returns count (>= 0), or negative on error */ |
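
The cumulative-DRB arithmetic used by e752x_init_csrows() (and, with 32 or 64 MiB units, by the e7xxx code below) reduces to the shift `value << (25 + drc_drbg - PAGE_SHIFT)`. Here is a standalone check with assumed values: PAGE_SHIFT of 12, dual-channel mode (so drc_drbg = drc_chan + 1 = 2, one DRB unit = 128 MiB), and invented DRB contents.

#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT 12	/* assumed 4 KiB pages */

int main(void)
{
	/* invented cumulative DRB bytes for eight rows */
	uint8_t drb[8] = { 2, 4, 4, 4, 6, 6, 6, 6 };
	int drc_drbg = 2;	/* dual channel: 128 MiB per DRB unit */
	unsigned long last_cumul_size = 0, cumul_size;
	int index;

	for (index = 0; index < 8; index++) {
		cumul_size = (unsigned long)drb[index] <<
			(25 + drc_drbg - PAGE_SHIFT);
		if (cumul_size == last_cumul_size)
			continue;	/* row not populated */
		printf("row %d: first_page=0x%lx last_page=0x%lx "
		       "nr_pages=%lu (%lu MiB)\n",
		       index, last_cumul_size, cumul_size - 1,
		       cumul_size - last_cumul_size,
		       (cumul_size - last_cumul_size) >> (20 - PAGE_SHIFT));
		last_cumul_size = cumul_size;
	}
	return 0;
}

With these values only rows 0, 1 and 4 are populated, each 256 MiB, which is exactly the "cumulative boundary, skip when unchanged" behaviour described in the DRB comment above.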
diff --git a/drivers/edac/e7xxx_edac.c b/drivers/edac/e7xxx_edac.c
index 1e282c843e77..9878379b4993 100644
--- a/drivers/edac/e7xxx_edac.c
+++ b/drivers/edac/e7xxx_edac.c
@@ -335,99 +335,61 @@ static void e7xxx_check(struct mem_ctl_info *mci)
335 | e7xxx_process_error_info(mci, &info, 1); | 335 | e7xxx_process_error_info(mci, &info, 1); |
336 | } | 336 | } |
337 | 337 | ||
338 | static int e7xxx_probe1(struct pci_dev *pdev, int dev_idx) | 338 | /* Return 1 if dual channel mode is active. Else return 0. */ |
339 | static inline int dual_channel_active(u32 drc, int dev_idx) | ||
339 | { | 340 | { |
340 | int rc = -ENODEV; | 341 | return (dev_idx == E7501) ? ((drc >> 22) & 0x1) : 1; |
341 | int index; | 342 | } |
342 | u16 pci_data; | ||
343 | struct mem_ctl_info *mci = NULL; | ||
344 | struct e7xxx_pvt *pvt = NULL; | ||
345 | u32 drc; | ||
346 | int drc_chan = 1; /* Number of channels 0=1chan,1=2chan */ | ||
347 | int drc_drbg = 1; /* DRB granularity 0=32mb,1=64mb */ | ||
348 | int drc_ddim; /* DRAM Data Integrity Mode 0=none,2=edac */ | ||
349 | u32 dra; | ||
350 | unsigned long last_cumul_size; | ||
351 | struct e7xxx_error_info discard; | ||
352 | |||
353 | debugf0("%s(): mci\n", __func__); | ||
354 | 343 | ||
355 | /* need to find out the number of channels */ | ||
356 | pci_read_config_dword(pdev, E7XXX_DRC, &drc); | ||
357 | 344 | ||
345 | /* Return DRB granularity (0=32mb, 1=64mb). */ | ||
346 | static inline int drb_granularity(u32 drc, int dev_idx) | ||
347 | { | ||
358 | /* only e7501 can be single channel */ | 348 | /* only e7501 can be single channel */ |
359 | if (dev_idx == E7501) { | 349 | return (dev_idx == E7501) ? ((drc >> 18) & 0x3) : 1; |
360 | drc_chan = ((drc >> 22) & 0x1); | 350 | } |
361 | drc_drbg = (drc >> 18) & 0x3; | ||
362 | } | ||
363 | |||
364 | drc_ddim = (drc >> 20) & 0x3; | ||
365 | mci = edac_mc_alloc(sizeof(*pvt), E7XXX_NR_CSROWS, drc_chan + 1); | ||
366 | |||
367 | if (mci == NULL) { | ||
368 | rc = -ENOMEM; | ||
369 | goto fail; | ||
370 | } | ||
371 | |||
372 | debugf3("%s(): init mci\n", __func__); | ||
373 | mci->mtype_cap = MEM_FLAG_RDDR; | ||
374 | mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED | | ||
375 | EDAC_FLAG_S4ECD4ED; | ||
376 | /* FIXME - what if different memory types are in different csrows? */ | ||
377 | mci->mod_name = EDAC_MOD_STR; | ||
378 | mci->mod_ver = E7XXX_REVISION; | ||
379 | mci->dev = &pdev->dev; | ||
380 | |||
381 | debugf3("%s(): init pvt\n", __func__); | ||
382 | pvt = (struct e7xxx_pvt *) mci->pvt_info; | ||
383 | pvt->dev_info = &e7xxx_devs[dev_idx]; | ||
384 | pvt->bridge_ck = pci_get_device(PCI_VENDOR_ID_INTEL, | ||
385 | pvt->dev_info->err_dev, | ||
386 | pvt->bridge_ck); | ||
387 | 351 | ||
388 | if (!pvt->bridge_ck) { | ||
389 | e7xxx_printk(KERN_ERR, "error reporting device not found:" | ||
390 | "vendor %x device 0x%x (broken BIOS?)\n", | ||
391 | PCI_VENDOR_ID_INTEL, e7xxx_devs[dev_idx].err_dev); | ||
392 | goto fail; | ||
393 | } | ||
394 | 352 | ||
395 | debugf3("%s(): more mci init\n", __func__); | 353 | static void e7xxx_init_csrows(struct mem_ctl_info *mci, struct pci_dev *pdev, |
396 | mci->ctl_name = pvt->dev_info->ctl_name; | 354 | int dev_idx, u32 drc) |
397 | mci->edac_check = e7xxx_check; | 355 | { |
398 | mci->ctl_page_to_phys = ctl_page_to_phys; | 356 | unsigned long last_cumul_size; |
357 | int index; | ||
358 | u8 value; | ||
359 | u32 dra, cumul_size; | ||
360 | int drc_chan, drc_drbg, drc_ddim, mem_dev; | ||
361 | struct csrow_info *csrow; | ||
399 | 362 | ||
400 | /* find out the device types */ | ||
401 | pci_read_config_dword(pdev, E7XXX_DRA, &dra); | 363 | pci_read_config_dword(pdev, E7XXX_DRA, &dra); |
364 | drc_chan = dual_channel_active(drc, dev_idx); | ||
365 | drc_drbg = drb_granularity(drc, dev_idx); | ||
366 | drc_ddim = (drc >> 20) & 0x3; | ||
367 | last_cumul_size = 0; | ||
402 | 368 | ||
403 | /* | 369 | /* The dram row boundary (DRB) reg values are boundary address |
404 | * The dram row boundary (DRB) reg values are boundary address | ||
405 | * for each DRAM row with a granularity of 32 or 64MB (single/dual | 370 | * for each DRAM row with a granularity of 32 or 64MB (single/dual |
406 | * channel operation). DRB regs are cumulative; therefore DRB7 will | 371 | * channel operation). DRB regs are cumulative; therefore DRB7 will |
407 | * contain the total memory contained in all eight rows. | 372 | * contain the total memory contained in all eight rows. |
408 | */ | 373 | */ |
409 | for (last_cumul_size = index = 0; index < mci->nr_csrows; index++) { | 374 | for (index = 0; index < mci->nr_csrows; index++) { |
410 | u8 value; | ||
411 | u32 cumul_size; | ||
412 | /* mem_dev 0=x8, 1=x4 */ | 375 | /* mem_dev 0=x8, 1=x4 */ |
413 | int mem_dev = (dra >> (index * 4 + 3)) & 0x1; | 376 | mem_dev = (dra >> (index * 4 + 3)) & 0x1; |
414 | struct csrow_info *csrow = &mci->csrows[index]; | 377 | csrow = &mci->csrows[index]; |
415 | 378 | ||
416 | pci_read_config_byte(pdev, E7XXX_DRB + index, &value); | 379 | pci_read_config_byte(pdev, E7XXX_DRB + index, &value); |
417 | /* convert a 64 or 32 MiB DRB to a page size. */ | 380 | /* convert a 64 or 32 MiB DRB to a page size. */ |
418 | cumul_size = value << (25 + drc_drbg - PAGE_SHIFT); | 381 | cumul_size = value << (25 + drc_drbg - PAGE_SHIFT); |
419 | debugf3("%s(): (%d) cumul_size 0x%x\n", __func__, index, | 382 | debugf3("%s(): (%d) cumul_size 0x%x\n", __func__, index, |
420 | cumul_size); | 383 | cumul_size); |
421 | |||
422 | if (cumul_size == last_cumul_size) | 384 | if (cumul_size == last_cumul_size) |
423 | continue; /* not populated */ | 385 | continue; /* not populated */ |
424 | 386 | ||
425 | csrow->first_page = last_cumul_size; | 387 | csrow->first_page = last_cumul_size; |
426 | csrow->last_page = cumul_size - 1; | 388 | csrow->last_page = cumul_size - 1; |
427 | csrow->nr_pages = cumul_size - last_cumul_size; | 389 | csrow->nr_pages = cumul_size - last_cumul_size; |
428 | last_cumul_size = cumul_size; | 390 | last_cumul_size = cumul_size; |
429 | csrow->grain = 1 << 12; /* 4KiB - resolution of CELOG */ | 391 | csrow->grain = 1 << 12; /* 4KiB - resolution of CELOG */ |
430 | csrow->mtype = MEM_RDDR; /* only one type supported */ | 392 | csrow->mtype = MEM_RDDR; /* only one type supported */ |
431 | csrow->dtype = mem_dev ? DEV_X4 : DEV_X8; | 393 | csrow->dtype = mem_dev ? DEV_X4 : DEV_X8; |
432 | 394 | ||
433 | /* | 395 | /* |
@@ -445,9 +407,54 @@ static int e7xxx_probe1(struct pci_dev *pdev, int dev_idx)
445 | } else | 407 | } else |
446 | csrow->edac_mode = EDAC_NONE; | 408 | csrow->edac_mode = EDAC_NONE; |
447 | } | 409 | } |
410 | } | ||
448 | 411 | ||
449 | mci->edac_cap |= EDAC_FLAG_NONE; | 412 | static int e7xxx_probe1(struct pci_dev *pdev, int dev_idx) |
413 | { | ||
414 | u16 pci_data; | ||
415 | struct mem_ctl_info *mci = NULL; | ||
416 | struct e7xxx_pvt *pvt = NULL; | ||
417 | u32 drc; | ||
418 | int drc_chan; | ||
419 | struct e7xxx_error_info discard; | ||
420 | |||
421 | debugf0("%s(): mci\n", __func__); | ||
422 | pci_read_config_dword(pdev, E7XXX_DRC, &drc); | ||
423 | |||
424 | drc_chan = dual_channel_active(drc, dev_idx); | ||
425 | mci = edac_mc_alloc(sizeof(*pvt), E7XXX_NR_CSROWS, drc_chan + 1); | ||
426 | |||
427 | if (mci == NULL) | ||
428 | return -ENOMEM; | ||
450 | 429 | ||
430 | debugf3("%s(): init mci\n", __func__); | ||
431 | mci->mtype_cap = MEM_FLAG_RDDR; | ||
432 | mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED | | ||
433 | EDAC_FLAG_S4ECD4ED; | ||
434 | /* FIXME - what if different memory types are in different csrows? */ | ||
435 | mci->mod_name = EDAC_MOD_STR; | ||
436 | mci->mod_ver = E7XXX_REVISION; | ||
437 | mci->dev = &pdev->dev; | ||
438 | debugf3("%s(): init pvt\n", __func__); | ||
439 | pvt = (struct e7xxx_pvt *) mci->pvt_info; | ||
440 | pvt->dev_info = &e7xxx_devs[dev_idx]; | ||
441 | pvt->bridge_ck = pci_get_device(PCI_VENDOR_ID_INTEL, | ||
442 | pvt->dev_info->err_dev, | ||
443 | pvt->bridge_ck); | ||
444 | |||
445 | if (!pvt->bridge_ck) { | ||
446 | e7xxx_printk(KERN_ERR, "error reporting device not found:" | ||
447 | "vendor %x device 0x%x (broken BIOS?)\n", | ||
448 | PCI_VENDOR_ID_INTEL, e7xxx_devs[dev_idx].err_dev); | ||
449 | goto fail0; | ||
450 | } | ||
451 | |||
452 | debugf3("%s(): more mci init\n", __func__); | ||
453 | mci->ctl_name = pvt->dev_info->ctl_name; | ||
454 | mci->edac_check = e7xxx_check; | ||
455 | mci->ctl_page_to_phys = ctl_page_to_phys; | ||
456 | e7xxx_init_csrows(mci, pdev, dev_idx, drc); | ||
457 | mci->edac_cap |= EDAC_FLAG_NONE; | ||
451 | debugf3("%s(): tolm, remapbase, remaplimit\n", __func__); | 458 | debugf3("%s(): tolm, remapbase, remaplimit\n", __func__); |
452 | /* load the top of low memory, remap base, and remap limit vars */ | 459 | /* load the top of low memory, remap base, and remap limit vars */ |
453 | pci_read_config_word(pdev, E7XXX_TOLM, &pci_data); | 460 | pci_read_config_word(pdev, E7XXX_TOLM, &pci_data); |
@@ -468,21 +475,20 @@ static int e7xxx_probe1(struct pci_dev *pdev, int dev_idx)
468 | */ | 475 | */ |
469 | if (edac_mc_add_mc(mci,0)) { | 476 | if (edac_mc_add_mc(mci,0)) { |
470 | debugf3("%s(): failed edac_mc_add_mc()\n", __func__); | 477 | debugf3("%s(): failed edac_mc_add_mc()\n", __func__); |
471 | goto fail; | 478 | goto fail1; |
472 | } | 479 | } |
473 | 480 | ||
474 | /* get this far and it's successful */ | 481 | /* get this far and it's successful */ |
475 | debugf3("%s(): success\n", __func__); | 482 | debugf3("%s(): success\n", __func__); |
476 | return 0; | 483 | return 0; |
477 | 484 | ||
478 | fail: | 485 | fail1: |
479 | if (mci != NULL) { | 486 | pci_dev_put(pvt->bridge_ck); |
480 | if(pvt != NULL && pvt->bridge_ck) | 487 | |
481 | pci_dev_put(pvt->bridge_ck); | 488 | fail0: |
482 | edac_mc_free(mci); | 489 | edac_mc_free(mci); |
483 | } | ||
484 | 490 | ||
485 | return rc; | 491 | return -ENODEV; |
486 | } | 492 | } |
487 | 493 | ||
488 | /* returns count (>= 0), or negative on error */ | 494 | /* returns count (>= 0), or negative on error */ |
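
The two new e7xxx helpers above only pick bit fields out of the DRC register: bit 22 selects dual-channel operation (E7501 only), bits 19:18 carry the DRB granularity, and bits 21:20 the data-integrity mode. A quick standalone check with an invented DRC value; the E7501 define here is a placeholder index, not the driver's real enum value.

#include <stdio.h>
#include <stdint.h>

#define E7501 1	/* placeholder device index for illustration */

static inline int dual_channel_active(uint32_t drc, int dev_idx)
{
	return (dev_idx == E7501) ? ((drc >> 22) & 0x1) : 1;
}

static inline int drb_granularity(uint32_t drc, int dev_idx)
{
	/* only e7501 can be single channel */
	return (dev_idx == E7501) ? ((drc >> 18) & 0x3) : 1;
}

int main(void)
{
	uint32_t drc = 0x00640000;	/* invented register value */
	int dev_idx = E7501;

	printf("dual channel: %d\n", dual_channel_active(drc, dev_idx));
	printf("drb granularity field: %d (0=32mb, 1=64mb)\n",
	       drb_granularity(drc, dev_idx));
	printf("ddim field: %d (0=none, 2=edac)\n",
	       (int)((drc >> 20) & 0x3));
	return 0;
}

For 0x00640000 this reports dual-channel operation, 64 MB DRB granularity and DDIM = 2, i.e. the SECDED/S4ECD4ED path in e7xxx_init_csrows().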
diff --git a/drivers/edac/i82860_edac.c b/drivers/edac/i82860_edac.c
index e2c3b8bc097b..d196dcc850a8 100644
--- a/drivers/edac/i82860_edac.c
+++ b/drivers/edac/i82860_edac.c
@@ -133,15 +133,50 @@ static void i82860_check(struct mem_ctl_info *mci)
133 | i82860_process_error_info(mci, &info, 1); | 133 | i82860_process_error_info(mci, &info, 1); |
134 | } | 134 | } |
135 | 135 | ||
136 | static int i82860_probe1(struct pci_dev *pdev, int dev_idx) | 136 | static void i82860_init_csrows(struct mem_ctl_info *mci, struct pci_dev *pdev) |
137 | { | 137 | { |
138 | int rc = -ENODEV; | ||
139 | int index; | ||
140 | struct mem_ctl_info *mci = NULL; | ||
141 | unsigned long last_cumul_size; | 138 | unsigned long last_cumul_size; |
142 | struct i82860_error_info discard; | 139 | u16 mchcfg_ddim; /* DRAM Data Integrity Mode 0=none, 2=edac */ |
140 | u16 value; | ||
141 | u32 cumul_size; | ||
142 | struct csrow_info *csrow; | ||
143 | int index; | ||
144 | |||
145 | pci_read_config_word(pdev, I82860_MCHCFG, &mchcfg_ddim); | ||
146 | mchcfg_ddim = mchcfg_ddim & 0x180; | ||
147 | last_cumul_size = 0; | ||
148 | |||
149 | /* The group row boundary (GRA) reg values are boundary address | ||
150 | * for each DRAM row with a granularity of 16MB. GRA regs are | ||
151 | * cumulative; therefore GRA15 will contain the total memory contained | ||
152 | * in all eight rows. | ||
153 | */ | ||
154 | for (index = 0; index < mci->nr_csrows; index++) { | ||
155 | csrow = &mci->csrows[index]; | ||
156 | pci_read_config_word(pdev, I82860_GBA + index * 2, &value); | ||
157 | cumul_size = (value & I82860_GBA_MASK) << | ||
158 | (I82860_GBA_SHIFT - PAGE_SHIFT); | ||
159 | debugf3("%s(): (%d) cumul_size 0x%x\n", __func__, index, | ||
160 | cumul_size); | ||
161 | |||
162 | if (cumul_size == last_cumul_size) | ||
163 | continue; /* not populated */ | ||
143 | 164 | ||
144 | u16 mchcfg_ddim; /* DRAM Data Integrity Mode 0=none,2=edac */ | 165 | csrow->first_page = last_cumul_size; |
166 | csrow->last_page = cumul_size - 1; | ||
167 | csrow->nr_pages = cumul_size - last_cumul_size; | ||
168 | last_cumul_size = cumul_size; | ||
169 | csrow->grain = 1 << 12; /* I82860_EAP has 4KiB reolution */ | ||
170 | csrow->mtype = MEM_RMBS; | ||
171 | csrow->dtype = DEV_UNKNOWN; | ||
172 | csrow->edac_mode = mchcfg_ddim ? EDAC_SECDED : EDAC_NONE; | ||
173 | } | ||
174 | } | ||
175 | |||
176 | static int i82860_probe1(struct pci_dev *pdev, int dev_idx) | ||
177 | { | ||
178 | struct mem_ctl_info *mci; | ||
179 | struct i82860_error_info discard; | ||
145 | 180 | ||
146 | /* RDRAM has channels but these don't map onto the abstractions that | 181 | /* RDRAM has channels but these don't map onto the abstractions that |
147 | edac uses. | 182 | edac uses. |
@@ -159,53 +194,15 @@ static int i82860_probe1(struct pci_dev *pdev, int dev_idx)
159 | debugf3("%s(): init mci\n", __func__); | 194 | debugf3("%s(): init mci\n", __func__); |
160 | mci->dev = &pdev->dev; | 195 | mci->dev = &pdev->dev; |
161 | mci->mtype_cap = MEM_FLAG_DDR; | 196 | mci->mtype_cap = MEM_FLAG_DDR; |
162 | |||
163 | mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED; | 197 | mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED; |
164 | /* I"m not sure about this but I think that all RDRAM is SECDED */ | 198 | /* I"m not sure about this but I think that all RDRAM is SECDED */ |
165 | mci->edac_cap = EDAC_FLAG_SECDED; | 199 | mci->edac_cap = EDAC_FLAG_SECDED; |
166 | /* adjust FLAGS */ | ||
167 | |||
168 | mci->mod_name = EDAC_MOD_STR; | 200 | mci->mod_name = EDAC_MOD_STR; |
169 | mci->mod_ver = I82860_REVISION; | 201 | mci->mod_ver = I82860_REVISION; |
170 | mci->ctl_name = i82860_devs[dev_idx].ctl_name; | 202 | mci->ctl_name = i82860_devs[dev_idx].ctl_name; |
171 | mci->edac_check = i82860_check; | 203 | mci->edac_check = i82860_check; |
172 | mci->ctl_page_to_phys = NULL; | 204 | mci->ctl_page_to_phys = NULL; |
173 | 205 | i82860_init_csrows(mci, pdev); | |
174 | pci_read_config_word(pdev, I82860_MCHCFG, &mchcfg_ddim); | ||
175 | mchcfg_ddim = mchcfg_ddim & 0x180; | ||
176 | |||
177 | /* | ||
178 | * The group row boundary (GRA) reg values are boundary address | ||
179 | * for each DRAM row with a granularity of 16MB. GRA regs are | ||
180 | * cumulative; therefore GRA15 will contain the total memory contained | ||
181 | * in all eight rows. | ||
182 | */ | ||
183 | for (last_cumul_size = index = 0; index < mci->nr_csrows; index++) { | ||
184 | u16 value; | ||
185 | u32 cumul_size; | ||
186 | struct csrow_info *csrow = &mci->csrows[index]; | ||
187 | |||
188 | pci_read_config_word(pdev, I82860_GBA + index * 2, | ||
189 | &value); | ||
190 | |||
191 | cumul_size = (value & I82860_GBA_MASK) << | ||
192 | (I82860_GBA_SHIFT - PAGE_SHIFT); | ||
193 | debugf3("%s(): (%d) cumul_size 0x%x\n", __func__, index, | ||
194 | cumul_size); | ||
195 | |||
196 | if (cumul_size == last_cumul_size) | ||
197 | continue; /* not populated */ | ||
198 | |||
199 | csrow->first_page = last_cumul_size; | ||
200 | csrow->last_page = cumul_size - 1; | ||
201 | csrow->nr_pages = cumul_size - last_cumul_size; | ||
202 | last_cumul_size = cumul_size; | ||
203 | csrow->grain = 1 << 12; /* I82860_EAP has 4KiB reolution */ | ||
204 | csrow->mtype = MEM_RMBS; | ||
205 | csrow->dtype = DEV_UNKNOWN; | ||
206 | csrow->edac_mode = mchcfg_ddim ? EDAC_SECDED : EDAC_NONE; | ||
207 | } | ||
208 | |||
209 | i82860_get_error_info(mci, &discard); /* clear counters */ | 206 | i82860_get_error_info(mci, &discard); /* clear counters */ |
210 | 207 | ||
211 | /* Here we assume that we will never see multiple instances of this | 208 | /* Here we assume that we will never see multiple instances of this |
@@ -213,14 +210,17 @@ static int i82860_probe1(struct pci_dev *pdev, int dev_idx)
213 | */ | 210 | */ |
214 | if (edac_mc_add_mc(mci,0)) { | 211 | if (edac_mc_add_mc(mci,0)) { |
215 | debugf3("%s(): failed edac_mc_add_mc()\n", __func__); | 212 | debugf3("%s(): failed edac_mc_add_mc()\n", __func__); |
216 | edac_mc_free(mci); | 213 | goto fail; |
217 | } else { | ||
218 | /* get this far and it's successful */ | ||
219 | debugf3("%s(): success\n", __func__); | ||
220 | rc = 0; | ||
221 | } | 214 | } |
222 | 215 | ||
223 | return rc; | 216 | /* get this far and it's successful */ |
217 | debugf3("%s(): success\n", __func__); | ||
218 | |||
219 | return 0; | ||
220 | |||
221 | fail: | ||
222 | edac_mc_free(mci); | ||
223 | return -ENODEV; | ||
224 | } | 224 | } |
225 | 225 | ||
226 | /* returns count (>= 0), or negative on error */ | 226 | /* returns count (>= 0), or negative on error */ |
diff --git a/drivers/edac/i82875p_edac.c b/drivers/edac/i82875p_edac.c
index 2be18ca96408..6787403463a1 100644
--- a/drivers/edac/i82875p_edac.c
+++ b/drivers/edac/i82875p_edac.c
@@ -265,116 +265,109 @@ static void i82875p_check(struct mem_ctl_info *mci)
265 | extern int pci_proc_attach_device(struct pci_dev *); | 265 | extern int pci_proc_attach_device(struct pci_dev *); |
266 | #endif | 266 | #endif |
267 | 267 | ||
268 | static int i82875p_probe1(struct pci_dev *pdev, int dev_idx) | 268 | /* Return 0 on success or 1 on failure. */ |
269 | static int i82875p_setup_overfl_dev(struct pci_dev *pdev, | ||
270 | struct pci_dev **ovrfl_pdev, void __iomem **ovrfl_window) | ||
269 | { | 271 | { |
270 | int rc = -ENODEV; | 272 | struct pci_dev *dev; |
271 | int index; | 273 | void __iomem *window; |
272 | struct mem_ctl_info *mci = NULL; | ||
273 | struct i82875p_pvt *pvt = NULL; | ||
274 | unsigned long last_cumul_size; | ||
275 | struct pci_dev *ovrfl_pdev; | ||
276 | void __iomem *ovrfl_window = NULL; | ||
277 | u32 drc; | ||
278 | u32 drc_chan; /* Number of channels 0=1chan,1=2chan */ | ||
279 | u32 nr_chans; | ||
280 | u32 drc_ddim; /* DRAM Data Integrity Mode 0=none,2=edac */ | ||
281 | struct i82875p_error_info discard; | ||
282 | 274 | ||
283 | debugf0("%s()\n", __func__); | 275 | *ovrfl_pdev = NULL; |
284 | ovrfl_pdev = pci_get_device(PCI_VEND_DEV(INTEL, 82875_6), NULL); | 276 | *ovrfl_window = NULL; |
277 | dev = pci_get_device(PCI_VEND_DEV(INTEL, 82875_6), NULL); | ||
285 | 278 | ||
286 | if (!ovrfl_pdev) { | 279 | if (dev == NULL) { |
287 | /* | 280 | /* Intel tells BIOS developers to hide device 6 which |
288 | * Intel tells BIOS developers to hide device 6 which | ||
289 | * configures the overflow device access containing | 281 | * configures the overflow device access containing |
290 | * the DRBs - this is where we expose device 6. | 282 | * the DRBs - this is where we expose device 6. |
291 | * http://www.x86-secret.com/articles/tweak/pat/patsecrets-2.htm | 283 | * http://www.x86-secret.com/articles/tweak/pat/patsecrets-2.htm |
292 | */ | 284 | */ |
293 | pci_write_bits8(pdev, 0xf4, 0x2, 0x2); | 285 | pci_write_bits8(pdev, 0xf4, 0x2, 0x2); |
294 | ovrfl_pdev = | 286 | dev = pci_scan_single_device(pdev->bus, PCI_DEVFN(6, 0)); |
295 | pci_scan_single_device(pdev->bus, PCI_DEVFN(6, 0)); | ||
296 | 287 | ||
297 | if (!ovrfl_pdev) | 288 | if (dev == NULL) |
298 | return -ENODEV; | 289 | return 1; |
299 | } | 290 | } |
300 | 291 | ||
292 | *ovrfl_pdev = dev; | ||
293 | |||
301 | #ifdef CONFIG_PROC_FS | 294 | #ifdef CONFIG_PROC_FS |
302 | if (!ovrfl_pdev->procent && pci_proc_attach_device(ovrfl_pdev)) { | 295 | if ((dev->procent == NULL) && pci_proc_attach_device(dev)) { |
303 | i82875p_printk(KERN_ERR, | 296 | i82875p_printk(KERN_ERR, "%s(): Failed to attach overflow " |
304 | "%s(): Failed to attach overflow device\n", __func__); | 297 | "device\n", __func__); |
305 | return -ENODEV; | 298 | return 1; |
306 | } | 299 | } |
307 | #endif | 300 | #endif /* CONFIG_PROC_FS */ |
308 | /* CONFIG_PROC_FS */ | 301 | if (pci_enable_device(dev)) { |
309 | if (pci_enable_device(ovrfl_pdev)) { | 302 | i82875p_printk(KERN_ERR, "%s(): Failed to enable overflow " |
310 | i82875p_printk(KERN_ERR, | 303 | "device\n", __func__); |
311 | "%s(): Failed to enable overflow device\n", __func__); | 304 | return 1; |
312 | return -ENODEV; | ||
313 | } | 305 | } |
314 | 306 | ||
315 | if (pci_request_regions(ovrfl_pdev, pci_name(ovrfl_pdev))) { | 307 | if (pci_request_regions(dev, pci_name(dev))) { |
316 | #ifdef CORRECT_BIOS | 308 | #ifdef CORRECT_BIOS |
317 | goto fail0; | 309 | goto fail0; |
318 | #endif | 310 | #endif |
319 | } | 311 | } |
320 | 312 | ||
321 | /* cache is irrelevant for PCI bus reads/writes */ | 313 | /* cache is irrelevant for PCI bus reads/writes */ |
322 | ovrfl_window = ioremap_nocache(pci_resource_start(ovrfl_pdev, 0), | 314 | window = ioremap_nocache(pci_resource_start(dev, 0), |
323 | pci_resource_len(ovrfl_pdev, 0)); | 315 | pci_resource_len(dev, 0)); |
324 | 316 | ||
325 | if (!ovrfl_window) { | 317 | if (window == NULL) { |
326 | i82875p_printk(KERN_ERR, "%s(): Failed to ioremap bar6\n", | 318 | i82875p_printk(KERN_ERR, "%s(): Failed to ioremap bar6\n", |
327 | __func__); | 319 | __func__); |
328 | goto fail1; | 320 | goto fail1; |
329 | } | 321 | } |
330 | 322 | ||
331 | /* need to find out the number of channels */ | 323 | *ovrfl_window = window; |
332 | drc = readl(ovrfl_window + I82875P_DRC); | 324 | return 0; |
333 | drc_chan = ((drc >> 21) & 0x1); | ||
334 | nr_chans = drc_chan + 1; | ||
335 | 325 | ||
336 | drc_ddim = (drc >> 18) & 0x1; | 326 | fail1: |
337 | mci = edac_mc_alloc(sizeof(*pvt), I82875P_NR_CSROWS(nr_chans), | 327 | pci_release_regions(dev); |
338 | nr_chans); | ||
339 | 328 | ||
340 | if (!mci) { | 329 | #ifdef CORRECT_BIOS |
341 | rc = -ENOMEM; | 330 | fail0: |
342 | goto fail2; | 331 | pci_disable_device(dev); |
343 | } | 332 | #endif |
333 | /* NOTE: the ovrfl proc entry and pci_dev are intentionally left */ | ||
334 | return 1; | ||
335 | } | ||
344 | 336 | ||
345 | debugf3("%s(): init mci\n", __func__); | ||
346 | mci->dev = &pdev->dev; | ||
347 | mci->mtype_cap = MEM_FLAG_DDR; | ||
348 | mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED; | ||
349 | mci->edac_cap = EDAC_FLAG_UNKNOWN; | ||
350 | /* adjust FLAGS */ | ||
351 | 337 | ||
352 | mci->mod_name = EDAC_MOD_STR; | 338 | /* Return 1 if dual channel mode is active. Else return 0. */ |
353 | mci->mod_ver = I82875P_REVISION; | 339 | static inline int dual_channel_active(u32 drc) |
354 | mci->ctl_name = i82875p_devs[dev_idx].ctl_name; | 340 | { |
355 | mci->edac_check = i82875p_check; | 341 | return (drc >> 21) & 0x1; |
356 | mci->ctl_page_to_phys = NULL; | 342 | } |
357 | debugf3("%s(): init pvt\n", __func__); | ||
358 | pvt = (struct i82875p_pvt *) mci->pvt_info; | ||
359 | pvt->ovrfl_pdev = ovrfl_pdev; | ||
360 | pvt->ovrfl_window = ovrfl_window; | ||
361 | 343 | ||
362 | /* | 344 | |
363 | * The dram row boundary (DRB) reg values are boundary address | 345 | static void i82875p_init_csrows(struct mem_ctl_info *mci, |
346 | struct pci_dev *pdev, void __iomem *ovrfl_window, u32 drc) | ||
347 | { | ||
348 | struct csrow_info *csrow; | ||
349 | unsigned long last_cumul_size; | ||
350 | u8 value; | ||
351 | u32 drc_ddim; /* DRAM Data Integrity Mode 0=none,2=edac */ | ||
352 | u32 cumul_size; | ||
353 | int index; | ||
354 | |||
355 | drc_ddim = (drc >> 18) & 0x1; | ||
356 | last_cumul_size = 0; | ||
357 | |||
358 | /* The dram row boundary (DRB) reg values are boundary address | ||
364 | * for each DRAM row with a granularity of 32 or 64MB (single/dual | 359 | * for each DRAM row with a granularity of 32 or 64MB (single/dual |
365 | * channel operation). DRB regs are cumulative; therefore DRB7 will | 360 | * channel operation). DRB regs are cumulative; therefore DRB7 will |
366 | * contain the total memory contained in all eight rows. | 361 | * contain the total memory contained in all eight rows. |
367 | */ | 362 | */ |
368 | for (last_cumul_size = index = 0; index < mci->nr_csrows; index++) { | 363 | |
369 | u8 value; | 364 | for (index = 0; index < mci->nr_csrows; index++) { |
370 | u32 cumul_size; | 365 | csrow = &mci->csrows[index]; |
371 | struct csrow_info *csrow = &mci->csrows[index]; | ||
372 | 366 | ||
373 | value = readb(ovrfl_window + I82875P_DRB + index); | 367 | value = readb(ovrfl_window + I82875P_DRB + index); |
374 | cumul_size = value << (I82875P_DRB_SHIFT - PAGE_SHIFT); | 368 | cumul_size = value << (I82875P_DRB_SHIFT - PAGE_SHIFT); |
375 | debugf3("%s(): (%d) cumul_size 0x%x\n", __func__, index, | 369 | debugf3("%s(): (%d) cumul_size 0x%x\n", __func__, index, |
376 | cumul_size); | 370 | cumul_size); |
377 | |||
378 | if (cumul_size == last_cumul_size) | 371 | if (cumul_size == last_cumul_size) |
379 | continue; /* not populated */ | 372 | continue; /* not populated */ |
380 | 373 | ||
@@ -382,12 +375,54 @@ static int i82875p_probe1(struct pci_dev *pdev, int dev_idx)
382 | csrow->last_page = cumul_size - 1; | 375 | csrow->last_page = cumul_size - 1; |
383 | csrow->nr_pages = cumul_size - last_cumul_size; | 376 | csrow->nr_pages = cumul_size - last_cumul_size; |
384 | last_cumul_size = cumul_size; | 377 | last_cumul_size = cumul_size; |
385 | csrow->grain = 1 << 12; /* I82875P_EAP has 4KiB reolution */ | 378 | csrow->grain = 1 << 12; /* I82875P_EAP has 4KiB reolution */ |
386 | csrow->mtype = MEM_DDR; | 379 | csrow->mtype = MEM_DDR; |
387 | csrow->dtype = DEV_UNKNOWN; | 380 | csrow->dtype = DEV_UNKNOWN; |
388 | csrow->edac_mode = drc_ddim ? EDAC_SECDED : EDAC_NONE; | 381 | csrow->edac_mode = drc_ddim ? EDAC_SECDED : EDAC_NONE; |
389 | } | 382 | } |
383 | } | ||
384 | |||
385 | static int i82875p_probe1(struct pci_dev *pdev, int dev_idx) | ||
386 | { | ||
387 | int rc = -ENODEV; | ||
388 | struct mem_ctl_info *mci; | ||
389 | struct i82875p_pvt *pvt; | ||
390 | struct pci_dev *ovrfl_pdev; | ||
391 | void __iomem *ovrfl_window; | ||
392 | u32 drc; | ||
393 | u32 nr_chans; | ||
394 | struct i82875p_error_info discard; | ||
395 | |||
396 | debugf0("%s()\n", __func__); | ||
397 | ovrfl_pdev = pci_get_device(PCI_VEND_DEV(INTEL, 82875_6), NULL); | ||
398 | |||
399 | if (i82875p_setup_overfl_dev(pdev, &ovrfl_pdev, &ovrfl_window)) | ||
400 | return -ENODEV; | ||
401 | drc = readl(ovrfl_window + I82875P_DRC); | ||
402 | nr_chans = dual_channel_active(drc) + 1; | ||
403 | mci = edac_mc_alloc(sizeof(*pvt), I82875P_NR_CSROWS(nr_chans), | ||
404 | nr_chans); | ||
405 | |||
406 | if (!mci) { | ||
407 | rc = -ENOMEM; | ||
408 | goto fail0; | ||
409 | } | ||
390 | 410 | ||
411 | debugf3("%s(): init mci\n", __func__); | ||
412 | mci->dev = &pdev->dev; | ||
413 | mci->mtype_cap = MEM_FLAG_DDR; | ||
414 | mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED; | ||
415 | mci->edac_cap = EDAC_FLAG_UNKNOWN; | ||
416 | mci->mod_name = EDAC_MOD_STR; | ||
417 | mci->mod_ver = I82875P_REVISION; | ||
418 | mci->ctl_name = i82875p_devs[dev_idx].ctl_name; | ||
419 | mci->edac_check = i82875p_check; | ||
420 | mci->ctl_page_to_phys = NULL; | ||
421 | debugf3("%s(): init pvt\n", __func__); | ||
422 | pvt = (struct i82875p_pvt *) mci->pvt_info; | ||
423 | pvt->ovrfl_pdev = ovrfl_pdev; | ||
424 | pvt->ovrfl_window = ovrfl_window; | ||
425 | i82875p_init_csrows(mci, pdev, ovrfl_window, drc); | ||
391 | i82875p_get_error_info(mci, &discard); /* clear counters */ | 426 | i82875p_get_error_info(mci, &discard); /* clear counters */ |
392 | 427 | ||
393 | /* Here we assume that we will never see multiple instances of this | 428 | /* Here we assume that we will never see multiple instances of this |
@@ -395,25 +430,20 @@ static int i82875p_probe1(struct pci_dev *pdev, int dev_idx)
395 | */ | 430 | */ |
396 | if (edac_mc_add_mc(mci,0)) { | 431 | if (edac_mc_add_mc(mci,0)) { |
397 | debugf3("%s(): failed edac_mc_add_mc()\n", __func__); | 432 | debugf3("%s(): failed edac_mc_add_mc()\n", __func__); |
398 | goto fail3; | 433 | goto fail1; |
399 | } | 434 | } |
400 | 435 | ||
401 | /* get this far and it's successful */ | 436 | /* get this far and it's successful */ |
402 | debugf3("%s(): success\n", __func__); | 437 | debugf3("%s(): success\n", __func__); |
403 | return 0; | 438 | return 0; |
404 | 439 | ||
405 | fail3: | 440 | fail1: |
406 | edac_mc_free(mci); | 441 | edac_mc_free(mci); |
407 | 442 | ||
408 | fail2: | 443 | fail0: |
409 | iounmap(ovrfl_window); | 444 | iounmap(ovrfl_window); |
410 | |||
411 | fail1: | ||
412 | pci_release_regions(ovrfl_pdev); | 445 | pci_release_regions(ovrfl_pdev); |
413 | 446 | ||
414 | #ifdef CORRECT_BIOS | ||
415 | fail0: | ||
416 | #endif | ||
417 | pci_disable_device(ovrfl_pdev); | 447 | pci_disable_device(ovrfl_pdev); |
418 | /* NOTE: the ovrfl proc entry and pci_dev are intentionally left */ | 448 | /* NOTE: the ovrfl proc entry and pci_dev are intentionally left */ |
419 | return rc; | 449 | return rc; |
diff --git a/drivers/edac/r82600_edac.c b/drivers/edac/r82600_edac.c
index eb3aa615dc57..fecdb2c9ee28 100644
--- a/drivers/edac/r82600_edac.c
+++ b/drivers/edac/r82600_edac.c
@@ -205,25 +205,72 @@ static void r82600_check(struct mem_ctl_info *mci)
205 | r82600_process_error_info(mci, &info, 1); | 205 | r82600_process_error_info(mci, &info, 1); |
206 | } | 206 | } |
207 | 207 | ||
208 | static int r82600_probe1(struct pci_dev *pdev, int dev_idx) | 208 | static inline int ecc_enabled(u8 dramcr) |
209 | { | 209 | { |
210 | int rc = -ENODEV; | 210 | return dramcr & BIT(5); |
211 | } | ||
212 | |||
213 | static void r82600_init_csrows(struct mem_ctl_info *mci, struct pci_dev *pdev, | ||
214 | u8 dramcr) | ||
215 | { | ||
216 | struct csrow_info *csrow; | ||
211 | int index; | 217 | int index; |
212 | struct mem_ctl_info *mci = NULL; | 218 | u8 drbar; /* SDRAM Row Boundry Address Register */ |
219 | u32 row_high_limit, row_high_limit_last; | ||
220 | u32 reg_sdram, ecc_on, row_base; | ||
221 | |||
222 | ecc_on = ecc_enabled(dramcr); | ||
223 | reg_sdram = dramcr & BIT(4); | ||
224 | row_high_limit_last = 0; | ||
225 | |||
226 | for (index = 0; index < mci->nr_csrows; index++) { | ||
227 | csrow = &mci->csrows[index]; | ||
228 | |||
229 | /* find the DRAM Chip Select Base address and mask */ | ||
230 | pci_read_config_byte(pdev, R82600_DRBA + index, &drbar); | ||
231 | |||
232 | debugf1("%s() Row=%d DRBA = %#0x\n", __func__, index, drbar); | ||
233 | |||
234 | row_high_limit = ((u32) drbar << 24); | ||
235 | /* row_high_limit = ((u32)drbar << 24) | 0xffffffUL; */ | ||
236 | |||
237 | debugf1("%s() Row=%d, Boundry Address=%#0x, Last = %#0x\n", | ||
238 | __func__, index, row_high_limit, row_high_limit_last); | ||
239 | |||
240 | /* Empty row [p.57] */ | ||
241 | if (row_high_limit == row_high_limit_last) | ||
242 | continue; | ||
243 | |||
244 | row_base = row_high_limit_last; | ||
245 | |||
246 | csrow->first_page = row_base >> PAGE_SHIFT; | ||
247 | csrow->last_page = (row_high_limit >> PAGE_SHIFT) - 1; | ||
248 | csrow->nr_pages = csrow->last_page - csrow->first_page + 1; | ||
249 | /* Error address is top 19 bits - so granularity is * | ||
250 | * 14 bits */ | ||
251 | csrow->grain = 1 << 14; | ||
252 | csrow->mtype = reg_sdram ? MEM_RDDR : MEM_DDR; | ||
253 | /* FIXME - check that this is unknowable with this chipset */ | ||
254 | csrow->dtype = DEV_UNKNOWN; | ||
255 | |||
256 | /* Mode is global on 82600 */ | ||
257 | csrow->edac_mode = ecc_on ? EDAC_SECDED : EDAC_NONE; | ||
258 | row_high_limit_last = row_high_limit; | ||
259 | } | ||
260 | } | ||
261 | |||
262 | static int r82600_probe1(struct pci_dev *pdev, int dev_idx) | ||
263 | { | ||
264 | struct mem_ctl_info *mci; | ||
213 | u8 dramcr; | 265 | u8 dramcr; |
214 | u32 ecc_on; | ||
215 | u32 reg_sdram; | ||
216 | u32 eapr; | 266 | u32 eapr; |
217 | u32 scrub_disabled; | 267 | u32 scrub_disabled; |
218 | u32 sdram_refresh_rate; | 268 | u32 sdram_refresh_rate; |
219 | u32 row_high_limit_last = 0; | ||
220 | struct r82600_error_info discard; | 269 | struct r82600_error_info discard; |
221 | 270 | ||
222 | debugf0("%s()\n", __func__); | 271 | debugf0("%s()\n", __func__); |
223 | pci_read_config_byte(pdev, R82600_DRAMC, &dramcr); | 272 | pci_read_config_byte(pdev, R82600_DRAMC, &dramcr); |
224 | pci_read_config_dword(pdev, R82600_EAP, &eapr); | 273 | pci_read_config_dword(pdev, R82600_EAP, &eapr); |
225 | ecc_on = dramcr & BIT(5); | ||
226 | reg_sdram = dramcr & BIT(4); | ||
227 | scrub_disabled = eapr & BIT(31); | 274 | scrub_disabled = eapr & BIT(31); |
228 | sdram_refresh_rate = dramcr & (BIT(0) | BIT(1)); | 275 | sdram_refresh_rate = dramcr & (BIT(0) | BIT(1)); |
229 | debugf2("%s(): sdram refresh rate = %#0x\n", __func__, | 276 | debugf2("%s(): sdram refresh rate = %#0x\n", __func__, |
@@ -231,10 +278,8 @@ static int r82600_probe1(struct pci_dev *pdev, int dev_idx)
231 | debugf2("%s(): DRAMC register = %#0x\n", __func__, dramcr); | 278 | debugf2("%s(): DRAMC register = %#0x\n", __func__, dramcr); |
232 | mci = edac_mc_alloc(0, R82600_NR_CSROWS, R82600_NR_CHANS); | 279 | mci = edac_mc_alloc(0, R82600_NR_CSROWS, R82600_NR_CHANS); |
233 | 280 | ||
234 | if (mci == NULL) { | 281 | if (mci == NULL) |
235 | rc = -ENOMEM; | 282 | return -ENOMEM; |
236 | goto fail; | ||
237 | } | ||
238 | 283 | ||
239 | debugf0("%s(): mci = %p\n", __func__, mci); | 284 | debugf0("%s(): mci = %p\n", __func__, mci); |
240 | mci->dev = &pdev->dev; | 285 | mci->dev = &pdev->dev; |
@@ -250,7 +295,7 @@ static int r82600_probe1(struct pci_dev *pdev, int dev_idx)
250 | * is possible. */ | 295 | * is possible. */ |
251 | mci->edac_cap = EDAC_FLAG_NONE | EDAC_FLAG_EC | EDAC_FLAG_SECDED; | 296 | mci->edac_cap = EDAC_FLAG_NONE | EDAC_FLAG_EC | EDAC_FLAG_SECDED; |
252 | 297 | ||
253 | if (ecc_on) { | 298 | if (ecc_enabled(dramcr)) { |
254 | if (scrub_disabled) | 299 | if (scrub_disabled) |
255 | debugf3("%s(): mci = %p - Scrubbing disabled! EAP: " | 300 | debugf3("%s(): mci = %p - Scrubbing disabled! EAP: " |
256 | "%#0x\n", __func__, mci, eapr); | 301 | "%#0x\n", __func__, mci, eapr); |
@@ -262,46 +307,7 @@ static int r82600_probe1(struct pci_dev *pdev, int dev_idx)
262 | mci->ctl_name = "R82600"; | 307 | mci->ctl_name = "R82600"; |
263 | mci->edac_check = r82600_check; | 308 | mci->edac_check = r82600_check; |
264 | mci->ctl_page_to_phys = NULL; | 309 | mci->ctl_page_to_phys = NULL; |
265 | 310 | r82600_init_csrows(mci, pdev, dramcr); | |
266 | for (index = 0; index < mci->nr_csrows; index++) { | ||
267 | struct csrow_info *csrow = &mci->csrows[index]; | ||
268 | u8 drbar; /* sDram Row Boundry Address Register */ | ||
269 | u32 row_high_limit; | ||
270 | u32 row_base; | ||
271 | |||
272 | /* find the DRAM Chip Select Base address and mask */ | ||
273 | pci_read_config_byte(pdev, R82600_DRBA + index, &drbar); | ||
274 | |||
275 | debugf1("MC%d: %s() Row=%d DRBA = %#0x\n", mci->mc_idx, | ||
276 | __func__, index, drbar); | ||
277 | |||
278 | row_high_limit = ((u32) drbar << 24); | ||
279 | /* row_high_limit = ((u32)drbar << 24) | 0xffffffUL; */ | ||
280 | |||
281 | debugf1("MC%d: %s() Row=%d, Boundry Address=%#0x, Last = " | ||
282 | "%#0x \n", mci->mc_idx, __func__, index, | ||
283 | row_high_limit, row_high_limit_last); | ||
284 | |||
285 | /* Empty row [p.57] */ | ||
286 | if (row_high_limit == row_high_limit_last) | ||
287 | continue; | ||
288 | |||
289 | row_base = row_high_limit_last; | ||
290 | csrow->first_page = row_base >> PAGE_SHIFT; | ||
291 | csrow->last_page = (row_high_limit >> PAGE_SHIFT) - 1; | ||
292 | csrow->nr_pages = csrow->last_page - csrow->first_page + 1; | ||
293 | /* Error address is top 19 bits - so granularity is * | ||
294 | * 14 bits */ | ||
295 | csrow->grain = 1 << 14; | ||
296 | csrow->mtype = reg_sdram ? MEM_RDDR : MEM_DDR; | ||
297 | /* FIXME - check that this is unknowable with this chipset */ | ||
298 | csrow->dtype = DEV_UNKNOWN; | ||
299 | |||
300 | /* Mode is global on 82600 */ | ||
301 | csrow->edac_mode = ecc_on ? EDAC_SECDED : EDAC_NONE; | ||
302 | row_high_limit_last = row_high_limit; | ||
303 | } | ||
304 | |||
305 | r82600_get_error_info(mci, &discard); /* clear counters */ | 311 | r82600_get_error_info(mci, &discard); /* clear counters */ |
306 | 312 | ||
307 | /* Here we assume that we will never see multiple instances of this | 313 | /* Here we assume that we will never see multiple instances of this |
@@ -324,10 +330,8 @@ static int r82600_probe1(struct pci_dev *pdev, int dev_idx)
324 | return 0; | 330 | return 0; |
325 | 331 | ||
326 | fail: | 332 | fail: |
327 | if (mci) | 333 | edac_mc_free(mci); |
328 | edac_mc_free(mci); | 334 | return -ENODEV; |
329 | |||
330 | return rc; | ||
331 | } | 335 | } |
332 | 336 | ||
333 | /* returns count (>= 0), or negative on error */ | 337 | /* returns count (>= 0), or negative on error */ |
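
As a closing sanity check, the r82600 row decode above works in 16 MiB steps: each DRBA byte shifted left by 24 is the exclusive upper boundary of a row, and a boundary equal to the previous one marks an empty row. The standalone arithmetic below uses invented DRBA values and an assumed PAGE_SHIFT of 12.

#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT 12	/* assumed 4 KiB pages */

int main(void)
{
	/* invented DRBA bytes: 128 MiB in row 0, another 256 MiB in row 1,
	 * remaining rows empty */
	uint8_t drba[4] = { 0x08, 0x18, 0x18, 0x18 };
	uint32_t row_high_limit, row_high_limit_last = 0;
	unsigned long first_page, last_page;
	int index;

	for (index = 0; index < 4; index++) {
		row_high_limit = (uint32_t)drba[index] << 24;
		if (row_high_limit == row_high_limit_last)
			continue;	/* empty row */
		first_page = row_high_limit_last >> PAGE_SHIFT;
		last_page = (row_high_limit >> PAGE_SHIFT) - 1;
		printf("row %d: first_page=0x%lx last_page=0x%lx nr_pages=%lu\n",
		       index, first_page, last_page,
		       last_page - first_page + 1);
		row_high_limit_last = row_high_limit;
	}
	return 0;
}

This prints a 32768-page (128 MiB) row 0 and a 65536-page (256 MiB) row 1, with rows 2 and 3 skipped as empty, matching the boundary-comparison logic in r82600_init_csrows().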