-rw-r--r--	drivers/edac/amd76x_edac.c	|  74
-rw-r--r--	drivers/edac/e752x_edac.c	| 322
-rw-r--r--	drivers/edac/e7xxx_edac.c	| 158
-rw-r--r--	drivers/edac/i82860_edac.c	| 102
-rw-r--r--	drivers/edac/i82875p_edac.c	| 188
-rw-r--r--	drivers/edac/r82600_edac.c	| 118
6 files changed, 518 insertions(+), 444 deletions(-)
diff --git a/drivers/edac/amd76x_edac.c b/drivers/edac/amd76x_edac.c
index 303cb500b377..702141c5501b 100644
--- a/drivers/edac/amd76x_edac.c
+++ b/drivers/edac/amd76x_edac.c
@@ -182,6 +182,38 @@ static void amd76x_check(struct mem_ctl_info *mci)
182 amd76x_process_error_info(mci, &info, 1); 182 amd76x_process_error_info(mci, &info, 1);
183} 183}
184 184
185static void amd76x_init_csrows(struct mem_ctl_info *mci, struct pci_dev *pdev,
186 enum edac_type edac_mode)
187{
188 struct csrow_info *csrow;
189 u32 mba, mba_base, mba_mask, dms;
190 int index;
191
192 for (index = 0; index < mci->nr_csrows; index++) {
193 csrow = &mci->csrows[index];
194
195 /* find the DRAM Chip Select Base address and mask */
196 pci_read_config_dword(pdev,
197 AMD76X_MEM_BASE_ADDR + (index * 4),
198 &mba);
199
200 if (!(mba & BIT(0)))
201 continue;
202
203 mba_base = mba & 0xff800000UL;
204 mba_mask = ((mba & 0xff80) << 16) | 0x7fffffUL;
205 pci_read_config_dword(pdev, AMD76X_DRAM_MODE_STATUS, &dms);
206 csrow->first_page = mba_base >> PAGE_SHIFT;
207 csrow->nr_pages = (mba_mask + 1) >> PAGE_SHIFT;
208 csrow->last_page = csrow->first_page + csrow->nr_pages - 1;
209 csrow->page_mask = mba_mask >> PAGE_SHIFT;
210 csrow->grain = csrow->nr_pages << PAGE_SHIFT;
211 csrow->mtype = MEM_RDDR;
212 csrow->dtype = ((dms >> index) & 0x1) ? DEV_X4 : DEV_UNKNOWN;
213 csrow->edac_mode = edac_mode;
214 }
215}
216
185/** 217/**
186 * amd76x_probe1 - Perform set up for detected device 218 * amd76x_probe1 - Perform set up for detected device
187 * @pdev; PCI device detected 219 * @pdev; PCI device detected
@@ -193,15 +225,13 @@ static void amd76x_check(struct mem_ctl_info *mci)
193 */ 225 */
194static int amd76x_probe1(struct pci_dev *pdev, int dev_idx) 226static int amd76x_probe1(struct pci_dev *pdev, int dev_idx)
195{ 227{
196 int rc = -ENODEV; 228 static const enum edac_type ems_modes[] = {
197 int index;
198 struct mem_ctl_info *mci = NULL;
199 enum edac_type ems_modes[] = {
200 EDAC_NONE, 229 EDAC_NONE,
201 EDAC_EC, 230 EDAC_EC,
202 EDAC_SECDED, 231 EDAC_SECDED,
203 EDAC_SECDED 232 EDAC_SECDED
204 }; 233 };
234 struct mem_ctl_info *mci = NULL;
205 u32 ems; 235 u32 ems;
206 u32 ems_mode; 236 u32 ems_mode;
207 struct amd76x_error_info discard; 237 struct amd76x_error_info discard;
@@ -212,8 +242,7 @@ static int amd76x_probe1(struct pci_dev *pdev, int dev_idx)
212 mci = edac_mc_alloc(0, AMD76X_NR_CSROWS, AMD76X_NR_CHANS); 242 mci = edac_mc_alloc(0, AMD76X_NR_CSROWS, AMD76X_NR_CHANS);
213 243
214 if (mci == NULL) { 244 if (mci == NULL) {
215 rc = -ENOMEM; 245 return -ENOMEM;
216 goto fail;
217 } 246 }
218 247
219 debugf0("%s(): mci = %p\n", __func__, mci); 248 debugf0("%s(): mci = %p\n", __func__, mci);
@@ -228,33 +257,7 @@ static int amd76x_probe1(struct pci_dev *pdev, int dev_idx)
228 mci->edac_check = amd76x_check; 257 mci->edac_check = amd76x_check;
229 mci->ctl_page_to_phys = NULL; 258 mci->ctl_page_to_phys = NULL;
230 259
231 for (index = 0; index < mci->nr_csrows; index++) { 260 amd76x_init_csrows(mci, pdev, ems_modes[ems_mode]);
232 struct csrow_info *csrow = &mci->csrows[index];
233 u32 mba;
234 u32 mba_base;
235 u32 mba_mask;
236 u32 dms;
237
238 /* find the DRAM Chip Select Base address and mask */
239 pci_read_config_dword(pdev,
240 AMD76X_MEM_BASE_ADDR + (index * 4), &mba);
241
242 if (!(mba & BIT(0)))
243 continue;
244
245 mba_base = mba & 0xff800000UL;
246 mba_mask = ((mba & 0xff80) << 16) | 0x7fffffUL;
247 pci_read_config_dword(pdev, AMD76X_DRAM_MODE_STATUS, &dms);
248 csrow->first_page = mba_base >> PAGE_SHIFT;
249 csrow->nr_pages = (mba_mask + 1) >> PAGE_SHIFT;
250 csrow->last_page = csrow->first_page + csrow->nr_pages - 1;
251 csrow->page_mask = mba_mask >> PAGE_SHIFT;
252 csrow->grain = csrow->nr_pages << PAGE_SHIFT;
253 csrow->mtype = MEM_RDDR;
254 csrow->dtype = ((dms >> index) & 0x1) ? DEV_X4 : DEV_UNKNOWN;
255 csrow->edac_mode = ems_modes[ems_mode];
256 }
257
258 amd76x_get_error_info(mci, &discard); /* clear counters */ 261 amd76x_get_error_info(mci, &discard); /* clear counters */
259 262
260 /* Here we assume that we will never see multiple instances of this 263 /* Here we assume that we will never see multiple instances of this
@@ -270,9 +273,8 @@ static int amd76x_probe1(struct pci_dev *pdev, int dev_idx)
270 return 0; 273 return 0;
271 274
272fail: 275fail:
273 if (mci != NULL) 276 edac_mc_free(mci);
274 edac_mc_free(mci); 277 return -ENODEV;
275 return rc;
276} 278}
277 279
278/* returns count (>= 0), or negative on error */ 280/* returns count (>= 0), or negative on error */
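The new amd76x_init_csrows() helper above decodes each DRAM Chip Select Base register into a base address, an address mask, and a page range. The standalone sketch below (not part of the patch) works through that arithmetic; the register value and the 4 KiB page size are invented for illustration.

#include <stdio.h>

#define PAGE_SHIFT	12		/* assumption: 4 KiB pages */
#define BIT(n)		(1UL << (n))

int main(void)
{
	unsigned long mba = 0x04003f81UL;	/* invented CS base register value */
	unsigned long mba_base, mba_mask, first_page, nr_pages, last_page;

	if (!(mba & BIT(0)))			/* bit 0 clear: row not populated */
		return 0;

	mba_base = mba & 0xff800000UL;			/* row base address */
	mba_mask = ((mba & 0xff80) << 16) | 0x7fffffUL;	/* row address mask */

	first_page = mba_base >> PAGE_SHIFT;
	nr_pages   = (mba_mask + 1) >> PAGE_SHIFT;
	last_page  = first_page + nr_pages - 1;

	printf("base 0x%08lx: first_page 0x%lx, nr_pages 0x%lx, last_page 0x%lx\n",
	       mba_base, first_page, nr_pages, last_page);
	return 0;
}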
diff --git a/drivers/edac/e752x_edac.c b/drivers/edac/e752x_edac.c
index 5e773e382e8a..5351a76739e5 100644
--- a/drivers/edac/e752x_edac.c
+++ b/drivers/edac/e752x_edac.c
@@ -765,22 +765,174 @@ static void e752x_check(struct mem_ctl_info *mci)
765 e752x_process_error_info(mci, &info, 1); 765 e752x_process_error_info(mci, &info, 1);
766} 766}
767 767
768static int e752x_probe1(struct pci_dev *pdev, int dev_idx) 768/* Return 1 if dual channel mode is active. Else return 0. */
769static inline int dual_channel_active(u16 ddrcsr)
770{
771 return (((ddrcsr >> 12) & 3) == 3);
772}
773
774static void e752x_init_csrows(struct mem_ctl_info *mci, struct pci_dev *pdev,
775 u16 ddrcsr)
776{
777 struct csrow_info *csrow;
778 unsigned long last_cumul_size;
779 int index, mem_dev, drc_chan;
780 int drc_drbg; /* DRB granularity 0=64mb, 1=128mb */
781 int drc_ddim; /* DRAM Data Integrity Mode 0=none, 2=edac */
782 u8 value;
783 u32 dra, drc, cumul_size;
784
785 pci_read_config_dword(pdev, E752X_DRA, &dra);
786 pci_read_config_dword(pdev, E752X_DRC, &drc);
787 drc_chan = dual_channel_active(ddrcsr);
788 drc_drbg = drc_chan + 1; /* 128 in dual mode, 64 in single */
789 drc_ddim = (drc >> 20) & 0x3;
790
791 /* The dram row boundary (DRB) reg values are boundary address for
792 * each DRAM row with a granularity of 64 or 128MB (single/dual
793 * channel operation). DRB regs are cumulative; therefore DRB7 will
794 * contain the total memory contained in all eight rows.
795 */
796 for (last_cumul_size = index = 0; index < mci->nr_csrows; index++) {
797 /* mem_dev 0=x8, 1=x4 */
798 mem_dev = (dra >> (index * 4 + 2)) & 0x3;
799 csrow = &mci->csrows[index];
800
801 mem_dev = (mem_dev == 2);
802 pci_read_config_byte(pdev, E752X_DRB + index, &value);
803 /* convert a 128 or 64 MiB DRB to a page size. */
804 cumul_size = value << (25 + drc_drbg - PAGE_SHIFT);
805 debugf3("%s(): (%d) cumul_size 0x%x\n", __func__, index,
806 cumul_size);
807 if (cumul_size == last_cumul_size)
808 continue; /* not populated */
809
810 csrow->first_page = last_cumul_size;
811 csrow->last_page = cumul_size - 1;
812 csrow->nr_pages = cumul_size - last_cumul_size;
813 last_cumul_size = cumul_size;
814 csrow->grain = 1 << 12; /* 4KiB - resolution of CELOG */
815 csrow->mtype = MEM_RDDR; /* only one type supported */
816 csrow->dtype = mem_dev ? DEV_X4 : DEV_X8;
817
818 /*
819 * if single channel or x8 devices then SECDED
820 * if dual channel and x4 then S4ECD4ED
821 */
822 if (drc_ddim) {
823 if (drc_chan && mem_dev) {
824 csrow->edac_mode = EDAC_S4ECD4ED;
825 mci->edac_cap |= EDAC_FLAG_S4ECD4ED;
826 } else {
827 csrow->edac_mode = EDAC_SECDED;
828 mci->edac_cap |= EDAC_FLAG_SECDED;
829 }
830 } else
831 csrow->edac_mode = EDAC_NONE;
832 }
833}
834
835static void e752x_init_mem_map_table(struct pci_dev *pdev,
836 struct e752x_pvt *pvt)
769{ 837{
770 int rc = -ENODEV;
771 int index; 838 int index;
839 u8 value, last, row, stat8;
840
841 last = 0;
842 row = 0;
843
844 for (index = 0; index < 8; index += 2) {
845 pci_read_config_byte(pdev, E752X_DRB + index, &value);
846 /* test if there is a dimm in this slot */
847 if (value == last) {
848 /* no dimm in the slot, so flag it as empty */
849 pvt->map[index] = 0xff;
850 pvt->map[index + 1] = 0xff;
851 } else { /* there is a dimm in the slot */
852 pvt->map[index] = row;
853 row++;
854 last = value;
855 /* test the next value to see if the dimm is double
856 * sided
857 */
858 pci_read_config_byte(pdev, E752X_DRB + index + 1,
859 &value);
860 pvt->map[index + 1] = (value == last) ?
861 0xff : /* the dimm is single sided,
862 so flag as empty */
863 row; /* this is a double sided dimm
864 to save the next row # */
865 row++;
866 last = value;
867 }
868 }
869
870 /* set the map type. 1 = normal, 0 = reversed */
871 pci_read_config_byte(pdev, E752X_DRM, &stat8);
872 pvt->map_type = ((stat8 & 0x0f) > ((stat8 >> 4) & 0x0f));
873}
874
875/* Return 0 on success or 1 on failure. */
876static int e752x_get_devs(struct pci_dev *pdev, int dev_idx,
877 struct e752x_pvt *pvt)
878{
879 struct pci_dev *dev;
880
881 pvt->bridge_ck = pci_get_device(PCI_VENDOR_ID_INTEL,
882 pvt->dev_info->err_dev,
883 pvt->bridge_ck);
884
885 if (pvt->bridge_ck == NULL)
886 pvt->bridge_ck = pci_scan_single_device(pdev->bus,
887 PCI_DEVFN(0, 1));
888
889 if (pvt->bridge_ck == NULL) {
890 e752x_printk(KERN_ERR, "error reporting device not found:"
891 "vendor %x device 0x%x (broken BIOS?)\n",
892 PCI_VENDOR_ID_INTEL, e752x_devs[dev_idx].err_dev);
893 return 1;
894 }
895
896 dev = pci_get_device(PCI_VENDOR_ID_INTEL, e752x_devs[dev_idx].ctl_dev,
897 NULL);
898
899 if (dev == NULL)
900 goto fail;
901
902 pvt->dev_d0f0 = dev;
903 pvt->dev_d0f1 = pci_dev_get(pvt->bridge_ck);
904
905 return 0;
906
907fail:
908 pci_dev_put(pvt->bridge_ck);
909 return 1;
910}
911
912static void e752x_init_error_reporting_regs(struct e752x_pvt *pvt)
913{
914 struct pci_dev *dev;
915
916 dev = pvt->dev_d0f1;
917 /* Turn off error disable & SMI in case the BIOS turned it on */
918 pci_write_config_byte(dev, E752X_HI_ERRMASK, 0x00);
919 pci_write_config_byte(dev, E752X_HI_SMICMD, 0x00);
920 pci_write_config_word(dev, E752X_SYSBUS_ERRMASK, 0x00);
921 pci_write_config_word(dev, E752X_SYSBUS_SMICMD, 0x00);
922 pci_write_config_byte(dev, E752X_BUF_ERRMASK, 0x00);
923 pci_write_config_byte(dev, E752X_BUF_SMICMD, 0x00);
924 pci_write_config_byte(dev, E752X_DRAM_ERRMASK, 0x00);
925 pci_write_config_byte(dev, E752X_DRAM_SMICMD, 0x00);
926}
927
928static int e752x_probe1(struct pci_dev *pdev, int dev_idx)
929{
772 u16 pci_data; 930 u16 pci_data;
773 u8 stat8; 931 u8 stat8;
774 struct mem_ctl_info *mci = NULL; 932 struct mem_ctl_info *mci;
775 struct e752x_pvt *pvt = NULL; 933 struct e752x_pvt *pvt;
776 u16 ddrcsr; 934 u16 ddrcsr;
777 u32 drc;
778 int drc_chan; /* Number of channels 0=1chan,1=2chan */ 935 int drc_chan; /* Number of channels 0=1chan,1=2chan */
779 int drc_drbg; /* DRB granularity 0=64mb, 1=128mb */
780 int drc_ddim; /* DRAM Data Integrity Mode 0=none,2=edac */
781 u32 dra;
782 unsigned long last_cumul_size;
783 struct pci_dev *dev = NULL;
784 struct e752x_error_info discard; 936 struct e752x_error_info discard;
785 937
786 debugf0("%s(): mci\n", __func__); 938 debugf0("%s(): mci\n", __func__);
@@ -794,25 +946,20 @@ static int e752x_probe1(struct pci_dev *pdev, int dev_idx)
794 if (!force_function_unhide && !(stat8 & (1 << 5))) { 946 if (!force_function_unhide && !(stat8 & (1 << 5))) {
795 printk(KERN_INFO "Contact your BIOS vendor to see if the " 947 printk(KERN_INFO "Contact your BIOS vendor to see if the "
796 "E752x error registers can be safely un-hidden\n"); 948 "E752x error registers can be safely un-hidden\n");
797 goto fail; 949 return -ENOMEM;
798 } 950 }
799 stat8 |= (1 << 5); 951 stat8 |= (1 << 5);
800 pci_write_config_byte(pdev, E752X_DEVPRES1, stat8); 952 pci_write_config_byte(pdev, E752X_DEVPRES1, stat8);
801 953
802 /* need to find out the number of channels */
803 pci_read_config_dword(pdev, E752X_DRC, &drc);
804 pci_read_config_word(pdev, E752X_DDRCSR, &ddrcsr); 954 pci_read_config_word(pdev, E752X_DDRCSR, &ddrcsr);
805 /* FIXME: should check >>12 or 0xf, true for all? */ 955 /* FIXME: should check >>12 or 0xf, true for all? */
806 /* Dual channel = 1, Single channel = 0 */ 956 /* Dual channel = 1, Single channel = 0 */
807 drc_chan = (((ddrcsr >> 12) & 3) == 3); 957 drc_chan = dual_channel_active(ddrcsr);
808 drc_drbg = drc_chan + 1; /* 128 in dual mode, 64 in single */
809 drc_ddim = (drc >> 20) & 0x3;
810 958
811 mci = edac_mc_alloc(sizeof(*pvt), E752X_NR_CSROWS, drc_chan + 1); 959 mci = edac_mc_alloc(sizeof(*pvt), E752X_NR_CSROWS, drc_chan + 1);
812 960
813 if (mci == NULL) { 961 if (mci == NULL) {
814 rc = -ENOMEM; 962 return -ENOMEM;
815 goto fail;
816 } 963 }
817 964
818 debugf3("%s(): init mci\n", __func__); 965 debugf3("%s(): init mci\n", __func__);
@@ -827,113 +974,20 @@ static int e752x_probe1(struct pci_dev *pdev, int dev_idx)
827 debugf3("%s(): init pvt\n", __func__); 974 debugf3("%s(): init pvt\n", __func__);
828 pvt = (struct e752x_pvt *) mci->pvt_info; 975 pvt = (struct e752x_pvt *) mci->pvt_info;
829 pvt->dev_info = &e752x_devs[dev_idx]; 976 pvt->dev_info = &e752x_devs[dev_idx];
830 pvt->bridge_ck = pci_get_device(PCI_VENDOR_ID_INTEL, 977 pvt->mc_symmetric = ((ddrcsr & 0x10) != 0);
831 pvt->dev_info->err_dev,
832 pvt->bridge_ck);
833
834 if (pvt->bridge_ck == NULL)
835 pvt->bridge_ck = pci_scan_single_device(pdev->bus,
836 PCI_DEVFN(0, 1));
837 978
838 if (pvt->bridge_ck == NULL) { 979 if (e752x_get_devs(pdev, dev_idx, pvt)) {
839 e752x_printk(KERN_ERR, "error reporting device not found:" 980 edac_mc_free(mci);
840 "vendor %x device 0x%x (broken BIOS?)\n", 981 return -ENODEV;
841 PCI_VENDOR_ID_INTEL, e752x_devs[dev_idx].err_dev);
842 goto fail;
843 } 982 }
844 983
845 pvt->mc_symmetric = ((ddrcsr & 0x10) != 0);
846 debugf3("%s(): more mci init\n", __func__); 984 debugf3("%s(): more mci init\n", __func__);
847 mci->ctl_name = pvt->dev_info->ctl_name; 985 mci->ctl_name = pvt->dev_info->ctl_name;
848 mci->edac_check = e752x_check; 986 mci->edac_check = e752x_check;
849 mci->ctl_page_to_phys = ctl_page_to_phys; 987 mci->ctl_page_to_phys = ctl_page_to_phys;
850 988
851 /* find out the device types */ 989 e752x_init_csrows(mci, pdev, ddrcsr);
852 pci_read_config_dword(pdev, E752X_DRA, &dra); 990 e752x_init_mem_map_table(pdev, pvt);
853
854 /*
855 * The dram row boundary (DRB) reg values are boundary address for
856 * each DRAM row with a granularity of 64 or 128MB (single/dual
857 * channel operation). DRB regs are cumulative; therefore DRB7 will
858 * contain the total memory contained in all eight rows.
859 */
860 for (last_cumul_size = index = 0; index < mci->nr_csrows; index++) {
861 u8 value;
862 u32 cumul_size;
863
864 /* mem_dev 0=x8, 1=x4 */
865 int mem_dev = (dra >> (index * 4 + 2)) & 0x3;
866 struct csrow_info *csrow = &mci->csrows[index];
867
868 mem_dev = (mem_dev == 2);
869 pci_read_config_byte(pdev, E752X_DRB + index, &value);
870 /* convert a 128 or 64 MiB DRB to a page size. */
871 cumul_size = value << (25 + drc_drbg - PAGE_SHIFT);
872 debugf3("%s(): (%d) cumul_size 0x%x\n", __func__, index,
873 cumul_size);
874
875 if (cumul_size == last_cumul_size)
876 continue; /* not populated */
877
878 csrow->first_page = last_cumul_size;
879 csrow->last_page = cumul_size - 1;
880 csrow->nr_pages = cumul_size - last_cumul_size;
881 last_cumul_size = cumul_size;
882 csrow->grain = 1 << 12; /* 4KiB - resolution of CELOG */
883 csrow->mtype = MEM_RDDR; /* only one type supported */
884 csrow->dtype = mem_dev ? DEV_X4 : DEV_X8;
885
886 /*
887 * if single channel or x8 devices then SECDED
888 * if dual channel and x4 then S4ECD4ED
889 */
890 if (drc_ddim) {
891 if (drc_chan && mem_dev) {
892 csrow->edac_mode = EDAC_S4ECD4ED;
893 mci->edac_cap |= EDAC_FLAG_S4ECD4ED;
894 } else {
895 csrow->edac_mode = EDAC_SECDED;
896 mci->edac_cap |= EDAC_FLAG_SECDED;
897 }
898 } else
899 csrow->edac_mode = EDAC_NONE;
900 }
901
902 /* Fill in the memory map table */
903 {
904 u8 value;
905 u8 last = 0;
906 u8 row = 0;
907
908 for (index = 0; index < 8; index += 2) {
909 pci_read_config_byte(pdev, E752X_DRB + index, &value);
910
911 /* test if there is a dimm in this slot */
912 if (value == last) {
913 /* no dimm in the slot, so flag it as empty */
914 pvt->map[index] = 0xff;
915 pvt->map[index + 1] = 0xff;
916 } else { /* there is a dimm in the slot */
917 pvt->map[index] = row;
918 row++;
919 last = value;
920 /* test the next value to see if the dimm is
921 double sided */
922 pci_read_config_byte(pdev,
923 E752X_DRB + index + 1,
924 &value);
925 pvt->map[index + 1] = (value == last) ?
926 0xff : /* the dimm is single sided,
927 * so flag as empty
928 */
929 row; /* this is a double sided dimm
930 * to save the next row #
931 */
932 row++;
933 last = value;
934 }
935 }
936 }
937 991
938 /* set the map type. 1 = normal, 0 = reversed */ 992 /* set the map type. 1 = normal, 0 = reversed */
939 pci_read_config_byte(pdev, E752X_DRM, &stat8); 993 pci_read_config_byte(pdev, E752X_DRM, &stat8);
@@ -961,21 +1015,7 @@ static int e752x_probe1(struct pci_dev *pdev, int dev_idx)
961 goto fail; 1015 goto fail;
962 } 1016 }
963 1017
964 dev = pci_get_device(PCI_VENDOR_ID_INTEL, e752x_devs[dev_idx].ctl_dev, 1018 e752x_init_error_reporting_regs(pvt);
965 NULL);
966 pvt->dev_d0f0 = dev;
967 /* find the error reporting device and clear errors */
968 dev = pvt->dev_d0f1 = pci_dev_get(pvt->bridge_ck);
969 /* Turn off error disable & SMI in case the BIOS turned it on */
970 pci_write_config_byte(dev, E752X_HI_ERRMASK, 0x00);
971 pci_write_config_byte(dev, E752X_HI_SMICMD, 0x00);
972 pci_write_config_word(dev, E752X_SYSBUS_ERRMASK, 0x00);
973 pci_write_config_word(dev, E752X_SYSBUS_SMICMD, 0x00);
974 pci_write_config_byte(dev, E752X_BUF_ERRMASK, 0x00);
975 pci_write_config_byte(dev, E752X_BUF_SMICMD, 0x00);
976 pci_write_config_byte(dev, E752X_DRAM_ERRMASK, 0x00);
977 pci_write_config_byte(dev, E752X_DRAM_SMICMD, 0x00);
978
979 e752x_get_error_info(mci, &discard); /* clear other MCH errors */ 1019 e752x_get_error_info(mci, &discard); /* clear other MCH errors */
980 1020
981 /* get this far and it's successful */ 1021 /* get this far and it's successful */
@@ -983,20 +1023,12 @@ static int e752x_probe1(struct pci_dev *pdev, int dev_idx)
983 return 0; 1023 return 0;
984 1024
985fail: 1025fail:
986 if (mci) { 1026 pci_dev_put(pvt->dev_d0f0);
987 if (pvt->dev_d0f0) 1027 pci_dev_put(pvt->dev_d0f1);
988 pci_dev_put(pvt->dev_d0f0); 1028 pci_dev_put(pvt->bridge_ck);
989 1029 edac_mc_free(mci);
990 if (pvt->dev_d0f1)
991 pci_dev_put(pvt->dev_d0f1);
992
993 if (pvt->bridge_ck)
994 pci_dev_put(pvt->bridge_ck);
995
996 edac_mc_free(mci);
997 }
998 1030
999 return rc; 1031 return -ENODEV;
1000} 1032}
1001 1033
1002/* returns count (>= 0), or negative on error */ 1034/* returns count (>= 0), or negative on error */
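e752x_init_mem_map_table() above builds pvt->map[] by walking the DRB registers two at a time: if a slot's boundary equals the previous one, the socket is empty (0xff); otherwise the even slot gets the next row number, and a second boundary change on the odd register marks a double-sided DIMM. The standalone sketch below (not part of the patch) replays that walk on invented DRB values.

#include <stdio.h>

int main(void)
{
	/* Invented cumulative DRB values, two slots per DIMM socket. */
	unsigned char drb[8] = { 4, 8, 8, 8, 12, 16, 16, 16 };
	unsigned char map[8];
	unsigned char last = 0, row = 0;
	int index;

	for (index = 0; index < 8; index += 2) {
		if (drb[index] == last) {
			/* no dimm in the slot, so flag it as empty */
			map[index] = 0xff;
			map[index + 1] = 0xff;
		} else {
			map[index] = row++;
			last = drb[index];
			/* a second boundary change means the dimm is double sided */
			map[index + 1] = (drb[index + 1] == last) ? 0xff : row;
			row++;
			last = drb[index + 1];
		}
	}

	for (index = 0; index < 8; index++)
		printf("map[%d] = 0x%02x\n", index, (unsigned)map[index]);
	return 0;
}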
diff --git a/drivers/edac/e7xxx_edac.c b/drivers/edac/e7xxx_edac.c
index 1e282c843e77..9878379b4993 100644
--- a/drivers/edac/e7xxx_edac.c
+++ b/drivers/edac/e7xxx_edac.c
@@ -335,99 +335,61 @@ static void e7xxx_check(struct mem_ctl_info *mci)
335 e7xxx_process_error_info(mci, &info, 1); 335 e7xxx_process_error_info(mci, &info, 1);
336} 336}
337 337
338static int e7xxx_probe1(struct pci_dev *pdev, int dev_idx) 338/* Return 1 if dual channel mode is active. Else return 0. */
339static inline int dual_channel_active(u32 drc, int dev_idx)
339{ 340{
340 int rc = -ENODEV; 341 return (dev_idx == E7501) ? ((drc >> 22) & 0x1) : 1;
341 int index; 342}
342 u16 pci_data;
343 struct mem_ctl_info *mci = NULL;
344 struct e7xxx_pvt *pvt = NULL;
345 u32 drc;
346 int drc_chan = 1; /* Number of channels 0=1chan,1=2chan */
347 int drc_drbg = 1; /* DRB granularity 0=32mb,1=64mb */
348 int drc_ddim; /* DRAM Data Integrity Mode 0=none,2=edac */
349 u32 dra;
350 unsigned long last_cumul_size;
351 struct e7xxx_error_info discard;
352
353 debugf0("%s(): mci\n", __func__);
354 343
355 /* need to find out the number of channels */
356 pci_read_config_dword(pdev, E7XXX_DRC, &drc);
357 344
345/* Return DRB granularity (0=32mb, 1=64mb). */
346static inline int drb_granularity(u32 drc, int dev_idx)
347{
358 /* only e7501 can be single channel */ 348 /* only e7501 can be single channel */
359 if (dev_idx == E7501) { 349 return (dev_idx == E7501) ? ((drc >> 18) & 0x3) : 1;
360 drc_chan = ((drc >> 22) & 0x1); 350}
361 drc_drbg = (drc >> 18) & 0x3;
362 }
363
364 drc_ddim = (drc >> 20) & 0x3;
365 mci = edac_mc_alloc(sizeof(*pvt), E7XXX_NR_CSROWS, drc_chan + 1);
366
367 if (mci == NULL) {
368 rc = -ENOMEM;
369 goto fail;
370 }
371
372 debugf3("%s(): init mci\n", __func__);
373 mci->mtype_cap = MEM_FLAG_RDDR;
374 mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED |
375 EDAC_FLAG_S4ECD4ED;
376 /* FIXME - what if different memory types are in different csrows? */
377 mci->mod_name = EDAC_MOD_STR;
378 mci->mod_ver = E7XXX_REVISION;
379 mci->dev = &pdev->dev;
380
381 debugf3("%s(): init pvt\n", __func__);
382 pvt = (struct e7xxx_pvt *) mci->pvt_info;
383 pvt->dev_info = &e7xxx_devs[dev_idx];
384 pvt->bridge_ck = pci_get_device(PCI_VENDOR_ID_INTEL,
385 pvt->dev_info->err_dev,
386 pvt->bridge_ck);
387 351
388 if (!pvt->bridge_ck) {
389 e7xxx_printk(KERN_ERR, "error reporting device not found:"
390 "vendor %x device 0x%x (broken BIOS?)\n",
391 PCI_VENDOR_ID_INTEL, e7xxx_devs[dev_idx].err_dev);
392 goto fail;
393 }
394 352
395 debugf3("%s(): more mci init\n", __func__); 353static void e7xxx_init_csrows(struct mem_ctl_info *mci, struct pci_dev *pdev,
396 mci->ctl_name = pvt->dev_info->ctl_name; 354 int dev_idx, u32 drc)
397 mci->edac_check = e7xxx_check; 355{
398 mci->ctl_page_to_phys = ctl_page_to_phys; 356 unsigned long last_cumul_size;
357 int index;
358 u8 value;
359 u32 dra, cumul_size;
360 int drc_chan, drc_drbg, drc_ddim, mem_dev;
361 struct csrow_info *csrow;
399 362
400 /* find out the device types */
401 pci_read_config_dword(pdev, E7XXX_DRA, &dra); 363 pci_read_config_dword(pdev, E7XXX_DRA, &dra);
364 drc_chan = dual_channel_active(drc, dev_idx);
365 drc_drbg = drb_granularity(drc, dev_idx);
366 drc_ddim = (drc >> 20) & 0x3;
367 last_cumul_size = 0;
402 368
403 /* 369 /* The dram row boundary (DRB) reg values are boundary address
404 * The dram row boundary (DRB) reg values are boundary address
405 * for each DRAM row with a granularity of 32 or 64MB (single/dual 370 * for each DRAM row with a granularity of 32 or 64MB (single/dual
406 * channel operation). DRB regs are cumulative; therefore DRB7 will 371 * channel operation). DRB regs are cumulative; therefore DRB7 will
407 * contain the total memory contained in all eight rows. 372 * contain the total memory contained in all eight rows.
408 */ 373 */
409 for (last_cumul_size = index = 0; index < mci->nr_csrows; index++) { 374 for (index = 0; index < mci->nr_csrows; index++) {
410 u8 value;
411 u32 cumul_size;
412 /* mem_dev 0=x8, 1=x4 */ 375 /* mem_dev 0=x8, 1=x4 */
413 int mem_dev = (dra >> (index * 4 + 3)) & 0x1; 376 mem_dev = (dra >> (index * 4 + 3)) & 0x1;
414 struct csrow_info *csrow = &mci->csrows[index]; 377 csrow = &mci->csrows[index];
415 378
416 pci_read_config_byte(pdev, E7XXX_DRB + index, &value); 379 pci_read_config_byte(pdev, E7XXX_DRB + index, &value);
417 /* convert a 64 or 32 MiB DRB to a page size. */ 380 /* convert a 64 or 32 MiB DRB to a page size. */
418 cumul_size = value << (25 + drc_drbg - PAGE_SHIFT); 381 cumul_size = value << (25 + drc_drbg - PAGE_SHIFT);
419 debugf3("%s(): (%d) cumul_size 0x%x\n", __func__, index, 382 debugf3("%s(): (%d) cumul_size 0x%x\n", __func__, index,
420 cumul_size); 383 cumul_size);
421
422 if (cumul_size == last_cumul_size) 384 if (cumul_size == last_cumul_size)
423 continue; /* not populated */ 385 continue; /* not populated */
424 386
425 csrow->first_page = last_cumul_size; 387 csrow->first_page = last_cumul_size;
426 csrow->last_page = cumul_size - 1; 388 csrow->last_page = cumul_size - 1;
427 csrow->nr_pages = cumul_size - last_cumul_size; 389 csrow->nr_pages = cumul_size - last_cumul_size;
428 last_cumul_size = cumul_size; 390 last_cumul_size = cumul_size;
429 csrow->grain = 1 << 12; /* 4KiB - resolution of CELOG */ 391 csrow->grain = 1 << 12; /* 4KiB - resolution of CELOG */
430 csrow->mtype = MEM_RDDR; /* only one type supported */ 392 csrow->mtype = MEM_RDDR; /* only one type supported */
431 csrow->dtype = mem_dev ? DEV_X4 : DEV_X8; 393 csrow->dtype = mem_dev ? DEV_X4 : DEV_X8;
432 394
433 /* 395 /*
@@ -445,9 +407,54 @@ static int e7xxx_probe1(struct pci_dev *pdev, int dev_idx)
445 } else 407 } else
446 csrow->edac_mode = EDAC_NONE; 408 csrow->edac_mode = EDAC_NONE;
447 } 409 }
410}
448 411
449 mci->edac_cap |= EDAC_FLAG_NONE; 412static int e7xxx_probe1(struct pci_dev *pdev, int dev_idx)
413{
414 u16 pci_data;
415 struct mem_ctl_info *mci = NULL;
416 struct e7xxx_pvt *pvt = NULL;
417 u32 drc;
418 int drc_chan;
419 struct e7xxx_error_info discard;
420
421 debugf0("%s(): mci\n", __func__);
422 pci_read_config_dword(pdev, E7XXX_DRC, &drc);
423
424 drc_chan = dual_channel_active(drc, dev_idx);
425 mci = edac_mc_alloc(sizeof(*pvt), E7XXX_NR_CSROWS, drc_chan + 1);
426
427 if (mci == NULL)
428 return -ENOMEM;
450 429
430 debugf3("%s(): init mci\n", __func__);
431 mci->mtype_cap = MEM_FLAG_RDDR;
432 mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED |
433 EDAC_FLAG_S4ECD4ED;
434 /* FIXME - what if different memory types are in different csrows? */
435 mci->mod_name = EDAC_MOD_STR;
436 mci->mod_ver = E7XXX_REVISION;
437 mci->dev = &pdev->dev;
438 debugf3("%s(): init pvt\n", __func__);
439 pvt = (struct e7xxx_pvt *) mci->pvt_info;
440 pvt->dev_info = &e7xxx_devs[dev_idx];
441 pvt->bridge_ck = pci_get_device(PCI_VENDOR_ID_INTEL,
442 pvt->dev_info->err_dev,
443 pvt->bridge_ck);
444
445 if (!pvt->bridge_ck) {
446 e7xxx_printk(KERN_ERR, "error reporting device not found:"
447 "vendor %x device 0x%x (broken BIOS?)\n",
448 PCI_VENDOR_ID_INTEL, e7xxx_devs[dev_idx].err_dev);
449 goto fail0;
450 }
451
452 debugf3("%s(): more mci init\n", __func__);
453 mci->ctl_name = pvt->dev_info->ctl_name;
454 mci->edac_check = e7xxx_check;
455 mci->ctl_page_to_phys = ctl_page_to_phys;
456 e7xxx_init_csrows(mci, pdev, dev_idx, drc);
457 mci->edac_cap |= EDAC_FLAG_NONE;
451 debugf3("%s(): tolm, remapbase, remaplimit\n", __func__); 458 debugf3("%s(): tolm, remapbase, remaplimit\n", __func__);
452 /* load the top of low memory, remap base, and remap limit vars */ 459 /* load the top of low memory, remap base, and remap limit vars */
453 pci_read_config_word(pdev, E7XXX_TOLM, &pci_data); 460 pci_read_config_word(pdev, E7XXX_TOLM, &pci_data);
@@ -468,21 +475,20 @@ static int e7xxx_probe1(struct pci_dev *pdev, int dev_idx)
468 */ 475 */
469 if (edac_mc_add_mc(mci,0)) { 476 if (edac_mc_add_mc(mci,0)) {
470 debugf3("%s(): failed edac_mc_add_mc()\n", __func__); 477 debugf3("%s(): failed edac_mc_add_mc()\n", __func__);
471 goto fail; 478 goto fail1;
472 } 479 }
473 480
474 /* get this far and it's successful */ 481 /* get this far and it's successful */
475 debugf3("%s(): success\n", __func__); 482 debugf3("%s(): success\n", __func__);
476 return 0; 483 return 0;
477 484
478fail: 485fail1:
479 if (mci != NULL) { 486 pci_dev_put(pvt->bridge_ck);
480 if(pvt != NULL && pvt->bridge_ck) 487
481 pci_dev_put(pvt->bridge_ck); 488fail0:
482 edac_mc_free(mci); 489 edac_mc_free(mci);
483 }
484 490
485 return rc; 491 return -ENODEV;
486} 492}
487 493
488/* returns count (>= 0), or negative on error */ 494/* returns count (>= 0), or negative on error */
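The csrow loops in e752x_init_csrows(), e7xxx_init_csrows() above, and i82875p_init_csrows() below all rely on the same cumulative-boundary idea: each DRB value is the top of a row, a row's size is the difference from the previous boundary, and a repeated value means the row is unpopulated. The standalone sketch below (not part of the patch) works through this with invented DRB values at an assumed 64 MiB granularity and 4 KiB pages.

#include <stdio.h>

#define PAGE_SHIFT	12	/* assumption: 4 KiB pages */
#define DRB_SHIFT	26	/* 64 MiB granularity: value << 26 is an address */

int main(void)
{
	/* Invented cumulative row boundaries, in 64 MiB units. */
	unsigned char drb[8] = { 4, 8, 8, 8, 8, 8, 8, 8 };
	unsigned long last_cumul_size = 0;
	int index;

	for (index = 0; index < 8; index++) {
		/* convert the boundary straight to a page count */
		unsigned long cumul_size = (unsigned long)drb[index] <<
					   (DRB_SHIFT - PAGE_SHIFT);

		if (cumul_size == last_cumul_size)
			continue;	/* not populated */

		printf("row %d: first_page 0x%05lx  last_page 0x%05lx  nr_pages 0x%05lx\n",
		       index, last_cumul_size, cumul_size - 1,
		       cumul_size - last_cumul_size);
		last_cumul_size = cumul_size;
	}
	return 0;
}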
diff --git a/drivers/edac/i82860_edac.c b/drivers/edac/i82860_edac.c
index e2c3b8bc097b..d196dcc850a8 100644
--- a/drivers/edac/i82860_edac.c
+++ b/drivers/edac/i82860_edac.c
@@ -133,15 +133,50 @@ static void i82860_check(struct mem_ctl_info *mci)
133 i82860_process_error_info(mci, &info, 1); 133 i82860_process_error_info(mci, &info, 1);
134} 134}
135 135
136static int i82860_probe1(struct pci_dev *pdev, int dev_idx) 136static void i82860_init_csrows(struct mem_ctl_info *mci, struct pci_dev *pdev)
137{ 137{
138 int rc = -ENODEV;
139 int index;
140 struct mem_ctl_info *mci = NULL;
141 unsigned long last_cumul_size; 138 unsigned long last_cumul_size;
142 struct i82860_error_info discard; 139 u16 mchcfg_ddim; /* DRAM Data Integrity Mode 0=none, 2=edac */
140 u16 value;
141 u32 cumul_size;
142 struct csrow_info *csrow;
143 int index;
144
145 pci_read_config_word(pdev, I82860_MCHCFG, &mchcfg_ddim);
146 mchcfg_ddim = mchcfg_ddim & 0x180;
147 last_cumul_size = 0;
148
149 /* The group row boundary (GRA) reg values are boundary address
150 * for each DRAM row with a granularity of 16MB. GRA regs are
151 * cumulative; therefore GRA15 will contain the total memory contained
152 * in all eight rows.
153 */
154 for (index = 0; index < mci->nr_csrows; index++) {
155 csrow = &mci->csrows[index];
156 pci_read_config_word(pdev, I82860_GBA + index * 2, &value);
157 cumul_size = (value & I82860_GBA_MASK) <<
158 (I82860_GBA_SHIFT - PAGE_SHIFT);
159 debugf3("%s(): (%d) cumul_size 0x%x\n", __func__, index,
160 cumul_size);
161
162 if (cumul_size == last_cumul_size)
163 continue; /* not populated */
143 164
144 u16 mchcfg_ddim; /* DRAM Data Integrity Mode 0=none,2=edac */ 165 csrow->first_page = last_cumul_size;
166 csrow->last_page = cumul_size - 1;
167 csrow->nr_pages = cumul_size - last_cumul_size;
168 last_cumul_size = cumul_size;
169 csrow->grain = 1 << 12; /* I82860_EAP has 4KiB reolution */
170 csrow->mtype = MEM_RMBS;
171 csrow->dtype = DEV_UNKNOWN;
172 csrow->edac_mode = mchcfg_ddim ? EDAC_SECDED : EDAC_NONE;
173 }
174}
175
176static int i82860_probe1(struct pci_dev *pdev, int dev_idx)
177{
178 struct mem_ctl_info *mci;
179 struct i82860_error_info discard;
145 180
146 /* RDRAM has channels but these don't map onto the abstractions that 181 /* RDRAM has channels but these don't map onto the abstractions that
147 edac uses. 182 edac uses.
@@ -159,53 +194,15 @@ static int i82860_probe1(struct pci_dev *pdev, int dev_idx)
159 debugf3("%s(): init mci\n", __func__); 194 debugf3("%s(): init mci\n", __func__);
160 mci->dev = &pdev->dev; 195 mci->dev = &pdev->dev;
161 mci->mtype_cap = MEM_FLAG_DDR; 196 mci->mtype_cap = MEM_FLAG_DDR;
162
163 mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED; 197 mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED;
164 /* I"m not sure about this but I think that all RDRAM is SECDED */ 198 /* I"m not sure about this but I think that all RDRAM is SECDED */
165 mci->edac_cap = EDAC_FLAG_SECDED; 199 mci->edac_cap = EDAC_FLAG_SECDED;
166 /* adjust FLAGS */
167
168 mci->mod_name = EDAC_MOD_STR; 200 mci->mod_name = EDAC_MOD_STR;
169 mci->mod_ver = I82860_REVISION; 201 mci->mod_ver = I82860_REVISION;
170 mci->ctl_name = i82860_devs[dev_idx].ctl_name; 202 mci->ctl_name = i82860_devs[dev_idx].ctl_name;
171 mci->edac_check = i82860_check; 203 mci->edac_check = i82860_check;
172 mci->ctl_page_to_phys = NULL; 204 mci->ctl_page_to_phys = NULL;
173 205 i82860_init_csrows(mci, pdev);
174 pci_read_config_word(pdev, I82860_MCHCFG, &mchcfg_ddim);
175 mchcfg_ddim = mchcfg_ddim & 0x180;
176
177 /*
178 * The group row boundary (GRA) reg values are boundary address
179 * for each DRAM row with a granularity of 16MB. GRA regs are
180 * cumulative; therefore GRA15 will contain the total memory contained
181 * in all eight rows.
182 */
183 for (last_cumul_size = index = 0; index < mci->nr_csrows; index++) {
184 u16 value;
185 u32 cumul_size;
186 struct csrow_info *csrow = &mci->csrows[index];
187
188 pci_read_config_word(pdev, I82860_GBA + index * 2,
189 &value);
190
191 cumul_size = (value & I82860_GBA_MASK) <<
192 (I82860_GBA_SHIFT - PAGE_SHIFT);
193 debugf3("%s(): (%d) cumul_size 0x%x\n", __func__, index,
194 cumul_size);
195
196 if (cumul_size == last_cumul_size)
197 continue; /* not populated */
198
199 csrow->first_page = last_cumul_size;
200 csrow->last_page = cumul_size - 1;
201 csrow->nr_pages = cumul_size - last_cumul_size;
202 last_cumul_size = cumul_size;
203 csrow->grain = 1 << 12; /* I82860_EAP has 4KiB reolution */
204 csrow->mtype = MEM_RMBS;
205 csrow->dtype = DEV_UNKNOWN;
206 csrow->edac_mode = mchcfg_ddim ? EDAC_SECDED : EDAC_NONE;
207 }
208
209 i82860_get_error_info(mci, &discard); /* clear counters */ 206 i82860_get_error_info(mci, &discard); /* clear counters */
210 207
211 /* Here we assume that we will never see multiple instances of this 208 /* Here we assume that we will never see multiple instances of this
@@ -213,14 +210,17 @@ static int i82860_probe1(struct pci_dev *pdev, int dev_idx)
213 */ 210 */
214 if (edac_mc_add_mc(mci,0)) { 211 if (edac_mc_add_mc(mci,0)) {
215 debugf3("%s(): failed edac_mc_add_mc()\n", __func__); 212 debugf3("%s(): failed edac_mc_add_mc()\n", __func__);
216 edac_mc_free(mci); 213 goto fail;
217 } else {
218 /* get this far and it's successful */
219 debugf3("%s(): success\n", __func__);
220 rc = 0;
221 } 214 }
222 215
223 return rc; 216 /* get this far and it's successful */
217 debugf3("%s(): success\n", __func__);
218
219 return 0;
220
221fail:
222 edac_mc_free(mci);
223 return -ENODEV;
224} 224}
225 225
226/* returns count (>= 0), or negative on error */ 226/* returns count (>= 0), or negative on error */
diff --git a/drivers/edac/i82875p_edac.c b/drivers/edac/i82875p_edac.c
index 2be18ca96408..6787403463a1 100644
--- a/drivers/edac/i82875p_edac.c
+++ b/drivers/edac/i82875p_edac.c
@@ -265,116 +265,109 @@ static void i82875p_check(struct mem_ctl_info *mci)
265extern int pci_proc_attach_device(struct pci_dev *); 265extern int pci_proc_attach_device(struct pci_dev *);
266#endif 266#endif
267 267
268static int i82875p_probe1(struct pci_dev *pdev, int dev_idx) 268/* Return 0 on success or 1 on failure. */
269static int i82875p_setup_overfl_dev(struct pci_dev *pdev,
270 struct pci_dev **ovrfl_pdev, void __iomem **ovrfl_window)
269{ 271{
270 int rc = -ENODEV; 272 struct pci_dev *dev;
271 int index; 273 void __iomem *window;
272 struct mem_ctl_info *mci = NULL;
273 struct i82875p_pvt *pvt = NULL;
274 unsigned long last_cumul_size;
275 struct pci_dev *ovrfl_pdev;
276 void __iomem *ovrfl_window = NULL;
277 u32 drc;
278 u32 drc_chan; /* Number of channels 0=1chan,1=2chan */
279 u32 nr_chans;
280 u32 drc_ddim; /* DRAM Data Integrity Mode 0=none,2=edac */
281 struct i82875p_error_info discard;
282 274
283 debugf0("%s()\n", __func__); 275 *ovrfl_pdev = NULL;
284 ovrfl_pdev = pci_get_device(PCI_VEND_DEV(INTEL, 82875_6), NULL); 276 *ovrfl_window = NULL;
277 dev = pci_get_device(PCI_VEND_DEV(INTEL, 82875_6), NULL);
285 278
286 if (!ovrfl_pdev) { 279 if (dev == NULL) {
287 /* 280 /* Intel tells BIOS developers to hide device 6 which
288 * Intel tells BIOS developers to hide device 6 which
289 * configures the overflow device access containing 281 * configures the overflow device access containing
290 * the DRBs - this is where we expose device 6. 282 * the DRBs - this is where we expose device 6.
291 * http://www.x86-secret.com/articles/tweak/pat/patsecrets-2.htm 283 * http://www.x86-secret.com/articles/tweak/pat/patsecrets-2.htm
292 */ 284 */
293 pci_write_bits8(pdev, 0xf4, 0x2, 0x2); 285 pci_write_bits8(pdev, 0xf4, 0x2, 0x2);
294 ovrfl_pdev = 286 dev = pci_scan_single_device(pdev->bus, PCI_DEVFN(6, 0));
295 pci_scan_single_device(pdev->bus, PCI_DEVFN(6, 0));
296 287
297 if (!ovrfl_pdev) 288 if (dev == NULL)
298 return -ENODEV; 289 return 1;
299 } 290 }
300 291
292 *ovrfl_pdev = dev;
293
301#ifdef CONFIG_PROC_FS 294#ifdef CONFIG_PROC_FS
302 if (!ovrfl_pdev->procent && pci_proc_attach_device(ovrfl_pdev)) { 295 if ((dev->procent == NULL) && pci_proc_attach_device(dev)) {
303 i82875p_printk(KERN_ERR, 296 i82875p_printk(KERN_ERR, "%s(): Failed to attach overflow "
304 "%s(): Failed to attach overflow device\n", __func__); 297 "device\n", __func__);
305 return -ENODEV; 298 return 1;
306 } 299 }
307#endif 300#endif /* CONFIG_PROC_FS */
308 /* CONFIG_PROC_FS */ 301 if (pci_enable_device(dev)) {
309 if (pci_enable_device(ovrfl_pdev)) { 302 i82875p_printk(KERN_ERR, "%s(): Failed to enable overflow "
310 i82875p_printk(KERN_ERR, 303 "device\n", __func__);
311 "%s(): Failed to enable overflow device\n", __func__); 304 return 1;
312 return -ENODEV;
313 } 305 }
314 306
315 if (pci_request_regions(ovrfl_pdev, pci_name(ovrfl_pdev))) { 307 if (pci_request_regions(dev, pci_name(dev))) {
316#ifdef CORRECT_BIOS 308#ifdef CORRECT_BIOS
317 goto fail0; 309 goto fail0;
318#endif 310#endif
319 } 311 }
320 312
321 /* cache is irrelevant for PCI bus reads/writes */ 313 /* cache is irrelevant for PCI bus reads/writes */
322 ovrfl_window = ioremap_nocache(pci_resource_start(ovrfl_pdev, 0), 314 window = ioremap_nocache(pci_resource_start(dev, 0),
323 pci_resource_len(ovrfl_pdev, 0)); 315 pci_resource_len(dev, 0));
324 316
325 if (!ovrfl_window) { 317 if (window == NULL) {
326 i82875p_printk(KERN_ERR, "%s(): Failed to ioremap bar6\n", 318 i82875p_printk(KERN_ERR, "%s(): Failed to ioremap bar6\n",
327 __func__); 319 __func__);
328 goto fail1; 320 goto fail1;
329 } 321 }
330 322
331 /* need to find out the number of channels */ 323 *ovrfl_window = window;
332 drc = readl(ovrfl_window + I82875P_DRC); 324 return 0;
333 drc_chan = ((drc >> 21) & 0x1);
334 nr_chans = drc_chan + 1;
335 325
336 drc_ddim = (drc >> 18) & 0x1; 326fail1:
337 mci = edac_mc_alloc(sizeof(*pvt), I82875P_NR_CSROWS(nr_chans), 327 pci_release_regions(dev);
338 nr_chans);
339 328
340 if (!mci) { 329#ifdef CORRECT_BIOS
341 rc = -ENOMEM; 330fail0:
342 goto fail2; 331 pci_disable_device(dev);
343 } 332#endif
333 /* NOTE: the ovrfl proc entry and pci_dev are intentionally left */
334 return 1;
335}
344 336
345 debugf3("%s(): init mci\n", __func__);
346 mci->dev = &pdev->dev;
347 mci->mtype_cap = MEM_FLAG_DDR;
348 mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED;
349 mci->edac_cap = EDAC_FLAG_UNKNOWN;
350 /* adjust FLAGS */
351 337
352 mci->mod_name = EDAC_MOD_STR; 338/* Return 1 if dual channel mode is active. Else return 0. */
353 mci->mod_ver = I82875P_REVISION; 339static inline int dual_channel_active(u32 drc)
354 mci->ctl_name = i82875p_devs[dev_idx].ctl_name; 340{
355 mci->edac_check = i82875p_check; 341 return (drc >> 21) & 0x1;
356 mci->ctl_page_to_phys = NULL; 342}
357 debugf3("%s(): init pvt\n", __func__);
358 pvt = (struct i82875p_pvt *) mci->pvt_info;
359 pvt->ovrfl_pdev = ovrfl_pdev;
360 pvt->ovrfl_window = ovrfl_window;
361 343
362 /* 344
363 * The dram row boundary (DRB) reg values are boundary address 345static void i82875p_init_csrows(struct mem_ctl_info *mci,
346 struct pci_dev *pdev, void __iomem *ovrfl_window, u32 drc)
347{
348 struct csrow_info *csrow;
349 unsigned long last_cumul_size;
350 u8 value;
351 u32 drc_ddim; /* DRAM Data Integrity Mode 0=none,2=edac */
352 u32 cumul_size;
353 int index;
354
355 drc_ddim = (drc >> 18) & 0x1;
356 last_cumul_size = 0;
357
358 /* The dram row boundary (DRB) reg values are boundary address
364 * for each DRAM row with a granularity of 32 or 64MB (single/dual 359 * for each DRAM row with a granularity of 32 or 64MB (single/dual
365 * channel operation). DRB regs are cumulative; therefore DRB7 will 360 * channel operation). DRB regs are cumulative; therefore DRB7 will
366 * contain the total memory contained in all eight rows. 361 * contain the total memory contained in all eight rows.
367 */ 362 */
368 for (last_cumul_size = index = 0; index < mci->nr_csrows; index++) { 363
369 u8 value; 364 for (index = 0; index < mci->nr_csrows; index++) {
370 u32 cumul_size; 365 csrow = &mci->csrows[index];
371 struct csrow_info *csrow = &mci->csrows[index];
372 366
373 value = readb(ovrfl_window + I82875P_DRB + index); 367 value = readb(ovrfl_window + I82875P_DRB + index);
374 cumul_size = value << (I82875P_DRB_SHIFT - PAGE_SHIFT); 368 cumul_size = value << (I82875P_DRB_SHIFT - PAGE_SHIFT);
375 debugf3("%s(): (%d) cumul_size 0x%x\n", __func__, index, 369 debugf3("%s(): (%d) cumul_size 0x%x\n", __func__, index,
376 cumul_size); 370 cumul_size);
377
378 if (cumul_size == last_cumul_size) 371 if (cumul_size == last_cumul_size)
379 continue; /* not populated */ 372 continue; /* not populated */
380 373
@@ -382,12 +375,54 @@ static int i82875p_probe1(struct pci_dev *pdev, int dev_idx)
382 csrow->last_page = cumul_size - 1; 375 csrow->last_page = cumul_size - 1;
383 csrow->nr_pages = cumul_size - last_cumul_size; 376 csrow->nr_pages = cumul_size - last_cumul_size;
384 last_cumul_size = cumul_size; 377 last_cumul_size = cumul_size;
385 csrow->grain = 1 << 12; /* I82875P_EAP has 4KiB reolution */ 378 csrow->grain = 1 << 12; /* I82875P_EAP has 4KiB reolution */
386 csrow->mtype = MEM_DDR; 379 csrow->mtype = MEM_DDR;
387 csrow->dtype = DEV_UNKNOWN; 380 csrow->dtype = DEV_UNKNOWN;
388 csrow->edac_mode = drc_ddim ? EDAC_SECDED : EDAC_NONE; 381 csrow->edac_mode = drc_ddim ? EDAC_SECDED : EDAC_NONE;
389 } 382 }
383}
384
385static int i82875p_probe1(struct pci_dev *pdev, int dev_idx)
386{
387 int rc = -ENODEV;
388 struct mem_ctl_info *mci;
389 struct i82875p_pvt *pvt;
390 struct pci_dev *ovrfl_pdev;
391 void __iomem *ovrfl_window;
392 u32 drc;
393 u32 nr_chans;
394 struct i82875p_error_info discard;
395
396 debugf0("%s()\n", __func__);
397 ovrfl_pdev = pci_get_device(PCI_VEND_DEV(INTEL, 82875_6), NULL);
398
399 if (i82875p_setup_overfl_dev(pdev, &ovrfl_pdev, &ovrfl_window))
400 return -ENODEV;
401 drc = readl(ovrfl_window + I82875P_DRC);
402 nr_chans = dual_channel_active(drc) + 1;
403 mci = edac_mc_alloc(sizeof(*pvt), I82875P_NR_CSROWS(nr_chans),
404 nr_chans);
405
406 if (!mci) {
407 rc = -ENOMEM;
408 goto fail0;
409 }
390 410
411 debugf3("%s(): init mci\n", __func__);
412 mci->dev = &pdev->dev;
413 mci->mtype_cap = MEM_FLAG_DDR;
414 mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED;
415 mci->edac_cap = EDAC_FLAG_UNKNOWN;
416 mci->mod_name = EDAC_MOD_STR;
417 mci->mod_ver = I82875P_REVISION;
418 mci->ctl_name = i82875p_devs[dev_idx].ctl_name;
419 mci->edac_check = i82875p_check;
420 mci->ctl_page_to_phys = NULL;
421 debugf3("%s(): init pvt\n", __func__);
422 pvt = (struct i82875p_pvt *) mci->pvt_info;
423 pvt->ovrfl_pdev = ovrfl_pdev;
424 pvt->ovrfl_window = ovrfl_window;
425 i82875p_init_csrows(mci, pdev, ovrfl_window, drc);
391 i82875p_get_error_info(mci, &discard); /* clear counters */ 426 i82875p_get_error_info(mci, &discard); /* clear counters */
392 427
393 /* Here we assume that we will never see multiple instances of this 428 /* Here we assume that we will never see multiple instances of this
@@ -395,25 +430,20 @@ static int i82875p_probe1(struct pci_dev *pdev, int dev_idx)
395 */ 430 */
396 if (edac_mc_add_mc(mci,0)) { 431 if (edac_mc_add_mc(mci,0)) {
397 debugf3("%s(): failed edac_mc_add_mc()\n", __func__); 432 debugf3("%s(): failed edac_mc_add_mc()\n", __func__);
398 goto fail3; 433 goto fail1;
399 } 434 }
400 435
401 /* get this far and it's successful */ 436 /* get this far and it's successful */
402 debugf3("%s(): success\n", __func__); 437 debugf3("%s(): success\n", __func__);
403 return 0; 438 return 0;
404 439
405fail3: 440fail1:
406 edac_mc_free(mci); 441 edac_mc_free(mci);
407 442
408fail2: 443fail0:
409 iounmap(ovrfl_window); 444 iounmap(ovrfl_window);
410
411fail1:
412 pci_release_regions(ovrfl_pdev); 445 pci_release_regions(ovrfl_pdev);
413 446
414#ifdef CORRECT_BIOS
415fail0:
416#endif
417 pci_disable_device(ovrfl_pdev); 447 pci_disable_device(ovrfl_pdev);
418 /* NOTE: the ovrfl proc entry and pci_dev are intentionally left */ 448 /* NOTE: the ovrfl proc entry and pci_dev are intentionally left */
419 return rc; 449 return rc;
diff --git a/drivers/edac/r82600_edac.c b/drivers/edac/r82600_edac.c
index eb3aa615dc57..fecdb2c9ee28 100644
--- a/drivers/edac/r82600_edac.c
+++ b/drivers/edac/r82600_edac.c
@@ -205,25 +205,72 @@ static void r82600_check(struct mem_ctl_info *mci)
205 r82600_process_error_info(mci, &info, 1); 205 r82600_process_error_info(mci, &info, 1);
206} 206}
207 207
208static int r82600_probe1(struct pci_dev *pdev, int dev_idx) 208static inline int ecc_enabled(u8 dramcr)
209{ 209{
210 int rc = -ENODEV; 210 return dramcr & BIT(5);
211}
212
213static void r82600_init_csrows(struct mem_ctl_info *mci, struct pci_dev *pdev,
214 u8 dramcr)
215{
216 struct csrow_info *csrow;
211 int index; 217 int index;
212 struct mem_ctl_info *mci = NULL; 218 u8 drbar; /* SDRAM Row Boundry Address Register */
219 u32 row_high_limit, row_high_limit_last;
220 u32 reg_sdram, ecc_on, row_base;
221
222 ecc_on = ecc_enabled(dramcr);
223 reg_sdram = dramcr & BIT(4);
224 row_high_limit_last = 0;
225
226 for (index = 0; index < mci->nr_csrows; index++) {
227 csrow = &mci->csrows[index];
228
229 /* find the DRAM Chip Select Base address and mask */
230 pci_read_config_byte(pdev, R82600_DRBA + index, &drbar);
231
232 debugf1("%s() Row=%d DRBA = %#0x\n", __func__, index, drbar);
233
234 row_high_limit = ((u32) drbar << 24);
235/* row_high_limit = ((u32)drbar << 24) | 0xffffffUL; */
236
237 debugf1("%s() Row=%d, Boundry Address=%#0x, Last = %#0x\n",
238 __func__, index, row_high_limit, row_high_limit_last);
239
240 /* Empty row [p.57] */
241 if (row_high_limit == row_high_limit_last)
242 continue;
243
244 row_base = row_high_limit_last;
245
246 csrow->first_page = row_base >> PAGE_SHIFT;
247 csrow->last_page = (row_high_limit >> PAGE_SHIFT) - 1;
248 csrow->nr_pages = csrow->last_page - csrow->first_page + 1;
249 /* Error address is top 19 bits - so granularity is *
250 * 14 bits */
251 csrow->grain = 1 << 14;
252 csrow->mtype = reg_sdram ? MEM_RDDR : MEM_DDR;
253 /* FIXME - check that this is unknowable with this chipset */
254 csrow->dtype = DEV_UNKNOWN;
255
256 /* Mode is global on 82600 */
257 csrow->edac_mode = ecc_on ? EDAC_SECDED : EDAC_NONE;
258 row_high_limit_last = row_high_limit;
259 }
260}
261
262static int r82600_probe1(struct pci_dev *pdev, int dev_idx)
263{
264 struct mem_ctl_info *mci;
213 u8 dramcr; 265 u8 dramcr;
214 u32 ecc_on;
215 u32 reg_sdram;
216 u32 eapr; 266 u32 eapr;
217 u32 scrub_disabled; 267 u32 scrub_disabled;
218 u32 sdram_refresh_rate; 268 u32 sdram_refresh_rate;
219 u32 row_high_limit_last = 0;
220 struct r82600_error_info discard; 269 struct r82600_error_info discard;
221 270
222 debugf0("%s()\n", __func__); 271 debugf0("%s()\n", __func__);
223 pci_read_config_byte(pdev, R82600_DRAMC, &dramcr); 272 pci_read_config_byte(pdev, R82600_DRAMC, &dramcr);
224 pci_read_config_dword(pdev, R82600_EAP, &eapr); 273 pci_read_config_dword(pdev, R82600_EAP, &eapr);
225 ecc_on = dramcr & BIT(5);
226 reg_sdram = dramcr & BIT(4);
227 scrub_disabled = eapr & BIT(31); 274 scrub_disabled = eapr & BIT(31);
228 sdram_refresh_rate = dramcr & (BIT(0) | BIT(1)); 275 sdram_refresh_rate = dramcr & (BIT(0) | BIT(1));
229 debugf2("%s(): sdram refresh rate = %#0x\n", __func__, 276 debugf2("%s(): sdram refresh rate = %#0x\n", __func__,
@@ -231,10 +278,8 @@ static int r82600_probe1(struct pci_dev *pdev, int dev_idx)
231 debugf2("%s(): DRAMC register = %#0x\n", __func__, dramcr); 278 debugf2("%s(): DRAMC register = %#0x\n", __func__, dramcr);
232 mci = edac_mc_alloc(0, R82600_NR_CSROWS, R82600_NR_CHANS); 279 mci = edac_mc_alloc(0, R82600_NR_CSROWS, R82600_NR_CHANS);
233 280
234 if (mci == NULL) { 281 if (mci == NULL)
235 rc = -ENOMEM; 282 return -ENOMEM;
236 goto fail;
237 }
238 283
239 debugf0("%s(): mci = %p\n", __func__, mci); 284 debugf0("%s(): mci = %p\n", __func__, mci);
240 mci->dev = &pdev->dev; 285 mci->dev = &pdev->dev;
@@ -250,7 +295,7 @@ static int r82600_probe1(struct pci_dev *pdev, int dev_idx)
250 * is possible. */ 295 * is possible. */
251 mci->edac_cap = EDAC_FLAG_NONE | EDAC_FLAG_EC | EDAC_FLAG_SECDED; 296 mci->edac_cap = EDAC_FLAG_NONE | EDAC_FLAG_EC | EDAC_FLAG_SECDED;
252 297
253 if (ecc_on) { 298 if (ecc_enabled(dramcr)) {
254 if (scrub_disabled) 299 if (scrub_disabled)
255 debugf3("%s(): mci = %p - Scrubbing disabled! EAP: " 300 debugf3("%s(): mci = %p - Scrubbing disabled! EAP: "
256 "%#0x\n", __func__, mci, eapr); 301 "%#0x\n", __func__, mci, eapr);
@@ -262,46 +307,7 @@ static int r82600_probe1(struct pci_dev *pdev, int dev_idx)
262 mci->ctl_name = "R82600"; 307 mci->ctl_name = "R82600";
263 mci->edac_check = r82600_check; 308 mci->edac_check = r82600_check;
264 mci->ctl_page_to_phys = NULL; 309 mci->ctl_page_to_phys = NULL;
265 310 r82600_init_csrows(mci, pdev, dramcr);
266 for (index = 0; index < mci->nr_csrows; index++) {
267 struct csrow_info *csrow = &mci->csrows[index];
268 u8 drbar; /* sDram Row Boundry Address Register */
269 u32 row_high_limit;
270 u32 row_base;
271
272 /* find the DRAM Chip Select Base address and mask */
273 pci_read_config_byte(pdev, R82600_DRBA + index, &drbar);
274
275 debugf1("MC%d: %s() Row=%d DRBA = %#0x\n", mci->mc_idx,
276 __func__, index, drbar);
277
278 row_high_limit = ((u32) drbar << 24);
279/* row_high_limit = ((u32)drbar << 24) | 0xffffffUL; */
280
281 debugf1("MC%d: %s() Row=%d, Boundry Address=%#0x, Last = "
282 "%#0x \n", mci->mc_idx, __func__, index,
283 row_high_limit, row_high_limit_last);
284
285 /* Empty row [p.57] */
286 if (row_high_limit == row_high_limit_last)
287 continue;
288
289 row_base = row_high_limit_last;
290 csrow->first_page = row_base >> PAGE_SHIFT;
291 csrow->last_page = (row_high_limit >> PAGE_SHIFT) - 1;
292 csrow->nr_pages = csrow->last_page - csrow->first_page + 1;
293 /* Error address is top 19 bits - so granularity is *
294 * 14 bits */
295 csrow->grain = 1 << 14;
296 csrow->mtype = reg_sdram ? MEM_RDDR : MEM_DDR;
297 /* FIXME - check that this is unknowable with this chipset */
298 csrow->dtype = DEV_UNKNOWN;
299
300 /* Mode is global on 82600 */
301 csrow->edac_mode = ecc_on ? EDAC_SECDED : EDAC_NONE;
302 row_high_limit_last = row_high_limit;
303 }
304
305 r82600_get_error_info(mci, &discard); /* clear counters */ 311 r82600_get_error_info(mci, &discard); /* clear counters */
306 312
307 /* Here we assume that we will never see multiple instances of this 313 /* Here we assume that we will never see multiple instances of this
@@ -324,10 +330,8 @@ static int r82600_probe1(struct pci_dev *pdev, int dev_idx)
324 return 0; 330 return 0;
325 331
326fail: 332fail:
327 if (mci) 333 edac_mc_free(mci);
328 edac_mc_free(mci); 334 return -ENODEV;
329
330 return rc;
331} 335}
332 336
333/* returns count (>= 0), or negative on error */ 337/* returns count (>= 0), or negative on error */
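r82600_init_csrows() above works from absolute row high limits rather than granularity-unit counts: each DRBA byte times 16 MiB is the top of a row, the previous limit is the row base, and an unchanged limit marks an empty row. A short standalone illustration with invented DRBA values and an assumed 4 KiB page size (not part of the patch):

#include <stdio.h>

#define PAGE_SHIFT	12	/* assumption: 4 KiB pages */

int main(void)
{
	/* Invented DRBA bytes: row high limits in 16 MiB units. */
	unsigned char drba[4] = { 8, 16, 16, 24 };
	unsigned long row_high_limit, row_high_limit_last = 0;
	int index;

	for (index = 0; index < 4; index++) {
		row_high_limit = (unsigned long)drba[index] << 24;

		if (row_high_limit == row_high_limit_last)
			continue;	/* empty row */

		printf("row %d: first_page 0x%lx  last_page 0x%lx\n",
		       index,
		       row_high_limit_last >> PAGE_SHIFT,
		       (row_high_limit >> PAGE_SHIFT) - 1);
		row_high_limit_last = row_high_limit;
	}
	return 0;
}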
an> list_for_each_entry_safe(un, tu, &sma->list_id, list_id) { list_del(&un->list_id); spin_lock(&un->ulp->lock); un->semid = -1; list_del_rcu(&un->list_proc); spin_unlock(&un->ulp->lock); kfree_rcu(un, rcu); } /* Wake up all pending processes and let them fail with EIDRM. */ INIT_LIST_HEAD(&tasks); list_for_each_entry_safe(q, tq, &sma->sem_pending, list) { unlink_queue(sma, q); wake_up_sem_queue_prepare(&tasks, q, -EIDRM); } /* Remove the semaphore set from the IDR */ sem_rmid(ns, sma); sem_unlock(sma); wake_up_sem_queue_do(&tasks); ns->used_sems -= sma->sem_nsems; security_sem_free(sma); ipc_rcu_putref(sma); } static unsigned long copy_semid_to_user(void __user *buf, struct semid64_ds *in, int version) { switch(version) { case IPC_64: return copy_to_user(buf, in, sizeof(*in)); case IPC_OLD: { struct semid_ds out; memset(&out, 0, sizeof(out)); ipc64_perm_to_ipc_perm(&in->sem_perm, &out.sem_perm); out.sem_otime = in->sem_otime; out.sem_ctime = in->sem_ctime; out.sem_nsems = in->sem_nsems; return copy_to_user(buf, &out, sizeof(out)); } default: return -EINVAL; } } static int semctl_nolock(struct ipc_namespace *ns, int semid, int cmd, int version, union semun arg) { int err; struct sem_array *sma; switch(cmd) { case IPC_INFO: case SEM_INFO: { struct seminfo seminfo; int max_id; err = security_sem_semctl(NULL, cmd); if (err) return err; memset(&seminfo,0,sizeof(seminfo)); seminfo.semmni = ns->sc_semmni; seminfo.semmns = ns->sc_semmns; seminfo.semmsl = ns->sc_semmsl; seminfo.semopm = ns->sc_semopm; seminfo.semvmx = SEMVMX; seminfo.semmnu = SEMMNU; seminfo.semmap = SEMMAP; seminfo.semume = SEMUME; down_read(&sem_ids(ns).rw_mutex); if (cmd == SEM_INFO) { seminfo.semusz = sem_ids(ns).in_use; seminfo.semaem = ns->used_sems; } else { seminfo.semusz = SEMUSZ; seminfo.semaem = SEMAEM; } max_id = ipc_get_maxid(&sem_ids(ns)); up_read(&sem_ids(ns).rw_mutex); if (copy_to_user (arg.__buf, &seminfo, sizeof(struct seminfo))) return -EFAULT; return (max_id < 0) ? 0: max_id; } case IPC_STAT: case SEM_STAT: { struct semid64_ds tbuf; int id; if (cmd == SEM_STAT) { sma = sem_lock(ns, semid); if (IS_ERR(sma)) return PTR_ERR(sma); id = sma->sem_perm.id; } else { sma = sem_lock_check(ns, semid); if (IS_ERR(sma)) return PTR_ERR(sma); id = 0; } err = -EACCES; if (ipcperms(ns, &sma->sem_perm, S_IRUGO)) goto out_unlock; err = security_sem_semctl(sma, cmd); if (err) goto out_unlock; memset(&tbuf, 0, sizeof(tbuf)); kernel_to_ipc64_perm(&sma->sem_perm, &tbuf.sem_perm); tbuf.sem_otime = sma->sem_otime; tbuf.sem_ctime = sma->sem_ctime; tbuf.sem_nsems = sma->sem_nsems; sem_unlock(sma); if (copy_semid_to_user (arg.buf, &tbuf, version)) return -EFAULT; return id; } default: return -EINVAL; } out_unlock: sem_unlock(sma); return err; } static int semctl_main(struct ipc_namespace *ns, int semid, int semnum, int cmd, int version, union semun arg) { struct sem_array *sma; struct sem* curr; int err; ushort fast_sem_io[SEMMSL_FAST]; ushort* sem_io = fast_sem_io; int nsems; struct list_head tasks; sma = sem_lock_check(ns, semid); if (IS_ERR(sma)) return PTR_ERR(sma); INIT_LIST_HEAD(&tasks); nsems = sma->sem_nsems; err = -EACCES; if (ipcperms(ns, &sma->sem_perm, (cmd == SETVAL || cmd == SETALL) ? 
S_IWUGO : S_IRUGO)) goto out_unlock; err = security_sem_semctl(sma, cmd); if (err) goto out_unlock; err = -EACCES; switch (cmd) { case GETALL: { ushort __user *array = arg.array; int i; if(nsems > SEMMSL_FAST) { sem_getref_and_unlock(sma); sem_io = ipc_alloc(sizeof(ushort)*nsems); if(sem_io == NULL) { sem_putref(sma); return -ENOMEM; } sem_lock_and_putref(sma); if (sma->sem_perm.deleted) { sem_unlock(sma); err = -EIDRM; goto out_free; } } for (i = 0; i < sma->sem_nsems; i++) sem_io[i] = sma->sem_base[i].semval; sem_unlock(sma); err = 0; if(copy_to_user(array, sem_io, nsems*sizeof(ushort))) err = -EFAULT; goto out_free; } case SETALL: { int i; struct sem_undo *un; sem_getref_and_unlock(sma); if(nsems > SEMMSL_FAST) { sem_io = ipc_alloc(sizeof(ushort)*nsems); if(sem_io == NULL) { sem_putref(sma); return -ENOMEM; } } if (copy_from_user (sem_io, arg.array, nsems*sizeof(ushort))) { sem_putref(sma); err = -EFAULT; goto out_free; } for (i = 0; i < nsems; i++) { if (sem_io[i] > SEMVMX) { sem_putref(sma); err = -ERANGE; goto out_free; } } sem_lock_and_putref(sma); if (sma->sem_perm.deleted) { sem_unlock(sma); err = -EIDRM; goto out_free; } for (i = 0; i < nsems; i++) sma->sem_base[i].semval = sem_io[i]; assert_spin_locked(&sma->sem_perm.lock); list_for_each_entry(un, &sma->list_id, list_id) { for (i = 0; i < nsems; i++) un->semadj[i] = 0; } sma->sem_ctime = get_seconds(); /* maybe some queued-up processes were waiting for this */ do_smart_update(sma, NULL, 0, 0, &tasks); err = 0; goto out_unlock; } /* GETVAL, GETPID, GETNCTN, GETZCNT, SETVAL: fall-through */ } err = -EINVAL; if(semnum < 0 || semnum >= nsems) goto out_unlock; curr = &sma->sem_base[semnum]; switch (cmd) { case GETVAL: err = curr->semval; goto out_unlock; case GETPID: err = curr->sempid; goto out_unlock; case GETNCNT: err = count_semncnt(sma,semnum); goto out_unlock; case GETZCNT: err = count_semzcnt(sma,semnum); goto out_unlock; case SETVAL: { int val = arg.val; struct sem_undo *un; err = -ERANGE; if (val > SEMVMX || val < 0) goto out_unlock; assert_spin_locked(&sma->sem_perm.lock); list_for_each_entry(un, &sma->list_id, list_id) un->semadj[semnum] = 0; curr->semval = val; curr->sempid = task_tgid_vnr(current); sma->sem_ctime = get_seconds(); /* maybe some queued-up processes were waiting for this */ do_smart_update(sma, NULL, 0, 0, &tasks); err = 0; goto out_unlock; } } out_unlock: sem_unlock(sma); wake_up_sem_queue_do(&tasks); out_free: if(sem_io != fast_sem_io) ipc_free(sem_io, sizeof(ushort)*nsems); return err; } static inline unsigned long copy_semid_from_user(struct semid64_ds *out, void __user *buf, int version) { switch(version) { case IPC_64: if (copy_from_user(out, buf, sizeof(*out))) return -EFAULT; return 0; case IPC_OLD: { struct semid_ds tbuf_old; if(copy_from_user(&tbuf_old, buf, sizeof(tbuf_old))) return -EFAULT; out->sem_perm.uid = tbuf_old.sem_perm.uid; out->sem_perm.gid = tbuf_old.sem_perm.gid; out->sem_perm.mode = tbuf_old.sem_perm.mode; return 0; } default: return -EINVAL; } } /* * This function handles some semctl commands which require the rw_mutex * to be held in write mode. * NOTE: no locks must be held, the rw_mutex is taken inside this function. 
*/ static int semctl_down(struct ipc_namespace *ns, int semid, int cmd, int version, union semun arg) { struct sem_array *sma; int err; struct semid64_ds semid64; struct kern_ipc_perm *ipcp; if(cmd == IPC_SET) { if (copy_semid_from_user(&semid64, arg.buf, version)) return -EFAULT; } ipcp = ipcctl_pre_down(ns, &sem_ids(ns), semid, cmd, &semid64.sem_perm, 0); if (IS_ERR(ipcp)) return PTR_ERR(ipcp); sma = container_of(ipcp, struct sem_array, sem_perm); err = security_sem_semctl(sma, cmd); if (err) goto out_unlock; switch(cmd){ case IPC_RMID: freeary(ns, ipcp); goto out_up; case IPC_SET: err = ipc_update_perm(&semid64.sem_perm, ipcp); if (err) goto out_unlock; sma->sem_ctime = get_seconds(); break; default: err = -EINVAL; } out_unlock: sem_unlock(sma); out_up: up_write(&sem_ids(ns).rw_mutex); return err; } SYSCALL_DEFINE(semctl)(int semid, int semnum, int cmd, union semun arg) { int err = -EINVAL; int version; struct ipc_namespace *ns; if (semid < 0) return -EINVAL; version = ipc_parse_version(&cmd); ns = current->nsproxy->ipc_ns; switch(cmd) { case IPC_INFO: case SEM_INFO: case IPC_STAT: case SEM_STAT: err = semctl_nolock(ns, semid, cmd, version, arg); return err; case GETALL: case GETVAL: case GETPID: case GETNCNT: case GETZCNT: case SETVAL: case SETALL: err = semctl_main(ns,semid,semnum,cmd,version,arg); return err; case IPC_RMID: case IPC_SET: err = semctl_down(ns, semid, cmd, version, arg); return err; default: return -EINVAL; } } #ifdef CONFIG_HAVE_SYSCALL_WRAPPERS asmlinkage long SyS_semctl(int semid, int semnum, int cmd, union semun arg) { return SYSC_semctl((int) semid, (int) semnum, (int) cmd, arg); } SYSCALL_ALIAS(sys_semctl, SyS_semctl); #endif /* If the task doesn't already have a undo_list, then allocate one * here. We guarantee there is only one thread using this undo list, * and current is THE ONE * * If this allocation and assignment succeeds, but later * portions of this code fail, there is no need to free the sem_undo_list. * Just let it stay associated with the task, and it'll be freed later * at exit time. * * This can block, so callers must hold no locks. */ static inline int get_undo_list(struct sem_undo_list **undo_listp) { struct sem_undo_list *undo_list; undo_list = current->sysvsem.undo_list; if (!undo_list) { undo_list = kzalloc(sizeof(*undo_list), GFP_KERNEL); if (undo_list == NULL) return -ENOMEM; spin_lock_init(&undo_list->lock); atomic_set(&undo_list->refcnt, 1); INIT_LIST_HEAD(&undo_list->list_proc); current->sysvsem.undo_list = undo_list; } *undo_listp = undo_list; return 0; } static struct sem_undo *__lookup_undo(struct sem_undo_list *ulp, int semid) { struct sem_undo *un; list_for_each_entry_rcu(un, &ulp->list_proc, list_proc) { if (un->semid == semid) return un; } return NULL; } static struct sem_undo *lookup_undo(struct sem_undo_list *ulp, int semid) { struct sem_undo *un; assert_spin_locked(&ulp->lock); un = __lookup_undo(ulp, semid); if (un) { list_del_rcu(&un->list_proc); list_add_rcu(&un->list_proc, &ulp->list_proc); } return un; } /** * find_alloc_undo - Lookup (and if not present create) undo array * @ns: namespace * @semid: semaphore array id * * The function looks up (and if not present creates) the undo structure. * The size of the undo structure depends on the size of the semaphore * array, thus the alloc path is not that straightforward. * Lifetime-rules: sem_undo is rcu-protected, on success, the function * performs a rcu_read_lock(). 
/**
 * find_alloc_undo - Lookup (and if not present create) undo array
 * @ns: namespace
 * @semid: semaphore array id
 *
 * The function looks up (and if not present creates) the undo structure.
 * The size of the undo structure depends on the size of the semaphore
 * array, thus the alloc path is not that straightforward.
 * Lifetime-rules: sem_undo is rcu-protected, on success, the function
 * performs a rcu_read_lock().
 */
static struct sem_undo *find_alloc_undo(struct ipc_namespace *ns, int semid)
{
	struct sem_array *sma;
	struct sem_undo_list *ulp;
	struct sem_undo *un, *new;
	int nsems;
	int error;

	error = get_undo_list(&ulp);
	if (error)
		return ERR_PTR(error);

	rcu_read_lock();
	spin_lock(&ulp->lock);
	un = lookup_undo(ulp, semid);
	spin_unlock(&ulp->lock);
	if (likely(un != NULL))
		goto out;
	rcu_read_unlock();

	/* no undo structure around - allocate one. */
	/* step 1: figure out the size of the semaphore array */
	sma = sem_lock_check(ns, semid);
	if (IS_ERR(sma))
		return ERR_CAST(sma);

	nsems = sma->sem_nsems;
	sem_getref_and_unlock(sma);

	/* step 2: allocate new undo structure */
	new = kzalloc(sizeof(struct sem_undo) + sizeof(short)*nsems, GFP_KERNEL);
	if (!new) {
		sem_putref(sma);
		return ERR_PTR(-ENOMEM);
	}

	/* step 3: Acquire the lock on semaphore array */
	sem_lock_and_putref(sma);
	if (sma->sem_perm.deleted) {
		sem_unlock(sma);
		kfree(new);
		un = ERR_PTR(-EIDRM);
		goto out;
	}
	spin_lock(&ulp->lock);

	/*
	 * step 4: check for races: did someone else allocate the undo struct?
	 */
	un = lookup_undo(ulp, semid);
	if (un) {
		kfree(new);
		goto success;
	}
	/* step 5: initialize & link new undo structure */
	new->semadj = (short *) &new[1];
	new->ulp = ulp;
	new->semid = semid;
	assert_spin_locked(&ulp->lock);
	list_add_rcu(&new->list_proc, &ulp->list_proc);
	assert_spin_locked(&sma->sem_perm.lock);
	list_add(&new->list_id, &sma->list_id);
	un = new;

success:
	spin_unlock(&ulp->lock);
	rcu_read_lock();
	sem_unlock(sma);
out:
	return un;
}

/**
 * get_queue_result - Retrieve the result code from sem_queue
 * @q: Pointer to queue structure
 *
 * Retrieve the return code from the pending queue. If IN_WAKEUP is found in
 * q->status, then we must loop until the value is replaced with the final
 * value: This may happen if a task is woken up by an unrelated event (e.g.
 * signal) and in parallel the task is woken up by another task because it got
 * the requested semaphores.
 *
 * The function can be called with or without holding the semaphore spinlock.
 */
static int get_queue_result(struct sem_queue *q)
{
	int error;

	error = q->status;
	while (unlikely(error == IN_WAKEUP)) {
		cpu_relax();
		error = q->status;
	}

	return error;
}

SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops,
		unsigned, nsops, const struct timespec __user *, timeout)
{
	int error = -EINVAL;
	struct sem_array *sma;
	struct sembuf fast_sops[SEMOPM_FAST];
	struct sembuf *sops = fast_sops, *sop;
	struct sem_undo *un;
	int undos = 0, alter = 0, max;
	struct sem_queue queue;
	unsigned long jiffies_left = 0;
	struct ipc_namespace *ns;
	struct list_head tasks;

	ns = current->nsproxy->ipc_ns;

	if (nsops < 1 || semid < 0)
		return -EINVAL;
	if (nsops > ns->sc_semopm)
		return -E2BIG;
	if (nsops > SEMOPM_FAST) {
		sops = kmalloc(sizeof(*sops)*nsops, GFP_KERNEL);
		if (sops == NULL)
			return -ENOMEM;
	}
	if (copy_from_user(sops, tsops, nsops * sizeof(*tsops))) {
		error = -EFAULT;
		goto out_free;
	}
	if (timeout) {
		struct timespec _timeout;
		if (copy_from_user(&_timeout, timeout, sizeof(*timeout))) {
			error = -EFAULT;
			goto out_free;
		}
		if (_timeout.tv_sec < 0 || _timeout.tv_nsec < 0 ||
			_timeout.tv_nsec >= 1000000000L) {
			error = -EINVAL;
			goto out_free;
		}
		jiffies_left = timespec_to_jiffies(&_timeout);
	}
	max = 0;
	for (sop = sops; sop < sops + nsops; sop++) {
		if (sop->sem_num >= max)
			max = sop->sem_num;
		if (sop->sem_flg & SEM_UNDO)
			undos = 1;
		if (sop->sem_op != 0)
			alter = 1;
	}

	if (undos) {
		un = find_alloc_undo(ns, semid);
		if (IS_ERR(un)) {
			error = PTR_ERR(un);
			goto out_free;
		}
	} else
		un = NULL;

	INIT_LIST_HEAD(&tasks);

	sma = sem_lock_check(ns, semid);
	if (IS_ERR(sma)) {
		if (un)
			rcu_read_unlock();
		error = PTR_ERR(sma);
		goto out_free;
	}

	/*
	 * semid identifiers are not unique - find_alloc_undo may have
	 * allocated an undo structure, it was invalidated by an RMID
	 * and now a new array has received the same id. Check and fail.
	 * This case can be detected checking un->semid. The existence of
	 * "un" itself is guaranteed by rcu.
	 */
	error = -EIDRM;
	if (un) {
		if (un->semid == -1) {
			rcu_read_unlock();
			goto out_unlock_free;
		} else {
			/*
			 * rcu lock can be released, "un" cannot disappear:
			 * - sem_lock is acquired, thus IPC_RMID is
			 *   impossible.
			 * - exit_sem is impossible, it always operates on
			 *   current (or a dead task).
			 */
			rcu_read_unlock();
		}
	}

	error = -EFBIG;
	if (max >= sma->sem_nsems)
		goto out_unlock_free;

	error = -EACCES;
	if (ipcperms(ns, &sma->sem_perm, alter ? S_IWUGO : S_IRUGO))
		goto out_unlock_free;

	error = security_sem_semop(sma, sops, nsops, alter);
	if (error)
		goto out_unlock_free;

	error = try_atomic_semop(sma, sops, nsops, un, task_tgid_vnr(current));
	if (error <= 0) {
		if (alter && error == 0)
			do_smart_update(sma, sops, nsops, 1, &tasks);
		goto out_unlock_free;
	}

	/* We need to sleep on this operation, so we put the current
	 * task into the pending queue and go to sleep.
	 */

	queue.sops = sops;
	queue.nsops = nsops;
	queue.undo = un;
	queue.pid = task_tgid_vnr(current);
	queue.alter = alter;
	if (alter)
		list_add_tail(&queue.list, &sma->sem_pending);
	else
		list_add(&queue.list, &sma->sem_pending);

	if (nsops == 1) {
		struct sem *curr;
		curr = &sma->sem_base[sops->sem_num];

		if (alter)
			list_add_tail(&queue.simple_list, &curr->sem_pending);
		else
			list_add(&queue.simple_list, &curr->sem_pending);
	} else {
		INIT_LIST_HEAD(&queue.simple_list);
		sma->complex_count++;
	}

	queue.status = -EINTR;
	queue.sleeper = current;

sleep_again:
	current->state = TASK_INTERRUPTIBLE;
	sem_unlock(sma);

	if (timeout)
		jiffies_left = schedule_timeout(jiffies_left);
	else
		schedule();

	error = get_queue_result(&queue);

	if (error != -EINTR) {
		/* fast path: update_queue already obtained all requested
		 * resources.
		 * Perform a smp_mb(): User space could assume that semop()
		 * is a memory barrier: Without the mb(), the cpu could
		 * speculatively read in user space stale data that was
		 * overwritten by the previous owner of the semaphore.
		 */
		smp_mb();

		goto out_free;
	}

	sma = sem_lock(ns, semid);

	/*
	 * Wait until it's guaranteed that no wake_up_sem_queue_do() is ongoing.
	 */
	error = get_queue_result(&queue);

	/*
	 * Array removed? If yes, leave without sem_unlock().
	 */
	if (IS_ERR(sma)) {
		goto out_free;
	}

	/*
	 * If queue.status != -EINTR we are woken up by another process.
	 * Leave without unlink_queue(), but with sem_unlock().
	 */
	if (error != -EINTR) {
		goto out_unlock_free;
	}

	/*
	 * If an interrupt occurred we have to clean up the queue
	 */
	if (timeout && jiffies_left == 0)
		error = -EAGAIN;

	/*
	 * If the wakeup was spurious, just retry
	 */
	if (error == -EINTR && !signal_pending(current))
		goto sleep_again;

	unlink_queue(sma, &queue);

out_unlock_free:
	sem_unlock(sma);
	wake_up_sem_queue_do(&tasks);
out_free:
	if (sops != fast_sops)
		kfree(sops);
	return error;
}

SYSCALL_DEFINE3(semop, int, semid, struct sembuf __user *, tsops,
		unsigned, nsops)
{
	return sys_semtimedop(semid, tsops, nsops, NULL);
}

/* If CLONE_SYSVSEM is set, establish sharing of SEM_UNDO state between
 * parent and child tasks.
 */

int copy_semundo(unsigned long clone_flags, struct task_struct *tsk)
{
	struct sem_undo_list *undo_list;
	int error;

	if (clone_flags & CLONE_SYSVSEM) {
		error = get_undo_list(&undo_list);
		if (error)
			return error;
		atomic_inc(&undo_list->refcnt);
		tsk->sysvsem.undo_list = undo_list;
	} else
		tsk->sysvsem.undo_list = NULL;

	return 0;
}

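/*
 * Usage sketch (illustrative only; child_fn and child_stack are
 * placeholders, and the extra clone flags are an assumption): passing
 * CLONE_SYSVSEM to clone() is what routes through copy_semundo() above,
 * so parent and child share one undo_list (refcnt incremented) and the
 * registered SEM_UNDO adjustments are applied once, when the last user
 * reaches exit_sem() below.
 *
 *	#define _GNU_SOURCE
 *	#include <sched.h>
 *	#include <signal.h>
 *
 *	clone(child_fn, child_stack,
 *	      CLONE_VM | CLONE_SYSVSEM | SIGCHLD, NULL);
 */
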
/*
 * add semadj values to semaphores, free undo structures.
 * undo structures are not freed when semaphore arrays are destroyed
 * so some of them may be out of date.
 * IMPLEMENTATION NOTE: There is some confusion over whether the
 * set of adjustments that needs to be done should be done in an atomic
 * manner or not. That is, if we are attempting to decrement the semval
 * should we queue up and wait until we can do so legally?
 * The original implementation attempted to do this (queue and wait).
 * The current implementation does not do so. The POSIX standard
 * and SVID should be consulted to determine what behavior is mandated.
 */
void exit_sem(struct task_struct *tsk)
{
	struct sem_undo_list *ulp;

	ulp = tsk->sysvsem.undo_list;
	if (!ulp)
		return;
	tsk->sysvsem.undo_list = NULL;

	if (!atomic_dec_and_test(&ulp->refcnt))
		return;

	for (;;) {
		struct sem_array *sma;
		struct sem_undo *un;
		struct list_head tasks;
		int semid;
		int i;

		rcu_read_lock();
		un = list_entry_rcu(ulp->list_proc.next,
				    struct sem_undo, list_proc);
		if (&un->list_proc == &ulp->list_proc)
			semid = -1;
		else
			semid = un->semid;
		rcu_read_unlock();

		if (semid == -1)
			break;

		sma = sem_lock_check(tsk->nsproxy->ipc_ns, un->semid);

		/* exit_sem raced with IPC_RMID, nothing to do */
		if (IS_ERR(sma))
			continue;

		un = __lookup_undo(ulp, semid);
		if (un == NULL) {
			/* exit_sem raced with IPC_RMID+semget() that created
			 * exactly the same semid. Nothing to do.
			 */
			sem_unlock(sma);
			continue;
		}

		/* remove un from the linked lists */
		assert_spin_locked(&sma->sem_perm.lock);
		list_del(&un->list_id);

		spin_lock(&ulp->lock);
		list_del_rcu(&un->list_proc);
		spin_unlock(&ulp->lock);

		/* perform adjustments registered in un */
		for (i = 0; i < sma->sem_nsems; i++) {
			struct sem *semaphore = &sma->sem_base[i];
			if (un->semadj[i]) {
				semaphore->semval += un->semadj[i];
				/*
				 * Range checks of the new semaphore value,
				 * not defined by sus:
				 * - Some unices ignore the undo entirely
				 *   (e.g. HP UX 11i 11.22, Tru64 V5.1)
				 * - some cap the value (e.g. FreeBSD caps
				 *   at 0, but doesn't enforce SEMVMX)
				 *
				 * Linux caps the semaphore value, both at 0
				 * and at SEMVMX.
				 *
				 *	Manfred <manfred@colorfullife.com>
				 */
				if (semaphore->semval < 0)
					semaphore->semval = 0;
				if (semaphore->semval > SEMVMX)
					semaphore->semval = SEMVMX;
				semaphore->sempid = task_tgid_vnr(current);
			}
		}
		/* maybe some queued-up processes were waiting for this */
		INIT_LIST_HEAD(&tasks);
		do_smart_update(sma, NULL, 0, 1, &tasks);
		sem_unlock(sma);
		wake_up_sem_queue_do(&tasks);

		kfree_rcu(un, rcu);
	}
	kfree(ulp);
}

#ifdef CONFIG_PROC_FS
static int sysvipc_sem_proc_show(struct seq_file *s, void *it)
{
	struct user_namespace *user_ns = seq_user_ns(s);
	struct sem_array *sma = it;

	return seq_printf(s,
			  "%10d %10d %4o %10u %5u %5u %5u %5u %10lu %10lu\n",
			  sma->sem_perm.key,
			  sma->sem_perm.id,
			  sma->sem_perm.mode,
			  sma->sem_nsems,
			  from_kuid_munged(user_ns, sma->sem_perm.uid),
			  from_kgid_munged(user_ns, sma->sem_perm.gid),
			  from_kuid_munged(user_ns, sma->sem_perm.cuid),
			  from_kgid_munged(user_ns, sma->sem_perm.cgid),
			  sma->sem_otime,
			  sma->sem_ctime);
}
#endif
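
/*
 * Illustrative note (not part of the original source): the line emitted
 * by sysvipc_sem_proc_show() backs each entry of /proc/sysvipc/sem; per
 * the format string above, the columns are, in order: key, semid, perms
 * (octal mode), nsems, uid, gid, cuid, cgid, otime, ctime.  A quick way
 * to inspect them from userspace:
 *
 *	$ cat /proc/sysvipc/sem
 */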