-rw-r--r--  drivers/edac/amd64_edac.c | 293
-rw-r--r--  drivers/edac/amd64_edac.h |  81
2 files changed, 136 insertions(+), 238 deletions(-)
diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c
index 62b4ae9eb337..306850879f20 100644
--- a/drivers/edac/amd64_edac.c
+++ b/drivers/edac/amd64_edac.c
@@ -265,37 +265,6 @@ static int amd64_get_scrub_rate(struct mem_ctl_info *mci)
 	return retval;
 }
 
-/* Map from a CSROW entry to the mask entry that operates on it */
-static inline u32 amd64_map_to_dcs_mask(struct amd64_pvt *pvt, int csrow)
-{
-	if (boot_cpu_data.x86 == 0xf && pvt->ext_model < K8_REV_F)
-		return csrow;
-	else
-		return csrow >> 1;
-}
-
-/* return the 'base' address the i'th CS entry of the 'dct' DRAM controller */
-static u32 amd64_get_dct_base(struct amd64_pvt *pvt, int dct, int csrow)
-{
-	if (dct == 0)
-		return pvt->dcsb0[csrow];
-	else
-		return pvt->dcsb1[csrow];
-}
-
-/*
- * Return the 'mask' address the i'th CS entry. This function is needed because
- * there number of DCSM registers on Rev E and prior vs Rev F and later is
- * different.
- */
-static u32 amd64_get_dct_mask(struct amd64_pvt *pvt, int dct, int csrow)
-{
-	if (dct == 0)
-		return pvt->dcsm0[amd64_map_to_dcs_mask(pvt, csrow)];
-	else
-		return pvt->dcsm1[amd64_map_to_dcs_mask(pvt, csrow)];
-}
-
 /*
  * returns true if the SysAddr given by sys_addr matches the
  * DRAM base/limit associated with node_id
@@ -386,37 +355,47 @@ err_no_match:
 }
 
 /*
- * Extract the DRAM CS base address from selected csrow register.
+ * compute the CS base address of the @csrow on the DRAM controller @dct.
+ * For details see F2x[5C:40] in the processor's BKDG
  */
-static u64 base_from_dct_base(struct amd64_pvt *pvt, int csrow)
+static void get_cs_base_and_mask(struct amd64_pvt *pvt, int csrow, u8 dct,
+				 u64 *base, u64 *mask)
 {
-	return ((u64) (amd64_get_dct_base(pvt, 0, csrow) & pvt->dcsb_base)) <<
-				pvt->dcs_shift;
-}
+	u64 csbase, csmask, base_bits, mask_bits;
+	u8 addr_shift;
 
-/*
- * Extract the mask from the dcsb0[csrow] entry in a CPU revision-specific way.
- */
-static u64 mask_from_dct_mask(struct amd64_pvt *pvt, int csrow)
-{
-	u64 dcsm_bits, other_bits;
-	u64 mask;
-
-	/* Extract bits from DRAM CS Mask. */
-	dcsm_bits = amd64_get_dct_mask(pvt, 0, csrow) & pvt->dcsm_mask;
-
-	other_bits = pvt->dcsm_mask;
-	other_bits = ~(other_bits << pvt->dcs_shift);
-
-	/*
-	 * The extracted bits from DCSM belong in the spaces represented by
-	 * the cleared bits in other_bits.
-	 */
-	mask = (dcsm_bits << pvt->dcs_shift) | other_bits;
-
-	return mask;
+	if (boot_cpu_data.x86 == 0xf && pvt->ext_model < K8_REV_F) {
+		csbase     = pvt->csels[dct].csbases[csrow];
+		csmask     = pvt->csels[dct].csmasks[csrow];
+		base_bits  = GENMASK(21, 31) | GENMASK(9, 15);
+		mask_bits  = GENMASK(21, 29) | GENMASK(9, 15);
+		addr_shift = 4;
+	} else {
+		csbase     = pvt->csels[dct].csbases[csrow];
+		csmask     = pvt->csels[dct].csmasks[csrow >> 1];
+		addr_shift = 8;
+
+		if (boot_cpu_data.x86 == 0x15)
+			base_bits = mask_bits = GENMASK(19,30) | GENMASK(5,13);
+		else
+			base_bits = mask_bits = GENMASK(19,28) | GENMASK(5,13);
+	}
+
+	*base  = (csbase & base_bits) << addr_shift;
+
+	*mask  = ~0ULL;
+	/* poke holes for the csmask */
+	*mask &= ~(mask_bits << addr_shift);
+	/* OR them in */
+	*mask |= (csmask & mask_bits) << addr_shift;
 }
 
+#define for_each_chip_select(i, dct, pvt) \
+	for (i = 0; i < pvt->csels[dct].b_cnt; i++)
+
+#define for_each_chip_select_mask(i, dct, pvt) \
+	for (i = 0; i < pvt->csels[dct].m_cnt; i++)
+
 /*
  * @input_addr is an InputAddr associated with the node given by mci. Return the
  * csrow that input_addr maps to, or -1 on failure (no csrow claims input_addr).
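
(Working note, not part of the patch: a standalone sketch of what the new get_cs_base_and_mask() computes for a rev F / F10h part, reusing the GENMASK helper this patch adds to amd64_edac.h. The register contents below are made up for illustration.)

    #include <stdint.h>
    #include <stdio.h>

    #define GENMASK(lo, hi) (((1ULL << ((hi) - (lo) + 1)) - 1) << (lo))

    int main(void)
    {
            /* hypothetical F2x40/F2x60 (DCSB0/DCSM0) contents */
            uint64_t csbase     = 0x00200001;  /* a base bit plus CSEnable */
            uint64_t csmask     = 0x00083fe0;
            uint64_t dcs_bits   = GENMASK(19, 28) | GENMASK(5, 13);
            unsigned addr_shift = 8;

            uint64_t base = (csbase & dcs_bits) << addr_shift;

            uint64_t mask = ~0ULL;
            mask &= ~(dcs_bits << addr_shift);          /* poke holes */
            mask |= (csmask & dcs_bits) << addr_shift;  /* OR them in */

            /* bits *set* in mask are "don't care" in the csrow compare */
            printf("base=%#llx mask=%#llx\n",
                   (unsigned long long)base, (unsigned long long)mask);
            return 0;
    }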
@@ -429,19 +408,13 @@ static int input_addr_to_csrow(struct mem_ctl_info *mci, u64 input_addr)
 
 	pvt = mci->pvt_info;
 
-	/*
-	 * Here we use the DRAM CS Base and DRAM CS Mask registers. For each CS
-	 * base/mask register pair, test the condition shown near the start of
-	 * section 3.5.4 (p. 84, BKDG #26094, K8, revA-E).
-	 */
-	for (csrow = 0; csrow < pvt->cs_count; csrow++) {
-
-		/* This DRAM chip select is disabled on this node */
-		if ((pvt->dcsb0[csrow] & K8_DCSB_CS_ENABLE) == 0)
+	for_each_chip_select(csrow, 0, pvt) {
+		if (!csrow_enabled(csrow, 0, pvt))
 			continue;
 
-		base = base_from_dct_base(pvt, csrow);
-		mask = ~mask_from_dct_mask(pvt, csrow);
+		get_cs_base_and_mask(pvt, csrow, 0, &base, &mask);
+
+		mask = ~mask;
 
 		if ((input_addr & mask) == (base & mask)) {
 			debugf2("InputAddr 0x%lx matches csrow %d (node %d)\n",
@@ -451,7 +424,6 @@ static int input_addr_to_csrow(struct mem_ctl_info *mci, u64 input_addr)
 			return csrow;
 		}
 	}
-
 	debugf2("no matching csrow for InputAddr 0x%lx (MC node %d)\n",
 		(unsigned long)input_addr, pvt->mc_node_id);
 
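
(Working note, not part of the patch: the loop above compares on the inverted mask; after mask = ~mask, the set bits are the ones that must agree between the address and the base. With made-up numbers:)

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
            /* made-up csrow: base at 1 GB, low 28 bits "don't care" */
            uint64_t base = 0x40000000, mask = 0x0fffffff;

            mask = ~mask;  /* keep only the bits that must match */

            assert((0x43abcd00ULL & mask) == (base & mask));  /* claimed */
            assert((0x53abcd00ULL & mask) != (base & mask));  /* not ours */
            return 0;
    }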
@@ -779,13 +751,12 @@ static void find_csrow_limits(struct mem_ctl_info *mci, int csrow,
 	u64 base, mask;
 
 	pvt = mci->pvt_info;
-	BUG_ON((csrow < 0) || (csrow >= pvt->cs_count));
+	BUG_ON((csrow < 0) || (csrow >= pvt->csels[0].b_cnt));
 
-	base = base_from_dct_base(pvt, csrow);
-	mask = mask_from_dct_mask(pvt, csrow);
+	get_cs_base_and_mask(pvt, csrow, 0, &base, &mask);
 
 	*input_addr_min = base & ~mask;
-	*input_addr_max = base | mask | pvt->dcs_mask_notused;
+	*input_addr_max = base | mask;
 }
 
 /* Map the Error address to a PAGE and PAGE OFFSET. */
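
(Working note, not part of the patch: the old dcs_mask_notused term disappears here because get_cs_base_and_mask() already returns those gap bits set in the mask, so min and max fall straight out of the base/mask pair. Continuing the made-up example:)

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
            /* base at 1 GB, 256 MB worth of don't-care bits */
            uint64_t base = 0x40000000, mask = 0x0fffffff;

            assert((base & ~mask) == 0x40000000);  /* input_addr_min */
            assert((base |  mask) == 0x4fffffff);  /* input_addr_max */
            return 0;
    }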
@@ -913,93 +884,62 @@ static void amd64_read_dbam_reg(struct amd64_pvt *pvt)
 }
 
 /*
- * NOTE: CPU Revision Dependent code: Rev E and Rev F
- *
- * Set the DCSB and DCSM mask values depending on the CPU revision value. Also
- * set the shift factor for the DCSB and DCSM values.
- *
- * ->dcs_mask_notused, RevE:
- *
- * To find the max InputAddr for the csrow, start with the base address and set
- * all bits that are "don't care" bits in the test at the start of section
- * 3.5.4 (p. 84).
- *
- * The "don't care" bits are all set bits in the mask and all bits in the gaps
- * between bit ranges [35:25] and [19:13]. The value REV_E_DCS_NOTUSED_BITS
- * represents bits [24:20] and [12:0], which are all bits in the above-mentioned
- * gaps.
- *
- * ->dcs_mask_notused, RevF and later:
- *
- * To find the max InputAddr for the csrow, start with the base address and set
- * all bits that are "don't care" bits in the test at the start of NPT section
- * 4.5.4 (p. 87).
- *
- * The "don't care" bits are all set bits in the mask and all bits in the gaps
- * between bit ranges [36:27] and [21:13].
- *
- * The value REV_F_F1Xh_DCS_NOTUSED_BITS represents bits [26:22] and [12:0],
- * which are all bits in the above-mentioned gaps.
+ * see BKDG, F2x[1,0][5C:40], F2[1,0][6C:60]
  */
-static void amd64_set_dct_base_and_mask(struct amd64_pvt *pvt)
+static void prep_chip_selects(struct amd64_pvt *pvt)
 {
-
 	if (boot_cpu_data.x86 == 0xf && pvt->ext_model < K8_REV_F) {
-		pvt->dcsb_base		= REV_E_DCSB_BASE_BITS;
-		pvt->dcsm_mask		= REV_E_DCSM_MASK_BITS;
-		pvt->dcs_mask_notused	= REV_E_DCS_NOTUSED_BITS;
-		pvt->dcs_shift		= REV_E_DCS_SHIFT;
-		pvt->cs_count		= 8;
-		pvt->num_dcsm		= 8;
+		pvt->csels[0].b_cnt = pvt->csels[1].b_cnt = 8;
+		pvt->csels[0].m_cnt = pvt->csels[1].m_cnt = 8;
 	} else {
-		pvt->dcsb_base		= REV_F_F1Xh_DCSB_BASE_BITS;
-		pvt->dcsm_mask		= REV_F_F1Xh_DCSM_MASK_BITS;
-		pvt->dcs_mask_notused	= REV_F_F1Xh_DCS_NOTUSED_BITS;
-		pvt->dcs_shift		= REV_F_F1Xh_DCS_SHIFT;
-		pvt->cs_count		= 8;
-		pvt->num_dcsm		= 4;
+		pvt->csels[0].b_cnt = pvt->csels[1].b_cnt = 8;
+		pvt->csels[0].m_cnt = pvt->csels[1].m_cnt = 4;
 	}
 }
 
 /*
- * Function 2 Offset F10_DCSB0; read in the DCS Base and DCS Mask hw registers
+ * Function 2 Offset F10_DCSB0; read in the DCS Base and DCS Mask registers
  */
 static void read_dct_base_mask(struct amd64_pvt *pvt)
 {
-	int cs, reg;
+	int cs;
 
-	amd64_set_dct_base_and_mask(pvt);
+	prep_chip_selects(pvt);
 
-	for (cs = 0; cs < pvt->cs_count; cs++) {
-		reg = K8_DCSB0 + (cs * 4);
+	for_each_chip_select(cs, 0, pvt) {
+		u32 reg0   = DCSB0 + (cs * 4);
+		u32 reg1   = DCSB1 + (cs * 4);
+		u32 *base0 = &pvt->csels[0].csbases[cs];
+		u32 *base1 = &pvt->csels[1].csbases[cs];
 
-		if (!amd64_read_dct_pci_cfg(pvt, reg, &pvt->dcsb0[cs]))
+		if (!amd64_read_dct_pci_cfg(pvt, reg0, base0))
 			debugf0("  DCSB0[%d]=0x%08x reg: F2x%x\n",
-				cs, pvt->dcsb0[cs], reg);
+				cs, *base0, reg0);
 
-		if (!dct_ganging_enabled(pvt)) {
-			reg = F10_DCSB1 + (cs * 4);
+		if (boot_cpu_data.x86 == 0xf || dct_ganging_enabled(pvt))
+			continue;
 
-			if (!amd64_read_dct_pci_cfg(pvt, reg, &pvt->dcsb1[cs]))
+		if (!amd64_read_dct_pci_cfg(pvt, reg1, base1))
 			debugf0("  DCSB1[%d]=0x%08x reg: F2x%x\n",
-				cs, pvt->dcsb1[cs], reg);
-		}
+				cs, *base1, reg1);
 	}
 
-	for (cs = 0; cs < pvt->num_dcsm; cs++) {
-		reg = K8_DCSM0 + (cs * 4);
+	for_each_chip_select_mask(cs, 0, pvt) {
+		u32 reg0   = DCSM0 + (cs * 4);
+		u32 reg1   = DCSM1 + (cs * 4);
+		u32 *mask0 = &pvt->csels[0].csmasks[cs];
+		u32 *mask1 = &pvt->csels[1].csmasks[cs];
 
-		if (!amd64_read_dct_pci_cfg(pvt, reg, &pvt->dcsm0[cs]))
+		if (!amd64_read_dct_pci_cfg(pvt, reg0, mask0))
 			debugf0("    DCSM0[%d]=0x%08x reg: F2x%x\n",
-				cs, pvt->dcsm0[cs], reg);
+				cs, *mask0, reg0);
 
-		if (!dct_ganging_enabled(pvt)) {
-			reg = F10_DCSM1 + (cs * 4);
+		if (boot_cpu_data.x86 == 0xf || dct_ganging_enabled(pvt))
+			continue;
 
-			if (!amd64_read_dct_pci_cfg(pvt, reg, &pvt->dcsm1[cs]))
+		if (!amd64_read_dct_pci_cfg(pvt, reg1, mask1))
 			debugf0("    DCSM1[%d]=0x%08x reg: F2x%x\n",
-				cs, pvt->dcsm1[cs], reg);
-		}
+				cs, *mask1, reg1);
 	}
 }
 
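
(Working note, not part of the patch: once prep_chip_selects() fills in b_cnt/m_cnt, callers never hard-code register counts again. A hypothetical helper in the same style, assuming the driver context above:)

    static int enabled_csrows_on_dct0(struct amd64_pvt *pvt)
    {
            int cs, cnt = 0;

            for_each_chip_select(cs, 0, pvt)
                    if (csrow_enabled(cs, 0, pvt))
                            cnt++;

            return cnt;
    }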
@@ -1261,10 +1201,11 @@ static void f10_read_dram_ctl_register(struct amd64_pvt *pvt)
  * determine channel based on the interleaving mode: F10h BKDG, 2.8.9 Memory
  * Interleaving Modes.
  */
-static u32 f10_determine_channel(struct amd64_pvt *pvt, u64 sys_addr,
+static u8 f10_determine_channel(struct amd64_pvt *pvt, u64 sys_addr,
 				int hi_range_sel, u32 intlv_en)
 {
-	u32 cs, temp, dct_sel_high = (pvt->dct_sel_low >> 1) & 1;
+	u32 temp, dct_sel_high = (pvt->dct_sel_low >> 1) & 1;
+	u8 cs;
 
 	if (dct_ganging_enabled(pvt))
 		cs = 0;
@@ -1345,14 +1286,13 @@ static inline u64 f10_get_base_addr_offset(u64 sys_addr, int hi_range_sel,
  * checks if the csrow passed in is marked as SPARED, if so returns the new
  * spare row
  */
-static inline int f10_process_possible_spare(int csrow,
-				u32 cs, struct amd64_pvt *pvt)
+static int f10_process_possible_spare(struct amd64_pvt *pvt, u8 dct, int csrow)
 {
 	u32 swap_done;
 	u32 bad_dram_cs;
 
 	/* Depending on channel, isolate respective SPARING info */
-	if (cs) {
+	if (dct) {
 		swap_done = F10_ONLINE_SPARE_SWAPDONE1(pvt->online_spare);
 		bad_dram_cs = F10_ONLINE_SPARE_BADDRAM_CS1(pvt->online_spare);
 		if (swap_done && (csrow == bad_dram_cs))
@@ -1374,11 +1314,11 @@ static inline int f10_process_possible_spare(int csrow,
  * -EINVAL:  NOT FOUND
  * 0..csrow = Chip-Select Row
  */
-static int f10_lookup_addr_in_dct(u32 in_addr, u32 nid, u32 cs)
+static int f10_lookup_addr_in_dct(u64 in_addr, u32 nid, u8 dct)
 {
 	struct mem_ctl_info *mci;
 	struct amd64_pvt *pvt;
-	u32 cs_base, cs_mask;
+	u64 cs_base, cs_mask;
 	int cs_found = -EINVAL;
 	int csrow;
 
@@ -1388,39 +1328,25 @@ static int f10_lookup_addr_in_dct(u32 in_addr, u32 nid, u32 cs)
 
 	pvt = mci->pvt_info;
 
-	debugf1("InputAddr=0x%x  channelselect=%d\n", in_addr, cs);
-
-	for (csrow = 0; csrow < pvt->cs_count; csrow++) {
+	debugf1("input addr: 0x%llx, DCT: %d\n", in_addr, dct);
 
-		cs_base = amd64_get_dct_base(pvt, cs, csrow);
-		if (!(cs_base & K8_DCSB_CS_ENABLE))
+	for_each_chip_select(csrow, dct, pvt) {
+		if (!csrow_enabled(csrow, dct, pvt))
 			continue;
 
-		/*
-		 * We have an ENABLED CSROW, Isolate just the MASK bits of the
-		 * target: [28:19] and [13:5], which map to [36:27] and [21:13]
-		 * of the actual address.
-		 */
-		cs_base &= REV_F_F1Xh_DCSB_BASE_BITS;
-
-		/*
-		 * Get the DCT Mask, and ENABLE the reserved bits: [18:16] and
-		 * [4:0] to become ON. Then mask off bits [28:0] ([36:8])
-		 */
-		cs_mask = amd64_get_dct_mask(pvt, cs, csrow);
+		get_cs_base_and_mask(pvt, csrow, dct, &cs_base, &cs_mask);
 
-		debugf1("    CSROW=%d CSBase=0x%x RAW CSMask=0x%x\n",
+		debugf1("    CSROW=%d CSBase=0x%llx CSMask=0x%llx\n",
 			csrow, cs_base, cs_mask);
 
-		cs_mask = (cs_mask | 0x0007C01F) & 0x1FFFFFFF;
+		cs_mask = ~cs_mask;
 
-		debugf1("              Final CSMask=0x%x\n", cs_mask);
-		debugf1("    (InputAddr & ~CSMask)=0x%x "
-			"(CSBase & ~CSMask)=0x%x\n",
-			(in_addr & ~cs_mask), (cs_base & ~cs_mask));
+		debugf1("    (InputAddr & ~CSMask)=0x%llx "
+			"(CSBase & ~CSMask)=0x%llx\n",
+			(in_addr & cs_mask), (cs_base & cs_mask));
 
-		if ((in_addr & ~cs_mask) == (cs_base & ~cs_mask)) {
-			cs_found = f10_process_possible_spare(csrow, cs, pvt);
+		if ((in_addr & cs_mask) == (cs_base & cs_mask)) {
+			cs_found = f10_process_possible_spare(pvt, dct, csrow);
 
 			debugf1(" MATCH csrow=%d\n", cs_found);
 			break;
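
(Working note, not part of the patch: the deleted magic constants and the GENMASK form agree. The old 0x0007C01F that was ORed into the mask is exactly the gap between the [28:19] and [13:5] fields plus bits [4:0], which the new code pokes into the mask instead. A standalone check:)

    #include <assert.h>
    #include <stdint.h>

    #define GENMASK(lo, hi) (((1ULL << ((hi) - (lo) + 1)) - 1) << (lo))

    int main(void)
    {
            uint64_t dcs_bits = GENMASK(19, 28) | GENMASK(5, 13);

            /* old REV_F_F1Xh_DCSB_BASE_BITS */
            assert(dcs_bits == 0x1ff83fe0ULL);
            /* old reserved-bits constant ORed into the mask */
            assert((~dcs_bits & GENMASK(0, 28)) == 0x0007c01fULL);
            return 0;
    }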
@@ -1434,10 +1360,11 @@ static int f10_match_to_this_node(struct amd64_pvt *pvt, int range,
 			       u64 sys_addr, int *nid, int *chan_sel)
 {
 	int cs_found = -EINVAL, high_range = 0;
-	u32 intlv_shift;
-	u64 hole_off;
-	u32 hole_valid, tmp, dct_sel_base, channel;
 	u64 chan_addr, dct_sel_base_off;
+	u64 hole_off;
+	u32 hole_valid, tmp, dct_sel_base;
+	u32 intlv_shift;
+	u8 channel;
 
 	u8 node_id = dram_dst_node(pvt, range);
 	u32 intlv_en = dram_intlv_en(pvt, range);
@@ -1499,10 +1426,9 @@ static int f10_match_to_this_node(struct amd64_pvt *pvt, int range,
 		}
 	}
 
-	debugf1(" (ChannelAddrLong=0x%llx) >> 8 becomes InputAddr=0x%x\n",
-		chan_addr, (u32)(chan_addr >> 8));
+	debugf1(" (ChannelAddrLong=0x%llx)\n", chan_addr);
 
-	cs_found = f10_lookup_addr_in_dct(chan_addr >> 8, node_id, channel);
+	cs_found = f10_lookup_addr_in_dct(chan_addr, node_id, channel);
 
 	if (cs_found >= 0) {
 		*nid = node_id;
@@ -1603,7 +1529,8 @@ static void amd64_debug_display_dimm_sizes(int ctrl, struct amd64_pvt *pvt)
 	}
 
 	dbam = (ctrl && !dct_ganging_enabled(pvt)) ? pvt->dbam1 : pvt->dbam0;
-	dcsb = (ctrl && !dct_ganging_enabled(pvt)) ? pvt->dcsb1 : pvt->dcsb0;
+	dcsb = (ctrl && !dct_ganging_enabled(pvt)) ? pvt->csels[1].csbases
+						   : pvt->csels[0].csbases;
 
 	debugf1("F2x%d80 (DRAM Bank Address Mapping): 0x%08x\n", ctrl, dbam);
 
@@ -1613,11 +1540,11 @@ static void amd64_debug_display_dimm_sizes(int ctrl, struct amd64_pvt *pvt)
 	for (dimm = 0; dimm < 4; dimm++) {
 
 		size0 = 0;
-		if (dcsb[dimm*2] & K8_DCSB_CS_ENABLE)
+		if (dcsb[dimm*2] & DCSB_CS_ENABLE)
 			size0 = pvt->ops->dbam_to_cs(pvt, DBAM_DIMM(dimm, dbam));
 
 		size1 = 0;
-		if (dcsb[dimm*2 + 1] & K8_DCSB_CS_ENABLE)
+		if (dcsb[dimm*2 + 1] & DCSB_CS_ENABLE)
 			size1 = pvt->ops->dbam_to_cs(pvt, DBAM_DIMM(dimm, dbam));
 
 		amd64_info(EDAC_MC ": %d: %5dMB %d: %5dMB\n",
@@ -2082,7 +2009,7 @@ static void read_mc_regs(struct amd64_pvt *pvt)
  * NOTE: CPU Revision Dependent code
  *
  * Input:
- *	@csrow_nr ChipSelect Row Number (0..pvt->cs_count-1)
+ *	@csrow_nr ChipSelect Row Number (0..NUM_CHIPSELECTS-1)
  *	k8 private pointer to -->
  *			DRAM Bank Address mapping register
  *			node_id
@@ -2148,7 +2075,7 @@ static int init_csrows(struct mem_ctl_info *mci)
 {
 	struct csrow_info *csrow;
 	struct amd64_pvt *pvt = mci->pvt_info;
-	u64 input_addr_min, input_addr_max, sys_addr;
+	u64 input_addr_min, input_addr_max, sys_addr, base, mask;
 	u32 val;
 	int i, empty = 1;
 
@@ -2161,10 +2088,10 @@ static int init_csrows(struct mem_ctl_info *mci)
 		pvt->mc_node_id, val,
 		!!(val & K8_NBCFG_CHIPKILL), !!(val & K8_NBCFG_ECC_ENABLE));
 
-	for (i = 0; i < pvt->cs_count; i++) {
+	for_each_chip_select(i, 0, pvt) {
 		csrow = &mci->csrows[i];
 
-		if ((pvt->dcsb0[i] & K8_DCSB_CS_ENABLE) == 0) {
+		if (!csrow_enabled(i, 0, pvt)) {
 			debugf1("----CSROW %d EMPTY for node %d\n", i,
 				pvt->mc_node_id);
 			continue;
@@ -2180,7 +2107,9 @@ static int init_csrows(struct mem_ctl_info *mci)
 		csrow->first_page = (u32) (sys_addr >> PAGE_SHIFT);
 		sys_addr = input_addr_to_sys_addr(mci, input_addr_max);
 		csrow->last_page = (u32) (sys_addr >> PAGE_SHIFT);
-		csrow->page_mask = ~mask_from_dct_mask(pvt, i);
+
+		get_cs_base_and_mask(pvt, i, 0, &base, &mask);
+		csrow->page_mask = ~mask;
 		/* 8 bytes of resolution */
 
 		csrow->mtype = amd64_determine_memory_type(pvt, i);
@@ -2532,7 +2461,7 @@ static int amd64_init_one_instance(struct pci_dev *F2)
 		goto err_siblings;
 
 	ret = -ENOMEM;
-	mci = edac_mc_alloc(0, pvt->cs_count, pvt->channel_count, nid);
+	mci = edac_mc_alloc(0, pvt->csels[0].b_cnt, pvt->channel_count, nid);
 	if (!mci)
 		goto err_siblings;
 
diff --git a/drivers/edac/amd64_edac.h b/drivers/edac/amd64_edac.h
index a2bc9a650fff..1964f89a28bc 100644
--- a/drivers/edac/amd64_edac.h
+++ b/drivers/edac/amd64_edac.h
@@ -160,6 +160,14 @@
 #define OFF false
 
 /*
+ * Create a contiguous bitmask starting at bit position @lo and ending at
+ * position @hi. For example
+ *
+ * GENMASK(21, 39) gives us the 64bit vector 0x000000ffffe00000.
+ */
+#define GENMASK(lo, hi) (((1ULL << ((hi) - (lo) + 1)) - 1) << (lo))
+
+/*
  * PCI-defined configuration space registers
  */
 
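
(Working note, not part of the patch: the value quoted in the comment checks out, and so do the old rev E register masks once rewritten with the macro. A standalone sanity test:)

    #include <assert.h>

    #define GENMASK(lo, hi) (((1ULL << ((hi) - (lo) + 1)) - 1) << (lo))

    int main(void)
    {
            /* the example from the comment above */
            assert(GENMASK(21, 39) == 0x000000ffffe00000ULL);
            /* rev E DCSB base field [31:21] and [15:9], formerly 0xFFE0FE00 */
            assert((GENMASK(21, 31) | GENMASK(9, 15)) == 0xffe0fe00ULL);
            return 0;
    }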
@@ -198,45 +206,14 @@
 /*
  * Function 2 - DRAM controller
  */
-#define K8_DCSB0			0x40
-#define F10_DCSB1			0x140
-
-#define K8_DCSB_CS_ENABLE		BIT(0)
-#define K8_DCSB_NPT_SPARE		BIT(1)
-#define K8_DCSB_NPT_TESTFAIL		BIT(2)
-
-/*
- * REV E: select [31:21] and [15:9] from DCSB and the shift amount to form
- * the address
- */
-#define REV_E_DCSB_BASE_BITS		(0xFFE0FE00ULL)
-#define REV_E_DCS_SHIFT			4
-
-#define REV_F_F1Xh_DCSB_BASE_BITS	(0x1FF83FE0ULL)
-#define REV_F_F1Xh_DCS_SHIFT		8
-
-/*
- * REV F and later: selects [28:19] and [13:5] from DCSB and the shift amount
- * to form the address
- */
-#define REV_F_DCSB_BASE_BITS		(0x1FF83FE0ULL)
-#define REV_F_DCS_SHIFT			8
-
-/* DRAM CS Mask Registers */
-#define K8_DCSM0			0x60
-#define F10_DCSM1			0x160
-
-/* REV E: select [29:21] and [15:9] from DCSM */
-#define REV_E_DCSM_MASK_BITS		0x3FE0FE00
-
-/* unused bits [24:20] and [12:0] */
-#define REV_E_DCS_NOTUSED_BITS		0x01F01FFF
+#define DCSB0				0x40
+#define DCSB1				0x140
+#define DCSB_CS_ENABLE			BIT(0)
 
-/* REV F and later: select [28:19] and [13:5] from DCSM */
-#define REV_F_F1Xh_DCSM_MASK_BITS	0x1FF83FE0
+#define DCSM0				0x60
+#define DCSM1				0x160
 
-/* unused bits [26:22] and [12:0] */
-#define REV_F_F1Xh_DCS_NOTUSED_BITS	0x07C01FFF
+#define csrow_enabled(i, dct, pvt)	((pvt)->csels[(dct)].csbases[(i)] & DCSB_CS_ENABLE)
 
 #define DBAM0				0x80
 #define DBAM1				0x180
@@ -412,6 +389,15 @@ struct dram_range {
 	struct reg_pair lim;
 };
 
+/* A DCT chip selects collection */
+struct chip_select {
+	u32 csbases[NUM_CHIPSELECTS];
+	u8 b_cnt;
+
+	u32 csmasks[NUM_CHIPSELECTS];
+	u8 m_cnt;
+};
+
 struct amd64_pvt {
 	struct low_ops *ops;
 
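
(Working note, not part of the patch: the old per-DCT arrays map onto the new struct one for one; dcsb0/dcsb1 become csels[0]/csels[1].csbases, dcsm0/dcsm1 become csels[0]/csels[1].csmasks. Hypothetical accessors, assuming the driver context:)

    static inline u32 cs_base(struct amd64_pvt *pvt, u8 dct, int cs)
    {
            return pvt->csels[dct].csbases[cs];   /* was pvt->dcsb0/1[cs] */
    }

    static inline u32 cs_mask_raw(struct amd64_pvt *pvt, u8 dct, int cs)
    {
            /* was pvt->dcsm0/1[...]; on rev F and later two rows share a mask */
            return pvt->csels[dct].csmasks[cs >> 1];
    }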
@@ -434,29 +420,12 @@ struct amd64_pvt {
 	u32 dbam0;		/* DRAM Base Address Mapping reg for DCT0 */
 	u32 dbam1;		/* DRAM Base Address Mapping reg for DCT1 */
 
-	/* DRAM CS Base Address Registers F2x[1,0][5C:40] */
-	u32 dcsb0[NUM_CHIPSELECTS];
-	u32 dcsb1[NUM_CHIPSELECTS];
-
-	/* DRAM CS Mask Registers F2x[1,0][6C:60] */
-	u32 dcsm0[NUM_CHIPSELECTS];
-	u32 dcsm1[NUM_CHIPSELECTS];
+	/* one for each DCT */
+	struct chip_select csels[2];
 
 	/* DRAM base and limit pairs F1x[78,70,68,60,58,50,48,40] */
 	struct dram_range ranges[DRAM_RANGES];
 
-	/*
-	 * The following fields are set at (load) run time, after CPU revision
-	 * has been determined, since the dct_base and dct_mask registers vary
-	 * based on revision
-	 */
-	u32 dcsb_base;		/* DCSB base bits */
-	u32 dcsm_mask;		/* DCSM mask bits */
-	u32 cs_count;		/* num chip selects (== num DCSB registers) */
-	u32 num_dcsm;		/* Number of DCSM registers */
-	u32 dcs_mask_notused;	/* DCSM notused mask bits */
-	u32 dcs_shift;		/* DCSB and DCSM shift value */
-
 	u64 top_mem;		/* top of memory below 4GB */
 	u64 top_mem2;		/* top of memory above 4GB */
 