author     Linus Torvalds <torvalds@linux-foundation.org>  2009-10-08 15:00:39 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2009-10-08 15:00:39 -0400
commit     1bfd16a6571db105d7d57acf768c1df297781f07 (patch)
tree       83e266b8a474c9fd2cb6fed99bd6a1a3af8cd8e6
parent     5587481e92105734e8e45a24fd8603228ec02449 (diff)
parent     94baaee4947d84809b289d5ca03677525ffc6da9 (diff)
Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/bp/bp
* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/bp/bp:
  amd64_edac: beef up DRAM error injection
  amd64_edac: fix DRAM base and limit extraction
  amd64_edac: fix chip select handling
  amd64_edac: simple fix to allow reporting of CECC errors
  amd64_edac: fix K8 intlv_sel check
  amd64_edac: fix interleave enable tests
  amd64_edac: fix DRAM base and limit address extraction
  amd64_edac: fix driver instance lookup table allocation
-rw-r--r--  drivers/edac/amd64_edac.c      | 104
-rw-r--r--  drivers/edac/amd64_edac.h      |  23
-rw-r--r--  drivers/edac/amd64_edac_inj.c  |  49
3 files changed, 101 insertions(+), 75 deletions(-)
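Note on the "fix interleave enable tests" change in find_mc_by_sys_addr() below: dram_IntlvEn[] is filled as (low >> 8) & 0x7 (unchanged by this merge), i.e. it is already shifted down to a 3-bit value, so the old sanity check against (0x01 << 8), (0x03 << 8) and (0x07 << 8) could never match; the patch compares against 0x01, 0x03 and 0x07 instead. A minimal standalone sketch of the corrected check follows (illustration only, not part of the patch; the helper name intlv_en_is_valid is made up):

#include <stdint.h>
#include <stdio.h>

/*
 * Valid node-interleave settings once the IntlvEn field has been shifted
 * down from bits 10:8 of the DRAM Base Low register: 0 (off), 1, 3 or 7.
 */
static int intlv_en_is_valid(uint32_t dram_base_low_reg)
{
	uint32_t intlv_en = (dram_base_low_reg >> 8) & 0x7; /* as in k8_read_dram_base_limit() */

	return intlv_en == 0x00 || intlv_en == 0x01 ||
	       intlv_en == 0x03 || intlv_en == 0x07;
}

int main(void)
{
	uint32_t reg = 0x3 << 8; /* hypothetical register value, IntlvEn = 0x3 */

	printf("IntlvEn valid: %d\n", intlv_en_is_valid(reg));
	return 0;
}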
diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c
index 4e551e63b6dc..4f4ac82382f7 100644
--- a/drivers/edac/amd64_edac.c
+++ b/drivers/edac/amd64_edac.c
@@ -15,8 +15,8 @@ module_param(ecc_enable_override, int, 0644);
 
 /* Lookup table for all possible MC control instances */
 struct amd64_pvt;
-static struct mem_ctl_info *mci_lookup[MAX_NUMNODES];
-static struct amd64_pvt *pvt_lookup[MAX_NUMNODES];
+static struct mem_ctl_info *mci_lookup[EDAC_MAX_NUMNODES];
+static struct amd64_pvt *pvt_lookup[EDAC_MAX_NUMNODES];
 
 /*
  * See F2x80 for K8 and F2x[1,0]80 for Fam10 and later. The table below is only
@@ -189,7 +189,10 @@ static int amd64_get_scrub_rate(struct mem_ctl_info *mci, u32 *bw)
 /* Map from a CSROW entry to the mask entry that operates on it */
 static inline u32 amd64_map_to_dcs_mask(struct amd64_pvt *pvt, int csrow)
 {
-	return csrow >> (pvt->num_dcsm >> 3);
+	if (boot_cpu_data.x86 == 0xf && pvt->ext_model < OPTERON_CPU_REV_F)
+		return csrow;
+	else
+		return csrow >> 1;
 }
 
 /* return the 'base' address the i'th CS entry of the 'dct' DRAM controller */
@@ -279,29 +282,26 @@ static struct mem_ctl_info *find_mc_by_sys_addr(struct mem_ctl_info *mci,
 	intlv_en = pvt->dram_IntlvEn[0];
 
 	if (intlv_en == 0) {
-		for (node_id = 0; ; ) {
+		for (node_id = 0; node_id < DRAM_REG_COUNT; node_id++) {
 			if (amd64_base_limit_match(pvt, sys_addr, node_id))
-				break;
-
-			if (++node_id >= DRAM_REG_COUNT)
-				goto err_no_match;
+				goto found;
 		}
-		goto found;
+		goto err_no_match;
 	}
 
-	if (unlikely((intlv_en != (0x01 << 8)) &&
-		     (intlv_en != (0x03 << 8)) &&
-		     (intlv_en != (0x07 << 8)))) {
+	if (unlikely((intlv_en != 0x01) &&
+		     (intlv_en != 0x03) &&
+		     (intlv_en != 0x07))) {
 		amd64_printk(KERN_WARNING, "junk value of 0x%x extracted from "
 			     "IntlvEn field of DRAM Base Register for node 0: "
-			     "This probably indicates a BIOS bug.\n", intlv_en);
+			     "this probably indicates a BIOS bug.\n", intlv_en);
 		return NULL;
 	}
 
 	bits = (((u32) sys_addr) >> 12) & intlv_en;
 
 	for (node_id = 0; ; ) {
-		if ((pvt->dram_limit[node_id] & intlv_en) == bits)
+		if ((pvt->dram_IntlvSel[node_id] & intlv_en) == bits)
 			break;	/* intlv_sel field matches */
 
 		if (++node_id >= DRAM_REG_COUNT)
@@ -311,10 +311,10 @@ static struct mem_ctl_info *find_mc_by_sys_addr(struct mem_ctl_info *mci,
 	/* sanity test for sys_addr */
 	if (unlikely(!amd64_base_limit_match(pvt, sys_addr, node_id))) {
 		amd64_printk(KERN_WARNING,
-			     "%s(): sys_addr 0x%lx falls outside base/limit "
+			     "%s(): sys_addr 0x%llx falls outside base/limit "
 			     "address range for node %d with node interleaving "
-			     "enabled.\n", __func__, (unsigned long)sys_addr,
-			     node_id);
+			     "enabled.\n",
+			     __func__, sys_addr, node_id);
 		return NULL;
 	}
 
@@ -377,7 +377,7 @@ static int input_addr_to_csrow(struct mem_ctl_info *mci, u64 input_addr)
 	 * base/mask register pair, test the condition shown near the start of
 	 * section 3.5.4 (p. 84, BKDG #26094, K8, revA-E).
 	 */
-	for (csrow = 0; csrow < CHIPSELECT_COUNT; csrow++) {
+	for (csrow = 0; csrow < pvt->cs_count; csrow++) {
 
 		/* This DRAM chip select is disabled on this node */
 		if ((pvt->dcsb0[csrow] & K8_DCSB_CS_ENABLE) == 0)
@@ -734,7 +734,7 @@ static void find_csrow_limits(struct mem_ctl_info *mci, int csrow,
 	u64 base, mask;
 
 	pvt = mci->pvt_info;
-	BUG_ON((csrow < 0) || (csrow >= CHIPSELECT_COUNT));
+	BUG_ON((csrow < 0) || (csrow >= pvt->cs_count));
 
 	base = base_from_dct_base(pvt, csrow);
 	mask = mask_from_dct_mask(pvt, csrow);
@@ -962,35 +962,27 @@ err_reg:
  */
 static void amd64_set_dct_base_and_mask(struct amd64_pvt *pvt)
 {
-	if (pvt->ext_model >= OPTERON_CPU_REV_F) {
+
+	if (boot_cpu_data.x86 == 0xf && pvt->ext_model < OPTERON_CPU_REV_F) {
+		pvt->dcsb_base = REV_E_DCSB_BASE_BITS;
+		pvt->dcsm_mask = REV_E_DCSM_MASK_BITS;
+		pvt->dcs_mask_notused = REV_E_DCS_NOTUSED_BITS;
+		pvt->dcs_shift = REV_E_DCS_SHIFT;
+		pvt->cs_count = 8;
+		pvt->num_dcsm = 8;
+	} else {
 		pvt->dcsb_base = REV_F_F1Xh_DCSB_BASE_BITS;
 		pvt->dcsm_mask = REV_F_F1Xh_DCSM_MASK_BITS;
 		pvt->dcs_mask_notused = REV_F_F1Xh_DCS_NOTUSED_BITS;
 		pvt->dcs_shift = REV_F_F1Xh_DCS_SHIFT;
 
-		switch (boot_cpu_data.x86) {
-		case 0xf:
-			pvt->num_dcsm = REV_F_DCSM_COUNT;
-			break;
-
-		case 0x10:
-			pvt->num_dcsm = F10_DCSM_COUNT;
-			break;
-
-		case 0x11:
-			pvt->num_dcsm = F11_DCSM_COUNT;
-			break;
-
-		default:
-			amd64_printk(KERN_ERR, "Unsupported family!\n");
-			break;
+		if (boot_cpu_data.x86 == 0x11) {
+			pvt->cs_count = 4;
+			pvt->num_dcsm = 2;
+		} else {
+			pvt->cs_count = 8;
+			pvt->num_dcsm = 4;
 		}
-	} else {
-		pvt->dcsb_base = REV_E_DCSB_BASE_BITS;
-		pvt->dcsm_mask = REV_E_DCSM_MASK_BITS;
-		pvt->dcs_mask_notused = REV_E_DCS_NOTUSED_BITS;
-		pvt->dcs_shift = REV_E_DCS_SHIFT;
-		pvt->num_dcsm = REV_E_DCSM_COUNT;
 	}
 }
 
@@ -1003,7 +995,7 @@ static void amd64_read_dct_base_mask(struct amd64_pvt *pvt)
 
 	amd64_set_dct_base_and_mask(pvt);
 
-	for (cs = 0; cs < CHIPSELECT_COUNT; cs++) {
+	for (cs = 0; cs < pvt->cs_count; cs++) {
 		reg = K8_DCSB0 + (cs * 4);
 		err = pci_read_config_dword(pvt->dram_f2_ctl, reg,
 					    &pvt->dcsb0[cs]);
@@ -1130,7 +1122,7 @@ static void k8_read_dram_base_limit(struct amd64_pvt *pvt, int dram)
 		debugf0("Reading K8_DRAM_BASE_LOW failed\n");
 
 	/* Extract parts into separate data entries */
-	pvt->dram_base[dram] = ((u64) low & 0xFFFF0000) << 8;
+	pvt->dram_base[dram] = ((u64) low & 0xFFFF0000) << 24;
 	pvt->dram_IntlvEn[dram] = (low >> 8) & 0x7;
 	pvt->dram_rw_en[dram] = (low & 0x3);
 
@@ -1143,7 +1135,7 @@ static void k8_read_dram_base_limit(struct amd64_pvt *pvt, int dram)
 	 * Extract parts into separate data entries. Limit is the HIGHEST memory
 	 * location of the region, so lower 24 bits need to be all ones
 	 */
-	pvt->dram_limit[dram] = (((u64) low & 0xFFFF0000) << 8) | 0x00FFFFFF;
+	pvt->dram_limit[dram] = (((u64) low & 0xFFFF0000) << 24) | 0x00FFFFFF;
 	pvt->dram_IntlvSel[dram] = (low >> 8) & 0x7;
 	pvt->dram_DstNode[dram] = (low & 0x7);
 }
@@ -1193,7 +1185,7 @@ static void k8_map_sysaddr_to_csrow(struct mem_ctl_info *mci,
 	 * different from the node that detected the error.
 	 */
 	src_mci = find_mc_by_sys_addr(mci, SystemAddress);
-	if (src_mci) {
+	if (!src_mci) {
 		amd64_mc_printk(mci, KERN_ERR,
 			     "failed to map error address 0x%lx to a node\n",
 			     (unsigned long)SystemAddress);
@@ -1376,8 +1368,8 @@ static void f10_read_dram_base_limit(struct amd64_pvt *pvt, int dram)
 
 	pvt->dram_IntlvEn[dram] = (low_base >> 8) & 0x7;
 
-	pvt->dram_base[dram] = (((((u64) high_base & 0x000000FF) << 32) |
-				((u64) low_base & 0xFFFF0000))) << 8;
+	pvt->dram_base[dram] = (((u64)high_base & 0x000000FF) << 40) |
+			       (((u64)low_base & 0xFFFF0000) << 24);
 
 	low_offset = K8_DRAM_LIMIT_LOW + (dram << 3);
 	high_offset = F10_DRAM_LIMIT_HIGH + (dram << 3);
@@ -1398,9 +1390,9 @@ static void f10_read_dram_base_limit(struct amd64_pvt *pvt, int dram)
 	 * Extract address values and form a LIMIT address. Limit is the HIGHEST
 	 * memory location of the region, so low 24 bits need to be all ones.
 	 */
-	low_limit |= 0x0000FFFF;
-	pvt->dram_limit[dram] =
-		((((u64) high_limit << 32) + (u64) low_limit) << 8) | (0xFF);
+	pvt->dram_limit[dram] = (((u64)high_limit & 0x000000FF) << 40) |
+				(((u64) low_limit & 0xFFFF0000) << 24) |
+				0x00FFFFFF;
 }
 
 static void f10_read_dram_ctl_register(struct amd64_pvt *pvt)
@@ -1566,7 +1558,7 @@ static int f10_lookup_addr_in_dct(u32 in_addr, u32 nid, u32 cs)
 
 	debugf1("InputAddr=0x%x channelselect=%d\n", in_addr, cs);
 
-	for (csrow = 0; csrow < CHIPSELECT_COUNT; csrow++) {
+	for (csrow = 0; csrow < pvt->cs_count; csrow++) {
 
 		cs_base = amd64_get_dct_base(pvt, cs, csrow);
 		if (!(cs_base & K8_DCSB_CS_ENABLE))
@@ -2497,7 +2489,7 @@ err_reg:
  * NOTE: CPU Revision Dependent code
  *
  * Input:
- *	@csrow_nr	ChipSelect Row Number (0..CHIPSELECT_COUNT-1)
+ *	@csrow_nr	ChipSelect Row Number (0..pvt->cs_count-1)
  *	k8 private pointer to -->
  *			DRAM Bank Address mapping register
  *			node_id
@@ -2577,7 +2569,7 @@ static int amd64_init_csrows(struct mem_ctl_info *mci)
 		(pvt->nbcfg & K8_NBCFG_ECC_ENABLE) ? "Enabled" : "Disabled"
 		);
 
-	for (i = 0; i < CHIPSELECT_COUNT; i++) {
+	for (i = 0; i < pvt->cs_count; i++) {
 		csrow = &mci->csrows[i];
 
 		if ((pvt->dcsb0[i] & K8_DCSB_CS_ENABLE) == 0) {
@@ -2988,7 +2980,7 @@ static int amd64_init_2nd_stage(struct amd64_pvt *pvt)
 		goto err_exit;
 
 	ret = -ENOMEM;
-	mci = edac_mc_alloc(0, CHIPSELECT_COUNT, pvt->channel_count, node_id);
+	mci = edac_mc_alloc(0, pvt->cs_count, pvt->channel_count, node_id);
 	if (!mci)
 		goto err_exit;
 
diff --git a/drivers/edac/amd64_edac.h b/drivers/edac/amd64_edac.h
index 8ea07e2715dc..c6f359a85207 100644
--- a/drivers/edac/amd64_edac.h
+++ b/drivers/edac/amd64_edac.h
@@ -132,6 +132,8 @@
 #define EDAC_AMD64_VERSION		" Ver: 3.2.0 " __DATE__
 #define EDAC_MOD_STR			"amd64_edac"
 
+#define EDAC_MAX_NUMNODES		8
+
 /* Extended Model from CPUID, for CPU Revision numbers */
 #define OPTERON_CPU_LE_REV_C		0
 #define OPTERON_CPU_REV_D		1
@@ -142,7 +144,7 @@
 #define OPTERON_CPU_REV_FA		5
 
 /* Hardware limit on ChipSelect rows per MC and processors per system */
-#define CHIPSELECT_COUNT		8
+#define MAX_CS_COUNT			8
 #define DRAM_REG_COUNT			8
 
 
@@ -193,7 +195,6 @@
  */
 #define REV_E_DCSB_BASE_BITS		(0xFFE0FE00ULL)
 #define REV_E_DCS_SHIFT			4
-#define REV_E_DCSM_COUNT		8
 
 #define REV_F_F1Xh_DCSB_BASE_BITS	(0x1FF83FE0ULL)
 #define REV_F_F1Xh_DCS_SHIFT		8
@@ -204,9 +205,6 @@
  */
 #define REV_F_DCSB_BASE_BITS		(0x1FF83FE0ULL)
 #define REV_F_DCS_SHIFT			8
-#define REV_F_DCSM_COUNT		4
-#define F10_DCSM_COUNT			4
-#define F11_DCSM_COUNT			2
 
 /* DRAM CS Mask Registers */
 #define K8_DCSM0			0x60
@@ -374,13 +372,11 @@ enum {
 
 #define SET_NB_DRAM_INJECTION_WRITE(word, bits)		\
 					(BIT(((word) & 0xF) + 20) |	\
-					BIT(17) |	\
-					((bits) & 0xF))
+					BIT(17) | bits)
 
 #define SET_NB_DRAM_INJECTION_READ(word, bits)		\
 					(BIT(((word) & 0xF) + 20) |	\
-					BIT(16) |	\
-					((bits) & 0xF))
+					BIT(16) | bits)
 
 #define K8_NBCAP			0xE8
 #define K8_NBCAP_CORES			(BIT(12)|BIT(13))
@@ -445,12 +441,12 @@ struct amd64_pvt {
 	u32 dbam1;		/* DRAM Base Address Mapping reg for DCT1 */
 
 	/* DRAM CS Base Address Registers F2x[1,0][5C:40] */
-	u32 dcsb0[CHIPSELECT_COUNT];
-	u32 dcsb1[CHIPSELECT_COUNT];
+	u32 dcsb0[MAX_CS_COUNT];
+	u32 dcsb1[MAX_CS_COUNT];
 
 	/* DRAM CS Mask Registers F2x[1,0][6C:60] */
-	u32 dcsm0[CHIPSELECT_COUNT];
-	u32 dcsm1[CHIPSELECT_COUNT];
+	u32 dcsm0[MAX_CS_COUNT];
+	u32 dcsm1[MAX_CS_COUNT];
 
 	/*
 	 * Decoded parts of DRAM BASE and LIMIT Registers
@@ -470,6 +466,7 @@ struct amd64_pvt {
 	 */
 	u32 dcsb_base;		/* DCSB base bits */
 	u32 dcsm_mask;		/* DCSM mask bits */
+	u32 cs_count;		/* num chip selects (== num DCSB registers) */
 	u32 num_dcsm;		/* Number of DCSM registers */
 	u32 dcs_mask_notused;	/* DCSM notused mask bits */
 	u32 dcs_shift;		/* DCSB and DCSM shift value */
diff --git a/drivers/edac/amd64_edac_inj.c b/drivers/edac/amd64_edac_inj.c
index d3675b76b3a7..29f1f7a612d9 100644
--- a/drivers/edac/amd64_edac_inj.c
+++ b/drivers/edac/amd64_edac_inj.c
@@ -1,5 +1,11 @@
 #include "amd64_edac.h"
 
+static ssize_t amd64_inject_section_show(struct mem_ctl_info *mci, char *buf)
+{
+	struct amd64_pvt *pvt = mci->pvt_info;
+	return sprintf(buf, "0x%x\n", pvt->injection.section);
+}
+
 /*
  * store error injection section value which refers to one of 4 16-byte sections
  * within a 64-byte cacheline
@@ -15,12 +21,26 @@ static ssize_t amd64_inject_section_store(struct mem_ctl_info *mci,
 
 	ret = strict_strtoul(data, 10, &value);
 	if (ret != -EINVAL) {
+
+		if (value > 3) {
+			amd64_printk(KERN_WARNING,
+				     "%s: invalid section 0x%lx\n",
+				     __func__, value);
+			return -EINVAL;
+		}
+
 		pvt->injection.section = (u32) value;
 		return count;
 	}
 	return ret;
 }
 
+static ssize_t amd64_inject_word_show(struct mem_ctl_info *mci, char *buf)
+{
+	struct amd64_pvt *pvt = mci->pvt_info;
+	return sprintf(buf, "0x%x\n", pvt->injection.word);
+}
+
 /*
  * store error injection word value which refers to one of 9 16-bit word of the
  * 16-byte (128-bit + ECC bits) section
@@ -37,14 +57,25 @@ static ssize_t amd64_inject_word_store(struct mem_ctl_info *mci,
 	ret = strict_strtoul(data, 10, &value);
 	if (ret != -EINVAL) {
 
-		value = (value <= 8) ? value : 0;
-		pvt->injection.word = (u32) value;
+		if (value > 8) {
+			amd64_printk(KERN_WARNING,
+				     "%s: invalid word 0x%lx\n",
+				     __func__, value);
+			return -EINVAL;
+		}
 
+		pvt->injection.word = (u32) value;
 		return count;
 	}
 	return ret;
 }
 
+static ssize_t amd64_inject_ecc_vector_show(struct mem_ctl_info *mci, char *buf)
+{
+	struct amd64_pvt *pvt = mci->pvt_info;
+	return sprintf(buf, "0x%x\n", pvt->injection.bit_map);
+}
+
 /*
  * store 16 bit error injection vector which enables injecting errors to the
  * corresponding bit within the error injection word above. When used during a
@@ -60,8 +91,14 @@ static ssize_t amd64_inject_ecc_vector_store(struct mem_ctl_info *mci,
 	ret = strict_strtoul(data, 16, &value);
 	if (ret != -EINVAL) {
 
-		pvt->injection.bit_map = (u32) value & 0xFFFF;
+		if (value & 0xFFFF0000) {
+			amd64_printk(KERN_WARNING,
+				     "%s: invalid EccVector: 0x%lx\n",
+				     __func__, value);
+			return -EINVAL;
+		}
 
+		pvt->injection.bit_map = (u32) value;
 		return count;
 	}
 	return ret;
@@ -147,7 +184,7 @@ struct mcidev_sysfs_attribute amd64_inj_attrs[] = {
 			.name = "inject_section",
 			.mode = (S_IRUGO | S_IWUSR)
 		},
-		.show = NULL,
+		.show = amd64_inject_section_show,
 		.store = amd64_inject_section_store,
 	},
 	{
@@ -155,7 +192,7 @@ struct mcidev_sysfs_attribute amd64_inj_attrs[] = {
 			.name = "inject_word",
 			.mode = (S_IRUGO | S_IWUSR)
 		},
-		.show = NULL,
+		.show = amd64_inject_word_show,
 		.store = amd64_inject_word_store,
 	},
 	{
@@ -163,7 +200,7 @@ struct mcidev_sysfs_attribute amd64_inj_attrs[] = {
 			.name = "inject_ecc_vector",
 			.mode = (S_IRUGO | S_IWUSR)
 		},
-		.show = NULL,
+		.show = amd64_inject_ecc_vector_show,
 		.store = amd64_inject_ecc_vector_store,
 	},
 	{