author    Borislav Petkov <borislav.petkov@amd.com>    2010-09-22 10:08:37 -0400
committer Borislav Petkov <borislav.petkov@amd.com>    2011-01-07 05:54:21 -0500
commit    6245288232516aadf293f575d1812dafb4696aee (patch)
tree      8ca69fb55bbc1556b75cc3cbc5cccc1bca4aa613 /drivers/edac
parent    b8f85c477bdf1fec98ea7cbe952fdb5f40eb0aa7 (diff)
EDAC, MCE: Overhaul error fields extraction macros
Make macro names shorter, thus making code shorter and clearer.

Signed-off-by: Borislav Petkov <borislav.petkov@amd.com>
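As a quick illustration of the rename (not part of the patch, only a sketch): the old and the new extraction helpers from mce_amd.h produce identical results, the one semantic difference being that XEC() now takes the extended-error-code mask as a parameter instead of hard-coding 0x1f. The sample status value and the main() harness below are made up for illustration.

#include <stdio.h>
#include <stdint.h>
#include <assert.h>

/* Old names (pre-patch mce_amd.h) */
#define ERROR_CODE(x)     ((x) & 0xffff)
#define EXT_ERROR_CODE(x) (((x) >> 16) & 0x1f)

/* New names (post-patch mce_amd.h) */
#define EC(x)             ((x) & 0xffff)
#define XEC(x, mask)      (((x) >> 16) & mask)

int main(void)
{
        /* Synthetic MCi_STATUS-style value, for illustration only. */
        uint64_t status = 0xb600000000070f0fULL;

        /* The rename does not change what gets extracted. */
        assert(ERROR_CODE(status) == EC(status));
        assert(EXT_ERROR_CODE(status) == XEC(status, 0x1f));

        printf("ec=0x%04llx xec=0x%02llx\n",
               (unsigned long long)EC(status),
               (unsigned long long)XEC(status, 0x1f));
        return 0;
}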
Diffstat (limited to 'drivers/edac')
-rw-r--r--  drivers/edac/amd64_edac.c |  4
-rw-r--r--  drivers/edac/mce_amd.c    | 83
-rw-r--r--  drivers/edac/mce_amd.h    | 10
3 files changed, 43 insertions(+), 54 deletions(-)
diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c
index eca9ba193e94..0212232e1597 100644
--- a/drivers/edac/amd64_edac.c
+++ b/drivers/edac/amd64_edac.c
@@ -2055,8 +2055,8 @@ static void amd64_handle_ue(struct mem_ctl_info *mci,
 static inline void __amd64_decode_bus_error(struct mem_ctl_info *mci,
                                             struct err_regs *info)
 {
-        u32 ec  = ERROR_CODE(info->nbsl);
-        u32 xec = EXT_ERROR_CODE(info->nbsl);
+        u16 ec  = EC(info->nbsl);
+        u8 xec  = XEC(info->nbsl, 0x1f);
         int ecc_type = (info->nbsh >> 13) & 0x3;
 
         /* Bail early out if this was an 'observed' error */
diff --git a/drivers/edac/mce_amd.c b/drivers/edac/mce_amd.c
index c14abe3e4074..53d4dc0de343 100644
--- a/drivers/edac/mce_amd.c
+++ b/drivers/edac/mce_amd.c
@@ -133,13 +133,13 @@ static bool f12h_dc_mce(u16 ec, u8 xec)
         bool ret = false;
 
         if (MEM_ERROR(ec)) {
-                u8 ll = ec & 0x3;
+                u8 ll = LL(ec);
                 ret = true;
 
                 if (ll == LL_L2)
                         pr_cont("during L1 linefill from L2.\n");
                 else if (ll == LL_L1)
-                        pr_cont("Data/Tag %s error.\n", RRRR_MSG(ec));
+                        pr_cont("Data/Tag %s error.\n", R4_MSG(ec));
                 else
                         ret = false;
         }
@@ -148,10 +148,7 @@ static bool f12h_dc_mce(u16 ec, u8 xec)
 
 static bool f10h_dc_mce(u16 ec, u8 xec)
 {
-        u8 r4  = (ec >> 4) & 0xf;
-        u8 ll  = ec & 0x3;
-
-        if (r4 == R4_GEN && ll == LL_L1) {
+        if (R4(ec) == R4_GEN && LL(ec) == LL_L1) {
                 pr_cont("during data scrub.\n");
                 return true;
         }
@@ -170,15 +167,12 @@ static bool k8_dc_mce(u16 ec, u8 xec)
 
 static bool f14h_dc_mce(u16 ec, u8 xec)
 {
-        u8 r4    = (ec >> 4) & 0xf;
-        u8 ll    = ec & 0x3;
-        u8 tt    = (ec >> 2) & 0x3;
-        u8 ii    = tt;
+        u8 r4    = R4(ec);
         bool ret = true;
 
         if (MEM_ERROR(ec)) {
 
-                if (tt != TT_DATA || ll != LL_L1)
+                if (TT(ec) != TT_DATA || LL(ec) != LL_L1)
                         return false;
 
                 switch (r4) {
@@ -198,7 +192,7 @@ static bool f14h_dc_mce(u16 ec, u8 xec)
                 }
         } else if (BUS_ERROR(ec)) {
 
-                if ((ii != II_MEM && ii != II_IO) || ll != LL_LG)
+                if ((II(ec) != II_MEM && II(ec) != II_IO) || LL(ec) != LL_LG)
                         return false;
 
                 pr_cont("System read data error on a ");
@@ -273,16 +267,14 @@ static bool f15h_dc_mce(u16 ec, u8 xec)
 
 static void amd_decode_dc_mce(struct mce *m)
 {
-        u16 ec = m->status & 0xffff;
-        u8 xec = (m->status >> 16) & xec_mask;
+        u16 ec = EC(m->status);
+        u8 xec = XEC(m->status, xec_mask);
 
         pr_emerg(HW_ERR "Data Cache Error: ");
 
         /* TLB error signatures are the same across families */
         if (TLB_ERROR(ec)) {
-                u8 tt = (ec >> 2) & 0x3;
-
-                if (tt == TT_DATA) {
+                if (TT(ec) == TT_DATA) {
                         pr_cont("%s TLB %s.\n", LL_MSG(ec),
                                 ((xec == 2) ? "locked miss"
                                         : (xec ? "multimatch" : "parity")));
@@ -296,8 +288,7 @@ static void amd_decode_dc_mce(struct mce *m)
 
 static bool k8_ic_mce(u16 ec, u8 xec)
 {
-        u8 ll    = ec & 0x3;
-        u8 r4    = (ec >> 4) & 0xf;
+        u8 ll    = LL(ec);
         bool ret = true;
 
         if (!MEM_ERROR(ec))
@@ -306,7 +297,7 @@ static bool k8_ic_mce(u16 ec, u8 xec)
         if (ll == 0x2)
                 pr_cont("during a linefill from L2.\n");
         else if (ll == 0x1) {
-                switch (r4) {
+                switch (R4(ec)) {
                 case R4_IRD:
                         pr_cont("Parity error during data load.\n");
                         break;
@@ -331,13 +322,11 @@ static bool k8_ic_mce(u16 ec, u8 xec)
 
 static bool f14h_ic_mce(u16 ec, u8 xec)
 {
-        u8 ll    = ec & 0x3;
-        u8 tt    = (ec >> 2) & 0x3;
-        u8 r4    = (ec >> 4) & 0xf;
+        u8 r4    = R4(ec);
         bool ret = true;
 
         if (MEM_ERROR(ec)) {
-                if (tt != 0 || ll != 1)
+                if (TT(ec) != 0 || LL(ec) != 1)
                         ret = false;
 
                 if (r4 == R4_IRD)
@@ -378,8 +367,8 @@ static bool f15h_ic_mce(u16 ec, u8 xec)
 
 static void amd_decode_ic_mce(struct mce *m)
 {
-        u16 ec = m->status & 0xffff;
-        u8 xec = (m->status >> 16) & xec_mask;
+        u16 ec = EC(m->status);
+        u8 xec = XEC(m->status, xec_mask);
 
         pr_emerg(HW_ERR "Instruction Cache Error: ");
 
@@ -398,8 +387,8 @@ static void amd_decode_ic_mce(struct mce *m)
 
 static void amd_decode_bu_mce(struct mce *m)
 {
-        u32 ec = m->status & 0xffff;
-        u32 xec = (m->status >> 16) & xec_mask;
+        u16 ec = EC(m->status);
+        u8 xec = XEC(m->status, xec_mask);
 
         pr_emerg(HW_ERR "Bus Unit Error");
 
@@ -408,23 +397,23 @@ static void amd_decode_bu_mce(struct mce *m)
         else if (xec == 0x3)
                 pr_cont(" in the victim data buffers.\n");
         else if (xec == 0x2 && MEM_ERROR(ec))
-                pr_cont(": %s error in the L2 cache tags.\n", RRRR_MSG(ec));
+                pr_cont(": %s error in the L2 cache tags.\n", R4_MSG(ec));
         else if (xec == 0x0) {
                 if (TLB_ERROR(ec))
                         pr_cont(": %s error in a Page Descriptor Cache or "
                                 "Guest TLB.\n", TT_MSG(ec));
                 else if (BUS_ERROR(ec))
                         pr_cont(": %s/ECC error in data read from NB: %s.\n",
-                                RRRR_MSG(ec), PP_MSG(ec));
+                                R4_MSG(ec), PP_MSG(ec));
                 else if (MEM_ERROR(ec)) {
-                        u8 rrrr = (ec >> 4) & 0xf;
+                        u8 r4 = R4(ec);
 
-                        if (rrrr >= 0x7)
+                        if (r4 >= 0x7)
                                 pr_cont(": %s error during data copyback.\n",
-                                        RRRR_MSG(ec));
-                        else if (rrrr <= 0x1)
+                                        R4_MSG(ec));
+                        else if (r4 <= 0x1)
                                 pr_cont(": %s parity/ECC error during data "
-                                        "access from L2.\n", RRRR_MSG(ec));
+                                        "access from L2.\n", R4_MSG(ec));
                         else
                                 goto wrong_bu_mce;
                 } else
@@ -440,8 +429,8 @@ wrong_bu_mce:
 
 static void amd_decode_cu_mce(struct mce *m)
 {
-        u16 ec = m->status & 0xffff;
-        u8 xec = (m->status >> 16) & xec_mask;
+        u16 ec = EC(m->status);
+        u8 xec = XEC(m->status, xec_mask);
 
         pr_emerg(HW_ERR "Combined Unit Error: ");
 
@@ -480,8 +469,8 @@ wrong_cu_mce:
 
 static void amd_decode_ls_mce(struct mce *m)
 {
-        u16 ec = m->status & 0xffff;
-        u8 xec = (m->status >> 16) & xec_mask;
+        u16 ec = EC(m->status);
+        u8 xec = XEC(m->status, xec_mask);
 
         if (boot_cpu_data.x86 >= 0x14) {
                 pr_emerg("You shouldn't be seeing an LS MCE on this cpu family,"
@@ -492,12 +481,12 @@ static void amd_decode_ls_mce(struct mce *m)
         pr_emerg(HW_ERR "Load Store Error");
 
         if (xec == 0x0) {
-                u8 r4 = (ec >> 4) & 0xf;
+                u8 r4 = R4(ec);
 
                 if (!BUS_ERROR(ec) || (r4 != R4_DRD && r4 != R4_DWR))
                         goto wrong_ls_mce;
 
-                pr_cont(" during %s.\n", RRRR_MSG(ec));
+                pr_cont(" during %s.\n", R4_MSG(ec));
         } else
                 goto wrong_ls_mce;
 
@@ -605,8 +594,8 @@ static bool nb_noop_mce(u16 ec, u8 xec)
 
 void amd_decode_nb_mce(int node_id, struct mce *m, u32 nbcfg)
 {
-        u8 xec = (m->status >> 16) & 0x1f;
-        u16 ec = m->status & 0xffff;
+        u16 ec = EC(m->status);
+        u8 xec = XEC(m->status, 0x1f);
         u32 nbsh = (u32)(m->status >> 32);
 
         pr_emerg(HW_ERR "Northbridge Error, node %d: ", node_id);
@@ -668,7 +657,7 @@ EXPORT_SYMBOL_GPL(amd_decode_nb_mce);
 static void amd_decode_fr_mce(struct mce *m)
 {
         struct cpuinfo_x86 *c = &boot_cpu_data;
-        u8 xec = (m->status >> 16) & xec_mask;
+        u8 xec = XEC(m->status, xec_mask);
 
         if (c->x86 == 0xf || c->x86 == 0x11)
                 goto wrong_fr_mce;
@@ -694,7 +683,7 @@ wrong_fr_mce:
 
 static void amd_decode_fp_mce(struct mce *m)
 {
-        u8 xec = (m->status >> 16) & xec_mask;
+        u8 xec = XEC(m->status, xec_mask);
 
         pr_emerg(HW_ERR "Floating Point Unit Error: ");
 
@@ -739,11 +728,11 @@ static inline void amd_decode_err_code(u16 ec)
                          TT_MSG(ec), LL_MSG(ec));
         } else if (MEM_ERROR(ec)) {
                 pr_emerg(HW_ERR "Transaction: %s, Type: %s, Cache Level: %s\n",
-                         RRRR_MSG(ec), TT_MSG(ec), LL_MSG(ec));
+                         R4_MSG(ec), TT_MSG(ec), LL_MSG(ec));
         } else if (BUS_ERROR(ec)) {
                 pr_emerg(HW_ERR "Transaction: %s (%s), %s, Cache Level: %s, "
                          "Participating Processor: %s\n",
-                         RRRR_MSG(ec), II_MSG(ec), TO_MSG(ec), LL_MSG(ec),
+                         R4_MSG(ec), II_MSG(ec), TO_MSG(ec), LL_MSG(ec),
                          PP_MSG(ec));
         } else
                 pr_emerg(HW_ERR "Huh? Unknown MCE error 0x%x\n", ec);
diff --git a/drivers/edac/mce_amd.h b/drivers/edac/mce_amd.h
index 83988471123f..45dda47173f2 100644
--- a/drivers/edac/mce_amd.h
+++ b/drivers/edac/mce_amd.h
@@ -7,8 +7,8 @@
 
 #define BIT_64(n)                       (U64_C(1) << (n))
 
-#define ERROR_CODE(x)                   ((x) & 0xffff)
-#define EXT_ERROR_CODE(x)               (((x) >> 16) & 0x1f)
+#define EC(x)                           ((x) & 0xffff)
+#define XEC(x, mask)                    (((x) >> 16) & mask)
 
 #define LOW_SYNDROME(x)                 (((x) >> 15) & 0xff)
 #define HIGH_SYNDROME(x)                (((x) >> 24) & 0xff)
@@ -21,15 +21,15 @@
 #define TT_MSG(x)                       tt_msgs[TT(x)]
 #define II(x)                           (((x) >> 2) & 0x3)
 #define II_MSG(x)                       ii_msgs[II(x)]
-#define LL(x)                           (((x) >> 0) & 0x3)
+#define LL(x)                           ((x) & 0x3)
 #define LL_MSG(x)                       ll_msgs[LL(x)]
 #define TO(x)                           (((x) >> 8) & 0x1)
 #define TO_MSG(x)                       to_msgs[TO(x)]
 #define PP(x)                           (((x) >> 9) & 0x3)
 #define PP_MSG(x)                       pp_msgs[PP(x)]
 
-#define RRRR(x)                         (((x) >> 4) & 0xf)
-#define RRRR_MSG(x)                     ((RRRR(x) < 9) ? rrrr_msgs[RRRR(x)] : "Wrong R4!")
+#define R4(x)                           (((x) >> 4) & 0xf)
+#define R4_MSG(x)                       ((R4(x) < 9) ? rrrr_msgs[R4(x)] : "Wrong R4!")
 
 #define K8_NBSH                         0x4C
 
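For context, a minimal userspace sketch (not part of the patch) of how the renamed sub-field macros decompose an MCA error code. The macro bodies are copied from the mce_amd.h hunks above; the main() harness and the sample status value are made up for illustration only.

#include <stdio.h>
#include <stdint.h>

/* Copied from the new drivers/edac/mce_amd.h */
#define EC(x)           ((x) & 0xffff)
#define XEC(x, mask)    (((x) >> 16) & mask)
#define II(x)           (((x) >> 2) & 0x3)
#define LL(x)           ((x) & 0x3)
#define TO(x)           (((x) >> 8) & 0x1)
#define PP(x)           (((x) >> 9) & 0x3)
#define R4(x)           (((x) >> 4) & 0xf)

int main(void)
{
        /* Synthetic status value; not a real machine check record. */
        uint64_t status = 0x00000000001c0f2aULL;
        unsigned int ec = EC(status);

        /* Decompose the 16-bit error code into its sub-fields. */
        printf("ec=0x%04x xec=0x%02x r4=%u ll=%u ii=%u to=%u pp=%u\n",
               ec, (unsigned int)XEC(status, 0x1f),
               R4(ec), LL(ec), II(ec), TO(ec), PP(ec));
        return 0;
}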