author    Mauro Carvalho Chehab <mchehab@infradead.org>  2009-01-06 17:43:00 -0500
committer Linus Torvalds <torvalds@linux-foundation.org>  2009-01-06 18:59:30 -0500
commit    920c8df6ac678fdb8c49a6ce2e47a98e62757d77 (patch)
tree      3c9b3699310332798b4d928cf2fac09b28df9235
parent    29d6cf26a74b8575a6416b7ad4d369a455f8d009 (diff)

edac: driver for i5400 MCH (Seaburg)

This driver adds support for the i5400 MCH chipset.

Signed-off-by: Mauro Carvalho Chehab <mchehab@redhat.com>
Signed-off-by: Ben Woodard <woodard@redhat.com>
Cc: Doug Thompson <norsk5@yahoo.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
-rw-r--r--  drivers/edac/Kconfig      |    7
-rw-r--r--  drivers/edac/Makefile     |    1
-rw-r--r--  drivers/edac/i5400_edac.c | 1471
3 files changed, 1479 insertions, 0 deletions
diff --git a/drivers/edac/Kconfig b/drivers/edac/Kconfig
index e2667a8c2997..eee47fd16d79 100644
--- a/drivers/edac/Kconfig
+++ b/drivers/edac/Kconfig
@@ -109,6 +109,13 @@ config EDAC_X38
 	  Support for error detection and correction on the Intel
 	  X38 server chipsets.
 
+config EDAC_I5400
+	tristate "Intel 5400 (Seaburg) chipsets"
+	depends on EDAC_MM_EDAC && PCI && X86
+	help
+	  Support for error detection and correction on the Intel
+	  i5400 MCH chipset (Seaburg).
+
 config EDAC_I82860
 	tristate "Intel 82860"
 	depends on EDAC_MM_EDAC && PCI && X86_32
diff --git a/drivers/edac/Makefile b/drivers/edac/Makefile
index 62c2d9bad8dc..b75196927de3 100644
--- a/drivers/edac/Makefile
+++ b/drivers/edac/Makefile
@@ -20,6 +20,7 @@ endif
 obj-$(CONFIG_EDAC_AMD76X)		+= amd76x_edac.o
 obj-$(CONFIG_EDAC_I5000)		+= i5000_edac.o
 obj-$(CONFIG_EDAC_I5100)		+= i5100_edac.o
+obj-$(CONFIG_EDAC_I5400)		+= i5400_edac.o
 obj-$(CONFIG_EDAC_E7XXX)		+= e7xxx_edac.o
 obj-$(CONFIG_EDAC_E752X)		+= e752x_edac.o
 obj-$(CONFIG_EDAC_I82443BXGX)	+= i82443bxgx_edac.o
diff --git a/drivers/edac/i5400_edac.c b/drivers/edac/i5400_edac.c
new file mode 100644
index 000000000000..8ec3eca3061b
--- /dev/null
+++ b/drivers/edac/i5400_edac.c
@@ -0,0 +1,1471 @@
1/*
2 * Intel 5400 class Memory Controllers kernel module
3 *
4 * This file may be distributed under the terms of the
5 * GNU General Public License.
6 *
7 * Copyright (c) 2008 by:
8 * Ben Woodard <woodard@redhat.com>
9 * Mauro Carvalho Chehab <mchehab@redhat.com>
10 *
11 * Red Hat Inc. http://www.redhat.com
12 *
13 * Forked and adapted from the i5000_edac driver which was
14 * written by Douglas Thompson Linux Networx <norsk5@xmission.com>
15 *
16 * This module is based on the following document:
17 *
18 * Intel 5400 Chipset Memory Controller Hub (MCH) - Datasheet
19 * http://developer.intel.com/design/chipsets/datashts/313070.htm
20 *
21 */
22
23#include <linux/module.h>
24#include <linux/init.h>
25#include <linux/pci.h>
26#include <linux/pci_ids.h>
27#include <linux/slab.h>
28#include <linux/edac.h>
29#include <linux/mmzone.h>
30
31#include "edac_core.h"
32
33/*
34 * Alter this version for the I5400 module when modifications are made
35 */
36#define I5400_REVISION " Ver: 1.0.0 " __DATE__
37
38#define EDAC_MOD_STR "i5400_edac"
39
40#define i5400_printk(level, fmt, arg...) \
41 edac_printk(level, "i5400", fmt, ##arg)
42
43#define i5400_mc_printk(mci, level, fmt, arg...) \
44 edac_mc_chipset_printk(mci, level, "i5400", fmt, ##arg)
45
46/* Limits for i5400 */
47#define NUM_MTRS_PER_BRANCH 4
48#define CHANNELS_PER_BRANCH 2
49#define MAX_CHANNELS 4
 50#define MAX_DIMMS (MAX_CHANNELS * 4) /* Up to 4 DIMMs per channel */
51#define MAX_CSROWS (MAX_DIMMS * 2) /* max possible csrows per channel */
52
53/* Device 16,
54 * Function 0: System Address
55 * Function 1: Memory Branch Map, Control, Errors Register
56 * Function 2: FSB Error Registers
57 *
58 * All 3 functions of Device 16 (0,1,2) share the SAME DID
59 */
60#ifndef PCI_DEVICE_ID_INTEL_5400_ERR
61#define PCI_DEVICE_ID_INTEL_5400_ERR 0x4030 /* Device 16 (0,1,2) */
62#define PCI_DEVICE_ID_INTEL_5400_FBD0 0x4035 /* Device 21 (0,1) */
63#define PCI_DEVICE_ID_INTEL_5400_FBD1 0x4036 /* Device 21 (0,1) */
64#endif
65
66 /* OFFSETS for Function 0 */
67#define AMBASE 0x48 /* AMB Mem Mapped Reg Region Base */
68#define MAXCH 0x56 /* Max Channel Number */
69#define MAXDIMMPERCH 0x57 /* Max DIMM PER Channel Number */
70
71 /* OFFSETS for Function 1 */
72#define TOLM 0x6C
73#define REDMEMB 0x7C
74#define REC_ECC_LOCATOR_ODD(x) ((x) & 0x3fe00) /* bits [17:9] indicate ODD, [8:0] indicate EVEN */
75#define MIR0 0x80
76#define MIR1 0x84
77#define AMIR0 0x8c
78#define AMIR1 0x90
79
80 /* Fatal error registers */
 81#define FERR_FAT_FBD 0x98 /* also called FERR_FAT_FB_DIMM in the datasheet */
82#define FERR_FAT_FBDCHAN (3<<28) /* channel index where the highest-order error occurred */
83
84#define NERR_FAT_FBD 0x9c
 85#define FERR_NF_FBD 0xa0 /* also called FERR_NFAT_FB_DIMM in the datasheet */
86
87 /* Non-fatal error register */
88#define NERR_NF_FBD 0xa4
89
90 /* Enable error mask */
91#define EMASK_FBD 0xa8
92
93#define ERR0_FBD 0xac
94#define ERR1_FBD 0xb0
95#define ERR2_FBD 0xb4
96#define MCERR_FBD 0xb8
97
98 /* No OFFSETS for Device 16 Function 2 */
99
100/*
101 * Device 21,
102 * Function 0: Memory Map Branch 0
103 *
104 * Device 22,
105 * Function 0: Memory Map Branch 1
106 */
107
108 /* OFFSETS for Function 0 */
109#define AMBPRESENT_0 0x64
110#define AMBPRESENT_1 0x66
111#define MTR0 0x80
112#define MTR1 0x82
113#define MTR2 0x84
114#define MTR3 0x86
115
116 /* OFFSETS for Function 1 */
117#define NRECFGLOG 0x74
118#define RECFGLOG 0x78
119#define NRECMEMA 0xbe
120#define NRECMEMB 0xc0
121#define NRECFB_DIMMA 0xc4
122#define NRECFB_DIMMB 0xc8
123#define NRECFB_DIMMC 0xcc
124#define NRECFB_DIMMD 0xd0
125#define NRECFB_DIMME 0xd4
126#define NRECFB_DIMMF 0xd8
127#define REDMEMA 0xdC
128#define RECMEMA 0xf0
129#define RECMEMB 0xf4
130#define RECFB_DIMMA 0xf8
131#define RECFB_DIMMB 0xec
132#define RECFB_DIMMC 0xf0
133#define RECFB_DIMMD 0xf4
134#define RECFB_DIMME 0xf8
135#define RECFB_DIMMF 0xfC
136
137/*
138 * Error indicator bits and masks
 139 * Error masks follow Table 5-17 of the i5400 datasheet
140 */
141
142enum error_mask {
143 EMASK_M1 = 1<<0, /* Memory Write error on non-redundant retry */
144 EMASK_M2 = 1<<1, /* Memory or FB-DIMM configuration CRC read error */
145 EMASK_M3 = 1<<2, /* Reserved */
146 EMASK_M4 = 1<<3, /* Uncorrectable Data ECC on Replay */
147 EMASK_M5 = 1<<4, /* Aliased Uncorrectable Non-Mirrored Demand Data ECC */
148 EMASK_M6 = 1<<5, /* Unsupported on i5400 */
149 EMASK_M7 = 1<<6, /* Aliased Uncorrectable Resilver- or Spare-Copy Data ECC */
150 EMASK_M8 = 1<<7, /* Aliased Uncorrectable Patrol Data ECC */
151 EMASK_M9 = 1<<8, /* Non-Aliased Uncorrectable Non-Mirrored Demand Data ECC */
152 EMASK_M10 = 1<<9, /* Unsupported on i5400 */
153 EMASK_M11 = 1<<10, /* Non-Aliased Uncorrectable Resilver- or Spare-Copy Data ECC */
154 EMASK_M12 = 1<<11, /* Non-Aliased Uncorrectable Patrol Data ECC */
155 EMASK_M13 = 1<<12, /* Memory Write error on first attempt */
156 EMASK_M14 = 1<<13, /* FB-DIMM Configuration Write error on first attempt */
157 EMASK_M15 = 1<<14, /* Memory or FB-DIMM configuration CRC read error */
158 EMASK_M16 = 1<<15, /* Channel Failed-Over Occurred */
159 EMASK_M17 = 1<<16, /* Correctable Non-Mirrored Demand Data ECC */
160 EMASK_M18 = 1<<17, /* Unsupported on i5400 */
161 EMASK_M19 = 1<<18, /* Correctable Resilver- or Spare-Copy Data ECC */
162 EMASK_M20 = 1<<19, /* Correctable Patrol Data ECC */
163 EMASK_M21 = 1<<20, /* FB-DIMM Northbound parity error on FB-DIMM Sync Status */
164 EMASK_M22 = 1<<21, /* SPD protocol Error */
165 EMASK_M23 = 1<<22, /* Non-Redundant Fast Reset Timeout */
166 EMASK_M24 = 1<<23, /* Refresh error */
167 EMASK_M25 = 1<<24, /* Memory Write error on redundant retry */
168 EMASK_M26 = 1<<25, /* Redundant Fast Reset Timeout */
169 EMASK_M27 = 1<<26, /* Correctable Counter Threshold Exceeded */
170 EMASK_M28 = 1<<27, /* DIMM-Spare Copy Completed */
171 EMASK_M29 = 1<<28, /* DIMM-Isolation Completed */
172};
173
174/*
175 * Names to translate bit error into something useful
176 */
177char *error_name[] = {
178 [0] = "Memory Write error on non-redundant retry",
179 [1] = "Memory or FB-DIMM configuration CRC read error",
180 /* Reserved */
181 [3] = "Uncorrectable Data ECC on Replay",
182 [4] = "Aliased Uncorrectable Non-Mirrored Demand Data ECC",
183 /* Unsupported on i5400 */
184 [6] = "Aliased Uncorrectable Resilver- or Spare-Copy Data ECC",
185 [7] = "Aliased Uncorrectable Patrol Data ECC",
186 [8] = "Non-Aliased Uncorrectable Non-Mirrored Demand Data ECC",
187 /* Unsupported */
188 [10] = "Non-Aliased Uncorrectable Resilver- or Spare-Copy Data ECC",
189 [11] = "Non-Aliased Uncorrectable Patrol Data ECC",
190 [12] = "Memory Write error on first attempt",
191 [13] = "FB-DIMM Configuration Write error on first attempt",
192 [14] = "Memory or FB-DIMM configuration CRC read error",
193 [15] = "Channel Failed-Over Occurred",
194 [16] = "Correctable Non-Mirrored Demand Data ECC",
195 /* Unsupported */
196 [18] = "Correctable Resilver- or Spare-Copy Data ECC",
197 [19] = "Correctable Patrol Data ECC",
198 [20] = "FB-DIMM Northbound parity error on FB-DIMM Sync Status",
199 [21] = "SPD protocol Error",
200 [22] = "Non-Redundant Fast Reset Timeout",
201 [23] = "Refresh error",
202 [24] = "Memory Write error on redundant retry",
203 [25] = "Redundant Fast Reset Timeout",
204 [26] = "Correctable Counter Threshold Exceeded",
205 [27] = "DIMM-Spare Copy Completed",
206 [28] = "DIMM-Isolation Completed",
207};
208
209/* Fatal errors */
210#define ERROR_FAT_MASK (EMASK_M1 | \
211 EMASK_M2 | \
212 EMASK_M23)
213
214/* Correctable errors */
215#define ERROR_NF_CORRECTABLE (EMASK_M27 | \
216 EMASK_M20 | \
217 EMASK_M19 | \
218 EMASK_M18 | \
219 EMASK_M17 | \
220 EMASK_M16)
221#define ERROR_NF_DIMM_SPARE (EMASK_M29 | \
222 EMASK_M28)
223#define ERROR_NF_SPD_PROTOCOL (EMASK_M22)
224#define ERROR_NF_NORTH_CRC (EMASK_M21)
225
226/* Recoverable errors */
227#define ERROR_NF_RECOVERABLE (EMASK_M26 | \
228 EMASK_M25 | \
229 EMASK_M24 | \
230 EMASK_M15 | \
231 EMASK_M14 | \
232 EMASK_M13 | \
233 EMASK_M12 | \
234 EMASK_M11 | \
235 EMASK_M9 | \
236 EMASK_M8 | \
237 EMASK_M7 | \
238 EMASK_M5)
239
240/* uncorrectable errors */
241#define ERROR_NF_UNCORRECTABLE (EMASK_M4)
242
243/* mask to all non-fatal errors */
244#define ERROR_NF_MASK (ERROR_NF_CORRECTABLE | \
245 ERROR_NF_UNCORRECTABLE | \
246 ERROR_NF_RECOVERABLE | \
247 ERROR_NF_DIMM_SPARE | \
248 ERROR_NF_SPD_PROTOCOL | \
249 ERROR_NF_NORTH_CRC)
250
251/*
252 * Define error masks for the several registers
253 */
254
255/* Enable all fatal and non fatal errors */
256#define ENABLE_EMASK_ALL (ERROR_FAT_MASK | ERROR_NF_MASK)
257
258/* mask for fatal error registers */
259#define FERR_FAT_MASK ERROR_FAT_MASK
260
261/* masks for non-fatal error register */
262#define TO_NF_MASK(a) (((a) & EMASK_M29) | ((a) >> 3))
263#define FROM_NF_FERR(a) (((a) & EMASK_M29) | (((a) << 3) & ((1 << 30)-1)))
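/* Mapping illustration, derived directly from the two macros above:
 * TO_NF_MASK(EMASK_M4) == (1 << 0) and FROM_NF_FERR(1 << 0) == EMASK_M4,
 * i.e. EMASK bit n lands on non-fatal FERR/NERR bit n-3, while EMASK_M29
 * (bit 28) is additionally preserved in place by the "& EMASK_M29" term.
 */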
264
265#define FERR_NF_MASK TO_NF_MASK(ERROR_NF_MASK)
266#define FERR_NF_CORRECTABLE TO_NF_MASK(ERROR_NF_CORRECTABLE)
267#define FERR_NF_DIMM_SPARE TO_NF_MASK(ERROR_NF_DIMM_SPARE)
268#define FERR_NF_SPD_PROTOCOL TO_NF_MASK(ERROR_NF_SPD_PROTOCOL)
269#define FERR_NF_NORTH_CRC TO_NF_MASK(ERROR_NF_NORTH_CRC)
270#define FERR_NF_RECOVERABLE TO_NF_MASK(ERROR_NF_RECOVERABLE)
271#define FERR_NF_UNCORRECTABLE TO_NF_MASK(ERROR_NF_UNCORRECTABLE)
272
 273/* Defines to extract the various fields from the
274 * MTRx - Memory Technology Registers
275 */
276#define MTR_DIMMS_PRESENT(mtr) ((mtr) & (1 << 10))
277#define MTR_DIMMS_ETHROTTLE(mtr) ((mtr) & (1 << 9))
278#define MTR_DRAM_WIDTH(mtr) (((mtr) & (1<< 8)) ? 8 : 4)
279#define MTR_DRAM_BANKS(mtr) (((mtr) & (1<< 6)) ? 8 : 4)
280#define MTR_DRAM_BANKS_ADDR_BITS(mtr) ((MTR_DRAM_BANKS(mtr) == 8) ? 3 : 2)
281#define MTR_DIMM_RANK(mtr) (((mtr) >> 5) & 0x1)
282#define MTR_DIMM_RANK_ADDR_BITS(mtr) (MTR_DIMM_RANK(mtr) ? 2 : 1)
283#define MTR_DIMM_ROWS(mtr) (((mtr) >> 2) & 0x3)
284#define MTR_DIMM_ROWS_ADDR_BITS(mtr) (MTR_DIMM_ROWS(mtr) + 13)
285#define MTR_DIMM_COLS(mtr) ((mtr) & 0x3)
286#define MTR_DIMM_COLS_ADDR_BITS(mtr) (MTR_DIMM_COLS(mtr) + 10)
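/* Worked example (illustrative value only, not taken from the datasheet):
 * for mtr == 0x465 the macros above decode to: DIMMs present (bit 10),
 * x4 width, 8 banks, dual rank, 14 row address bits, 11 column address bits.
 */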
287
288/* This applies to FERR_NF_FB-DIMM as well as FERR_FAT_FB-DIMM */
289static inline int extract_fbdchan_indx(u32 x)
290{
291 return (x>>28) & 0x3;
292}
293
294#ifdef CONFIG_EDAC_DEBUG
295/* MTR NUMROW */
296static char *numrow_toString[] = {
297 "8,192 - 13 rows",
298 "16,384 - 14 rows",
299 "32,768 - 15 rows",
300 "65,536 - 16 rows"
301};
302
303/* MTR NUMCOL */
304static char *numcol_toString[] = {
305 "1,024 - 10 columns",
306 "2,048 - 11 columns",
307 "4,096 - 12 columns",
308 "reserved"
309};
310#endif
311
312/* Device name and register DID (Device ID) */
313struct i5400_dev_info {
314 const char *ctl_name; /* name for this device */
315 u16 fsb_mapping_errors; /* DID for the branchmap,control */
316};
317
318/* Table of devices attributes supported by this driver */
319static const struct i5400_dev_info i5400_devs[] = {
320 {
321 .ctl_name = "I5400",
322 .fsb_mapping_errors = PCI_DEVICE_ID_INTEL_5400_ERR,
323 },
324};
325
326struct i5400_dimm_info {
327 int megabytes; /* size, 0 means not present */
328 int dual_rank;
329};
330
331/* driver private data structure */
332struct i5400_pvt {
333 struct pci_dev *system_address; /* 16.0 */
334 struct pci_dev *branchmap_werrors; /* 16.1 */
335 struct pci_dev *fsb_error_regs; /* 16.2 */
336 struct pci_dev *branch_0; /* 21.0 */
337 struct pci_dev *branch_1; /* 22.0 */
338
339 u16 tolm; /* top of low memory */
340 u64 ambase; /* AMB BAR */
341
342 u16 mir0, mir1;
343
 344 u16 b0_mtr[NUM_MTRS_PER_BRANCH]; /* Memory Technology Reg */
 345 u16 b0_ambpresent0; /* Branch 0, Channel 0 */
 346 u16 b0_ambpresent1; /* Branch 0, Channel 1 */
 347
 348 u16 b1_mtr[NUM_MTRS_PER_BRANCH]; /* Memory Technology Reg */
 349 u16 b1_ambpresent0; /* Branch 1, Channel 0 */
350 u16 b1_ambpresent1; /* Branch 1, Channel 1 */
351
352 /* DIMM information matrix, allocating architecture maximums */
353 struct i5400_dimm_info dimm_info[MAX_CSROWS][MAX_CHANNELS];
354
355 /* Actual values for this controller */
356 int maxch; /* Max channels */
357 int maxdimmperch; /* Max DIMMs per channel */
358};
359
360/* I5400 MCH error information retrieved from Hardware */
361struct i5400_error_info {
362 /* These registers are always read from the MC */
363 u32 ferr_fat_fbd; /* First Errors Fatal */
364 u32 nerr_fat_fbd; /* Next Errors Fatal */
365 u32 ferr_nf_fbd; /* First Errors Non-Fatal */
366 u32 nerr_nf_fbd; /* Next Errors Non-Fatal */
367
368 /* These registers are input ONLY if there was a Recoverable Error */
369 u32 redmemb; /* Recoverable Mem Data Error log B */
370 u16 recmema; /* Recoverable Mem Error log A */
371 u32 recmemb; /* Recoverable Mem Error log B */
372
373 /* These registers are input ONLY if there was a Non-Recoverable Error */
374 u16 nrecmema; /* Non-Recoverable Mem log A */
375 u16 nrecmemb; /* Non-Recoverable Mem log B */
376
377};
378
 379/* note that nrec_rdwr changed from NRECMEMA to NRECMEMB between the 5000 and
 380 the 5400, so it is better to use an inline function than a macro in this case */
381static inline int nrec_bank(struct i5400_error_info *info)
382{
383 return ((info->nrecmema) >> 12) & 0x7;
384}
385static inline int nrec_rank(struct i5400_error_info *info)
386{
387 return ((info->nrecmema) >> 8) & 0xf;
388}
389static inline int nrec_buf_id(struct i5400_error_info *info)
390{
391 return ((info->nrecmema)) & 0xff;
392}
393static inline int nrec_rdwr(struct i5400_error_info *info)
394{
395 return (info->nrecmemb) >> 31;
396}
 397/* This applies to both the NREC and REC strings, so it can be used with
 398 nrec_rdwr and rec_rdwr */
399static inline const char *rdwr_str(int rdwr)
400{
401 return rdwr ? "Write" : "Read";
402}
403static inline int nrec_cas(struct i5400_error_info *info)
404{
405 return ((info->nrecmemb) >> 16) & 0x1fff;
406}
407static inline int nrec_ras(struct i5400_error_info *info)
408{
409 return (info->nrecmemb) & 0xffff;
410}
411static inline int rec_bank(struct i5400_error_info *info)
412{
413 return ((info->recmema) >> 12) & 0x7;
414}
415static inline int rec_rank(struct i5400_error_info *info)
416{
417 return ((info->recmema) >> 8) & 0xf;
418}
419static inline int rec_rdwr(struct i5400_error_info *info)
420{
421 return (info->recmemb) >> 31;
422}
423static inline int rec_cas(struct i5400_error_info *info)
424{
425 return ((info->recmemb) >> 16) & 0x1fff;
426}
427static inline int rec_ras(struct i5400_error_info *info)
428{
429 return (info->recmemb) & 0xffff;
430}
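/* Summary of the bit fields decoded by the accessors above:
 * NRECMEMA/RECMEMA: [14:12] bank, [11:8] rank, [7:0] buffer id (NREC only)
 * NRECMEMB/RECMEMB: [31] read/write, [28:16] CAS, [15:0] RAS
 */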
431
432static struct edac_pci_ctl_info *i5400_pci;
433
434/*
 435 * i5400_get_error_info Retrieve the error information from the
 436 * hardware and cache it in the 'info'
437 * structure
438 */
439static void i5400_get_error_info(struct mem_ctl_info *mci,
440 struct i5400_error_info *info)
441{
442 struct i5400_pvt *pvt;
443 u32 value;
444
445 pvt = mci->pvt_info;
446
447 /* read in the 1st FATAL error register */
448 pci_read_config_dword(pvt->branchmap_werrors, FERR_FAT_FBD, &value);
449
450 /* Mask only the bits that the doc says are valid
451 */
452 value &= (FERR_FAT_FBDCHAN | FERR_FAT_MASK);
453
454 /* If there is an error, then read in the
455 NEXT FATAL error register and the Memory Error Log Register A
456 */
457 if (value & FERR_FAT_MASK) {
458 info->ferr_fat_fbd = value;
459
460 /* harvest the various error data we need */
461 pci_read_config_dword(pvt->branchmap_werrors,
462 NERR_FAT_FBD, &info->nerr_fat_fbd);
463 pci_read_config_word(pvt->branchmap_werrors,
464 NRECMEMA, &info->nrecmema);
465 pci_read_config_word(pvt->branchmap_werrors,
466 NRECMEMB, &info->nrecmemb);
467
468 /* Clear the error bits, by writing them back */
469 pci_write_config_dword(pvt->branchmap_werrors,
470 FERR_FAT_FBD, value);
471 } else {
472 info->ferr_fat_fbd = 0;
473 info->nerr_fat_fbd = 0;
474 info->nrecmema = 0;
475 info->nrecmemb = 0;
476 }
477
478 /* read in the 1st NON-FATAL error register */
479 pci_read_config_dword(pvt->branchmap_werrors, FERR_NF_FBD, &value);
480
 481 /* If there is an error, then read in the NEXT non-fatal error
 482 * register and the recoverable error log registers as well */
483 if (value & FERR_NF_MASK) {
484 info->ferr_nf_fbd = value;
485
486 /* harvest the various error data we need */
487 pci_read_config_dword(pvt->branchmap_werrors,
488 NERR_NF_FBD, &info->nerr_nf_fbd);
489 pci_read_config_word(pvt->branchmap_werrors,
490 RECMEMA, &info->recmema);
491 pci_read_config_dword(pvt->branchmap_werrors,
492 RECMEMB, &info->recmemb);
493 pci_read_config_dword(pvt->branchmap_werrors,
494 REDMEMB, &info->redmemb);
495
496 /* Clear the error bits, by writing them back */
497 pci_write_config_dword(pvt->branchmap_werrors,
498 FERR_NF_FBD, value);
499 } else {
500 info->ferr_nf_fbd = 0;
501 info->nerr_nf_fbd = 0;
502 info->recmema = 0;
503 info->recmemb = 0;
504 info->redmemb = 0;
505 }
506}
507
508/*
509 * i5400_proccess_non_recoverable_info(struct mem_ctl_info *mci,
510 * struct i5400_error_info *info,
511 * int handle_errors);
512 *
513 * handle the Intel FATAL and unrecoverable errors, if any
514 */
515static void i5400_proccess_non_recoverable_info(struct mem_ctl_info *mci,
516 struct i5400_error_info *info,
517 unsigned long allErrors)
518{
519 char msg[EDAC_MC_LABEL_LEN + 1 + 90 + 80];
520 int branch;
521 int channel;
522 int bank;
523 int buf_id;
524 int rank;
525 int rdwr;
526 int ras, cas;
527 int errnum;
528 char *type = NULL;
529
530 if (!allErrors)
531 return; /* if no error, return now */
532
533 if (allErrors & ERROR_FAT_MASK)
534 type = "FATAL";
535 else if (allErrors & FERR_NF_UNCORRECTABLE)
536 type = "NON-FATAL uncorrected";
537 else
538 type = "NON-FATAL recoverable";
539
540 /* ONLY ONE of the possible error bits will be set, as per the docs */
541
542 branch = extract_fbdchan_indx(info->ferr_fat_fbd);
543 channel = branch;
544
545 /* Use the NON-Recoverable macros to extract data */
546 bank = nrec_bank(info);
547 rank = nrec_rank(info);
548 buf_id = nrec_buf_id(info);
549 rdwr = nrec_rdwr(info);
550 ras = nrec_ras(info);
551 cas = nrec_cas(info);
552
553 debugf0("\t\tCSROW= %d Channels= %d,%d (Branch= %d "
554 "DRAM Bank= %d Buffer ID = %d rdwr= %s ras= %d cas= %d)\n",
555 rank, channel, channel + 1, branch >> 1, bank,
556 buf_id, rdwr_str(rdwr), ras, cas);
557
558 /* Only 1 bit will be on */
559 errnum = find_first_bit(&allErrors, ARRAY_SIZE(error_name));
560
561 /* Form out message */
562 snprintf(msg, sizeof(msg),
563 "%s (Branch=%d DRAM-Bank=%d Buffer ID = %d RDWR=%s RAS=%d CAS=%d "
564 "%s Err=0x%lx (%s))",
565 type, branch >> 1, bank, buf_id, rdwr_str(rdwr), ras, cas, type,
566 allErrors, error_name[errnum]);
567
568 /* Call the helper to output message */
569 edac_mc_handle_fbd_ue(mci, rank, channel, channel + 1, msg);
570}
571
572/*
 573 * i5400_process_nonfatal_error_info(struct mem_ctl_info *mci,
574 * struct i5400_error_info *info,
575 * int handle_errors);
576 *
577 * handle the Intel NON-FATAL errors, if any
578 */
579static void i5400_process_nonfatal_error_info(struct mem_ctl_info *mci,
580 struct i5400_error_info *info)
581{
582 char msg[EDAC_MC_LABEL_LEN + 1 + 90 + 80];
583 unsigned long allErrors;
584 int branch;
585 int channel;
586 int bank;
587 int rank;
588 int rdwr;
589 int ras, cas;
590 int errnum;
591
592 /* mask off the Error bits that are possible */
593 allErrors = FROM_NF_FERR(info->ferr_nf_fbd & FERR_NF_MASK);
594 if (!allErrors)
595 return; /* if no error, return now */
596
597 /* ONLY ONE of the possible error bits will be set, as per the docs */
598
599 if (allErrors & (ERROR_NF_UNCORRECTABLE | ERROR_NF_RECOVERABLE)) {
600 i5400_proccess_non_recoverable_info(mci, info, allErrors);
601 return;
602 }
603
604 /* Correctable errors */
605 if (allErrors & ERROR_NF_CORRECTABLE) {
606 debugf0("\tCorrected bits= 0x%lx\n", allErrors);
607
608 branch = extract_fbdchan_indx(info->ferr_nf_fbd);
609
610 channel = 0;
611 if (REC_ECC_LOCATOR_ODD(info->redmemb))
612 channel = 1;
613
614 /* Convert channel to be based from zero, instead of
615 * from branch base of 0 */
616 channel += branch;
617
618 bank = rec_bank(info);
619 rank = rec_rank(info);
620 rdwr = rec_rdwr(info);
621 ras = rec_ras(info);
622 cas = rec_cas(info);
623
624 /* Only 1 bit will be on */
625 errnum = find_first_bit(&allErrors, ARRAY_SIZE(error_name));
626
627 debugf0("\t\tCSROW= %d Channel= %d (Branch %d "
628 "DRAM Bank= %d rdwr= %s ras= %d cas= %d)\n",
629 rank, channel, branch >> 1, bank,
630 rdwr_str(rdwr), ras, cas);
631
632 /* Form out message */
633 snprintf(msg, sizeof(msg),
634 "Corrected error (Branch=%d DRAM-Bank=%d RDWR=%s RAS=%d "
635 "CAS=%d, CE Err=0x%lx (%s))", branch >> 1, bank,
636 rdwr_str(rdwr), ras, cas, allErrors,
637 error_name[errnum]);
638
639 /* Call the helper to output message */
640 edac_mc_handle_fbd_ce(mci, rank, channel, msg);
641
642 return;
643 }
644
 645 /* Miscellaneous errors */
646 errnum = find_first_bit(&allErrors, ARRAY_SIZE(error_name));
647
648 branch = extract_fbdchan_indx(info->ferr_nf_fbd);
649
650 i5400_mc_printk(mci, KERN_EMERG,
651 "Non-Fatal misc error (Branch=%d Err=%#lx (%s))",
652 branch >> 1, allErrors, error_name[errnum]);
653}
654
655/*
656 * i5400_process_error_info Process the error info that is
657 * in the 'info' structure, previously retrieved from hardware
658 */
659static void i5400_process_error_info(struct mem_ctl_info *mci,
660 struct i5400_error_info *info)
661{ u32 allErrors;
662
663 /* First handle any fatal errors that occurred */
664 allErrors = (info->ferr_fat_fbd & FERR_FAT_MASK);
665 i5400_proccess_non_recoverable_info(mci, info, allErrors);
666
667 /* now handle any non-fatal errors that occurred */
668 i5400_process_nonfatal_error_info(mci, info);
669}
670
671/*
672 * i5400_clear_error Retrieve any error from the hardware
673 * but do NOT process that error.
674 * Used for 'clearing' out of previous errors
675 * Called by the Core module.
676 */
677static void i5400_clear_error(struct mem_ctl_info *mci)
678{
679 struct i5400_error_info info;
680
681 i5400_get_error_info(mci, &info);
682}
683
684/*
685 * i5400_check_error Retrieve and process errors reported by the
686 * hardware. Called by the Core module.
687 */
688static void i5400_check_error(struct mem_ctl_info *mci)
689{
690 struct i5400_error_info info;
691 debugf4("MC%d: " __FILE__ ": %s()\n", mci->mc_idx, __func__);
692 i5400_get_error_info(mci, &info);
693 i5400_process_error_info(mci, &info);
694}
695
696/*
697 * i5400_put_devices 'put' all the devices that we have
698 * reserved via 'get'
699 */
700static void i5400_put_devices(struct mem_ctl_info *mci)
701{
702 struct i5400_pvt *pvt;
703
704 pvt = mci->pvt_info;
705
706 /* Decrement usage count for devices */
707 if (pvt->branch_1)
708 pci_dev_put(pvt->branch_1);
709
710 if (pvt->branch_0)
711 pci_dev_put(pvt->branch_0);
712
713 if (pvt->fsb_error_regs)
714 pci_dev_put(pvt->fsb_error_regs);
715
716 if (pvt->branchmap_werrors)
717 pci_dev_put(pvt->branchmap_werrors);
718}
719
720/*
721 * i5400_get_devices Find and perform 'get' operation on the MCH's
722 * device/functions we want to reference for this driver
723 *
724 * Need to 'get' device 16 func 1 and func 2
725 */
726static int i5400_get_devices(struct mem_ctl_info *mci, int dev_idx)
727{
728 struct i5400_pvt *pvt;
729 struct pci_dev *pdev;
730
731 pvt = mci->pvt_info;
732 pvt->branchmap_werrors = NULL;
733 pvt->fsb_error_regs = NULL;
734 pvt->branch_0 = NULL;
735 pvt->branch_1 = NULL;
736
737 /* Attempt to 'get' the MCH register we want */
738 pdev = NULL;
739 while (!pvt->branchmap_werrors || !pvt->fsb_error_regs) {
740 pdev = pci_get_device(PCI_VENDOR_ID_INTEL,
741 PCI_DEVICE_ID_INTEL_5400_ERR, pdev);
742 if (!pdev) {
743 /* End of list, leave */
744 i5400_printk(KERN_ERR,
745 "'system address,Process Bus' "
746 "device not found:"
747 "vendor 0x%x device 0x%x ERR funcs "
748 "(broken BIOS?)\n",
749 PCI_VENDOR_ID_INTEL,
750 PCI_DEVICE_ID_INTEL_5400_ERR);
751 goto error;
752 }
753
754 /* Store device 16 funcs 1 and 2 */
755 switch (PCI_FUNC(pdev->devfn)) {
756 case 1:
757 pvt->branchmap_werrors = pdev;
758 break;
759 case 2:
760 pvt->fsb_error_regs = pdev;
761 break;
762 }
763 }
764
765 debugf1("System Address, processor bus- PCI Bus ID: %s %x:%x\n",
766 pci_name(pvt->system_address),
767 pvt->system_address->vendor, pvt->system_address->device);
768 debugf1("Branchmap, control and errors - PCI Bus ID: %s %x:%x\n",
769 pci_name(pvt->branchmap_werrors),
770 pvt->branchmap_werrors->vendor, pvt->branchmap_werrors->device);
771 debugf1("FSB Error Regs - PCI Bus ID: %s %x:%x\n",
772 pci_name(pvt->fsb_error_regs),
773 pvt->fsb_error_regs->vendor, pvt->fsb_error_regs->device);
774
775 pvt->branch_0 = pci_get_device(PCI_VENDOR_ID_INTEL,
776 PCI_DEVICE_ID_INTEL_5400_FBD0, NULL);
777 if (!pvt->branch_0) {
778 i5400_printk(KERN_ERR,
779 "MC: 'BRANCH 0' device not found:"
780 "vendor 0x%x device 0x%x Func 0 (broken BIOS?)\n",
781 PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_5400_FBD0);
782 goto error;
783 }
784
785 /* If this device claims to have more than 2 channels then
786 * fetch Branch 1's information
787 */
788 if (pvt->maxch < CHANNELS_PER_BRANCH)
789 return 0;
790
791 pvt->branch_1 = pci_get_device(PCI_VENDOR_ID_INTEL,
792 PCI_DEVICE_ID_INTEL_5400_FBD1, NULL);
793 if (!pvt->branch_1) {
794 i5400_printk(KERN_ERR,
795 "MC: 'BRANCH 1' device not found:"
796 "vendor 0x%x device 0x%x Func 0 "
797 "(broken BIOS?)\n",
798 PCI_VENDOR_ID_INTEL,
799 PCI_DEVICE_ID_INTEL_5400_FBD1);
800 goto error;
801 }
802
803 return 0;
804
805error:
806 i5400_put_devices(mci);
807 return -ENODEV;
808}
809
810/*
 811 * determine_amb_present_reg
812 *
813 * the information is contained in NUM_MTRS_PER_BRANCH different registers
814 * determining which of the NUM_MTRS_PER_BRANCH requires knowing
815 * which channel is in question
816 *
817 * 2 branches, each with 2 channels
818 * b0_ambpresent0 for channel '0'
819 * b0_ambpresent1 for channel '1'
820 * b1_ambpresent0 for channel '2'
821 * b1_ambpresent1 for channel '3'
822 */
823static int determine_amb_present_reg(struct i5400_pvt *pvt, int channel)
824{
825 int amb_present;
826
827 if (channel < CHANNELS_PER_BRANCH) {
828 if (channel & 0x1)
829 amb_present = pvt->b0_ambpresent1;
830 else
831 amb_present = pvt->b0_ambpresent0;
832 } else {
833 if (channel & 0x1)
834 amb_present = pvt->b1_ambpresent1;
835 else
836 amb_present = pvt->b1_ambpresent0;
837 }
838
839 return amb_present;
840}
841
842/*
843 * determine_mtr(pvt, csrow, channel)
844 *
 845 * return the proper MTR register as determined by the csrow and channel desired
846 */
847static int determine_mtr(struct i5400_pvt *pvt, int csrow, int channel)
848{
849 int mtr;
850 int n;
851
852 /* There is one MTR for each slot pair of FB-DIMMs,
853 Each slot may have one or two ranks (2 csrows),
854 Each slot pair may be at branch 0 or branch 1.
855 So, csrow should be divided by eight
856 */
857 n = csrow >> 3;
858
859 if (n >= NUM_MTRS_PER_BRANCH) {
860 debugf0("ERROR: trying to access an invalid csrow: %d\n", csrow);
861 return 0;
862 }
863
864 if (channel < CHANNELS_PER_BRANCH)
865 mtr = pvt->b0_mtr[n];
866 else
867 mtr = pvt->b1_mtr[n];
868
869 return mtr;
870}
871
 872/* decode_mtr() - decode one Memory Technology Register value and
 873 * report the DIMM geometry it describes (debug output) */
874static void decode_mtr(int slot_row, u16 mtr)
875{
876 int ans;
877
878 ans = MTR_DIMMS_PRESENT(mtr);
879
880 debugf2("\tMTR%d=0x%x: DIMMs are %s\n", slot_row, mtr,
881 ans ? "Present" : "NOT Present");
882 if (!ans)
883 return;
884
885 debugf2("\t\tWIDTH: x%d\n", MTR_DRAM_WIDTH(mtr));
886
887 debugf2("\t\tELECTRICAL THROTTLING is %s\n",
888 MTR_DIMMS_ETHROTTLE(mtr) ? "enabled": "disabled");
889
890 debugf2("\t\tNUMBANK: %d bank(s)\n", MTR_DRAM_BANKS(mtr));
891 debugf2("\t\tNUMRANK: %s\n", MTR_DIMM_RANK(mtr) ? "double" : "single");
892 debugf2("\t\tNUMROW: %s\n", numrow_toString[MTR_DIMM_ROWS(mtr)]);
893 debugf2("\t\tNUMCOL: %s\n", numcol_toString[MTR_DIMM_COLS(mtr)]);
894}
895
896static void handle_channel(struct i5400_pvt *pvt, int csrow, int channel,
897 struct i5400_dimm_info *dinfo)
898{
899 int mtr;
900 int amb_present_reg;
901 int addrBits;
902
903 mtr = determine_mtr(pvt, csrow, channel);
904 if (MTR_DIMMS_PRESENT(mtr)) {
905 amb_present_reg = determine_amb_present_reg(pvt, channel);
906
907 /* Determine if there is a DIMM present in this DIMM slot */
908 if (amb_present_reg & (1 << (csrow >> 1))) {
909 dinfo->dual_rank = MTR_DIMM_RANK(mtr);
910
911 if (!((dinfo->dual_rank == 0) &&
912 ((csrow & 0x1) == 0x1))) {
913 /* Start with the number of bits for a Bank
914 * on the DRAM */
915 addrBits = MTR_DRAM_BANKS_ADDR_BITS(mtr);
 916 /* Add the number of ROW bits */
917 addrBits += MTR_DIMM_ROWS_ADDR_BITS(mtr);
918 /* add the number of COLUMN bits */
919 addrBits += MTR_DIMM_COLS_ADDR_BITS(mtr);
920
921 addrBits += 6; /* add 64 bits per DIMM */
 922 addrBits -= 20; /* divide by 2^20 */
 923 addrBits -= 3; /* 8 bits per byte */
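/* e.g. 3 bank bits + 14 row bits + 11 column bits + 6 - 20 - 3 = 11,
 * so 1 << 11 = 2048 megabytes for that rank (illustrative numbers only) */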
924
925 dinfo->megabytes = 1 << addrBits;
926 }
927 }
928 }
929}
930
931/*
932 * calculate_dimm_size
933 *
934 * also will output a DIMM matrix map, if debug is enabled, for viewing
935 * how the DIMMs are populated
936 */
937static void calculate_dimm_size(struct i5400_pvt *pvt)
938{
939 struct i5400_dimm_info *dinfo;
940 int csrow, max_csrows;
941 char *p, *mem_buffer;
942 int space, n;
943 int channel;
944
945 /* ================= Generate some debug output ================= */
946 space = PAGE_SIZE;
947 mem_buffer = p = kmalloc(space, GFP_KERNEL);
948 if (p == NULL) {
949 i5400_printk(KERN_ERR, "MC: %s:%s() kmalloc() failed\n",
950 __FILE__, __func__);
951 return;
952 }
953
954 /* Scan all the actual CSROWS (which is # of DIMMS * 2)
955 * and calculate the information for each DIMM
956 * Start with the highest csrow first, to display it first
957 * and work toward the 0th csrow
958 */
959 max_csrows = pvt->maxdimmperch * 2;
960 for (csrow = max_csrows - 1; csrow >= 0; csrow--) {
961
962 /* on an odd csrow, first output a 'boundary' marker,
963 * then reset the message buffer */
964 if (csrow & 0x1) {
965 n = snprintf(p, space, "---------------------------"
966 "--------------------------------");
967 p += n;
968 space -= n;
969 debugf2("%s\n", mem_buffer);
970 p = mem_buffer;
971 space = PAGE_SIZE;
972 }
973 n = snprintf(p, space, "csrow %2d ", csrow);
974 p += n;
975 space -= n;
976
977 for (channel = 0; channel < pvt->maxch; channel++) {
978 dinfo = &pvt->dimm_info[csrow][channel];
979 handle_channel(pvt, csrow, channel, dinfo);
980 n = snprintf(p, space, "%4d MB | ", dinfo->megabytes);
981 p += n;
982 space -= n;
983 }
984 debugf2("%s\n", mem_buffer);
985 p = mem_buffer;
986 space = PAGE_SIZE;
987 }
988
989 /* Output the last bottom 'boundary' marker */
990 n = snprintf(p, space, "---------------------------"
991 "--------------------------------");
992 p += n;
993 space -= n;
994 debugf2("%s\n", mem_buffer);
995 p = mem_buffer;
996 space = PAGE_SIZE;
997
998 /* now output the 'channel' labels */
999 n = snprintf(p, space, " ");
1000 p += n;
1001 space -= n;
1002 for (channel = 0; channel < pvt->maxch; channel++) {
1003 n = snprintf(p, space, "channel %d | ", channel);
1004 p += n;
1005 space -= n;
1006 }
1007
1008 /* output the last message and free buffer */
1009 debugf2("%s\n", mem_buffer);
1010 kfree(mem_buffer);
1011}
1012
1013/*
1014 * i5400_get_mc_regs read in the necessary registers and
1015 * cache locally
1016 *
1017 * Fills in the private data members
1018 */
1019static void i5400_get_mc_regs(struct mem_ctl_info *mci)
1020{
1021 struct i5400_pvt *pvt;
1022 u32 actual_tolm;
1023 u16 limit;
1024 int slot_row;
1025 int maxch;
1026 int maxdimmperch;
1027 int way0, way1;
1028
1029 pvt = mci->pvt_info;
1030
1031 pci_read_config_dword(pvt->system_address, AMBASE,
1032 (u32 *) &pvt->ambase);
1033 pci_read_config_dword(pvt->system_address, AMBASE + sizeof(u32),
 1034 ((u32 *) &pvt->ambase) + 1);
1035
1036 maxdimmperch = pvt->maxdimmperch;
1037 maxch = pvt->maxch;
1038
1039 debugf2("AMBASE= 0x%lx MAXCH= %d MAX-DIMM-Per-CH= %d\n",
1040 (long unsigned int)pvt->ambase, pvt->maxch, pvt->maxdimmperch);
1041
1042 /* Get the Branch Map regs */
1043 pci_read_config_word(pvt->branchmap_werrors, TOLM, &pvt->tolm);
1044 pvt->tolm >>= 12;
1045 debugf2("\nTOLM (number of 256M regions) =%u (0x%x)\n", pvt->tolm,
1046 pvt->tolm);
1047
1048 actual_tolm = (u32) ((1000l * pvt->tolm) >> (30 - 28));
1049 debugf2("Actual TOLM byte addr=%u.%03u GB (0x%x)\n",
1050 actual_tolm/1000, actual_tolm % 1000, pvt->tolm << 28);
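/* e.g. a tolm value of 8 (eight 256M regions) gives actual_tolm ==
 * (1000 * 8) >> 2 == 2000, printed as "2.000 GB", with byte address
 * 8 << 28 == 0x80000000 (illustrative value only) */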
1051
1052 pci_read_config_word(pvt->branchmap_werrors, MIR0, &pvt->mir0);
1053 pci_read_config_word(pvt->branchmap_werrors, MIR1, &pvt->mir1);
1054
1055 /* Get the MIR[0-1] regs */
1056 limit = (pvt->mir0 >> 4) & 0x0fff;
1057 way0 = pvt->mir0 & 0x1;
1058 way1 = pvt->mir0 & 0x2;
1059 debugf2("MIR0: limit= 0x%x WAY1= %u WAY0= %x\n", limit, way1, way0);
1060 limit = (pvt->mir1 >> 4) & 0xfff;
1061 way0 = pvt->mir1 & 0x1;
1062 way1 = pvt->mir1 & 0x2;
1063 debugf2("MIR1: limit= 0x%x WAY1= %u WAY0= %x\n", limit, way1, way0);
1064
1065 /* Get the set of MTR[0-3] regs by each branch */
1066 for (slot_row = 0; slot_row < NUM_MTRS_PER_BRANCH; slot_row++) {
1067 int where = MTR0 + (slot_row * sizeof(u32));
1068
1069 /* Branch 0 set of MTR registers */
1070 pci_read_config_word(pvt->branch_0, where,
1071 &pvt->b0_mtr[slot_row]);
1072
1073 debugf2("MTR%d where=0x%x B0 value=0x%x\n", slot_row, where,
1074 pvt->b0_mtr[slot_row]);
1075
1076 if (pvt->maxch < CHANNELS_PER_BRANCH) {
1077 pvt->b1_mtr[slot_row] = 0;
1078 continue;
1079 }
1080
1081 /* Branch 1 set of MTR registers */
1082 pci_read_config_word(pvt->branch_1, where,
1083 &pvt->b1_mtr[slot_row]);
1084 debugf2("MTR%d where=0x%x B1 value=0x%x\n", slot_row, where,
1085 pvt->b1_mtr[slot_row]);
1086 }
1087
1088 /* Read and dump branch 0's MTRs */
1089 debugf2("\nMemory Technology Registers:\n");
1090 debugf2(" Branch 0:\n");
1091 for (slot_row = 0; slot_row < NUM_MTRS_PER_BRANCH; slot_row++)
1092 decode_mtr(slot_row, pvt->b0_mtr[slot_row]);
1093
1094 pci_read_config_word(pvt->branch_0, AMBPRESENT_0,
1095 &pvt->b0_ambpresent0);
1096 debugf2("\t\tAMB-Branch 0-present0 0x%x:\n", pvt->b0_ambpresent0);
1097 pci_read_config_word(pvt->branch_0, AMBPRESENT_1,
1098 &pvt->b0_ambpresent1);
1099 debugf2("\t\tAMB-Branch 0-present1 0x%x:\n", pvt->b0_ambpresent1);
1100
 1101 /* Only if we have 2 branches (4 channels) */
1102 if (pvt->maxch < CHANNELS_PER_BRANCH) {
1103 pvt->b1_ambpresent0 = 0;
1104 pvt->b1_ambpresent1 = 0;
1105 } else {
1106 /* Read and dump branch 1's MTRs */
1107 debugf2(" Branch 1:\n");
1108 for (slot_row = 0; slot_row < NUM_MTRS_PER_BRANCH; slot_row++)
1109 decode_mtr(slot_row, pvt->b1_mtr[slot_row]);
1110
1111 pci_read_config_word(pvt->branch_1, AMBPRESENT_0,
1112 &pvt->b1_ambpresent0);
1113 debugf2("\t\tAMB-Branch 1-present0 0x%x:\n",
1114 pvt->b1_ambpresent0);
1115 pci_read_config_word(pvt->branch_1, AMBPRESENT_1,
1116 &pvt->b1_ambpresent1);
1117 debugf2("\t\tAMB-Branch 1-present1 0x%x:\n",
1118 pvt->b1_ambpresent1);
1119 }
1120
1121 /* Go and determine the size of each DIMM and place in an
1122 * orderly matrix */
1123 calculate_dimm_size(pvt);
1124}
1125
1126/*
1127 * i5400_init_csrows Initialize the 'csrows' table within
1128 * the mci control structure with the
1129 * addressing of memory.
1130 *
1131 * return:
1132 * 0 success
1133 * 1 no actual memory found on this MC
1134 */
1135static int i5400_init_csrows(struct mem_ctl_info *mci)
1136{
1137 struct i5400_pvt *pvt;
1138 struct csrow_info *p_csrow;
1139 int empty, channel_count;
1140 int max_csrows;
1141 int mtr;
1142 int csrow_megs;
1143 int channel;
1144 int csrow;
1145
1146 pvt = mci->pvt_info;
1147
1148 channel_count = pvt->maxch;
1149 max_csrows = pvt->maxdimmperch * 2;
1150
1151 empty = 1; /* Assume NO memory */
1152
1153 for (csrow = 0; csrow < max_csrows; csrow++) {
1154 p_csrow = &mci->csrows[csrow];
1155
1156 p_csrow->csrow_idx = csrow;
1157
1158 /* use branch 0 for the basis */
1159 mtr = determine_mtr(pvt, csrow, 0);
1160
1161 /* if no DIMMS on this row, continue */
1162 if (!MTR_DIMMS_PRESENT(mtr))
1163 continue;
1164
1165 /* FAKE OUT VALUES, FIXME */
1166 p_csrow->first_page = 0 + csrow * 20;
1167 p_csrow->last_page = 9 + csrow * 20;
1168 p_csrow->page_mask = 0xFFF;
1169
1170 p_csrow->grain = 8;
1171
1172 csrow_megs = 0;
1173 for (channel = 0; channel < pvt->maxch; channel++)
1174 csrow_megs += pvt->dimm_info[csrow][channel].megabytes;
1175
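/* megabytes -> pages: with 4KB pages, 1 MB == 256 pages, hence the << 8 */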
1176 p_csrow->nr_pages = csrow_megs << 8;
1177
1178 /* Assume DDR2 for now */
1179 p_csrow->mtype = MEM_FB_DDR2;
1180
1181 /* ask what device type on this row */
 1182 if (MTR_DRAM_WIDTH(mtr) == 8)
1183 p_csrow->dtype = DEV_X8;
1184 else
1185 p_csrow->dtype = DEV_X4;
1186
1187 p_csrow->edac_mode = EDAC_S8ECD8ED;
1188
1189 empty = 0;
1190 }
1191
1192 return empty;
1193}
1194
1195/*
1196 * i5400_enable_error_reporting
1197 * Turn on the memory reporting features of the hardware
1198 */
1199static void i5400_enable_error_reporting(struct mem_ctl_info *mci)
1200{
1201 struct i5400_pvt *pvt;
1202 u32 fbd_error_mask;
1203
1204 pvt = mci->pvt_info;
1205
1206 /* Read the FBD Error Mask Register */
1207 pci_read_config_dword(pvt->branchmap_werrors, EMASK_FBD,
1208 &fbd_error_mask);
1209
1210 /* Enable with a '0' */
1211 fbd_error_mask &= ~(ENABLE_EMASK_ALL);
1212
1213 pci_write_config_dword(pvt->branchmap_werrors, EMASK_FBD,
1214 fbd_error_mask);
1215}
1216
1217/*
1218 * i5400_get_dimm_and_channel_counts(pdev, &num_csrows, &num_channels)
1219 *
1220 * ask the device how many channels are present and how many CSROWS
1221 * as well
1222 */
1223static void i5400_get_dimm_and_channel_counts(struct pci_dev *pdev,
1224 int *num_dimms_per_channel,
1225 int *num_channels)
1226{
1227 u8 value;
1228
1229 /* Need to retrieve just how many channels and dimms per channel are
1230 * supported on this memory controller
1231 */
1232 pci_read_config_byte(pdev, MAXDIMMPERCH, &value);
1233 *num_dimms_per_channel = (int)value * 2;
1234
1235 pci_read_config_byte(pdev, MAXCH, &value);
1236 *num_channels = (int)value;
1237}
1238
1239/*
1240 * i5400_probe1 Probe for ONE instance of device to see if it is
1241 * present.
1242 * return:
1243 * 0 for FOUND a device
1244 * < 0 for error code
1245 */
1246static int i5400_probe1(struct pci_dev *pdev, int dev_idx)
1247{
1248 struct mem_ctl_info *mci;
1249 struct i5400_pvt *pvt;
1250 int num_channels;
1251 int num_dimms_per_channel;
1252 int num_csrows;
1253
1254 debugf0("MC: " __FILE__ ": %s(), pdev bus %u dev=0x%x fn=0x%x\n",
1255 __func__,
1256 pdev->bus->number,
1257 PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));
1258
1259 /* We only are looking for func 0 of the set */
1260 if (PCI_FUNC(pdev->devfn) != 0)
1261 return -ENODEV;
1262
1263 /* Ask the devices for the number of CSROWS and CHANNELS so
1264 * that we can calculate the memory resources, etc
1265 *
 1266 * The chipset will report what it can handle, which will be greater
 1267 * than or equal to what the motherboard manufacturer will implement.
 1268 *
 1269 * As we don't have a motherboard identification routine to determine
 1270 * the actual number of slots/DIMMs per channel, we thus utilize the
 1271 * resources as specified by the chipset. Thus, we might have
 1272 * more DIMMs per channel than are actually on the mobo, but this
 1273 * allows the driver to support up to the chipset max, without
1274 * some fancy mobo determination.
1275 */
1276 i5400_get_dimm_and_channel_counts(pdev, &num_dimms_per_channel,
1277 &num_channels);
1278 num_csrows = num_dimms_per_channel * 2;
1279
1280 debugf0("MC: %s(): Number of - Channels= %d DIMMS= %d CSROWS= %d\n",
1281 __func__, num_channels, num_dimms_per_channel, num_csrows);
1282
1283 /* allocate a new MC control structure */
1284 mci = edac_mc_alloc(sizeof(*pvt), num_csrows, num_channels, 0);
1285
1286 if (mci == NULL)
1287 return -ENOMEM;
1288
1289 debugf0("MC: " __FILE__ ": %s(): mci = %p\n", __func__, mci);
1290
1291 mci->dev = &pdev->dev; /* record ptr to the generic device */
1292
1293 pvt = mci->pvt_info;
1294 pvt->system_address = pdev; /* Record this device in our private */
1295 pvt->maxch = num_channels;
1296 pvt->maxdimmperch = num_dimms_per_channel;
1297
1298 /* 'get' the pci devices we want to reserve for our use */
1299 if (i5400_get_devices(mci, dev_idx))
1300 goto fail0;
1301
1302 /* Time to get serious */
1303 i5400_get_mc_regs(mci); /* retrieve the hardware registers */
1304
1305 mci->mc_idx = 0;
1306 mci->mtype_cap = MEM_FLAG_FB_DDR2;
1307 mci->edac_ctl_cap = EDAC_FLAG_NONE;
1308 mci->edac_cap = EDAC_FLAG_NONE;
1309 mci->mod_name = "i5400_edac.c";
1310 mci->mod_ver = I5400_REVISION;
1311 mci->ctl_name = i5400_devs[dev_idx].ctl_name;
1312 mci->dev_name = pci_name(pdev);
1313 mci->ctl_page_to_phys = NULL;
1314
1315 /* Set the function pointer to an actual operation function */
1316 mci->edac_check = i5400_check_error;
1317
1318 /* initialize the MC control structure 'csrows' table
1319 * with the mapping and control information */
1320 if (i5400_init_csrows(mci)) {
1321 debugf0("MC: Setting mci->edac_cap to EDAC_FLAG_NONE\n"
1322 " because i5400_init_csrows() returned nonzero "
1323 "value\n");
1324 mci->edac_cap = EDAC_FLAG_NONE; /* no csrows found */
1325 } else {
1326 debugf1("MC: Enable error reporting now\n");
1327 i5400_enable_error_reporting(mci);
1328 }
1329
1330 /* add this new MC control structure to EDAC's list of MCs */
1331 if (edac_mc_add_mc(mci)) {
1332 debugf0("MC: " __FILE__
1333 ": %s(): failed edac_mc_add_mc()\n", __func__);
1334 /* FIXME: perhaps some code should go here that disables error
1335 * reporting if we just enabled it
1336 */
1337 goto fail1;
1338 }
1339
1340 i5400_clear_error(mci);
1341
1342 /* allocating generic PCI control info */
1343 i5400_pci = edac_pci_create_generic_ctl(&pdev->dev, EDAC_MOD_STR);
1344 if (!i5400_pci) {
1345 printk(KERN_WARNING
1346 "%s(): Unable to create PCI control\n",
1347 __func__);
1348 printk(KERN_WARNING
1349 "%s(): PCI error report via EDAC not setup\n",
1350 __func__);
1351 }
1352
1353 return 0;
1354
1355 /* Error exit unwinding stack */
1356fail1:
1357
1358 i5400_put_devices(mci);
1359
1360fail0:
1361 edac_mc_free(mci);
1362 return -ENODEV;
1363}
1364
1365/*
1366 * i5400_init_one constructor for one instance of device
1367 *
1368 * returns:
1369 * negative on error
1370 * count (>= 0)
1371 */
1372static int __devinit i5400_init_one(struct pci_dev *pdev,
1373 const struct pci_device_id *id)
1374{
1375 int rc;
1376
1377 debugf0("MC: " __FILE__ ": %s()\n", __func__);
1378
1379 /* wake up device */
1380 rc = pci_enable_device(pdev);
1381 if (rc == -EIO)
1382 return rc;
1383
1384 /* now probe and enable the device */
1385 return i5400_probe1(pdev, id->driver_data);
1386}
1387
1388/*
1389 * i5400_remove_one destructor for one instance of device
1390 *
1391 */
1392static void __devexit i5400_remove_one(struct pci_dev *pdev)
1393{
1394 struct mem_ctl_info *mci;
1395
1396 debugf0(__FILE__ ": %s()\n", __func__);
1397
1398 if (i5400_pci)
1399 edac_pci_release_generic_ctl(i5400_pci);
1400
1401 mci = edac_mc_del_mc(&pdev->dev);
1402 if (!mci)
1403 return;
1404
1405 /* retrieve references to resources, and free those resources */
1406 i5400_put_devices(mci);
1407
1408 edac_mc_free(mci);
1409}
1410
1411/*
1412 * pci_device_id table for which devices we are looking for
1413 *
1414 * The "E500P" device is the first device supported.
1415 */
1416static const struct pci_device_id i5400_pci_tbl[] __devinitdata = {
1417 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_5400_ERR)},
1418 {0,} /* 0 terminated list. */
1419};
1420
1421MODULE_DEVICE_TABLE(pci, i5400_pci_tbl);
1422
1423/*
1424 * i5400_driver pci_driver structure for this module
1425 *
1426 */
1427static struct pci_driver i5400_driver = {
1428 .name = KBUILD_BASENAME,
1429 .probe = i5400_init_one,
1430 .remove = __devexit_p(i5400_remove_one),
1431 .id_table = i5400_pci_tbl,
1432};
1433
1434/*
1435 * i5400_init Module entry function
1436 * Try to initialize this module for its devices
1437 */
1438static int __init i5400_init(void)
1439{
1440 int pci_rc;
1441
1442 debugf2("MC: " __FILE__ ": %s()\n", __func__);
1443
1444 /* Ensure that the OPSTATE is set correctly for POLL or NMI */
1445 opstate_init();
1446
1447 pci_rc = pci_register_driver(&i5400_driver);
1448
1449 return (pci_rc < 0) ? pci_rc : 0;
1450}
1451
1452/*
1453 * i5400_exit() Module exit function
1454 * Unregister the driver
1455 */
1456static void __exit i5400_exit(void)
1457{
1458 debugf2("MC: " __FILE__ ": %s()\n", __func__);
1459 pci_unregister_driver(&i5400_driver);
1460}
1461
1462module_init(i5400_init);
1463module_exit(i5400_exit);
1464
1465MODULE_LICENSE("GPL");
1466MODULE_AUTHOR("Ben Woodard <woodard@redhat.com> Red Hat Inc. (http://www.redhat.com)");
1467MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com> Red Hat Inc. (http://www.redhat.com)");
1468MODULE_DESCRIPTION("MC Driver for Intel I5400 memory controllers - " I5400_REVISION);
1469
1470module_param(edac_op_state, int, 0444);
1471MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");