path: root/drivers/edac/i5400_edac.c
blob: 74d6ec342afbf2e0f34c9d8fe808f3c8b0f6d9c5

/*
 * Intel 5400 class Memory Controllers kernel module (Seaburg)
 *
 * This file may be distributed under the terms of the
 * GNU General Public License.
 *
 * Copyright (c) 2008 by:
 *	 Ben Woodard <woodard@redhat.com>
 *	 Mauro Carvalho Chehab <mchehab@redhat.com>
 *
 * Red Hat Inc. http://www.redhat.com
 *
 * Forked and adapted from the i5000_edac driver, which was
 * written by Douglas Thompson of Linux Networx <norsk5@xmission.com>
 *
 * This module is based on the following document:
 *
 * Intel 5400 Chipset Memory Controller Hub (MCH) - Datasheet
 * 	http://developer.intel.com/design/chipsets/datashts/313070.htm
 *
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/pci_ids.h>
#include <linux/slab.h>
#include <linux/edac.h>
#include <linux/mmzone.h>

#include "edac_core.h"

/*
 * Alter this version for the I5400 module when modifications are made
 */
#define I5400_REVISION    " Ver: 1.0.0"

#define EDAC_MOD_STR      "i5400_edac"

#define i5400_printk(level, fmt, arg...) \
	edac_printk(level, "i5400", fmt, ##arg)

#define i5400_mc_printk(mci, level, fmt, arg...) \
	edac_mc_chipset_printk(mci, level, "i5400", fmt, ##arg)

/* Limits for i5400 */
#define NUM_MTRS_PER_BRANCH	4
#define CHANNELS_PER_BRANCH	2
#define MAX_DIMMS_PER_CHANNEL	NUM_MTRS_PER_BRANCH
#define	MAX_CHANNELS		4
/* max possible csrows per channel */
#define MAX_CSROWS		(MAX_DIMMS_PER_CHANNEL)

/* Device 16,
 * Function 0: System Address
 * Function 1: Memory Branch Map, Control, Errors Register
 * Function 2: FSB Error Registers
 *
 * All 3 functions of Device 16 (0,1,2) share the SAME DID, so the driver
 * uses PCI_DEVICE_ID_INTEL_5400_ERR for Device 16 (functions 0,1,2), and
 * PCI_DEVICE_ID_INTEL_5400_FBD0 / PCI_DEVICE_ID_INTEL_5400_FBD1 for
 * Devices 21 and 22 (function 0).
 */

	/* OFFSETS for Function 0 */
#define		AMBASE			0x48 /* AMB Mem Mapped Reg Region Base */
#define		MAXCH			0x56 /* Max Channel Number */
#define		MAXDIMMPERCH		0x57 /* Max DIMM PER Channel Number */

	/* OFFSETS for Function 1 */
#define		TOLM			0x6C
#define		REDMEMB			0x7C
#define			REC_ECC_LOCATOR_ODD(x)	((x) & 0x3fe00) /* bits [17:9] indicate ODD, [8:0]  indicate EVEN */
#define		MIR0			0x80
#define		MIR1			0x84
#define		AMIR0			0x8c
#define		AMIR1			0x90

	/* Fatal error registers */
#define		FERR_FAT_FBD		0x98	/* also called FERR_FAT_FB_DIMM in the datasheet */
#define			FERR_FAT_FBDCHAN (3<<28)	/* channel index where the highest-order error occurred */

#define		NERR_FAT_FBD		0x9c
#define		FERR_NF_FBD		0xa0	/* also called FERR_NFAT_FB_DIMM in the datasheet */

	/* Non-fatal error register */
#define		NERR_NF_FBD		0xa4

	/* Enable error mask */
#define		EMASK_FBD		0xa8

#define		ERR0_FBD		0xac
#define		ERR1_FBD		0xb0
#define		ERR2_FBD		0xb4
#define		MCERR_FBD		0xb8

	/* No OFFSETS for Device 16 Function 2 */

/*
 * Device 21,
 * Function 0: Memory Map Branch 0
 *
 * Device 22,
 * Function 0: Memory Map Branch 1
 */

	/* OFFSETS for Function 0 */
#define AMBPRESENT_0	0x64
#define AMBPRESENT_1	0x66
#define MTR0		0x80
#define MTR1		0x82
#define MTR2		0x84
#define MTR3		0x86

	/* OFFSETS for Function 1 */
#define NRECFGLOG		0x74
#define RECFGLOG		0x78
#define NRECMEMA		0xbe
#define NRECMEMB		0xc0
#define NRECFB_DIMMA		0xc4
#define NRECFB_DIMMB		0xc8
#define NRECFB_DIMMC		0xcc
#define NRECFB_DIMMD		0xd0
#define NRECFB_DIMME		0xd4
#define NRECFB_DIMMF		0xd8
#define REDMEMA			0xdC
#define RECMEMA			0xf0
#define RECMEMB			0xf4
#define RECFB_DIMMA		0xf8
#define RECFB_DIMMB		0xec
#define RECFB_DIMMC		0xf0
#define RECFB_DIMMD		0xf4
#define RECFB_DIMME		0xf8
#define RECFB_DIMMF		0xfC

/*
 * Error indicator bits and masks
 * Error masks follow Table 5-17 of the i5400 datasheet
 */

enum error_mask {
	EMASK_M1  = 1<<0,  /* Memory Write error on non-redundant retry */
	EMASK_M2  = 1<<1,  /* Memory or FB-DIMM configuration CRC read error */
	EMASK_M3  = 1<<2,  /* Reserved */
	EMASK_M4  = 1<<3,  /* Uncorrectable Data ECC on Replay */
	EMASK_M5  = 1<<4,  /* Aliased Uncorrectable Non-Mirrored Demand Data ECC */
	EMASK_M6  = 1<<5,  /* Unsupported on i5400 */
	EMASK_M7  = 1<<6,  /* Aliased Uncorrectable Resilver- or Spare-Copy Data ECC */
	EMASK_M8  = 1<<7,  /* Aliased Uncorrectable Patrol Data ECC */
	EMASK_M9  = 1<<8,  /* Non-Aliased Uncorrectable Non-Mirrored Demand Data ECC */
	EMASK_M10 = 1<<9,  /* Unsupported on i5400 */
	EMASK_M11 = 1<<10, /* Non-Aliased Uncorrectable Resilver- or Spare-Copy Data ECC  */
	EMASK_M12 = 1<<11, /* Non-Aliased Uncorrectable Patrol Data ECC */
	EMASK_M13 = 1<<12, /* Memory Write error on first attempt */
	EMASK_M14 = 1<<13, /* FB-DIMM Configuration Write error on first attempt */
	EMASK_M15 = 1<<14, /* Memory or FB-DIMM configuration CRC read error */
	EMASK_M16 = 1<<15, /* Channel Failed-Over Occurred */
	EMASK_M17 = 1<<16, /* Correctable Non-Mirrored Demand Data ECC */
	EMASK_M18 = 1<<17, /* Unsupported on i5400 */
	EMASK_M19 = 1<<18, /* Correctable Resilver- or Spare-Copy Data ECC */
	EMASK_M20 = 1<<19, /* Correctable Patrol Data ECC */
	EMASK_M21 = 1<<20, /* FB-DIMM Northbound parity error on FB-DIMM Sync Status */
	EMASK_M22 = 1<<21, /* SPD protocol Error */
	EMASK_M23 = 1<<22, /* Non-Redundant Fast Reset Timeout */
	EMASK_M24 = 1<<23, /* Refresh error */
	EMASK_M25 = 1<<24, /* Memory Write error on redundant retry */
	EMASK_M26 = 1<<25, /* Redundant Fast Reset Timeout */
	EMASK_M27 = 1<<26, /* Correctable Counter Threshold Exceeded */
	EMASK_M28 = 1<<27, /* DIMM-Spare Copy Completed */
	EMASK_M29 = 1<<28, /* DIMM-Isolation Completed */
};

/*
 * Names to translate bit error into something useful
 */
static const char *error_name[] = {
	[0]  = "Memory Write error on non-redundant retry",
	[1]  = "Memory or FB-DIMM configuration CRC read error",
	/* Reserved */
	[3]  = "Uncorrectable Data ECC on Replay",
	[4]  = "Aliased Uncorrectable Non-Mirrored Demand Data ECC",
	/* M6 Unsupported on i5400 */
	[6]  = "Aliased Uncorrectable Resilver- or Spare-Copy Data ECC",
	[7]  = "Aliased Uncorrectable Patrol Data ECC",
	[8]  = "Non-Aliased Uncorrectable Non-Mirrored Demand Data ECC",
	/* M10 Unsupported on i5400 */
	[10] = "Non-Aliased Uncorrectable Resilver- or Spare-Copy Data ECC",
	[11] = "Non-Aliased Uncorrectable Patrol Data ECC",
	[12] = "Memory Write error on first attempt",
	[13] = "FB-DIMM Configuration Write error on first attempt",
	[14] = "Memory or FB-DIMM configuration CRC read error",
	[15] = "Channel Failed-Over Occurred",
	[16] = "Correctable Non-Mirrored Demand Data ECC",
	/* M18 Unsupported on i5400 */
	[18] = "Correctable Resilver- or Spare-Copy Data ECC",
	[19] = "Correctable Patrol Data ECC",
	[20] = "FB-DIMM Northbound parity error on FB-DIMM Sync Status",
	[21] = "SPD protocol Error",
	[22] = "Non-Redundant Fast Reset Timeout",
	[23] = "Refresh error",
	[24] = "Memory Write error on redundant retry",
	[25] = "Redundant Fast Reset Timeout",
	[26] = "Correctable Counter Threshold Exceeded",
	[27] = "DIMM-Spare Copy Completed",
	[28] = "DIMM-Isolation Completed",
};

/* Fatal errors */
#define ERROR_FAT_MASK		(EMASK_M1 | \
				 EMASK_M2 | \
				 EMASK_M23)

/* Correctable errors */
#define ERROR_NF_CORRECTABLE	(EMASK_M27 | \
				 EMASK_M20 | \
				 EMASK_M19 | \
				 EMASK_M18 | \
				 EMASK_M17 | \
				 EMASK_M16)
#define ERROR_NF_DIMM_SPARE	(EMASK_M29 | \
				 EMASK_M28)
#define ERROR_NF_SPD_PROTOCOL	(EMASK_M22)
#define ERROR_NF_NORTH_CRC	(EMASK_M21)

/* Recoverable errors */
#define ERROR_NF_RECOVERABLE	(EMASK_M26 | \
				 EMASK_M25 | \
				 EMASK_M24 | \
				 EMASK_M15 | \
				 EMASK_M14 | \
				 EMASK_M13 | \
				 EMASK_M12 | \
				 EMASK_M11 | \
				 EMASK_M9  | \
				 EMASK_M8  | \
				 EMASK_M7  | \
				 EMASK_M5)

/* uncorrectable errors */
#define ERROR_NF_UNCORRECTABLE	(EMASK_M4)

/* mask to all non-fatal errors */
#define ERROR_NF_MASK		(ERROR_NF_CORRECTABLE   | \
				 ERROR_NF_UNCORRECTABLE | \
				 ERROR_NF_RECOVERABLE   | \
				 ERROR_NF_DIMM_SPARE    | \
				 ERROR_NF_SPD_PROTOCOL  | \
				 ERROR_NF_NORTH_CRC)

/*
 * Define error masks for the several registers
 */

/* Enable all fatal and non fatal errors */
#define ENABLE_EMASK_ALL	(ERROR_FAT_MASK | ERROR_NF_MASK)

/* mask for fatal error registers */
#define FERR_FAT_MASK ERROR_FAT_MASK

/* masks for non-fatal error register */
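/*
 * As implied by to_nf_mask()/from_nf_ferr() below, the FERR_NF_FBD register
 * stores the non-fatal error bits shifted down by three positions relative
 * to the EMASK layout (M4..M28 land in bits 0..24), while M29 stays at
 * bit 28.  These helpers convert between the two layouts.
 */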
static inline int to_nf_mask(unsigned int mask)
{
	return (mask & EMASK_M29) | (mask >> 3);
};

static inline int from_nf_ferr(unsigned int mask)
{
	return (mask & EMASK_M29) |		/* Bit 28 */
	       (mask & ((1 << 28) - 1) << 3);	/* Bits 0 to 27 */
};

#define FERR_NF_MASK		to_nf_mask(ERROR_NF_MASK)
#define FERR_NF_CORRECTABLE	to_nf_mask(ERROR_NF_CORRECTABLE)
#define FERR_NF_DIMM_SPARE	to_nf_mask(ERROR_NF_DIMM_SPARE)
#define FERR_NF_SPD_PROTOCOL	to_nf_mask(ERROR_NF_SPD_PROTOCOL)
#define FERR_NF_NORTH_CRC	to_nf_mask(ERROR_NF_NORTH_CRC)
#define FERR_NF_RECOVERABLE	to_nf_mask(ERROR_NF_RECOVERABLE)
#define FERR_NF_UNCORRECTABLE	to_nf_mask(ERROR_NF_UNCORRECTABLE)

/* Defines to extract the various fields from the
 *	MTRx - Memory Technology Registers
 */
#define MTR_DIMMS_PRESENT(mtr)		((mtr) & (1 << 10))
#define MTR_DIMMS_ETHROTTLE(mtr)	((mtr) & (1 << 9))
#define MTR_DRAM_WIDTH(mtr)		(((mtr) & (1 << 8)) ? 8 : 4)
#define MTR_DRAM_BANKS(mtr)		(((mtr) & (1 << 6)) ? 8 : 4)
#define MTR_DRAM_BANKS_ADDR_BITS(mtr)	((MTR_DRAM_BANKS(mtr) == 8) ? 3 : 2)
#define MTR_DIMM_RANK(mtr)		(((mtr) >> 5) & 0x1)
#define MTR_DIMM_RANK_ADDR_BITS(mtr)	(MTR_DIMM_RANK(mtr) ? 2 : 1)
#define MTR_DIMM_ROWS(mtr)		(((mtr) >> 2) & 0x3)
#define MTR_DIMM_ROWS_ADDR_BITS(mtr)	(MTR_DIMM_ROWS(mtr) + 13)
#define MTR_DIMM_COLS(mtr)		((mtr) & 0x3)
#define MTR_DIMM_COLS_ADDR_BITS(mtr)	(MTR_DIMM_COLS(mtr) + 10)
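
/*
 * Hypothetical example: mtr = 0x0425 decodes as DIMM present (bit 10),
 * no electrical throttling, x4 width, 4 banks, dual rank,
 * 14 row address bits (16,384 rows) and 11 column address bits
 * (2,048 columns).
 */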

/* This applies to FERR_NF_FB-DIMM as well as FERR_FAT_FB-DIMM */
static inline int extract_fbdchan_indx(u32 x)
{
	return (x>>28) & 0x3;
}

#ifdef CONFIG_EDAC_DEBUG
/* MTR NUMROW */
static const char *numrow_toString[] = {
	"8,192 - 13 rows",
	"16,384 - 14 rows",
	"32,768 - 15 rows",
	"65,536 - 16 rows"
};

/* MTR NUMCOL */
static const char *numcol_toString[] = {
	"1,024 - 10 columns",
	"2,048 - 11 columns",
	"4,096 - 12 columns",
	"reserved"
};
#endif

/* Device name and register DID (Device ID) */
struct i5400_dev_info {
	const char *ctl_name;	/* name for this device */
	u16 fsb_mapping_errors;	/* DID for the branchmap,control */
};

/* Table of devices attributes supported by this driver */
static const struct i5400_dev_info i5400_devs[] = {
	{
		.ctl_name = "I5400",
		.fsb_mapping_errors = PCI_DEVICE_ID_INTEL_5400_ERR,
	},
};

struct i5400_dimm_info {
	int megabytes;		/* size, 0 means not present  */
};

/* driver private data structure */
struct i5400_pvt {
	struct pci_dev *system_address;		/* 16.0 */
	struct pci_dev *branchmap_werrors;	/* 16.1 */
	struct pci_dev *fsb_error_regs;		/* 16.2 */
	struct pci_dev *branch_0;		/* 21.0 */
	struct pci_dev *branch_1;		/* 22.0 */

	u16 tolm;				/* top of low memory */
	u64 ambase;				/* AMB BAR */

	u16 mir0, mir1;

	u16 b0_mtr[NUM_MTRS_PER_BRANCH];	/* Memory Technology Reg */
	u16 b0_ambpresent0;			/* Branch 0, Channel 0 */
	u16 b0_ambpresent1;			/* Branch 0, Channel 1 */

	u16 b1_mtr[NUM_MTRS_PER_BRANCH];	/* Memory Technology Reg */
	u16 b1_ambpresent0;			/* Branch 1, Channel 0 */
	u16 b1_ambpresent1;			/* Branch 1, Channel 1 */

	/* DIMM information matrix, allocating architecture maximums */
	struct i5400_dimm_info dimm_info[MAX_CSROWS][MAX_CHANNELS];

	/* Actual values for this controller */
	int maxch;				/* Max channels */
	int maxdimmperch;			/* Max DIMMs per channel */
};

/* I5400 MCH error information retrieved from Hardware */
struct i5400_error_info {
	/* These registers are always read from the MC */
	u32 ferr_fat_fbd;	/* First Errors Fatal */
	u32 nerr_fat_fbd;	/* Next Errors Fatal */
	u32 ferr_nf_fbd;	/* First Errors Non-Fatal */
	u32 nerr_nf_fbd;	/* Next Errors Non-Fatal */

	/* These registers are input ONLY if there was a Recoverable Error */
	u32 redmemb;		/* Recoverable Mem Data Error log B */
	u16 recmema;		/* Recoverable Mem Error log A */
	u32 recmemb;		/* Recoverable Mem Error log B */

	/* These registers are input ONLY if there was a Non-Rec Error */
	u16 nrecmema;		/* Non-Recoverable Mem log A */
	u16 nrecmemb;		/* Non-Recoverable Mem log B */

};

/* Note that nrec_rdwr changed from NRECMEMA to NRECMEMB between the 5000
   and the 5400, so it is better to use an inline function than a macro here */
static inline int nrec_bank(struct i5400_error_info *info)
{
	return ((info->nrecmema) >> 12) & 0x7;
}
static inline int nrec_rank(struct i5400_error_info *info)
{
	return ((info->nrecmema) >> 8) & 0xf;
}
static inline int nrec_buf_id(struct i5400_error_info *info)
{
	return ((info->nrecmema)) & 0xff;
}
static inline int nrec_rdwr(struct i5400_error_info *info)
{
	return (info->nrecmemb) >> 31;
}
/* This applies to both the NREC and REC strings, so it can be used with
   both nrec_rdwr() and rec_rdwr() */
static inline const char *rdwr_str(int rdwr)
{
	return rdwr ? "Write" : "Read";
}
static inline int nrec_cas(struct i5400_error_info *info)
{
	return ((info->nrecmemb) >> 16) & 0x1fff;
}
static inline int nrec_ras(struct i5400_error_info *info)
{
	return (info->nrecmemb) & 0xffff;
}
static inline int rec_bank(struct i5400_error_info *info)
{
	return ((info->recmema) >> 12) & 0x7;
}
static inline int rec_rank(struct i5400_error_info *info)
{
	return ((info->recmema) >> 8) & 0xf;
}
static inline int rec_rdwr(struct i5400_error_info *info)
{
	return (info->recmemb) >> 31;
}
static inline int rec_cas(struct i5400_error_info *info)
{
	return ((info->recmemb) >> 16) & 0x1fff;
}
static inline int rec_ras(struct i5400_error_info *info)
{
	return (info->recmemb) & 0xffff;
}

static struct edac_pci_ctl_info *i5400_pci;

/*
 *	i5400_get_error_info	Retrieve the hardware error information from
 *				the hardware and cache it in the 'info'
 *				structure
 */
static void i5400_get_error_info(struct mem_ctl_info *mci,
				 struct i5400_error_info *info)
{
	struct i5400_pvt *pvt;
	u32 value;

	pvt = mci->pvt_info;

	/* read in the 1st FATAL error register */
	pci_read_config_dword(pvt->branchmap_werrors, FERR_FAT_FBD, &value);

	/* Mask only the bits that the doc says are valid
	 */
	value &= (FERR_FAT_FBDCHAN | FERR_FAT_MASK);
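	/* FERR_FAT_FBDCHAN is kept as well, so that the failing channel can
	 * be extracted later by extract_fbdchan_indx() */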

	/* If there is an error, then read in the
	   NEXT FATAL error register and the Memory Error Log Register A
	 */
	if (value & FERR_FAT_MASK) {
		info->ferr_fat_fbd = value;

		/* harvest the various error data we need */
		pci_read_config_dword(pvt->branchmap_werrors,
				NERR_FAT_FBD, &info->nerr_fat_fbd);
		pci_read_config_word(pvt->branchmap_werrors,
				NRECMEMA, &info->nrecmema);
		pci_read_config_word(pvt->branchmap_werrors,
				NRECMEMB, &info->nrecmemb);

		/* Clear the error bits, by writing them back */
		pci_write_config_dword(pvt->branchmap_werrors,
				FERR_FAT_FBD, value);
	} else {
		info->ferr_fat_fbd = 0;
		info->nerr_fat_fbd = 0;
		info->nrecmema = 0;
		info->nrecmemb = 0;
	}

	/* read in the 1st NON-FATAL error register */
	pci_read_config_dword(pvt->branchmap_werrors, FERR_NF_FBD, &value);

	/* If there is an error, then read in the 1st NON-FATAL error
	 * register as well */
	if (value & FERR_NF_MASK) {
		info->ferr_nf_fbd = value;

		/* harvest the various error data we need */
		pci_read_config_dword(pvt->branchmap_werrors,
				NERR_NF_FBD, &info->nerr_nf_fbd);
		pci_read_config_word(pvt->branchmap_werrors,
				RECMEMA, &info->recmema);
		pci_read_config_dword(pvt->branchmap_werrors,
				RECMEMB, &info->recmemb);
		pci_read_config_dword(pvt->branchmap_werrors,
				REDMEMB, &info->redmemb);

		/* Clear the error bits, by writing them back */
		pci_write_config_dword(pvt->branchmap_werrors,
				FERR_NF_FBD, value);
	} else {
		info->ferr_nf_fbd = 0;
		info->nerr_nf_fbd = 0;
		info->recmema = 0;
		info->recmemb = 0;
		info->redmemb = 0;
	}
}

/*
 * i5400_proccess_non_recoverable_info(struct mem_ctl_info *mci,
 * 					struct i5400_error_info *info,
 * 					unsigned long allErrors);
 *
 *	handle the Intel FATAL and unrecoverable errors, if any
 */
static void i5400_proccess_non_recoverable_info(struct mem_ctl_info *mci,
				    struct i5400_error_info *info,
				    unsigned long allErrors)
{
	char msg[EDAC_MC_LABEL_LEN + 1 + 90 + 80];
	int branch;
	int channel;
	int bank;
	int buf_id;
	int rank;
	int rdwr;
	int ras, cas;
	int errnum;
	char *type = NULL;

	if (!allErrors)
		return;		/* if no error, return now */

	if (allErrors &  ERROR_FAT_MASK)
		type = "FATAL";
	else if (allErrors & FERR_NF_UNCORRECTABLE)
		type = "NON-FATAL uncorrected";
	else
		type = "NON-FATAL recoverable";

	/* ONLY ONE of the possible error bits will be set, as per the docs */

	branch = extract_fbdchan_indx(info->ferr_fat_fbd);
	channel = branch;
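	/*
	 * The FBDCHAN index behaves as a global channel number (0-3) here;
	 * the "Branch" reported in the messages below is therefore
	 * channel >> 1.
	 */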

	/* Use the NON-Recoverable macros to extract data */
	bank = nrec_bank(info);
	rank = nrec_rank(info);
	buf_id = nrec_buf_id(info);
	rdwr = nrec_rdwr(info);
	ras = nrec_ras(info);
	cas = nrec_cas(info);

	debugf0("\t\tCSROW= %d  Channels= %d,%d  (Branch= %d "
		"DRAM Bank= %d Buffer ID = %d rdwr= %s ras= %d cas= %d)\n",
		rank, channel, channel + 1, branch >> 1, bank,
		buf_id, rdwr_str(rdwr), ras, cas);

	/* Only 1 bit will be on */
	errnum = find_first_bit(&allErrors, ARRAY_SIZE(error_name));

	/* Form out message */
	snprintf(msg, sizeof(msg),
		 "%s (Branch=%d DRAM-Bank=%d Buffer ID = %d RDWR=%s "
		 "RAS=%d CAS=%d %s Err=0x%lx (%s))",
		 type, branch >> 1, bank, buf_id, rdwr_str(rdwr), ras, cas,
		 type, allErrors, error_name[errnum]);

	/* Call the helper to output message */
	edac_mc_handle_fbd_ue(mci, rank, channel, channel + 1, msg);
}

/*
 * i5400_process_nonfatal_error_info(struct mem_ctl_info *mci,
 * 				struct i5400_error_info *info);
 *
 *	handle the Intel NON-FATAL errors, if any
 */
static void i5400_process_nonfatal_error_info(struct mem_ctl_info *mci,
					struct i5400_error_info *info)
{
	char msg[EDAC_MC_LABEL_LEN + 1 + 90 + 80];
	unsigned long allErrors;
	int branch;
	int channel;
	int bank;
	int rank;
	int rdwr;
	int ras, cas;
	int errnum;

	/* mask off the Error bits that are possible */
	allErrors = from_nf_ferr(info->ferr_nf_fbd & FERR_NF_MASK);
	if (!allErrors)
		return;		/* if no error, return now */

	/* ONLY ONE of the possible error bits will be set, as per the docs */

	if (allErrors & (ERROR_NF_UNCORRECTABLE | ERROR_NF_RECOVERABLE)) {
		i5400_proccess_non_recoverable_info(mci, info, allErrors);
		return;
	}

	/* Correctable errors */
	if (allErrors & ERROR_NF_CORRECTABLE) {
		debugf0("\tCorrected bits= 0x%lx\n", allErrors);

		branch = extract_fbdchan_indx(info->ferr_nf_fbd);

		channel = 0;
		if (REC_ECC_LOCATOR_ODD(info->redmemb))
			channel = 1;

		/* Convert the within-branch channel (0 or 1) to a global
		 * channel number by adding the branch's base channel */
		channel += branch;

		bank = rec_bank(info);
		rank = rec_rank(info);
		rdwr = rec_rdwr(info);
		ras = rec_ras(info);
		cas = rec_cas(info);

		/* Only 1 bit will be on */
		errnum = find_first_bit(&allErrors, ARRAY_SIZE(error_name));

		debugf0("\t\tCSROW= %d Channel= %d  (Branch %d "
			"DRAM Bank= %d rdwr= %s ras= %d cas= %d)\n",
			rank, channel, branch >> 1, bank,
			rdwr_str(rdwr), ras, cas);

		/* Form out message */
		snprintf(msg, sizeof(msg),
			 "Corrected error (Branch=%d DRAM-Bank=%d RDWR=%s "
			 "RAS=%d CAS=%d, CE Err=0x%lx (%s))",
			 branch >> 1, bank, rdwr_str(rdwr), ras, cas,
			 allErrors, error_name[errnum]);

		/* Call the helper to output message */
		edac_mc_handle_fbd_ce(mci, rank, channel, msg);

		return;
	}

	/* Miscellaneous errors */
	errnum = find_first_bit(&allErrors, ARRAY_SIZE(error_name));

	branch = extract_fbdchan_indx(info->ferr_nf_fbd);

	i5400_mc_printk(mci, KERN_EMERG,
			"Non-Fatal misc error (Branch=%d Err=%#lx (%s))",
			branch >> 1, allErrors, error_name[errnum]);
}

/*
 *	i5400_process_error_info	Process the error info that is
 *	in the 'info' structure, previously retrieved from hardware
 */
static void i5400_process_error_info(struct mem_ctl_info *mci,
				struct i5400_error_info *info)
{
	u32 allErrors;

	/* First handle any fatal errors that occurred */
	allErrors = (info->ferr_fat_fbd & FERR_FAT_MASK);
	i5400_proccess_non_recoverable_info(mci, info, allErrors);

	/* now handle any non-fatal errors that occurred */
	i5400_process_nonfatal_error_info(mci, info);
}

/*
 *	i5400_clear_error	Retrieve any error from the hardware
 *				but do NOT process that error.
 *				Used for 'clearing' out of previous errors
 *				Called by the Core module.
 */
static void i5400_clear_error(struct mem_ctl_info *mci)
{
	struct i5400_error_info info;

	i5400_get_error_info(mci, &info);
}

/*
 *	i5400_check_error	Retrieve and process errors reported by the
 *				hardware. Called by the Core module.
 */
static void i5400_check_error(struct mem_ctl_info *mci)
{
	struct i5400_error_info info;
	debugf4("MC%d: %s: %s()\n", mci->mc_idx, __FILE__, __func__);
	i5400_get_error_info(mci, &info);
	i5400_process_error_info(mci, &info);
}

/*
 *	i5400_put_devices	'put' all the devices that we have
 *				reserved via 'get'
 */
static void i5400_put_devices(struct mem_ctl_info *mci)
{
	struct i5400_pvt *pvt;

	pvt = mci->pvt_info;

	/* Decrement usage count for devices */
	pci_dev_put(pvt->branch_1);
	pci_dev_put(pvt->branch_0);
	pci_dev_put(pvt->fsb_error_regs);
	pci_dev_put(pvt->branchmap_werrors);
}

/*
 *	i5400_get_devices	Find and perform 'get' operation on the MCH's
 *			device/functions we want to reference for this driver
 *
 *			Need to 'get' device 16 func 1 and func 2
 */
static int i5400_get_devices(struct mem_ctl_info *mci, int dev_idx)
{
	struct i5400_pvt *pvt;
	struct pci_dev *pdev;

	pvt = mci->pvt_info;
	pvt->branchmap_werrors = NULL;
	pvt->fsb_error_regs = NULL;
	pvt->branch_0 = NULL;
	pvt->branch_1 = NULL;

	/* Attempt to 'get' the MCH register we want */
	pdev = NULL;
	while (!pvt->branchmap_werrors || !pvt->fsb_error_regs) {
		pdev = pci_get_device(PCI_VENDOR_ID_INTEL,
				      PCI_DEVICE_ID_INTEL_5400_ERR, pdev);
		if (!pdev) {
			/* End of list, leave */
			i5400_printk(KERN_ERR,
				"'system address,Process Bus' "
				"device not found:"
				"vendor 0x%x device 0x%x ERR funcs "
				"(broken BIOS?)\n",
				PCI_VENDOR_ID_INTEL,
				PCI_DEVICE_ID_INTEL_5400_ERR);
			goto error;
		}

		/* Store device 16 funcs 1 and 2 */
		switch (PCI_FUNC(pdev->devfn)) {
		case 1:
			pvt->branchmap_werrors = pdev;
			break;
		case 2:
			pvt->fsb_error_regs = pdev;
			break;
		}
	}

	debugf1("System Address, processor bus- PCI Bus ID: %s  %x:%x\n",
		pci_name(pvt->system_address),
		pvt->system_address->vendor, pvt->system_address->device);
	debugf1("Branchmap, control and errors - PCI Bus ID: %s  %x:%x\n",
		pci_name(pvt->branchmap_werrors),
		pvt->branchmap_werrors->vendor, pvt->branchmap_werrors->device);
	debugf1("FSB Error Regs - PCI Bus ID: %s  %x:%x\n",
		pci_name(pvt->fsb_error_regs),
		pvt->fsb_error_regs->vendor, pvt->fsb_error_regs->device);

	pvt->branch_0 = pci_get_device(PCI_VENDOR_ID_INTEL,
				       PCI_DEVICE_ID_INTEL_5400_FBD0, NULL);
	if (!pvt->branch_0) {
		i5400_printk(KERN_ERR,
			"MC: 'BRANCH 0' device not found:"
			"vendor 0x%x device 0x%x Func 0 (broken BIOS?)\n",
			PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_5400_FBD0);
		goto error;
	}

	/* If this device claims to have more than 2 channels then
	 * fetch Branch 1's information
	 */
	if (pvt->maxch < CHANNELS_PER_BRANCH)
		return 0;

	pvt->branch_1 = pci_get_device(PCI_VENDOR_ID_INTEL,
				       PCI_DEVICE_ID_INTEL_5400_FBD1, NULL);
	if (!pvt->branch_1) {
		i5400_printk(KERN_ERR,
			"MC: 'BRANCH 1' device not found:"
			"vendor 0x%x device 0x%x Func 0 "
			"(broken BIOS?)\n",
			PCI_VENDOR_ID_INTEL,
			PCI_DEVICE_ID_INTEL_5400_FBD1);
		goto error;
	}

	return 0;

error:
	i5400_put_devices(mci);
	return -ENODEV;
}

/*
 *	determine_amb_present_reg
 *
 *		The AMB-present information is spread across several registers,
 *		one per channel; selecting the right register requires knowing
 *		which channel is in question.
 *
 *	2 branches, each with 2 channels
 *		b0_ambpresent0 for channel '0'
 *		b0_ambpresent1 for channel '1'
 *		b1_ambpresent0 for channel '2'
 *		b1_ambpresent1 for channel '3'
 */
static int determine_amb_present_reg(struct i5400_pvt *pvt, int channel)
{
	int amb_present;

	if (channel < CHANNELS_PER_BRANCH) {
		if (channel & 0x1)
			amb_present = pvt->b0_ambpresent1;
		else
			amb_present = pvt->b0_ambpresent0;
	} else {
		if (channel & 0x1)
			amb_present = pvt->b1_ambpresent1;
		else
			amb_present = pvt->b1_ambpresent0;
	}

	return amb_present;
}

/*
 * determine_mtr(pvt, csrow, channel)
 *
 * return the proper MTR register as determined by the csrow and desired channel
 */
static int determine_mtr(struct i5400_pvt *pvt, int csrow, int channel)
{
	int mtr;
	int n;

	/* There is one MTR for each slot pair of FB-DIMMs,
	   Each slot pair may be at branch 0 or branch 1.
	 */
	n = csrow;

	if (n >= NUM_MTRS_PER_BRANCH) {
		debugf0("ERROR: trying to access an invalid csrow: %d\n",
			csrow);
		return 0;
	}

	if (channel < CHANNELS_PER_BRANCH)
		mtr = pvt->b0_mtr[n];
	else
		mtr = pvt->b1_mtr[n];

	return mtr;
}

/*
 * decode_mtr	Decode one Memory Technology Register and dump its
 *		fields (debug output only)
 */
static void decode_mtr(int slot_row, u16 mtr)
{
	int ans;

	ans = MTR_DIMMS_PRESENT(mtr);

	debugf2("\tMTR%d=0x%x:  DIMMs are %s\n", slot_row, mtr,
		ans ? "Present" : "NOT Present");
	if (!ans)
		return;

	debugf2("\t\tWIDTH: x%d\n", MTR_DRAM_WIDTH(mtr));

	debugf2("\t\tELECTRICAL THROTTLING is %s\n",
		MTR_DIMMS_ETHROTTLE(mtr) ? "enabled" : "disabled");

	debugf2("\t\tNUMBANK: %d bank(s)\n", MTR_DRAM_BANKS(mtr));
	debugf2("\t\tNUMRANK: %s\n", MTR_DIMM_RANK(mtr) ? "double" : "single");
	debugf2("\t\tNUMROW: %s\n", numrow_toString[MTR_DIMM_ROWS(mtr)]);
	debugf2("\t\tNUMCOL: %s\n", numcol_toString[MTR_DIMM_COLS(mtr)]);
}

static void handle_channel(struct i5400_pvt *pvt, int csrow, int channel,
			struct i5400_dimm_info *dinfo)
{
	int mtr;
	int amb_present_reg;
	int addrBits;

	mtr = determine_mtr(pvt, csrow, channel);
	if (MTR_DIMMS_PRESENT(mtr)) {
		amb_present_reg = determine_amb_present_reg(pvt, channel);

		/* Determine if there is a DIMM present in this DIMM slot */
		if (amb_present_reg & (1 << csrow)) {
			/* Start with the number of bits for a Bank
			 * on the DRAM */
			addrBits = MTR_DRAM_BANKS_ADDR_BITS(mtr);
			/* Add the number of ROW bits */
			addrBits += MTR_DIMM_ROWS_ADDR_BITS(mtr);
			/* add the number of COLUMN bits */
			addrBits += MTR_DIMM_COLS_ADDR_BITS(mtr);
			/* add the number of RANK bits */
			addrBits += MTR_DIMM_RANK(mtr);

			addrBits += 6;	/* each location is a 64-bit (2^6-bit) access */
			addrBits -= 20;	/* divide by 2^20 to get MB */
			addrBits -= 3;	/* 8 bits per byte */
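			/*
			 * Hypothetical example: 4 banks (2 bits) + 14 row bits
			 * + 11 column bits + dual rank (1 bit) + 6 - 20 - 3
			 * gives addrBits = 11, i.e. a 2048 MB DIMM.
			 */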

			dinfo->megabytes = 1 << addrBits;
		}
	}
}

/*
 *	calculate_dimm_size
 *
 *	also will output a DIMM matrix map, if debug is enabled, for viewing
 *	how the DIMMs are populated
 */
static void calculate_dimm_size(struct i5400_pvt *pvt)
{
	struct i5400_dimm_info *dinfo;
	int csrow, max_csrows;
	char *p, *mem_buffer;
	int space, n;
	int channel;

	/* ================= Generate some debug output ================= */
	space = PAGE_SIZE;
	mem_buffer = p = kmalloc(space, GFP_KERNEL);
	if (p == NULL) {
		i5400_printk(KERN_ERR, "MC: %s:%s() kmalloc() failed\n",
			__FILE__, __func__);
		return;
	}

	/* Scan all the actual CSROWS
	 * and calculate the information for each DIMM
	 * Start with the highest csrow first, to display it first
	 * and work toward the 0th csrow
	 */
	max_csrows = pvt->maxdimmperch;
	for (csrow = max_csrows - 1; csrow >= 0; csrow--) {

		/* on an odd csrow, first output a 'boundary' marker,
		 * then reset the message buffer  */
		if (csrow & 0x1) {
			n = snprintf(p, space, "---------------------------"
					"--------------------------------");
			p += n;
			space -= n;
			debugf2("%s\n", mem_buffer);
			p = mem_buffer;
			space = PAGE_SIZE;
		}
		n = snprintf(p, space, "csrow %2d    ", csrow);
		p += n;
		space -= n;

		for (channel = 0; channel < pvt->maxch; channel++) {
			dinfo = &pvt->dimm_info[csrow][channel];
			handle_channel(pvt, csrow, channel, dinfo);
			n = snprintf(p, space, "%4d MB   | ", dinfo->megabytes);
			p += n;
			space -= n;
		}
		debugf2("%s\n", mem_buffer);
		p = mem_buffer;
		space = PAGE_SIZE;
	}

	/* Output the last bottom 'boundary' marker */
	n = snprintf(p, space, "---------------------------"
			"--------------------------------");
	p += n;
	space -= n;
	debugf2("%s\n", mem_buffer);
	p = mem_buffer;
	space = PAGE_SIZE;

	/* now output the 'channel' labels */
	n = snprintf(p, space, "            ");
	p += n;
	space -= n;
	for (channel = 0; channel < pvt->maxch; channel++) {
		n = snprintf(p, space, "channel %d | ", channel);
		p += n;
		space -= n;
	}

	/* output the last message and free buffer */
	debugf2("%s\n", mem_buffer);
	kfree(mem_buffer);
}

/*
 *	i5400_get_mc_regs	read in the necessary registers and
 *				cache locally
 *
 *			Fills in the private data members
 */
static void i5400_get_mc_regs(struct mem_ctl_info *mci)
{
	struct i5400_pvt *pvt;
	u32 actual_tolm;
	u16 limit;
	int slot_row;
	int maxch;
	int maxdimmperch;
	int way0, way1;

	pvt = mci->pvt_info;

	pci_read_config_dword(pvt->system_address, AMBASE,
			(u32 *) &pvt->ambase);
	pci_read_config_dword(pvt->system_address, AMBASE + sizeof(u32),
			((u32 *) &pvt->ambase) + 1);	/* upper 32 bits */

	maxdimmperch = pvt->maxdimmperch;
	maxch = pvt->maxch;

	debugf2("AMBASE= 0x%lx  MAXCH= %d  MAX-DIMM-Per-CH= %d\n",
		(long unsigned int)pvt->ambase, pvt->maxch, pvt->maxdimmperch);

	/* Get the Branch Map regs */
	pci_read_config_word(pvt->branchmap_werrors, TOLM, &pvt->tolm);
	pvt->tolm >>= 12;
	debugf2("\nTOLM (number of 256M regions) =%u (0x%x)\n", pvt->tolm,
		pvt->tolm);
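	/*
	 * tolm counts 256 MB (2^28 byte) regions: (1000 * tolm) >> 2 is the
	 * limit in thousandths of a GB for the printout below, and
	 * tolm << 28 is the corresponding byte address.
	 */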

	actual_tolm = (u32) ((1000l * pvt->tolm) >> (30 - 28));
	debugf2("Actual TOLM byte addr=%u.%03u GB (0x%x)\n",
		actual_tolm/1000, actual_tolm % 1000, pvt->tolm << 28);

	pci_read_config_word(pvt->branchmap_werrors, MIR0, &pvt->mir0);
	pci_read_config_word(pvt->branchmap_werrors, MIR1, &pvt->mir1);

	/* Get the MIR[0-1] regs */
	limit = (pvt->mir0 >> 4) & 0x0fff;
	way0 = pvt->mir0 & 0x1;
	way1 = pvt->mir0 & 0x2;
	debugf2("MIR0: limit= 0x%x  WAY1= %u  WAY0= %x\n", limit, way1, way0);
	limit = (pvt->mir1 >> 4) & 0xfff;
	way0 = pvt->mir1 & 0x1;
	way1 = pvt->mir1 & 0x2;
	debugf2("MIR1: limit= 0x%x  WAY1= %u  WAY0= %x\n", limit, way1, way0);

	/* Get the set of MTR[0-3] regs by each branch */
	for (slot_row = 0; slot_row < NUM_MTRS_PER_BRANCH; slot_row++) {
		int where = MTR0 + (slot_row * sizeof(u16));
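		/* MTR0..MTR3 are consecutive 16-bit registers (0x80..0x86) */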

		/* Branch 0 set of MTR registers */
		pci_read_config_word(pvt->branch_0, where,
				&pvt->b0_mtr[slot_row]);

		debugf2("MTR%d where=0x%x B0 value=0x%x\n", slot_row, where,
			pvt->b0_mtr[slot_row]);

		if (pvt->maxch < CHANNELS_PER_BRANCH) {
			pvt->b1_mtr[slot_row] = 0;
			continue;
		}

		/* Branch 1 set of MTR registers */
		pci_read_config_word(pvt->branch_1, where,
				&pvt->b1_mtr[slot_row]);
		debugf2("MTR%d where=0x%x B1 value=0x%x\n", slot_row, where,
			pvt->b1_mtr[slot_row]);
	}

	/* Dump branch 0's MTRs (read above) */
	debugf2("\nMemory Technology Registers:\n");
	debugf2("   Branch 0:\n");
	for (slot_row = 0; slot_row < NUM_MTRS_PER_BRANCH; slot_row++)
		decode_mtr(slot_row, pvt->b0_mtr[slot_row]);

	pci_read_config_word(pvt->branch_0, AMBPRESENT_0,
			&pvt->b0_ambpresent0);
	debugf2("\t\tAMB-Branch 0-present0 0x%x:\n", pvt->b0_ambpresent0);
	pci_read_config_word(pvt->branch_0, AMBPRESENT_1,
			&pvt->b0_ambpresent1);
	debugf2("\t\tAMB-Branch 0-present1 0x%x:\n", pvt->b0_ambpresent1);

	/* Only if we have 2 branches (4 channels) */
	if (pvt->maxch < CHANNELS_PER_BRANCH) {
		pvt->b1_ambpresent0 = 0;
		pvt->b1_ambpresent1 = 0;
	} else {
		/* Dump branch 1's MTRs (read above) */
		debugf2("   Branch 1:\n");
		for (slot_row = 0; slot_row < NUM_MTRS_PER_BRANCH; slot_row++)
			decode_mtr(slot_row, pvt->b1_mtr[slot_row]);

		pci_read_config_word(pvt->branch_1, AMBPRESENT_0,
				&pvt->b1_ambpresent0);
		debugf2("\t\tAMB-Branch 1-present0 0x%x:\n",
			pvt->b1_ambpresent0);
		pci_read_config_word(pvt->branch_1, AMBPRESENT_1,
				&pvt->b1_ambpresent1);
		debugf2("\t\tAMB-Branch 1-present1 0x%x:\n",
			pvt->b1_ambpresent1);
	}

	/* Go and determine the size of each DIMM and place in an
	 * orderly matrix */
	calculate_dimm_size(pvt);
}

/*
 *	i5400_init_csrows	Initialize the 'csrows' table within
 *				the mci control	structure with the
 *				addressing of memory.
 *
 *	return:
 *		0	success
 *		1	no actual memory found on this MC
 */
static int i5400_init_csrows(struct mem_ctl_info *mci)
{
	struct i5400_pvt *pvt;
	struct csrow_info *p_csrow;
	int empty, channel_count;
	int max_csrows;
	int mtr;
	int csrow_megs;
	int channel;
	int csrow;

	pvt = mci->pvt_info;

	channel_count = pvt->maxch;
	max_csrows = pvt->maxdimmperch;

	empty = 1;		/* Assume NO memory */

	for (csrow = 0; csrow < max_csrows; csrow++) {
		p_csrow = &mci->csrows[csrow];

		p_csrow->csrow_idx = csrow;

		/* use branch 0 for the basis */
		mtr = determine_mtr(pvt, csrow, 0);

		/* if no DIMMS on this row, continue */
		if (!MTR_DIMMS_PRESENT(mtr))
			continue;

		/* FAKE OUT VALUES, FIXME */
		p_csrow->first_page = 0 + csrow * 20;
		p_csrow->last_page = 9 + csrow * 20;
		p_csrow->page_mask = 0xFFF;

		p_csrow->grain = 8;

		csrow_megs = 0;
		for (channel = 0; channel < pvt->maxch; channel++)
			csrow_megs += pvt->dimm_info[csrow][channel].megabytes;

		p_csrow->nr_pages = csrow_megs << 8;
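		/* nr_pages is in page units; with 4 KB pages, 1 MB is 256
		 * pages, hence the shift by 8 above */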

		/* Assume DDR2 for now */
		p_csrow->mtype = MEM_FB_DDR2;

		/* ask what device type on this row */
		if (MTR_DRAM_WIDTH(mtr) == 8)
			p_csrow->dtype = DEV_X8;
		else
			p_csrow->dtype = DEV_X4;

		p_csrow->edac_mode = EDAC_S8ECD8ED;

		empty = 0;
	}

	return empty;
}

/*
 *	i5400_enable_error_reporting
 *			Turn on the memory reporting features of the hardware
 */
static void i5400_enable_error_reporting(struct mem_ctl_info *mci)
{
	struct i5400_pvt *pvt;
	u32 fbd_error_mask;

	pvt = mci->pvt_info;

	/* Read the FBD Error Mask Register */
	pci_read_config_dword(pvt->branchmap_werrors, EMASK_FBD,
			&fbd_error_mask);

	/* Enable with a '0' */
	fbd_error_mask &= ~(ENABLE_EMASK_ALL);

	pci_write_config_dword(pvt->branchmap_werrors, EMASK_FBD,
			fbd_error_mask);
}

/*
 *	i5400_probe1	Probe for ONE instance of device to see if it is
 *			present.
 *	return:
 *		0 for FOUND a device
 *		< 0 for error code
 */
static int i5400_probe1(struct pci_dev *pdev, int dev_idx)
{
	struct mem_ctl_info *mci;
	struct i5400_pvt *pvt;
	int num_channels;
	int num_dimms_per_channel;
	int num_csrows;

	if (dev_idx >= ARRAY_SIZE(i5400_devs))
		return -EINVAL;

	debugf0("MC: %s: %s(), pdev bus %u dev=0x%x fn=0x%x\n",
		__FILE__, __func__,
		pdev->bus->number,
		PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));

	/* We only are looking for func 0 of the set */
	if (PCI_FUNC(pdev->devfn) != 0)
		return -ENODEV;

	/* As we don't have a motherboard identification routine to determine
	 * the actual number of slots/DIMMs per channel, we use the maximums
	 * specified by the chipset.  We may therefore report more DIMMs per
	 * channel than are actually on the motherboard, but this allows the
	 * driver to support up to the chipset maximum without any fancy
	 * motherboard probing.
	 */
	num_dimms_per_channel = MAX_DIMMS_PER_CHANNEL;
	num_channels = MAX_CHANNELS;
	num_csrows = num_dimms_per_channel;

	debugf0("MC: %s(): Number of - Channels= %d  DIMMS= %d  CSROWS= %d\n",
		__func__, num_channels, num_dimms_per_channel, num_csrows);

	/* allocate a new MC control structure */
	mci = edac_mc_alloc(sizeof(*pvt), num_csrows, num_channels, 0);

	if (mci == NULL)
		return -ENOMEM;

	debugf0("MC: %s: %s(): mci = %p\n", __FILE__, __func__, mci);

	mci->dev = &pdev->dev;	/* record ptr  to the generic device */

	pvt = mci->pvt_info;
	pvt->system_address = pdev;	/* Record this device in our private */
	pvt->maxch = num_channels;
	pvt->maxdimmperch = num_dimms_per_channel;

	/* 'get' the pci devices we want to reserve for our use */
	if (i5400_get_devices(mci, dev_idx))
		goto fail0;

	/* Time to get serious */
	i5400_get_mc_regs(mci);	/* retrieve the hardware registers */

	mci->mc_idx = 0;
	mci->mtype_cap = MEM_FLAG_FB_DDR2;
	mci->edac_ctl_cap = EDAC_FLAG_NONE;
	mci->edac_cap = EDAC_FLAG_NONE;
	mci->mod_name = "i5400_edac.c";
	mci->mod_ver = I5400_REVISION;
	mci->ctl_name = i5400_devs[dev_idx].ctl_name;
	mci->dev_name = pci_name(pdev);
	mci->ctl_page_to_phys = NULL;

	/* Set the function pointer to an actual operation function */
	mci->edac_check = i5400_check_error;

	/* initialize the MC control structure 'csrows' table
	 * with the mapping and control information */
	if (i5400_init_csrows(mci)) {
		debugf0("MC: Setting mci->edac_cap to EDAC_FLAG_NONE\n"
			"    because i5400_init_csrows() returned nonzero "
			"value\n");
		mci->edac_cap = EDAC_FLAG_NONE;	/* no csrows found */
	} else {
		debugf1("MC: Enable error reporting now\n");
		i5400_enable_error_reporting(mci);
	}

	/* add this new MC control structure to EDAC's list of MCs */
	if (edac_mc_add_mc(mci)) {
		debugf0("MC: %s: %s(): failed edac_mc_add_mc()\n",
			__FILE__, __func__);
		/* FIXME: perhaps some code should go here that disables error
		 * reporting if we just enabled it
		 */
		goto fail1;
	}

	i5400_clear_error(mci);

	/* allocating generic PCI control info */
	i5400_pci = edac_pci_create_generic_ctl(&pdev->dev, EDAC_MOD_STR);
	if (!i5400_pci) {
		printk(KERN_WARNING
			"%s(): Unable to create PCI control\n",
			__func__);
		printk(KERN_WARNING
			"%s(): PCI error report via EDAC not setup\n",
			__func__);
	}

	return 0;

	/* Error exit unwinding stack */
fail1:

	i5400_put_devices(mci);

fail0:
	edac_mc_free(mci);
	return -ENODEV;
}

/*
 *	i5400_init_one	constructor for one instance of device
 *
 * 	returns:
 *		negative on error
 *		count (>= 0)
 */
static int __devinit i5400_init_one(struct pci_dev *pdev,
				const struct pci_device_id *id)
{
	int rc;

	debugf0("MC: %s: %s()\n", __FILE__, __func__);

	/* wake up device */
	rc = pci_enable_device(pdev);
	if (rc)
		return rc;

	/* now probe and enable the device */
	return i5400_probe1(pdev, id->driver_data);
}

/*
 *	i5400_remove_one	destructor for one instance of device
 *
 */
static void __devexit i5400_remove_one(struct pci_dev *pdev)
{
	struct mem_ctl_info *mci;

	debugf0("%s: %s()\n", __FILE__, __func__);

	if (i5400_pci)
		edac_pci_release_generic_ctl(i5400_pci);

	mci = edac_mc_del_mc(&pdev->dev);
	if (!mci)
		return;

	/* retrieve references to resources, and free those resources */
	i5400_put_devices(mci);

	edac_mc_free(mci);
}

/*
 *	pci_device_id	table for which devices we are looking for
 *
 *	The 5400 'ERR' device (Device 16) is used to detect the chipset.
 */
static const struct pci_device_id i5400_pci_tbl[] __devinitdata = {
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_5400_ERR)},
	{0,}			/* 0 terminated list. */
};

MODULE_DEVICE_TABLE(pci, i5400_pci_tbl);

/*
 *	i5400_driver	pci_driver structure for this module
 *
 */
static struct pci_driver i5400_driver = {
	.name = "i5400_edac",
	.probe = i5400_init_one,
	.remove = __devexit_p(i5400_remove_one),
	.id_table = i5400_pci_tbl,
};

/*
 *	i5400_init		Module entry function
 *			Try to initialize this module for its devices
 */
static int __init i5400_init(void)
{
	int pci_rc;

	debugf2("MC: %s: %s()\n", __FILE__, __func__);

	/* Ensure that the OPSTATE is set correctly for POLL or NMI */
	opstate_init();

	pci_rc = pci_register_driver(&i5400_driver);

	return (pci_rc < 0) ? pci_rc : 0;
}

/*
 *	i5400_exit()	Module exit function
 *			Unregister the driver
 */
static void __exit i5400_exit(void)
{
	debugf2("MC: %s: %s()\n", __FILE__, __func__);
	pci_unregister_driver(&i5400_driver);
}

module_init(i5400_init);
module_exit(i5400_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Ben Woodard <woodard@redhat.com>");
MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
MODULE_AUTHOR("Red Hat Inc. (http://www.redhat.com)");
MODULE_DESCRIPTION("MC Driver for Intel I5400 memory controllers - "
		   I5400_REVISION);

module_param(edac_op_state, int, 0444);
MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");
m">1<<5, /* Bit 5: Bad SSD */ PHY_B_PES_BAD_ESD = 1<<4, /* Bit 4: Bad ESD */ PHY_B_PES_RX_ER = 1<<3, /* Bit 3: Receive Error */ PHY_B_PES_TX_ER = 1<<2, /* Bit 2: Transmit Error */ PHY_B_PES_LOCK_ER = 1<<1, /* Bit 1: Lock Error */ PHY_B_PES_MLT3_ER = 1<<0, /* Bit 0: MLT3 code Error */ }; /* PHY_BCOM_AUNE_ADV 16 bit r/w Auto-Negotiation Advertisement *****/ /* PHY_BCOM_AUNE_LP 16 bit r/o Link Partner Ability Reg *****/ enum { PHY_B_AN_RF = 1<<13, /* Bit 13: Remote Fault */ PHY_B_AN_ASP = 1<<11, /* Bit 11: Asymmetric Pause */ PHY_B_AN_PC = 1<<10, /* Bit 10: Pause Capable */ }; /***** PHY_BCOM_FC_CTR 16 bit r/w False Carrier Counter *****/ enum { PHY_B_FC_CTR = 0xff, /* Bit 7..0: False Carrier Counter */ /***** PHY_BCOM_RNO_CTR 16 bit r/w Receive NOT_OK Counter *****/ PHY_B_RC_LOC_MSK = 0xff00, /* Bit 15..8: Local Rx NOT_OK cnt */ PHY_B_RC_REM_MSK = 0x00ff, /* Bit 7..0: Remote Rx NOT_OK cnt */ /***** PHY_BCOM_AUX_CTRL 16 bit r/w Auxiliary Control Reg *****/ PHY_B_AC_L_SQE = 1<<15, /* Bit 15: Low Squelch */ PHY_B_AC_LONG_PACK = 1<<14, /* Bit 14: Rx Long Packets */ PHY_B_AC_ER_CTRL = 3<<12,/* Bit 13..12: Edgerate Control */ /* Bit 11: reserved */ PHY_B_AC_TX_TST = 1<<10, /* Bit 10: Tx test bit, always 1 */ /* Bit 9.. 8: reserved */ PHY_B_AC_DIS_PRF = 1<<7, /* Bit 7: dis part resp filter */ /* Bit 6: reserved */ PHY_B_AC_DIS_PM = 1<<5, /* Bit 5: dis power management */ /* Bit 4: reserved */ PHY_B_AC_DIAG = 1<<3, /* Bit 3: Diagnostic Mode */ }; /***** PHY_BCOM_AUX_STAT 16 bit r/o Auxiliary Status Reg *****/ enum { PHY_B_AS_AN_C = 1<<15, /* Bit 15: AutoNeg complete */ PHY_B_AS_AN_CA = 1<<14, /* Bit 14: AN Complete Ack */ PHY_B_AS_ANACK_D = 1<<13, /* Bit 13: AN Ack Detect */ PHY_B_AS_ANAB_D = 1<<12, /* Bit 12: AN Ability Detect */ PHY_B_AS_NPW = 1<<11, /* Bit 11: AN Next Page Wait */ PHY_B_AS_AN_RES_MSK = 7<<8,/* Bit 10..8: AN HDC */ PHY_B_AS_PDF = 1<<7, /* Bit 7: Parallel Detect. Fault */ PHY_B_AS_RF = 1<<6, /* Bit 6: Remote Fault */ PHY_B_AS_ANP_R = 1<<5, /* Bit 5: AN Page Received */ PHY_B_AS_LP_ANAB = 1<<4, /* Bit 4: LP AN Ability */ PHY_B_AS_LP_NPAB = 1<<3, /* Bit 3: LP Next Page Ability */ PHY_B_AS_LS = 1<<2, /* Bit 2: Link Status */ PHY_B_AS_PRR = 1<<1, /* Bit 1: Pause Resolution-Rx */ PHY_B_AS_PRT = 1<<0, /* Bit 0: Pause Resolution-Tx */ }; #define PHY_B_AS_PAUSE_MSK (PHY_B_AS_PRR | PHY_B_AS_PRT) /***** PHY_BCOM_INT_STAT 16 bit r/o Interrupt Status Reg *****/ /***** PHY_BCOM_INT_MASK 16 bit r/w Interrupt Mask Reg *****/ enum { PHY_B_IS_PSE = 1<<14, /* Bit 14: Pair Swap Error */ PHY_B_IS_MDXI_SC = 1<<13, /* Bit 13: MDIX Status Change */ PHY_B_IS_HCT = 1<<12, /* Bit 12: counter above 32k */ PHY_B_IS_LCT = 1<<11, /* Bit 11: counter above 128 */ PHY_B_IS_AN_PR = 1<<10, /* Bit 10: Page Received */ PHY_B_IS_NO_HDCL = 1<<9, /* Bit 9: No HCD Link */ PHY_B_IS_NO_HDC = 1<<8, /* Bit 8: No HCD */ PHY_B_IS_NEG_USHDC = 1<<7, /* Bit 7: Negotiated Unsup. 
HCD */ PHY_B_IS_SCR_S_ER = 1<<6, /* Bit 6: Scrambler Sync Error */ PHY_B_IS_RRS_CHANGE = 1<<5, /* Bit 5: Remote Rx Stat Change */ PHY_B_IS_LRS_CHANGE = 1<<4, /* Bit 4: Local Rx Stat Change */ PHY_B_IS_DUP_CHANGE = 1<<3, /* Bit 3: Duplex Mode Change */ PHY_B_IS_LSP_CHANGE = 1<<2, /* Bit 2: Link Speed Change */ PHY_B_IS_LST_CHANGE = 1<<1, /* Bit 1: Link Status Changed */ PHY_B_IS_CRC_ER = 1<<0, /* Bit 0: CRC Error */ }; #define PHY_B_DEF_MSK \ (~(PHY_B_IS_PSE | PHY_B_IS_AN_PR | PHY_B_IS_DUP_CHANGE | \ PHY_B_IS_LSP_CHANGE | PHY_B_IS_LST_CHANGE)) /* Pause Bits (PHY_B_AN_ASP and PHY_B_AN_PC) encoding */ enum { PHY_B_P_NO_PAUSE = 0<<10,/* Bit 11..10: no Pause Mode */ PHY_B_P_SYM_MD = 1<<10, /* Bit 11..10: symmetric Pause Mode */ PHY_B_P_ASYM_MD = 2<<10,/* Bit 11..10: asymmetric Pause Mode */ PHY_B_P_BOTH_MD = 3<<10,/* Bit 11..10: both Pause Mode */ }; /* * Resolved Duplex mode and Capabilities (Aux Status Summary Reg) */ enum { PHY_B_RES_1000FD = 7<<8,/* Bit 10..8: 1000Base-T Full Dup. */ PHY_B_RES_1000HD = 6<<8,/* Bit 10..8: 1000Base-T Half Dup. */ }; /** Marvell-Specific */ enum { PHY_M_AN_NXT_PG = 1<<15, /* Request Next Page */ PHY_M_AN_ACK = 1<<14, /* (ro) Acknowledge Received */ PHY_M_AN_RF = 1<<13, /* Remote Fault */ PHY_M_AN_ASP = 1<<11, /* Asymmetric Pause */ PHY_M_AN_PC = 1<<10, /* MAC Pause implemented */ PHY_M_AN_100_T4 = 1<<9, /* Not cap. 100Base-T4 (always 0) */ PHY_M_AN_100_FD = 1<<8, /* Advertise 100Base-TX Full Duplex */ PHY_M_AN_100_HD = 1<<7, /* Advertise 100Base-TX Half Duplex */ PHY_M_AN_10_FD = 1<<6, /* Advertise 10Base-TX Full Duplex */ PHY_M_AN_10_HD = 1<<5, /* Advertise 10Base-TX Half Duplex */ PHY_M_AN_SEL_MSK =0x1f<<4, /* Bit 4.. 0: Selector Field Mask */ }; /* special defines for FIBER (88E1011S only) */ enum { PHY_M_AN_ASP_X = 1<<8, /* Asymmetric Pause */ PHY_M_AN_PC_X = 1<<7, /* MAC Pause implemented */ PHY_M_AN_1000X_AHD = 1<<6, /* Advertise 10000Base-X Half Duplex */ PHY_M_AN_1000X_AFD = 1<<5, /* Advertise 10000Base-X Full Duplex */ }; /* Pause Bits (PHY_M_AN_ASP_X and PHY_M_AN_PC_X) encoding */ enum { PHY_M_P_NO_PAUSE_X = 0<<7,/* Bit 8.. 7: no Pause Mode */ PHY_M_P_SYM_MD_X = 1<<7, /* Bit 8.. 7: symmetric Pause Mode */ PHY_M_P_ASYM_MD_X = 2<<7,/* Bit 8.. 7: asymmetric Pause Mode */ PHY_M_P_BOTH_MD_X = 3<<7,/* Bit 8.. 7: both Pause Mode */ }; /***** PHY_MARV_1000T_CTRL 16 bit r/w 1000Base-T Control Reg *****/ enum { PHY_M_1000C_TEST= 7<<13,/* Bit 15..13: Test Modes */ PHY_M_1000C_MSE = 1<<12, /* Manual Master/Slave Enable */ PHY_M_1000C_MSC = 1<<11, /* M/S Configuration (1=Master) */ PHY_M_1000C_MPD = 1<<10, /* Multi-Port Device */ PHY_M_1000C_AFD = 1<<9, /* Advertise Full Duplex */ PHY_M_1000C_AHD = 1<<8, /* Advertise Half Duplex */ }; /***** PHY_MARV_PHY_CTRL 16 bit r/w PHY Specific Ctrl Reg *****/ enum { PHY_M_PC_TX_FFD_MSK = 3<<14,/* Bit 15..14: Tx FIFO Depth Mask */ PHY_M_PC_RX_FFD_MSK = 3<<12,/* Bit 13..12: Rx FIFO Depth Mask */ PHY_M_PC_ASS_CRS_TX = 1<<11, /* Assert CRS on Transmit */ PHY_M_PC_FL_GOOD = 1<<10, /* Force Link Good */ PHY_M_PC_EN_DET_MSK = 3<<8,/* Bit 9.. 8: Energy Detect Mask */ PHY_M_PC_ENA_EXT_D = 1<<7, /* Enable Ext. Distance (10BT) */ PHY_M_PC_MDIX_MSK = 3<<5,/* Bit 6.. 5: MDI/MDIX Config. 
Mask */ PHY_M_PC_DIS_125CLK = 1<<4, /* Disable 125 CLK */ PHY_M_PC_MAC_POW_UP = 1<<3, /* MAC Power up */ PHY_M_PC_SQE_T_ENA = 1<<2, /* SQE Test Enabled */ PHY_M_PC_POL_R_DIS = 1<<1, /* Polarity Reversal Disabled */ PHY_M_PC_DIS_JABBER = 1<<0, /* Disable Jabber */ }; enum { PHY_M_PC_EN_DET = 2<<8, /* Energy Detect (Mode 1) */ PHY_M_PC_EN_DET_PLUS = 3<<8, /* Energy Detect Plus (Mode 2) */ }; enum { PHY_M_PC_MAN_MDI = 0, /* 00 = Manual MDI configuration */ PHY_M_PC_MAN_MDIX = 1, /* 01 = Manual MDIX configuration */ PHY_M_PC_ENA_AUTO = 3, /* 11 = Enable Automatic Crossover */ }; /* for 10/100 Fast Ethernet PHY (88E3082 only) */ enum { PHY_M_PC_ENA_DTE_DT = 1<<15, /* Enable Data Terminal Equ. (DTE) Detect */ PHY_M_PC_ENA_ENE_DT = 1<<14, /* Enable Energy Detect (sense & pulse) */ PHY_M_PC_DIS_NLP_CK = 1<<13, /* Disable Normal Link Puls (NLP) Check */ PHY_M_PC_ENA_LIP_NP = 1<<12, /* Enable Link Partner Next Page Reg. */ PHY_M_PC_DIS_NLP_GN = 1<<11, /* Disable Normal Link Puls Generation */ PHY_M_PC_DIS_SCRAMB = 1<<9, /* Disable Scrambler */ PHY_M_PC_DIS_FEFI = 1<<8, /* Disable Far End Fault Indic. (FEFI) */ PHY_M_PC_SH_TP_SEL = 1<<6, /* Shielded Twisted Pair Select */ PHY_M_PC_RX_FD_MSK = 3<<2,/* Bit 3.. 2: Rx FIFO Depth Mask */ }; /***** PHY_MARV_PHY_STAT 16 bit r/o PHY Specific Status Reg *****/ enum { PHY_M_PS_SPEED_MSK = 3<<14, /* Bit 15..14: Speed Mask */ PHY_M_PS_SPEED_1000 = 1<<15, /* 10 = 1000 Mbps */ PHY_M_PS_SPEED_100 = 1<<14, /* 01 = 100 Mbps */ PHY_M_PS_SPEED_10 = 0, /* 00 = 10 Mbps */ PHY_M_PS_FULL_DUP = 1<<13, /* Full Duplex */ PHY_M_PS_PAGE_REC = 1<<12, /* Page Received */ PHY_M_PS_SPDUP_RES = 1<<11, /* Speed & Duplex Resolved */ PHY_M_PS_LINK_UP = 1<<10, /* Link Up */ PHY_M_PS_CABLE_MSK = 7<<7, /* Bit 9.. 7: Cable Length Mask */ PHY_M_PS_MDI_X_STAT = 1<<6, /* MDI Crossover Stat (1=MDIX) */ PHY_M_PS_DOWNS_STAT = 1<<5, /* Downshift Status (1=downsh.) */ PHY_M_PS_ENDET_STAT = 1<<4, /* Energy Detect Status (1=act) */ PHY_M_PS_TX_P_EN = 1<<3, /* Tx Pause Enabled */ PHY_M_PS_RX_P_EN = 1<<2, /* Rx Pause Enabled */ PHY_M_PS_POL_REV = 1<<1, /* Polarity Reversed */ PHY_M_PS_JABBER = 1<<0, /* Jabber */ }; #define PHY_M_PS_PAUSE_MSK (PHY_M_PS_TX_P_EN | PHY_M_PS_RX_P_EN) /* for 10/100 Fast Ethernet PHY (88E3082 only) */ enum { PHY_M_PS_DTE_DETECT = 1<<15, /* Data Terminal Equipment (DTE) Detected */ PHY_M_PS_RES_SPEED = 1<<14, /* Resolved Speed (1=100 Mbps, 0=10 Mbps */ }; enum { PHY_M_IS_AN_ERROR = 1<<15, /* Auto-Negotiation Error */ PHY_M_IS_LSP_CHANGE = 1<<14, /* Link Speed Changed */ PHY_M_IS_DUP_CHANGE = 1<<13, /* Duplex Mode Changed */ PHY_M_IS_AN_PR = 1<<12, /* Page Received */ PHY_M_IS_AN_COMPL = 1<<11, /* Auto-Negotiation Completed */ PHY_M_IS_LST_CHANGE = 1<<10, /* Link Status Changed */ PHY_M_IS_SYMB_ERROR = 1<<9, /* Symbol Error */ PHY_M_IS_FALSE_CARR = 1<<8, /* False Carrier */ PHY_M_IS_FIFO_ERROR = 1<<7, /* FIFO Overflow/Underrun Error */ PHY_M_IS_MDI_CHANGE = 1<<6, /* MDI Crossover Changed */ PHY_M_IS_DOWNSH_DET = 1<<5, /* Downshift Detected */ PHY_M_IS_END_CHANGE = 1<<4, /* Energy Detect Changed */ PHY_M_IS_DTE_CHANGE = 1<<2, /* DTE Power Det. Status Changed */ PHY_M_IS_POL_CHANGE = 1<<1, /* Polarity Changed */ PHY_M_IS_JABBER = 1<<0, /* Jabber */ PHY_M_IS_DEF_MSK = PHY_M_IS_AN_ERROR | PHY_M_IS_LSP_CHANGE | PHY_M_IS_LST_CHANGE | PHY_M_IS_FIFO_ERROR, PHY_M_IS_AN_MSK = PHY_M_IS_AN_ERROR | PHY_M_IS_AN_COMPL, }; /***** PHY_MARV_EXT_CTRL 16 bit r/w Ext. PHY Specific Ctrl *****/ enum { PHY_M_EC_ENA_BC_EXT = 1<<15, /* Enable Block Carr. Ext. 
(88E1111 only) */ PHY_M_EC_ENA_LIN_LB = 1<<14, /* Enable Line Loopback (88E1111 only) */ PHY_M_EC_DIS_LINK_P = 1<<12, /* Disable Link Pulses (88E1111 only) */ PHY_M_EC_M_DSC_MSK = 3<<10, /* Bit 11..10: Master Downshift Counter */ /* (88E1011 only) */ PHY_M_EC_S_DSC_MSK = 3<<8, /* Bit 9.. 8: Slave Downshift Counter */ /* (88E1011 only) */ PHY_M_EC_M_DSC_MSK2 = 7<<9, /* Bit 11.. 9: Master Downshift Counter */ /* (88E1111 only) */ PHY_M_EC_DOWN_S_ENA = 1<<8, /* Downshift Enable (88E1111 only) */ /* !!! Errata in spec. (1 = disable) */ PHY_M_EC_RX_TIM_CT = 1<<7, /* RGMII Rx Timing Control*/ PHY_M_EC_MAC_S_MSK = 7<<4, /* Bit 6.. 4: Def. MAC interface speed */ PHY_M_EC_FIB_AN_ENA = 1<<3, /* Fiber Auto-Neg. Enable (88E1011S only) */ PHY_M_EC_DTE_D_ENA = 1<<2, /* DTE Detect Enable (88E1111 only) */ PHY_M_EC_TX_TIM_CT = 1<<1, /* RGMII Tx Timing Control */ PHY_M_EC_TRANS_DIS = 1<<0, /* Transmitter Disable (88E1111 only) */}; #define PHY_M_EC_M_DSC(x) ((u16)(x)<<10) /* 00=1x; 01=2x; 10=3x; 11=4x */ #define PHY_M_EC_S_DSC(x) ((u16)(x)<<8) /* 00=dis; 01=1x; 10=2x; 11=3x */ #define PHY_M_EC_MAC_S(x) ((u16)(x)<<4) /* 01X=0; 110=2.5; 111=25 (MHz) */ #define PHY_M_EC_M_DSC_2(x) ((u16)(x)<<9) /* 000=1x; 001=2x; 010=3x; 011=4x */ /* 100=5x; 101=6x; 110=7x; 111=8x */ enum { MAC_TX_CLK_0_MHZ = 2, MAC_TX_CLK_2_5_MHZ = 6, MAC_TX_CLK_25_MHZ = 7, }; /***** PHY_MARV_LED_CTRL 16 bit r/w LED Control Reg *****/ enum { PHY_M_LEDC_DIS_LED = 1<<15, /* Disable LED */ PHY_M_LEDC_PULS_MSK = 7<<12,/* Bit 14..12: Pulse Stretch Mask */ PHY_M_LEDC_F_INT = 1<<11, /* Force Interrupt */ PHY_M_LEDC_BL_R_MSK = 7<<8,/* Bit 10.. 8: Blink Rate Mask */ PHY_M_LEDC_DP_C_LSB = 1<<7, /* Duplex Control (LSB, 88E1111 only) */ PHY_M_LEDC_TX_C_LSB = 1<<6, /* Tx Control (LSB, 88E1111 only) */ PHY_M_LEDC_LK_C_MSK = 7<<3,/* Bit 5.. 3: Link Control Mask */ /* (88E1111 only) */ }; #define PHY_M_LED_PULS_DUR(x) (((u16)(x)<<12) & PHY_M_LEDC_PULS_MSK) #define PHY_M_LED_BLINK_RT(x) (((u16)(x)<<8) & PHY_M_LEDC_BL_R_MSK) enum { PHY_M_LEDC_LINK_MSK = 3<<3, /* Bit 4.. 3: Link Control Mask */ /* (88E1011 only) */ PHY_M_LEDC_DP_CTRL = 1<<2, /* Duplex Control */ PHY_M_LEDC_DP_C_MSB = 1<<2, /* Duplex Control (MSB, 88E1111 only) */ PHY_M_LEDC_RX_CTRL = 1<<1, /* Rx Activity / Link */ PHY_M_LEDC_TX_CTRL = 1<<0, /* Tx Activity / Link */ PHY_M_LEDC_TX_C_MSB = 1<<0, /* Tx Control (MSB, 88E1111 only) */ }; enum { PULS_NO_STR = 0, /* no pulse stretching */ PULS_21MS = 1, /* 21 ms to 42 ms */ PULS_42MS = 2, /* 42 ms to 84 ms */ PULS_84MS = 3, /* 84 ms to 170 ms */ PULS_170MS = 4, /* 170 ms to 340 ms */ PULS_340MS = 5, /* 340 ms to 670 ms */ PULS_670MS = 6, /* 670 ms to 1.3 s */ PULS_1300MS = 7, /* 1.3 s to 2.7 s */ }; enum { BLINK_42MS = 0, /* 42 ms */ BLINK_84MS = 1, /* 84 ms */ BLINK_170MS = 2, /* 170 ms */ BLINK_340MS = 3, /* 340 ms */ BLINK_670MS = 4, /* 670 ms */ }; /***** PHY_MARV_LED_OVER 16 bit r/w Manual LED Override Reg *****/ #define PHY_M_LED_MO_SGMII(x) ((x)<<14) /* Bit 15..14: SGMII AN Timer */ /* Bit 13..12: reserved */ #define PHY_M_LED_MO_DUP(x) ((x)<<10) /* Bit 11..10: Duplex */ #define PHY_M_LED_MO_10(x) ((x)<<8) /* Bit 9.. 8: Link 10 */ #define PHY_M_LED_MO_100(x) ((x)<<6) /* Bit 7.. 6: Link 100 */ #define PHY_M_LED_MO_1000(x) ((x)<<4) /* Bit 5.. 4: Link 1000 */ #define PHY_M_LED_MO_RX(x) ((x)<<2) /* Bit 3.. 2: Rx */ #define PHY_M_LED_MO_TX(x) ((x)<<0) /* Bit 1.. 0: Tx */ enum { MO_LED_NORM = 0, MO_LED_BLINK = 1, MO_LED_OFF = 2, MO_LED_ON = 3, }; /***** PHY_MARV_EXT_CTRL_2 16 bit r/w Ext. 
PHY Specific Ctrl 2 *****/ enum { PHY_M_EC2_FI_IMPED = 1<<6, /* Fiber Input Impedance */ PHY_M_EC2_FO_IMPED = 1<<5, /* Fiber Output Impedance */ PHY_M_EC2_FO_M_CLK = 1<<4, /* Fiber Mode Clock Enable */ PHY_M_EC2_FO_BOOST = 1<<3, /* Fiber Output Boost */ PHY_M_EC2_FO_AM_MSK = 7, /* Bit 2.. 0: Fiber Output Amplitude */ }; /***** PHY_MARV_EXT_P_STAT 16 bit r/w Ext. PHY Specific Status *****/ enum { PHY_M_FC_AUTO_SEL = 1<<15, /* Fiber/Copper Auto Sel. Dis. */ PHY_M_FC_AN_REG_ACC = 1<<14, /* Fiber/Copper AN Reg. Access */ PHY_M_FC_RESOLUTION = 1<<13, /* Fiber/Copper Resolution */ PHY_M_SER_IF_AN_BP = 1<<12, /* Ser. IF AN Bypass Enable */ PHY_M_SER_IF_BP_ST = 1<<11, /* Ser. IF AN Bypass Status */ PHY_M_IRQ_POLARITY = 1<<10, /* IRQ polarity */ PHY_M_DIS_AUT_MED = 1<<9, /* Disable Aut. Medium Reg. Selection */ /* (88E1111 only) */ /* Bit 9.. 4: reserved (88E1011 only) */ PHY_M_UNDOC1 = 1<<7, /* undocumented bit !! */ PHY_M_DTE_POW_STAT = 1<<4, /* DTE Power Status (88E1111 only) */ PHY_M_MODE_MASK = 0xf, /* Bit 3.. 0: copy of HWCFG MODE[3:0] */ }; /***** PHY_MARV_CABLE_DIAG 16 bit r/o Cable Diagnostic Reg *****/ enum { PHY_M_CABD_ENA_TEST = 1<<15, /* Enable Test (Page 0) */ PHY_M_CABD_DIS_WAIT = 1<<15, /* Disable Waiting Period (Page 1) */ /* (88E1111 only) */ PHY_M_CABD_STAT_MSK = 3<<13, /* Bit 14..13: Status Mask */ PHY_M_CABD_AMPL_MSK = 0x1f<<8, /* Bit 12.. 8: Amplitude Mask */ /* (88E1111 only) */ PHY_M_CABD_DIST_MSK = 0xff, /* Bit 7.. 0: Distance Mask */ }; /* values for Cable Diagnostic Status (11=fail; 00=OK; 10=open; 01=short) */ enum { CABD_STAT_NORMAL= 0, CABD_STAT_SHORT = 1, CABD_STAT_OPEN = 2, CABD_STAT_FAIL = 3, }; /* for 10/100 Fast Ethernet PHY (88E3082 only) */ /***** PHY_MARV_FE_LED_PAR 16 bit r/w LED Parallel Select Reg. *****/ /* Bit 15..12: reserved (used internally) */ enum { PHY_M_FELP_LED2_MSK = 0xf<<8, /* Bit 11.. 8: LED2 Mask (LINK) */ PHY_M_FELP_LED1_MSK = 0xf<<4, /* Bit 7.. 4: LED1 Mask (ACT) */ PHY_M_FELP_LED0_MSK = 0xf, /* Bit 3.. 0: LED0 Mask (SPEED) */ }; #define PHY_M_FELP_LED2_CTRL(x) (((x)<<8) & PHY_M_FELP_LED2_MSK) #define PHY_M_FELP_LED1_CTRL(x) (((x)<<4) & PHY_M_FELP_LED1_MSK) #define PHY_M_FELP_LED0_CTRL(x) (((x)<<0) & PHY_M_FELP_LED0_MSK) enum { LED_PAR_CTRL_COLX = 0x00, LED_PAR_CTRL_ERROR = 0x01, LED_PAR_CTRL_DUPLEX = 0x02, LED_PAR_CTRL_DP_COL = 0x03, LED_PAR_CTRL_SPEED = 0x04, LED_PAR_CTRL_LINK = 0x05, LED_PAR_CTRL_TX = 0x06, LED_PAR_CTRL_RX = 0x07, LED_PAR_CTRL_ACT = 0x08, LED_PAR_CTRL_LNK_RX = 0x09, LED_PAR_CTRL_LNK_AC = 0x0a, LED_PAR_CTRL_ACT_BL = 0x0b, LED_PAR_CTRL_TX_BL = 0x0c, LED_PAR_CTRL_RX_BL = 0x0d, LED_PAR_CTRL_COL_BL = 0x0e, LED_PAR_CTRL_INACT = 0x0f }; /*****,PHY_MARV_FE_SPEC_2 16 bit r/w Specific Control Reg. 2 *****/ enum { PHY_M_FESC_DIS_WAIT = 1<<2, /* Disable TDR Waiting Period */ PHY_M_FESC_ENA_MCLK = 1<<1, /* Enable MAC Rx Clock in sleep mode */ PHY_M_FESC_SEL_CL_A = 1<<0, /* Select Class A driver (100B-TX) */ }; /***** PHY_MARV_PHY_CTRL (page 3) 16 bit r/w LED Control Reg. *****/ enum { PHY_M_LEDC_LOS_MSK = 0xf<<12, /* Bit 15..12: LOS LED Ctrl. Mask */ PHY_M_LEDC_INIT_MSK = 0xf<<8, /* Bit 11.. 8: INIT LED Ctrl. Mask */ PHY_M_LEDC_STA1_MSK = 0xf<<4, /* Bit 7.. 4: STAT1 LED Ctrl. Mask */ PHY_M_LEDC_STA0_MSK = 0xf, /* Bit 3.. 0: STAT0 LED Ctrl. 
Mask */ }; #define PHY_M_LEDC_LOS_CTRL(x) (((x)<<12) & PHY_M_LEDC_LOS_MSK) #define PHY_M_LEDC_INIT_CTRL(x) (((x)<<8) & PHY_M_LEDC_INIT_MSK) #define PHY_M_LEDC_STA1_CTRL(x) (((x)<<4) & PHY_M_LEDC_STA1_MSK) #define PHY_M_LEDC_STA0_CTRL(x) (((x)<<0) & PHY_M_LEDC_STA0_MSK) /* GMAC registers */ /* Port Registers */ enum { GM_GP_STAT = 0x0000, /* 16 bit r/o General Purpose Status */ GM_GP_CTRL = 0x0004, /* 16 bit r/w General Purpose Control */ GM_TX_CTRL = 0x0008, /* 16 bit r/w Transmit Control Reg. */ GM_RX_CTRL = 0x000c, /* 16 bit r/w Receive Control Reg. */ GM_TX_FLOW_CTRL = 0x0010, /* 16 bit r/w Transmit Flow-Control */ GM_TX_PARAM = 0x0014, /* 16 bit r/w Transmit Parameter Reg. */ GM_SERIAL_MODE = 0x0018, /* 16 bit r/w Serial Mode Register */ /* Source Address Registers */ GM_SRC_ADDR_1L = 0x001c, /* 16 bit r/w Source Address 1 (low) */ GM_SRC_ADDR_1M = 0x0020, /* 16 bit r/w Source Address 1 (middle) */ GM_SRC_ADDR_1H = 0x0024, /* 16 bit r/w Source Address 1 (high) */ GM_SRC_ADDR_2L = 0x0028, /* 16 bit r/w Source Address 2 (low) */ GM_SRC_ADDR_2M = 0x002c, /* 16 bit r/w Source Address 2 (middle) */ GM_SRC_ADDR_2H = 0x0030, /* 16 bit r/w Source Address 2 (high) */ /* Multicast Address Hash Registers */ GM_MC_ADDR_H1 = 0x0034, /* 16 bit r/w Multicast Address Hash 1 */ GM_MC_ADDR_H2 = 0x0038, /* 16 bit r/w Multicast Address Hash 2 */ GM_MC_ADDR_H3 = 0x003c, /* 16 bit r/w Multicast Address Hash 3 */ GM_MC_ADDR_H4 = 0x0040, /* 16 bit r/w Multicast Address Hash 4 */ /* Interrupt Source Registers */ GM_TX_IRQ_SRC = 0x0044, /* 16 bit r/o Tx Overflow IRQ Source */ GM_RX_IRQ_SRC = 0x0048, /* 16 bit r/o Rx Overflow IRQ Source */ GM_TR_IRQ_SRC = 0x004c, /* 16 bit r/o Tx/Rx Over. IRQ Source */ /* Interrupt Mask Registers */ GM_TX_IRQ_MSK = 0x0050, /* 16 bit r/w Tx Overflow IRQ Mask */ GM_RX_IRQ_MSK = 0x0054, /* 16 bit r/w Rx Overflow IRQ Mask */ GM_TR_IRQ_MSK = 0x0058, /* 16 bit r/w Tx/Rx Over. IRQ Mask */ /* Serial Management Interface (SMI) Registers */ GM_SMI_CTRL = 0x0080, /* 16 bit r/w SMI Control Register */ GM_SMI_DATA = 0x0084, /* 16 bit r/w SMI Data Register */ GM_PHY_ADDR = 0x0088, /* 16 bit r/w GPHY Address Register */ }; /* MIB Counters */ #define GM_MIB_CNT_BASE 0x0100 /* Base Address of MIB Counters */ #define GM_MIB_CNT_SIZE 44 /* Number of MIB Counters */ /* * MIB Counters base address definitions (low word) - * use offset 4 for access to high word (32 bit r/o) */ enum { GM_RXF_UC_OK = GM_MIB_CNT_BASE + 0, /* Unicast Frames Received OK */ GM_RXF_BC_OK = GM_MIB_CNT_BASE + 8, /* Broadcast Frames Received OK */ GM_RXF_MPAUSE = GM_MIB_CNT_BASE + 16, /* Pause MAC Ctrl Frames Received */ GM_RXF_MC_OK = GM_MIB_CNT_BASE + 24, /* Multicast Frames Received OK */ GM_RXF_FCS_ERR = GM_MIB_CNT_BASE + 32, /* Rx Frame Check Seq. 
Error */ /* GM_MIB_CNT_BASE + 40: reserved */ GM_RXO_OK_LO = GM_MIB_CNT_BASE + 48, /* Octets Received OK Low */ GM_RXO_OK_HI = GM_MIB_CNT_BASE + 56, /* Octets Received OK High */ GM_RXO_ERR_LO = GM_MIB_CNT_BASE + 64, /* Octets Received Invalid Low */ GM_RXO_ERR_HI = GM_MIB_CNT_BASE + 72, /* Octets Received Invalid High */ GM_RXF_SHT = GM_MIB_CNT_BASE + 80, /* Frames <64 Byte Received OK */ GM_RXE_FRAG = GM_MIB_CNT_BASE + 88, /* Frames <64 Byte Received with FCS Err */ GM_RXF_64B = GM_MIB_CNT_BASE + 96, /* 64 Byte Rx Frame */ GM_RXF_127B = GM_MIB_CNT_BASE + 104, /* 65-127 Byte Rx Frame */ GM_RXF_255B = GM_MIB_CNT_BASE + 112, /* 128-255 Byte Rx Frame */ GM_RXF_511B = GM_MIB_CNT_BASE + 120, /* 256-511 Byte Rx Frame */ GM_RXF_1023B = GM_MIB_CNT_BASE + 128, /* 512-1023 Byte Rx Frame */ GM_RXF_1518B = GM_MIB_CNT_BASE + 136, /* 1024-1518 Byte Rx Frame */ GM_RXF_MAX_SZ = GM_MIB_CNT_BASE + 144, /* 1519-MaxSize Byte Rx Frame */ GM_RXF_LNG_ERR = GM_MIB_CNT_BASE + 152, /* Rx Frame too Long Error */ GM_RXF_JAB_PKT = GM_MIB_CNT_BASE + 160, /* Rx Jabber Packet Frame */ /* GM_MIB_CNT_BASE + 168: reserved */ GM_RXE_FIFO_OV = GM_MIB_CNT_BASE + 176, /* Rx FIFO overflow Event */ /* GM_MIB_CNT_BASE + 184: reserved */ GM_TXF_UC_OK = GM_MIB_CNT_BASE + 192, /* Unicast Frames Xmitted OK */ GM_TXF_BC_OK = GM_MIB_CNT_BASE + 200, /* Broadcast Frames Xmitted OK */ GM_TXF_MPAUSE = GM_MIB_CNT_BASE + 208, /* Pause MAC Ctrl Frames Xmitted */ GM_TXF_MC_OK = GM_MIB_CNT_BASE + 216, /* Multicast Frames Xmitted OK */ GM_TXO_OK_LO = GM_MIB_CNT_BASE + 224, /* Octets Transmitted OK Low */ GM_TXO_OK_HI = GM_MIB_CNT_BASE + 232, /* Octets Transmitted OK High */ GM_TXF_64B = GM_MIB_CNT_BASE + 240, /* 64 Byte Tx Frame */ GM_TXF_127B = GM_MIB_CNT_BASE + 248, /* 65-127 Byte Tx Frame */ GM_TXF_255B = GM_MIB_CNT_BASE + 256, /* 128-255 Byte Tx Frame */ GM_TXF_511B = GM_MIB_CNT_BASE + 264, /* 256-511 Byte Tx Frame */ GM_TXF_1023B = GM_MIB_CNT_BASE + 272, /* 512-1023 Byte Tx Frame */ GM_TXF_1518B = GM_MIB_CNT_BASE + 280, /* 1024-1518 Byte Tx Frame */ GM_TXF_MAX_SZ = GM_MIB_CNT_BASE + 288, /* 1519-MaxSize Byte Tx Frame */ GM_TXF_COL = GM_MIB_CNT_BASE + 304, /* Tx Collision */ GM_TXF_LAT_COL = GM_MIB_CNT_BASE + 312, /* Tx Late Collision */ GM_TXF_ABO_COL = GM_MIB_CNT_BASE + 320, /* Tx aborted due to Exces. Col. 
*/ GM_TXF_MUL_COL = GM_MIB_CNT_BASE + 328, /* Tx Multiple Collision */ GM_TXF_SNG_COL = GM_MIB_CNT_BASE + 336, /* Tx Single Collision */ GM_TXE_FIFO_UR = GM_MIB_CNT_BASE + 344, /* Tx FIFO Underrun Event */ }; /* GMAC Bit Definitions */ /* GM_GP_STAT 16 bit r/o General Purpose Status Register */ enum { GM_GPSR_SPEED = 1<<15, /* Bit 15: Port Speed (1 = 100 Mbps) */ GM_GPSR_DUPLEX = 1<<14, /* Bit 14: Duplex Mode (1 = Full) */ GM_GPSR_FC_TX_DIS = 1<<13, /* Bit 13: Tx Flow-Control Mode Disabled */ GM_GPSR_LINK_UP = 1<<12, /* Bit 12: Link Up Status */ GM_GPSR_PAUSE = 1<<11, /* Bit 11: Pause State */ GM_GPSR_TX_ACTIVE = 1<<10, /* Bit 10: Tx in Progress */ GM_GPSR_EXC_COL = 1<<9, /* Bit 9: Excessive Collisions Occured */ GM_GPSR_LAT_COL = 1<<8, /* Bit 8: Late Collisions Occured */ GM_GPSR_PHY_ST_CH = 1<<5, /* Bit 5: PHY Status Change */ GM_GPSR_GIG_SPEED = 1<<4, /* Bit 4: Gigabit Speed (1 = 1000 Mbps) */ GM_GPSR_PART_MODE = 1<<3, /* Bit 3: Partition mode */ GM_GPSR_FC_RX_DIS = 1<<2, /* Bit 2: Rx Flow-Control Mode Disabled */ GM_GPSR_PROM_EN = 1<<1, /* Bit 1: Promiscuous Mode Enabled */ }; /* GM_GP_CTRL 16 bit r/w General Purpose Control Register */ enum { GM_GPCR_PROM_ENA = 1<<14, /* Bit 14: Enable Promiscuous Mode */ GM_GPCR_FC_TX_DIS = 1<<13, /* Bit 13: Disable Tx Flow-Control Mode */ GM_GPCR_TX_ENA = 1<<12, /* Bit 12: Enable Transmit */ GM_GPCR_RX_ENA = 1<<11, /* Bit 11: Enable Receive */ GM_GPCR_BURST_ENA = 1<<10, /* Bit 10: Enable Burst Mode */ GM_GPCR_LOOP_ENA = 1<<9, /* Bit 9: Enable MAC Loopback Mode */ GM_GPCR_PART_ENA = 1<<8, /* Bit 8: Enable Partition Mode */ GM_GPCR_GIGS_ENA = 1<<7, /* Bit 7: Gigabit Speed (1000 Mbps) */ GM_GPCR_FL_PASS = 1<<6, /* Bit 6: Force Link Pass */ GM_GPCR_DUP_FULL = 1<<5, /* Bit 5: Full Duplex Mode */ GM_GPCR_FC_RX_DIS = 1<<4, /* Bit 4: Disable Rx Flow-Control Mode */ GM_GPCR_SPEED_100 = 1<<3, /* Bit 3: Port Speed 100 Mbps */ GM_GPCR_AU_DUP_DIS = 1<<2, /* Bit 2: Disable Auto-Update Duplex */ GM_GPCR_AU_FCT_DIS = 1<<1, /* Bit 1: Disable Auto-Update Flow-C. 
*/ GM_GPCR_AU_SPD_DIS = 1<<0, /* Bit 0: Disable Auto-Update Speed */ }; #define GM_GPCR_SPEED_1000 (GM_GPCR_GIGS_ENA | GM_GPCR_SPEED_100) #define GM_GPCR_AU_ALL_DIS (GM_GPCR_AU_DUP_DIS | GM_GPCR_AU_FCT_DIS|GM_GPCR_AU_SPD_DIS) /* GM_TX_CTRL 16 bit r/w Transmit Control Register */ enum { GM_TXCR_FORCE_JAM = 1<<15, /* Bit 15: Force Jam / Flow-Control */ GM_TXCR_CRC_DIS = 1<<14, /* Bit 14: Disable insertion of CRC */ GM_TXCR_PAD_DIS = 1<<13, /* Bit 13: Disable padding of packets */ GM_TXCR_COL_THR_MSK = 7<<10, /* Bit 12..10: Collision Threshold */ }; #define TX_COL_THR(x) (((x)<<10) & GM_TXCR_COL_THR_MSK) #define TX_COL_DEF 0x04 /* late collision after 64 byte */ /* GM_RX_CTRL 16 bit r/w Receive Control Register */ enum { GM_RXCR_UCF_ENA = 1<<15, /* Bit 15: Enable Unicast filtering */ GM_RXCR_MCF_ENA = 1<<14, /* Bit 14: Enable Multicast filtering */ GM_RXCR_CRC_DIS = 1<<13, /* Bit 13: Remove 4-byte CRC */ GM_RXCR_PASS_FC = 1<<12, /* Bit 12: Pass FC packets to FIFO */ }; /* GM_TX_PARAM 16 bit r/w Transmit Parameter Register */ enum { GM_TXPA_JAMLEN_MSK = 0x03<<14, /* Bit 15..14: Jam Length */ GM_TXPA_JAMIPG_MSK = 0x1f<<9, /* Bit 13..9: Jam IPG */ GM_TXPA_JAMDAT_MSK = 0x1f<<4, /* Bit 8..4: IPG Jam to Data */ TX_JAM_LEN_DEF = 0x03, TX_JAM_IPG_DEF = 0x0b, TX_IPG_JAM_DEF = 0x1c, }; #define TX_JAM_LEN_VAL(x) (((x)<<14) & GM_TXPA_JAMLEN_MSK) #define TX_JAM_IPG_VAL(x) (((x)<<9) & GM_TXPA_JAMIPG_MSK) #define TX_IPG_JAM_DATA(x) (((x)<<4) & GM_TXPA_JAMDAT_MSK) /* GM_SERIAL_MODE 16 bit r/w Serial Mode Register */ enum { GM_SMOD_DATABL_MSK = 0x1f<<11, /* Bit 15..11: Data Blinder (r/o) */ GM_SMOD_LIMIT_4 = 1<<10, /* Bit 10: 4 consecutive Tx trials */ GM_SMOD_VLAN_ENA = 1<<9, /* Bit 9: Enable VLAN (Max. Frame Len) */ GM_SMOD_JUMBO_ENA = 1<<8, /* Bit 8: Enable Jumbo (Max. Frame Len) */ GM_SMOD_IPG_MSK = 0x1f /* Bit 4..0: Inter-Packet Gap (IPG) */ }; #define DATA_BLIND_VAL(x) (((x)<<11) & GM_SMOD_DATABL_MSK) #define DATA_BLIND_DEF 0x04 #define IPG_DATA_VAL(x) (x & GM_SMOD_IPG_MSK) #define IPG_DATA_DEF 0x1e /* GM_SMI_CTRL 16 bit r/w SMI Control Register */ enum { GM_SMI_CT_PHY_A_MSK = 0x1f<<11, /* Bit 15..11: PHY Device Address */ GM_SMI_CT_REG_A_MSK = 0x1f<<6, /* Bit 10.. 
6: PHY Register Address */ GM_SMI_CT_OP_RD = 1<<5, /* Bit 5: OpCode Read (0=Write)*/ GM_SMI_CT_RD_VAL = 1<<4, /* Bit 4: Read Valid (Read completed) */ GM_SMI_CT_BUSY = 1<<3, /* Bit 3: Busy (Operation in progress) */ }; #define GM_SMI_CT_PHY_AD(x) (((x)<<11) & GM_SMI_CT_PHY_A_MSK) #define GM_SMI_CT_REG_AD(x) (((x)<<6) & GM_SMI_CT_REG_A_MSK) /* GM_PHY_ADDR 16 bit r/w GPHY Address Register */ enum { GM_PAR_MIB_CLR = 1<<5, /* Bit 5: Set MIB Clear Counter Mode */ GM_PAR_MIB_TST = 1<<4, /* Bit 4: MIB Load Counter (Test Mode) */ }; /* Receive Frame Status Encoding */ enum { GMR_FS_LEN = 0xffff<<16, /* Bit 31..16: Rx Frame Length */ GMR_FS_LEN_SHIFT = 16, GMR_FS_VLAN = 1<<13, /* Bit 13: VLAN Packet */ GMR_FS_JABBER = 1<<12, /* Bit 12: Jabber Packet */ GMR_FS_UN_SIZE = 1<<11, /* Bit 11: Undersize Packet */ GMR_FS_MC = 1<<10, /* Bit 10: Multicast Packet */ GMR_FS_BC = 1<<9, /* Bit 9: Broadcast Packet */ GMR_FS_RX_OK = 1<<8, /* Bit 8: Receive OK (Good Packet) */ GMR_FS_GOOD_FC = 1<<7, /* Bit 7: Good Flow-Control Packet */ GMR_FS_BAD_FC = 1<<6, /* Bit 6: Bad Flow-Control Packet */ GMR_FS_MII_ERR = 1<<5, /* Bit 5: MII Error */ GMR_FS_LONG_ERR = 1<<4, /* Bit 4: Too Long Packet */ GMR_FS_FRAGMENT = 1<<3, /* Bit 3: Fragment */ GMR_FS_CRC_ERR = 1<<1, /* Bit 1: CRC Error */ GMR_FS_RX_FF_OV = 1<<0, /* Bit 0: Rx FIFO Overflow */ /* * GMR_FS_ANY_ERR (analogous to XMR_FS_ANY_ERR) */ GMR_FS_ANY_ERR = GMR_FS_CRC_ERR | GMR_FS_LONG_ERR | GMR_FS_MII_ERR | GMR_FS_BAD_FC | GMR_FS_GOOD_FC | GMR_FS_JABBER, /* Rx GMAC FIFO Flush Mask (default) */ RX_FF_FL_DEF_MSK = GMR_FS_CRC_ERR | GMR_FS_RX_FF_OV |GMR_FS_MII_ERR | GMR_FS_BAD_FC | GMR_FS_UN_SIZE | GMR_FS_JABBER, }; /* RX_GMF_CTRL_T 32 bit Rx GMAC FIFO Control/Test */ enum { GMF_WP_TST_ON = 1<<14, /* Write Pointer Test On */ GMF_WP_TST_OFF = 1<<13, /* Write Pointer Test Off */ GMF_WP_STEP = 1<<12, /* Write Pointer Step/Increment */ GMF_RP_TST_ON = 1<<10, /* Read Pointer Test On */ GMF_RP_TST_OFF = 1<<9, /* Read Pointer Test Off */ GMF_RP_STEP = 1<<8, /* Read Pointer Step/Increment */ GMF_RX_F_FL_ON = 1<<7, /* Rx FIFO Flush Mode On */ GMF_RX_F_FL_OFF = 1<<6, /* Rx FIFO Flush Mode Off */ GMF_CLI_RX_FO = 1<<5, /* Clear IRQ Rx FIFO Overrun */ GMF_CLI_RX_FC = 1<<4, /* Clear IRQ Rx Frame Complete */ GMF_OPER_ON = 1<<3, /* Operational Mode On */ GMF_OPER_OFF = 1<<2, /* Operational Mode Off */ GMF_RST_CLR = 1<<1, /* Clear GMAC FIFO Reset */ GMF_RST_SET = 1<<0, /* Set GMAC FIFO Reset */ RX_GMF_FL_THR_DEF = 0xa, /* flush threshold (default) */ }; /* TX_GMF_CTRL_T 32 bit Tx GMAC FIFO Control/Test */ enum { GMF_WSP_TST_ON = 1<<18, /* Write Shadow Pointer Test On */ GMF_WSP_TST_OFF = 1<<17, /* Write Shadow Pointer Test Off */ GMF_WSP_STEP = 1<<16, /* Write Shadow Pointer Step/Increment */ GMF_CLI_TX_FU = 1<<6, /* Clear IRQ Tx FIFO Underrun */ GMF_CLI_TX_FC = 1<<5, /* Clear IRQ Tx Frame Complete */ GMF_CLI_TX_PE = 1<<4, /* Clear IRQ Tx Parity Error */ }; /* GMAC_TI_ST_CTRL 8 bit Time Stamp Timer Ctrl Reg (YUKON only) */ enum { GMT_ST_START = 1<<2, /* Start Time Stamp Timer */ GMT_ST_STOP = 1<<1, /* Stop Time Stamp Timer */ GMT_ST_CLR_IRQ = 1<<0, /* Clear Time Stamp Timer IRQ */ }; /* GMAC_CTRL 32 bit GMAC Control Reg (YUKON only) */ enum { GMC_H_BURST_ON = 1<<7, /* Half Duplex Burst Mode On */ GMC_H_BURST_OFF = 1<<6, /* Half Duplex Burst Mode Off */ GMC_F_LOOPB_ON = 1<<5, /* FIFO Loopback On */ GMC_F_LOOPB_OFF = 1<<4, /* FIFO Loopback Off */ GMC_PAUSE_ON = 1<<3, /* Pause On */ GMC_PAUSE_OFF = 1<<2, /* Pause Off */ GMC_RST_CLR = 1<<1, /* Clear GMAC Reset */ GMC_RST_SET = 1<<0, /* Set 
GMAC Reset */ }; /* GPHY_CTRL 32 bit GPHY Control Reg (YUKON only) */ enum { GPC_SEL_BDT = 1<<28, /* Select Bi-Dir. Transfer for MDC/MDIO */ GPC_INT_POL_HI = 1<<27, /* IRQ Polarity is Active HIGH */ GPC_75_OHM = 1<<26, /* Use 75 Ohm Termination instead of 50 */ GPC_DIS_FC = 1<<25, /* Disable Automatic Fiber/Copper Detection */ GPC_DIS_SLEEP = 1<<24, /* Disable Energy Detect */ GPC_HWCFG_M_3 = 1<<23, /* HWCFG_MODE[3] */ GPC_HWCFG_M_2 = 1<<22, /* HWCFG_MODE[2] */ GPC_HWCFG_M_1 = 1<<21, /* HWCFG_MODE[1] */ GPC_HWCFG_M_0 = 1<<20, /* HWCFG_MODE[0] */ GPC_ANEG_0 = 1<<19, /* ANEG[0] */ GPC_ENA_XC = 1<<18, /* Enable MDI crossover */ GPC_DIS_125 = 1<<17, /* Disable 125 MHz clock */ GPC_ANEG_3 = 1<<16, /* ANEG[3] */ GPC_ANEG_2 = 1<<15, /* ANEG[2] */ GPC_ANEG_1 = 1<<14, /* ANEG[1] */ GPC_ENA_PAUSE = 1<<13, /* Enable Pause (SYM_OR_REM) */ GPC_PHYADDR_4 = 1<<12, /* Bit 4 of Phy Addr */ GPC_PHYADDR_3 = 1<<11, /* Bit 3 of Phy Addr */ GPC_PHYADDR_2 = 1<<10, /* Bit 2 of Phy Addr */ GPC_PHYADDR_1 = 1<<9, /* Bit 1 of Phy Addr */ GPC_PHYADDR_0 = 1<<8, /* Bit 0 of Phy Addr */ /* Bits 7..2: reserved */ GPC_RST_CLR = 1<<1, /* Clear GPHY Reset */ GPC_RST_SET = 1<<0, /* Set GPHY Reset */ }; #define GPC_HWCFG_GMII_COP (GPC_HWCFG_M_3|GPC_HWCFG_M_2 | GPC_HWCFG_M_1 | GPC_HWCFG_M_0) #define GPC_HWCFG_GMII_FIB (GPC_HWCFG_M_2 | GPC_HWCFG_M_1 | GPC_HWCFG_M_0) #define GPC_ANEG_ADV_ALL_M (GPC_ANEG_3 | GPC_ANEG_2 | GPC_ANEG_1 | GPC_ANEG_0) /* forced speed and duplex mode (don't mix with other ANEG bits) */ #define GPC_FRC10MBIT_HALF 0 #define GPC_FRC10MBIT_FULL GPC_ANEG_0 #define GPC_FRC100MBIT_HALF GPC_ANEG_1 #define GPC_FRC100MBIT_FULL (GPC_ANEG_0 | GPC_ANEG_1) /* auto-negotiation with limited advertised speeds */ /* mix only with master/slave settings (for copper) */ #define GPC_ADV_1000_HALF GPC_ANEG_2 #define GPC_ADV_1000_FULL GPC_ANEG_3 #define GPC_ADV_ALL (GPC_ANEG_2 | GPC_ANEG_3) /* master/slave settings */ /* only for copper with 1000 Mbps */ #define GPC_FORCE_MASTER 0 #define GPC_FORCE_SLAVE GPC_ANEG_0 #define GPC_PREF_MASTER GPC_ANEG_1 #define GPC_PREF_SLAVE (GPC_ANEG_1 | GPC_ANEG_0) /* GMAC_IRQ_SRC 8 bit GMAC Interrupt Source Reg (YUKON only) */ /* GMAC_IRQ_MSK 8 bit GMAC Interrupt Mask Reg (YUKON only) */ enum { GM_IS_TX_CO_OV = 1<<5, /* Transmit Counter Overflow IRQ */ GM_IS_RX_CO_OV = 1<<4, /* Receive Counter Overflow IRQ */ GM_IS_TX_FF_UR = 1<<3, /* Transmit FIFO Underrun */ GM_IS_TX_COMPL = 1<<2, /* Frame Transmission Complete */ GM_IS_RX_FF_OR = 1<<1, /* Receive FIFO Overrun */ GM_IS_RX_COMPL = 1<<0, /* Frame Reception Complete */ #define GMAC_DEF_MSK (GM_IS_RX_FF_OR | GM_IS_TX_FF_UR) /* GMAC_LINK_CTRL 16 bit GMAC Link Control Reg (YUKON only) */ /* Bits 15.. 
2: reserved */ GMLC_RST_CLR = 1<<1, /* Clear GMAC Link Reset */ GMLC_RST_SET = 1<<0, /* Set GMAC Link Reset */ /* WOL_CTRL_STAT 16 bit WOL Control/Status Reg */ WOL_CTL_LINK_CHG_OCC = 1<<15, WOL_CTL_MAGIC_PKT_OCC = 1<<14, WOL_CTL_PATTERN_OCC = 1<<13, WOL_CTL_CLEAR_RESULT = 1<<12, WOL_CTL_ENA_PME_ON_LINK_CHG = 1<<11, WOL_CTL_DIS_PME_ON_LINK_CHG = 1<<10, WOL_CTL_ENA_PME_ON_MAGIC_PKT = 1<<9, WOL_CTL_DIS_PME_ON_MAGIC_PKT = 1<<8, WOL_CTL_ENA_PME_ON_PATTERN = 1<<7, WOL_CTL_DIS_PME_ON_PATTERN = 1<<6, WOL_CTL_ENA_LINK_CHG_UNIT = 1<<5, WOL_CTL_DIS_LINK_CHG_UNIT = 1<<4, WOL_CTL_ENA_MAGIC_PKT_UNIT = 1<<3, WOL_CTL_DIS_MAGIC_PKT_UNIT = 1<<2, WOL_CTL_ENA_PATTERN_UNIT = 1<<1, WOL_CTL_DIS_PATTERN_UNIT = 1<<0, }; #define WOL_CTL_DEFAULT \ (WOL_CTL_DIS_PME_ON_LINK_CHG | \ WOL_CTL_DIS_PME_ON_PATTERN | \ WOL_CTL_DIS_PME_ON_MAGIC_PKT | \ WOL_CTL_DIS_LINK_CHG_UNIT | \ WOL_CTL_DIS_PATTERN_UNIT | \ WOL_CTL_DIS_MAGIC_PKT_UNIT) /* WOL_MATCH_CTL 8 bit WOL Match Control Reg */ #define WOL_CTL_PATT_ENA(x) (1 << (x)) /* XMAC II registers */ enum { XM_MMU_CMD = 0x0000, /* 16 bit r/w MMU Command Register */ XM_POFF = 0x0008, /* 32 bit r/w Packet Offset Register */ XM_BURST = 0x000c, /* 32 bit r/w Burst Register for half duplex*/ XM_1L_VLAN_TAG = 0x0010, /* 16 bit r/w One Level VLAN Tag ID */ XM_2L_VLAN_TAG = 0x0014, /* 16 bit r/w Two Level VLAN Tag ID */ XM_TX_CMD = 0x0020, /* 16 bit r/w Transmit Command Register */ XM_TX_RT_LIM = 0x0024, /* 16 bit r/w Transmit Retry Limit Register */ XM_TX_STIME = 0x0028, /* 16 bit r/w Transmit Slottime Register */ XM_TX_IPG = 0x002c, /* 16 bit r/w Transmit Inter Packet Gap */ XM_RX_CMD = 0x0030, /* 16 bit r/w Receive Command Register */ XM_PHY_ADDR = 0x0034, /* 16 bit r/w PHY Address Register */ XM_PHY_DATA = 0x0038, /* 16 bit r/w PHY Data Register */ XM_GP_PORT = 0x0040, /* 32 bit r/w General Purpose Port Register */ XM_IMSK = 0x0044, /* 16 bit r/w Interrupt Mask Register */ XM_ISRC = 0x0048, /* 16 bit r/o Interrupt Status Register */ XM_HW_CFG = 0x004c, /* 16 bit r/w Hardware Config Register */ XM_TX_LO_WM = 0x0060, /* 16 bit r/w Tx FIFO Low Water Mark */ XM_TX_HI_WM = 0x0062, /* 16 bit r/w Tx FIFO High Water Mark */ XM_TX_THR = 0x0064, /* 16 bit r/w Tx Request Threshold */ XM_HT_THR = 0x0066, /* 16 bit r/w Host Request Threshold */ XM_PAUSE_DA = 0x0068, /* NA reg r/w Pause Destination Address */ XM_CTL_PARA = 0x0070, /* 32 bit r/w Control Parameter Register */ XM_MAC_OPCODE = 0x0074, /* 16 bit r/w Opcode for MAC control frames */ XM_MAC_PTIME = 0x0076, /* 16 bit r/w Pause time for MAC ctrl frames*/ XM_TX_STAT = 0x0078, /* 32 bit r/o Tx Status LIFO Register */ XM_EXM_START = 0x0080, /* r/w Start Address of the EXM Regs */ #define XM_EXM(reg) (XM_EXM_START + ((reg) << 3)) }; enum { XM_SRC_CHK = 0x0100, /* NA reg r/w Source Check Address Register */ XM_SA = 0x0108, /* NA reg r/w Station Address Register */ XM_HSM = 0x0110, /* 64 bit r/w Hash Match Address Registers */ XM_RX_LO_WM = 0x0118, /* 16 bit r/w Receive Low Water Mark */ XM_RX_HI_WM = 0x011a, /* 16 bit r/w Receive High Water Mark */ XM_RX_THR = 0x011c, /* 32 bit r/w Receive Request Threshold */ XM_DEV_ID = 0x0120, /* 32 bit r/o Device ID Register */ XM_MODE = 0x0124, /* 32 bit r/w Mode Register */ XM_LSA = 0x0128, /* NA reg r/o Last Source Register */ XM_TS_READ = 0x0130, /* 32 bit r/o Time Stamp Read Register */ XM_TS_LOAD = 0x0134, /* 32 bit r/o Time Stamp Load Value */ XM_STAT_CMD = 0x0200, /* 16 bit r/w Statistics Command Register */ XM_RX_CNT_EV = 0x0204, /* 32 bit r/o Rx Counter Event Register */ XM_TX_CNT_EV = 0x0208, /* 
32 bit r/o Tx Counter Event Register */ XM_RX_EV_MSK = 0x020c, /* 32 bit r/w Rx Counter Event Mask */ XM_TX_EV_MSK = 0x0210, /* 32 bit r/w Tx Counter Event Mask */ XM_TXF_OK = 0x0280, /* 32 bit r/o Frames Transmitted OK Conuter */ XM_TXO_OK_HI = 0x0284, /* 32 bit r/o Octets Transmitted OK High Cnt*/ XM_TXO_OK_LO = 0x0288, /* 32 bit r/o Octets Transmitted OK Low Cnt */ XM_TXF_BC_OK = 0x028c, /* 32 bit r/o Broadcast Frames Xmitted OK */ XM_TXF_MC_OK = 0x0290, /* 32 bit r/o Multicast Frames Xmitted OK */ XM_TXF_UC_OK = 0x0294, /* 32 bit r/o Unicast Frames Xmitted OK */ XM_TXF_LONG = 0x0298, /* 32 bit r/o Tx Long Frame Counter */ XM_TXE_BURST = 0x029c, /* 32 bit r/o Tx Burst Event Counter */ XM_TXF_MPAUSE = 0x02a0, /* 32 bit r/o Tx Pause MAC Ctrl Frame Cnt */ XM_TXF_MCTRL = 0x02a4, /* 32 bit r/o Tx MAC Ctrl Frame Counter */ XM_TXF_SNG_COL = 0x02a8, /* 32 bit r/o Tx Single Collision Counter */ XM_TXF_MUL_COL = 0x02ac, /* 32 bit r/o Tx Multiple Collision Counter */ XM_TXF_ABO_COL = 0x02b0, /* 32 bit r/o Tx aborted due to Exces. Col. */ XM_TXF_LAT_COL = 0x02b4, /* 32 bit r/o Tx Late Collision Counter */ XM_TXF_DEF = 0x02b8, /* 32 bit r/o Tx Deferred Frame Counter */ XM_TXF_EX_DEF = 0x02bc, /* 32 bit r/o Tx Excessive Deferall Counter */ XM_TXE_FIFO_UR = 0x02c0, /* 32 bit r/o Tx FIFO Underrun Event Cnt */ XM_TXE_CS_ERR = 0x02c4, /* 32 bit r/o Tx Carrier Sense Error Cnt */ XM_TXP_UTIL = 0x02c8, /* 32 bit r/o Tx Utilization in % */ XM_TXF_64B = 0x02d0, /* 32 bit r/o 64 Byte Tx Frame Counter */ XM_TXF_127B = 0x02d4, /* 32 bit r/o 65-127 Byte Tx Frame Counter */ XM_TXF_255B = 0x02d8, /* 32 bit r/o 128-255 Byte Tx Frame Counter */ XM_TXF_511B = 0x02dc, /* 32 bit r/o 256-511 Byte Tx Frame Counter */ XM_TXF_1023B = 0x02e0, /* 32 bit r/o 512-1023 Byte Tx Frame Counter*/ XM_TXF_MAX_SZ = 0x02e4, /* 32 bit r/o 1024-MaxSize Byte Tx Frame Cnt*/ XM_RXF_OK = 0x0300, /* 32 bit r/o Frames Received OK */ XM_RXO_OK_HI = 0x0304, /* 32 bit r/o Octets Received OK High Cnt */ XM_RXO_OK_LO = 0x0308, /* 32 bit r/o Octets Received OK Low Counter*/ XM_RXF_BC_OK = 0x030c, /* 32 bit r/o Broadcast Frames Received OK */ XM_RXF_MC_OK = 0x0310, /* 32 bit r/o Multicast Frames Received OK */ XM_RXF_UC_OK = 0x0314, /* 32 bit r/o Unicast Frames Received OK */ XM_RXF_MPAUSE = 0x0318, /* 32 bit r/o Rx Pause MAC Ctrl Frame Cnt */ XM_RXF_MCTRL = 0x031c, /* 32 bit r/o Rx MAC Ctrl Frame Counter */ XM_RXF_INV_MP = 0x0320, /* 32 bit r/o Rx invalid Pause Frame Cnt */ XM_RXF_INV_MOC = 0x0324, /* 32 bit r/o Rx Frames with inv. MAC Opcode*/ XM_RXE_BURST = 0x0328, /* 32 bit r/o Rx Burst Event Counter */ XM_RXE_FMISS = 0x032c, /* 32 bit r/o Rx Missed Frames Event Cnt */ XM_RXF_FRA_ERR = 0x0330, /* 32 bit r/o Rx Framing Error Counter */ XM_RXE_FIFO_OV = 0x0334, /* 32 bit r/o Rx FIFO overflow Event Cnt */ XM_RXF_JAB_PKT = 0x0338, /* 32 bit r/o Rx Jabber Packet Frame Cnt */ XM_RXE_CAR_ERR = 0x033c, /* 32 bit r/o Rx Carrier Event Error Cnt */ XM_RXF_LEN_ERR = 0x0340, /* 32 bit r/o Rx in Range Length Error */ XM_RXE_SYM_ERR = 0x0344, /* 32 bit r/o Rx Symbol Error Counter */ XM_RXE_SHT_ERR = 0x0348, /* 32 bit r/o Rx Short Event Error Cnt */ XM_RXE_RUNT = 0x034c, /* 32 bit r/o Rx Runt Event Counter */ XM_RXF_LNG_ERR = 0x0350, /* 32 bit r/o Rx Frame too Long Error Cnt */ XM_RXF_FCS_ERR = 0x0354, /* 32 bit r/o Rx Frame Check Seq. 
Error Cnt */ XM_RXF_CEX_ERR = 0x035c, /* 32 bit r/o Rx Carrier Ext Error Frame Cnt*/ XM_RXP_UTIL = 0x0360, /* 32 bit r/o Rx Utilization in % */ XM_RXF_64B = 0x0368, /* 32 bit r/o 64 Byte Rx Frame Counter */ XM_RXF_127B = 0x036c, /* 32 bit r/o 65-127 Byte Rx Frame Counter */ XM_RXF_255B = 0x0370, /* 32 bit r/o 128-255 Byte Rx Frame Counter */ XM_RXF_511B = 0x0374, /* 32 bit r/o 256-511 Byte Rx Frame Counter */ XM_RXF_1023B = 0x0378, /* 32 bit r/o 512-1023 Byte Rx Frame Counter*/ XM_RXF_MAX_SZ = 0x037c, /* 32 bit r/o 1024-MaxSize Byte Rx Frame Cnt*/ }; /* XM_MMU_CMD 16 bit r/w MMU Command Register */ enum { XM_MMU_PHY_RDY = 1<<12, /* Bit 12: PHY Read Ready */ XM_MMU_PHY_BUSY = 1<<11, /* Bit 11: PHY Busy */ XM_MMU_IGN_PF = 1<<10, /* Bit 10: Ignore Pause Frame */ XM_MMU_MAC_LB = 1<<9, /* Bit 9: Enable MAC Loopback */ XM_MMU_FRC_COL = 1<<7, /* Bit 7: Force Collision */ XM_MMU_SIM_COL = 1<<6, /* Bit 6: Simulate Collision */ XM_MMU_NO_PRE = 1<<5, /* Bit 5: No MDIO Preamble */ XM_MMU_GMII_FD = 1<<4, /* Bit 4: GMII uses Full Duplex */ XM_MMU_RAT_CTRL = 1<<3, /* Bit 3: Enable Rate Control */ XM_MMU_GMII_LOOP= 1<<2, /* Bit 2: PHY is in Loopback Mode */ XM_MMU_ENA_RX = 1<<1, /* Bit 1: Enable Receiver */ XM_MMU_ENA_TX = 1<<0, /* Bit 0: Enable Transmitter */ }; /* XM_TX_CMD 16 bit r/w Transmit Command Register */ enum { XM_TX_BK2BK = 1<<6, /* Bit 6: Ignor Carrier Sense (Tx Bk2Bk)*/ XM_TX_ENC_BYP = 1<<5, /* Bit 5: Set Encoder in Bypass Mode */ XM_TX_SAM_LINE = 1<<4, /* Bit 4: (sc) Start utilization calculation */ XM_TX_NO_GIG_MD = 1<<3, /* Bit 3: Disable Carrier Extension */ XM_TX_NO_PRE = 1<<2, /* Bit 2: Disable Preamble Generation */ XM_TX_NO_CRC = 1<<1, /* Bit 1: Disable CRC Generation */ XM_TX_AUTO_PAD = 1<<0, /* Bit 0: Enable Automatic Padding */ }; /* XM_TX_RT_LIM 16 bit r/w Transmit Retry Limit Register */ #define XM_RT_LIM_MSK 0x1f /* Bit 4..0: Tx Retry Limit */ /* XM_TX_STIME 16 bit r/w Transmit Slottime Register */ #define XM_STIME_MSK 0x7f /* Bit 6..0: Tx Slottime bits */ /* XM_TX_IPG 16 bit r/w Transmit Inter Packet Gap */ #define XM_IPG_MSK 0xff /* Bit 7..0: IPG value bits */ /* XM_RX_CMD 16 bit r/w Receive Command Register */ enum { XM_RX_LENERR_OK = 1<<8, /* Bit 8 don't set Rx Err bit for */ /* inrange error packets */ XM_RX_BIG_PK_OK = 1<<7, /* Bit 7 don't set Rx Err bit for */ /* jumbo packets */ XM_RX_IPG_CAP = 1<<6, /* Bit 6 repl. type field with IPG */ XM_RX_TP_MD = 1<<5, /* Bit 5: Enable transparent Mode */ XM_RX_STRIP_FCS = 1<<4, /* Bit 4: Enable FCS Stripping */ XM_RX_SELF_RX = 1<<3, /* Bit 3: Enable Rx of own packets */ XM_RX_SAM_LINE = 1<<2, /* Bit 2: (sc) Start utilization calculation */ XM_RX_STRIP_PAD = 1<<1, /* Bit 1: Strip pad bytes of Rx frames */ XM_RX_DIS_CEXT = 1<<0, /* Bit 0: Disable carrier ext. check */ }; /* XM_GP_PORT 32 bit r/w General Purpose Port Register */ enum { XM_GP_ANIP = 1<<6, /* Bit 6: (ro) Auto-Neg. in progress */ XM_GP_FRC_INT = 1<<5, /* Bit 5: (sc) Force Interrupt */ XM_GP_RES_MAC = 1<<3, /* Bit 3: (sc) Reset MAC and FIFOs */ XM_GP_RES_STAT = 1<<2, /* Bit 2: (sc) Reset the statistics module */ XM_GP_INP_ASS = 1<<0, /* Bit 0: (ro) GP Input Pin asserted */ }; /* XM_IMSK 16 bit r/w Interrupt Mask Register */ /* XM_ISRC 16 bit r/o Interrupt Status Register */ enum { XM_IS_LNK_AE = 1<<14, /* Bit 14: Link Asynchronous Event */ XM_IS_TX_ABORT = 1<<13, /* Bit 13: Transmit Abort, late Col. 
etc */ XM_IS_FRC_INT = 1<<12, /* Bit 12: Force INT bit set in GP */ XM_IS_INP_ASS = 1<<11, /* Bit 11: Input Asserted, GP bit 0 set */ XM_IS_LIPA_RC = 1<<10, /* Bit 10: Link Partner requests config */ XM_IS_RX_PAGE = 1<<9, /* Bit 9: Page Received */ XM_IS_TX_PAGE = 1<<8, /* Bit 8: Next Page Loaded for Transmit */ XM_IS_AND = 1<<7, /* Bit 7: Auto-Negotiation Done */ XM_IS_TSC_OV = 1<<6, /* Bit 6: Time Stamp Counter Overflow */ XM_IS_RXC_OV = 1<<5, /* Bit 5: Rx Counter Event Overflow */ XM_IS_TXC_OV = 1<<4, /* Bit 4: Tx Counter Event Overflow */ XM_IS_RXF_OV = 1<<3, /* Bit 3: Receive FIFO Overflow */ XM_IS_TXF_UR = 1<<2, /* Bit 2: Transmit FIFO Underrun */ XM_IS_TX_COMP = 1<<1, /* Bit 1: Frame Tx Complete */ XM_IS_RX_COMP = 1<<0, /* Bit 0: Frame Rx Complete */ XM_IMSK_DISABLE = 0xffff, }; /* XM_HW_CFG 16 bit r/w Hardware Config Register */ enum { XM_HW_GEN_EOP = 1<<3, /* Bit 3: generate End of Packet pulse */ XM_HW_COM4SIG = 1<<2, /* Bit 2: use Comma Detect for Sig. Det.*/ XM_HW_GMII_MD = 1<<0, /* Bit 0: GMII Interface selected */ }; /* XM_TX_LO_WM 16 bit r/w Tx FIFO Low Water Mark */ /* XM_TX_HI_WM 16 bit r/w Tx FIFO High Water Mark */ #define XM_TX_WM_MSK 0x01ff /* Bit 9.. 0 Tx FIFO Watermark bits */ /* XM_TX_THR 16 bit r/w Tx Request Threshold */ /* XM_HT_THR 16 bit r/w Host Request Threshold */ /* XM_RX_THR 16 bit r/w Rx Request Threshold */ #define XM_THR_MSK 0x03ff /* Bit 10.. 0 Rx/Tx Request Threshold bits */ /* XM_TX_STAT 32 bit r/o Tx Status LIFO Register */ enum { XM_ST_VALID = (1UL<<31), /* Bit 31: Status Valid */ XM_ST_BYTE_CNT = (0x3fffL<<17), /* Bit 30..17: Tx frame Length */ XM_ST_RETRY_CNT = (0x1fL<<12), /* Bit 16..12: Retry Count */ XM_ST_EX_COL = 1<<11, /* Bit 11: Excessive Collisions */ XM_ST_EX_DEF = 1<<10, /* Bit 10: Excessive Deferral */ XM_ST_BURST = 1<<9, /* Bit 9: p. xmitted in burst md*/ XM_ST_DEFER = 1<<8, /* Bit 8: packet was defered */ XM_ST_BC = 1<<7, /* Bit 7: Broadcast packet */ XM_ST_MC = 1<<6, /* Bit 6: Multicast packet */ XM_ST_UC = 1<<5, /* Bit 5: Unicast packet */ XM_ST_TX_UR = 1<<4, /* Bit 4: FIFO Underrun occured */ XM_ST_CS_ERR = 1<<3, /* Bit 3: Carrier Sense Error */ XM_ST_LAT_COL = 1<<2, /* Bit 2: Late Collision Error */ XM_ST_MUL_COL = 1<<1, /* Bit 1: Multiple Collisions */ XM_ST_SGN_COL = 1<<0, /* Bit 0: Single Collision */ }; /* XM_RX_LO_WM 16 bit r/w Receive Low Water Mark */ /* XM_RX_HI_WM 16 bit r/w Receive High Water Mark */ #define XM_RX_WM_MSK 0x03ff /* Bit 11.. 
0: Rx FIFO Watermark bits */ /* XM_DEV_ID 32 bit r/o Device ID Register */ #define XM_DEV_OUI (0x00ffffffUL<<8) /* Bit 31..8: Device OUI */ #define XM_DEV_REV (0x07L << 5) /* Bit 7..5: Chip Rev Num */ /* XM_MODE 32 bit r/w Mode Register */ enum { XM_MD_ENA_REJ = 1<<26, /* Bit 26: Enable Frame Reject */ XM_MD_SPOE_E = 1<<25, /* Bit 25: Send Pause on Edge */ /* extern generated */ XM_MD_TX_REP = 1<<24, /* Bit 24: Transmit Repeater Mode */ XM_MD_SPOFF_I = 1<<23, /* Bit 23: Send Pause on FIFO full */ /* intern generated */ XM_MD_LE_STW = 1<<22, /* Bit 22: Rx Stat Word in Little Endian */ XM_MD_TX_CONT = 1<<21, /* Bit 21: Send Continuous */ XM_MD_TX_PAUSE = 1<<20, /* Bit 20: (sc) Send Pause Frame */ XM_MD_ATS = 1<<19, /* Bit 19: Append Time Stamp */ XM_MD_SPOL_I = 1<<18, /* Bit 18: Send Pause on Low */ /* intern generated */ XM_MD_SPOH_I = 1<<17, /* Bit 17: Send Pause on High */ /* intern generated */ XM_MD_CAP = 1<<16, /* Bit 16: Check Address Pair */ XM_MD_ENA_HASH = 1<<15, /* Bit 15: Enable Hashing */ XM_MD_CSA = 1<<14, /* Bit 14: Check Station Address */ XM_MD_CAA = 1<<13, /* Bit 13: Check Address Array */ XM_MD_RX_MCTRL = 1<<12, /* Bit 12: Rx MAC Control Frame */ XM_MD_RX_RUNT = 1<<11, /* Bit 11: Rx Runt Frames */ XM_MD_RX_IRLE = 1<<10, /* Bit 10: Rx in Range Len Err Frame */ XM_MD_RX_LONG = 1<<9, /* Bit 9: Rx Long Frame */ XM_MD_RX_CRCE = 1<<8, /* Bit 8: Rx CRC Error Frame */ XM_MD_RX_ERR = 1<<7, /* Bit 7: Rx Error Frame */ XM_MD_DIS_UC = 1<<6, /* Bit 6: Disable Rx Unicast */ XM_MD_DIS_MC = 1<<5, /* Bit 5: Disable Rx Multicast */ XM_MD_DIS_BC = 1<<4, /* Bit 4: Disable Rx Broadcast */ XM_MD_ENA_PROM = 1<<3, /* Bit 3: Enable Promiscuous */ XM_MD_ENA_BE = 1<<2, /* Bit 2: Enable Big Endian */ XM_MD_FTF = 1<<1, /* Bit 1: (sc) Flush Tx FIFO */ XM_MD_FRF = 1<<0, /* Bit 0: (sc) Flush Rx FIFO */ }; #define XM_PAUSE_MODE (XM_MD_SPOE_E | XM_MD_SPOL_I | XM_MD_SPOH_I) #define XM_DEF_MODE (XM_MD_RX_RUNT | XM_MD_RX_IRLE | XM_MD_RX_LONG |\ XM_MD_RX_CRCE | XM_MD_RX_ERR | XM_MD_CSA) /* XM_STAT_CMD 16 bit r/w Statistics Command Register */ enum { XM_SC_SNP_RXC = 1<<5, /* Bit 5: (sc) Snap Rx Counters */ XM_SC_SNP_TXC = 1<<4, /* Bit 4: (sc) Snap Tx Counters */ XM_SC_CP_RXC = 1<<3, /* Bit 3: Copy Rx Counters Continuously */ XM_SC_CP_TXC = 1<<2, /* Bit 2: Copy Tx Counters Continuously */ XM_SC_CLR_RXC = 1<<1, /* Bit 1: (sc) Clear Rx Counters */ XM_SC_CLR_TXC = 1<<0, /* Bit 0: (sc) Clear Tx Counters */ }; /* XM_RX_CNT_EV 32 bit r/o Rx Counter Event Register */ /* XM_RX_EV_MSK 32 bit r/w Rx Counter Event Mask */ enum { XMR_MAX_SZ_OV = 1<<31, /* Bit 31: 1024-MaxSize Rx Cnt Ov*/ XMR_1023B_OV = 1<<30, /* Bit 30: 512-1023Byte Rx Cnt Ov*/ XMR_511B_OV = 1<<29, /* Bit 29: 256-511 Byte Rx Cnt Ov*/ XMR_255B_OV = 1<<28, /* Bit 28: 128-255 Byte Rx Cnt Ov*/ XMR_127B_OV = 1<<27, /* Bit 27: 65-127 Byte Rx Cnt Ov */ XMR_64B_OV = 1<<26, /* Bit 26: 64 Byte Rx Cnt Ov */ XMR_UTIL_OV = 1<<25, /* Bit 25: Rx Util Cnt Overflow */ XMR_UTIL_UR = 1<<24, /* Bit 24: Rx Util Cnt Underrun */ XMR_CEX_ERR_OV = 1<<23, /* Bit 23: CEXT Err Cnt Ov */ XMR_FCS_ERR_OV = 1<<21, /* Bit 21: Rx FCS Error Cnt Ov */ XMR_LNG_ERR_OV = 1<<20, /* Bit 20: Rx too Long Err Cnt Ov*/ XMR_RUNT_OV = 1<<19, /* Bit 19: Runt Event Cnt Ov */ XMR_SHT_ERR_OV = 1<<18, /* Bit 18: Rx Short Ev Err Cnt Ov*/ XMR_SYM_ERR_OV = 1<<17, /* Bit 17: Rx Sym Err Cnt Ov */ XMR_CAR_ERR_OV = 1<<15, /* Bit 15: Rx Carr Ev Err Cnt Ov */ XMR_JAB_PKT_OV = 1<<14, /* Bit 14: Rx Jabb Packet Cnt Ov */ XMR_FIFO_OV = 1<<13, /* Bit 13: Rx FIFO Ov Ev Cnt Ov */ XMR_FRA_ERR_OV = 1<<12, /* Bit 12: Rx 
Framing Err Cnt Ov */ XMR_FMISS_OV = 1<<11, /* Bit 11: Rx Missed Ev Cnt Ov */ XMR_BURST = 1<<10, /* Bit 10: Rx Burst Event Cnt Ov */ XMR_INV_MOC = 1<<9, /* Bit 9: Rx with inv. MAC OC Ov*/ XMR_INV_MP = 1<<8, /* Bit 8: Rx inv Pause Frame Ov */ XMR_MCTRL_OV = 1<<7, /* Bit 7: Rx MAC Ctrl-F Cnt Ov */ XMR_MPAUSE_OV = 1<<6, /* Bit 6: Rx Pause MAC Ctrl-F Ov*/ XMR_UC_OK_OV = 1<<5, /* Bit 5: Rx Unicast Frame CntOv*/ XMR_MC_OK_OV = 1<<4, /* Bit 4: Rx Multicast Cnt Ov */ XMR_BC_OK_OV = 1<<3, /* Bit 3: Rx Broadcast Cnt Ov */ XMR_OK_LO_OV = 1<<2, /* Bit 2: Octets Rx OK Low CntOv*/ XMR_OK_HI_OV = 1<<1, /* Bit 1: Octets Rx OK Hi Cnt Ov*/ XMR_OK_OV = 1<<0, /* Bit 0: Frames Received Ok Ov */ }; #define XMR_DEF_MSK (XMR_OK_LO_OV | XMR_OK_HI_OV) /* XM_TX_CNT_EV 32 bit r/o Tx Counter Event Register */ /* XM_TX_EV_MSK 32 bit r/w Tx Counter Event Mask */ enum { XMT_MAX_SZ_OV = 1<<25, /* Bit 25: 1024-MaxSize Tx Cnt Ov*/ XMT_1023B_OV = 1<<24, /* Bit 24: 512-1023Byte Tx Cnt Ov*/ XMT_511B_OV = 1<<23, /* Bit 23: 256-511 Byte Tx Cnt Ov*/ XMT_255B_OV = 1<<22, /* Bit 22: 128-255 Byte Tx Cnt Ov*/ XMT_127B_OV = 1<<21, /* Bit 21: 65-127 Byte Tx Cnt Ov */ XMT_64B_OV = 1<<20, /* Bit 20: 64 Byte Tx Cnt Ov */ XMT_UTIL_OV = 1<<19, /* Bit 19: Tx Util Cnt Overflow */ XMT_UTIL_UR = 1<<18, /* Bit 18: Tx Util Cnt Underrun */ XMT_CS_ERR_OV = 1<<17, /* Bit 17: Tx Carr Sen Err Cnt Ov*/ XMT_FIFO_UR_OV = 1<<16, /* Bit 16: Tx FIFO Ur Ev Cnt Ov */ XMT_EX_DEF_OV = 1<<15, /* Bit 15: Tx Ex Deferall Cnt Ov */ XMT_DEF = 1<<14, /* Bit 14: Tx Deferred Cnt Ov */ XMT_LAT_COL_OV = 1<<13, /* Bit 13: Tx Late Col Cnt Ov */ XMT_ABO_COL_OV = 1<<12, /* Bit 12: Tx abo dueto Ex Col Ov*/ XMT_MUL_COL_OV = 1<<11, /* Bit 11: Tx Mult Col Cnt Ov */ XMT_SNG_COL = 1<<10, /* Bit 10: Tx Single Col Cnt Ov */ XMT_MCTRL_OV = 1<<9, /* Bit 9: Tx MAC Ctrl Counter Ov*/ XMT_MPAUSE = 1<<8, /* Bit 8: Tx Pause MAC Ctrl-F Ov*/ XMT_BURST = 1<<7, /* Bit 7: Tx Burst Event Cnt Ov */ XMT_LONG = 1<<6, /* Bit 6: Tx Long Frame Cnt Ov */ XMT_UC_OK_OV = 1<<5, /* Bit 5: Tx Unicast Cnt Ov */ XMT_MC_OK_OV = 1<<4, /* Bit 4: Tx Multicast Cnt Ov */ XMT_BC_OK_OV = 1<<3, /* Bit 3: Tx Broadcast Cnt Ov */ XMT_OK_LO_OV = 1<<2, /* Bit 2: Octets Tx OK Low CntOv*/ XMT_OK_HI_OV = 1<<1, /* Bit 1: Octets Tx OK Hi Cnt Ov*/ XMT_OK_OV = 1<<0, /* Bit 0: Frames Tx Ok Ov */ }; #define XMT_DEF_MSK (XMT_OK_LO_OV | XMT_OK_HI_OV) struct skge_rx_desc { u32 control; u32 next_offset; u32 dma_lo; u32 dma_hi; u32 status; u32 timestamp; u16 csum2; u16 csum1; u16 csum2_start; u16 csum1_start; }; struct skge_tx_desc { u32 control; u32 next_offset; u32 dma_lo; u32 dma_hi; u32 status; u32 csum_offs; u16 csum_write; u16 csum_start; u32 rsvd; }; struct skge_element { struct skge_element *next; void *desc; struct sk_buff *skb; DECLARE_PCI_UNMAP_ADDR(mapaddr); DECLARE_PCI_UNMAP_LEN(maplen); }; struct skge_ring { struct skge_element *to_clean; struct skge_element *to_use; struct skge_element *start; unsigned long count; }; struct skge_hw { void __iomem *regs; struct pci_dev *pdev; spinlock_t hw_lock; u32 intr_mask; struct net_device *dev[2]; u8 chip_id; u8 chip_rev; u8 copper; u8 ports; u8 phy_type; u32 ram_size; u32 ram_offset; u16 phy_addr; spinlock_t phy_lock; struct tasklet_struct phy_task; }; enum pause_control { FLOW_MODE_NONE = 1, /* No Flow-Control */ FLOW_MODE_LOC_SEND = 2, /* Local station sends PAUSE */ FLOW_MODE_SYMMETRIC = 3, /* Both stations may send PAUSE */ FLOW_MODE_SYM_OR_REM = 4, /* Both stations may send PAUSE or * just the remote station may send PAUSE */ }; enum pause_status { FLOW_STAT_INDETERMINATED=0, /* 
indeterminated */
	FLOW_STAT_NONE,		/* No Flow Control */
	FLOW_STAT_REM_SEND,	/* Remote Station sends PAUSE */
	FLOW_STAT_LOC_SEND,	/* Local station sends PAUSE */
	FLOW_STAT_SYMMETRIC,	/* Both stations may send PAUSE */
};

struct skge_port {
	struct skge_hw	     *hw;
	struct net_device    *netdev;
	struct napi_struct   napi;
	int		     port;
	u32		     msg_enable;

	struct skge_ring     tx_ring;

	struct skge_ring     rx_ring ____cacheline_aligned_in_smp;
	unsigned int	     rx_buf_size;

	struct timer_list    link_timer;
	enum pause_control   flow_control;
	enum pause_status    flow_status;
	u8		     rx_csum;
	u8		     blink_on;
	u8		     wol;
	u8		     autoneg;	/* AUTONEG_ENABLE, AUTONEG_DISABLE */
	u8		     duplex;	/* DUPLEX_HALF, DUPLEX_FULL */
	u16		     speed;	/* SPEED_1000, SPEED_100, ... */
	u32		     advertising;

	void		     *mem;	/* PCI memory for rings */
	dma_addr_t	     dma;
	unsigned long	     mem_size;
#ifdef CONFIG_SKGE_DEBUG
	struct dentry	     *debugfs;
#endif
};

/* Register accessor for memory mapped device */
static inline u32 skge_read32(const struct skge_hw *hw, int reg)
{
	return readl(hw->regs + reg);
}

static inline u16 skge_read16(const struct skge_hw *hw, int reg)
{
	return readw(hw->regs + reg);
}

static inline u8 skge_read8(const struct skge_hw *hw, int reg)
{
	return readb(hw->regs + reg);
}

static inline void skge_write32(const struct skge_hw *hw, int reg, u32 val)
{
	writel(val, hw->regs + reg);
}

static inline void skge_write16(const struct skge_hw *hw, int reg, u16 val)
{
	writew(val, hw->regs + reg);
}

static inline void skge_write8(const struct skge_hw *hw, int reg, u8 val)
{
	writeb(val, hw->regs + reg);
}

/* MAC Related Registers inside the device. */
#define SK_REG(port,reg)	(((port)<<7)+(u16)(reg))
#define SK_XMAC_REG(port, reg) \
	((BASE_XMAC_1 + (port) * (BASE_XMAC_2 - BASE_XMAC_1)) | (reg) << 1)

static inline u32 xm_read32(const struct skge_hw *hw, int port, int reg)
{
	u32 v;
	v = skge_read16(hw, SK_XMAC_REG(port, reg));
	v |= (u32)skge_read16(hw, SK_XMAC_REG(port, reg+2)) << 16;
	return v;
}

static inline u16 xm_read16(const struct skge_hw *hw, int port, int reg)
{
	return skge_read16(hw, SK_XMAC_REG(port,reg));
}

static inline void xm_write32(const struct skge_hw *hw, int port, int r, u32 v)
{
	skge_write16(hw, SK_XMAC_REG(port,r), v & 0xffff);
	skge_write16(hw, SK_XMAC_REG(port,r+2), v >> 16);
}

static inline void xm_write16(const struct skge_hw *hw, int port, int r, u16 v)
{
	skge_write16(hw, SK_XMAC_REG(port,r), v);
}

static inline void xm_outhash(const struct skge_hw *hw, int port, int reg,
			      const u8 *hash)
{
	xm_write16(hw, port, reg,   (u16)hash[0] | ((u16)hash[1] << 8));
	xm_write16(hw, port, reg+2, (u16)hash[2] | ((u16)hash[3] << 8));
	xm_write16(hw, port, reg+4, (u16)hash[4] | ((u16)hash[5] << 8));
	xm_write16(hw, port, reg+6, (u16)hash[6] | ((u16)hash[7] << 8));
}

static inline void xm_outaddr(const struct skge_hw *hw, int port, int reg,
			      const u8 *addr)
{
	xm_write16(hw, port, reg,   (u16)addr[0] | ((u16)addr[1] << 8));
	xm_write16(hw, port, reg+2, (u16)addr[2] | ((u16)addr[3] << 8));
	xm_write16(hw, port, reg+4, (u16)addr[4] | ((u16)addr[5] << 8));
}

#define SK_GMAC_REG(port,reg) \
	(BASE_GMAC_1 + (port) * (BASE_GMAC_2-BASE_GMAC_1) + (reg))

static inline u16 gma_read16(const struct skge_hw *hw, int port, int reg)
{
	return skge_read16(hw, SK_GMAC_REG(port,reg));
}

static inline u32 gma_read32(const struct skge_hw *hw, int port, int reg)
{
	return (u32) skge_read16(hw, SK_GMAC_REG(port,reg))
		| ((u32)skge_read16(hw, SK_GMAC_REG(port,reg+4)) << 16);
}

static inline void gma_write16(const struct skge_hw *hw, int port, int r, u16 v)
{
	skge_write16(hw, SK_GMAC_REG(port,r), v);
}

static inline void gma_set_addr(struct skge_hw *hw, int port, int reg,
				const u8 *addr)
{
	gma_write16(hw, port, reg,   (u16) addr[0] | ((u16) addr[1] << 8));
	gma_write16(hw, port, reg+4, (u16) addr[2] | ((u16) addr[3] << 8));
	gma_write16(hw, port, reg+8, (u16) addr[4] | ((u16) addr[5] << 8));
}

#endif
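/*
 * Illustrative sketch (not part of the original header): decoding the
 * resolved speed and duplex from a PHY_MARV_PHY_STAT value.  The caller is
 * assumed to have read the 16-bit status register already; the example_*
 * names are hypothetical.
 */
static inline int example_phy_speed(u16 phy_stat)
{
	/* Bits 15..14: 10 = 1000 Mbps, 01 = 100 Mbps, 00 = 10 Mbps */
	switch (phy_stat & PHY_M_PS_SPEED_MSK) {
	case PHY_M_PS_SPEED_1000:
		return 1000;
	case PHY_M_PS_SPEED_100:
		return 100;
	default:
		return 10;
	}
}

static inline int example_phy_full_duplex(u16 phy_stat)
{
	return (phy_stat & PHY_M_PS_FULL_DUP) != 0;
}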
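/*
 * Illustrative sketch (not part of the original header): composing a
 * PHY_MARV_LED_CTRL value from the pulse-stretch and blink-rate helpers
 * defined above.  The PULS_170MS / BLINK_84MS choice is arbitrary.
 */
static inline u16 example_led_ctrl_value(void)
{
	return PHY_M_LED_PULS_DUR(PULS_170MS) | PHY_M_LED_BLINK_RT(BLINK_84MS);
}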
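/*
 * Illustrative sketch (not part of the original header): checking link state
 * through the GMAC General Purpose Status register using the gma_read16()
 * accessor defined above.
 */
static inline int example_gmac_link_up(const struct skge_hw *hw, int port)
{
	return (gma_read16(hw, port, GM_GP_STAT) & GM_GPSR_LINK_UP) != 0;
}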
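/*
 * Illustrative sketch (not part of the original header): programming
 * GM_TX_PARAM from the default jam length, jam IPG and IPG-to-data values
 * defined above.
 */
static inline void example_set_tx_param(const struct skge_hw *hw, int port)
{
	gma_write16(hw, port, GM_TX_PARAM,
		    TX_JAM_LEN_VAL(TX_JAM_LEN_DEF) |
		    TX_JAM_IPG_VAL(TX_JAM_IPG_DEF) |
		    TX_IPG_JAM_DATA(TX_IPG_JAM_DEF));
}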
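/*
 * Illustrative sketch (not part of the original header): the SMI read
 * sequence implied by the GM_SMI_* definitions -- issue a read op-code for
 * (phy, reg), poll until the controller flags the data as valid, then fetch
 * GM_SMI_DATA.  A real driver would bound the polling loop with a timeout.
 */
static inline u16 example_smi_read(const struct skge_hw *hw, int port,
				   int phy, int reg)
{
	gma_write16(hw, port, GM_SMI_CTRL,
		    GM_SMI_CT_PHY_AD(phy) | GM_SMI_CT_REG_AD(reg) |
		    GM_SMI_CT_OP_RD);

	while (!(gma_read16(hw, port, GM_SMI_CTRL) & GM_SMI_CT_RD_VAL))
		;	/* busy-wait; add a timeout in real code */

	return gma_read16(hw, port, GM_SMI_DATA);
}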
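/*
 * Illustrative sketch (not part of the original header): reading one of the
 * 32-bit GMAC MIB counters listed above (low word at the given offset, high
 * word at offset + 4) through gma_read32().
 */
static inline u32 example_rx_unicast_frames(const struct skge_hw *hw, int port)
{
	return gma_read32(hw, port, GM_RXF_UC_OK);
}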
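/*
 * Illustrative sketch (not part of the original header): interpreting a
 * receive frame status word with the GMR_FS_* bits above -- extract the
 * frame length and test whether the frame is usable.
 */
static inline int example_rx_frame_ok(u32 status, unsigned int *len)
{
	*len = (status & GMR_FS_LEN) >> GMR_FS_LEN_SHIFT;
	return (status & GMR_FS_RX_OK) && !(status & GMR_FS_ANY_ERR);
}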
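/*
 * Illustrative sketch (not part of the original header): XMAC II usage of
 * the accessors above -- program the station address register and read a
 * 32-bit statistics counter.
 */
static inline void example_set_xmac_station_addr(const struct skge_hw *hw,
						 int port, const u8 *mac)
{
	xm_outaddr(hw, port, XM_SA, mac);
}

static inline u32 example_xmac_tx_ok_frames(const struct skge_hw *hw, int port)
{
	return xm_read32(hw, port, XM_TXF_OK);
}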