author		Nils Carlson <nils.carlson@ericsson.com>	2011-08-08 05:21:26 -0400
committer	Mauro Carvalho Chehab <mchehab@redhat.com>	2011-11-01 08:01:51 -0400
commit		535e9c78e1a80946283cecc742b687b3a5ff5109 (patch)
tree		11903a603ce0428373dcf389c02a2cf4b91f6868 /drivers/edac
parent		168eb34dedf2f9456cc26f513d27de65c64fc608 (diff)
i7core_edac: scrubbing fixups
Get a more reliable DCLK value from DMI, name the SCRUBINTERVAL mask
and guard against potential overflow in the scrub rate computations.

Signed-off-by: Nils Carlson <nils.carlson@ericsson.com>
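The overflow guarded against here is concrete: the old code multiplied two u32 values before dividing (cache_line_size * freq_dclk = 64 * 800000000 = 51200000000), and that product wraps modulo 2^32 before the divide ever happens. Below is a minimal userspace sketch of the before/after arithmetic; the constants mirror the patch, but the demo program itself is not driver code:

#include <stdint.h>
#include <stdio.h>

#define SCRUBINTERVAL_MASK 0xffffff

int main(void)
{
	const uint32_t cache_line_size = 64;
	const uint32_t freq_dclk = 800 * 1000000;	/* old hard-coded Hz constant */
	const uint32_t freq_dclk_mhz = 800;		/* new MHz-based value */
	uint32_t new_bw = 5000;				/* requested scrub bandwidth, bytes/sec */

	/* Old expression: the u32 * u32 product silently wraps modulo 2^32 */
	uint32_t wrapped = 0x00ffffff & (cache_line_size * freq_dclk / new_bw);

	/* New expression: widen to 64 bits before multiplying, then range-check */
	unsigned long long scrub_interval =
		(unsigned long long)freq_dclk_mhz * cache_line_size *
		1000000 / new_bw;

	printf("wrapped u32 result: %u\n", (unsigned)wrapped);		/* 791071 (wrong) */
	printf("64-bit interval:    %llu\n", scrub_interval);		/* 10240000 */
	if (!scrub_interval || scrub_interval > SCRUBINTERVAL_MASK)
		printf("out of range -> the driver would return -EINVAL\n");
	return 0;
}

With new_bw = 5000 the wrapped 32-bit expression yields 791071 instead of the correct 10240000, roughly a factor-of-13 error in the programmed scrub interval.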
Diffstat (limited to 'drivers/edac')
-rw-r--r--	drivers/edac/i7core_edac.c	141
1 file changed, 133 insertions(+), 8 deletions(-)
diff --git a/drivers/edac/i7core_edac.c b/drivers/edac/i7core_edac.c
index 6ae7795dea86..7cb68decf57d 100644
--- a/drivers/edac/i7core_edac.c
+++ b/drivers/edac/i7core_edac.c
@@ -31,6 +31,7 @@
 #include <linux/pci_ids.h>
 #include <linux/slab.h>
 #include <linux/delay.h>
+#include <linux/dmi.h>
 #include <linux/edac.h>
 #include <linux/mmzone.h>
 #include <linux/smp.h>
@@ -107,6 +108,7 @@ MODULE_PARM_DESC(use_pci_fixup, "Enable PCI fixup to seek for hidden devices");
 
 #define MC_SCRUB_CONTROL	0x4c
   #define STARTSCRUB		(1 << 24)
+  #define SCRUBINTERVAL_MASK	0xffffff
 
 #define MC_COR_ECC_CNT_0	0x80
 #define MC_COR_ECC_CNT_1	0x84
@@ -275,6 +277,9 @@ struct i7core_pvt {
 	/* Count indicator to show errors not got */
 	unsigned		mce_overrun;
 
+	/* DCLK Frequency used for computing scrub rate */
+	int			dclk_freq;
+
 	/* Struct to control EDAC polling */
 	struct edac_pci_ctl_info *i7core_pci;
 };
@@ -1952,6 +1957,112 @@ static struct notifier_block i7_mce_dec = {
 	.notifier_call	= i7core_mce_check_error,
 };
 
+struct memdev_dmi_entry {
+	u8 type;
+	u8 length;
+	u16 handle;
+	u16 phys_mem_array_handle;
+	u16 mem_err_info_handle;
+	u16 total_width;
+	u16 data_width;
+	u16 size;
+	u8 form;
+	u8 device_set;
+	u8 device_locator;
+	u8 bank_locator;
+	u8 memory_type;
+	u16 type_detail;
+	u16 speed;
+	u8 manufacturer;
+	u8 serial_number;
+	u8 asset_tag;
+	u8 part_number;
+	u8 attributes;
+	u32 extended_size;
+	u16 conf_mem_clk_speed;
+} __attribute__((__packed__));
+
+
+/*
+ * Decode the DRAM Clock Frequency, be paranoid, make sure that all
+ * memory devices show the same speed, and if they don't then consider
+ * all speeds to be invalid.
+ */
+static void decode_dclk(const struct dmi_header *dh, void *_dclk_freq)
+{
+	int *dclk_freq = _dclk_freq;
+	u16 dmi_mem_clk_speed;
+
+	if (*dclk_freq == -1)
+		return;
+
+	if (dh->type == DMI_ENTRY_MEM_DEVICE) {
+		struct memdev_dmi_entry *memdev_dmi_entry =
+			(struct memdev_dmi_entry *)dh;
+		unsigned long conf_mem_clk_speed_offset =
+			(unsigned long)&memdev_dmi_entry->conf_mem_clk_speed -
+			(unsigned long)&memdev_dmi_entry->type;
+		unsigned long speed_offset =
+			(unsigned long)&memdev_dmi_entry->speed -
+			(unsigned long)&memdev_dmi_entry->type;
+
+		/* Check that a DIMM is present */
+		if (memdev_dmi_entry->size == 0)
+			return;
+
+		/*
+		 * Pick the configured speed if it's available, otherwise
+		 * pick the DIMM speed, or we don't have a speed.
+		 */
+		if (memdev_dmi_entry->length > conf_mem_clk_speed_offset) {
+			dmi_mem_clk_speed =
+				memdev_dmi_entry->conf_mem_clk_speed;
+		} else if (memdev_dmi_entry->length > speed_offset) {
+			dmi_mem_clk_speed = memdev_dmi_entry->speed;
+		} else {
+			*dclk_freq = -1;
+			return;
+		}
+
+		if (*dclk_freq == 0) {
+			/* First pass, speed was 0 */
+			if (dmi_mem_clk_speed > 0) {
+				/* Set speed if a valid speed is read */
+				*dclk_freq = dmi_mem_clk_speed;
+			} else {
+				/* Otherwise we don't have a valid speed */
+				*dclk_freq = -1;
+			}
+		} else if (*dclk_freq > 0 &&
+			   *dclk_freq != dmi_mem_clk_speed) {
+			/*
+			 * If we have a speed, check that all DIMMS are the same
+			 * speed, otherwise set the speed as invalid.
+			 */
+			*dclk_freq = -1;
+		}
+	}
+}
+
+/*
+ * The default DCLK frequency is used as a fallback if we
+ * fail to find anything reliable in the DMI. The value
+ * is taken straight from the datasheet.
+ */
+#define DEFAULT_DCLK_FREQ 800
+
+static int get_dclk_freq(void)
+{
+	int dclk_freq = 0;
+
+	dmi_walk(decode_dclk, (void *)&dclk_freq);
+
+	if (dclk_freq < 1)
+		return DEFAULT_DCLK_FREQ;
+
+	return dclk_freq;
+}
+
 /*
  * set_sdram_scrub_rate	This routine sets byte/sec bandwidth scrub rate
  *			to hardware according to SCRUBINTERVAL formula
@@ -1961,8 +2072,6 @@ static int set_sdram_scrub_rate(struct mem_ctl_info *mci, u32 new_bw)
 {
 	struct i7core_pvt *pvt = mci->pvt_info;
 	struct pci_dev *pdev;
-	const u32 cache_line_size = 64;
-	const u32 freq_dclk = 800*1000000;
 	u32 dw_scrub;
 	u32 dw_ssr;
 
@@ -1977,18 +2086,28 @@ static int set_sdram_scrub_rate(struct mem_ctl_info *mci, u32 new_bw)
 		/* Prepare to disable petrol scrub */
 		dw_scrub &= ~STARTSCRUB;
 		/* Stop the patrol scrub engine */
-		write_and_test(pdev, MC_SCRUB_CONTROL, dw_scrub & ~0x00ffffff);
+		write_and_test(pdev, MC_SCRUB_CONTROL,
+			       dw_scrub & ~SCRUBINTERVAL_MASK);
 
 		/* Get current status of scrub rate and set bit to disable */
 		pci_read_config_dword(pdev, MC_SSRCONTROL, &dw_ssr);
 		dw_ssr &= ~SSR_MODE_MASK;
 		dw_ssr |= SSR_MODE_DISABLE;
 	} else {
+		const int cache_line_size = 64;
+		const u32 freq_dclk_mhz = pvt->dclk_freq;
+		unsigned long long scrub_interval;
 		/*
 		 * Translate the desired scrub rate to a register value and
-		 * program the cooresponding register value.
+		 * program the corresponding register value.
 		 */
-		dw_scrub = 0x00ffffff & (cache_line_size * freq_dclk / new_bw);
+		scrub_interval = (unsigned long long)freq_dclk_mhz *
+			cache_line_size * 1000000 / new_bw;
+
+		if (!scrub_interval || scrub_interval > SCRUBINTERVAL_MASK)
+			return -EINVAL;
+
+		dw_scrub = SCRUBINTERVAL_MASK & scrub_interval;
 
 		/* Start the patrol scrub engine */
 		pci_write_config_dword(pdev, MC_SCRUB_CONTROL,
@@ -2015,7 +2134,8 @@ static int get_sdram_scrub_rate(struct mem_ctl_info *mci)
 	struct i7core_pvt *pvt = mci->pvt_info;
 	struct pci_dev *pdev;
 	const u32 cache_line_size = 64;
-	const u32 freq_dclk = 800*1000000;
+	const u32 freq_dclk_mhz = pvt->dclk_freq;
+	unsigned long long scrub_rate;
 	u32 scrubval;
 
 	/* Get data from the MC register, function 2 */
@@ -2027,12 +2147,14 @@ static int get_sdram_scrub_rate(struct mem_ctl_info *mci)
 	pci_read_config_dword(pdev, MC_SCRUB_CONTROL, &scrubval);
 
 	/* Mask highest 8-bits to 0 */
-	scrubval &= 0x00ffffff;
+	scrubval &= SCRUBINTERVAL_MASK;
 	if (!scrubval)
 		return 0;
 
 	/* Calculate scrub rate value into byte/sec bandwidth */
-	return 0xffffffff & (cache_line_size * freq_dclk / (u64) scrubval);
+	scrub_rate = (unsigned long long)freq_dclk_mhz *
+		1000000 * cache_line_size / scrubval;
+	return (int)scrub_rate;
 }
 
 static void enable_sdram_scrub_setting(struct mem_ctl_info *mci)
@@ -2204,6 +2326,9 @@ static int i7core_register_mci(struct i7core_dev *i7core_dev)
 	/* allocating generic PCI control info */
 	i7core_pci_ctl_create(pvt);
 
+	/* DCLK for scrub rate setting */
+	pvt->dclk_freq = get_dclk_freq();
+
 	atomic_notifier_chain_register(&x86_mce_decoder_chain, &i7_mce_dec);
 
 	return 0;
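
For reference, the set and get paths above are exact inverses around the 24-bit SCRUBINTERVAL field: scrubval = dclk_mhz * 10^6 * 64 / bandwidth, and bandwidth = dclk_mhz * 10^6 * 64 / scrubval. A minimal userspace sketch of the round trip, assuming the DEFAULT_DCLK_FREQ fallback from the patch (the helper names here are illustrative, not part of the driver):

#include <stdint.h>
#include <stdio.h>

#define SCRUBINTERVAL_MASK 0xffffff
#define DEFAULT_DCLK_FREQ  800	/* MHz, datasheet fallback from the patch */

/* Mirrors set_sdram_scrub_rate()'s arithmetic (illustrative helper) */
static long long bw_to_scrubval(unsigned long long dclk_mhz, uint32_t new_bw)
{
	unsigned long long scrub_interval;

	if (!new_bw)
		return -1;	/* bw == 0 means "disable scrubbing" in the driver */

	scrub_interval = dclk_mhz * 64 * 1000000 / new_bw;
	if (!scrub_interval || scrub_interval > SCRUBINTERVAL_MASK)
		return -1;	/* the driver returns -EINVAL here */
	return (long long)scrub_interval;
}

/* Mirrors get_sdram_scrub_rate()'s arithmetic (illustrative helper) */
static uint32_t scrubval_to_bw(unsigned long long dclk_mhz, uint32_t scrubval)
{
	scrubval &= SCRUBINTERVAL_MASK;
	if (!scrubval)
		return 0;
	return (uint32_t)(dclk_mhz * 1000000 * 64 / scrubval);
}

int main(void)
{
	uint32_t bw = 10000;	/* bytes/sec */
	long long v = bw_to_scrubval(DEFAULT_DCLK_FREQ, bw);

	/* At 800 MHz: scrubval = 5120000, and the round trip returns 10000 */
	if (v >= 0)
		printf("scrubval=%lld -> bw=%u bytes/sec\n", v,
		       scrubval_to_bw(DEFAULT_DCLK_FREQ, (uint32_t)v));
	return 0;
}

At the 800 MHz fallback, any request below 51200000000 / 0xffffff, roughly 3052 bytes/sec, overflows the 24-bit field; that is exactly the case the new -EINVAL check rejects.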