about summary refs log tree commit diff stats
path: root/drivers/edac/amd64_edac.c
diff options
context:
space:
mode:
authorBorislav Petkov <borislav.petkov@amd.com>2011-01-07 10:26:49 -0500
committerBorislav Petkov <borislav.petkov@amd.com>2011-03-17 09:46:18 -0400
commit5980bb9cd88a3fa44cc5beab599f08fbc928b832 (patch)
treed5af45439240fb8e6535fc8a11c6f6903966c7ed /drivers/edac/amd64_edac.c
parentbcd781f46a5f892ef2ae5843839849aa579fe096 (diff)
amd64_edac: Cleanup old defines cruft
Remove unused defines, drop family names from define names. Signed-off-by: Borislav Petkov <borislav.petkov@amd.com>
Diffstat (limited to 'drivers/edac/amd64_edac.c')
-rw-r--r-- drivers/edac/amd64_edac.c 26
1 file changed, 13 insertions, 13 deletions
diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c
index 04d481b578e4..729d9f1aecb9 100644
--- a/drivers/edac/amd64_edac.c
+++ b/drivers/edac/amd64_edac.c
@@ -229,7 +229,7 @@ static int __amd64_set_scrub_rate(struct pci_dev *ctl, u32 new_bw, u32 min_rate)
229 229
230 scrubval = scrubrates[i].scrubval; 230 scrubval = scrubrates[i].scrubval;
231 231
232 pci_write_bits32(ctl, K8_SCRCTRL, scrubval, 0x001F); 232 pci_write_bits32(ctl, SCRCTRL, scrubval, 0x001F);
233 233
234 if (scrubval) 234 if (scrubval)
235 return scrubrates[i].bandwidth; 235 return scrubrates[i].bandwidth;
@@ -250,7 +250,7 @@ static int amd64_get_scrub_rate(struct mem_ctl_info *mci)
250 u32 scrubval = 0; 250 u32 scrubval = 0;
251 int i, retval = -EINVAL; 251 int i, retval = -EINVAL;
252 252
253 amd64_read_pci_cfg(pvt->F3, K8_SCRCTRL, &scrubval); 253 amd64_read_pci_cfg(pvt->F3, SCRCTRL, &scrubval);
254 254
255 scrubval = scrubval & 0x001F; 255 scrubval = scrubval & 0x001F;
256 256
@@ -843,11 +843,11 @@ static void dump_misc_regs(struct amd64_pvt *pvt)
843 debugf1("F3xE8 (NB Cap): 0x%08x\n", pvt->nbcap); 843 debugf1("F3xE8 (NB Cap): 0x%08x\n", pvt->nbcap);
844 844
845 debugf1(" NB two channel DRAM capable: %s\n", 845 debugf1(" NB two channel DRAM capable: %s\n",
846 (pvt->nbcap & K8_NBCAP_DCT_DUAL) ? "yes" : "no"); 846 (pvt->nbcap & NBCAP_DCT_DUAL) ? "yes" : "no");
847 847
848 debugf1(" ECC capable: %s, ChipKill ECC capable: %s\n", 848 debugf1(" ECC capable: %s, ChipKill ECC capable: %s\n",
849 (pvt->nbcap & K8_NBCAP_SECDED) ? "yes" : "no", 849 (pvt->nbcap & NBCAP_SECDED) ? "yes" : "no",
850 (pvt->nbcap & K8_NBCAP_CHIPKILL) ? "yes" : "no"); 850 (pvt->nbcap & NBCAP_CHIPKILL) ? "yes" : "no");
851 851
852 amd64_dump_dramcfg_low(pvt->dclr0, 0); 852 amd64_dump_dramcfg_low(pvt->dclr0, 0);
853 853
@@ -1814,7 +1814,7 @@ static inline void __amd64_decode_bus_error(struct mem_ctl_info *mci,
1814 int ecc_type = (info->nbsh >> 13) & 0x3; 1814 int ecc_type = (info->nbsh >> 13) & 0x3;
1815 1815
1816 /* Bail early out if this was an 'observed' error */ 1816 /* Bail early out if this was an 'observed' error */
1817 if (PP(ec) == K8_NBSL_PP_OBS) 1817 if (PP(ec) == NBSL_PP_OBS)
1818 return; 1818 return;
1819 1819
1820 /* Do only ECC errors */ 1820 /* Do only ECC errors */
@@ -1906,7 +1906,7 @@ static void read_mc_regs(struct amd64_pvt *pvt)
1906 } else 1906 } else
1907 debugf0(" TOP_MEM2 disabled.\n"); 1907 debugf0(" TOP_MEM2 disabled.\n");
1908 1908
1909 amd64_read_pci_cfg(pvt->F3, K8_NBCAP, &pvt->nbcap); 1909 amd64_read_pci_cfg(pvt->F3, NBCAP, &pvt->nbcap);
1910 1910
1911 if (pvt->ops->read_dram_ctl_register) 1911 if (pvt->ops->read_dram_ctl_register)
1912 pvt->ops->read_dram_ctl_register(pvt); 1912 pvt->ops->read_dram_ctl_register(pvt);
@@ -2126,7 +2126,7 @@ static bool amd64_nb_mce_bank_enabled_on_node(int nid)
2126 2126
2127 for_each_cpu(cpu, mask) { 2127 for_each_cpu(cpu, mask) {
2128 struct msr *reg = per_cpu_ptr(msrs, cpu); 2128 struct msr *reg = per_cpu_ptr(msrs, cpu);
2129 nbe = reg->l & K8_MSR_MCGCTL_NBE; 2129 nbe = reg->l & MSR_MCGCTL_NBE;
2130 2130
2131 debugf0("core: %u, MCG_CTL: 0x%llx, NB MSR is %s\n", 2131 debugf0("core: %u, MCG_CTL: 0x%llx, NB MSR is %s\n",
2132 cpu, reg->q, 2132 cpu, reg->q,
@@ -2161,16 +2161,16 @@ static int toggle_ecc_err_reporting(struct ecc_settings *s, u8 nid, bool on)
2161 struct msr *reg = per_cpu_ptr(msrs, cpu); 2161 struct msr *reg = per_cpu_ptr(msrs, cpu);
2162 2162
2163 if (on) { 2163 if (on) {
2164 if (reg->l & K8_MSR_MCGCTL_NBE) 2164 if (reg->l & MSR_MCGCTL_NBE)
2165 s->flags.nb_mce_enable = 1; 2165 s->flags.nb_mce_enable = 1;
2166 2166
2167 reg->l |= K8_MSR_MCGCTL_NBE; 2167 reg->l |= MSR_MCGCTL_NBE;
2168 } else { 2168 } else {
2169 /* 2169 /*
2170 * Turn off NB MCE reporting only when it was off before 2170 * Turn off NB MCE reporting only when it was off before
2171 */ 2171 */
2172 if (!s->flags.nb_mce_enable) 2172 if (!s->flags.nb_mce_enable)
2173 reg->l &= ~K8_MSR_MCGCTL_NBE; 2173 reg->l &= ~MSR_MCGCTL_NBE;
2174 } 2174 }
2175 } 2175 }
2176 wrmsr_on_cpus(cmask, MSR_IA32_MCG_CTL, msrs); 2176 wrmsr_on_cpus(cmask, MSR_IA32_MCG_CTL, msrs);
@@ -2324,10 +2324,10 @@ static void setup_mci_misc_attrs(struct mem_ctl_info *mci)
2324 mci->mtype_cap = MEM_FLAG_DDR2 | MEM_FLAG_RDDR2; 2324 mci->mtype_cap = MEM_FLAG_DDR2 | MEM_FLAG_RDDR2;
2325 mci->edac_ctl_cap = EDAC_FLAG_NONE; 2325 mci->edac_ctl_cap = EDAC_FLAG_NONE;
2326 2326
2327 if (pvt->nbcap & K8_NBCAP_SECDED) 2327 if (pvt->nbcap & NBCAP_SECDED)
2328 mci->edac_ctl_cap |= EDAC_FLAG_SECDED; 2328 mci->edac_ctl_cap |= EDAC_FLAG_SECDED;
2329 2329
2330 if (pvt->nbcap & K8_NBCAP_CHIPKILL) 2330 if (pvt->nbcap & NBCAP_CHIPKILL)
2331 mci->edac_ctl_cap |= EDAC_FLAG_S4ECD4ED; 2331 mci->edac_ctl_cap |= EDAC_FLAG_S4ECD4ED;
2332 2332
2333 mci->edac_cap = amd64_determine_edac_cap(pvt); 2333 mci->edac_cap = amd64_determine_edac_cap(pvt);