path: root/drivers/edac
author	Aravind Gopalakrishnan <Aravind.Gopalakrishnan@amd.com>	2014-09-15 12:37:38 -0400
committer	Borislav Petkov <bp@suse.de>	2014-09-23 07:16:05 -0400
commit	7981a28f1ab5259754843b65c21879337785cb14 (patch)
tree	12732e3b330419f93a9f7e21630ebd829899370b /drivers/edac
parent	2d34056d27687180c0bab7dc40957a40d7ce0140 (diff)
amd64_edac: Modify usage of amd64_read_dct_pci_cfg()
Rationale behind this change:
 - F2x1xx addresses were stopped from being mapped explicitly to DCT1 from
   F15h (OR) onwards. They use _dct[0:1] mechanism to access the registers.
   So we should move away from using address ranges to select DCT for these
   families.
 - On newer processors, the address ranges used to indicate DCT1
   (0x140, 0x1a0) have different meanings than what is assumed currently.

Changes introduced:
 - amd64_read_dct_pci_cfg() now takes in dct value and uses it for
   'selecting the dct'
 - Update usage of the function. Keep in mind that different families have
   specific handling requirements
 - Remove [k8|f10]_read_dct_pci_cfg() as they don't do much different from
   amd64_read_pci_cfg()
 - Move the k8 specific check to amd64_read_pci_cfg
 - Remove f15_read_dct_pci_cfg() and move logic to amd64_read_dct_pci_cfg()
 - Remove now needless .read_dct_pci_cfg

Testing:
 - Tested on Fam 10h; Fam15h Models: 00h, 30h; Fam16h using 'EDAC_DEBUG'
   and mce_amd_inj
 - driver obtains info from F2x registers and caches it in pvt structures
   correctly
 - ECC decoding works fine

Signed-off-by: Aravind Gopalakrishnan <aravind.gopalakrishnan@amd.com>
Link: http://lkml.kernel.org/r/1410799058-3149-1-git-send-email-aravind.gopalakrishnan@amd.com
Signed-off-by: Borislav Petkov <bp@suse.de>
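
For illustration, the reworked helper takes the DCT index as an explicit argument instead of deriving it from the register offset. A minimal before/after sketch, taken from one of the call sites converted in the read_mc_regs() hunk below:

	/* before: DCT1 selected implicitly via the F2x1xx register offset */
	amd64_read_dct_pci_cfg(pvt, DCLR1, &pvt->dclr1);

	/*
	 * after: the DCT index is passed explicitly; the helper routes the
	 * access per family (offset += 0x100 on F10h, F1x10C[DctCfgSel] on F15h)
	 */
	amd64_read_dct_pci_cfg(pvt, 1, DCLR0, &pvt->dclr1);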
Diffstat (limited to 'drivers/edac')
-rw-r--r--	drivers/edac/amd64_edac.c	146
-rw-r--r--	drivers/edac/amd64_edac.h	5
2 files changed, 80 insertions, 71 deletions
diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c
index f8bf00010d45..bbd65149cdb2 100644
--- a/drivers/edac/amd64_edac.c
+++ b/drivers/edac/amd64_edac.c
@@ -87,61 +87,73 @@ int __amd64_write_pci_cfg_dword(struct pci_dev *pdev, int offset,
 }
 
 /*
+ * Select DCT to which PCI cfg accesses are routed
+ */
+static void f15h_select_dct(struct amd64_pvt *pvt, u8 dct)
+{
+	u32 reg = 0;
+
+	amd64_read_pci_cfg(pvt->F1, DCT_CFG_SEL, &reg);
+	reg &= (pvt->model == 0x30) ? ~3 : ~1;
+	reg |= dct;
+	amd64_write_pci_cfg(pvt->F1, DCT_CFG_SEL, reg);
+}
+
+/*
  *
  * Depending on the family, F2 DCT reads need special handling:
  *
- * K8: has a single DCT only
+ * K8: has a single DCT only and no address offsets >= 0x100
  *
  * F10h: each DCT has its own set of regs
  *	DCT0 -> F2x040..
  *	DCT1 -> F2x140..
  *
- * F15h: we select which DCT we access using F1x10C[DctCfgSel]
- *
  * F16h: has only 1 DCT
+ *
+ * F15h: we select which DCT we access using F1x10C[DctCfgSel]
  */
-static int k8_read_dct_pci_cfg(struct amd64_pvt *pvt, int addr, u32 *val,
-			       const char *func)
+static inline int amd64_read_dct_pci_cfg(struct amd64_pvt *pvt, u8 dct,
+					 int offset, u32 *val)
 {
-	if (addr >= 0x100)
-		return -EINVAL;
-
-	return __amd64_read_pci_cfg_dword(pvt->F2, addr, val, func);
-}
+	switch (pvt->fam) {
+	case 0xf:
+		if (dct || offset >= 0x100)
+			return -EINVAL;
+		break;
 
-static int f10_read_dct_pci_cfg(struct amd64_pvt *pvt, int addr, u32 *val,
-			       const char *func)
-{
-	return __amd64_read_pci_cfg_dword(pvt->F2, addr, val, func);
-}
+	case 0x10:
+		if (dct) {
+			/*
+			 * Note: If ganging is enabled, barring the regs
+			 * F2x[1,0]98 and F2x[1,0]9C; reads reads to F2x1xx
+			 * return 0. (cf. Section 2.8.1 F10h BKDG)
+			 */
+			if (dct_ganging_enabled(pvt))
+				return 0;
 
-/*
- * Select DCT to which PCI cfg accesses are routed
- */
-static void f15h_select_dct(struct amd64_pvt *pvt, u8 dct)
-{
-	u32 reg = 0;
+			offset += 0x100;
+		}
+		break;
 
-	amd64_read_pci_cfg(pvt->F1, DCT_CFG_SEL, &reg);
-	reg &= (pvt->model >= 0x30) ? ~3 : ~1;
-	reg |= dct;
-	amd64_write_pci_cfg(pvt->F1, DCT_CFG_SEL, reg);
-}
+	case 0x15:
+		/*
+		 * F15h: F2x1xx addresses do not map explicitly to DCT1.
+		 * We should select which DCT we access using F1x10C[DctCfgSel]
+		 */
+		dct = (dct && pvt->model == 0x30) ? 3 : dct;
+		f15h_select_dct(pvt, dct);
+		break;
 
-static int f15_read_dct_pci_cfg(struct amd64_pvt *pvt, int addr, u32 *val,
-				const char *func)
-{
-	u8 dct = 0;
+	case 0x16:
+		if (dct)
+			return -EINVAL;
+		break;
 
-	/* For F15 M30h, the second dct is DCT 3, refer to BKDG Section 2.10 */
-	if (addr >= 0x140 && addr <= 0x1a0) {
-		dct = (pvt->model >= 0x30) ? 3 : 1;
-		addr -= 0x100;
+	default:
+		break;
 	}
-
-	f15h_select_dct(pvt, dct);
-
-	return __amd64_read_pci_cfg_dword(pvt->F2, addr, val, func);
+	return amd64_read_pci_cfg(pvt->F2, offset, val);
 }
 
 /*
@@ -768,16 +780,17 @@ static void read_dct_base_mask(struct amd64_pvt *pvt)
 		u32 *base0 = &pvt->csels[0].csbases[cs];
 		u32 *base1 = &pvt->csels[1].csbases[cs];
 
-		if (!amd64_read_dct_pci_cfg(pvt, reg0, base0))
+		if (!amd64_read_dct_pci_cfg(pvt, 0, reg0, base0))
 			edac_dbg(0, " DCSB0[%d]=0x%08x reg: F2x%x\n",
 				 cs, *base0, reg0);
 
-		if (pvt->fam == 0xf || dct_ganging_enabled(pvt))
+		if (pvt->fam == 0xf)
 			continue;
 
-		if (!amd64_read_dct_pci_cfg(pvt, reg1, base1))
+		if (!amd64_read_dct_pci_cfg(pvt, 1, reg0, base1))
 			edac_dbg(0, " DCSB1[%d]=0x%08x reg: F2x%x\n",
-				 cs, *base1, reg1);
+				 cs, *base1, (pvt->fam == 0x10) ? reg1
+								: reg0);
 	}
 
 	for_each_chip_select_mask(cs, 0, pvt) {
@@ -786,16 +799,17 @@ static void read_dct_base_mask(struct amd64_pvt *pvt)
 		u32 *mask0 = &pvt->csels[0].csmasks[cs];
 		u32 *mask1 = &pvt->csels[1].csmasks[cs];
 
-		if (!amd64_read_dct_pci_cfg(pvt, reg0, mask0))
+		if (!amd64_read_dct_pci_cfg(pvt, 0, reg0, mask0))
 			edac_dbg(0, " DCSM0[%d]=0x%08x reg: F2x%x\n",
 				 cs, *mask0, reg0);
 
-		if (pvt->fam == 0xf || dct_ganging_enabled(pvt))
+		if (pvt->fam == 0xf)
 			continue;
 
-		if (!amd64_read_dct_pci_cfg(pvt, reg1, mask1))
+		if (!amd64_read_dct_pci_cfg(pvt, 1, reg0, mask1))
 			edac_dbg(0, " DCSM1[%d]=0x%08x reg: F2x%x\n",
-				 cs, *mask1, reg1);
+				 cs, *mask1, (pvt->fam == 0x10) ? reg1
+								: reg0);
 	}
 }
 
@@ -1198,7 +1212,7 @@ static void read_dram_ctl_register(struct amd64_pvt *pvt)
 	if (pvt->fam == 0xf)
 		return;
 
-	if (!amd64_read_dct_pci_cfg(pvt, DCT_SEL_LO, &pvt->dct_sel_lo)) {
+	if (!amd64_read_pci_cfg(pvt->F2, DCT_SEL_LO, &pvt->dct_sel_lo)) {
 		edac_dbg(0, "F2x110 (DCTSelLow): 0x%08x, High range addrs at: 0x%x\n",
 			 pvt->dct_sel_lo, dct_sel_baseaddr(pvt));
 
@@ -1219,7 +1233,7 @@ static void read_dram_ctl_register(struct amd64_pvt *pvt)
 			 dct_sel_interleave_addr(pvt));
 	}
 
-	amd64_read_dct_pci_cfg(pvt, DCT_SEL_HI, &pvt->dct_sel_hi);
+	amd64_read_pci_cfg(pvt->F2, DCT_SEL_HI, &pvt->dct_sel_hi);
 }
 
 /*
@@ -1430,7 +1444,7 @@ static u64 f1x_swap_interleaved_region(struct amd64_pvt *pvt, u64 sys_addr)
 		return sys_addr;
 	}
 
-	amd64_read_dct_pci_cfg(pvt, SWAP_INTLV_REG, &swap_reg);
+	amd64_read_pci_cfg(pvt->F2, SWAP_INTLV_REG, &swap_reg);
 
 	if (!(swap_reg & 0x1))
 		return sys_addr;
@@ -1723,10 +1737,16 @@ static void debug_display_dimm_sizes(struct amd64_pvt *pvt, u8 ctrl)
 		WARN_ON(ctrl != 0);
 	}
 
-	dbam = (ctrl && !dct_ganging_enabled(pvt)) ? pvt->dbam1 : pvt->dbam0;
-	dcsb = (ctrl && !dct_ganging_enabled(pvt)) ? pvt->csels[1].csbases
-						   : pvt->csels[0].csbases;
-
+	if (pvt->fam == 0x10) {
+		dbam = (ctrl && !dct_ganging_enabled(pvt)) ? pvt->dbam1
+							   : pvt->dbam0;
+		dcsb = (ctrl && !dct_ganging_enabled(pvt)) ?
+			pvt->csels[1].csbases :
+			pvt->csels[0].csbases;
+	} else if (ctrl) {
+		dbam = pvt->dbam0;
+		dcsb = pvt->csels[1].csbases;
+	}
 	edac_dbg(1, "F2x%d80 (DRAM Bank Address Mapping): 0x%08x\n",
 		 ctrl, dbam);
 
@@ -1760,7 +1780,6 @@ static struct amd64_family_type family_types[] = {
 			.early_channel_count	= k8_early_channel_count,
 			.map_sysaddr_to_csrow	= k8_map_sysaddr_to_csrow,
 			.dbam_to_cs		= k8_dbam_to_chip_select,
-			.read_dct_pci_cfg	= k8_read_dct_pci_cfg,
 		}
 	},
 	[F10_CPUS] = {
@@ -1771,7 +1790,6 @@ static struct amd64_family_type family_types[] = {
 			.early_channel_count	= f1x_early_channel_count,
 			.map_sysaddr_to_csrow	= f1x_map_sysaddr_to_csrow,
 			.dbam_to_cs		= f10_dbam_to_chip_select,
-			.read_dct_pci_cfg	= f10_read_dct_pci_cfg,
 		}
 	},
 	[F15_CPUS] = {
@@ -1782,7 +1800,6 @@ static struct amd64_family_type family_types[] = {
 			.early_channel_count	= f1x_early_channel_count,
 			.map_sysaddr_to_csrow	= f1x_map_sysaddr_to_csrow,
 			.dbam_to_cs		= f15_dbam_to_chip_select,
-			.read_dct_pci_cfg	= f15_read_dct_pci_cfg,
 		}
 	},
 	[F15_M30H_CPUS] = {
@@ -1793,7 +1810,6 @@ static struct amd64_family_type family_types[] = {
 			.early_channel_count	= f1x_early_channel_count,
 			.map_sysaddr_to_csrow	= f1x_map_sysaddr_to_csrow,
 			.dbam_to_cs		= f16_dbam_to_chip_select,
-			.read_dct_pci_cfg	= f15_read_dct_pci_cfg,
 		}
 	},
 	[F16_CPUS] = {
@@ -1804,7 +1820,6 @@ static struct amd64_family_type family_types[] = {
 			.early_channel_count	= f1x_early_channel_count,
 			.map_sysaddr_to_csrow	= f1x_map_sysaddr_to_csrow,
 			.dbam_to_cs		= f16_dbam_to_chip_select,
-			.read_dct_pci_cfg	= f10_read_dct_pci_cfg,
 		}
 	},
 	[F16_M30H_CPUS] = {
@@ -1815,7 +1830,6 @@ static struct amd64_family_type family_types[] = {
 			.early_channel_count	= f1x_early_channel_count,
 			.map_sysaddr_to_csrow	= f1x_map_sysaddr_to_csrow,
 			.dbam_to_cs		= f16_dbam_to_chip_select,
-			.read_dct_pci_cfg	= f10_read_dct_pci_cfg,
 		}
 	},
 };
@@ -2148,25 +2162,25 @@ static void read_mc_regs(struct amd64_pvt *pvt)
 	read_dct_base_mask(pvt);
 
 	amd64_read_pci_cfg(pvt->F1, DHAR, &pvt->dhar);
-	amd64_read_dct_pci_cfg(pvt, DBAM0, &pvt->dbam0);
+	amd64_read_dct_pci_cfg(pvt, 0, DBAM0, &pvt->dbam0);
 
 	amd64_read_pci_cfg(pvt->F3, F10_ONLINE_SPARE, &pvt->online_spare);
 
-	amd64_read_dct_pci_cfg(pvt, DCLR0, &pvt->dclr0);
-	amd64_read_dct_pci_cfg(pvt, DCHR0, &pvt->dchr0);
+	amd64_read_dct_pci_cfg(pvt, 0, DCLR0, &pvt->dclr0);
+	amd64_read_dct_pci_cfg(pvt, 0, DCHR0, &pvt->dchr0);
 
 	if (!dct_ganging_enabled(pvt)) {
-		amd64_read_dct_pci_cfg(pvt, DCLR1, &pvt->dclr1);
-		amd64_read_dct_pci_cfg(pvt, DCHR1, &pvt->dchr1);
+		amd64_read_dct_pci_cfg(pvt, 1, DCLR0, &pvt->dclr1);
+		amd64_read_dct_pci_cfg(pvt, 1, DCHR0, &pvt->dchr1);
 	}
 
 	pvt->ecc_sym_sz = 4;
 
 	if (pvt->fam >= 0x10) {
 		amd64_read_pci_cfg(pvt->F3, EXT_NB_MCA_CFG, &tmp);
+		/* F16h has only DCT0, so no need to read dbam1 */
 		if (pvt->fam != 0x16)
-			/* F16h has only DCT0 */
-			amd64_read_dct_pci_cfg(pvt, DBAM1, &pvt->dbam1);
+			amd64_read_dct_pci_cfg(pvt, 1, DBAM0, &pvt->dbam1);
 
 		/* F10h, revD and later can do x8 ECC too */
 		if ((pvt->fam > 0x10 || pvt->model > 7) && tmp & BIT(25))
diff --git a/drivers/edac/amd64_edac.h b/drivers/edac/amd64_edac.h
index d903e0c21144..55fb5941c6d4 100644
--- a/drivers/edac/amd64_edac.h
+++ b/drivers/edac/amd64_edac.h
@@ -481,8 +481,6 @@ struct low_ops {
 	void (*map_sysaddr_to_csrow)	(struct mem_ctl_info *mci, u64 sys_addr,
 					 struct err_info *);
 	int (*dbam_to_cs)		(struct amd64_pvt *pvt, u8 dct, unsigned cs_mode);
-	int (*read_dct_pci_cfg)		(struct amd64_pvt *pvt, int offset,
-					 u32 *val, const char *func);
 };
 
 struct amd64_family_type {
@@ -502,9 +500,6 @@ int __amd64_write_pci_cfg_dword(struct pci_dev *pdev, int offset,
 #define amd64_write_pci_cfg(pdev, offset, val)	\
 	__amd64_write_pci_cfg_dword(pdev, offset, val, __func__)
 
-#define amd64_read_dct_pci_cfg(pvt, offset, val) \
-	pvt->ops->read_dct_pci_cfg(pvt, offset, val, __func__)
-
 int amd64_get_dram_hole_info(struct mem_ctl_info *mci, u64 *hole_base,
 			     u64 *hole_offset, u64 *hole_size);
 