Diffstat (limited to 'drivers/edac/amd64_edac.h')
-rw-r--r--    drivers/edac/amd64_edac.h    369
1 file changed, 137 insertions, 232 deletions
diff --git a/drivers/edac/amd64_edac.h b/drivers/edac/amd64_edac.h
index 613ec72b0f65..11be36a311eb 100644
--- a/drivers/edac/amd64_edac.h
+++ b/drivers/edac/amd64_edac.h
@@ -144,7 +144,7 @@
  * sections 3.5.4 and 3.5.5 for more information.
  */
 
-#define EDAC_AMD64_VERSION "v3.3.0"
+#define EDAC_AMD64_VERSION "3.4.0"
 #define EDAC_MOD_STR "amd64_edac"
 
 /* Extended Model from CPUID, for CPU Revision numbers */
@@ -153,85 +153,64 @@
 #define K8_REV_F 4
 
 /* Hardware limit on ChipSelect rows per MC and processors per system */
-#define MAX_CS_COUNT 8
-#define DRAM_REG_COUNT 8
+#define NUM_CHIPSELECTS 8
+#define DRAM_RANGES 8
 
 #define ON true
 #define OFF false
 
 /*
+ * Create a contiguous bitmask starting at bit position @lo and ending at
+ * position @hi. For example
+ *
+ * GENMASK(21, 39) gives us the 64bit vector 0x000000ffffe00000.
+ */
+#define GENMASK(lo, hi) (((1ULL << ((hi) - (lo) + 1)) - 1) << (lo))
+
+/*
  * PCI-defined configuration space registers
  */
+#define PCI_DEVICE_ID_AMD_15H_NB_F1 0x1601
+#define PCI_DEVICE_ID_AMD_15H_NB_F2 0x1602
 
 
 /*
  * Function 1 - Address Map
  */
-#define K8_DRAM_BASE_LOW 0x40
-#define K8_DRAM_LIMIT_LOW 0x44
-#define K8_DHAR 0xf0
-
-#define DHAR_VALID BIT(0)
-#define F10_DRAM_MEM_HOIST_VALID BIT(1)
+#define DRAM_BASE_LO 0x40
+#define DRAM_LIMIT_LO 0x44
 
-#define DHAR_BASE_MASK 0xff000000
-#define dhar_base(dhar) (dhar & DHAR_BASE_MASK)
+#define dram_intlv_en(pvt, i) ((u8)((pvt->ranges[i].base.lo >> 8) & 0x7))
+#define dram_rw(pvt, i) ((u8)(pvt->ranges[i].base.lo & 0x3))
+#define dram_intlv_sel(pvt, i) ((u8)((pvt->ranges[i].lim.lo >> 8) & 0x7))
+#define dram_dst_node(pvt, i) ((u8)(pvt->ranges[i].lim.lo & 0x7))
 
-#define K8_DHAR_OFFSET_MASK 0x0000ff00
-#define k8_dhar_offset(dhar) ((dhar & K8_DHAR_OFFSET_MASK) << 16)
+#define DHAR 0xf0
+#define dhar_valid(pvt) ((pvt)->dhar & BIT(0))
+#define dhar_mem_hoist_valid(pvt) ((pvt)->dhar & BIT(1))
+#define dhar_base(pvt) ((pvt)->dhar & 0xff000000)
+#define k8_dhar_offset(pvt) (((pvt)->dhar & 0x0000ff00) << 16)
 
-#define F10_DHAR_OFFSET_MASK 0x0000ff80
         /* NOTE: Extra mask bit vs K8 */
-#define f10_dhar_offset(dhar) ((dhar & F10_DHAR_OFFSET_MASK) << 16)
+#define f10_dhar_offset(pvt) (((pvt)->dhar & 0x0000ff80) << 16)
 
+#define DCT_CFG_SEL 0x10C
 
-/* F10 High BASE/LIMIT registers */
-#define F10_DRAM_BASE_HIGH 0x140
-#define F10_DRAM_LIMIT_HIGH 0x144
+#define DRAM_BASE_HI 0x140
+#define DRAM_LIMIT_HI 0x144
 
 
 /*
  * Function 2 - DRAM controller
  */
-#define K8_DCSB0 0x40
-#define F10_DCSB1 0x140
+#define DCSB0 0x40
+#define DCSB1 0x140
+#define DCSB_CS_ENABLE BIT(0)
 
-#define K8_DCSB_CS_ENABLE BIT(0)
-#define K8_DCSB_NPT_SPARE BIT(1)
-#define K8_DCSB_NPT_TESTFAIL BIT(2)
+#define DCSM0 0x60
+#define DCSM1 0x160
 
-/*
- * REV E: select [31:21] and [15:9] from DCSB and the shift amount to form
- * the address
- */
-#define REV_E_DCSB_BASE_BITS (0xFFE0FE00ULL)
-#define REV_E_DCS_SHIFT 4
-
-#define REV_F_F1Xh_DCSB_BASE_BITS (0x1FF83FE0ULL)
-#define REV_F_F1Xh_DCS_SHIFT 8
-
-/*
- * REV F and later: selects [28:19] and [13:5] from DCSB and the shift amount
- * to form the address
- */
-#define REV_F_DCSB_BASE_BITS (0x1FF83FE0ULL)
-#define REV_F_DCS_SHIFT 8
-
-/* DRAM CS Mask Registers */
-#define K8_DCSM0 0x60
-#define F10_DCSM1 0x160
-
-/* REV E: select [29:21] and [15:9] from DCSM */
-#define REV_E_DCSM_MASK_BITS 0x3FE0FE00
-
-/* unused bits [24:20] and [12:0] */
-#define REV_E_DCS_NOTUSED_BITS 0x01F01FFF
-
-/* REV F and later: select [28:19] and [13:5] from DCSM */
-#define REV_F_F1Xh_DCSM_MASK_BITS 0x1FF83FE0
-
-/* unused bits [26:22] and [12:0] */
-#define REV_F_F1Xh_DCS_NOTUSED_BITS 0x07C01FFF
+#define csrow_enabled(i, dct, pvt) ((pvt)->csels[(dct)].csbases[(i)] & DCSB_CS_ENABLE)
 
 #define DBAM0 0x80
 #define DBAM1 0x180
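
As an aside on the GENMASK() helper added above: it takes its arguments in (lo, hi) order, which is easy to misread. A small stand-alone sketch, not part of the patch, that reproduces the example from its comment:

#include <assert.h>
#include <stdio.h>

/* Same definition as in the hunk above; note the (lo, hi) argument order. */
#define GENMASK(lo, hi) (((1ULL << ((hi) - (lo) + 1)) - 1) << (lo))

int main(void)
{
        /* The comment's example: a mask covering bits [39:21]. */
        assert(GENMASK(21, 39) == 0x000000ffffe00000ULL);
        printf("GENMASK(21, 39) = 0x%016llx\n", GENMASK(21, 39));
        return 0;
}
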
@@ -241,148 +220,84 @@
 
 #define DBAM_MAX_VALUE 11
 
-
-#define F10_DCLR_0 0x90
-#define F10_DCLR_1 0x190
+#define DCLR0 0x90
+#define DCLR1 0x190
 #define REVE_WIDTH_128 BIT(16)
-#define F10_WIDTH_128 BIT(11)
+#define WIDTH_128 BIT(11)
 
+#define DCHR0 0x94
+#define DCHR1 0x194
+#define DDR3_MODE BIT(8)
 
-#define F10_DCHR_0 0x94
-#define F10_DCHR_1 0x194
+#define DCT_SEL_LO 0x110
+#define dct_sel_baseaddr(pvt) ((pvt)->dct_sel_lo & 0xFFFFF800)
+#define dct_sel_interleave_addr(pvt) (((pvt)->dct_sel_lo >> 6) & 0x3)
+#define dct_high_range_enabled(pvt) ((pvt)->dct_sel_lo & BIT(0))
+#define dct_interleave_enabled(pvt) ((pvt)->dct_sel_lo & BIT(2))
 
-#define F10_DCHR_FOUR_RANK_DIMM BIT(18)
-#define DDR3_MODE BIT(8)
-#define F10_DCHR_MblMode BIT(6)
+#define dct_ganging_enabled(pvt) ((boot_cpu_data.x86 == 0x10) && ((pvt)->dct_sel_lo & BIT(4)))
 
+#define dct_data_intlv_enabled(pvt) ((pvt)->dct_sel_lo & BIT(5))
+#define dct_memory_cleared(pvt) ((pvt)->dct_sel_lo & BIT(10))
 
-#define F10_DCTL_SEL_LOW 0x110
-#define dct_sel_baseaddr(pvt) ((pvt->dram_ctl_select_low) & 0xFFFFF800)
-#define dct_sel_interleave_addr(pvt) (((pvt->dram_ctl_select_low) >> 6) & 0x3)
-#define dct_high_range_enabled(pvt) (pvt->dram_ctl_select_low & BIT(0))
-#define dct_interleave_enabled(pvt) (pvt->dram_ctl_select_low & BIT(2))
-#define dct_ganging_enabled(pvt) (pvt->dram_ctl_select_low & BIT(4))
-#define dct_data_intlv_enabled(pvt) (pvt->dram_ctl_select_low & BIT(5))
-#define dct_dram_enabled(pvt) (pvt->dram_ctl_select_low & BIT(8))
-#define dct_memory_cleared(pvt) (pvt->dram_ctl_select_low & BIT(10))
+#define SWAP_INTLV_REG 0x10c
 
-#define F10_DCTL_SEL_HIGH 0x114
+#define DCT_SEL_HI 0x114
 
 /*
  * Function 3 - Misc Control
  */
-#define K8_NBCTL 0x40
-
-/* Correctable ECC error reporting enable */
-#define K8_NBCTL_CECCEn BIT(0)
-
-/* UnCorrectable ECC error reporting enable */
-#define K8_NBCTL_UECCEn BIT(1)
+#define NBCTL 0x40
 
-#define K8_NBCFG 0x44
-#define K8_NBCFG_CHIPKILL BIT(23)
-#define K8_NBCFG_ECC_ENABLE BIT(22)
+#define NBCFG 0x44
+#define NBCFG_CHIPKILL BIT(23)
+#define NBCFG_ECC_ENABLE BIT(22)
 
-#define K8_NBSL 0x48
-
-
-/* Family F10h: Normalized Extended Error Codes */
-#define F10_NBSL_EXT_ERR_RES 0x0
+/* F3x48: NBSL */
 #define F10_NBSL_EXT_ERR_ECC 0x8
+#define NBSL_PP_OBS 0x2
 
-/* Next two are overloaded values */
-#define F10_NBSL_EXT_ERR_LINK_PROTO 0xB
-#define F10_NBSL_EXT_ERR_L3_PROTO 0xB
-
-#define F10_NBSL_EXT_ERR_NB_ARRAY 0xC
-#define F10_NBSL_EXT_ERR_DRAM_PARITY 0xD
-#define F10_NBSL_EXT_ERR_LINK_RETRY 0xE
-
-/* Next two are overloaded values */
-#define F10_NBSL_EXT_ERR_GART_WALK 0xF
-#define F10_NBSL_EXT_ERR_DEV_WALK 0xF
-
-/* 0x10 to 0x1B: Reserved */
-#define F10_NBSL_EXT_ERR_L3_DATA 0x1C
-#define F10_NBSL_EXT_ERR_L3_TAG 0x1D
-#define F10_NBSL_EXT_ERR_L3_LRU 0x1E
-
-/* K8: Normalized Extended Error Codes */
-#define K8_NBSL_EXT_ERR_ECC 0x0
-#define K8_NBSL_EXT_ERR_CRC 0x1
-#define K8_NBSL_EXT_ERR_SYNC 0x2
-#define K8_NBSL_EXT_ERR_MST 0x3
-#define K8_NBSL_EXT_ERR_TGT 0x4
-#define K8_NBSL_EXT_ERR_GART 0x5
-#define K8_NBSL_EXT_ERR_RMW 0x6
-#define K8_NBSL_EXT_ERR_WDT 0x7
-#define K8_NBSL_EXT_ERR_CHIPKILL_ECC 0x8
-#define K8_NBSL_EXT_ERR_DRAM_PARITY 0xD
-
-/*
- * The following are for BUS type errors AFTER values have been normalized by
- * shifting right
- */
-#define K8_NBSL_PP_SRC 0x0
-#define K8_NBSL_PP_RES 0x1
-#define K8_NBSL_PP_OBS 0x2
-#define K8_NBSL_PP_GENERIC 0x3
-
-#define EXTRACT_ERR_CPU_MAP(x) ((x) & 0xF)
-
-#define K8_NBEAL 0x50
-#define K8_NBEAH 0x54
-#define K8_SCRCTRL 0x58
-
-#define F10_NB_CFG_LOW 0x88
+#define SCRCTRL 0x58
 
 #define F10_ONLINE_SPARE 0xB0
-#define F10_ONLINE_SPARE_SWAPDONE0(x) ((x) & BIT(1))
-#define F10_ONLINE_SPARE_SWAPDONE1(x) ((x) & BIT(3))
-#define F10_ONLINE_SPARE_BADDRAM_CS0(x) (((x) >> 4) & 0x00000007)
-#define F10_ONLINE_SPARE_BADDRAM_CS1(x) (((x) >> 8) & 0x00000007)
+#define online_spare_swap_done(pvt, c) (((pvt)->online_spare >> (1 + 2 * (c))) & 0x1)
+#define online_spare_bad_dramcs(pvt, c) (((pvt)->online_spare >> (4 + 4 * (c))) & 0x7)
 
 #define F10_NB_ARRAY_ADDR 0xB8
-
-#define F10_NB_ARRAY_DRAM_ECC 0x80000000
+#define F10_NB_ARRAY_DRAM_ECC BIT(31)
 
 /* Bits [2:1] are used to select 16-byte section within a 64-byte cacheline */
 #define SET_NB_ARRAY_ADDRESS(section) (((section) & 0x3) << 1)
 
 #define F10_NB_ARRAY_DATA 0xBC
-
 #define SET_NB_DRAM_INJECTION_WRITE(word, bits) \
         (BIT(((word) & 0xF) + 20) | \
         BIT(17) | bits)
-
 #define SET_NB_DRAM_INJECTION_READ(word, bits) \
         (BIT(((word) & 0xF) + 20) | \
         BIT(16) | bits)
 
-#define K8_NBCAP 0xE8
-#define K8_NBCAP_CORES (BIT(12)|BIT(13))
-#define K8_NBCAP_CHIPKILL BIT(4)
-#define K8_NBCAP_SECDED BIT(3)
-#define K8_NBCAP_DCT_DUAL BIT(0)
+#define NBCAP 0xE8
+#define NBCAP_CHIPKILL BIT(4)
+#define NBCAP_SECDED BIT(3)
+#define NBCAP_DCT_DUAL BIT(0)
 
 #define EXT_NB_MCA_CFG 0x180
 
 /* MSRs */
-#define K8_MSR_MCGCTL_NBE BIT(4)
-
-#define K8_MSR_MC4CTL 0x0410
-#define K8_MSR_MC4STAT 0x0411
-#define K8_MSR_MC4ADDR 0x0412
+#define MSR_MCGCTL_NBE BIT(4)
 
 /* AMD sets the first MC device at device ID 0x18. */
-static inline int get_node_id(struct pci_dev *pdev)
+static inline u8 get_node_id(struct pci_dev *pdev)
 {
         return PCI_SLOT(pdev->devfn) - 0x18;
 }
 
-enum amd64_chipset_families {
+enum amd_families {
         K8_CPUS = 0,
         F10_CPUS,
+        F15_CPUS,
+        NUM_FAMILIES,
 };
 
 /* Error injection control structure */
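
The four per-channel online-spare accessors collapse into two macros that take the channel as a parameter. A user-space sketch, not part of the patch, that passes the register value as a plain integer (instead of reading pvt->online_spare) and checks that the old and new forms select the same bits:

#include <assert.h>
#include <stdint.h>

/* Old per-channel form (removed by the patch), with BIT() expanded. */
#define F10_ONLINE_SPARE_SWAPDONE0(x) ((x) & (1u << 1))
#define F10_ONLINE_SPARE_SWAPDONE1(x) ((x) & (1u << 3))
#define F10_ONLINE_SPARE_BADDRAM_CS0(x) (((x) >> 4) & 0x7)
#define F10_ONLINE_SPARE_BADDRAM_CS1(x) (((x) >> 8) & 0x7)

/* New channel-parameterized form, operating on a bare register value. */
#define online_spare_swap_done(x, c) (((x) >> (1 + 2 * (c))) & 0x1)
#define online_spare_bad_dramcs(x, c) (((x) >> (4 + 4 * (c))) & 0x7)

int main(void)
{
        uint32_t reg = 0x0000072a; /* arbitrary test pattern */

        assert(!!F10_ONLINE_SPARE_SWAPDONE0(reg) == online_spare_swap_done(reg, 0));
        assert(!!F10_ONLINE_SPARE_SWAPDONE1(reg) == online_spare_swap_done(reg, 1));
        assert(F10_ONLINE_SPARE_BADDRAM_CS0(reg) == online_spare_bad_dramcs(reg, 0));
        assert(F10_ONLINE_SPARE_BADDRAM_CS1(reg) == online_spare_bad_dramcs(reg, 1));
        return 0;
}
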
@@ -392,13 +307,35 @@ struct error_injection {
         u32 bit_map;
 };
 
+/* low and high part of PCI config space regs */
+struct reg_pair {
+        u32 lo, hi;
+};
+
+/*
+ * See F1x[1, 0][7C:40] DRAM Base/Limit Registers
+ */
+struct dram_range {
+        struct reg_pair base;
+        struct reg_pair lim;
+};
+
+/* A DCT chip selects collection */
+struct chip_select {
+        u32 csbases[NUM_CHIPSELECTS];
+        u8 b_cnt;
+
+        u32 csmasks[NUM_CHIPSELECTS];
+        u8 m_cnt;
+};
+
 struct amd64_pvt {
         struct low_ops *ops;
 
         /* pci_device handles which we utilize */
         struct pci_dev *F1, *F2, *F3;
 
-        int mc_node_id;      /* MC index of this MC node */
+        unsigned mc_node_id; /* MC index of this MC node */
         int ext_model;       /* extended model value of this node */
         int channel_count;
 
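
The new reg_pair, dram_range and chip_select types replace the parallel dcsb0/dcsb1/dcsm0/dcsm1 and dram_* arrays dropped from amd64_pvt in the next hunk. A minimal user-space sketch, with stubbed types and invented register values, of how csrow_enabled() from the Function 2 hunk indexes the per-DCT chip selects:

#include <stdint.h>
#include <stdio.h>

typedef uint32_t u32;
typedef uint8_t u8;

#define NUM_CHIPSELECTS 8
#define DCSB_CS_ENABLE (1u << 0)

/* Mirrors struct chip_select from the hunk above. */
struct chip_select {
        u32 csbases[NUM_CHIPSELECTS];
        u8 b_cnt;
        u32 csmasks[NUM_CHIPSELECTS];
        u8 m_cnt;
};

/* Cut-down stand-in for amd64_pvt, just enough for csrow_enabled(). */
struct pvt_stub {
        struct chip_select csels[2]; /* one per DCT, as in the patch */
};

#define csrow_enabled(i, dct, pvt) ((pvt)->csels[(dct)].csbases[(i)] & DCSB_CS_ENABLE)

int main(void)
{
        static struct pvt_stub pvt;

        pvt.csels[0].csbases[2] = 0x00400001; /* base bits plus the enable bit */

        printf("csrow 2 on DCT0 enabled: %d\n", !!csrow_enabled(2, 0, &pvt));
        printf("csrow 3 on DCT0 enabled: %d\n", !!csrow_enabled(3, 0, &pvt));
        return 0;
}
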
@@ -414,60 +351,50 @@ struct amd64_pvt {
         u32 dbam0;  /* DRAM Base Address Mapping reg for DCT0 */
         u32 dbam1;  /* DRAM Base Address Mapping reg for DCT1 */
 
-        /* DRAM CS Base Address Registers F2x[1,0][5C:40] */
-        u32 dcsb0[MAX_CS_COUNT];
-        u32 dcsb1[MAX_CS_COUNT];
-
-        /* DRAM CS Mask Registers F2x[1,0][6C:60] */
-        u32 dcsm0[MAX_CS_COUNT];
-        u32 dcsm1[MAX_CS_COUNT];
-
-        /*
-         * Decoded parts of DRAM BASE and LIMIT Registers
-         * F1x[78,70,68,60,58,50,48,40]
-         */
-        u64 dram_base[DRAM_REG_COUNT];
-        u64 dram_limit[DRAM_REG_COUNT];
-        u8 dram_IntlvSel[DRAM_REG_COUNT];
-        u8 dram_IntlvEn[DRAM_REG_COUNT];
-        u8 dram_DstNode[DRAM_REG_COUNT];
-        u8 dram_rw_en[DRAM_REG_COUNT];
-
-        /*
-         * The following fields are set at (load) run time, after CPU revision
-         * has been determined, since the dct_base and dct_mask registers vary
-         * based on revision
-         */
-        u32 dcsb_base;        /* DCSB base bits */
-        u32 dcsm_mask;        /* DCSM mask bits */
-        u32 cs_count;         /* num chip selects (== num DCSB registers) */
-        u32 num_dcsm;         /* Number of DCSM registers */
-        u32 dcs_mask_notused; /* DCSM notused mask bits */
-        u32 dcs_shift;        /* DCSB and DCSM shift value */
+        /* one for each DCT */
+        struct chip_select csels[2];
+
+        /* DRAM base and limit pairs F1x[78,70,68,60,58,50,48,40] */
+        struct dram_range ranges[DRAM_RANGES];
 
         u64 top_mem;  /* top of memory below 4GB */
         u64 top_mem2; /* top of memory above 4GB */
 
-        u32 dram_ctl_select_low;  /* DRAM Controller Select Low Reg */
-        u32 dram_ctl_select_high; /* DRAM Controller Select High Reg */
+        u32 dct_sel_lo;   /* DRAM Controller Select Low */
+        u32 dct_sel_hi;   /* DRAM Controller Select High */
         u32 online_spare; /* On-Line spare Reg */
 
         /* x4 or x8 syndromes in use */
-        u8 syn_type;
-
-        /* temp storage for when input is received from sysfs */
-        struct err_regs ctl_error_info;
+        u8 ecc_sym_sz;
 
         /* place to store error injection parameters prior to issue */
         struct error_injection injection;
+};
 
-        /* DCT per-family scrubrate setting */
-        u32 min_scrubrate;
+static inline u64 get_dram_base(struct amd64_pvt *pvt, unsigned i)
+{
+        u64 addr = ((u64)pvt->ranges[i].base.lo & 0xffff0000) << 8;
 
-        /* family name this instance is running on */
-        const char *ctl_name;
+        if (boot_cpu_data.x86 == 0xf)
+                return addr;
 
-};
+        return (((u64)pvt->ranges[i].base.hi & 0x000000ff) << 40) | addr;
+}
+
+static inline u64 get_dram_limit(struct amd64_pvt *pvt, unsigned i)
+{
+        u64 lim = (((u64)pvt->ranges[i].lim.lo & 0xffff0000) << 8) | 0x00ffffff;
+
+        if (boot_cpu_data.x86 == 0xf)
+                return lim;
+
+        return (((u64)pvt->ranges[i].lim.hi & 0x000000ff) << 40) | lim;
+}
+
+static inline u16 extract_syndrome(u64 status)
+{
+        return ((status >> 47) & 0xff) | ((status >> 16) & 0xff00);
+}
 
 /*
  * per-node ECC settings descriptor
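
get_dram_base() and get_dram_limit() above assemble a range from its reg_pair: bits [31:16] of the low register supply address bits [39:24], and for families newer than K8 the low byte of the high register supplies bits [47:40]. A stand-alone sketch of the same arithmetic for the non-K8 path, with invented register values:

#include <stdint.h>
#include <stdio.h>

struct reg_pair { uint32_t lo, hi; };

/* Same arithmetic as get_dram_base()/get_dram_limit() for families newer
 * than K8, operating on a bare reg_pair instead of pvt->ranges[i]. */
static uint64_t dram_base(struct reg_pair base)
{
        uint64_t addr = ((uint64_t)base.lo & 0xffff0000) << 8;

        return (((uint64_t)base.hi & 0x000000ff) << 40) | addr;
}

static uint64_t dram_limit(struct reg_pair lim)
{
        uint64_t l = (((uint64_t)lim.lo & 0xffff0000) << 8) | 0x00ffffff;

        return (((uint64_t)lim.hi & 0x000000ff) << 40) | l;
}

int main(void)
{
        /* Invented register contents: base at 4GB, limit just below 8GB. */
        struct reg_pair base = { .lo = 0x01000003, .hi = 0x0 };
        struct reg_pair lim  = { .lo = 0x01ff0000, .hi = 0x0 };

        printf("base  = 0x%010llx\n", (unsigned long long)dram_base(base));
        printf("limit = 0x%010llx\n", (unsigned long long)dram_limit(lim));
        return 0;
}
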
@@ -482,14 +409,6 @@ struct ecc_settings {
         } flags;
 };
 
-extern const char *tt_msgs[4];
-extern const char *ll_msgs[4];
-extern const char *rrrr_msgs[16];
-extern const char *to_msgs[2];
-extern const char *pp_msgs[4];
-extern const char *ii_msgs[4];
-extern const char *htlink_msgs[8];
-
 #ifdef CONFIG_EDAC_DEBUG
 #define NUM_DBG_ATTRS 5
 #else
@@ -511,14 +430,11 @@ extern struct mcidev_sysfs_attribute amd64_dbg_attrs[NUM_DBG_ATTRS],
  */
 struct low_ops {
         int (*early_channel_count) (struct amd64_pvt *pvt);
-
-        u64 (*get_error_address) (struct mem_ctl_info *mci,
-                                  struct err_regs *info);
-        void (*read_dram_base_limit) (struct amd64_pvt *pvt, int dram);
-        void (*read_dram_ctl_register) (struct amd64_pvt *pvt);
-        void (*map_sysaddr_to_csrow) (struct mem_ctl_info *mci,
-                                      struct err_regs *info, u64 SystemAddr);
-        int (*dbam_to_cs) (struct amd64_pvt *pvt, int cs_mode);
+        void (*map_sysaddr_to_csrow) (struct mem_ctl_info *mci, u64 sys_addr,
+                                      u16 syndrome);
+        int (*dbam_to_cs) (struct amd64_pvt *pvt, u8 dct, unsigned cs_mode);
+        int (*read_dct_pci_cfg) (struct amd64_pvt *pvt, int offset,
+                                 u32 *val, const char *func);
 };
 
 struct amd64_family_type {
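
struct low_ops is the per-family hook table the driver dispatches through; the actual tables are filled in amd64_edac.c. A hypothetical sketch, with invented function names, of what one family's table could look like with the reworked signatures:

/* Illustration only: the function names below are invented. */
static int ex_early_channel_count(struct amd64_pvt *pvt);
static void ex_map_sysaddr_to_csrow(struct mem_ctl_info *mci, u64 sys_addr,
                                    u16 syndrome);
static int ex_dbam_to_chip_select(struct amd64_pvt *pvt, u8 dct,
                                  unsigned cs_mode);
static int ex_read_dct_pci_cfg(struct amd64_pvt *pvt, int offset, u32 *val,
                               const char *func);

static struct low_ops example_ops = {
        .early_channel_count  = ex_early_channel_count,
        .map_sysaddr_to_csrow = ex_map_sysaddr_to_csrow,
        .dbam_to_cs           = ex_dbam_to_chip_select,
        .read_dct_pci_cfg     = ex_read_dct_pci_cfg,
};
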
@@ -527,28 +443,17 @@ struct amd64_family_type {
         struct low_ops ops;
 };
 
-static inline int amd64_read_pci_cfg_dword(struct pci_dev *pdev, int offset,
-                                           u32 *val, const char *func)
-{
-        int err = 0;
-
-        err = pci_read_config_dword(pdev, offset, val);
-        if (err)
-                amd64_warn("%s: error reading F%dx%x.\n",
-                           func, PCI_FUNC(pdev->devfn), offset);
-
-        return err;
-}
+int __amd64_write_pci_cfg_dword(struct pci_dev *pdev, int offset,
+                                u32 val, const char *func);
 
 #define amd64_read_pci_cfg(pdev, offset, val) \
-        amd64_read_pci_cfg_dword(pdev, offset, val, __func__)
+        __amd64_read_pci_cfg_dword(pdev, offset, val, __func__)
 
-/*
- * For future CPU versions, verify the following as new 'slow' rates appear and
- * modify the necessary skip values for the supported CPU.
- */
-#define K8_MIN_SCRUB_RATE_BITS 0x0
-#define F10_MIN_SCRUB_RATE_BITS 0x5
+#define amd64_write_pci_cfg(pdev, offset, val) \
+        __amd64_write_pci_cfg_dword(pdev, offset, val, __func__)
+
+#define amd64_read_dct_pci_cfg(pvt, offset, val) \
+        pvt->ops->read_dct_pci_cfg(pvt, offset, val, __func__)
 
 int amd64_get_dram_hole_info(struct mem_ctl_info *mci, u64 *hole_base,
                              u64 *hole_offset, u64 *hole_size);
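
The config-space wrappers now pass __func__ through so the out-of-line helpers can name the calling function in warnings, and DCT-window reads go through the per-family read_dct_pci_cfg hook. Hypothetical call sites, for illustration only (not from the patch):

/* Illustration only: helper name and usage are invented. */
static void example_read_dram_range(struct amd64_pvt *pvt, unsigned range)
{
        /* F1x40, F1x48, ...: the base/limit register pairs are 8 bytes apart. */
        amd64_read_pci_cfg(pvt->F1, DRAM_BASE_LO + (range << 3),
                           &pvt->ranges[range].base.lo);
        amd64_read_pci_cfg(pvt->F1, DRAM_LIMIT_LO + (range << 3),
                           &pvt->ranges[range].lim.lo);

        /* DCT registers go through the per-family hook. */
        amd64_read_dct_pci_cfg(pvt, DBAM0, &pvt->dbam0);
}
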