-rw-r--r--   drivers/edac/altera_edac.c   |  26
-rw-r--r--   drivers/edac/i5000_edac.c    |   6
-rw-r--r--   drivers/edac/i5400_edac.c    |   4
-rw-r--r--   drivers/edac/ie31200_edac.c  |  13
-rw-r--r--   drivers/edac/mce_amd.c       |   2
-rw-r--r--   drivers/edac/mv64x60_edac.c  |  88
-rw-r--r--   drivers/edac/pnd2_edac.c     |  20
-rw-r--r--   drivers/edac/sb_edac.c       | 682
-rw-r--r--   drivers/edac/thunderx_edac.c |   2
9 files changed, 399 insertions, 444 deletions
diff --git a/drivers/edac/altera_edac.c b/drivers/edac/altera_edac.c
index 7717b094fabb..db75d4b614f7 100644
--- a/drivers/edac/altera_edac.c
+++ b/drivers/edac/altera_edac.c
@@ -214,24 +214,16 @@ static void altr_sdr_mc_create_debugfs_nodes(struct mem_ctl_info *mci) | |||
214 | static unsigned long get_total_mem(void) | 214 | static unsigned long get_total_mem(void) |
215 | { | 215 | { |
216 | struct device_node *np = NULL; | 216 | struct device_node *np = NULL; |
217 | const unsigned int *reg, *reg_end; | 217 | struct resource res; |
218 | int len, sw, aw; | 218 | int ret; |
219 | unsigned long start, size, total_mem = 0; | 219 | unsigned long total_mem = 0; |
220 | 220 | ||
221 | for_each_node_by_type(np, "memory") { | 221 | for_each_node_by_type(np, "memory") { |
222 | aw = of_n_addr_cells(np); | 222 | ret = of_address_to_resource(np, 0, &res); |
223 | sw = of_n_size_cells(np); | 223 | if (ret) |
224 | reg = (const unsigned int *)of_get_property(np, "reg", &len); | 224 | continue; |
225 | reg_end = reg + (len / sizeof(u32)); | 225 | |
226 | 226 | total_mem += resource_size(&res); | |
227 | total_mem = 0; | ||
228 | do { | ||
229 | start = of_read_number(reg, aw); | ||
230 | reg += aw; | ||
231 | size = of_read_number(reg, sw); | ||
232 | reg += sw; | ||
233 | total_mem += size; | ||
234 | } while (reg < reg_end); | ||
235 | } | 227 | } |
236 | edac_dbg(0, "total_mem 0x%lx\n", total_mem); | 228 | edac_dbg(0, "total_mem 0x%lx\n", total_mem); |
237 | return total_mem; | 229 | return total_mem; |
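Read off the right-hand column, this hunk collapses the hand-rolled #address-cells/#size-cells walk into a single of_address_to_resource() call per memory node. For reference, the post-patch helper reassembled in one piece (a sketch; it assumes the usual <linux/of_address.h> and <linux/ioport.h> helpers are already pulled in by the driver):

        /* Sketch of get_total_mem() after this hunk: let the OF core parse
         * each memory node's "reg" property instead of decoding it by hand. */
        static unsigned long get_total_mem(void)
        {
                struct device_node *np = NULL;
                struct resource res;
                unsigned long total_mem = 0;
                int ret;

                for_each_node_by_type(np, "memory") {
                        ret = of_address_to_resource(np, 0, &res);
                        if (ret)
                                continue;       /* node without a usable "reg" */

                        total_mem += resource_size(&res);
                }
                edac_dbg(0, "total_mem 0x%lx\n", total_mem);
                return total_mem;
        }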
@@ -1839,7 +1831,7 @@ static int a10_eccmgr_irqdomain_map(struct irq_domain *d, unsigned int irq, | |||
1839 | return 0; | 1831 | return 0; |
1840 | } | 1832 | } |
1841 | 1833 | ||
1842 | static struct irq_domain_ops a10_eccmgr_ic_ops = { | 1834 | static const struct irq_domain_ops a10_eccmgr_ic_ops = { |
1843 | .map = a10_eccmgr_irqdomain_map, | 1835 | .map = a10_eccmgr_irqdomain_map, |
1844 | .xlate = irq_domain_xlate_twocell, | 1836 | .xlate = irq_domain_xlate_twocell, |
1845 | }; | 1837 | }; |
diff --git a/drivers/edac/i5000_edac.c b/drivers/edac/i5000_edac.c
index f683919981b0..8f5a56e25bd2 100644
--- a/drivers/edac/i5000_edac.c
+++ b/drivers/edac/i5000_edac.c
@@ -227,7 +227,7 @@ | |||
227 | #define NREC_RDWR(x) (((x)>>11) & 1) | 227 | #define NREC_RDWR(x) (((x)>>11) & 1) |
228 | #define NREC_RANK(x) (((x)>>8) & 0x7) | 228 | #define NREC_RANK(x) (((x)>>8) & 0x7) |
229 | #define NRECMEMB 0xC0 | 229 | #define NRECMEMB 0xC0 |
230 | #define NREC_CAS(x) (((x)>>16) & 0xFFFFFF) | 230 | #define NREC_CAS(x) (((x)>>16) & 0xFFF) |
231 | #define NREC_RAS(x) ((x) & 0x7FFF) | 231 | #define NREC_RAS(x) ((x) & 0x7FFF) |
232 | #define NRECFGLOG 0xC4 | 232 | #define NRECFGLOG 0xC4 |
233 | #define NREEECFBDA 0xC8 | 233 | #define NREEECFBDA 0xC8 |
@@ -371,7 +371,7 @@ struct i5000_error_info { | |||
371 | /* These registers are input ONLY if there was a | 371 | /* These registers are input ONLY if there was a |
372 | * Non-Recoverable Error */ | 372 | * Non-Recoverable Error */ |
373 | u16 nrecmema; /* Non-Recoverable Mem log A */ | 373 | u16 nrecmema; /* Non-Recoverable Mem log A */ |
374 | u16 nrecmemb; /* Non-Recoverable Mem log B */ | 374 | u32 nrecmemb; /* Non-Recoverable Mem log B */ |
375 | 375 | ||
376 | }; | 376 | }; |
377 | 377 | ||
@@ -407,7 +407,7 @@ static void i5000_get_error_info(struct mem_ctl_info *mci, | |||
407 | NERR_FAT_FBD, &info->nerr_fat_fbd); | 407 | NERR_FAT_FBD, &info->nerr_fat_fbd); |
408 | pci_read_config_word(pvt->branchmap_werrors, | 408 | pci_read_config_word(pvt->branchmap_werrors, |
409 | NRECMEMA, &info->nrecmema); | 409 | NRECMEMA, &info->nrecmema); |
410 | pci_read_config_word(pvt->branchmap_werrors, | 410 | pci_read_config_dword(pvt->branchmap_werrors, |
411 | NRECMEMB, &info->nrecmemb); | 411 | NRECMEMB, &info->nrecmemb); |
412 | 412 | ||
413 | /* Clear the error bits, by writing them back */ | 413 | /* Clear the error bits, by writing them back */ |
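The point of the hunks above (and of the identical i5400 hunks below): NRECMEMB is a 32-bit register, so logging it in a u16 and fetching it with pci_read_config_word() silently dropped the upper half, and the old 24-bit CAS mask reached past the field. A toy, userspace-only illustration of the corrected field extraction — the masks come from the new macros, the register value is invented purely for the demonstration:

        #include <stdint.h>
        #include <stdio.h>

        /* New i5000 layout: CAS is a 12-bit field at bits 27:16 of the
         * 32-bit NRECMEMB register; RAS stays at bits 14:0. */
        #define NREC_CAS(x)     (((x) >> 16) & 0xFFF)
        #define NREC_RAS(x)     ((x) & 0x7FFF)

        int main(void)
        {
                uint32_t nrecmemb = 0x0ABC1234; /* hypothetical register value */

                printf("CAS=0x%x RAS=0x%x\n", NREC_CAS(nrecmemb), NREC_RAS(nrecmemb));
                return 0;
        }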
diff --git a/drivers/edac/i5400_edac.c b/drivers/edac/i5400_edac.c
index 37a9ba71da44..cd889edc8516 100644
--- a/drivers/edac/i5400_edac.c
+++ b/drivers/edac/i5400_edac.c
@@ -368,7 +368,7 @@ struct i5400_error_info { | |||
368 | 368 | ||
369 | /* These registers are input ONLY if there was a Non-Rec Error */ | 369 | /* These registers are input ONLY if there was a Non-Rec Error */ |
370 | u16 nrecmema; /* Non-Recoverable Mem log A */ | 370 | u16 nrecmema; /* Non-Recoverable Mem log A */ |
371 | u16 nrecmemb; /* Non-Recoverable Mem log B */ | 371 | u32 nrecmemb; /* Non-Recoverable Mem log B */ |
372 | 372 | ||
373 | }; | 373 | }; |
374 | 374 | ||
@@ -458,7 +458,7 @@ static void i5400_get_error_info(struct mem_ctl_info *mci, | |||
458 | NERR_FAT_FBD, &info->nerr_fat_fbd); | 458 | NERR_FAT_FBD, &info->nerr_fat_fbd); |
459 | pci_read_config_word(pvt->branchmap_werrors, | 459 | pci_read_config_word(pvt->branchmap_werrors, |
460 | NRECMEMA, &info->nrecmema); | 460 | NRECMEMA, &info->nrecmema); |
461 | pci_read_config_word(pvt->branchmap_werrors, | 461 | pci_read_config_dword(pvt->branchmap_werrors, |
462 | NRECMEMB, &info->nrecmemb); | 462 | NRECMEMB, &info->nrecmemb); |
463 | 463 | ||
464 | /* Clear the error bits, by writing them back */ | 464 | /* Clear the error bits, by writing them back */ |
diff --git a/drivers/edac/ie31200_edac.c b/drivers/edac/ie31200_edac.c
index 2733fb5938a4..4260579e6901 100644
--- a/drivers/edac/ie31200_edac.c
+++ b/drivers/edac/ie31200_edac.c
@@ -18,10 +18,12 @@ | |||
18 | * 0c04: Xeon E3-1200 v3/4th Gen Core Processor DRAM Controller | 18 | * 0c04: Xeon E3-1200 v3/4th Gen Core Processor DRAM Controller |
19 | * 0c08: Xeon E3-1200 v3 Processor DRAM Controller | 19 | * 0c08: Xeon E3-1200 v3 Processor DRAM Controller |
20 | * 1918: Xeon E3-1200 v5 Skylake Host Bridge/DRAM Registers | 20 | * 1918: Xeon E3-1200 v5 Skylake Host Bridge/DRAM Registers |
21 | * 5918: Xeon E3-1200 Xeon E3-1200 v6/7th Gen Core Processor Host Bridge/DRAM Registers | ||
21 | * | 22 | * |
22 | * Based on Intel specification: | 23 | * Based on Intel specification: |
23 | * http://www.intel.com/content/dam/www/public/us/en/documents/datasheets/xeon-e3-1200v3-vol-2-datasheet.pdf | 24 | * http://www.intel.com/content/dam/www/public/us/en/documents/datasheets/xeon-e3-1200v3-vol-2-datasheet.pdf |
24 | * http://www.intel.com/content/www/us/en/processors/xeon/xeon-e3-1200-family-vol-2-datasheet.html | 25 | * http://www.intel.com/content/www/us/en/processors/xeon/xeon-e3-1200-family-vol-2-datasheet.html |
26 | * http://www.intel.com/content/www/us/en/processors/core/7th-gen-core-family-mobile-h-processor-lines-datasheet-vol-2.html | ||
25 | * | 27 | * |
26 | * According to the above datasheet (p.16): | 28 | * According to the above datasheet (p.16): |
27 | * " | 29 | * " |
@@ -57,6 +59,7 @@ | |||
57 | #define PCI_DEVICE_ID_INTEL_IE31200_HB_6 0x0c04 | 59 | #define PCI_DEVICE_ID_INTEL_IE31200_HB_6 0x0c04 |
58 | #define PCI_DEVICE_ID_INTEL_IE31200_HB_7 0x0c08 | 60 | #define PCI_DEVICE_ID_INTEL_IE31200_HB_7 0x0c08 |
59 | #define PCI_DEVICE_ID_INTEL_IE31200_HB_8 0x1918 | 61 | #define PCI_DEVICE_ID_INTEL_IE31200_HB_8 0x1918 |
62 | #define PCI_DEVICE_ID_INTEL_IE31200_HB_9 0x5918 | ||
60 | 63 | ||
61 | #define IE31200_DIMMS 4 | 64 | #define IE31200_DIMMS 4 |
62 | #define IE31200_RANKS 8 | 65 | #define IE31200_RANKS 8 |
@@ -376,7 +379,12 @@ static int ie31200_probe1(struct pci_dev *pdev, int dev_idx) | |||
376 | void __iomem *window; | 379 | void __iomem *window; |
377 | struct ie31200_priv *priv; | 380 | struct ie31200_priv *priv; |
378 | u32 addr_decode, mad_offset; | 381 | u32 addr_decode, mad_offset; |
379 | bool skl = (pdev->device == PCI_DEVICE_ID_INTEL_IE31200_HB_8); | 382 | |
383 | /* | ||
384 | * Kaby Lake seems to work like Skylake. Please re-visit this logic | ||
385 | * when adding new CPU support. | ||
386 | */ | ||
387 | bool skl = (pdev->device >= PCI_DEVICE_ID_INTEL_IE31200_HB_8); | ||
380 | 388 | ||
381 | edac_dbg(0, "MC:\n"); | 389 | edac_dbg(0, "MC:\n"); |
382 | 390 | ||
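The ">=" test works because the Skylake-class bridges happen to carry the numerically largest IDs handled by this driver. A small stand-alone check over the IDs quoted in this file section (device IDs copied from the defines above; the loop itself is only illustrative):

        #include <stdbool.h>
        #include <stdio.h>

        #define PCI_DEVICE_ID_INTEL_IE31200_HB_6 0x0c04
        #define PCI_DEVICE_ID_INTEL_IE31200_HB_7 0x0c08
        #define PCI_DEVICE_ID_INTEL_IE31200_HB_8 0x1918 /* Skylake   */
        #define PCI_DEVICE_ID_INTEL_IE31200_HB_9 0x5918 /* Kaby Lake */

        int main(void)
        {
                const unsigned int ids[] = {
                        PCI_DEVICE_ID_INTEL_IE31200_HB_6,
                        PCI_DEVICE_ID_INTEL_IE31200_HB_7,
                        PCI_DEVICE_ID_INTEL_IE31200_HB_8,
                        PCI_DEVICE_ID_INTEL_IE31200_HB_9,
                };

                for (unsigned int i = 0; i < sizeof(ids) / sizeof(ids[0]); i++) {
                        /* same test as in ie31200_probe1() after this patch */
                        bool skl = (ids[i] >= PCI_DEVICE_ID_INTEL_IE31200_HB_8);

                        printf("0x%04x -> %s layout\n", ids[i], skl ? "skl" : "pre-skl");
                }
                return 0;
        }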
@@ -560,6 +568,9 @@ static const struct pci_device_id ie31200_pci_tbl[] = { | |||
560 | PCI_VEND_DEV(INTEL, IE31200_HB_8), PCI_ANY_ID, PCI_ANY_ID, 0, 0, | 568 | PCI_VEND_DEV(INTEL, IE31200_HB_8), PCI_ANY_ID, PCI_ANY_ID, 0, 0, |
561 | IE31200}, | 569 | IE31200}, |
562 | { | 570 | { |
571 | PCI_VEND_DEV(INTEL, IE31200_HB_9), PCI_ANY_ID, PCI_ANY_ID, 0, 0, | ||
572 | IE31200}, | ||
573 | { | ||
563 | 0, | 574 | 0, |
564 | } /* 0 terminated list. */ | 575 | } /* 0 terminated list. */ |
565 | }; | 576 | }; |
diff --git a/drivers/edac/mce_amd.c b/drivers/edac/mce_amd.c
index ba35b7ea3686..9a2658a256a9 100644
--- a/drivers/edac/mce_amd.c
+++ b/drivers/edac/mce_amd.c
@@ -161,7 +161,7 @@ static const char * const smca_ls_mce_desc[] = { | |||
161 | "Sys Read data error thread 0", | 161 | "Sys Read data error thread 0", |
162 | "Sys read data error thread 1", | 162 | "Sys read data error thread 1", |
163 | "DC tag error type 2", | 163 | "DC tag error type 2", |
164 | "DC data error type 1 (poison comsumption)", | 164 | "DC data error type 1 (poison consumption)", |
165 | "DC data error type 2", | 165 | "DC data error type 2", |
166 | "DC data error type 3", | 166 | "DC data error type 3", |
167 | "DC tag error type 4", | 167 | "DC tag error type 4", |
diff --git a/drivers/edac/mv64x60_edac.c b/drivers/edac/mv64x60_edac.c
index 14b7e7b71eaa..d3650df94fe8 100644
--- a/drivers/edac/mv64x60_edac.c
+++ b/drivers/edac/mv64x60_edac.c
@@ -32,21 +32,21 @@ static void mv64x60_pci_check(struct edac_pci_ctl_info *pci) | |||
32 | struct mv64x60_pci_pdata *pdata = pci->pvt_info; | 32 | struct mv64x60_pci_pdata *pdata = pci->pvt_info; |
33 | u32 cause; | 33 | u32 cause; |
34 | 34 | ||
35 | cause = in_le32(pdata->pci_vbase + MV64X60_PCI_ERROR_CAUSE); | 35 | cause = readl(pdata->pci_vbase + MV64X60_PCI_ERROR_CAUSE); |
36 | if (!cause) | 36 | if (!cause) |
37 | return; | 37 | return; |
38 | 38 | ||
39 | printk(KERN_ERR "Error in PCI %d Interface\n", pdata->pci_hose); | 39 | printk(KERN_ERR "Error in PCI %d Interface\n", pdata->pci_hose); |
40 | printk(KERN_ERR "Cause register: 0x%08x\n", cause); | 40 | printk(KERN_ERR "Cause register: 0x%08x\n", cause); |
41 | printk(KERN_ERR "Address Low: 0x%08x\n", | 41 | printk(KERN_ERR "Address Low: 0x%08x\n", |
42 | in_le32(pdata->pci_vbase + MV64X60_PCI_ERROR_ADDR_LO)); | 42 | readl(pdata->pci_vbase + MV64X60_PCI_ERROR_ADDR_LO)); |
43 | printk(KERN_ERR "Address High: 0x%08x\n", | 43 | printk(KERN_ERR "Address High: 0x%08x\n", |
44 | in_le32(pdata->pci_vbase + MV64X60_PCI_ERROR_ADDR_HI)); | 44 | readl(pdata->pci_vbase + MV64X60_PCI_ERROR_ADDR_HI)); |
45 | printk(KERN_ERR "Attribute: 0x%08x\n", | 45 | printk(KERN_ERR "Attribute: 0x%08x\n", |
46 | in_le32(pdata->pci_vbase + MV64X60_PCI_ERROR_ATTR)); | 46 | readl(pdata->pci_vbase + MV64X60_PCI_ERROR_ATTR)); |
47 | printk(KERN_ERR "Command: 0x%08x\n", | 47 | printk(KERN_ERR "Command: 0x%08x\n", |
48 | in_le32(pdata->pci_vbase + MV64X60_PCI_ERROR_CMD)); | 48 | readl(pdata->pci_vbase + MV64X60_PCI_ERROR_CMD)); |
49 | out_le32(pdata->pci_vbase + MV64X60_PCI_ERROR_CAUSE, ~cause); | 49 | writel(~cause, pdata->pci_vbase + MV64X60_PCI_ERROR_CAUSE); |
50 | 50 | ||
51 | if (cause & MV64X60_PCI_PE_MASK) | 51 | if (cause & MV64X60_PCI_PE_MASK) |
52 | edac_pci_handle_pe(pci, pci->ctl_name); | 52 | edac_pci_handle_pe(pci, pci->ctl_name); |
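Every remaining mv64x60 hunk is this same mechanical substitution: the powerpc-only in_le32()/out_le32() accessors become the generic readl()/writel() MMIO helpers, with the argument order swapped on the write side. A minimal before/after sketch — the two wrapper names here are made up purely for illustration:

        #include <linux/io.h>

        /* Illustration only: both pairs perform 32-bit little-endian MMIO,
         * but readl()/writel() exist on every architecture. */
        static u32 demo_read(void __iomem *base, unsigned long off)
        {
                return readl(base + off);       /* was: in_le32(base + off)       */
        }

        static void demo_write(void __iomem *base, unsigned long off, u32 val)
        {
                writel(val, base + off);        /* was: out_le32(base + off, val) */
        }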
@@ -61,7 +61,7 @@ static irqreturn_t mv64x60_pci_isr(int irq, void *dev_id) | |||
61 | struct mv64x60_pci_pdata *pdata = pci->pvt_info; | 61 | struct mv64x60_pci_pdata *pdata = pci->pvt_info; |
62 | u32 val; | 62 | u32 val; |
63 | 63 | ||
64 | val = in_le32(pdata->pci_vbase + MV64X60_PCI_ERROR_CAUSE); | 64 | val = readl(pdata->pci_vbase + MV64X60_PCI_ERROR_CAUSE); |
65 | if (!val) | 65 | if (!val) |
66 | return IRQ_NONE; | 66 | return IRQ_NONE; |
67 | 67 | ||
@@ -93,7 +93,7 @@ static int __init mv64x60_pci_fixup(struct platform_device *pdev) | |||
93 | if (!pci_serr) | 93 | if (!pci_serr) |
94 | return -ENOMEM; | 94 | return -ENOMEM; |
95 | 95 | ||
96 | out_le32(pci_serr, in_le32(pci_serr) & ~0x1); | 96 | writel(readl(pci_serr) & ~0x1, pci_serr); |
97 | iounmap(pci_serr); | 97 | iounmap(pci_serr); |
98 | 98 | ||
99 | return 0; | 99 | return 0; |
@@ -116,7 +116,7 @@ static int mv64x60_pci_err_probe(struct platform_device *pdev) | |||
116 | pdata = pci->pvt_info; | 116 | pdata = pci->pvt_info; |
117 | 117 | ||
118 | pdata->pci_hose = pdev->id; | 118 | pdata->pci_hose = pdev->id; |
119 | pdata->name = "mpc85xx_pci_err"; | 119 | pdata->name = "mv64x60_pci_err"; |
120 | platform_set_drvdata(pdev, pci); | 120 | platform_set_drvdata(pdev, pci); |
121 | pci->dev = &pdev->dev; | 121 | pci->dev = &pdev->dev; |
122 | pci->dev_name = dev_name(&pdev->dev); | 122 | pci->dev_name = dev_name(&pdev->dev); |
@@ -161,10 +161,10 @@ static int mv64x60_pci_err_probe(struct platform_device *pdev) | |||
161 | goto err; | 161 | goto err; |
162 | } | 162 | } |
163 | 163 | ||
164 | out_le32(pdata->pci_vbase + MV64X60_PCI_ERROR_CAUSE, 0); | 164 | writel(0, pdata->pci_vbase + MV64X60_PCI_ERROR_CAUSE); |
165 | out_le32(pdata->pci_vbase + MV64X60_PCI_ERROR_MASK, 0); | 165 | writel(0, pdata->pci_vbase + MV64X60_PCI_ERROR_MASK); |
166 | out_le32(pdata->pci_vbase + MV64X60_PCI_ERROR_MASK, | 166 | writel(MV64X60_PCIx_ERR_MASK_VAL, |
167 | MV64X60_PCIx_ERR_MASK_VAL); | 167 | pdata->pci_vbase + MV64X60_PCI_ERROR_MASK); |
168 | 168 | ||
169 | if (edac_pci_add_device(pci, pdata->edac_idx) > 0) { | 169 | if (edac_pci_add_device(pci, pdata->edac_idx) > 0) { |
170 | edac_dbg(3, "failed edac_pci_add_device()\n"); | 170 | edac_dbg(3, "failed edac_pci_add_device()\n"); |
@@ -233,23 +233,23 @@ static void mv64x60_sram_check(struct edac_device_ctl_info *edac_dev) | |||
233 | struct mv64x60_sram_pdata *pdata = edac_dev->pvt_info; | 233 | struct mv64x60_sram_pdata *pdata = edac_dev->pvt_info; |
234 | u32 cause; | 234 | u32 cause; |
235 | 235 | ||
236 | cause = in_le32(pdata->sram_vbase + MV64X60_SRAM_ERR_CAUSE); | 236 | cause = readl(pdata->sram_vbase + MV64X60_SRAM_ERR_CAUSE); |
237 | if (!cause) | 237 | if (!cause) |
238 | return; | 238 | return; |
239 | 239 | ||
240 | printk(KERN_ERR "Error in internal SRAM\n"); | 240 | printk(KERN_ERR "Error in internal SRAM\n"); |
241 | printk(KERN_ERR "Cause register: 0x%08x\n", cause); | 241 | printk(KERN_ERR "Cause register: 0x%08x\n", cause); |
242 | printk(KERN_ERR "Address Low: 0x%08x\n", | 242 | printk(KERN_ERR "Address Low: 0x%08x\n", |
243 | in_le32(pdata->sram_vbase + MV64X60_SRAM_ERR_ADDR_LO)); | 243 | readl(pdata->sram_vbase + MV64X60_SRAM_ERR_ADDR_LO)); |
244 | printk(KERN_ERR "Address High: 0x%08x\n", | 244 | printk(KERN_ERR "Address High: 0x%08x\n", |
245 | in_le32(pdata->sram_vbase + MV64X60_SRAM_ERR_ADDR_HI)); | 245 | readl(pdata->sram_vbase + MV64X60_SRAM_ERR_ADDR_HI)); |
246 | printk(KERN_ERR "Data Low: 0x%08x\n", | 246 | printk(KERN_ERR "Data Low: 0x%08x\n", |
247 | in_le32(pdata->sram_vbase + MV64X60_SRAM_ERR_DATA_LO)); | 247 | readl(pdata->sram_vbase + MV64X60_SRAM_ERR_DATA_LO)); |
248 | printk(KERN_ERR "Data High: 0x%08x\n", | 248 | printk(KERN_ERR "Data High: 0x%08x\n", |
249 | in_le32(pdata->sram_vbase + MV64X60_SRAM_ERR_DATA_HI)); | 249 | readl(pdata->sram_vbase + MV64X60_SRAM_ERR_DATA_HI)); |
250 | printk(KERN_ERR "Parity: 0x%08x\n", | 250 | printk(KERN_ERR "Parity: 0x%08x\n", |
251 | in_le32(pdata->sram_vbase + MV64X60_SRAM_ERR_PARITY)); | 251 | readl(pdata->sram_vbase + MV64X60_SRAM_ERR_PARITY)); |
252 | out_le32(pdata->sram_vbase + MV64X60_SRAM_ERR_CAUSE, 0); | 252 | writel(0, pdata->sram_vbase + MV64X60_SRAM_ERR_CAUSE); |
253 | 253 | ||
254 | edac_device_handle_ue(edac_dev, 0, 0, edac_dev->ctl_name); | 254 | edac_device_handle_ue(edac_dev, 0, 0, edac_dev->ctl_name); |
255 | } | 255 | } |
@@ -260,7 +260,7 @@ static irqreturn_t mv64x60_sram_isr(int irq, void *dev_id) | |||
260 | struct mv64x60_sram_pdata *pdata = edac_dev->pvt_info; | 260 | struct mv64x60_sram_pdata *pdata = edac_dev->pvt_info; |
261 | u32 cause; | 261 | u32 cause; |
262 | 262 | ||
263 | cause = in_le32(pdata->sram_vbase + MV64X60_SRAM_ERR_CAUSE); | 263 | cause = readl(pdata->sram_vbase + MV64X60_SRAM_ERR_CAUSE); |
264 | if (!cause) | 264 | if (!cause) |
265 | return IRQ_NONE; | 265 | return IRQ_NONE; |
266 | 266 | ||
@@ -322,7 +322,7 @@ static int mv64x60_sram_err_probe(struct platform_device *pdev) | |||
322 | } | 322 | } |
323 | 323 | ||
324 | /* setup SRAM err registers */ | 324 | /* setup SRAM err registers */ |
325 | out_le32(pdata->sram_vbase + MV64X60_SRAM_ERR_CAUSE, 0); | 325 | writel(0, pdata->sram_vbase + MV64X60_SRAM_ERR_CAUSE); |
326 | 326 | ||
327 | edac_dev->mod_name = EDAC_MOD_STR; | 327 | edac_dev->mod_name = EDAC_MOD_STR; |
328 | edac_dev->ctl_name = pdata->name; | 328 | edac_dev->ctl_name = pdata->name; |
@@ -398,7 +398,7 @@ static void mv64x60_cpu_check(struct edac_device_ctl_info *edac_dev) | |||
398 | struct mv64x60_cpu_pdata *pdata = edac_dev->pvt_info; | 398 | struct mv64x60_cpu_pdata *pdata = edac_dev->pvt_info; |
399 | u32 cause; | 399 | u32 cause; |
400 | 400 | ||
401 | cause = in_le32(pdata->cpu_vbase[1] + MV64x60_CPU_ERR_CAUSE) & | 401 | cause = readl(pdata->cpu_vbase[1] + MV64x60_CPU_ERR_CAUSE) & |
402 | MV64x60_CPU_CAUSE_MASK; | 402 | MV64x60_CPU_CAUSE_MASK; |
403 | if (!cause) | 403 | if (!cause) |
404 | return; | 404 | return; |
@@ -406,16 +406,16 @@ static void mv64x60_cpu_check(struct edac_device_ctl_info *edac_dev) | |||
406 | printk(KERN_ERR "Error on CPU interface\n"); | 406 | printk(KERN_ERR "Error on CPU interface\n"); |
407 | printk(KERN_ERR "Cause register: 0x%08x\n", cause); | 407 | printk(KERN_ERR "Cause register: 0x%08x\n", cause); |
408 | printk(KERN_ERR "Address Low: 0x%08x\n", | 408 | printk(KERN_ERR "Address Low: 0x%08x\n", |
409 | in_le32(pdata->cpu_vbase[0] + MV64x60_CPU_ERR_ADDR_LO)); | 409 | readl(pdata->cpu_vbase[0] + MV64x60_CPU_ERR_ADDR_LO)); |
410 | printk(KERN_ERR "Address High: 0x%08x\n", | 410 | printk(KERN_ERR "Address High: 0x%08x\n", |
411 | in_le32(pdata->cpu_vbase[0] + MV64x60_CPU_ERR_ADDR_HI)); | 411 | readl(pdata->cpu_vbase[0] + MV64x60_CPU_ERR_ADDR_HI)); |
412 | printk(KERN_ERR "Data Low: 0x%08x\n", | 412 | printk(KERN_ERR "Data Low: 0x%08x\n", |
413 | in_le32(pdata->cpu_vbase[1] + MV64x60_CPU_ERR_DATA_LO)); | 413 | readl(pdata->cpu_vbase[1] + MV64x60_CPU_ERR_DATA_LO)); |
414 | printk(KERN_ERR "Data High: 0x%08x\n", | 414 | printk(KERN_ERR "Data High: 0x%08x\n", |
415 | in_le32(pdata->cpu_vbase[1] + MV64x60_CPU_ERR_DATA_HI)); | 415 | readl(pdata->cpu_vbase[1] + MV64x60_CPU_ERR_DATA_HI)); |
416 | printk(KERN_ERR "Parity: 0x%08x\n", | 416 | printk(KERN_ERR "Parity: 0x%08x\n", |
417 | in_le32(pdata->cpu_vbase[1] + MV64x60_CPU_ERR_PARITY)); | 417 | readl(pdata->cpu_vbase[1] + MV64x60_CPU_ERR_PARITY)); |
418 | out_le32(pdata->cpu_vbase[1] + MV64x60_CPU_ERR_CAUSE, 0); | 418 | writel(0, pdata->cpu_vbase[1] + MV64x60_CPU_ERR_CAUSE); |
419 | 419 | ||
420 | edac_device_handle_ue(edac_dev, 0, 0, edac_dev->ctl_name); | 420 | edac_device_handle_ue(edac_dev, 0, 0, edac_dev->ctl_name); |
421 | } | 421 | } |
@@ -426,7 +426,7 @@ static irqreturn_t mv64x60_cpu_isr(int irq, void *dev_id) | |||
426 | struct mv64x60_cpu_pdata *pdata = edac_dev->pvt_info; | 426 | struct mv64x60_cpu_pdata *pdata = edac_dev->pvt_info; |
427 | u32 cause; | 427 | u32 cause; |
428 | 428 | ||
429 | cause = in_le32(pdata->cpu_vbase[1] + MV64x60_CPU_ERR_CAUSE) & | 429 | cause = readl(pdata->cpu_vbase[1] + MV64x60_CPU_ERR_CAUSE) & |
430 | MV64x60_CPU_CAUSE_MASK; | 430 | MV64x60_CPU_CAUSE_MASK; |
431 | if (!cause) | 431 | if (!cause) |
432 | return IRQ_NONE; | 432 | return IRQ_NONE; |
@@ -515,9 +515,9 @@ static int mv64x60_cpu_err_probe(struct platform_device *pdev) | |||
515 | } | 515 | } |
516 | 516 | ||
517 | /* setup CPU err registers */ | 517 | /* setup CPU err registers */ |
518 | out_le32(pdata->cpu_vbase[1] + MV64x60_CPU_ERR_CAUSE, 0); | 518 | writel(0, pdata->cpu_vbase[1] + MV64x60_CPU_ERR_CAUSE); |
519 | out_le32(pdata->cpu_vbase[1] + MV64x60_CPU_ERR_MASK, 0); | 519 | writel(0, pdata->cpu_vbase[1] + MV64x60_CPU_ERR_MASK); |
520 | out_le32(pdata->cpu_vbase[1] + MV64x60_CPU_ERR_MASK, 0x000000ff); | 520 | writel(0x000000ff, pdata->cpu_vbase[1] + MV64x60_CPU_ERR_MASK); |
521 | 521 | ||
522 | edac_dev->mod_name = EDAC_MOD_STR; | 522 | edac_dev->mod_name = EDAC_MOD_STR; |
523 | edac_dev->ctl_name = pdata->name; | 523 | edac_dev->ctl_name = pdata->name; |
@@ -596,13 +596,13 @@ static void mv64x60_mc_check(struct mem_ctl_info *mci) | |||
596 | u32 comp_ecc; | 596 | u32 comp_ecc; |
597 | u32 syndrome; | 597 | u32 syndrome; |
598 | 598 | ||
599 | reg = in_le32(pdata->mc_vbase + MV64X60_SDRAM_ERR_ADDR); | 599 | reg = readl(pdata->mc_vbase + MV64X60_SDRAM_ERR_ADDR); |
600 | if (!reg) | 600 | if (!reg) |
601 | return; | 601 | return; |
602 | 602 | ||
603 | err_addr = reg & ~0x3; | 603 | err_addr = reg & ~0x3; |
604 | sdram_ecc = in_le32(pdata->mc_vbase + MV64X60_SDRAM_ERR_ECC_RCVD); | 604 | sdram_ecc = readl(pdata->mc_vbase + MV64X60_SDRAM_ERR_ECC_RCVD); |
605 | comp_ecc = in_le32(pdata->mc_vbase + MV64X60_SDRAM_ERR_ECC_CALC); | 605 | comp_ecc = readl(pdata->mc_vbase + MV64X60_SDRAM_ERR_ECC_CALC); |
606 | syndrome = sdram_ecc ^ comp_ecc; | 606 | syndrome = sdram_ecc ^ comp_ecc; |
607 | 607 | ||
608 | /* first bit clear in ECC Err Reg, 1 bit error, correctable by HW */ | 608 | /* first bit clear in ECC Err Reg, 1 bit error, correctable by HW */ |
@@ -620,7 +620,7 @@ static void mv64x60_mc_check(struct mem_ctl_info *mci) | |||
620 | mci->ctl_name, ""); | 620 | mci->ctl_name, ""); |
621 | 621 | ||
622 | /* clear the error */ | 622 | /* clear the error */ |
623 | out_le32(pdata->mc_vbase + MV64X60_SDRAM_ERR_ADDR, 0); | 623 | writel(0, pdata->mc_vbase + MV64X60_SDRAM_ERR_ADDR); |
624 | } | 624 | } |
625 | 625 | ||
626 | static irqreturn_t mv64x60_mc_isr(int irq, void *dev_id) | 626 | static irqreturn_t mv64x60_mc_isr(int irq, void *dev_id) |
@@ -629,7 +629,7 @@ static irqreturn_t mv64x60_mc_isr(int irq, void *dev_id) | |||
629 | struct mv64x60_mc_pdata *pdata = mci->pvt_info; | 629 | struct mv64x60_mc_pdata *pdata = mci->pvt_info; |
630 | u32 reg; | 630 | u32 reg; |
631 | 631 | ||
632 | reg = in_le32(pdata->mc_vbase + MV64X60_SDRAM_ERR_ADDR); | 632 | reg = readl(pdata->mc_vbase + MV64X60_SDRAM_ERR_ADDR); |
633 | if (!reg) | 633 | if (!reg) |
634 | return IRQ_NONE; | 634 | return IRQ_NONE; |
635 | 635 | ||
@@ -664,7 +664,7 @@ static void mv64x60_init_csrows(struct mem_ctl_info *mci, | |||
664 | 664 | ||
665 | get_total_mem(pdata); | 665 | get_total_mem(pdata); |
666 | 666 | ||
667 | ctl = in_le32(pdata->mc_vbase + MV64X60_SDRAM_CONFIG); | 667 | ctl = readl(pdata->mc_vbase + MV64X60_SDRAM_CONFIG); |
668 | 668 | ||
669 | csrow = mci->csrows[0]; | 669 | csrow = mci->csrows[0]; |
670 | dimm = csrow->channels[0]->dimm; | 670 | dimm = csrow->channels[0]->dimm; |
@@ -753,7 +753,7 @@ static int mv64x60_mc_err_probe(struct platform_device *pdev) | |||
753 | goto err; | 753 | goto err; |
754 | } | 754 | } |
755 | 755 | ||
756 | ctl = in_le32(pdata->mc_vbase + MV64X60_SDRAM_CONFIG); | 756 | ctl = readl(pdata->mc_vbase + MV64X60_SDRAM_CONFIG); |
757 | if (!(ctl & MV64X60_SDRAM_ECC)) { | 757 | if (!(ctl & MV64X60_SDRAM_ECC)) { |
758 | /* Non-ECC RAM? */ | 758 | /* Non-ECC RAM? */ |
759 | printk(KERN_WARNING "%s: No ECC DIMMs discovered\n", __func__); | 759 | printk(KERN_WARNING "%s: No ECC DIMMs discovered\n", __func__); |
@@ -779,10 +779,10 @@ static int mv64x60_mc_err_probe(struct platform_device *pdev) | |||
779 | mv64x60_init_csrows(mci, pdata); | 779 | mv64x60_init_csrows(mci, pdata); |
780 | 780 | ||
781 | /* setup MC registers */ | 781 | /* setup MC registers */ |
782 | out_le32(pdata->mc_vbase + MV64X60_SDRAM_ERR_ADDR, 0); | 782 | writel(0, pdata->mc_vbase + MV64X60_SDRAM_ERR_ADDR); |
783 | ctl = in_le32(pdata->mc_vbase + MV64X60_SDRAM_ERR_ECC_CNTL); | 783 | ctl = readl(pdata->mc_vbase + MV64X60_SDRAM_ERR_ECC_CNTL); |
784 | ctl = (ctl & 0xff00ffff) | 0x10000; | 784 | ctl = (ctl & 0xff00ffff) | 0x10000; |
785 | out_le32(pdata->mc_vbase + MV64X60_SDRAM_ERR_ECC_CNTL, ctl); | 785 | writel(ctl, pdata->mc_vbase + MV64X60_SDRAM_ERR_ECC_CNTL); |
786 | 786 | ||
787 | res = edac_mc_add_mc(mci); | 787 | res = edac_mc_add_mc(mci); |
788 | if (res) { | 788 | if (res) { |
@@ -853,10 +853,10 @@ static struct platform_driver * const drivers[] = { | |||
853 | 853 | ||
854 | static int __init mv64x60_edac_init(void) | 854 | static int __init mv64x60_edac_init(void) |
855 | { | 855 | { |
856 | int ret = 0; | ||
857 | 856 | ||
858 | printk(KERN_INFO "Marvell MV64x60 EDAC driver " MV64x60_REVISION "\n"); | 857 | printk(KERN_INFO "Marvell MV64x60 EDAC driver " MV64x60_REVISION "\n"); |
859 | printk(KERN_INFO "\t(C) 2006-2007 MontaVista Software\n"); | 858 | printk(KERN_INFO "\t(C) 2006-2007 MontaVista Software\n"); |
859 | |||
860 | /* make sure error reporting method is sane */ | 860 | /* make sure error reporting method is sane */ |
861 | switch (edac_op_state) { | 861 | switch (edac_op_state) { |
862 | case EDAC_OPSTATE_POLL: | 862 | case EDAC_OPSTATE_POLL: |
diff --git a/drivers/edac/pnd2_edac.c b/drivers/edac/pnd2_edac.c
index 1cad5a9af8d0..8e599490f6de 100644
--- a/drivers/edac/pnd2_edac.c
+++ b/drivers/edac/pnd2_edac.c
@@ -131,7 +131,7 @@ static struct mem_ctl_info *pnd2_mci; | |||
131 | 131 | ||
132 | #ifdef CONFIG_X86_INTEL_SBI_APL | 132 | #ifdef CONFIG_X86_INTEL_SBI_APL |
133 | #include "linux/platform_data/sbi_apl.h" | 133 | #include "linux/platform_data/sbi_apl.h" |
134 | int sbi_send(int port, int off, int op, u32 *data) | 134 | static int sbi_send(int port, int off, int op, u32 *data) |
135 | { | 135 | { |
136 | struct sbi_apl_message sbi_arg; | 136 | struct sbi_apl_message sbi_arg; |
137 | int ret, read = 0; | 137 | int ret, read = 0; |
@@ -160,7 +160,7 @@ int sbi_send(int port, int off, int op, u32 *data) | |||
160 | return ret; | 160 | return ret; |
161 | } | 161 | } |
162 | #else | 162 | #else |
163 | int sbi_send(int port, int off, int op, u32 *data) | 163 | static int sbi_send(int port, int off, int op, u32 *data) |
164 | { | 164 | { |
165 | return -EUNATCH; | 165 | return -EUNATCH; |
166 | } | 166 | } |
@@ -168,14 +168,15 @@ int sbi_send(int port, int off, int op, u32 *data) | |||
168 | 168 | ||
169 | static int apl_rd_reg(int port, int off, int op, void *data, size_t sz, char *name) | 169 | static int apl_rd_reg(int port, int off, int op, void *data, size_t sz, char *name) |
170 | { | 170 | { |
171 | int ret = 0; | 171 | int ret = 0; |
172 | 172 | ||
173 | edac_dbg(2, "Read %s port=%x off=%x op=%x\n", name, port, off, op); | 173 | edac_dbg(2, "Read %s port=%x off=%x op=%x\n", name, port, off, op); |
174 | switch (sz) { | 174 | switch (sz) { |
175 | case 8: | 175 | case 8: |
176 | ret = sbi_send(port, off + 4, op, (u32 *)(data + 4)); | 176 | ret = sbi_send(port, off + 4, op, (u32 *)(data + 4)); |
177 | /* fall through */ | ||
177 | case 4: | 178 | case 4: |
178 | ret = sbi_send(port, off, op, (u32 *)data); | 179 | ret |= sbi_send(port, off, op, (u32 *)data); |
179 | pnd2_printk(KERN_DEBUG, "%s=%x%08x ret=%d\n", name, | 180 | pnd2_printk(KERN_DEBUG, "%s=%x%08x ret=%d\n", name, |
180 | sz == 8 ? *((u32 *)(data + 4)) : 0, *((u32 *)data), ret); | 181 | sz == 8 ? *((u32 *)(data + 4)) : 0, *((u32 *)data), ret); |
181 | break; | 182 | break; |
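Reassembled from the right-hand column, the 8-byte case now reads the high word first and then deliberately falls through to the 4-byte case for the low word, OR-ing the return codes so a failure in either half is reported. The resulting switch (a sketch of just the changed cases):

        switch (sz) {
        case 8:
                /* high 32 bits live at off + 4 */
                ret = sbi_send(port, off + 4, op, (u32 *)(data + 4));
                /* fall through */
        case 4:
                /* low 32 bits; keep any earlier error in ret */
                ret |= sbi_send(port, off, op, (u32 *)data);
                pnd2_printk(KERN_DEBUG, "%s=%x%08x ret=%d\n", name,
                            sz == 8 ? *((u32 *)(data + 4)) : 0, *((u32 *)data), ret);
                break;
        }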
@@ -423,16 +424,21 @@ static void dnv_mk_region(char *name, struct region *rp, void *asym) | |||
423 | 424 | ||
424 | static int apl_get_registers(void) | 425 | static int apl_get_registers(void) |
425 | { | 426 | { |
427 | int ret = -ENODEV; | ||
426 | int i; | 428 | int i; |
427 | 429 | ||
428 | if (RD_REG(&asym_2way, b_cr_asym_2way_mem_region_mchbar)) | 430 | if (RD_REG(&asym_2way, b_cr_asym_2way_mem_region_mchbar)) |
429 | return -ENODEV; | 431 | return -ENODEV; |
430 | 432 | ||
433 | /* | ||
434 | * RD_REGP() will fail for unpopulated or non-existent | ||
435 | * DIMM slots. Return success if we find at least one DIMM. | ||
436 | */ | ||
431 | for (i = 0; i < APL_NUM_CHANNELS; i++) | 437 | for (i = 0; i < APL_NUM_CHANNELS; i++) |
432 | if (RD_REGP(&drp0[i], d_cr_drp0, apl_dports[i])) | 438 | if (!RD_REGP(&drp0[i], d_cr_drp0, apl_dports[i])) |
433 | return -ENODEV; | 439 | ret = 0; |
434 | 440 | ||
435 | return 0; | 441 | return ret; |
436 | } | 442 | } |
437 | 443 | ||
438 | static int dnv_get_registers(void) | 444 | static int dnv_get_registers(void) |
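In the same spirit, the reworked apl_get_registers() treats a failed d_cr_drp0 read as an empty slot rather than a fatal error, and only gives up if no channel responds. The post-patch function, reassembled from the hunk above:

        static int apl_get_registers(void)
        {
                int ret = -ENODEV;
                int i;

                if (RD_REG(&asym_2way, b_cr_asym_2way_mem_region_mchbar))
                        return -ENODEV;

                /*
                 * RD_REGP() will fail for unpopulated or non-existent
                 * DIMM slots. Return success if we find at least one DIMM.
                 */
                for (i = 0; i < APL_NUM_CHANNELS; i++)
                        if (!RD_REGP(&drp0[i], d_cr_drp0, apl_dports[i]))
                                ret = 0;

                return ret;
        }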
diff --git a/drivers/edac/sb_edac.c b/drivers/edac/sb_edac.c
index ea21cb651b3c..80d860cb0746 100644
--- a/drivers/edac/sb_edac.c
+++ b/drivers/edac/sb_edac.c
@@ -35,7 +35,7 @@ static LIST_HEAD(sbridge_edac_list); | |||
35 | /* | 35 | /* |
36 | * Alter this version for the module when modifications are made | 36 | * Alter this version for the module when modifications are made |
37 | */ | 37 | */ |
38 | #define SBRIDGE_REVISION " Ver: 1.1.1 " | 38 | #define SBRIDGE_REVISION " Ver: 1.1.2 " |
39 | #define EDAC_MOD_STR "sbridge_edac" | 39 | #define EDAC_MOD_STR "sbridge_edac" |
40 | 40 | ||
41 | /* | 41 | /* |
@@ -279,7 +279,7 @@ static const u32 correrrthrsld[] = { | |||
279 | * sbridge structs | 279 | * sbridge structs |
280 | */ | 280 | */ |
281 | 281 | ||
282 | #define NUM_CHANNELS 8 /* 2MC per socket, four chan per MC */ | 282 | #define NUM_CHANNELS 4 /* Max channels per MC */ |
283 | #define MAX_DIMMS 3 /* Max DIMMS per channel */ | 283 | #define MAX_DIMMS 3 /* Max DIMMS per channel */ |
284 | #define KNL_MAX_CHAS 38 /* KNL max num. of Cache Home Agents */ | 284 | #define KNL_MAX_CHAS 38 /* KNL max num. of Cache Home Agents */ |
285 | #define KNL_MAX_CHANNELS 6 /* KNL max num. of PCI channels */ | 285 | #define KNL_MAX_CHANNELS 6 /* KNL max num. of PCI channels */ |
@@ -294,6 +294,12 @@ enum type { | |||
294 | KNIGHTS_LANDING, | 294 | KNIGHTS_LANDING, |
295 | }; | 295 | }; |
296 | 296 | ||
297 | enum domain { | ||
298 | IMC0 = 0, | ||
299 | IMC1, | ||
300 | SOCK, | ||
301 | }; | ||
302 | |||
297 | struct sbridge_pvt; | 303 | struct sbridge_pvt; |
298 | struct sbridge_info { | 304 | struct sbridge_info { |
299 | enum type type; | 305 | enum type type; |
@@ -324,11 +330,14 @@ struct sbridge_channel { | |||
324 | struct pci_id_descr { | 330 | struct pci_id_descr { |
325 | int dev_id; | 331 | int dev_id; |
326 | int optional; | 332 | int optional; |
333 | enum domain dom; | ||
327 | }; | 334 | }; |
328 | 335 | ||
329 | struct pci_id_table { | 336 | struct pci_id_table { |
330 | const struct pci_id_descr *descr; | 337 | const struct pci_id_descr *descr; |
331 | int n_devs; | 338 | int n_devs_per_imc; |
339 | int n_devs_per_sock; | ||
340 | int n_imcs_per_sock; | ||
332 | enum type type; | 341 | enum type type; |
333 | }; | 342 | }; |
334 | 343 | ||
@@ -337,7 +346,9 @@ struct sbridge_dev { | |||
337 | u8 bus, mc; | 346 | u8 bus, mc; |
338 | u8 node_id, source_id; | 347 | u8 node_id, source_id; |
339 | struct pci_dev **pdev; | 348 | struct pci_dev **pdev; |
349 | enum domain dom; | ||
340 | int n_devs; | 350 | int n_devs; |
351 | int i_devs; | ||
341 | struct mem_ctl_info *mci; | 352 | struct mem_ctl_info *mci; |
342 | }; | 353 | }; |
343 | 354 | ||
@@ -352,11 +363,12 @@ struct knl_pvt { | |||
352 | }; | 363 | }; |
353 | 364 | ||
354 | struct sbridge_pvt { | 365 | struct sbridge_pvt { |
355 | struct pci_dev *pci_ta, *pci_ddrio, *pci_ras; | 366 | /* Devices per socket */ |
367 | struct pci_dev *pci_ddrio; | ||
356 | struct pci_dev *pci_sad0, *pci_sad1; | 368 | struct pci_dev *pci_sad0, *pci_sad1; |
357 | struct pci_dev *pci_ha0, *pci_ha1; | ||
358 | struct pci_dev *pci_br0, *pci_br1; | 369 | struct pci_dev *pci_br0, *pci_br1; |
359 | struct pci_dev *pci_ha1_ta; | 370 | /* Devices per memory controller */ |
371 | struct pci_dev *pci_ha, *pci_ta, *pci_ras; | ||
360 | struct pci_dev *pci_tad[NUM_CHANNELS]; | 372 | struct pci_dev *pci_tad[NUM_CHANNELS]; |
361 | 373 | ||
362 | struct sbridge_dev *sbridge_dev; | 374 | struct sbridge_dev *sbridge_dev; |
@@ -373,39 +385,42 @@ struct sbridge_pvt { | |||
373 | struct knl_pvt knl; | 385 | struct knl_pvt knl; |
374 | }; | 386 | }; |
375 | 387 | ||
376 | #define PCI_DESCR(device_id, opt) \ | 388 | #define PCI_DESCR(device_id, opt, domain) \ |
377 | .dev_id = (device_id), \ | 389 | .dev_id = (device_id), \ |
378 | .optional = opt | 390 | .optional = opt, \ |
391 | .dom = domain | ||
379 | 392 | ||
380 | static const struct pci_id_descr pci_dev_descr_sbridge[] = { | 393 | static const struct pci_id_descr pci_dev_descr_sbridge[] = { |
381 | /* Processor Home Agent */ | 394 | /* Processor Home Agent */ |
382 | { PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_HA0, 0) }, | 395 | { PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_HA0, 0, IMC0) }, |
383 | 396 | ||
384 | /* Memory controller */ | 397 | /* Memory controller */ |
385 | { PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TA, 0) }, | 398 | { PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TA, 0, IMC0) }, |
386 | { PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_RAS, 0) }, | 399 | { PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_RAS, 0, IMC0) }, |
387 | { PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD0, 0) }, | 400 | { PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD0, 0, IMC0) }, |
388 | { PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD1, 0) }, | 401 | { PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD1, 0, IMC0) }, |
389 | { PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD2, 0) }, | 402 | { PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD2, 0, IMC0) }, |
390 | { PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD3, 0) }, | 403 | { PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD3, 0, IMC0) }, |
391 | { PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_DDRIO, 1) }, | 404 | { PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_DDRIO, 1, SOCK) }, |
392 | 405 | ||
393 | /* System Address Decoder */ | 406 | /* System Address Decoder */ |
394 | { PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_SAD0, 0) }, | 407 | { PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_SAD0, 0, SOCK) }, |
395 | { PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_SAD1, 0) }, | 408 | { PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_SAD1, 0, SOCK) }, |
396 | 409 | ||
397 | /* Broadcast Registers */ | 410 | /* Broadcast Registers */ |
398 | { PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_BR, 0) }, | 411 | { PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_BR, 0, SOCK) }, |
399 | }; | 412 | }; |
400 | 413 | ||
401 | #define PCI_ID_TABLE_ENTRY(A, T) { \ | 414 | #define PCI_ID_TABLE_ENTRY(A, N, M, T) { \ |
402 | .descr = A, \ | 415 | .descr = A, \ |
403 | .n_devs = ARRAY_SIZE(A), \ | 416 | .n_devs_per_imc = N, \ |
417 | .n_devs_per_sock = ARRAY_SIZE(A), \ | ||
418 | .n_imcs_per_sock = M, \ | ||
404 | .type = T \ | 419 | .type = T \ |
405 | } | 420 | } |
406 | 421 | ||
407 | static const struct pci_id_table pci_dev_descr_sbridge_table[] = { | 422 | static const struct pci_id_table pci_dev_descr_sbridge_table[] = { |
408 | PCI_ID_TABLE_ENTRY(pci_dev_descr_sbridge, SANDY_BRIDGE), | 423 | PCI_ID_TABLE_ENTRY(pci_dev_descr_sbridge, ARRAY_SIZE(pci_dev_descr_sbridge), 1, SANDY_BRIDGE), |
409 | {0,} /* 0 terminated list. */ | 424 | {0,} /* 0 terminated list. */ |
410 | }; | 425 | }; |
411 | 426 | ||
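After this hunk every descriptor carries the domain it belongs to (IMC0, IMC1 or socket-wide), and a table entry records how many devices make up one iMC versus one socket. Laid out cleanly, the two macros and the Sandy Bridge table entry now read (reassembled from the right-hand column):

        #define PCI_DESCR(device_id, opt, domain)       \
                .dev_id = (device_id),                  \
                .optional = opt,                        \
                .dom = domain

        #define PCI_ID_TABLE_ENTRY(A, N, M, T) {        \
                .descr = A,                             \
                .n_devs_per_imc = N,                    \
                .n_devs_per_sock = ARRAY_SIZE(A),       \
                .n_imcs_per_sock = M,                   \
                .type = T                               \
        }

        /* Sandy Bridge has one iMC per socket, so one array describes both. */
        static const struct pci_id_table pci_dev_descr_sbridge_table[] = {
                PCI_ID_TABLE_ENTRY(pci_dev_descr_sbridge, ARRAY_SIZE(pci_dev_descr_sbridge), 1, SANDY_BRIDGE),
                {0,}    /* 0 terminated list. */
        };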
@@ -439,40 +454,39 @@ static const struct pci_id_table pci_dev_descr_sbridge_table[] = { | |||
439 | 454 | ||
440 | static const struct pci_id_descr pci_dev_descr_ibridge[] = { | 455 | static const struct pci_id_descr pci_dev_descr_ibridge[] = { |
441 | /* Processor Home Agent */ | 456 | /* Processor Home Agent */ |
442 | { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0, 0) }, | 457 | { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0, 0, IMC0) }, |
443 | 458 | ||
444 | /* Memory controller */ | 459 | /* Memory controller */ |
445 | { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TA, 0) }, | 460 | { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TA, 0, IMC0) }, |
446 | { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_RAS, 0) }, | 461 | { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_RAS, 0, IMC0) }, |
447 | { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD0, 0) }, | 462 | { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD0, 0, IMC0) }, |
448 | { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD1, 0) }, | 463 | { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD1, 0, IMC0) }, |
449 | { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD2, 0) }, | 464 | { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD2, 0, IMC0) }, |
450 | { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD3, 0) }, | 465 | { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD3, 0, IMC0) }, |
466 | |||
467 | /* Optional, mode 2HA */ | ||
468 | { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1, 1, IMC1) }, | ||
469 | { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TA, 1, IMC1) }, | ||
470 | { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_RAS, 1, IMC1) }, | ||
471 | { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TAD0, 1, IMC1) }, | ||
472 | { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TAD1, 1, IMC1) }, | ||
473 | { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TAD2, 1, IMC1) }, | ||
474 | { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TAD3, 1, IMC1) }, | ||
475 | |||
476 | { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_1HA_DDRIO0, 1, SOCK) }, | ||
477 | { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_2HA_DDRIO0, 1, SOCK) }, | ||
451 | 478 | ||
452 | /* System Address Decoder */ | 479 | /* System Address Decoder */ |
453 | { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_SAD, 0) }, | 480 | { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_SAD, 0, SOCK) }, |
454 | 481 | ||
455 | /* Broadcast Registers */ | 482 | /* Broadcast Registers */ |
456 | { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_BR0, 1) }, | 483 | { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_BR0, 1, SOCK) }, |
457 | { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_BR1, 0) }, | 484 | { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_BR1, 0, SOCK) }, |
458 | 485 | ||
459 | /* Optional, mode 2HA */ | ||
460 | { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1, 1) }, | ||
461 | #if 0 | ||
462 | { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TA, 1) }, | ||
463 | { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_RAS, 1) }, | ||
464 | #endif | ||
465 | { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TAD0, 1) }, | ||
466 | { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TAD1, 1) }, | ||
467 | { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TAD2, 1) }, | ||
468 | { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TAD3, 1) }, | ||
469 | |||
470 | { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_1HA_DDRIO0, 1) }, | ||
471 | { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_2HA_DDRIO0, 1) }, | ||
472 | }; | 486 | }; |
473 | 487 | ||
474 | static const struct pci_id_table pci_dev_descr_ibridge_table[] = { | 488 | static const struct pci_id_table pci_dev_descr_ibridge_table[] = { |
475 | PCI_ID_TABLE_ENTRY(pci_dev_descr_ibridge, IVY_BRIDGE), | 489 | PCI_ID_TABLE_ENTRY(pci_dev_descr_ibridge, 12, 2, IVY_BRIDGE), |
476 | {0,} /* 0 terminated list. */ | 490 | {0,} /* 0 terminated list. */ |
477 | }; | 491 | }; |
478 | 492 | ||
@@ -498,9 +512,9 @@ static const struct pci_id_table pci_dev_descr_ibridge_table[] = { | |||
498 | #define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0 0x2fa0 | 512 | #define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0 0x2fa0 |
499 | #define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1 0x2f60 | 513 | #define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1 0x2f60 |
500 | #define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TA 0x2fa8 | 514 | #define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TA 0x2fa8 |
501 | #define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_THERMAL 0x2f71 | 515 | #define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TM 0x2f71 |
502 | #define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TA 0x2f68 | 516 | #define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TA 0x2f68 |
503 | #define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_THERMAL 0x2f79 | 517 | #define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TM 0x2f79 |
504 | #define PCI_DEVICE_ID_INTEL_HASWELL_IMC_CBO_SAD0 0x2ffc | 518 | #define PCI_DEVICE_ID_INTEL_HASWELL_IMC_CBO_SAD0 0x2ffc |
505 | #define PCI_DEVICE_ID_INTEL_HASWELL_IMC_CBO_SAD1 0x2ffd | 519 | #define PCI_DEVICE_ID_INTEL_HASWELL_IMC_CBO_SAD1 0x2ffd |
506 | #define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD0 0x2faa | 520 | #define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD0 0x2faa |
@@ -517,35 +531,33 @@ static const struct pci_id_table pci_dev_descr_ibridge_table[] = { | |||
517 | #define PCI_DEVICE_ID_INTEL_HASWELL_IMC_DDRIO3 0x2fbb | 531 | #define PCI_DEVICE_ID_INTEL_HASWELL_IMC_DDRIO3 0x2fbb |
518 | static const struct pci_id_descr pci_dev_descr_haswell[] = { | 532 | static const struct pci_id_descr pci_dev_descr_haswell[] = { |
519 | /* first item must be the HA */ | 533 | /* first item must be the HA */ |
520 | { PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0, 0) }, | 534 | { PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0, 0, IMC0) }, |
521 | 535 | { PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1, 1, IMC1) }, | |
522 | { PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_CBO_SAD0, 0) }, | 536 | |
523 | { PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_CBO_SAD1, 0) }, | 537 | { PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TA, 0, IMC0) }, |
524 | 538 | { PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TM, 0, IMC0) }, | |
525 | { PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1, 1) }, | 539 | { PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD0, 0, IMC0) }, |
526 | 540 | { PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD1, 0, IMC0) }, | |
527 | { PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TA, 0) }, | 541 | { PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD2, 1, IMC0) }, |
528 | { PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_THERMAL, 0) }, | 542 | { PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD3, 1, IMC0) }, |
529 | { PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD0, 0) }, | 543 | |
530 | { PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD1, 0) }, | 544 | { PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TA, 1, IMC1) }, |
531 | { PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD2, 1) }, | 545 | { PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TM, 1, IMC1) }, |
532 | { PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD3, 1) }, | 546 | { PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TAD0, 1, IMC1) }, |
533 | 547 | { PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TAD1, 1, IMC1) }, | |
534 | { PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_DDRIO0, 1) }, | 548 | { PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TAD2, 1, IMC1) }, |
535 | { PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_DDRIO1, 1) }, | 549 | { PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TAD3, 1, IMC1) }, |
536 | { PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_DDRIO2, 1) }, | 550 | |
537 | { PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_DDRIO3, 1) }, | 551 | { PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_CBO_SAD0, 0, SOCK) }, |
538 | 552 | { PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_CBO_SAD1, 0, SOCK) }, | |
539 | { PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TA, 1) }, | 553 | { PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_DDRIO0, 1, SOCK) }, |
540 | { PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_THERMAL, 1) }, | 554 | { PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_DDRIO1, 1, SOCK) }, |
541 | { PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TAD0, 1) }, | 555 | { PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_DDRIO2, 1, SOCK) }, |
542 | { PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TAD1, 1) }, | 556 | { PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_DDRIO3, 1, SOCK) }, |
543 | { PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TAD2, 1) }, | ||
544 | { PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TAD3, 1) }, | ||
545 | }; | 557 | }; |
546 | 558 | ||
547 | static const struct pci_id_table pci_dev_descr_haswell_table[] = { | 559 | static const struct pci_id_table pci_dev_descr_haswell_table[] = { |
548 | PCI_ID_TABLE_ENTRY(pci_dev_descr_haswell, HASWELL), | 560 | PCI_ID_TABLE_ENTRY(pci_dev_descr_haswell, 13, 2, HASWELL), |
549 | {0,} /* 0 terminated list. */ | 561 | {0,} /* 0 terminated list. */ |
550 | }; | 562 | }; |
551 | 563 | ||
@@ -559,7 +571,7 @@ static const struct pci_id_table pci_dev_descr_haswell_table[] = { | |||
559 | /* Memory controller, TAD tables, error injection - 2-8-0, 2-9-0 (2 of these) */ | 571 | /* Memory controller, TAD tables, error injection - 2-8-0, 2-9-0 (2 of these) */ |
560 | #define PCI_DEVICE_ID_INTEL_KNL_IMC_MC 0x7840 | 572 | #define PCI_DEVICE_ID_INTEL_KNL_IMC_MC 0x7840 |
561 | /* DRAM channel stuff; bank addrs, dimmmtr, etc.. 2-8-2 - 2-9-4 (6 of these) */ | 573 | /* DRAM channel stuff; bank addrs, dimmmtr, etc.. 2-8-2 - 2-9-4 (6 of these) */ |
562 | #define PCI_DEVICE_ID_INTEL_KNL_IMC_CHANNEL 0x7843 | 574 | #define PCI_DEVICE_ID_INTEL_KNL_IMC_CHAN 0x7843 |
563 | /* kdrwdbu TAD limits/offsets, MCMTR - 2-10-1, 2-11-1 (2 of these) */ | 575 | /* kdrwdbu TAD limits/offsets, MCMTR - 2-10-1, 2-11-1 (2 of these) */ |
564 | #define PCI_DEVICE_ID_INTEL_KNL_IMC_TA 0x7844 | 576 | #define PCI_DEVICE_ID_INTEL_KNL_IMC_TA 0x7844 |
565 | /* CHA broadcast registers, dram rules - 1-29-0 (1 of these) */ | 577 | /* CHA broadcast registers, dram rules - 1-29-0 (1 of these) */ |
@@ -579,17 +591,17 @@ static const struct pci_id_table pci_dev_descr_haswell_table[] = { | |||
579 | */ | 591 | */ |
580 | 592 | ||
581 | static const struct pci_id_descr pci_dev_descr_knl[] = { | 593 | static const struct pci_id_descr pci_dev_descr_knl[] = { |
582 | [0] = { PCI_DESCR(PCI_DEVICE_ID_INTEL_KNL_IMC_SAD0, 0) }, | 594 | [0 ... 1] = { PCI_DESCR(PCI_DEVICE_ID_INTEL_KNL_IMC_MC, 0, IMC0)}, |
583 | [1] = { PCI_DESCR(PCI_DEVICE_ID_INTEL_KNL_IMC_SAD1, 0) }, | 595 | [2 ... 7] = { PCI_DESCR(PCI_DEVICE_ID_INTEL_KNL_IMC_CHAN, 0, IMC0) }, |
584 | [2 ... 3] = { PCI_DESCR(PCI_DEVICE_ID_INTEL_KNL_IMC_MC, 0)}, | 596 | [8] = { PCI_DESCR(PCI_DEVICE_ID_INTEL_KNL_IMC_TA, 0, IMC0) }, |
585 | [4 ... 41] = { PCI_DESCR(PCI_DEVICE_ID_INTEL_KNL_IMC_CHA, 0) }, | 597 | [9] = { PCI_DESCR(PCI_DEVICE_ID_INTEL_KNL_IMC_TOLHM, 0, IMC0) }, |
586 | [42 ... 47] = { PCI_DESCR(PCI_DEVICE_ID_INTEL_KNL_IMC_CHANNEL, 0) }, | 598 | [10] = { PCI_DESCR(PCI_DEVICE_ID_INTEL_KNL_IMC_SAD0, 0, SOCK) }, |
587 | [48] = { PCI_DESCR(PCI_DEVICE_ID_INTEL_KNL_IMC_TA, 0) }, | 599 | [11] = { PCI_DESCR(PCI_DEVICE_ID_INTEL_KNL_IMC_SAD1, 0, SOCK) }, |
588 | [49] = { PCI_DESCR(PCI_DEVICE_ID_INTEL_KNL_IMC_TOLHM, 0) }, | 600 | [12 ... 49] = { PCI_DESCR(PCI_DEVICE_ID_INTEL_KNL_IMC_CHA, 0, SOCK) }, |
589 | }; | 601 | }; |
590 | 602 | ||
591 | static const struct pci_id_table pci_dev_descr_knl_table[] = { | 603 | static const struct pci_id_table pci_dev_descr_knl_table[] = { |
592 | PCI_ID_TABLE_ENTRY(pci_dev_descr_knl, KNIGHTS_LANDING), | 604 | PCI_ID_TABLE_ENTRY(pci_dev_descr_knl, ARRAY_SIZE(pci_dev_descr_knl), 1, KNIGHTS_LANDING), |
593 | {0,} | 605 | {0,} |
594 | }; | 606 | }; |
595 | 607 | ||
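The reshuffled KNL table leans on GCC's designated range initializers, where "[a ... b] = X" fills every slot from a to b with the same initializer — convenient when dozens of identical CHA or channel devices share one descriptor. A toy, compile-and-run example of the syntax (the values are invented):

        #include <stdio.h>

        /* GNU C extension used by pci_dev_descr_knl above:
         * a range designator initializes a run of array elements at once. */
        static const int slot_kind[10] = {
                [0 ... 1] = 1,  /* two MC devices        */
                [2 ... 7] = 2,  /* six channel devices   */
                [8]       = 3,
                [9]       = 4,
        };

        int main(void)
        {
                for (int i = 0; i < 10; i++)
                        printf("slot %d -> kind %d\n", i, slot_kind[i]);
                return 0;
        }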
@@ -615,9 +627,9 @@ static const struct pci_id_table pci_dev_descr_knl_table[] = { | |||
615 | #define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0 0x6fa0 | 627 | #define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0 0x6fa0 |
616 | #define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1 0x6f60 | 628 | #define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1 0x6f60 |
617 | #define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TA 0x6fa8 | 629 | #define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TA 0x6fa8 |
618 | #define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_THERMAL 0x6f71 | 630 | #define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TM 0x6f71 |
619 | #define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TA 0x6f68 | 631 | #define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TA 0x6f68 |
620 | #define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_THERMAL 0x6f79 | 632 | #define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TM 0x6f79 |
621 | #define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_CBO_SAD0 0x6ffc | 633 | #define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_CBO_SAD0 0x6ffc |
622 | #define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_CBO_SAD1 0x6ffd | 634 | #define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_CBO_SAD1 0x6ffd |
623 | #define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TAD0 0x6faa | 635 | #define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TAD0 0x6faa |
@@ -632,32 +644,30 @@ static const struct pci_id_table pci_dev_descr_knl_table[] = { | |||
632 | 644 | ||
633 | static const struct pci_id_descr pci_dev_descr_broadwell[] = { | 645 | static const struct pci_id_descr pci_dev_descr_broadwell[] = { |
634 | /* first item must be the HA */ | 646 | /* first item must be the HA */ |
635 | { PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0, 0) }, | 647 | { PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0, 0, IMC0) }, |
636 | 648 | { PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1, 1, IMC1) }, | |
637 | { PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_CBO_SAD0, 0) }, | 649 | |
638 | { PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_CBO_SAD1, 0) }, | 650 | { PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TA, 0, IMC0) }, |
639 | 651 | { PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TM, 0, IMC0) }, | |
640 | { PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1, 1) }, | 652 | { PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TAD0, 0, IMC0) }, |
641 | 653 | { PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TAD1, 0, IMC0) }, | |
642 | { PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TA, 0) }, | 654 | { PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TAD2, 1, IMC0) }, |
643 | { PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_THERMAL, 0) }, | 655 | { PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TAD3, 1, IMC0) }, |
644 | { PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TAD0, 0) }, | 656 | |
645 | { PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TAD1, 0) }, | 657 | { PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TA, 1, IMC1) }, |
646 | { PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TAD2, 1) }, | 658 | { PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TM, 1, IMC1) }, |
647 | { PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TAD3, 1) }, | 659 | { PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TAD0, 1, IMC1) }, |
648 | 660 | { PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TAD1, 1, IMC1) }, | |
649 | { PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_DDRIO0, 1) }, | 661 | { PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TAD2, 1, IMC1) }, |
650 | 662 | { PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TAD3, 1, IMC1) }, | |
651 | { PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TA, 1) }, | 663 | |
652 | { PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_THERMAL, 1) }, | 664 | { PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_CBO_SAD0, 0, SOCK) }, |
653 | { PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TAD0, 1) }, | 665 | { PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_CBO_SAD1, 0, SOCK) }, |
654 | { PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TAD1, 1) }, | 666 | { PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_DDRIO0, 1, SOCK) }, |
655 | { PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TAD2, 1) }, | ||
656 | { PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TAD3, 1) }, | ||
657 | }; | 667 | }; |
658 | 668 | ||
659 | static const struct pci_id_table pci_dev_descr_broadwell_table[] = { | 669 | static const struct pci_id_table pci_dev_descr_broadwell_table[] = { |
660 | PCI_ID_TABLE_ENTRY(pci_dev_descr_broadwell, BROADWELL), | 670 | PCI_ID_TABLE_ENTRY(pci_dev_descr_broadwell, 10, 2, BROADWELL), |
661 | {0,} /* 0 terminated list. */ | 671 | {0,} /* 0 terminated list. */ |
662 | }; | 672 | }; |
663 | 673 | ||
@@ -709,7 +719,8 @@ static inline int numcol(u32 mtr) | |||
709 | return 1 << cols; | 719 | return 1 << cols; |
710 | } | 720 | } |
711 | 721 | ||
712 | static struct sbridge_dev *get_sbridge_dev(u8 bus, int multi_bus) | 722 | static struct sbridge_dev *get_sbridge_dev(u8 bus, enum domain dom, int multi_bus, |
723 | struct sbridge_dev *prev) | ||
713 | { | 724 | { |
714 | struct sbridge_dev *sbridge_dev; | 725 | struct sbridge_dev *sbridge_dev; |
715 | 726 | ||
@@ -722,16 +733,19 @@ static struct sbridge_dev *get_sbridge_dev(u8 bus, int multi_bus) | |||
722 | struct sbridge_dev, list); | 733 | struct sbridge_dev, list); |
723 | } | 734 | } |
724 | 735 | ||
725 | list_for_each_entry(sbridge_dev, &sbridge_edac_list, list) { | 736 | sbridge_dev = list_entry(prev ? prev->list.next |
726 | if (sbridge_dev->bus == bus) | 737 | : sbridge_edac_list.next, struct sbridge_dev, list); |
738 | |||
739 | list_for_each_entry_from(sbridge_dev, &sbridge_edac_list, list) { | ||
740 | if (sbridge_dev->bus == bus && (dom == SOCK || dom == sbridge_dev->dom)) | ||
727 | return sbridge_dev; | 741 | return sbridge_dev; |
728 | } | 742 | } |
729 | 743 | ||
730 | return NULL; | 744 | return NULL; |
731 | } | 745 | } |
732 | 746 | ||
733 | static struct sbridge_dev *alloc_sbridge_dev(u8 bus, | 747 | static struct sbridge_dev *alloc_sbridge_dev(u8 bus, enum domain dom, |
734 | const struct pci_id_table *table) | 748 | const struct pci_id_table *table) |
735 | { | 749 | { |
736 | struct sbridge_dev *sbridge_dev; | 750 | struct sbridge_dev *sbridge_dev; |
737 | 751 | ||
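The lookup helper now takes the memory-controller domain plus a "prev" cursor, so a bus hosting two iMCs can yield two distinct sbridge_dev instances across successive calls. Reassembled from the hunk above (the unchanged multi_bus short-cut is elided):

        static struct sbridge_dev *get_sbridge_dev(u8 bus, enum domain dom, int multi_bus,
                                                   struct sbridge_dev *prev)
        {
                struct sbridge_dev *sbridge_dev;

                /* ... unchanged multi_bus handling elided ... */

                /* resume the list walk right after 'prev', or start at the head */
                sbridge_dev = list_entry(prev ? prev->list.next
                                              : sbridge_edac_list.next, struct sbridge_dev, list);

                list_for_each_entry_from(sbridge_dev, &sbridge_edac_list, list) {
                        /* SOCK-domain devices match any iMC on this bus */
                        if (sbridge_dev->bus == bus && (dom == SOCK || dom == sbridge_dev->dom))
                                return sbridge_dev;
                }

                return NULL;
        }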
@@ -739,15 +753,17 @@ static struct sbridge_dev *alloc_sbridge_dev(u8 bus, | |||
739 | if (!sbridge_dev) | 753 | if (!sbridge_dev) |
740 | return NULL; | 754 | return NULL; |
741 | 755 | ||
742 | sbridge_dev->pdev = kzalloc(sizeof(*sbridge_dev->pdev) * table->n_devs, | 756 | sbridge_dev->pdev = kcalloc(table->n_devs_per_imc, |
743 | GFP_KERNEL); | 757 | sizeof(*sbridge_dev->pdev), |
758 | GFP_KERNEL); | ||
744 | if (!sbridge_dev->pdev) { | 759 | if (!sbridge_dev->pdev) { |
745 | kfree(sbridge_dev); | 760 | kfree(sbridge_dev); |
746 | return NULL; | 761 | return NULL; |
747 | } | 762 | } |
748 | 763 | ||
749 | sbridge_dev->bus = bus; | 764 | sbridge_dev->bus = bus; |
750 | sbridge_dev->n_devs = table->n_devs; | 765 | sbridge_dev->dom = dom; |
766 | sbridge_dev->n_devs = table->n_devs_per_imc; | ||
751 | list_add_tail(&sbridge_dev->list, &sbridge_edac_list); | 767 | list_add_tail(&sbridge_dev->list, &sbridge_edac_list); |
752 | 768 | ||
753 | return sbridge_dev; | 769 | return sbridge_dev; |
@@ -1044,79 +1060,6 @@ static int haswell_chan_hash(int idx, u64 addr) | |||
1044 | return idx; | 1060 | return idx; |
1045 | } | 1061 | } |
1046 | 1062 | ||
1047 | /**************************************************************************** | ||
1048 | Memory check routines | ||
1049 | ****************************************************************************/ | ||
1050 | static struct pci_dev *get_pdev_same_bus(u8 bus, u32 id) | ||
1051 | { | ||
1052 | struct pci_dev *pdev = NULL; | ||
1053 | |||
1054 | do { | ||
1055 | pdev = pci_get_device(PCI_VENDOR_ID_INTEL, id, pdev); | ||
1056 | if (pdev && pdev->bus->number == bus) | ||
1057 | break; | ||
1058 | } while (pdev); | ||
1059 | |||
1060 | return pdev; | ||
1061 | } | ||
1062 | |||
1063 | /** | ||
1064 | * check_if_ecc_is_active() - Checks if ECC is active | ||
1065 | * @bus: Device bus | ||
1066 | * @type: Memory controller type | ||
1067 | * returns: 0 in case ECC is active, -ENODEV if it can't be determined or | ||
1068 | * disabled | ||
1069 | */ | ||
1070 | static int check_if_ecc_is_active(const u8 bus, enum type type) | ||
1071 | { | ||
1072 | struct pci_dev *pdev = NULL; | ||
1073 | u32 mcmtr, id; | ||
1074 | |||
1075 | switch (type) { | ||
1076 | case IVY_BRIDGE: | ||
1077 | id = PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TA; | ||
1078 | break; | ||
1079 | case HASWELL: | ||
1080 | id = PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TA; | ||
1081 | break; | ||
1082 | case SANDY_BRIDGE: | ||
1083 | id = PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TA; | ||
1084 | break; | ||
1085 | case BROADWELL: | ||
1086 | id = PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TA; | ||
1087 | break; | ||
1088 | case KNIGHTS_LANDING: | ||
1089 | /* | ||
1090 | * KNL doesn't group things by bus the same way | ||
1091 | * SB/IB/Haswell does. | ||
1092 | */ | ||
1093 | id = PCI_DEVICE_ID_INTEL_KNL_IMC_TA; | ||
1094 | break; | ||
1095 | default: | ||
1096 | return -ENODEV; | ||
1097 | } | ||
1098 | |||
1099 | if (type != KNIGHTS_LANDING) | ||
1100 | pdev = get_pdev_same_bus(bus, id); | ||
1101 | else | ||
1102 | pdev = pci_get_device(PCI_VENDOR_ID_INTEL, id, 0); | ||
1103 | |||
1104 | if (!pdev) { | ||
1105 | sbridge_printk(KERN_ERR, "Couldn't find PCI device " | ||
1106 | "%04x:%04x! on bus %02d\n", | ||
1107 | PCI_VENDOR_ID_INTEL, id, bus); | ||
1108 | return -ENODEV; | ||
1109 | } | ||
1110 | |||
1111 | pci_read_config_dword(pdev, | ||
1112 | type == KNIGHTS_LANDING ? KNL_MCMTR : MCMTR, &mcmtr); | ||
1113 | if (!IS_ECC_ENABLED(mcmtr)) { | ||
1114 | sbridge_printk(KERN_ERR, "ECC is disabled. Aborting\n"); | ||
1115 | return -ENODEV; | ||
1116 | } | ||
1117 | return 0; | ||
1118 | } | ||
1119 | |||
1120 | /* Low bits of TAD limit, and some metadata. */ | 1063 | /* Low bits of TAD limit, and some metadata. */ |
1121 | static const u32 knl_tad_dram_limit_lo[] = { | 1064 | static const u32 knl_tad_dram_limit_lo[] = { |
1122 | 0x400, 0x500, 0x600, 0x700, | 1065 | 0x400, 0x500, 0x600, 0x700, |
@@ -1587,25 +1530,13 @@ static int knl_get_dimm_capacity(struct sbridge_pvt *pvt, u64 *mc_sizes) | |||
1587 | return 0; | 1530 | return 0; |
1588 | } | 1531 | } |
1589 | 1532 | ||
1590 | static int get_dimm_config(struct mem_ctl_info *mci) | 1533 | static void get_source_id(struct mem_ctl_info *mci) |
1591 | { | 1534 | { |
1592 | struct sbridge_pvt *pvt = mci->pvt_info; | 1535 | struct sbridge_pvt *pvt = mci->pvt_info; |
1593 | struct dimm_info *dimm; | ||
1594 | unsigned i, j, banks, ranks, rows, cols, npages; | ||
1595 | u64 size; | ||
1596 | u32 reg; | 1536 | u32 reg; |
1597 | enum edac_type mode; | ||
1598 | enum mem_type mtype; | ||
1599 | int channels = pvt->info.type == KNIGHTS_LANDING ? | ||
1600 | KNL_MAX_CHANNELS : NUM_CHANNELS; | ||
1601 | u64 knl_mc_sizes[KNL_MAX_CHANNELS]; | ||
1602 | 1537 | ||
1603 | if (pvt->info.type == HASWELL || pvt->info.type == BROADWELL) { | ||
1604 | pci_read_config_dword(pvt->pci_ha0, HASWELL_HASYSDEFEATURE2, ®); | ||
1605 | pvt->is_chan_hash = GET_BITFIELD(reg, 21, 21); | ||
1606 | } | ||
1607 | if (pvt->info.type == HASWELL || pvt->info.type == BROADWELL || | 1538 | if (pvt->info.type == HASWELL || pvt->info.type == BROADWELL || |
1608 | pvt->info.type == KNIGHTS_LANDING) | 1539 | pvt->info.type == KNIGHTS_LANDING) |
1609 | pci_read_config_dword(pvt->pci_sad1, SAD_TARGET, ®); | 1540 | pci_read_config_dword(pvt->pci_sad1, SAD_TARGET, ®); |
1610 | else | 1541 | else |
1611 | pci_read_config_dword(pvt->pci_br0, SAD_TARGET, ®); | 1542 | pci_read_config_dword(pvt->pci_br0, SAD_TARGET, ®); |
@@ -1614,50 +1545,19 @@ static int get_dimm_config(struct mem_ctl_info *mci) | |||
1614 | pvt->sbridge_dev->source_id = SOURCE_ID_KNL(reg); | 1545 | pvt->sbridge_dev->source_id = SOURCE_ID_KNL(reg); |
1615 | else | 1546 | else |
1616 | pvt->sbridge_dev->source_id = SOURCE_ID(reg); | 1547 | pvt->sbridge_dev->source_id = SOURCE_ID(reg); |
1548 | } | ||
1617 | 1549 | ||
1618 | pvt->sbridge_dev->node_id = pvt->info.get_node_id(pvt); | 1550 | static int __populate_dimms(struct mem_ctl_info *mci, |
1619 | edac_dbg(0, "mc#%d: Node ID: %d, source ID: %d\n", | 1551 | u64 knl_mc_sizes[KNL_MAX_CHANNELS], |
1620 | pvt->sbridge_dev->mc, | 1552 | enum edac_type mode) |
1621 | pvt->sbridge_dev->node_id, | 1553 | { |
1622 | pvt->sbridge_dev->source_id); | 1554 | struct sbridge_pvt *pvt = mci->pvt_info; |
1623 | 1555 | int channels = pvt->info.type == KNIGHTS_LANDING ? KNL_MAX_CHANNELS | |
1624 | /* KNL doesn't support mirroring or lockstep, | 1556 | : NUM_CHANNELS; |
1625 | * and is always closed page | 1557 | unsigned int i, j, banks, ranks, rows, cols, npages; |
1626 | */ | 1558 | struct dimm_info *dimm; |
1627 | if (pvt->info.type == KNIGHTS_LANDING) { | 1559 | enum mem_type mtype; |
1628 | mode = EDAC_S4ECD4ED; | 1560 | u64 size; |
1629 | pvt->is_mirrored = false; | ||
1630 | |||
1631 | if (knl_get_dimm_capacity(pvt, knl_mc_sizes) != 0) | ||
1632 | return -1; | ||
1633 | } else { | ||
1634 | pci_read_config_dword(pvt->pci_ras, RASENABLES, ®); | ||
1635 | if (IS_MIRROR_ENABLED(reg)) { | ||
1636 | edac_dbg(0, "Memory mirror is enabled\n"); | ||
1637 | pvt->is_mirrored = true; | ||
1638 | } else { | ||
1639 | edac_dbg(0, "Memory mirror is disabled\n"); | ||
1640 | pvt->is_mirrored = false; | ||
1641 | } | ||
1642 | |||
1643 | pci_read_config_dword(pvt->pci_ta, MCMTR, &pvt->info.mcmtr); | ||
1644 | if (IS_LOCKSTEP_ENABLED(pvt->info.mcmtr)) { | ||
1645 | edac_dbg(0, "Lockstep is enabled\n"); | ||
1646 | mode = EDAC_S8ECD8ED; | ||
1647 | pvt->is_lockstep = true; | ||
1648 | } else { | ||
1649 | edac_dbg(0, "Lockstep is disabled\n"); | ||
1650 | mode = EDAC_S4ECD4ED; | ||
1651 | pvt->is_lockstep = false; | ||
1652 | } | ||
1653 | if (IS_CLOSE_PG(pvt->info.mcmtr)) { | ||
1654 | edac_dbg(0, "address map is on closed page mode\n"); | ||
1655 | pvt->is_close_pg = true; | ||
1656 | } else { | ||
1657 | edac_dbg(0, "address map is on open page mode\n"); | ||
1658 | pvt->is_close_pg = false; | ||
1659 | } | ||
1660 | } | ||
1661 | 1561 | ||
1662 | mtype = pvt->info.get_memory_type(pvt); | 1562 | mtype = pvt->info.get_memory_type(pvt); |
1663 | if (mtype == MEM_RDDR3 || mtype == MEM_RDDR4) | 1563 | if (mtype == MEM_RDDR3 || mtype == MEM_RDDR4) |
@@ -1688,8 +1588,7 @@ static int get_dimm_config(struct mem_ctl_info *mci) | |||
1688 | } | 1588 | } |
1689 | 1589 | ||
1690 | for (j = 0; j < max_dimms_per_channel; j++) { | 1590 | for (j = 0; j < max_dimms_per_channel; j++) { |
1691 | dimm = EDAC_DIMM_PTR(mci->layers, mci->dimms, mci->n_layers, | 1591 | dimm = EDAC_DIMM_PTR(mci->layers, mci->dimms, mci->n_layers, i, j, 0); |
1692 | i, j, 0); | ||
1693 | if (pvt->info.type == KNIGHTS_LANDING) { | 1592 | if (pvt->info.type == KNIGHTS_LANDING) { |
1694 | pci_read_config_dword(pvt->knl.pci_channel[i], | 1593 | pci_read_config_dword(pvt->knl.pci_channel[i], |
1695 | knl_mtr_reg, &mtr); | 1594 | knl_mtr_reg, &mtr); |
@@ -1699,6 +1598,12 @@ static int get_dimm_config(struct mem_ctl_info *mci) | |||
1699 | } | 1598 | } |
1700 | edac_dbg(4, "Channel #%d MTR%d = %x\n", i, j, mtr); | 1599 | edac_dbg(4, "Channel #%d MTR%d = %x\n", i, j, mtr); |
1701 | if (IS_DIMM_PRESENT(mtr)) { | 1600 | if (IS_DIMM_PRESENT(mtr)) { |
1601 | if (!IS_ECC_ENABLED(pvt->info.mcmtr)) { | ||
1602 | sbridge_printk(KERN_ERR, "CPU SrcID #%d, Ha #%d, Channel #%d has DIMMs, but ECC is disabled\n", | ||
1603 | pvt->sbridge_dev->source_id, | ||
1604 | pvt->sbridge_dev->dom, i); | ||
1605 | return -ENODEV; | ||
1606 | } | ||
1702 | pvt->channel[i].dimms++; | 1607 | pvt->channel[i].dimms++; |
1703 | 1608 | ||
1704 | ranks = numrank(pvt->info.type, mtr); | 1609 | ranks = numrank(pvt->info.type, mtr); |
@@ -1717,7 +1622,7 @@ static int get_dimm_config(struct mem_ctl_info *mci) | |||
1717 | npages = MiB_TO_PAGES(size); | 1622 | npages = MiB_TO_PAGES(size); |
1718 | 1623 | ||
1719 | edac_dbg(0, "mc#%d: ha %d channel %d, dimm %d, %lld Mb (%d pages) bank: %d, rank: %d, row: %#x, col: %#x\n", | 1624 | edac_dbg(0, "mc#%d: ha %d channel %d, dimm %d, %lld Mb (%d pages) bank: %d, rank: %d, row: %#x, col: %#x\n", |
1720 | pvt->sbridge_dev->mc, i/4, i%4, j, | 1625 | pvt->sbridge_dev->mc, pvt->sbridge_dev->dom, i, j, |
1721 | size, npages, | 1626 | size, npages, |
1722 | banks, ranks, rows, cols); | 1627 | banks, ranks, rows, cols); |
1723 | 1628 | ||
@@ -1727,8 +1632,8 @@ static int get_dimm_config(struct mem_ctl_info *mci) | |||
1727 | dimm->mtype = mtype; | 1632 | dimm->mtype = mtype; |
1728 | dimm->edac_mode = mode; | 1633 | dimm->edac_mode = mode; |
1729 | snprintf(dimm->label, sizeof(dimm->label), | 1634 | snprintf(dimm->label, sizeof(dimm->label), |
1730 | "CPU_SrcID#%u_Ha#%u_Chan#%u_DIMM#%u", | 1635 | "CPU_SrcID#%u_Ha#%u_Chan#%u_DIMM#%u", |
1731 | pvt->sbridge_dev->source_id, i/4, i%4, j); | 1636 | pvt->sbridge_dev->source_id, pvt->sbridge_dev->dom, i, j); |
1732 | } | 1637 | } |
1733 | } | 1638 | } |
1734 | } | 1639 | } |
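
With one memory controller instance per home agent, the channel index i in this loop covers only the channels of that agent, so the debug line and the DIMM label take the home-agent number from sbridge_dev->dom instead of deriving it as i/4 with the channel as i%4. A stand-alone illustration of the two naming schemes (all values are made up):

    #include <stdio.h>

    int main(void)
    {
            unsigned int src = 0, dimm = 0;

            /* old scheme: one MC per socket, i spans both home agents */
            unsigned int i_old = 5;
            printf("CPU_SrcID#%u_Ha#%u_Chan#%u_DIMM#%u\n",
                   src, i_old / 4, i_old % 4, dimm);    /* ..._Ha#1_Chan#1_... */

            /* new scheme: one MC per home agent, dom names the agent */
            unsigned int dom = 1, i_new = 1;
            printf("CPU_SrcID#%u_Ha#%u_Chan#%u_DIMM#%u\n",
                   src, dom, i_new, dimm);              /* same DIMM, same label */
            return 0;
    }
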
@@ -1736,6 +1641,65 @@ static int get_dimm_config(struct mem_ctl_info *mci) | |||
1736 | return 0; | 1641 | return 0; |
1737 | } | 1642 | } |
1738 | 1643 | ||
1644 | static int get_dimm_config(struct mem_ctl_info *mci) | ||
1645 | { | ||
1646 | struct sbridge_pvt *pvt = mci->pvt_info; | ||
1647 | u64 knl_mc_sizes[KNL_MAX_CHANNELS]; | ||
1648 | enum edac_type mode; | ||
1649 | u32 reg; | ||
1650 | |||
1651 | if (pvt->info.type == HASWELL || pvt->info.type == BROADWELL) { | ||
1652 | pci_read_config_dword(pvt->pci_ha, HASWELL_HASYSDEFEATURE2, ®); | ||
1653 | pvt->is_chan_hash = GET_BITFIELD(reg, 21, 21); | ||
1654 | } | ||
1655 | pvt->sbridge_dev->node_id = pvt->info.get_node_id(pvt); | ||
1656 | edac_dbg(0, "mc#%d: Node ID: %d, source ID: %d\n", | ||
1657 | pvt->sbridge_dev->mc, | ||
1658 | pvt->sbridge_dev->node_id, | ||
1659 | pvt->sbridge_dev->source_id); | ||
1660 | |||
1661 | /* KNL doesn't support mirroring or lockstep, | ||
1662 | * and is always closed page | ||
1663 | */ | ||
1664 | if (pvt->info.type == KNIGHTS_LANDING) { | ||
1665 | mode = EDAC_S4ECD4ED; | ||
1666 | pvt->is_mirrored = false; | ||
1667 | |||
1668 | if (knl_get_dimm_capacity(pvt, knl_mc_sizes) != 0) | ||
1669 | return -1; | ||
1670 | pci_read_config_dword(pvt->pci_ta, KNL_MCMTR, &pvt->info.mcmtr); | ||
1671 | } else { | ||
1672 | pci_read_config_dword(pvt->pci_ras, RASENABLES, ®); | ||
1673 | if (IS_MIRROR_ENABLED(reg)) { | ||
1674 | edac_dbg(0, "Memory mirror is enabled\n"); | ||
1675 | pvt->is_mirrored = true; | ||
1676 | } else { | ||
1677 | edac_dbg(0, "Memory mirror is disabled\n"); | ||
1678 | pvt->is_mirrored = false; | ||
1679 | } | ||
1680 | |||
1681 | pci_read_config_dword(pvt->pci_ta, MCMTR, &pvt->info.mcmtr); | ||
1682 | if (IS_LOCKSTEP_ENABLED(pvt->info.mcmtr)) { | ||
1683 | edac_dbg(0, "Lockstep is enabled\n"); | ||
1684 | mode = EDAC_S8ECD8ED; | ||
1685 | pvt->is_lockstep = true; | ||
1686 | } else { | ||
1687 | edac_dbg(0, "Lockstep is disabled\n"); | ||
1688 | mode = EDAC_S4ECD4ED; | ||
1689 | pvt->is_lockstep = false; | ||
1690 | } | ||
1691 | if (IS_CLOSE_PG(pvt->info.mcmtr)) { | ||
1692 | edac_dbg(0, "address map is on closed page mode\n"); | ||
1693 | pvt->is_close_pg = true; | ||
1694 | } else { | ||
1695 | edac_dbg(0, "address map is on open page mode\n"); | ||
1696 | pvt->is_close_pg = false; | ||
1697 | } | ||
1698 | } | ||
1699 | |||
1700 | return __populate_dimms(mci, knl_mc_sizes, mode); | ||
1701 | } | ||
1702 | |||
1739 | static void get_memory_layout(const struct mem_ctl_info *mci) | 1703 | static void get_memory_layout(const struct mem_ctl_info *mci) |
1740 | { | 1704 | { |
1741 | struct sbridge_pvt *pvt = mci->pvt_info; | 1705 | struct sbridge_pvt *pvt = mci->pvt_info; |
@@ -1816,8 +1780,7 @@ static void get_memory_layout(const struct mem_ctl_info *mci) | |||
1816 | */ | 1780 | */ |
1817 | prv = 0; | 1781 | prv = 0; |
1818 | for (n_tads = 0; n_tads < MAX_TAD; n_tads++) { | 1782 | for (n_tads = 0; n_tads < MAX_TAD; n_tads++) { |
1819 | pci_read_config_dword(pvt->pci_ha0, tad_dram_rule[n_tads], | 1783 | pci_read_config_dword(pvt->pci_ha, tad_dram_rule[n_tads], ®); |
1820 | ®); | ||
1821 | limit = TAD_LIMIT(reg); | 1784 | limit = TAD_LIMIT(reg); |
1822 | if (limit <= prv) | 1785 | if (limit <= prv) |
1823 | break; | 1786 | break; |
@@ -1899,12 +1862,12 @@ static void get_memory_layout(const struct mem_ctl_info *mci) | |||
1899 | } | 1862 | } |
1900 | } | 1863 | } |
1901 | 1864 | ||
1902 | static struct mem_ctl_info *get_mci_for_node_id(u8 node_id) | 1865 | static struct mem_ctl_info *get_mci_for_node_id(u8 node_id, u8 ha) |
1903 | { | 1866 | { |
1904 | struct sbridge_dev *sbridge_dev; | 1867 | struct sbridge_dev *sbridge_dev; |
1905 | 1868 | ||
1906 | list_for_each_entry(sbridge_dev, &sbridge_edac_list, list) { | 1869 | list_for_each_entry(sbridge_dev, &sbridge_edac_list, list) { |
1907 | if (sbridge_dev->node_id == node_id) | 1870 | if (sbridge_dev->node_id == node_id && sbridge_dev->dom == ha) |
1908 | return sbridge_dev->mci; | 1871 | return sbridge_dev->mci; |
1909 | } | 1872 | } |
1910 | return NULL; | 1873 | return NULL; |
@@ -1925,7 +1888,7 @@ static int get_memory_error_data(struct mem_ctl_info *mci, | |||
1925 | int interleave_mode, shiftup = 0; | 1888 | int interleave_mode, shiftup = 0; |
1926 | unsigned sad_interleave[pvt->info.max_interleave]; | 1889 | unsigned sad_interleave[pvt->info.max_interleave]; |
1927 | u32 reg, dram_rule; | 1890 | u32 reg, dram_rule; |
1928 | u8 ch_way, sck_way, pkg, sad_ha = 0, ch_add = 0; | 1891 | u8 ch_way, sck_way, pkg, sad_ha = 0; |
1929 | u32 tad_offset; | 1892 | u32 tad_offset; |
1930 | u32 rir_way; | 1893 | u32 rir_way; |
1931 | u32 mb, gb; | 1894 | u32 mb, gb; |
@@ -2038,13 +2001,10 @@ static int get_memory_error_data(struct mem_ctl_info *mci, | |||
2038 | pkg = sad_pkg(pvt->info.interleave_pkg, reg, idx); | 2001 | pkg = sad_pkg(pvt->info.interleave_pkg, reg, idx); |
2039 | *socket = sad_pkg_socket(pkg); | 2002 | *socket = sad_pkg_socket(pkg); |
2040 | sad_ha = sad_pkg_ha(pkg); | 2003 | sad_ha = sad_pkg_ha(pkg); |
2041 | if (sad_ha) | ||
2042 | ch_add = 4; | ||
2043 | 2004 | ||
2044 | if (a7mode) { | 2005 | if (a7mode) { |
2045 | /* MCChanShiftUpEnable */ | 2006 | /* MCChanShiftUpEnable */ |
2046 | pci_read_config_dword(pvt->pci_ha0, | 2007 | pci_read_config_dword(pvt->pci_ha, HASWELL_HASYSDEFEATURE2, ®); |
2047 | HASWELL_HASYSDEFEATURE2, ®); | ||
2048 | shiftup = GET_BITFIELD(reg, 22, 22); | 2008 | shiftup = GET_BITFIELD(reg, 22, 22); |
2049 | } | 2009 | } |
2050 | 2010 | ||
@@ -2056,8 +2016,6 @@ static int get_memory_error_data(struct mem_ctl_info *mci, | |||
2056 | pkg = sad_pkg(pvt->info.interleave_pkg, reg, idx); | 2016 | pkg = sad_pkg(pvt->info.interleave_pkg, reg, idx); |
2057 | *socket = sad_pkg_socket(pkg); | 2017 | *socket = sad_pkg_socket(pkg); |
2058 | sad_ha = sad_pkg_ha(pkg); | 2018 | sad_ha = sad_pkg_ha(pkg); |
2059 | if (sad_ha) | ||
2060 | ch_add = 4; | ||
2061 | edac_dbg(0, "SAD interleave package: %d = CPU socket %d, HA %d\n", | 2019 | edac_dbg(0, "SAD interleave package: %d = CPU socket %d, HA %d\n", |
2062 | idx, *socket, sad_ha); | 2020 | idx, *socket, sad_ha); |
2063 | } | 2021 | } |
@@ -2068,7 +2026,7 @@ static int get_memory_error_data(struct mem_ctl_info *mci, | |||
2068 | * Move to the proper node structure, in order to access the | 2026 | * Move to the proper node structure, in order to access the |
2069 | * right PCI registers | 2027 | * right PCI registers |
2070 | */ | 2028 | */ |
2071 | new_mci = get_mci_for_node_id(*socket); | 2029 | new_mci = get_mci_for_node_id(*socket, sad_ha); |
2072 | if (!new_mci) { | 2030 | if (!new_mci) { |
2073 | sprintf(msg, "Struct for socket #%u wasn't initialized", | 2031 | sprintf(msg, "Struct for socket #%u wasn't initialized", |
2074 | *socket); | 2032 | *socket); |
@@ -2081,14 +2039,7 @@ static int get_memory_error_data(struct mem_ctl_info *mci, | |||
2081 | * Step 2) Get memory channel | 2039 | * Step 2) Get memory channel |
2082 | */ | 2040 | */ |
2083 | prv = 0; | 2041 | prv = 0; |
2084 | if (pvt->info.type == SANDY_BRIDGE) | 2042 | pci_ha = pvt->pci_ha; |
2085 | pci_ha = pvt->pci_ha0; | ||
2086 | else { | ||
2087 | if (sad_ha) | ||
2088 | pci_ha = pvt->pci_ha1; | ||
2089 | else | ||
2090 | pci_ha = pvt->pci_ha0; | ||
2091 | } | ||
2092 | for (n_tads = 0; n_tads < MAX_TAD; n_tads++) { | 2043 | for (n_tads = 0; n_tads < MAX_TAD; n_tads++) { |
2093 | pci_read_config_dword(pci_ha, tad_dram_rule[n_tads], ®); | 2044 | pci_read_config_dword(pci_ha, tad_dram_rule[n_tads], ®); |
2094 | limit = TAD_LIMIT(reg); | 2045 | limit = TAD_LIMIT(reg); |
@@ -2139,9 +2090,7 @@ static int get_memory_error_data(struct mem_ctl_info *mci, | |||
2139 | } | 2090 | } |
2140 | *channel_mask = 1 << base_ch; | 2091 | *channel_mask = 1 << base_ch; |
2141 | 2092 | ||
2142 | pci_read_config_dword(pvt->pci_tad[ch_add + base_ch], | 2093 | pci_read_config_dword(pvt->pci_tad[base_ch], tad_ch_nilv_offset[n_tads], &tad_offset); |
2143 | tad_ch_nilv_offset[n_tads], | ||
2144 | &tad_offset); | ||
2145 | 2094 | ||
2146 | if (pvt->is_mirrored) { | 2095 | if (pvt->is_mirrored) { |
2147 | *channel_mask |= 1 << ((base_ch + 2) % 4); | 2096 | *channel_mask |= 1 << ((base_ch + 2) % 4); |
@@ -2192,9 +2141,7 @@ static int get_memory_error_data(struct mem_ctl_info *mci, | |||
2192 | * Step 3) Decode rank | 2141 | * Step 3) Decode rank |
2193 | */ | 2142 | */ |
2194 | for (n_rir = 0; n_rir < MAX_RIR_RANGES; n_rir++) { | 2143 | for (n_rir = 0; n_rir < MAX_RIR_RANGES; n_rir++) { |
2195 | pci_read_config_dword(pvt->pci_tad[ch_add + base_ch], | 2144 | pci_read_config_dword(pvt->pci_tad[base_ch], rir_way_limit[n_rir], ®); |
2196 | rir_way_limit[n_rir], | ||
2197 | ®); | ||
2198 | 2145 | ||
2199 | if (!IS_RIR_VALID(reg)) | 2146 | if (!IS_RIR_VALID(reg)) |
2200 | continue; | 2147 | continue; |
@@ -2222,9 +2169,7 @@ static int get_memory_error_data(struct mem_ctl_info *mci, | |||
2222 | idx = (ch_addr >> 13); /* FIXME: Datasheet says to shift by 15 */ | 2169 | idx = (ch_addr >> 13); /* FIXME: Datasheet says to shift by 15 */ |
2223 | idx %= 1 << rir_way; | 2170 | idx %= 1 << rir_way; |
2224 | 2171 | ||
2225 | pci_read_config_dword(pvt->pci_tad[ch_add + base_ch], | 2172 | pci_read_config_dword(pvt->pci_tad[base_ch], rir_offset[n_rir][idx], ®); |
2226 | rir_offset[n_rir][idx], | ||
2227 | ®); | ||
2228 | *rank = RIR_RNK_TGT(pvt->info.type, reg); | 2173 | *rank = RIR_RNK_TGT(pvt->info.type, reg); |
2229 | 2174 | ||
2230 | edac_dbg(0, "RIR#%d: channel address 0x%08Lx < 0x%08Lx, RIR interleave %d, index %d\n", | 2175 | edac_dbg(0, "RIR#%d: channel address 0x%08Lx < 0x%08Lx, RIR interleave %d, index %d\n", |
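
Before this split a single MC covered the whole socket, so pvt->pci_tad[] effectively held four entries per home agent and accesses for the second agent were shifted by ch_add = 4 (and error reports used 4*ha + channel, as seen further down). With one MC per home agent the array is indexed by base_ch directly. The index arithmetic, shown stand-alone:

    #include <stdio.h>

    int main(void)
    {
            int ha = 1, base_ch = 2;

            /* old: flat per-socket array, second HA offset by 4 */
            int flat_index = 4 * ha + base_ch;      /* 6 */

            /* new: per-HA array, no offset needed */
            int per_ha_index = base_ch;             /* 2 */

            printf("flat index %d becomes per-HA index %d\n",
                   flat_index, per_ha_index);
            return 0;
    }
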
@@ -2277,10 +2222,11 @@ static int sbridge_get_onedevice(struct pci_dev **prev, | |||
2277 | const unsigned devno, | 2222 | const unsigned devno, |
2278 | const int multi_bus) | 2223 | const int multi_bus) |
2279 | { | 2224 | { |
2280 | struct sbridge_dev *sbridge_dev; | 2225 | struct sbridge_dev *sbridge_dev = NULL; |
2281 | const struct pci_id_descr *dev_descr = &table->descr[devno]; | 2226 | const struct pci_id_descr *dev_descr = &table->descr[devno]; |
2282 | struct pci_dev *pdev = NULL; | 2227 | struct pci_dev *pdev = NULL; |
2283 | u8 bus = 0; | 2228 | u8 bus = 0; |
2229 | int i = 0; | ||
2284 | 2230 | ||
2285 | sbridge_printk(KERN_DEBUG, | 2231 | sbridge_printk(KERN_DEBUG, |
2286 | "Seeking for: PCI ID %04x:%04x\n", | 2232 | "Seeking for: PCI ID %04x:%04x\n", |
@@ -2311,9 +2257,14 @@ static int sbridge_get_onedevice(struct pci_dev **prev, | |||
2311 | } | 2257 | } |
2312 | bus = pdev->bus->number; | 2258 | bus = pdev->bus->number; |
2313 | 2259 | ||
2314 | sbridge_dev = get_sbridge_dev(bus, multi_bus); | 2260 | next_imc: |
2261 | sbridge_dev = get_sbridge_dev(bus, dev_descr->dom, multi_bus, sbridge_dev); | ||
2315 | if (!sbridge_dev) { | 2262 | if (!sbridge_dev) { |
2316 | sbridge_dev = alloc_sbridge_dev(bus, table); | 2263 | |
2264 | if (dev_descr->dom == SOCK) | ||
2265 | goto out_imc; | ||
2266 | |||
2267 | sbridge_dev = alloc_sbridge_dev(bus, dev_descr->dom, table); | ||
2317 | if (!sbridge_dev) { | 2268 | if (!sbridge_dev) { |
2318 | pci_dev_put(pdev); | 2269 | pci_dev_put(pdev); |
2319 | return -ENOMEM; | 2270 | return -ENOMEM; |
@@ -2321,7 +2272,7 @@ static int sbridge_get_onedevice(struct pci_dev **prev, | |||
2321 | (*num_mc)++; | 2272 | (*num_mc)++; |
2322 | } | 2273 | } |
2323 | 2274 | ||
2324 | if (sbridge_dev->pdev[devno]) { | 2275 | if (sbridge_dev->pdev[sbridge_dev->i_devs]) { |
2325 | sbridge_printk(KERN_ERR, | 2276 | sbridge_printk(KERN_ERR, |
2326 | "Duplicated device for %04x:%04x\n", | 2277 | "Duplicated device for %04x:%04x\n", |
2327 | PCI_VENDOR_ID_INTEL, dev_descr->dev_id); | 2278 | PCI_VENDOR_ID_INTEL, dev_descr->dev_id); |
@@ -2329,8 +2280,16 @@ static int sbridge_get_onedevice(struct pci_dev **prev, | |||
2329 | return -ENODEV; | 2280 | return -ENODEV; |
2330 | } | 2281 | } |
2331 | 2282 | ||
2332 | sbridge_dev->pdev[devno] = pdev; | 2283 | sbridge_dev->pdev[sbridge_dev->i_devs++] = pdev; |
2284 | |||
2285 | /* pdev belongs to more than one IMC, do extra gets */ | ||
2286 | if (++i > 1) | ||
2287 | pci_dev_get(pdev); | ||
2333 | 2288 | ||
2289 | if (dev_descr->dom == SOCK && i < table->n_imcs_per_sock) | ||
2290 | goto next_imc; | ||
2291 | |||
2292 | out_imc: | ||
2334 | /* Be sure that the device is enabled */ | 2293 | /* Be sure that the device is enabled */ |
2335 | if (unlikely(pci_enable_device(pdev) < 0)) { | 2294 | if (unlikely(pci_enable_device(pdev) < 0)) { |
2336 | sbridge_printk(KERN_ERR, | 2295 | sbridge_printk(KERN_ERR, |
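
A device whose descriptor domain is SOCK is shared by every IMC on the socket, so each attachment after the first takes an extra reference with pci_dev_get(); the later per-IMC pci_dev_put() calls then each drop exactly one reference. A hedged sketch of that pattern (the helper is made up, not the driver's code):

    #include <linux/pci.h>

    /* Illustrative only: attach one pci_dev to a second owner, taking an
     * extra reference so each owner can pci_dev_put() independently. */
    static void attach_shared_pdev(struct pci_dev *pdev,
                                   struct pci_dev **slot, bool already_owned)
    {
            if (already_owned)
                    pdev = pci_dev_get(pdev);
            *slot = pdev;
    }
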
@@ -2374,7 +2333,7 @@ static int sbridge_get_all_devices(u8 *num_mc, | |||
2374 | if (table->type == KNIGHTS_LANDING) | 2333 | if (table->type == KNIGHTS_LANDING) |
2375 | allow_dups = multi_bus = 1; | 2334 | allow_dups = multi_bus = 1; |
2376 | while (table && table->descr) { | 2335 | while (table && table->descr) { |
2377 | for (i = 0; i < table->n_devs; i++) { | 2336 | for (i = 0; i < table->n_devs_per_sock; i++) { |
2378 | if (!allow_dups || i == 0 || | 2337 | if (!allow_dups || i == 0 || |
2379 | table->descr[i].dev_id != | 2338 | table->descr[i].dev_id != |
2380 | table->descr[i-1].dev_id) { | 2339 | table->descr[i-1].dev_id) { |
@@ -2385,7 +2344,7 @@ static int sbridge_get_all_devices(u8 *num_mc, | |||
2385 | table, i, multi_bus); | 2344 | table, i, multi_bus); |
2386 | if (rc < 0) { | 2345 | if (rc < 0) { |
2387 | if (i == 0) { | 2346 | if (i == 0) { |
2388 | i = table->n_devs; | 2347 | i = table->n_devs_per_sock; |
2389 | break; | 2348 | break; |
2390 | } | 2349 | } |
2391 | sbridge_put_all_devices(); | 2350 | sbridge_put_all_devices(); |
@@ -2399,6 +2358,13 @@ static int sbridge_get_all_devices(u8 *num_mc, | |||
2399 | return 0; | 2358 | return 0; |
2400 | } | 2359 | } |
2401 | 2360 | ||
2361 | /* | ||
2362 | * Device IDs for {SBRIDGE,IBRIDGE,HASWELL,BROADWELL}_IMC_HA0_TAD0 are in | ||
2363 | * the format: XXXa. So we can convert from a device to the corresponding | ||
2364 | * channel like this | ||
2365 | */ | ||
2366 | #define TAD_DEV_TO_CHAN(dev) (((dev) & 0xf) - 0xa) | ||
2367 | |||
2402 | static int sbridge_mci_bind_devs(struct mem_ctl_info *mci, | 2368 | static int sbridge_mci_bind_devs(struct mem_ctl_info *mci, |
2403 | struct sbridge_dev *sbridge_dev) | 2369 | struct sbridge_dev *sbridge_dev) |
2404 | { | 2370 | { |
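
The TAD_DEV_TO_CHAN() comment relies on the TAD device IDs ending in 0xa through 0xd for channels 0 through 3, so masking the low nibble and subtracting 0xa recovers the channel no matter which home agent the device belongs to. The arithmetic, demonstrated stand-alone (the sample IDs are hypothetical):

    #include <stdio.h>

    #define TAD_DEV_TO_CHAN(dev) (((dev) & 0xf) - 0xa)

    int main(void)
    {
            /* hypothetical device IDs whose low nibble runs 0xa..0xd */
            unsigned int ids[] = { 0x3caa, 0x3cab, 0x3cac, 0x3cad };

            for (int i = 0; i < 4; i++)
                    printf("device %#x -> channel %u\n",
                           ids[i], TAD_DEV_TO_CHAN(ids[i]));
            /* prints channels 0, 1, 2 and 3 */
            return 0;
    }
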
@@ -2423,7 +2389,7 @@ static int sbridge_mci_bind_devs(struct mem_ctl_info *mci, | |||
2423 | pvt->pci_br0 = pdev; | 2389 | pvt->pci_br0 = pdev; |
2424 | break; | 2390 | break; |
2425 | case PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_HA0: | 2391 | case PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_HA0: |
2426 | pvt->pci_ha0 = pdev; | 2392 | pvt->pci_ha = pdev; |
2427 | break; | 2393 | break; |
2428 | case PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TA: | 2394 | case PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TA: |
2429 | pvt->pci_ta = pdev; | 2395 | pvt->pci_ta = pdev; |
@@ -2436,7 +2402,7 @@ static int sbridge_mci_bind_devs(struct mem_ctl_info *mci, | |||
2436 | case PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD2: | 2402 | case PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD2: |
2437 | case PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD3: | 2403 | case PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD3: |
2438 | { | 2404 | { |
2439 | int id = pdev->device - PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD0; | 2405 | int id = TAD_DEV_TO_CHAN(pdev->device); |
2440 | pvt->pci_tad[id] = pdev; | 2406 | pvt->pci_tad[id] = pdev; |
2441 | saw_chan_mask |= 1 << id; | 2407 | saw_chan_mask |= 1 << id; |
2442 | } | 2408 | } |
@@ -2455,7 +2421,7 @@ static int sbridge_mci_bind_devs(struct mem_ctl_info *mci, | |||
2455 | } | 2421 | } |
2456 | 2422 | ||
2457 | /* Check if everything were registered */ | 2423 | /* Check if everything were registered */ |
2458 | if (!pvt->pci_sad0 || !pvt->pci_sad1 || !pvt->pci_ha0 || | 2424 | if (!pvt->pci_sad0 || !pvt->pci_sad1 || !pvt->pci_ha || |
2459 | !pvt->pci_ras || !pvt->pci_ta) | 2425 | !pvt->pci_ras || !pvt->pci_ta) |
2460 | goto enodev; | 2426 | goto enodev; |
2461 | 2427 | ||
@@ -2488,19 +2454,26 @@ static int ibridge_mci_bind_devs(struct mem_ctl_info *mci, | |||
2488 | 2454 | ||
2489 | switch (pdev->device) { | 2455 | switch (pdev->device) { |
2490 | case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0: | 2456 | case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0: |
2491 | pvt->pci_ha0 = pdev; | 2457 | case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1: |
2458 | pvt->pci_ha = pdev; | ||
2492 | break; | 2459 | break; |
2493 | case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TA: | 2460 | case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TA: |
2461 | case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TA: | ||
2494 | pvt->pci_ta = pdev; | 2462 | pvt->pci_ta = pdev; |
2495 | case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_RAS: | 2463 | case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_RAS: |
2464 | case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_RAS: | ||
2496 | pvt->pci_ras = pdev; | 2465 | pvt->pci_ras = pdev; |
2497 | break; | 2466 | break; |
2498 | case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD0: | 2467 | case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD0: |
2499 | case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD1: | 2468 | case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD1: |
2500 | case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD2: | 2469 | case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD2: |
2501 | case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD3: | 2470 | case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD3: |
2471 | case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TAD0: | ||
2472 | case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TAD1: | ||
2473 | case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TAD2: | ||
2474 | case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TAD3: | ||
2502 | { | 2475 | { |
2503 | int id = pdev->device - PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD0; | 2476 | int id = TAD_DEV_TO_CHAN(pdev->device); |
2504 | pvt->pci_tad[id] = pdev; | 2477 | pvt->pci_tad[id] = pdev; |
2505 | saw_chan_mask |= 1 << id; | 2478 | saw_chan_mask |= 1 << id; |
2506 | } | 2479 | } |
@@ -2520,19 +2493,6 @@ static int ibridge_mci_bind_devs(struct mem_ctl_info *mci, | |||
2520 | case PCI_DEVICE_ID_INTEL_IBRIDGE_BR1: | 2493 | case PCI_DEVICE_ID_INTEL_IBRIDGE_BR1: |
2521 | pvt->pci_br1 = pdev; | 2494 | pvt->pci_br1 = pdev; |
2522 | break; | 2495 | break; |
2523 | case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1: | ||
2524 | pvt->pci_ha1 = pdev; | ||
2525 | break; | ||
2526 | case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TAD0: | ||
2527 | case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TAD1: | ||
2528 | case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TAD2: | ||
2529 | case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TAD3: | ||
2530 | { | ||
2531 | int id = pdev->device - PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TAD0 + 4; | ||
2532 | pvt->pci_tad[id] = pdev; | ||
2533 | saw_chan_mask |= 1 << id; | ||
2534 | } | ||
2535 | break; | ||
2536 | default: | 2496 | default: |
2537 | goto error; | 2497 | goto error; |
2538 | } | 2498 | } |
@@ -2544,13 +2504,12 @@ static int ibridge_mci_bind_devs(struct mem_ctl_info *mci, | |||
2544 | } | 2504 | } |
2545 | 2505 | ||
2546 | /* Check if everything were registered */ | 2506 | /* Check if everything were registered */ |
2547 | if (!pvt->pci_sad0 || !pvt->pci_ha0 || !pvt->pci_br0 || | 2507 | if (!pvt->pci_sad0 || !pvt->pci_ha || !pvt->pci_br0 || |
2548 | !pvt->pci_br1 || !pvt->pci_ras || !pvt->pci_ta) | 2508 | !pvt->pci_br1 || !pvt->pci_ras || !pvt->pci_ta) |
2549 | goto enodev; | 2509 | goto enodev; |
2550 | 2510 | ||
2551 | if (saw_chan_mask != 0x0f && /* -EN */ | 2511 | if (saw_chan_mask != 0x0f && /* -EN/-EX */ |
2552 | saw_chan_mask != 0x33 && /* -EP */ | 2512 | saw_chan_mask != 0x03) /* -EP */ |
2553 | saw_chan_mask != 0xff) /* -EX */ | ||
2554 | goto enodev; | 2513 | goto enodev; |
2555 | return 0; | 2514 | return 0; |
2556 | 2515 | ||
@@ -2593,32 +2552,27 @@ static int haswell_mci_bind_devs(struct mem_ctl_info *mci, | |||
2593 | pvt->pci_sad1 = pdev; | 2552 | pvt->pci_sad1 = pdev; |
2594 | break; | 2553 | break; |
2595 | case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0: | 2554 | case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0: |
2596 | pvt->pci_ha0 = pdev; | 2555 | case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1: |
2556 | pvt->pci_ha = pdev; | ||
2597 | break; | 2557 | break; |
2598 | case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TA: | 2558 | case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TA: |
2559 | case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TA: | ||
2599 | pvt->pci_ta = pdev; | 2560 | pvt->pci_ta = pdev; |
2600 | break; | 2561 | break; |
2601 | case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_THERMAL: | 2562 | case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TM: |
2563 | case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TM: | ||
2602 | pvt->pci_ras = pdev; | 2564 | pvt->pci_ras = pdev; |
2603 | break; | 2565 | break; |
2604 | case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD0: | 2566 | case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD0: |
2605 | case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD1: | 2567 | case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD1: |
2606 | case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD2: | 2568 | case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD2: |
2607 | case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD3: | 2569 | case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD3: |
2608 | { | ||
2609 | int id = pdev->device - PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD0; | ||
2610 | |||
2611 | pvt->pci_tad[id] = pdev; | ||
2612 | saw_chan_mask |= 1 << id; | ||
2613 | } | ||
2614 | break; | ||
2615 | case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TAD0: | 2570 | case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TAD0: |
2616 | case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TAD1: | 2571 | case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TAD1: |
2617 | case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TAD2: | 2572 | case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TAD2: |
2618 | case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TAD3: | 2573 | case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TAD3: |
2619 | { | 2574 | { |
2620 | int id = pdev->device - PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TAD0 + 4; | 2575 | int id = TAD_DEV_TO_CHAN(pdev->device); |
2621 | |||
2622 | pvt->pci_tad[id] = pdev; | 2576 | pvt->pci_tad[id] = pdev; |
2623 | saw_chan_mask |= 1 << id; | 2577 | saw_chan_mask |= 1 << id; |
2624 | } | 2578 | } |
@@ -2630,12 +2584,6 @@ static int haswell_mci_bind_devs(struct mem_ctl_info *mci, | |||
2630 | if (!pvt->pci_ddrio) | 2584 | if (!pvt->pci_ddrio) |
2631 | pvt->pci_ddrio = pdev; | 2585 | pvt->pci_ddrio = pdev; |
2632 | break; | 2586 | break; |
2633 | case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1: | ||
2634 | pvt->pci_ha1 = pdev; | ||
2635 | break; | ||
2636 | case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TA: | ||
2637 | pvt->pci_ha1_ta = pdev; | ||
2638 | break; | ||
2639 | default: | 2587 | default: |
2640 | break; | 2588 | break; |
2641 | } | 2589 | } |
@@ -2647,13 +2595,12 @@ static int haswell_mci_bind_devs(struct mem_ctl_info *mci, | |||
2647 | } | 2595 | } |
2648 | 2596 | ||
2649 | /* Check if everything were registered */ | 2597 | /* Check if everything were registered */ |
2650 | if (!pvt->pci_sad0 || !pvt->pci_ha0 || !pvt->pci_sad1 || | 2598 | if (!pvt->pci_sad0 || !pvt->pci_ha || !pvt->pci_sad1 || |
2651 | !pvt->pci_ras || !pvt->pci_ta || !pvt->info.pci_vtd) | 2599 | !pvt->pci_ras || !pvt->pci_ta || !pvt->info.pci_vtd) |
2652 | goto enodev; | 2600 | goto enodev; |
2653 | 2601 | ||
2654 | if (saw_chan_mask != 0x0f && /* -EN */ | 2602 | if (saw_chan_mask != 0x0f && /* -EN/-EX */ |
2655 | saw_chan_mask != 0x33 && /* -EP */ | 2603 | saw_chan_mask != 0x03) /* -EP */ |
2656 | saw_chan_mask != 0xff) /* -EX */ | ||
2657 | goto enodev; | 2604 | goto enodev; |
2658 | return 0; | 2605 | return 0; |
2659 | 2606 | ||
@@ -2690,30 +2637,27 @@ static int broadwell_mci_bind_devs(struct mem_ctl_info *mci, | |||
2690 | pvt->pci_sad1 = pdev; | 2637 | pvt->pci_sad1 = pdev; |
2691 | break; | 2638 | break; |
2692 | case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0: | 2639 | case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0: |
2693 | pvt->pci_ha0 = pdev; | 2640 | case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1: |
2641 | pvt->pci_ha = pdev; | ||
2694 | break; | 2642 | break; |
2695 | case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TA: | 2643 | case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TA: |
2644 | case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TA: | ||
2696 | pvt->pci_ta = pdev; | 2645 | pvt->pci_ta = pdev; |
2697 | break; | 2646 | break; |
2698 | case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_THERMAL: | 2647 | case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TM: |
2648 | case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TM: | ||
2699 | pvt->pci_ras = pdev; | 2649 | pvt->pci_ras = pdev; |
2700 | break; | 2650 | break; |
2701 | case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TAD0: | 2651 | case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TAD0: |
2702 | case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TAD1: | 2652 | case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TAD1: |
2703 | case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TAD2: | 2653 | case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TAD2: |
2704 | case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TAD3: | 2654 | case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TAD3: |
2705 | { | ||
2706 | int id = pdev->device - PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TAD0; | ||
2707 | pvt->pci_tad[id] = pdev; | ||
2708 | saw_chan_mask |= 1 << id; | ||
2709 | } | ||
2710 | break; | ||
2711 | case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TAD0: | 2655 | case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TAD0: |
2712 | case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TAD1: | 2656 | case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TAD1: |
2713 | case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TAD2: | 2657 | case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TAD2: |
2714 | case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TAD3: | 2658 | case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TAD3: |
2715 | { | 2659 | { |
2716 | int id = pdev->device - PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TAD0 + 4; | 2660 | int id = TAD_DEV_TO_CHAN(pdev->device); |
2717 | pvt->pci_tad[id] = pdev; | 2661 | pvt->pci_tad[id] = pdev; |
2718 | saw_chan_mask |= 1 << id; | 2662 | saw_chan_mask |= 1 << id; |
2719 | } | 2663 | } |
@@ -2721,12 +2665,6 @@ static int broadwell_mci_bind_devs(struct mem_ctl_info *mci, | |||
2721 | case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_DDRIO0: | 2665 | case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_DDRIO0: |
2722 | pvt->pci_ddrio = pdev; | 2666 | pvt->pci_ddrio = pdev; |
2723 | break; | 2667 | break; |
2724 | case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1: | ||
2725 | pvt->pci_ha1 = pdev; | ||
2726 | break; | ||
2727 | case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TA: | ||
2728 | pvt->pci_ha1_ta = pdev; | ||
2729 | break; | ||
2730 | default: | 2668 | default: |
2731 | break; | 2669 | break; |
2732 | } | 2670 | } |
@@ -2738,13 +2676,12 @@ static int broadwell_mci_bind_devs(struct mem_ctl_info *mci, | |||
2738 | } | 2676 | } |
2739 | 2677 | ||
2740 | /* Check if everything were registered */ | 2678 | /* Check if everything were registered */ |
2741 | if (!pvt->pci_sad0 || !pvt->pci_ha0 || !pvt->pci_sad1 || | 2679 | if (!pvt->pci_sad0 || !pvt->pci_ha || !pvt->pci_sad1 || |
2742 | !pvt->pci_ras || !pvt->pci_ta || !pvt->info.pci_vtd) | 2680 | !pvt->pci_ras || !pvt->pci_ta || !pvt->info.pci_vtd) |
2743 | goto enodev; | 2681 | goto enodev; |
2744 | 2682 | ||
2745 | if (saw_chan_mask != 0x0f && /* -EN */ | 2683 | if (saw_chan_mask != 0x0f && /* -EN/-EX */ |
2746 | saw_chan_mask != 0x33 && /* -EP */ | 2684 | saw_chan_mask != 0x03) /* -EP */ |
2747 | saw_chan_mask != 0xff) /* -EX */ | ||
2748 | goto enodev; | 2685 | goto enodev; |
2749 | return 0; | 2686 | return 0; |
2750 | 2687 | ||
@@ -2812,7 +2749,7 @@ static int knl_mci_bind_devs(struct mem_ctl_info *mci, | |||
2812 | pvt->knl.pci_cha[devidx] = pdev; | 2749 | pvt->knl.pci_cha[devidx] = pdev; |
2813 | break; | 2750 | break; |
2814 | 2751 | ||
2815 | case PCI_DEVICE_ID_INTEL_KNL_IMC_CHANNEL: | 2752 | case PCI_DEVICE_ID_INTEL_KNL_IMC_CHAN: |
2816 | devidx = -1; | 2753 | devidx = -1; |
2817 | 2754 | ||
2818 | /* | 2755 | /* |
@@ -3006,7 +2943,7 @@ static void sbridge_mce_output_error(struct mem_ctl_info *mci, | |||
3006 | 2943 | ||
3007 | if (rc < 0) | 2944 | if (rc < 0) |
3008 | goto err_parsing; | 2945 | goto err_parsing; |
3009 | new_mci = get_mci_for_node_id(socket); | 2946 | new_mci = get_mci_for_node_id(socket, ha); |
3010 | if (!new_mci) { | 2947 | if (!new_mci) { |
3011 | strcpy(msg, "Error: socket got corrupted!"); | 2948 | strcpy(msg, "Error: socket got corrupted!"); |
3012 | goto err_parsing; | 2949 | goto err_parsing; |
@@ -3053,7 +2990,7 @@ static void sbridge_mce_output_error(struct mem_ctl_info *mci, | |||
3053 | /* Call the helper to output message */ | 2990 | /* Call the helper to output message */ |
3054 | edac_mc_handle_error(tp_event, mci, core_err_cnt, | 2991 | edac_mc_handle_error(tp_event, mci, core_err_cnt, |
3055 | m->addr >> PAGE_SHIFT, m->addr & ~PAGE_MASK, 0, | 2992 | m->addr >> PAGE_SHIFT, m->addr & ~PAGE_MASK, 0, |
3056 | 4*ha+channel, dimm, -1, | 2993 | channel, dimm, -1, |
3057 | optype, msg); | 2994 | optype, msg); |
3058 | return; | 2995 | return; |
3059 | err_parsing: | 2996 | err_parsing: |
@@ -3078,7 +3015,7 @@ static int sbridge_mce_check_error(struct notifier_block *nb, unsigned long val, | |||
3078 | if (edac_get_report_status() == EDAC_REPORTING_DISABLED) | 3015 | if (edac_get_report_status() == EDAC_REPORTING_DISABLED) |
3079 | return NOTIFY_DONE; | 3016 | return NOTIFY_DONE; |
3080 | 3017 | ||
3081 | mci = get_mci_for_node_id(mce->socketid); | 3018 | mci = get_mci_for_node_id(mce->socketid, IMC0); |
3082 | if (!mci) | 3019 | if (!mci) |
3083 | return NOTIFY_DONE; | 3020 | return NOTIFY_DONE; |
3084 | pvt = mci->pvt_info; | 3021 | pvt = mci->pvt_info; |
@@ -3159,11 +3096,6 @@ static int sbridge_register_mci(struct sbridge_dev *sbridge_dev, enum type type) | |||
3159 | struct pci_dev *pdev = sbridge_dev->pdev[0]; | 3096 | struct pci_dev *pdev = sbridge_dev->pdev[0]; |
3160 | int rc; | 3097 | int rc; |
3161 | 3098 | ||
3162 | /* Check the number of active and not disabled channels */ | ||
3163 | rc = check_if_ecc_is_active(sbridge_dev->bus, type); | ||
3164 | if (unlikely(rc < 0)) | ||
3165 | return rc; | ||
3166 | |||
3167 | /* allocate a new MC control structure */ | 3099 | /* allocate a new MC control structure */ |
3168 | layers[0].type = EDAC_MC_LAYER_CHANNEL; | 3100 | layers[0].type = EDAC_MC_LAYER_CHANNEL; |
3169 | layers[0].size = type == KNIGHTS_LANDING ? | 3101 | layers[0].size = type == KNIGHTS_LANDING ? |
@@ -3192,7 +3124,7 @@ static int sbridge_register_mci(struct sbridge_dev *sbridge_dev, enum type type) | |||
3192 | MEM_FLAG_DDR4 : MEM_FLAG_DDR3; | 3124 | MEM_FLAG_DDR4 : MEM_FLAG_DDR3; |
3193 | mci->edac_ctl_cap = EDAC_FLAG_NONE; | 3125 | mci->edac_ctl_cap = EDAC_FLAG_NONE; |
3194 | mci->edac_cap = EDAC_FLAG_NONE; | 3126 | mci->edac_cap = EDAC_FLAG_NONE; |
3195 | mci->mod_name = "sbridge_edac.c"; | 3127 | mci->mod_name = "sb_edac.c"; |
3196 | mci->mod_ver = SBRIDGE_REVISION; | 3128 | mci->mod_ver = SBRIDGE_REVISION; |
3197 | mci->dev_name = pci_name(pdev); | 3129 | mci->dev_name = pci_name(pdev); |
3198 | mci->ctl_page_to_phys = NULL; | 3130 | mci->ctl_page_to_phys = NULL; |
@@ -3215,12 +3147,14 @@ static int sbridge_register_mci(struct sbridge_dev *sbridge_dev, enum type type) | |||
3215 | pvt->info.max_interleave = ARRAY_SIZE(ibridge_interleave_list); | 3147 | pvt->info.max_interleave = ARRAY_SIZE(ibridge_interleave_list); |
3216 | pvt->info.interleave_pkg = ibridge_interleave_pkg; | 3148 | pvt->info.interleave_pkg = ibridge_interleave_pkg; |
3217 | pvt->info.get_width = ibridge_get_width; | 3149 | pvt->info.get_width = ibridge_get_width; |
3218 | mci->ctl_name = kasprintf(GFP_KERNEL, "Ivy Bridge Socket#%d", mci->mc_idx); | ||
3219 | 3150 | ||
3220 | /* Store pci devices at mci for faster access */ | 3151 | /* Store pci devices at mci for faster access */ |
3221 | rc = ibridge_mci_bind_devs(mci, sbridge_dev); | 3152 | rc = ibridge_mci_bind_devs(mci, sbridge_dev); |
3222 | if (unlikely(rc < 0)) | 3153 | if (unlikely(rc < 0)) |
3223 | goto fail0; | 3154 | goto fail0; |
3155 | get_source_id(mci); | ||
3156 | mci->ctl_name = kasprintf(GFP_KERNEL, "Ivy Bridge SrcID#%d_Ha#%d", | ||
3157 | pvt->sbridge_dev->source_id, pvt->sbridge_dev->dom); | ||
3224 | break; | 3158 | break; |
3225 | case SANDY_BRIDGE: | 3159 | case SANDY_BRIDGE: |
3226 | pvt->info.rankcfgr = SB_RANK_CFG_A; | 3160 | pvt->info.rankcfgr = SB_RANK_CFG_A; |
@@ -3238,12 +3172,14 @@ static int sbridge_register_mci(struct sbridge_dev *sbridge_dev, enum type type) | |||
3238 | pvt->info.max_interleave = ARRAY_SIZE(sbridge_interleave_list); | 3172 | pvt->info.max_interleave = ARRAY_SIZE(sbridge_interleave_list); |
3239 | pvt->info.interleave_pkg = sbridge_interleave_pkg; | 3173 | pvt->info.interleave_pkg = sbridge_interleave_pkg; |
3240 | pvt->info.get_width = sbridge_get_width; | 3174 | pvt->info.get_width = sbridge_get_width; |
3241 | mci->ctl_name = kasprintf(GFP_KERNEL, "Sandy Bridge Socket#%d", mci->mc_idx); | ||
3242 | 3175 | ||
3243 | /* Store pci devices at mci for faster access */ | 3176 | /* Store pci devices at mci for faster access */ |
3244 | rc = sbridge_mci_bind_devs(mci, sbridge_dev); | 3177 | rc = sbridge_mci_bind_devs(mci, sbridge_dev); |
3245 | if (unlikely(rc < 0)) | 3178 | if (unlikely(rc < 0)) |
3246 | goto fail0; | 3179 | goto fail0; |
3180 | get_source_id(mci); | ||
3181 | mci->ctl_name = kasprintf(GFP_KERNEL, "Sandy Bridge SrcID#%d_Ha#%d", | ||
3182 | pvt->sbridge_dev->source_id, pvt->sbridge_dev->dom); | ||
3247 | break; | 3183 | break; |
3248 | case HASWELL: | 3184 | case HASWELL: |
3249 | /* rankcfgr isn't used */ | 3185 | /* rankcfgr isn't used */ |
@@ -3261,12 +3197,14 @@ static int sbridge_register_mci(struct sbridge_dev *sbridge_dev, enum type type) | |||
3261 | pvt->info.max_interleave = ARRAY_SIZE(ibridge_interleave_list); | 3197 | pvt->info.max_interleave = ARRAY_SIZE(ibridge_interleave_list); |
3262 | pvt->info.interleave_pkg = ibridge_interleave_pkg; | 3198 | pvt->info.interleave_pkg = ibridge_interleave_pkg; |
3263 | pvt->info.get_width = ibridge_get_width; | 3199 | pvt->info.get_width = ibridge_get_width; |
3264 | mci->ctl_name = kasprintf(GFP_KERNEL, "Haswell Socket#%d", mci->mc_idx); | ||
3265 | 3200 | ||
3266 | /* Store pci devices at mci for faster access */ | 3201 | /* Store pci devices at mci for faster access */ |
3267 | rc = haswell_mci_bind_devs(mci, sbridge_dev); | 3202 | rc = haswell_mci_bind_devs(mci, sbridge_dev); |
3268 | if (unlikely(rc < 0)) | 3203 | if (unlikely(rc < 0)) |
3269 | goto fail0; | 3204 | goto fail0; |
3205 | get_source_id(mci); | ||
3206 | mci->ctl_name = kasprintf(GFP_KERNEL, "Haswell SrcID#%d_Ha#%d", | ||
3207 | pvt->sbridge_dev->source_id, pvt->sbridge_dev->dom); | ||
3270 | break; | 3208 | break; |
3271 | case BROADWELL: | 3209 | case BROADWELL: |
3272 | /* rankcfgr isn't used */ | 3210 | /* rankcfgr isn't used */ |
@@ -3284,12 +3222,14 @@ static int sbridge_register_mci(struct sbridge_dev *sbridge_dev, enum type type) | |||
3284 | pvt->info.max_interleave = ARRAY_SIZE(ibridge_interleave_list); | 3222 | pvt->info.max_interleave = ARRAY_SIZE(ibridge_interleave_list); |
3285 | pvt->info.interleave_pkg = ibridge_interleave_pkg; | 3223 | pvt->info.interleave_pkg = ibridge_interleave_pkg; |
3286 | pvt->info.get_width = broadwell_get_width; | 3224 | pvt->info.get_width = broadwell_get_width; |
3287 | mci->ctl_name = kasprintf(GFP_KERNEL, "Broadwell Socket#%d", mci->mc_idx); | ||
3288 | 3225 | ||
3289 | /* Store pci devices at mci for faster access */ | 3226 | /* Store pci devices at mci for faster access */ |
3290 | rc = broadwell_mci_bind_devs(mci, sbridge_dev); | 3227 | rc = broadwell_mci_bind_devs(mci, sbridge_dev); |
3291 | if (unlikely(rc < 0)) | 3228 | if (unlikely(rc < 0)) |
3292 | goto fail0; | 3229 | goto fail0; |
3230 | get_source_id(mci); | ||
3231 | mci->ctl_name = kasprintf(GFP_KERNEL, "Broadwell SrcID#%d_Ha#%d", | ||
3232 | pvt->sbridge_dev->source_id, pvt->sbridge_dev->dom); | ||
3293 | break; | 3233 | break; |
3294 | case KNIGHTS_LANDING: | 3234 | case KNIGHTS_LANDING: |
3295 | /* pvt->info.rankcfgr == ??? */ | 3235 | /* pvt->info.rankcfgr == ??? */ |
@@ -3307,17 +3247,22 @@ static int sbridge_register_mci(struct sbridge_dev *sbridge_dev, enum type type) | |||
3307 | pvt->info.max_interleave = ARRAY_SIZE(knl_interleave_list); | 3247 | pvt->info.max_interleave = ARRAY_SIZE(knl_interleave_list); |
3308 | pvt->info.interleave_pkg = ibridge_interleave_pkg; | 3248 | pvt->info.interleave_pkg = ibridge_interleave_pkg; |
3309 | pvt->info.get_width = knl_get_width; | 3249 | pvt->info.get_width = knl_get_width; |
3310 | mci->ctl_name = kasprintf(GFP_KERNEL, | ||
3311 | "Knights Landing Socket#%d", mci->mc_idx); | ||
3312 | 3250 | ||
3313 | rc = knl_mci_bind_devs(mci, sbridge_dev); | 3251 | rc = knl_mci_bind_devs(mci, sbridge_dev); |
3314 | if (unlikely(rc < 0)) | 3252 | if (unlikely(rc < 0)) |
3315 | goto fail0; | 3253 | goto fail0; |
3254 | get_source_id(mci); | ||
3255 | mci->ctl_name = kasprintf(GFP_KERNEL, "Knights Landing SrcID#%d_Ha#%d", | ||
3256 | pvt->sbridge_dev->source_id, pvt->sbridge_dev->dom); | ||
3316 | break; | 3257 | break; |
3317 | } | 3258 | } |
3318 | 3259 | ||
3319 | /* Get dimm basic config and the memory layout */ | 3260 | /* Get dimm basic config and the memory layout */ |
3320 | get_dimm_config(mci); | 3261 | rc = get_dimm_config(mci); |
3262 | if (rc < 0) { | ||
3263 | edac_dbg(0, "MC: failed to get_dimm_config()\n"); | ||
3264 | goto fail; | ||
3265 | } | ||
3321 | get_memory_layout(mci); | 3266 | get_memory_layout(mci); |
3322 | 3267 | ||
3323 | /* record ptr to the generic device */ | 3268 | /* record ptr to the generic device */ |
@@ -3327,13 +3272,14 @@ static int sbridge_register_mci(struct sbridge_dev *sbridge_dev, enum type type) | |||
3327 | if (unlikely(edac_mc_add_mc(mci))) { | 3272 | if (unlikely(edac_mc_add_mc(mci))) { |
3328 | edac_dbg(0, "MC: failed edac_mc_add_mc()\n"); | 3273 | edac_dbg(0, "MC: failed edac_mc_add_mc()\n"); |
3329 | rc = -EINVAL; | 3274 | rc = -EINVAL; |
3330 | goto fail0; | 3275 | goto fail; |
3331 | } | 3276 | } |
3332 | 3277 | ||
3333 | return 0; | 3278 | return 0; |
3334 | 3279 | ||
3335 | fail0: | 3280 | fail: |
3336 | kfree(mci->ctl_name); | 3281 | kfree(mci->ctl_name); |
3282 | fail0: | ||
3337 | edac_mc_free(mci); | 3283 | edac_mc_free(mci); |
3338 | sbridge_dev->mci = NULL; | 3284 | sbridge_dev->mci = NULL; |
3339 | return rc; | 3285 | return rc; |
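
get_dimm_config() can now fail, for instance when a populated channel has ECC disabled or the KNL capacity probe errors out, and that failure is reached after mci->ctl_name has been allocated with kasprintf(). The new fail: label therefore frees ctl_name and falls through to the pre-existing fail0: that frees the MC structure, while the earlier bind_devs failures still jump straight to fail0:. A stand-alone sketch of that staged unwind (the resources and the failure point are made up):

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    static int register_example(int fail_late)
    {
            int rc = 0;
            void *mc = calloc(1, 64);                   /* earlier allocation */
            char *name;

            if (!mc)
                    return -1;

            name = strdup("Ivy Bridge SrcID#0_Ha#0");   /* later allocation */
            if (!name) {
                    rc = -1;
                    goto fail0;                         /* nothing newer to undo */
            }

            if (fail_late) {
                    rc = -1;
                    goto fail;                          /* undo the newest item first */
            }

            printf("registered %s\n", name);
            free(name);
            free(mc);
            return 0;
    fail:
            free(name);
    fail0:
            free(mc);
            return rc;
    }

    int main(void)
    {
            int rc = register_example(1);

            printf("register_example() returned %d\n", rc);
            return 0;
    }
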
diff --git a/drivers/edac/thunderx_edac.c b/drivers/edac/thunderx_edac.c index 86d585cb6d32..2d352b40ae1c 100644 --- a/drivers/edac/thunderx_edac.c +++ b/drivers/edac/thunderx_edac.c | |||
@@ -2080,7 +2080,7 @@ static int thunderx_l2c_probe(struct pci_dev *pdev, | |||
2080 | if (IS_ENABLED(CONFIG_EDAC_DEBUG)) { | 2080 | if (IS_ENABLED(CONFIG_EDAC_DEBUG)) { |
2081 | l2c->debugfs = edac_debugfs_create_dir(pdev->dev.kobj.name); | 2081 | l2c->debugfs = edac_debugfs_create_dir(pdev->dev.kobj.name); |
2082 | 2082 | ||
2083 | thunderx_create_debugfs_nodes(l2c->debugfs, l2c_devattr, | 2083 | ret = thunderx_create_debugfs_nodes(l2c->debugfs, l2c_devattr, |
2084 | l2c, dfs_entries); | 2084 | l2c, dfs_entries); |
2085 | 2085 | ||
2086 | if (ret != dfs_entries) { | 2086 | if (ret != dfs_entries) { |
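
The thunderx change stores the return value of thunderx_create_debugfs_nodes() in ret, so the following "if (ret != dfs_entries)" check actually tests how many debugfs nodes were created rather than whatever ret happened to contain. The pattern in isolation (the helper is a stand-in):

    #include <stdio.h>

    /* stand-in for a helper that reports how many entries it created */
    static int create_nodes(int want)
    {
            return want - 1;        /* pretend one entry failed */
    }

    int main(void)
    {
            const int dfs_entries = 3;
            int ret;

            ret = create_nodes(dfs_entries);        /* capture the count... */
            if (ret != dfs_entries)                 /* ...so this check means something */
                    fprintf(stderr, "created only %d of %d entries\n",
                            ret, dfs_entries);
            return 0;
    }
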