author		Borislav Petkov <bp@suse.de>	2013-12-15 11:54:27 -0500
committer	Borislav Petkov <bp@suse.de>	2013-12-15 11:54:27 -0500
commit		d1ea71cdc9801c829d97eb42204329c28a4b2f52 (patch)
tree		34dcfe082dfb308d9dfd351265a86011fab12353 /drivers/edac
parent		df781d0386a3e29c31f1d8eaf845b3224e65dd7f (diff)
amd64_edac: Remove "amd64" prefix from static functions
No need for the namespace tagging there. Cleanup setup_pci_device
while at it.

Signed-off-by: Borislav Petkov <bp@suse.de>
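All of the renamed functions are static, so the prefix never protected
anything: a static function has internal linkage, its name is invisible
outside its own translation unit and cannot clash with an identically
named symbol elsewhere in the kernel. A minimal sketch of that property
(hypothetical file and function names, not from this patch):

	/* edac_foo.c */
	static int set_scrub_rate(int bw)	/* name is private to edac_foo.c */
	{
		return bw / 2;
	}

	/* edac_bar.c, linked into the same image */
	static int set_scrub_rate(int bw)	/* same name, no link-time clash */
	{
		return bw * 2;
	}

Only non-static, exported symbols need the amd64_ tag to stay unique
kernel-wide. The setup_pci_device() rework near the end of the diff is
part of the same tidy-up: the nested if (mci) { ... } body becomes early
returns, and the older pr_warning() calls become the preferred pr_warn()
spelling.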
Diffstat (limited to 'drivers/edac')
-rw-r--r--	drivers/edac/amd64_edac.c	118
1 file changed, 56 insertions(+), 62 deletions(-)
diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c
index 568e97fc1489..98e14ee4833c 100644
--- a/drivers/edac/amd64_edac.c
+++ b/drivers/edac/amd64_edac.c
@@ -1,7 +1,7 @@
 #include "amd64_edac.h"
 #include <asm/amd_nb.h>
 
-static struct edac_pci_ctl_info *amd64_ctl_pci;
+static struct edac_pci_ctl_info *pci_ctl;
 
 static int report_gart_errors;
 module_param(report_gart_errors, int, 0644);
@@ -162,7 +162,7 @@ static int f15_read_dct_pci_cfg(struct amd64_pvt *pvt, int addr, u32 *val,
  * scan the scrub rate mapping table for a close or matching bandwidth value to
  * issue. If requested is too big, then use last maximum value found.
  */
-static int __amd64_set_scrub_rate(struct pci_dev *ctl, u32 new_bw, u32 min_rate)
+static int __set_scrub_rate(struct pci_dev *ctl, u32 new_bw, u32 min_rate)
 {
 	u32 scrubval;
 	int i;
@@ -198,7 +198,7 @@ static int __amd64_set_scrub_rate(struct pci_dev *ctl, u32 new_bw, u32 min_rate)
 	return 0;
 }
 
-static int amd64_set_scrub_rate(struct mem_ctl_info *mci, u32 bw)
+static int set_scrub_rate(struct mem_ctl_info *mci, u32 bw)
 {
 	struct amd64_pvt *pvt = mci->pvt_info;
 	u32 min_scrubrate = 0x5;
@@ -210,10 +210,10 @@ static int amd64_set_scrub_rate(struct mem_ctl_info *mci, u32 bw)
 	if (pvt->fam == 0x15 && pvt->model < 0x10)
 		f15h_select_dct(pvt, 0);
 
-	return __amd64_set_scrub_rate(pvt->F3, bw, min_scrubrate);
+	return __set_scrub_rate(pvt->F3, bw, min_scrubrate);
 }
 
-static int amd64_get_scrub_rate(struct mem_ctl_info *mci)
+static int get_scrub_rate(struct mem_ctl_info *mci)
 {
 	struct amd64_pvt *pvt = mci->pvt_info;
 	u32 scrubval = 0;
@@ -240,8 +240,7 @@ static int amd64_get_scrub_rate(struct mem_ctl_info *mci)
  * returns true if the SysAddr given by sys_addr matches the
  * DRAM base/limit associated with node_id
  */
-static bool amd64_base_limit_match(struct amd64_pvt *pvt, u64 sys_addr,
-				   u8 nid)
+static bool base_limit_match(struct amd64_pvt *pvt, u64 sys_addr, u8 nid)
 {
 	u64 addr;
 
@@ -285,7 +284,7 @@ static struct mem_ctl_info *find_mc_by_sys_addr(struct mem_ctl_info *mci,
 
 	if (intlv_en == 0) {
 		for (node_id = 0; node_id < DRAM_RANGES; node_id++) {
-			if (amd64_base_limit_match(pvt, sys_addr, node_id))
+			if (base_limit_match(pvt, sys_addr, node_id))
 				goto found;
 		}
 		goto err_no_match;
@@ -309,7 +308,7 @@ static struct mem_ctl_info *find_mc_by_sys_addr(struct mem_ctl_info *mci,
 	}
 
 	/* sanity test for sys_addr */
-	if (unlikely(!amd64_base_limit_match(pvt, sys_addr, node_id))) {
+	if (unlikely(!base_limit_match(pvt, sys_addr, node_id))) {
 		amd64_warn("%s: sys_addr 0x%llx falls outside base/limit address"
 			   "range for node %d with node interleaving enabled.\n",
 			   __func__, sys_addr, node_id);
@@ -660,7 +659,7 @@ static int get_channel_from_ecc_syndrome(struct mem_ctl_info *, u16);
  * Determine if the DIMMs have ECC enabled. ECC is enabled ONLY if all the DIMMs
  * are ECC capable.
  */
-static unsigned long amd64_determine_edac_cap(struct amd64_pvt *pvt)
+static unsigned long determine_edac_cap(struct amd64_pvt *pvt)
 {
 	u8 bit;
 	unsigned long edac_cap = EDAC_FLAG_NONE;
@@ -675,9 +674,9 @@ static unsigned long amd64_determine_edac_cap(struct amd64_pvt *pvt)
 	return edac_cap;
 }
 
-static void amd64_debug_display_dimm_sizes(struct amd64_pvt *, u8);
+static void debug_display_dimm_sizes(struct amd64_pvt *, u8);
 
-static void amd64_dump_dramcfg_low(struct amd64_pvt *pvt, u32 dclr, int chan)
+static void debug_dump_dramcfg_low(struct amd64_pvt *pvt, u32 dclr, int chan)
 {
 	edac_dbg(1, "F2x%d90 (DRAM Cfg Low): 0x%08x\n", chan, dclr);
 
@@ -711,7 +710,7 @@ static void dump_misc_regs(struct amd64_pvt *pvt)
 		 (pvt->nbcap & NBCAP_SECDED) ? "yes" : "no",
 		 (pvt->nbcap & NBCAP_CHIPKILL) ? "yes" : "no");
 
-	amd64_dump_dramcfg_low(pvt, pvt->dclr0, 0);
+	debug_dump_dramcfg_low(pvt, pvt->dclr0, 0);
 
 	edac_dbg(1, "F3xB0 (Online Spare): 0x%08x\n", pvt->online_spare);
 
@@ -722,19 +721,19 @@ static void dump_misc_regs(struct amd64_pvt *pvt)
 
 	edac_dbg(1, "  DramHoleValid: %s\n", dhar_valid(pvt) ? "yes" : "no");
 
-	amd64_debug_display_dimm_sizes(pvt, 0);
+	debug_display_dimm_sizes(pvt, 0);
 
 	/* everything below this point is Fam10h and above */
 	if (pvt->fam == 0xf)
 		return;
 
-	amd64_debug_display_dimm_sizes(pvt, 1);
+	debug_display_dimm_sizes(pvt, 1);
 
 	amd64_info("using %s syndromes.\n", ((pvt->ecc_sym_sz == 8) ? "x8" : "x4"));
 
 	/* Only if NOT ganged does dclr1 have valid info */
 	if (!dct_ganging_enabled(pvt))
-		amd64_dump_dramcfg_low(pvt, pvt->dclr1, 1);
+		debug_dump_dramcfg_low(pvt, pvt->dclr1, 1);
 }
 
 /*
@@ -800,7 +799,7 @@ static void read_dct_base_mask(struct amd64_pvt *pvt)
 	}
 }
 
-static enum mem_type amd64_determine_memory_type(struct amd64_pvt *pvt, int cs)
+static enum mem_type determine_memory_type(struct amd64_pvt *pvt, int cs)
 {
 	enum mem_type type;
 
@@ -1702,7 +1701,7 @@ static void f1x_map_sysaddr_to_csrow(struct mem_ctl_info *mci, u64 sys_addr,
  * debug routine to display the memory sizes of all logical DIMMs and its
  * CSROWs
  */
-static void amd64_debug_display_dimm_sizes(struct amd64_pvt *pvt, u8 ctrl)
+static void debug_display_dimm_sizes(struct amd64_pvt *pvt, u8 ctrl)
 {
 	int dimm, size0, size1;
 	u32 *dcsb = ctrl ? pvt->csels[1].csbases : pvt->csels[0].csbases;
@@ -1744,7 +1743,7 @@ static void amd64_debug_display_dimm_sizes(struct amd64_pvt *pvt, u8 ctrl)
 	}
 }
 
-static struct amd64_family_type amd64_family_types[] = {
+static struct amd64_family_type family_types[] = {
 	[K8_CPUS] = {
 		.ctl_name = "K8",
 		.f1_id = PCI_DEVICE_ID_AMD_K8_NB_ADDRMAP,
@@ -2191,7 +2190,7 @@ static void read_mc_regs(struct amd64_pvt *pvt)
  * encompasses
  *
  */
-static u32 amd64_csrow_nr_pages(struct amd64_pvt *pvt, u8 dct, int csrow_nr)
+static u32 get_csrow_nr_pages(struct amd64_pvt *pvt, u8 dct, int csrow_nr)
 {
 	u32 cs_mode, nr_pages;
 	u32 dbam = dct ? pvt->dbam1 : pvt->dbam0;
@@ -2258,19 +2257,19 @@ static int init_csrows(struct mem_ctl_info *mci)
 			 pvt->mc_node_id, i);
 
 		if (row_dct0) {
-			nr_pages = amd64_csrow_nr_pages(pvt, 0, i);
+			nr_pages = get_csrow_nr_pages(pvt, 0, i);
 			csrow->channels[0]->dimm->nr_pages = nr_pages;
 		}
 
 		/* K8 has only one DCT */
 		if (pvt->fam != 0xf && row_dct1) {
-			int row_dct1_pages = amd64_csrow_nr_pages(pvt, 1, i);
+			int row_dct1_pages = get_csrow_nr_pages(pvt, 1, i);
 
 			csrow->channels[1]->dimm->nr_pages = row_dct1_pages;
 			nr_pages += row_dct1_pages;
 		}
 
-		mtype = amd64_determine_memory_type(pvt, i);
+		mtype = determine_memory_type(pvt, i);
 
 		edac_dbg(1, "Total csrow%d pages: %u\n", i, nr_pages);
 
@@ -2304,7 +2303,7 @@ static void get_cpus_on_this_dct_cpumask(struct cpumask *mask, u16 nid)
 }
 
 /* check MCG_CTL on all the cpus on this node */
-static bool amd64_nb_mce_bank_enabled_on_node(u16 nid)
+static bool nb_mce_bank_enabled_on_node(u16 nid)
 {
 	cpumask_var_t mask;
 	int cpu, nbe;
@@ -2477,7 +2476,7 @@ static bool ecc_enabled(struct pci_dev *F3, u16 nid)
 	ecc_en = !!(value & NBCFG_ECC_ENABLE);
 	amd64_info("DRAM ECC %s.\n", (ecc_en ? "enabled" : "disabled"));
 
-	nb_mce_en = amd64_nb_mce_bank_enabled_on_node(nid);
+	nb_mce_en = nb_mce_bank_enabled_on_node(nid);
 	if (!nb_mce_en)
 		amd64_notice("NB MCE bank disabled, set MSR "
 			     "0x%08x[4] on node %d to enable.\n",
@@ -2532,7 +2531,7 @@ static void setup_mci_misc_attrs(struct mem_ctl_info *mci,
 	if (pvt->nbcap & NBCAP_CHIPKILL)
 		mci->edac_ctl_cap |= EDAC_FLAG_S4ECD4ED;
 
-	mci->edac_cap = amd64_determine_edac_cap(pvt);
+	mci->edac_cap = determine_edac_cap(pvt);
 	mci->mod_name = EDAC_MOD_STR;
 	mci->mod_ver = EDAC_AMD64_VERSION;
 	mci->ctl_name = fam->ctl_name;
@@ -2540,14 +2539,14 @@ static void setup_mci_misc_attrs(struct mem_ctl_info *mci,
 	mci->ctl_page_to_phys = NULL;
 
 	/* memory scrubber interface */
-	mci->set_sdram_scrub_rate = amd64_set_scrub_rate;
-	mci->get_sdram_scrub_rate = amd64_get_scrub_rate;
+	mci->set_sdram_scrub_rate = set_scrub_rate;
+	mci->get_sdram_scrub_rate = get_scrub_rate;
 }
 
 /*
  * returns a pointer to the family descriptor on success, NULL otherwise.
  */
-static struct amd64_family_type *amd64_per_family_init(struct amd64_pvt *pvt)
+static struct amd64_family_type *per_family_init(struct amd64_pvt *pvt)
 {
 	struct amd64_family_type *fam_type = NULL;
 
@@ -2558,29 +2557,29 @@ static struct amd64_family_type *amd64_per_family_init(struct amd64_pvt *pvt)
 
 	switch (pvt->fam) {
 	case 0xf:
-		fam_type = &amd64_family_types[K8_CPUS];
-		pvt->ops = &amd64_family_types[K8_CPUS].ops;
+		fam_type = &family_types[K8_CPUS];
+		pvt->ops = &family_types[K8_CPUS].ops;
 		break;
 
 	case 0x10:
-		fam_type = &amd64_family_types[F10_CPUS];
-		pvt->ops = &amd64_family_types[F10_CPUS].ops;
+		fam_type = &family_types[F10_CPUS];
+		pvt->ops = &family_types[F10_CPUS].ops;
 		break;
 
 	case 0x15:
 		if (pvt->model == 0x30) {
-			fam_type = &amd64_family_types[F15_M30H_CPUS];
-			pvt->ops = &amd64_family_types[F15_M30H_CPUS].ops;
+			fam_type = &family_types[F15_M30H_CPUS];
+			pvt->ops = &family_types[F15_M30H_CPUS].ops;
 			break;
 		}
 
-		fam_type = &amd64_family_types[F15_CPUS];
-		pvt->ops = &amd64_family_types[F15_CPUS].ops;
+		fam_type = &family_types[F15_CPUS];
+		pvt->ops = &family_types[F15_CPUS].ops;
 		break;
 
 	case 0x16:
-		fam_type = &amd64_family_types[F16_CPUS];
-		pvt->ops = &amd64_family_types[F16_CPUS].ops;
+		fam_type = &family_types[F16_CPUS];
+		pvt->ops = &family_types[F16_CPUS].ops;
 		break;
 
 	default:
@@ -2596,7 +2595,7 @@ static struct amd64_family_type *amd64_per_family_init(struct amd64_pvt *pvt)
 	return fam_type;
 }
 
-static int amd64_init_one_instance(struct pci_dev *F2)
+static int init_one_instance(struct pci_dev *F2)
 {
 	struct amd64_pvt *pvt = NULL;
 	struct amd64_family_type *fam_type = NULL;
@@ -2614,7 +2613,7 @@ static int amd64_init_one_instance(struct pci_dev *F2)
 	pvt->F2 = F2;
 
 	ret = -EINVAL;
-	fam_type = amd64_per_family_init(pvt);
+	fam_type = per_family_init(pvt);
 	if (!fam_type)
 		goto err_free;
 
@@ -2698,8 +2697,8 @@ err_ret:
 	return ret;
 }
 
-static int amd64_probe_one_instance(struct pci_dev *pdev,
-				    const struct pci_device_id *mc_type)
+static int probe_one_instance(struct pci_dev *pdev,
+			      const struct pci_device_id *mc_type)
 {
 	u16 nid = amd_get_node_id(pdev);
 	struct pci_dev *F3 = node_to_amd_nb(nid)->misc;
@@ -2731,7 +2730,7 @@ static int amd64_probe_one_instance(struct pci_dev *pdev,
 		goto err_enable;
 	}
 
-	ret = amd64_init_one_instance(pdev);
+	ret = init_one_instance(pdev);
 	if (ret < 0) {
 		amd64_err("Error probing instance: %d\n", nid);
 		restore_ecc_error_reporting(s, nid, F3);
@@ -2747,7 +2746,7 @@ err_out:
 	return ret;
 }
 
-static void amd64_remove_one_instance(struct pci_dev *pdev)
+static void remove_one_instance(struct pci_dev *pdev)
 {
 	struct mem_ctl_info *mci;
 	struct amd64_pvt *pvt;
@@ -2838,8 +2837,8 @@ MODULE_DEVICE_TABLE(pci, amd64_pci_table);
 
 static struct pci_driver amd64_pci_driver = {
 	.name		= EDAC_MOD_STR,
-	.probe		= amd64_probe_one_instance,
-	.remove		= amd64_remove_one_instance,
+	.probe		= probe_one_instance,
+	.remove		= remove_one_instance,
 	.id_table	= amd64_pci_table,
 };
 
@@ -2848,23 +2847,18 @@ static void setup_pci_device(void)
 	struct mem_ctl_info *mci;
 	struct amd64_pvt *pvt;
 
-	if (amd64_ctl_pci)
+	if (pci_ctl)
 		return;
 
 	mci = mcis[0];
-	if (mci) {
-
-		pvt = mci->pvt_info;
-		amd64_ctl_pci =
-			edac_pci_create_generic_ctl(&pvt->F2->dev, EDAC_MOD_STR);
-
-		if (!amd64_ctl_pci) {
-			pr_warning("%s(): Unable to create PCI control\n",
-				   __func__);
-
-			pr_warning("%s(): PCI error report via EDAC not set\n",
-				   __func__);
-		}
+	if (!mci)
+		return;
+
+	pvt = mci->pvt_info;
+	pci_ctl = edac_pci_create_generic_ctl(&pvt->F2->dev, EDAC_MOD_STR);
+	if (!pci_ctl) {
+		pr_warn("%s(): Unable to create PCI control\n", __func__);
+		pr_warn("%s(): PCI error report via EDAC not set\n", __func__);
 	}
 }
 
@@ -2920,8 +2914,8 @@ err_ret:
 
 static void __exit amd64_edac_exit(void)
 {
-	if (amd64_ctl_pci)
-		edac_pci_release_generic_ctl(amd64_ctl_pci);
+	if (pci_ctl)
+		edac_pci_release_generic_ctl(pci_ctl);
 
 	pci_unregister_driver(&amd64_pci_driver);
 