author		Borislav Petkov <borislav.petkov@amd.com>	2009-11-03 09:29:26 -0500
committer	Borislav Petkov <borislav.petkov@amd.com>	2009-12-07 13:14:27 -0500
commit		f6d6ae965760906d79ab29bc38507608c5971549 (patch)
tree		47424f660be015e280760139465be06c0ff47027 /drivers/edac/amd64_edac.c
parent		ba578cb34a71fb08fff14ac0796b934a8c9991e1 (diff)
amd64_edac: unify MCGCTL ECC switching
Unify almost identical code into one function and remove NUMA-specific
usage (specifically cpumask_of_node()) in favor of generic topology
methods.

Remove unused defines, while at it.

Signed-off-by: Borislav Petkov <borislav.petkov@amd.com>
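For illustration, the sketch below models the pattern this patch unifies on: collect every core that sits behind a given northbridge node, then flip a per-core control bit for exactly that set, for both the enable and the restore path. It is only a user-space sketch, not driver code: the cpu_node[] table, NB_ENABLE_BIT and the simulated mcg_ctl[] array are illustrative stand-ins; in the driver the real pieces are amd_get_nb_id(), rdmsr_on_cpus()/wrmsr_on_cpus() and K8_MSR_MCGCTL_NBE, as shown in the diff below.

#include <stdbool.h>
#include <stdio.h>

#define NR_CPUS        8
#define NB_ENABLE_BIT  (1u << 4)   /* stand-in for K8_MSR_MCGCTL_NBE */

/* Simulated per-CPU MCG_CTL contents and per-CPU node ids (illustrative only). */
static unsigned int mcg_ctl[NR_CPUS];
static const int cpu_node[NR_CPUS] = { 0, 0, 0, 0, 1, 1, 1, 1 };

/* Analogous to get_cpus_on_this_dct_cpumask(): mark every CPU on node 'nid'. */
static void cpus_on_node(bool *mask, int nid)
{
	for (int cpu = 0; cpu < NR_CPUS; cpu++)
		mask[cpu] = (cpu_node[cpu] == nid);
}

/* Analogous to amd64_toggle_ecc_err_reporting(): set/clear the bit on one node. */
static void toggle_nb_error_reporting(int nid, bool on)
{
	bool mask[NR_CPUS];

	cpus_on_node(mask, nid);

	for (int cpu = 0; cpu < NR_CPUS; cpu++) {
		if (!mask[cpu])
			continue;
		if (on)
			mcg_ctl[cpu] |= NB_ENABLE_BIT;
		else
			mcg_ctl[cpu] &= ~NB_ENABLE_BIT;
	}
}

int main(void)
{
	toggle_nb_error_reporting(0, true);   /* enable reporting on node 0  */
	toggle_nb_error_reporting(0, false);  /* ...and restore it afterward */

	for (int cpu = 0; cpu < NR_CPUS; cpu++)
		printf("cpu %d: MCG_CTL=0x%x\n", cpu, mcg_ctl[cpu]);
	return 0;
}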
Diffstat (limited to 'drivers/edac/amd64_edac.c')
-rw-r--r--	drivers/edac/amd64_edac.c	| 204
1 file changed, 113 insertions(+), 91 deletions(-)
diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c
index 67541e7d1cfe..70c7d5f5ba5e 100644
--- a/drivers/edac/amd64_edac.c
+++ b/drivers/edac/amd64_edac.c
@@ -2624,6 +2624,109 @@ static int amd64_init_csrows(struct mem_ctl_info *mci)
 	return empty;
 }
 
+/* get all cores on this DCT */
+static void get_cpus_on_this_dct_cpumask(struct cpumask *mask, int nid)
+{
+	int cpu;
+
+	for_each_online_cpu(cpu)
+		if (amd_get_nb_id(cpu) == nid)
+			cpumask_set_cpu(cpu, mask);
+}
+
+/* check MCG_CTL on all the cpus on this node */
+static bool amd64_nb_mce_bank_enabled_on_node(int nid)
+{
+	cpumask_var_t mask;
+	struct msr *msrs;
+	int cpu, nbe, idx = 0;
+	bool ret = false;
+
+	if (!zalloc_cpumask_var(&mask, GFP_KERNEL)) {
+		amd64_printk(KERN_WARNING, "%s: error allocating mask\n",
+			     __func__);
+		return false;
+	}
+
+	get_cpus_on_this_dct_cpumask(mask, nid);
+
+	msrs = kzalloc(sizeof(struct msr) * cpumask_weight(mask), GFP_KERNEL);
+	if (!msrs) {
+		amd64_printk(KERN_WARNING, "%s: error allocating msrs\n",
+			     __func__);
+		free_cpumask_var(mask);
+		return false;
+	}
+
+	rdmsr_on_cpus(mask, MSR_IA32_MCG_CTL, msrs);
+
+	for_each_cpu(cpu, mask) {
+		nbe = msrs[idx].l & K8_MSR_MCGCTL_NBE;
+
+		debugf0("core: %u, MCG_CTL: 0x%llx, NB MSR is %s\n",
+			cpu, msrs[idx].q,
+			(nbe ? "enabled" : "disabled"));
+
+		if (!nbe)
+			goto out;
+
+		idx++;
+	}
+	ret = true;
+
+out:
+	kfree(msrs);
+	free_cpumask_var(mask);
+	return ret;
+}
+
+static int amd64_toggle_ecc_err_reporting(struct amd64_pvt *pvt, bool on)
+{
+	cpumask_var_t cmask;
+	struct msr *msrs = NULL;
+	int cpu, idx = 0;
+
+	if (!zalloc_cpumask_var(&cmask, GFP_KERNEL)) {
+		amd64_printk(KERN_WARNING, "%s: error allocating mask\n",
+			     __func__);
+		return false;
+	}
+
+	get_cpus_on_this_dct_cpumask(cmask, pvt->mc_node_id);
+
+	msrs = kzalloc(sizeof(struct msr) * cpumask_weight(cmask), GFP_KERNEL);
+	if (!msrs) {
+		amd64_printk(KERN_WARNING, "%s: error allocating msrs\n",
+			     __func__);
+		return -ENOMEM;
+	}
+
+	rdmsr_on_cpus(cmask, MSR_IA32_MCG_CTL, msrs);
+
+	for_each_cpu(cpu, cmask) {
+
+		if (on) {
+			if (msrs[idx].l & K8_MSR_MCGCTL_NBE)
+				pvt->flags.ecc_report = 1;
+
+			msrs[idx].l |= K8_MSR_MCGCTL_NBE;
+		} else {
+			/*
+			 * Turn off ECC reporting only when it was off before
+			 */
+			if (!pvt->flags.ecc_report)
+				msrs[idx].l &= ~K8_MSR_MCGCTL_NBE;
+		}
+		idx++;
+	}
+	wrmsr_on_cpus(cmask, MSR_IA32_MCG_CTL, msrs);
+
+	kfree(msrs);
+	free_cpumask_var(cmask);
+
+	return 0;
+}
+
 /*
  * Only if 'ecc_enable_override' is set AND BIOS had ECC disabled, do "we"
  * enable it.
@@ -2631,17 +2734,12 @@ static int amd64_init_csrows(struct mem_ctl_info *mci)
 static void amd64_enable_ecc_error_reporting(struct mem_ctl_info *mci)
 {
 	struct amd64_pvt *pvt = mci->pvt_info;
-	const struct cpumask *cpumask = cpumask_of_node(pvt->mc_node_id);
-	int cpu, idx = 0, err = 0;
-	struct msr msrs[cpumask_weight(cpumask)];
-	u32 value;
-	u32 mask = K8_NBCTL_CECCEn | K8_NBCTL_UECCEn;
+	int err = 0;
+	u32 value, mask = K8_NBCTL_CECCEn | K8_NBCTL_UECCEn;
 
 	if (!ecc_enable_override)
 		return;
 
-	memset(msrs, 0, sizeof(msrs));
-
 	amd64_printk(KERN_WARNING,
 		"'ecc_enable_override' parameter is active, "
 		"Enabling AMD ECC hardware now: CAUTION\n");
@@ -2657,16 +2755,9 @@ static void amd64_enable_ecc_error_reporting(struct mem_ctl_info *mci)
 	value |= mask;
 	pci_write_config_dword(pvt->misc_f3_ctl, K8_NBCTL, value);
 
-	rdmsr_on_cpus(cpumask, K8_MSR_MCGCTL, msrs);
-
-	for_each_cpu(cpu, cpumask) {
-		if (msrs[idx].l & K8_MSR_MCGCTL_NBE)
-			set_bit(idx, &pvt->old_mcgctl);
-
-		msrs[idx].l |= K8_MSR_MCGCTL_NBE;
-		idx++;
-	}
-	wrmsr_on_cpus(cpumask, K8_MSR_MCGCTL, msrs);
+	if (amd64_toggle_ecc_err_reporting(pvt, ON))
+		amd64_printk(KERN_WARNING, "Error enabling ECC reporting over "
+					   "MCGCTL!\n");
 
 	err = pci_read_config_dword(pvt->misc_f3_ctl, K8_NBCFG, &value);
 	if (err)
@@ -2707,17 +2798,12 @@ static void amd64_enable_ecc_error_reporting(struct mem_ctl_info *mci)
 
 static void amd64_restore_ecc_error_reporting(struct amd64_pvt *pvt)
 {
-	const struct cpumask *cpumask = cpumask_of_node(pvt->mc_node_id);
-	int cpu, idx = 0, err = 0;
-	struct msr msrs[cpumask_weight(cpumask)];
-	u32 value;
-	u32 mask = K8_NBCTL_CECCEn | K8_NBCTL_UECCEn;
+	int err = 0;
+	u32 value, mask = K8_NBCTL_CECCEn | K8_NBCTL_UECCEn;
 
 	if (!pvt->nbctl_mcgctl_saved)
 		return;
 
-	memset(msrs, 0, sizeof(msrs));
-
 	err = pci_read_config_dword(pvt->misc_f3_ctl, K8_NBCTL, &value);
 	if (err)
 		debugf0("Reading K8_NBCTL failed\n");
@@ -2727,72 +2813,9 @@ static void amd64_restore_ecc_error_reporting(struct amd64_pvt *pvt)
 	/* restore the NB Enable MCGCTL bit */
 	pci_write_config_dword(pvt->misc_f3_ctl, K8_NBCTL, value);
 
-	rdmsr_on_cpus(cpumask, K8_MSR_MCGCTL, msrs);
-
-	for_each_cpu(cpu, cpumask) {
-		msrs[idx].l &= ~K8_MSR_MCGCTL_NBE;
-		msrs[idx].l |=
-			test_bit(idx, &pvt->old_mcgctl) << K8_MSR_MCGCTL_NBE;
-		idx++;
-	}
-
-	wrmsr_on_cpus(cpumask, K8_MSR_MCGCTL, msrs);
-}
-
-/* get all cores on this DCT */
-static void get_cpus_on_this_dct_cpumask(struct cpumask *mask, int nid)
-{
-	int cpu;
-
-	for_each_online_cpu(cpu)
-		if (amd_get_nb_id(cpu) == nid)
-			cpumask_set_cpu(cpu, mask);
-}
-
-/* check MCG_CTL on all the cpus on this node */
-static bool amd64_nb_mce_bank_enabled_on_node(int nid)
-{
-	cpumask_var_t mask;
-	struct msr *msrs;
-	int cpu, nbe, idx = 0;
-	bool ret = false;
-
-	if (!zalloc_cpumask_var(&mask, GFP_KERNEL)) {
-		amd64_printk(KERN_WARNING, "%s: error allocating mask\n",
-			     __func__);
-		return false;
-	}
-
-	get_cpus_on_this_dct_cpumask(mask, nid);
-
-	msrs = kzalloc(sizeof(struct msr) * cpumask_weight(mask), GFP_KERNEL);
-	if (!msrs) {
-		amd64_printk(KERN_WARNING, "%s: error allocating msrs\n",
-			     __func__);
-		free_cpumask_var(mask);
-		return false;
-	}
-
-	rdmsr_on_cpus(mask, MSR_IA32_MCG_CTL, msrs);
-
-	for_each_cpu(cpu, mask) {
-		nbe = msrs[idx].l & K8_MSR_MCGCTL_NBE;
-
-		debugf0("core: %u, MCG_CTL: 0x%llx, NB MSR is %s\n",
-			cpu, msrs[idx].q,
-			(nbe ? "enabled" : "disabled"));
-
-		if (!nbe)
-			goto out;
-
-		idx++;
-	}
-	ret = true;
-
-out:
-	kfree(msrs);
-	free_cpumask_var(mask);
-	return ret;
+	if (amd64_toggle_ecc_err_reporting(pvt, OFF))
+		amd64_printk(KERN_WARNING, "Error restoring ECC reporting over "
+					   "MCGCTL!\n");
 }
 
 /*
@@ -2921,7 +2944,6 @@ static int amd64_probe_one_instance(struct pci_dev *dram_f2_ctl,
 	pvt->ext_model = boot_cpu_data.x86_model >> 4;
 	pvt->mc_type_index = mc_type_index;
 	pvt->ops = family_ops(mc_type_index);
-	pvt->old_mcgctl = 0;
 
 	/*
 	 * We have the dram_f2_ctl device as an argument, now go reserve its