diff options
author | Linus Torvalds <torvalds@linux-foundation.org> | 2013-11-11 22:01:14 -0500 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2013-11-11 22:01:14 -0500 |
commit | 9b66bfb28049594fe2bb2b91607ba302f511ce8b (patch) | |
tree | e96e79b1864699800c3f2c2e06b482a995744daf /arch/x86/kernel/apic | |
parent | c2136301e43cbb3b71d0163a9949f30dafcb4590 (diff) | |
parent | b5dfcb09debc38582c3cb72a3c1a88b919b07f2d (diff) |
Merge branch 'x86-uv-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull x86 UV debug changes from Ingo Molnar:
"Various SGI UV debuggability improvements, amongst them KDB support,
with related core KDB enabling patches changing kernel/debug/kdb/"
* 'x86-uv-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
Revert "x86/UV: Add uvtrace support"
x86/UV: Add call to KGDB/KDB from NMI handler
kdb: Add support for external NMI handler to call KGDB/KDB
x86/UV: Check for alloc_cpumask_var() failures properly in uv_nmi_setup()
x86/UV: Add uvtrace support
x86/UV: Add kdump to UV NMI handler
x86/UV: Add summary of cpu activity to UV NMI handler
x86/UV: Update UV support for external NMI signals
x86/UV: Move NMI support
Diffstat (limited to 'arch/x86/kernel/apic')
-rw-r--r-- | arch/x86/kernel/apic/x2apic_uv_x.c | 70 |
1 file changed, 1 insertion, 69 deletions
diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c index a419814cea57..ad0dc0428baf 100644 --- a/arch/x86/kernel/apic/x2apic_uv_x.c +++ b/arch/x86/kernel/apic/x2apic_uv_x.c | |||
@@ -39,12 +39,6 @@ | |||
39 | #include <asm/x86_init.h> | 39 | #include <asm/x86_init.h> |
40 | #include <asm/nmi.h> | 40 | #include <asm/nmi.h> |
41 | 41 | ||
42 | /* BMC sets a bit this MMR non-zero before sending an NMI */ | ||
43 | #define UVH_NMI_MMR UVH_SCRATCH5 | ||
44 | #define UVH_NMI_MMR_CLEAR (UVH_NMI_MMR + 8) | ||
45 | #define UV_NMI_PENDING_MASK (1UL << 63) | ||
46 | DEFINE_PER_CPU(unsigned long, cpu_last_nmi_count); | ||
47 | |||
48 | DEFINE_PER_CPU(int, x2apic_extra_bits); | 42 | DEFINE_PER_CPU(int, x2apic_extra_bits); |
49 | 43 | ||
50 | #define PR_DEVEL(fmt, args...) pr_devel("%s: " fmt, __func__, args) | 44 | #define PR_DEVEL(fmt, args...) pr_devel("%s: " fmt, __func__, args) |
@@ -58,7 +52,6 @@ int uv_min_hub_revision_id; | |||
58 | EXPORT_SYMBOL_GPL(uv_min_hub_revision_id); | 52 | EXPORT_SYMBOL_GPL(uv_min_hub_revision_id); |
59 | unsigned int uv_apicid_hibits; | 53 | unsigned int uv_apicid_hibits; |
60 | EXPORT_SYMBOL_GPL(uv_apicid_hibits); | 54 | EXPORT_SYMBOL_GPL(uv_apicid_hibits); |
61 | static DEFINE_SPINLOCK(uv_nmi_lock); | ||
62 | 55 | ||
63 | static struct apic apic_x2apic_uv_x; | 56 | static struct apic apic_x2apic_uv_x; |
64 | 57 | ||
@@ -847,68 +840,6 @@ void uv_cpu_init(void) | |||
847 | set_x2apic_extra_bits(uv_hub_info->pnode); | 840 | set_x2apic_extra_bits(uv_hub_info->pnode); |
848 | } | 841 | } |
849 | 842 | ||
850 | /* | ||
851 | * When NMI is received, print a stack trace. | ||
852 | */ | ||
853 | int uv_handle_nmi(unsigned int reason, struct pt_regs *regs) | ||
854 | { | ||
855 | unsigned long real_uv_nmi; | ||
856 | int bid; | ||
857 | |||
858 | /* | ||
859 | * Each blade has an MMR that indicates when an NMI has been sent | ||
860 | * to cpus on the blade. If an NMI is detected, atomically | ||
861 | * clear the MMR and update a per-blade NMI count used to | ||
862 | * cause each cpu on the blade to notice a new NMI. | ||
863 | */ | ||
864 | bid = uv_numa_blade_id(); | ||
865 | real_uv_nmi = (uv_read_local_mmr(UVH_NMI_MMR) & UV_NMI_PENDING_MASK); | ||
866 | |||
867 | if (unlikely(real_uv_nmi)) { | ||
868 | spin_lock(&uv_blade_info[bid].nmi_lock); | ||
869 | real_uv_nmi = (uv_read_local_mmr(UVH_NMI_MMR) & UV_NMI_PENDING_MASK); | ||
870 | if (real_uv_nmi) { | ||
871 | uv_blade_info[bid].nmi_count++; | ||
872 | uv_write_local_mmr(UVH_NMI_MMR_CLEAR, UV_NMI_PENDING_MASK); | ||
873 | } | ||
874 | spin_unlock(&uv_blade_info[bid].nmi_lock); | ||
875 | } | ||
876 | |||
877 | if (likely(__get_cpu_var(cpu_last_nmi_count) == uv_blade_info[bid].nmi_count)) | ||
878 | return NMI_DONE; | ||
879 | |||
880 | __get_cpu_var(cpu_last_nmi_count) = uv_blade_info[bid].nmi_count; | ||
881 | |||
882 | /* | ||
883 | * Use a lock so only one cpu prints at a time. | ||
884 | * This prevents intermixed output. | ||
885 | */ | ||
886 | spin_lock(&uv_nmi_lock); | ||
887 | pr_info("UV NMI stack dump cpu %u:\n", smp_processor_id()); | ||
888 | dump_stack(); | ||
889 | spin_unlock(&uv_nmi_lock); | ||
890 | |||
891 | return NMI_HANDLED; | ||
892 | } | ||
893 | |||
894 | void uv_register_nmi_notifier(void) | ||
895 | { | ||
896 | if (register_nmi_handler(NMI_UNKNOWN, uv_handle_nmi, 0, "uv")) | ||
897 | printk(KERN_WARNING "UV NMI handler failed to register\n"); | ||
898 | } | ||
899 | |||
900 | void uv_nmi_init(void) | ||
901 | { | ||
902 | unsigned int value; | ||
903 | |||
904 | /* | ||
905 | * Unmask NMI on all cpus | ||
906 | */ | ||
907 | value = apic_read(APIC_LVT1) | APIC_DM_NMI; | ||
908 | value &= ~APIC_LVT_MASKED; | ||
909 | apic_write(APIC_LVT1, value); | ||
910 | } | ||
911 | |||
912 | void __init uv_system_init(void) | 843 | void __init uv_system_init(void) |
913 | { | 844 | { |
914 | union uvh_rh_gam_config_mmr_u m_n_config; | 845 | union uvh_rh_gam_config_mmr_u m_n_config; |
@@ -1046,6 +977,7 @@ void __init uv_system_init(void) | |||
1046 | map_mmr_high(max_pnode); | 977 | map_mmr_high(max_pnode); |
1047 | map_mmioh_high(min_pnode, max_pnode); | 978 | map_mmioh_high(min_pnode, max_pnode); |
1048 | 979 | ||
980 | uv_nmi_setup(); | ||
1049 | uv_cpu_init(); | 981 | uv_cpu_init(); |
1050 | uv_scir_register_cpu_notifier(); | 982 | uv_scir_register_cpu_notifier(); |
1051 | uv_register_nmi_notifier(); | 983 | uv_register_nmi_notifier(); |