author		travis@sgi.com <travis@sgi.com>	2017-01-25 11:35:24 -0500
committer	Ingo Molnar <mingo@kernel.org>	2017-02-01 04:21:00 -0500
commit		1e74016370ec3d552a7f5df18bb2b0f1c80b5a9f (patch)
tree		710fa0278ee93d3557de924f6df0be957a80411d /arch/x86/platform/uv/uv_nmi.c
parent		9ec808a0225aabab59fb2932b70784b087ac0f58 (diff)
x86/platform/UV: Clean up the NMI code to match current coding style
Update UV NMI to current coding style.

Signed-off-by: Mike Travis <travis@sgi.com>
Acked-by: Thomas Gleixner <tglx@linutronix.de>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Russ Anderson <rja@hpe.com>
Link: http://lkml.kernel.org/r/20170125163518.419094259@asylum.americas.sgi.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'arch/x86/platform/uv/uv_nmi.c')
-rw-r--r--	arch/x86/platform/uv/uv_nmi.c	74
1 file changed, 37 insertions, 37 deletions
diff --git a/arch/x86/platform/uv/uv_nmi.c b/arch/x86/platform/uv/uv_nmi.c
index 6a71b087da98..0ecd7bf7d2d3 100644
--- a/arch/x86/platform/uv/uv_nmi.c
+++ b/arch/x86/platform/uv/uv_nmi.c
@@ -45,8 +45,8 @@
  *
  * Handle system-wide NMI events generated by the global 'power nmi' command.
  *
- * Basic operation is to field the NMI interrupt on each cpu and wait
- * until all cpus have arrived into the nmi handler. If some cpus do not
+ * Basic operation is to field the NMI interrupt on each CPU and wait
+ * until all CPU's have arrived into the nmi handler. If some CPU's do not
  * make it into the handler, try and force them in with the IPI(NMI) signal.
  *
  * We also have to lessen UV Hub MMR accesses as much as possible as this
@@ -56,7 +56,7 @@
  * To do this we register our primary NMI notifier on the NMI_UNKNOWN
  * chain. This reduces the number of false NMI calls when the perf
  * tools are running which generate an enormous number of NMIs per
- * second (~4M/s for 1024 cpu threads). Our secondary NMI handler is
+ * second (~4M/s for 1024 CPU threads). Our secondary NMI handler is
  * very short as it only checks that if it has been "pinged" with the
  * IPI(NMI) signal as mentioned above, and does not read the UV Hub's MMR.
  *
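The comment block above boils down to a rendezvous: every CPU that fields the NMI marks itself "in", the first CPU in acts as master and waits for the rest, and any CPU that never shows up is prodded with an IPI(NMI). The following is a minimal userspace sketch of that pattern only, not the kernel code: pthreads stand in for CPUs, thread 0 is hard-wired as the master (the kernel instead elects whichever CPU arrives first), and every name in it (nmi_entry, NR_CPUS_SIM, cpus_in, pinged) is invented for the example.

/* Illustrative userspace model of the "wait for all CPUs, ping stragglers" pattern. */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <unistd.h>

#define NR_CPUS_SIM	4		/* pretend CPU count */

static atomic_int cpus_in;		/* how many "CPUs" reached the handler */
static atomic_int pinged;		/* stand-in for the IPI(NMI) nudge */

/* Each thread plays one CPU fielding the NMI. */
static void *nmi_entry(void *arg)
{
	long cpu = (long)arg;

	/* One "CPU" ignores the NMI until the master pings it. */
	if (cpu == NR_CPUS_SIM - 1)
		while (!atomic_load(&pinged))
			usleep(1000);

	atomic_fetch_add(&cpus_in, 1);	/* mark this CPU as "in" */

	if (cpu == 0) {			/* thread 0 plays the master */
		usleep(10000);		/* bounded wait for the others */
		if (atomic_load(&cpus_in) < NR_CPUS_SIM) {
			printf("master: pinging stragglers\n");
			atomic_store(&pinged, 1);
		}
		while (atomic_load(&cpus_in) < NR_CPUS_SIM)
			usleep(1000);
		printf("master: all %d CPUs are in\n", NR_CPUS_SIM);
	}
	return NULL;
}

int main(void)
{
	pthread_t t[NR_CPUS_SIM];
	long i;

	for (i = 0; i < NR_CPUS_SIM; i++)
		pthread_create(&t[i], NULL, nmi_entry, (void *)i);
	for (i = 0; i < NR_CPUS_SIM; i++)
		pthread_join(t[i], NULL);
	return 0;
}

Build with "cc -pthread"; the point is only the control flow (everyone checks in, the master waits, then pings, then waits again), which is what uv_nmi_wait() and uv_nmi_nr_cpus_ping() implement for real further down in this file.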
@@ -113,7 +113,7 @@ static int param_get_local64(char *buffer, const struct kernel_param *kp)
 
 static int param_set_local64(const char *val, const struct kernel_param *kp)
 {
-	/* clear on any write */
+	/* Clear on any write */
 	local64_set((local64_t *)kp->arg, 0);
 	return 0;
 }
@@ -322,7 +322,7 @@ static struct init_nmi {
 		.data = 0x0,	/* ACPI Mode */
 	},
 
-/* clear status */
+/* Clear status: */
 	{	/* GPI_INT_STS_GPP_D_0 */
 		.offset = 0x104,
 		.mask = 0x0,
@@ -344,29 +344,29 @@ static struct init_nmi {
 		.data = 0x1,	/* Clear Status */
 	},
 
-/* disable interrupts */
+/* Disable interrupts: */
 	{	/* GPI_INT_EN_GPP_D_0 */
 		.offset = 0x114,
 		.mask = 0x1,
-		.data = 0x0,	/* disable interrupt generation */
+		.data = 0x0,	/* Disable interrupt generation */
 	},
 	{	/* GPI_GPE_EN_GPP_D_0 */
 		.offset = 0x134,
 		.mask = 0x1,
-		.data = 0x0,	/* disable interrupt generation */
+		.data = 0x0,	/* Disable interrupt generation */
 	},
 	{	/* GPI_SMI_EN_GPP_D_0 */
 		.offset = 0x154,
 		.mask = 0x1,
-		.data = 0x0,	/* disable interrupt generation */
+		.data = 0x0,	/* Disable interrupt generation */
 	},
 	{	/* GPI_NMI_EN_GPP_D_0 */
 		.offset = 0x174,
 		.mask = 0x1,
-		.data = 0x0,	/* disable interrupt generation */
+		.data = 0x0,	/* Disable interrupt generation */
 	},
 
-/* setup GPP_D_0 Pad Config */
+/* Setup GPP_D_0 Pad Config: */
 	{	/* PAD_CFG_DW0_GPP_D_0 */
 		.offset = 0x4c0,
 		.mask = 0xffffffff,
@@ -444,7 +444,7 @@ static int uv_nmi_test_hubless(struct uv_hub_nmi_s *hub_nmi)
 		return 0;
 
 	*pstat = STS_GPP_D_0_MASK;	/* Is a UV NMI: clear GPP_D_0 status */
-	(void)*pstat;			/* flush write */
+	(void)*pstat;			/* Flush write */
 
 	return 1;
 }
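The two lines above use a common MMIO idiom: the PCH status bit is acknowledged by writing it back (write-one-to-clear), and re-reading through the volatile pointer forces the posted write out before the handler returns. Below is a small userspace mock of just that access sequence; a plain variable stands in for the ioremap()ed register (so the "clear" merely stores the value), and STS_BIT, fake_status and test_status are names made up for the example.

/* Userspace mock of the write-one-to-clear + read-back-to-flush sequence. */
#include <stdint.h>
#include <stdio.h>

#define STS_BIT 0x1u				/* stand-in for STS_GPP_D_0_MASK */

static volatile uint32_t fake_status = STS_BIT;	/* pretend PCH status word */

static int test_status(volatile uint32_t *pstat)
{
	if (!(*pstat & STS_BIT))
		return 0;			/* not our NMI */

	*pstat = STS_BIT;			/* ack: write the status bit back */
	(void)*pstat;				/* read back to flush the posted write */
	return 1;				/* it was our NMI */
}

int main(void)
{
	printf("NMI claimed: %d\n", test_status(&fake_status));
	return 0;
}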
@@ -461,8 +461,8 @@ static int uv_test_nmi(struct uv_hub_nmi_s *hub_nmi)
 }
 
 /*
- * If first cpu in on this hub, set hub_nmi "in_nmi" and "owner" values and
- * return true. If first cpu in on the system, set global "in_nmi" flag.
+ * If first CPU in on this hub, set hub_nmi "in_nmi" and "owner" values and
+ * return true. If first CPU in on the system, set global "in_nmi" flag.
  */
 static int uv_set_in_nmi(int cpu, struct uv_hub_nmi_s *hub_nmi)
 {
@@ -496,7 +496,7 @@ static int uv_check_nmi(struct uv_hub_nmi_s *hub_nmi)
 	if (raw_spin_trylock(&hub_nmi->nmi_lock)) {
 		nmi_detected = uv_test_nmi(hub_nmi);
 
-		/* check flag for UV external NMI */
+		/* Check flag for UV external NMI */
 		if (nmi_detected > 0) {
 			uv_set_in_nmi(cpu, hub_nmi);
 			nmi = 1;
@@ -516,7 +516,7 @@ static int uv_check_nmi(struct uv_hub_nmi_s *hub_nmi)
 slave_wait:		cpu_relax();
 			udelay(uv_nmi_slave_delay);
 
-			/* re-check hub in_nmi flag */
+			/* Re-check hub in_nmi flag */
 			nmi = atomic_read(&hub_nmi->in_nmi);
 			if (nmi)
 				break;
@@ -560,7 +560,7 @@ static inline void uv_clear_nmi(int cpu)
 	}
 }
 
-/* Ping non-responding cpus attemping to force them into the NMI handler */
+/* Ping non-responding CPU's attemping to force them into the NMI handler */
 static void uv_nmi_nr_cpus_ping(void)
 {
 	int cpu;
@@ -571,7 +571,7 @@ static void uv_nmi_nr_cpus_ping(void)
 	apic->send_IPI_mask(uv_nmi_cpu_mask, APIC_DM_NMI);
 }
 
-/* Clean up flags for cpus that ignored both NMI and ping */
+/* Clean up flags for CPU's that ignored both NMI and ping */
 static void uv_nmi_cleanup_mask(void)
 {
 	int cpu;
@@ -583,7 +583,7 @@ static void uv_nmi_cleanup_mask(void)
 	}
 }
 
-/* Loop waiting as cpus enter NMI handler */
+/* Loop waiting as CPU's enter NMI handler */
 static int uv_nmi_wait_cpus(int first)
 {
 	int i, j, k, n = num_online_cpus();
@@ -597,7 +597,7 @@ static int uv_nmi_wait_cpus(int first)
 		k = n - cpumask_weight(uv_nmi_cpu_mask);
 	}
 
-	/* PCH NMI causes only one cpu to respond */
+	/* PCH NMI causes only one CPU to respond */
 	if (first && uv_pch_intr_now_enabled) {
 		cpumask_clear_cpu(cpu, uv_nmi_cpu_mask);
 		return n - k - 1;
@@ -618,13 +618,13 @@ static int uv_nmi_wait_cpus(int first)
 			k = n;
 			break;
 		}
-		if (last_k != k) {	/* abort if no new cpus coming in */
+		if (last_k != k) {	/* abort if no new CPU's coming in */
 			last_k = k;
 			waiting = 0;
 		} else if (++waiting > uv_nmi_wait_count)
 			break;
 
-		/* extend delay if waiting only for cpu 0 */
+		/* Extend delay if waiting only for CPU 0: */
 		if (waiting && (n - k) == 1 &&
 		    cpumask_test_cpu(0, uv_nmi_cpu_mask))
 			loop_delay *= 100;
@@ -635,29 +635,29 @@ static int uv_nmi_wait_cpus(int first)
 	return n - k;
 }
 
-/* Wait until all slave cpus have entered UV NMI handler */
+/* Wait until all slave CPU's have entered UV NMI handler */
 static void uv_nmi_wait(int master)
 {
-	/* indicate this cpu is in */
+	/* Indicate this CPU is in: */
 	this_cpu_write(uv_cpu_nmi.state, UV_NMI_STATE_IN);
 
-	/* if not the first cpu in (the master), then we are a slave cpu */
+	/* If not the first CPU in (the master), then we are a slave CPU */
 	if (!master)
 		return;
 
 	do {
-		/* wait for all other cpus to gather here */
+		/* Wait for all other CPU's to gather here */
 		if (!uv_nmi_wait_cpus(1))
 			break;
 
-		/* if not all made it in, send IPI NMI to them */
+		/* If not all made it in, send IPI NMI to them */
 		pr_alert("UV: Sending NMI IPI to %d CPUs: %*pbl\n",
 			 cpumask_weight(uv_nmi_cpu_mask),
 			 cpumask_pr_args(uv_nmi_cpu_mask));
 
 		uv_nmi_nr_cpus_ping();
 
-		/* if all cpus are in, then done */
+		/* If all CPU's are in, then done */
 		if (!uv_nmi_wait_cpus(0))
 			break;
 
@@ -709,7 +709,7 @@ static void uv_nmi_dump_state_cpu(int cpu, struct pt_regs *regs)
 	this_cpu_write(uv_cpu_nmi.state, UV_NMI_STATE_DUMP_DONE);
 }
 
-/* Trigger a slave cpu to dump it's state */
+/* Trigger a slave CPU to dump it's state */
 static void uv_nmi_trigger_dump(int cpu)
 {
 	int retry = uv_nmi_trigger_delay;
@@ -730,7 +730,7 @@ static void uv_nmi_trigger_dump(int cpu)
 	uv_cpu_nmi_per(cpu).state = UV_NMI_STATE_DUMP_DONE;
 }
 
-/* Wait until all cpus ready to exit */
+/* Wait until all CPU's ready to exit */
 static void uv_nmi_sync_exit(int master)
 {
 	atomic_dec(&uv_nmi_cpus_in_nmi);
@@ -760,7 +760,7 @@ static void uv_nmi_action_health(int cpu, struct pt_regs *regs, int master)
 	uv_nmi_sync_exit(master);
 }
 
-/* Walk through cpu list and dump state of each */
+/* Walk through CPU list and dump state of each */
 static void uv_nmi_dump_state(int cpu, struct pt_regs *regs, int master)
 {
 	if (master) {
@@ -872,7 +872,7 @@ static void uv_call_kgdb_kdb(int cpu, struct pt_regs *regs, int master)
 		if (reason < 0)
 			return;
 
-		/* call KGDB NMI handler as MASTER */
+		/* Call KGDB NMI handler as MASTER */
 		ret = kgdb_nmicallin(cpu, X86_TRAP_NMI, regs, reason,
 				&uv_nmi_slave_continue);
 		if (ret) {
@@ -880,7 +880,7 @@ static void uv_call_kgdb_kdb(int cpu, struct pt_regs *regs, int master)
 			atomic_set(&uv_nmi_slave_continue, SLAVE_EXIT);
 		}
 	} else {
-		/* wait for KGDB signal that it's ready for slaves to enter */
+		/* Wait for KGDB signal that it's ready for slaves to enter */
 		int sig;
 
 		do {
@@ -888,7 +888,7 @@ static void uv_call_kgdb_kdb(int cpu, struct pt_regs *regs, int master)
 			sig = atomic_read(&uv_nmi_slave_continue);
 		} while (!sig);
 
-		/* call KGDB as slave */
+		/* Call KGDB as slave */
 		if (sig == SLAVE_CONTINUE)
 			kgdb_nmicallback(cpu, regs);
 	}
@@ -932,7 +932,7 @@ int uv_handle_nmi(unsigned int reason, struct pt_regs *regs)
 		strncpy(uv_nmi_action, "dump", strlen(uv_nmi_action));
 	}
 
-	/* Pause as all cpus enter the NMI handler */
+	/* Pause as all CPU's enter the NMI handler */
 	uv_nmi_wait(master);
 
 	/* Process actions other than "kdump": */
@@ -972,7 +972,7 @@ int uv_handle_nmi(unsigned int reason, struct pt_regs *regs)
 }
 
 /*
- * NMI handler for pulling in CPUs when perf events are grabbing our NMI
+ * NMI handler for pulling in CPU's when perf events are grabbing our NMI
  */
 static int uv_handle_nmi_ping(unsigned int reason, struct pt_regs *regs)
 {
@@ -1005,7 +1005,7 @@ void uv_nmi_init(void)
 	unsigned int value;
 
 	/*
-	 * Unmask NMI on all cpus
+	 * Unmask NMI on all CPU's
 	 */
 	value = apic_read(APIC_LVT1) | APIC_DM_NMI;
 	value &= ~APIC_LVT_MASKED;