Diffstat (limited to 'arch/x86/platform/uv/uv_nmi.c')
 arch/x86/platform/uv/uv_nmi.c | 40 ++++++++++++++++++++--------------------
 1 file changed, 20 insertions(+), 20 deletions(-)
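In brief: the patch renames the per-CPU variable `__uv_cpu_nmi` to `uv_cpu_nmi`, replaces `atomic_read()`/`atomic_set()` on its `state` and `pinging` fields with plain accesses (`this_cpu_read()`/`this_cpu_write()` locally, direct assignment through `uv_cpu_nmi_per(cpu)` remotely), and turns the bare `queries++`/`pings++` statistics updates into `this_cpu_inc()`. The matching field-type change in the structure definition lives in a header, outside this diffstat's filter. A short illustration of the resulting access pattern follows the diff.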
diff --git a/arch/x86/platform/uv/uv_nmi.c b/arch/x86/platform/uv/uv_nmi.c
index c89c93320c12..c6b146e67116 100644
--- a/arch/x86/platform/uv/uv_nmi.c
+++ b/arch/x86/platform/uv/uv_nmi.c
@@ -63,8 +63,8 @@
 
 static struct uv_hub_nmi_s **uv_hub_nmi_list;
 
-DEFINE_PER_CPU(struct uv_cpu_nmi_s, __uv_cpu_nmi);
-EXPORT_PER_CPU_SYMBOL_GPL(__uv_cpu_nmi);
+DEFINE_PER_CPU(struct uv_cpu_nmi_s, uv_cpu_nmi);
+EXPORT_PER_CPU_SYMBOL_GPL(uv_cpu_nmi);
 
 static unsigned long nmi_mmr;
 static unsigned long nmi_mmr_clear;
@@ -215,7 +215,7 @@ static int uv_check_nmi(struct uv_hub_nmi_s *hub_nmi)
 	int nmi = 0;
 
 	local64_inc(&uv_nmi_count);
-	uv_cpu_nmi.queries++;
+	this_cpu_inc(uv_cpu_nmi.queries);
 
 	do {
 		nmi = atomic_read(&hub_nmi->in_nmi);
@@ -293,7 +293,7 @@ static void uv_nmi_nr_cpus_ping(void)
 	int cpu;
 
 	for_each_cpu(cpu, uv_nmi_cpu_mask)
-		atomic_set(&uv_cpu_nmi_per(cpu).pinging, 1);
+		uv_cpu_nmi_per(cpu).pinging = 1;
 
 	apic->send_IPI_mask(uv_nmi_cpu_mask, APIC_DM_NMI);
 }
@@ -304,8 +304,8 @@ static void uv_nmi_cleanup_mask(void)
 	int cpu;
 
 	for_each_cpu(cpu, uv_nmi_cpu_mask) {
-		atomic_set(&uv_cpu_nmi_per(cpu).pinging, 0);
-		atomic_set(&uv_cpu_nmi_per(cpu).state, UV_NMI_STATE_OUT);
+		uv_cpu_nmi_per(cpu).pinging = 0;
+		uv_cpu_nmi_per(cpu).state = UV_NMI_STATE_OUT;
 		cpumask_clear_cpu(cpu, uv_nmi_cpu_mask);
 	}
 }
@@ -328,7 +328,7 @@ static int uv_nmi_wait_cpus(int first)
 		int loop_delay = uv_nmi_loop_delay;
 
 		for_each_cpu(j, uv_nmi_cpu_mask) {
-			if (atomic_read(&uv_cpu_nmi_per(j).state)) {
+			if (uv_cpu_nmi_per(j).state) {
 				cpumask_clear_cpu(j, uv_nmi_cpu_mask);
 				if (++k >= n)
 					break;
@@ -359,7 +359,7 @@ static int uv_nmi_wait_cpus(int first)
 static void uv_nmi_wait(int master)
 {
 	/* indicate this cpu is in */
-	atomic_set(&uv_cpu_nmi.state, UV_NMI_STATE_IN);
+	this_cpu_write(uv_cpu_nmi.state, UV_NMI_STATE_IN);
 
 	/* if not the first cpu in (the master), then we are a slave cpu */
 	if (!master)
@@ -419,7 +419,7 @@ static void uv_nmi_dump_state_cpu(int cpu, struct pt_regs *regs)
 			"UV:%sNMI process trace for CPU %d\n", dots, cpu);
 		show_regs(regs);
 	}
-	atomic_set(&uv_cpu_nmi.state, UV_NMI_STATE_DUMP_DONE);
+	this_cpu_write(uv_cpu_nmi.state, UV_NMI_STATE_DUMP_DONE);
 }
 
 /* Trigger a slave cpu to dump it's state */
@@ -427,20 +427,20 @@ static void uv_nmi_trigger_dump(int cpu)
 {
 	int retry = uv_nmi_trigger_delay;
 
-	if (atomic_read(&uv_cpu_nmi_per(cpu).state) != UV_NMI_STATE_IN)
+	if (uv_cpu_nmi_per(cpu).state != UV_NMI_STATE_IN)
 		return;
 
-	atomic_set(&uv_cpu_nmi_per(cpu).state, UV_NMI_STATE_DUMP);
+	uv_cpu_nmi_per(cpu).state = UV_NMI_STATE_DUMP;
 	do {
 		cpu_relax();
 		udelay(10);
-		if (atomic_read(&uv_cpu_nmi_per(cpu).state)
+		if (uv_cpu_nmi_per(cpu).state
 			!= UV_NMI_STATE_DUMP)
 			return;
 	} while (--retry > 0);
 
 	pr_crit("UV: CPU %d stuck in process dump function\n", cpu);
-	atomic_set(&uv_cpu_nmi_per(cpu).state, UV_NMI_STATE_DUMP_DONE);
+	uv_cpu_nmi_per(cpu).state = UV_NMI_STATE_DUMP_DONE;
 }
 
 /* Wait until all cpus ready to exit */
@@ -488,7 +488,7 @@ static void uv_nmi_dump_state(int cpu, struct pt_regs *regs, int master)
 	} else {
 		while (!atomic_read(&uv_nmi_slave_continue))
 			cpu_relax();
-		while (atomic_read(&uv_cpu_nmi.state) != UV_NMI_STATE_DUMP)
+		while (this_cpu_read(uv_cpu_nmi.state) != UV_NMI_STATE_DUMP)
 			cpu_relax();
 		uv_nmi_dump_state_cpu(cpu, regs);
 	}
@@ -615,7 +615,7 @@ int uv_handle_nmi(unsigned int reason, struct pt_regs *regs)
 	local_irq_save(flags);
 
 	/* If not a UV System NMI, ignore */
-	if (!atomic_read(&uv_cpu_nmi.pinging) && !uv_check_nmi(hub_nmi)) {
+	if (!this_cpu_read(uv_cpu_nmi.pinging) && !uv_check_nmi(hub_nmi)) {
 		local_irq_restore(flags);
 		return NMI_DONE;
 	}
@@ -639,7 +639,7 @@ int uv_handle_nmi(unsigned int reason, struct pt_regs *regs)
 		uv_call_kgdb_kdb(cpu, regs, master);
 
 	/* Clear per_cpu "in nmi" flag */
-	atomic_set(&uv_cpu_nmi.state, UV_NMI_STATE_OUT);
+	this_cpu_write(uv_cpu_nmi.state, UV_NMI_STATE_OUT);
 
 	/* Clear MMR NMI flag on each hub */
 	uv_clear_nmi(cpu);
@@ -666,16 +666,16 @@ static int uv_handle_nmi_ping(unsigned int reason, struct pt_regs *regs)
 {
 	int ret;
 
-	uv_cpu_nmi.queries++;
-	if (!atomic_read(&uv_cpu_nmi.pinging)) {
+	this_cpu_inc(uv_cpu_nmi.queries);
+	if (!this_cpu_read(uv_cpu_nmi.pinging)) {
 		local64_inc(&uv_nmi_ping_misses);
 		return NMI_DONE;
 	}
 
-	uv_cpu_nmi.pings++;
+	this_cpu_inc(uv_cpu_nmi.pings);
 	local64_inc(&uv_nmi_ping_count);
 	ret = uv_handle_nmi(reason, regs);
-	atomic_set(&uv_cpu_nmi.pinging, 0);
+	this_cpu_write(uv_cpu_nmi.pinging, 0);
 	return ret;
 }
 
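The pattern the patch converts to, as a standalone illustration. The sketch below is not from the patch: `demo_cpu_state`, `demo_enter()` and `demo_cpu_is_in()` are hypothetical names, and only the per-CPU primitives (`DEFINE_PER_CPU()`, `this_cpu_inc()`, `this_cpu_write()`, `per_cpu()`) are real kernel API. On the owning CPU, `this_cpu_*()` operations are irq- and NMI-safe and compile to single `%gs`-prefixed instructions on x86, while another CPU's instance stays reachable through `per_cpu()`, which is presumably what the `uv_cpu_nmi_per(cpu)` macro used above wraps.

/*
 * Illustrative sketch only -- these names do not exist in the kernel.
 * It mirrors the access pattern uv_nmi.c is converted to: plain per-CPU
 * fields, this_cpu_*() ops locally, per_cpu() for another CPU's copy.
 */
#include <linux/percpu.h>

struct demo_cpu_state {
	int	state;		/* NMI progress state of this CPU */
	int	queries;	/* how often the NMI handler was entered */
};

static DEFINE_PER_CPU(struct demo_cpu_state, demo_cpu_state);

/* Runs on the owning CPU (e.g. from an NMI handler): irq/NMI-safe. */
static void demo_enter(void)
{
	this_cpu_inc(demo_cpu_state.queries);
	this_cpu_write(demo_cpu_state.state, 1);
}

/* Runs on a coordinating CPU, inspecting another CPU's instance. */
static int demo_cpu_is_in(int cpu)
{
	return per_cpu(demo_cpu_state, cpu).state;
}

Note that nothing is lost by dropping `atomic_t`: `atomic_read()` and `atomic_set()` are plain word-sized loads and stores with no ordering or read-modify-write guarantees, so fields that are CPU-local or written by a single coordinator at a time (as the `state` and `pinging` flags in this file appear to be) work just as well as ordinary integers, and the local-CPU accesses get cheaper.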