author		Christoph Lameter <cl@linux.com>	2014-08-17 13:30:41 -0400
committer	Tejun Heo <tj@kernel.org>		2014-08-26 13:45:50 -0400
commit		e16321709c8270f9803bbfdb51e5e02235078c7f
tree		ca4c858f3596fff6492b1fce63d1bbe6a8e08f22
parent		89cbc76768c2fa4ed95545bf961f3a14ddfeed21
uv: Replace __get_cpu_var
Use this_cpu_read()/this_cpu_write()/this_cpu_inc() instead. The per-cpu
variable __uv_cpu_nmi is renamed to uv_cpu_nmi and the uv_cpu_nmi accessor
macro is dropped; the state and pinging fields become plain ints, since
atomic_set()/atomic_read() on them were plain stores and loads anyway.
Cc: Hedi Berriche <hedi@sgi.com>
Cc: Mike Travis <travis@sgi.com>
Cc: Dimitri Sivanich <sivanich@sgi.com>
Signed-off-by: Christoph Lameter <cl@linux.com>
Signed-off-by: Tejun Heo <tj@kernel.org>
 arch/x86/include/asm/uv/uv_hub.h | 10 +++++-----
 arch/x86/platform/uv/uv_nmi.c    | 40 ++++++++++++++++++++--------------------
 2 files changed, 25 insertions(+), 25 deletions(-)
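Context for the conversion: this commit is part of the tree-wide removal of
the __get_cpu_var() accessor in favor of the this_cpu operations, which name
the per-cpu variable directly and perform the access in one interrupt-safe
step. The sketch below is illustrative only and not part of the patch; it
assumes a made-up demo_stats per-cpu struct, but the calls mirror the pattern
the diff applies.

#include <linux/percpu.h>

/* Hypothetical per-cpu state, for illustration only. */
struct demo_stats {
	int	queries;
	int	state;
};
static DEFINE_PER_CPU(struct demo_stats, demo_stats);

static void demo_conversion(void)
{
	/*
	 * Old style (removed kernel-wide): __get_cpu_var() returned an
	 * lvalue for this cpu's copy, and the open-coded increment was a
	 * multi-instruction read-modify-write:
	 *
	 *	__get_cpu_var(demo_stats).queries++;
	 */

	/* New style: this_cpu ops take the per-cpu variable by name; on
	 * x86 each compiles to a single %gs-prefixed instruction, so the
	 * access cannot be torn by an interrupt or a nested NMI. */
	this_cpu_inc(demo_stats.queries);
	this_cpu_write(demo_stats.state, 1);
	if (this_cpu_read(demo_stats.state))
		this_cpu_inc(demo_stats.queries);
}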
diff --git a/arch/x86/include/asm/uv/uv_hub.h b/arch/x86/include/asm/uv/uv_hub.h
index bb84cfd5a1a1..a00ad8f2a657 100644
--- a/arch/x86/include/asm/uv/uv_hub.h
+++ b/arch/x86/include/asm/uv/uv_hub.h
@@ -601,16 +601,16 @@ struct uv_hub_nmi_s {
 
 struct uv_cpu_nmi_s {
 	struct uv_hub_nmi_s	*hub;
-	atomic_t		state;
-	atomic_t		pinging;
+	int			state;
+	int			pinging;
 	int			queries;
 	int			pings;
 };
 
-DECLARE_PER_CPU(struct uv_cpu_nmi_s, __uv_cpu_nmi);
-#define uv_cpu_nmi			(__get_cpu_var(__uv_cpu_nmi))
+DECLARE_PER_CPU(struct uv_cpu_nmi_s, uv_cpu_nmi);
+
 #define uv_hub_nmi			(uv_cpu_nmi.hub)
-#define uv_cpu_nmi_per(cpu)		(per_cpu(__uv_cpu_nmi, cpu))
+#define uv_cpu_nmi_per(cpu)		(per_cpu(uv_cpu_nmi, cpu))
 #define uv_hub_nmi_per(cpu)		(uv_cpu_nmi_per(cpu).hub)
 
 /* uv_cpu_nmi_states */
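Note what the header change does: uv_cpu_nmi used to be a macro expanding to
__get_cpu_var(__uv_cpu_nmi), so "uv_cpu_nmi.queries++" silently dereferenced
the local cpu's copy. After the rename, uv_cpu_nmi is the per-cpu variable
itself, exactly the form the this_cpu operations expect, while
uv_cpu_nmi_per() keeps giving remote access. A minimal sketch of the
resulting call patterns (demo_touch() is hypothetical; the other identifiers
come from the header above):

#include <asm/uv/uv_hub.h>

static void demo_touch(int cpu)
{
	/* Local cpu: pass the per-cpu variable straight to this_cpu ops. */
	this_cpu_inc(uv_cpu_nmi.queries);
	this_cpu_write(uv_cpu_nmi.state, UV_NMI_STATE_IN);

	/* Remote cpu: uv_cpu_nmi_per() wraps per_cpu() and still yields a
	 * plain lvalue for another cpu's instance. */
	uv_cpu_nmi_per(cpu).pinging = 1;
}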
diff --git a/arch/x86/platform/uv/uv_nmi.c b/arch/x86/platform/uv/uv_nmi.c
index c89c93320c12..c6b146e67116 100644
--- a/arch/x86/platform/uv/uv_nmi.c
+++ b/arch/x86/platform/uv/uv_nmi.c
@@ -63,8 +63,8 @@
 
 static struct uv_hub_nmi_s **uv_hub_nmi_list;
 
-DEFINE_PER_CPU(struct uv_cpu_nmi_s, __uv_cpu_nmi);
-EXPORT_PER_CPU_SYMBOL_GPL(__uv_cpu_nmi);
+DEFINE_PER_CPU(struct uv_cpu_nmi_s, uv_cpu_nmi);
+EXPORT_PER_CPU_SYMBOL_GPL(uv_cpu_nmi);
 
 static unsigned long nmi_mmr;
 static unsigned long nmi_mmr_clear;
@@ -215,7 +215,7 @@ static int uv_check_nmi(struct uv_hub_nmi_s *hub_nmi)
 	int nmi = 0;
 
 	local64_inc(&uv_nmi_count);
-	uv_cpu_nmi.queries++;
+	this_cpu_inc(uv_cpu_nmi.queries);
 
 	do {
 		nmi = atomic_read(&hub_nmi->in_nmi);
@@ -293,7 +293,7 @@ static void uv_nmi_nr_cpus_ping(void)
 	int cpu;
 
 	for_each_cpu(cpu, uv_nmi_cpu_mask)
-		atomic_set(&uv_cpu_nmi_per(cpu).pinging, 1);
+		uv_cpu_nmi_per(cpu).pinging = 1;
 
 	apic->send_IPI_mask(uv_nmi_cpu_mask, APIC_DM_NMI);
 }
@@ -304,8 +304,8 @@ static void uv_nmi_cleanup_mask(void)
 	int cpu;
 
 	for_each_cpu(cpu, uv_nmi_cpu_mask) {
-		atomic_set(&uv_cpu_nmi_per(cpu).pinging, 0);
-		atomic_set(&uv_cpu_nmi_per(cpu).state, UV_NMI_STATE_OUT);
+		uv_cpu_nmi_per(cpu).pinging = 0;
+		uv_cpu_nmi_per(cpu).state = UV_NMI_STATE_OUT;
 		cpumask_clear_cpu(cpu, uv_nmi_cpu_mask);
 	}
 }
@@ -328,7 +328,7 @@ static int uv_nmi_wait_cpus(int first)
 	int loop_delay = uv_nmi_loop_delay;
 
 	for_each_cpu(j, uv_nmi_cpu_mask) {
-		if (atomic_read(&uv_cpu_nmi_per(j).state)) {
+		if (uv_cpu_nmi_per(j).state) {
 			cpumask_clear_cpu(j, uv_nmi_cpu_mask);
 			if (++k >= n)
 				break;
@@ -359,7 +359,7 @@ static int uv_nmi_wait_cpus(int first)
 static void uv_nmi_wait(int master)
 {
 	/* indicate this cpu is in */
-	atomic_set(&uv_cpu_nmi.state, UV_NMI_STATE_IN);
+	this_cpu_write(uv_cpu_nmi.state, UV_NMI_STATE_IN);
 
 	/* if not the first cpu in (the master), then we are a slave cpu */
 	if (!master)
@@ -419,7 +419,7 @@ static void uv_nmi_dump_state_cpu(int cpu, struct pt_regs *regs)
 			"UV:%sNMI process trace for CPU %d\n", dots, cpu);
 		show_regs(regs);
 	}
-	atomic_set(&uv_cpu_nmi.state, UV_NMI_STATE_DUMP_DONE);
+	this_cpu_write(uv_cpu_nmi.state, UV_NMI_STATE_DUMP_DONE);
 }
 
 /* Trigger a slave cpu to dump it's state */
@@ -427,20 +427,20 @@ static void uv_nmi_trigger_dump(int cpu)
 {
 	int retry = uv_nmi_trigger_delay;
 
-	if (atomic_read(&uv_cpu_nmi_per(cpu).state) != UV_NMI_STATE_IN)
+	if (uv_cpu_nmi_per(cpu).state != UV_NMI_STATE_IN)
 		return;
 
-	atomic_set(&uv_cpu_nmi_per(cpu).state, UV_NMI_STATE_DUMP);
+	uv_cpu_nmi_per(cpu).state = UV_NMI_STATE_DUMP;
 	do {
 		cpu_relax();
 		udelay(10);
-		if (atomic_read(&uv_cpu_nmi_per(cpu).state)
+		if (uv_cpu_nmi_per(cpu).state
 				!= UV_NMI_STATE_DUMP)
 			return;
 	} while (--retry > 0);
 
 	pr_crit("UV: CPU %d stuck in process dump function\n", cpu);
-	atomic_set(&uv_cpu_nmi_per(cpu).state, UV_NMI_STATE_DUMP_DONE);
+	uv_cpu_nmi_per(cpu).state = UV_NMI_STATE_DUMP_DONE;
 }
 
 /* Wait until all cpus ready to exit */
@@ -488,7 +488,7 @@ static void uv_nmi_dump_state(int cpu, struct pt_regs *regs, int master)
 	} else {
 		while (!atomic_read(&uv_nmi_slave_continue))
 			cpu_relax();
-		while (atomic_read(&uv_cpu_nmi.state) != UV_NMI_STATE_DUMP)
+		while (this_cpu_read(uv_cpu_nmi.state) != UV_NMI_STATE_DUMP)
 			cpu_relax();
 		uv_nmi_dump_state_cpu(cpu, regs);
 	}
@@ -615,7 +615,7 @@ int uv_handle_nmi(unsigned int reason, struct pt_regs *regs)
 	local_irq_save(flags);
 
 	/* If not a UV System NMI, ignore */
-	if (!atomic_read(&uv_cpu_nmi.pinging) && !uv_check_nmi(hub_nmi)) {
+	if (!this_cpu_read(uv_cpu_nmi.pinging) && !uv_check_nmi(hub_nmi)) {
 		local_irq_restore(flags);
 		return NMI_DONE;
 	}
@@ -639,7 +639,7 @@ int uv_handle_nmi(unsigned int reason, struct pt_regs *regs)
 	uv_call_kgdb_kdb(cpu, regs, master);
 
 	/* Clear per_cpu "in nmi" flag */
-	atomic_set(&uv_cpu_nmi.state, UV_NMI_STATE_OUT);
+	this_cpu_write(uv_cpu_nmi.state, UV_NMI_STATE_OUT);
 
 	/* Clear MMR NMI flag on each hub */
 	uv_clear_nmi(cpu);
@@ -666,16 +666,16 @@ static int uv_handle_nmi_ping(unsigned int reason, struct pt_regs *regs)
 {
 	int ret;
 
-	uv_cpu_nmi.queries++;
-	if (!atomic_read(&uv_cpu_nmi.pinging)) {
+	this_cpu_inc(uv_cpu_nmi.queries);
+	if (!this_cpu_read(uv_cpu_nmi.pinging)) {
 		local64_inc(&uv_nmi_ping_misses);
 		return NMI_DONE;
 	}
 
-	uv_cpu_nmi.pings++;
+	this_cpu_inc(uv_cpu_nmi.pings);
 	local64_inc(&uv_nmi_ping_count);
 	ret = uv_handle_nmi(reason, regs);
-	atomic_set(&uv_cpu_nmi.pinging, 0);
+	this_cpu_write(uv_cpu_nmi.pinging, 0);
 	return ret;
 }
 
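A consolidated view of the converted ping handshake, as a hedged sketch
(simplified from uv_nmi_nr_cpus_ping() and uv_handle_nmi_ping() above; the
two helper names are made up for illustration):

#include <linux/cpumask.h>
#include <asm/apic.h>
#include <asm/uv/uv_hub.h>

/* Master side: plain remote stores replace atomic_set(); a store to an
 * int is a single write either way, so dropping atomic_t loses nothing. */
static void demo_ping_slaves(const struct cpumask *mask)
{
	int cpu;

	for_each_cpu(cpu, mask)
		uv_cpu_nmi_per(cpu).pinging = 1;
	apic->send_IPI_mask(mask, APIC_DM_NMI);
}

/* Slave side, in NMI context: the local flags are read, counted and
 * cleared with single-instruction this_cpu ops. */
static int demo_ack_ping(void)
{
	if (!this_cpu_read(uv_cpu_nmi.pinging))
		return 0;
	this_cpu_inc(uv_cpu_nmi.pings);
	this_cpu_write(uv_cpu_nmi.pinging, 0);
	return 1;
}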