path: root/arch/x86/platform
author     Linus Torvalds <torvalds@linux-foundation.org>    2014-10-15 01:48:18 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>    2014-10-15 01:48:18 -0400
commit     0429fbc0bdc297d64188483ba029a23773ae07b0 (patch)
tree       67de46978c90f37540dd6ded1db20eb53a569030 /arch/x86/platform
parent     6929c358972facf2999f8768815c40dd88514fc2 (diff)
parent     513d1a2884a49654f368b5fa25ef186e976bdada (diff)
Merge branch 'for-3.18-consistent-ops' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/percpu
Pull percpu consistent-ops changes from Tejun Heo:
 "Way back, before the current percpu allocator was implemented, static
  and dynamic percpu memory areas were allocated and handled separately
  and had their own accessors.  The distinction has been gone for many
  years now; however, the two now-duplicate sets of accessors remained,
  with the pointer-based ones - this_cpu_*() - evolving various other
  operations over time.  During the process, we also accumulated other
  inconsistent operations.

  This pull request contains Christoph's patches to clean up the
  duplicate accessor situation.  __get_cpu_var() uses are replaced with
  this_cpu_ptr() and __this_cpu_ptr() with raw_cpu_ptr().

  Unfortunately, the former sometimes is tricky thanks to C being a bit
  messy with the distinction between lvalues and pointers, which led to
  a rather ugly solution for cpumask_var_t involving the introduction of
  this_cpu_cpumask_var_ptr().

  This converts most of the uses but not all.  Christoph will follow up
  with the remaining conversions in this merge window and hopefully
  remove the obsolete accessors"

* 'for-3.18-consistent-ops' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/percpu: (38 commits)
  irqchip: Properly fetch the per cpu offset
  percpu: Resolve ambiguities in __get_cpu_var/cpumask_var_t -fix
  ia64: sn_nodepda cannot be assigned to after this_cpu conversion. Use __this_cpu_write.
  percpu: Resolve ambiguities in __get_cpu_var/cpumask_var_t
  Revert "powerpc: Replace __get_cpu_var uses"
  percpu: Remove __this_cpu_ptr
  clocksource: Replace __this_cpu_ptr with raw_cpu_ptr
  sparc: Replace __get_cpu_var uses
  avr32: Replace __get_cpu_var with __this_cpu_write
  blackfin: Replace __get_cpu_var uses
  tile: Use this_cpu_ptr() for hardware counters
  tile: Replace __get_cpu_var uses
  powerpc: Replace __get_cpu_var uses
  alpha: Replace __get_cpu_var
  ia64: Replace __get_cpu_var uses
  s390: cio driver &__get_cpu_var replacements
  s390: Replace __get_cpu_var uses
  mips: Replace __get_cpu_var uses
  MIPS: Replace __get_cpu_var uses in FPU emulator.
  arm: Replace __this_cpu_ptr with raw_cpu_ptr
  ...
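As a minimal sketch of the conversion pattern the message describes (the
names my_counter and my_mask are illustrative, not taken from the series):

	#include <linux/percpu.h>
	#include <linux/cpumask.h>

	static DEFINE_PER_CPU(int, my_counter);
	static DEFINE_PER_CPU(cpumask_var_t, my_mask);

	static void accessor_examples(void)
	{
		int *p;
		struct cpumask *m;

		/* was: p = &__get_cpu_var(my_counter);  (lvalue macro) */
		p = this_cpu_ptr(&my_counter);

		/* was: p = __this_cpu_ptr(&my_counter);  (no preemption checks) */
		p = raw_cpu_ptr(&my_counter);

		/* cpumask_var_t may be an array type, so taking its address
		 * is ambiguous; the series introduces a dedicated helper */
		m = this_cpu_cpumask_var_ptr(my_mask);

		(void)p;	/* silence unused warnings in this sketch */
		(void)m;
	}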
Diffstat (limited to 'arch/x86/platform')
-rw-r--r--  arch/x86/platform/uv/uv_nmi.c   | 40
-rw-r--r--  arch/x86/platform/uv/uv_time.c  |  2
2 files changed, 21 insertions(+), 21 deletions(-)
diff --git a/arch/x86/platform/uv/uv_nmi.c b/arch/x86/platform/uv/uv_nmi.c
index c89c93320c12..c6b146e67116 100644
--- a/arch/x86/platform/uv/uv_nmi.c
+++ b/arch/x86/platform/uv/uv_nmi.c
@@ -63,8 +63,8 @@
 
 static struct uv_hub_nmi_s **uv_hub_nmi_list;
 
-DEFINE_PER_CPU(struct uv_cpu_nmi_s, __uv_cpu_nmi);
-EXPORT_PER_CPU_SYMBOL_GPL(__uv_cpu_nmi);
+DEFINE_PER_CPU(struct uv_cpu_nmi_s, uv_cpu_nmi);
+EXPORT_PER_CPU_SYMBOL_GPL(uv_cpu_nmi);
 
 static unsigned long nmi_mmr;
 static unsigned long nmi_mmr_clear;
@@ -215,7 +215,7 @@ static int uv_check_nmi(struct uv_hub_nmi_s *hub_nmi)
 	int nmi = 0;
 
 	local64_inc(&uv_nmi_count);
-	uv_cpu_nmi.queries++;
+	this_cpu_inc(uv_cpu_nmi.queries);
 
 	do {
 		nmi = atomic_read(&hub_nmi->in_nmi);
@@ -293,7 +293,7 @@ static void uv_nmi_nr_cpus_ping(void)
 	int cpu;
 
 	for_each_cpu(cpu, uv_nmi_cpu_mask)
-		atomic_set(&uv_cpu_nmi_per(cpu).pinging, 1);
+		uv_cpu_nmi_per(cpu).pinging = 1;
 
 	apic->send_IPI_mask(uv_nmi_cpu_mask, APIC_DM_NMI);
 }
@@ -304,8 +304,8 @@ static void uv_nmi_cleanup_mask(void)
 	int cpu;
 
 	for_each_cpu(cpu, uv_nmi_cpu_mask) {
-		atomic_set(&uv_cpu_nmi_per(cpu).pinging, 0);
-		atomic_set(&uv_cpu_nmi_per(cpu).state, UV_NMI_STATE_OUT);
+		uv_cpu_nmi_per(cpu).pinging = 0;
+		uv_cpu_nmi_per(cpu).state = UV_NMI_STATE_OUT;
 		cpumask_clear_cpu(cpu, uv_nmi_cpu_mask);
 	}
 }
@@ -328,7 +328,7 @@ static int uv_nmi_wait_cpus(int first)
 	int loop_delay = uv_nmi_loop_delay;
 
 	for_each_cpu(j, uv_nmi_cpu_mask) {
-		if (atomic_read(&uv_cpu_nmi_per(j).state)) {
+		if (uv_cpu_nmi_per(j).state) {
 			cpumask_clear_cpu(j, uv_nmi_cpu_mask);
 			if (++k >= n)
 				break;
@@ -359,7 +359,7 @@ static int uv_nmi_wait_cpus(int first)
 static void uv_nmi_wait(int master)
 {
 	/* indicate this cpu is in */
-	atomic_set(&uv_cpu_nmi.state, UV_NMI_STATE_IN);
+	this_cpu_write(uv_cpu_nmi.state, UV_NMI_STATE_IN);
 
 	/* if not the first cpu in (the master), then we are a slave cpu */
 	if (!master)
@@ -419,7 +419,7 @@ static void uv_nmi_dump_state_cpu(int cpu, struct pt_regs *regs)
419 "UV:%sNMI process trace for CPU %d\n", dots, cpu); 419 "UV:%sNMI process trace for CPU %d\n", dots, cpu);
420 show_regs(regs); 420 show_regs(regs);
421 } 421 }
422 atomic_set(&uv_cpu_nmi.state, UV_NMI_STATE_DUMP_DONE); 422 this_cpu_write(uv_cpu_nmi.state, UV_NMI_STATE_DUMP_DONE);
423} 423}
424 424
425/* Trigger a slave cpu to dump it's state */ 425/* Trigger a slave cpu to dump it's state */
@@ -427,20 +427,20 @@ static void uv_nmi_trigger_dump(int cpu)
 {
 	int retry = uv_nmi_trigger_delay;
 
-	if (atomic_read(&uv_cpu_nmi_per(cpu).state) != UV_NMI_STATE_IN)
+	if (uv_cpu_nmi_per(cpu).state != UV_NMI_STATE_IN)
 		return;
 
-	atomic_set(&uv_cpu_nmi_per(cpu).state, UV_NMI_STATE_DUMP);
+	uv_cpu_nmi_per(cpu).state = UV_NMI_STATE_DUMP;
 	do {
 		cpu_relax();
 		udelay(10);
-		if (atomic_read(&uv_cpu_nmi_per(cpu).state)
+		if (uv_cpu_nmi_per(cpu).state
 				!= UV_NMI_STATE_DUMP)
 			return;
 	} while (--retry > 0);
 
 	pr_crit("UV: CPU %d stuck in process dump function\n", cpu);
-	atomic_set(&uv_cpu_nmi_per(cpu).state, UV_NMI_STATE_DUMP_DONE);
+	uv_cpu_nmi_per(cpu).state = UV_NMI_STATE_DUMP_DONE;
 }
 
 /* Wait until all cpus ready to exit */
@@ -488,7 +488,7 @@ static void uv_nmi_dump_state(int cpu, struct pt_regs *regs, int master)
 	} else {
 		while (!atomic_read(&uv_nmi_slave_continue))
 			cpu_relax();
-		while (atomic_read(&uv_cpu_nmi.state) != UV_NMI_STATE_DUMP)
+		while (this_cpu_read(uv_cpu_nmi.state) != UV_NMI_STATE_DUMP)
 			cpu_relax();
 		uv_nmi_dump_state_cpu(cpu, regs);
 	}
@@ -615,7 +615,7 @@ int uv_handle_nmi(unsigned int reason, struct pt_regs *regs)
 	local_irq_save(flags);
 
 	/* If not a UV System NMI, ignore */
-	if (!atomic_read(&uv_cpu_nmi.pinging) && !uv_check_nmi(hub_nmi)) {
+	if (!this_cpu_read(uv_cpu_nmi.pinging) && !uv_check_nmi(hub_nmi)) {
 		local_irq_restore(flags);
 		return NMI_DONE;
 	}
@@ -639,7 +639,7 @@ int uv_handle_nmi(unsigned int reason, struct pt_regs *regs)
 	uv_call_kgdb_kdb(cpu, regs, master);
 
 	/* Clear per_cpu "in nmi" flag */
-	atomic_set(&uv_cpu_nmi.state, UV_NMI_STATE_OUT);
+	this_cpu_write(uv_cpu_nmi.state, UV_NMI_STATE_OUT);
 
 	/* Clear MMR NMI flag on each hub */
 	uv_clear_nmi(cpu);
@@ -666,16 +666,16 @@ static int uv_handle_nmi_ping(unsigned int reason, struct pt_regs *regs)
 {
 	int ret;
 
-	uv_cpu_nmi.queries++;
-	if (!atomic_read(&uv_cpu_nmi.pinging)) {
+	this_cpu_inc(uv_cpu_nmi.queries);
+	if (!this_cpu_read(uv_cpu_nmi.pinging)) {
 		local64_inc(&uv_nmi_ping_misses);
 		return NMI_DONE;
 	}
 
-	uv_cpu_nmi.pings++;
+	this_cpu_inc(uv_cpu_nmi.pings);
 	local64_inc(&uv_nmi_ping_count);
 	ret = uv_handle_nmi(reason, regs);
-	atomic_set(&uv_cpu_nmi.pinging, 0);
+	this_cpu_write(uv_cpu_nmi.pinging, 0);
 	return ret;
 }
 
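Taken together, the uv_nmi.c hunks split the old atomic_t accesses into two
plain paths: this_cpu_*() ops for the local CPU's instance, per_cpu() for a
remote CPU's. A hedged sketch of the resulting pattern (struct layout and
state constants condensed from the diff; uv_cpu_nmi_per() shown as a
plausible per_cpu() wrapper, not copied from the header):

	#include <linux/percpu.h>

	enum { UV_NMI_STATE_OUT, UV_NMI_STATE_IN,
	       UV_NMI_STATE_DUMP, UV_NMI_STATE_DUMP_DONE };

	struct uv_cpu_nmi_s {
		int	state;		/* UV_NMI_STATE_* */
		int	pinging;	/* set when pinged by the master CPU */
		int	queries;	/* NMI handler invocations */
		int	pings;		/* NMIs that turned out to be pings */
	};

	static DEFINE_PER_CPU(struct uv_cpu_nmi_s, uv_cpu_nmi);
	#define uv_cpu_nmi_per(cpu)	(per_cpu(uv_cpu_nmi, cpu))

	static void local_cpu_path(void)
	{
		/* local instance: this_cpu ops compile to a single
		 * %gs-relative instruction on x86, so no atomic wrapper
		 * is needed */
		this_cpu_inc(uv_cpu_nmi.queries);
		this_cpu_write(uv_cpu_nmi.state, UV_NMI_STATE_IN);
	}

	static void remote_cpu_path(int cpu)
	{
		/* remote instance: plain int stores via per_cpu() replace
		 * the old atomic_set() calls */
		uv_cpu_nmi_per(cpu).pinging = 1;
	}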
diff --git a/arch/x86/platform/uv/uv_time.c b/arch/x86/platform/uv/uv_time.c
index 5c86786bbfd2..a244237f3cfa 100644
--- a/arch/x86/platform/uv/uv_time.c
+++ b/arch/x86/platform/uv/uv_time.c
@@ -365,7 +365,7 @@ __setup("uvrtcevt", uv_enable_evt_rtc);
 
 static __init void uv_rtc_register_clockevents(struct work_struct *dummy)
 {
-	struct clock_event_device *ced = &__get_cpu_var(cpu_ced);
+	struct clock_event_device *ced = this_cpu_ptr(&cpu_ced);
 
 	*ced = clock_event_device_uv;
 	ced->cpumask = cpumask_of(smp_processor_id());
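The uv_time.c change is the same conversion in pointer form:
&__get_cpu_var(var) becomes this_cpu_ptr(&var). A condensed sketch of the
resulting function, with the surrounding declarations assumed from
uv_time.c rather than shown in the diff:

	#include <linux/clockchips.h>
	#include <linux/cpumask.h>
	#include <linux/init.h>
	#include <linux/percpu.h>
	#include <linux/smp.h>
	#include <linux/workqueue.h>

	static DEFINE_PER_CPU(struct clock_event_device, cpu_ced);
	static struct clock_event_device clock_event_device_uv;  /* template, initializer elided */

	static __init void uv_rtc_register_clockevents(struct work_struct *dummy)
	{
		/* was: struct clock_event_device *ced = &__get_cpu_var(cpu_ced); */
		struct clock_event_device *ced = this_cpu_ptr(&cpu_ced);

		*ced = clock_event_device_uv;	/* copy the template */
		ced->cpumask = cpumask_of(smp_processor_id());
	}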