diff options
| field | value | date |
|---|---|---|
| author | Linus Torvalds <torvalds@linux-foundation.org> | 2014-12-11 21:36:26 -0500 |
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2014-12-11 21:36:26 -0500 |
| commit | eedb3d3304b59c64c811522f4ebaaf83124deeac (patch) | |
| tree | 00ede75849525bdc788fc106a1951616bc43c9e0 | |
| parent | 9d050966e2eb37a643ac15904b6a8fda7fcfabe9 (diff) | |
| parent | eadac03e898617521f327faf265932b73ecc3e0f (diff) | |
Merge branch 'for-3.19' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/percpu
Pull percpu updates from Tejun Heo:
"Nothing interesting. A patch to convert the remaining __get_cpu_var()
users, another to fix non-critical off-by-one in an assertion and a
cosmetic conversion to lockless_dereference() in percpu-ref.
The back-merge from mainline is to receive lockless_dereference()"
* 'for-3.19' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/percpu:
percpu: Replace smp_read_barrier_depends() with lockless_dereference()
percpu: Convert remaining __get_cpu_var uses in 3.18-rcX
percpu: off by one in BUG_ON()
| mode | file | lines changed |
|---|---|---|
| -rw-r--r-- | arch/arm64/kernel/psci.c | 4 |
| -rw-r--r-- | include/linux/percpu-refcount.h | 4 |
| -rw-r--r-- | kernel/irq_work.c | 4 |
| -rw-r--r-- | kernel/time/tick-sched.c | 2 |
| -rw-r--r-- | mm/percpu.c | 2 |

5 files changed, 7 insertions(+), 9 deletions(-)
diff --git a/arch/arm64/kernel/psci.c b/arch/arm64/kernel/psci.c index 663da771580a..3425f311c49e 100644 --- a/arch/arm64/kernel/psci.c +++ b/arch/arm64/kernel/psci.c | |||
| @@ -511,7 +511,7 @@ static int cpu_psci_cpu_kill(unsigned int cpu) | |||
| 511 | 511 | ||
| 512 | static int psci_suspend_finisher(unsigned long index) | 512 | static int psci_suspend_finisher(unsigned long index) |
| 513 | { | 513 | { |
| 514 | struct psci_power_state *state = __get_cpu_var(psci_power_state); | 514 | struct psci_power_state *state = __this_cpu_read(psci_power_state); |
| 515 | 515 | ||
| 516 | return psci_ops.cpu_suspend(state[index - 1], | 516 | return psci_ops.cpu_suspend(state[index - 1], |
| 517 | virt_to_phys(cpu_resume)); | 517 | virt_to_phys(cpu_resume)); |
| @@ -520,7 +520,7 @@ static int psci_suspend_finisher(unsigned long index) | |||
| 520 | static int __maybe_unused cpu_psci_cpu_suspend(unsigned long index) | 520 | static int __maybe_unused cpu_psci_cpu_suspend(unsigned long index) |
| 521 | { | 521 | { |
| 522 | int ret; | 522 | int ret; |
| 523 | struct psci_power_state *state = __get_cpu_var(psci_power_state); | 523 | struct psci_power_state *state = __this_cpu_read(psci_power_state); |
| 524 | /* | 524 | /* |
| 525 | * idle state index 0 corresponds to wfi, should never be called | 525 | * idle state index 0 corresponds to wfi, should never be called |
| 526 | * from the cpu_suspend operations | 526 | * from the cpu_suspend operations |
diff --git a/include/linux/percpu-refcount.h b/include/linux/percpu-refcount.h index 530b249f7ea4..b4337646388b 100644 --- a/include/linux/percpu-refcount.h +++ b/include/linux/percpu-refcount.h | |||
| @@ -128,10 +128,8 @@ static inline void percpu_ref_kill(struct percpu_ref *ref) | |||
| 128 | static inline bool __ref_is_percpu(struct percpu_ref *ref, | 128 | static inline bool __ref_is_percpu(struct percpu_ref *ref, |
| 129 | unsigned long __percpu **percpu_countp) | 129 | unsigned long __percpu **percpu_countp) |
| 130 | { | 130 | { |
| 131 | unsigned long percpu_ptr = ACCESS_ONCE(ref->percpu_count_ptr); | ||
| 132 | |||
| 133 | /* paired with smp_store_release() in percpu_ref_reinit() */ | 131 | /* paired with smp_store_release() in percpu_ref_reinit() */ |
| 134 | smp_read_barrier_depends(); | 132 | unsigned long percpu_ptr = lockless_dereference(ref->percpu_count_ptr); |
| 135 | 133 | ||
| 136 | /* | 134 | /* |
| 137 | * Theoretically, the following could test just ATOMIC; however, | 135 | * Theoretically, the following could test just ATOMIC; however, |
diff --git a/kernel/irq_work.c b/kernel/irq_work.c index 3ab9048483fa..cbf9fb899d92 100644 --- a/kernel/irq_work.c +++ b/kernel/irq_work.c | |||
| @@ -175,11 +175,11 @@ EXPORT_SYMBOL_GPL(irq_work_run); | |||
| 175 | 175 | ||
| 176 | void irq_work_tick(void) | 176 | void irq_work_tick(void) |
| 177 | { | 177 | { |
| 178 | struct llist_head *raised = &__get_cpu_var(raised_list); | 178 | struct llist_head *raised = this_cpu_ptr(&raised_list); |
| 179 | 179 | ||
| 180 | if (!llist_empty(raised) && !arch_irq_work_has_interrupt()) | 180 | if (!llist_empty(raised) && !arch_irq_work_has_interrupt()) |
| 181 | irq_work_run_list(raised); | 181 | irq_work_run_list(raised); |
| 182 | irq_work_run_list(&__get_cpu_var(lazy_list)); | 182 | irq_work_run_list(this_cpu_ptr(&lazy_list)); |
| 183 | } | 183 | } |
| 184 | 184 | ||
| 185 | /* | 185 | /* |
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c index 1f4356037a7d..4d54b7540585 100644 --- a/kernel/time/tick-sched.c +++ b/kernel/time/tick-sched.c | |||
| @@ -235,7 +235,7 @@ void tick_nohz_full_kick(void) | |||
| 235 | if (!tick_nohz_full_cpu(smp_processor_id())) | 235 | if (!tick_nohz_full_cpu(smp_processor_id())) |
| 236 | return; | 236 | return; |
| 237 | 237 | ||
| 238 | irq_work_queue(&__get_cpu_var(nohz_full_kick_work)); | 238 | irq_work_queue(this_cpu_ptr(&nohz_full_kick_work)); |
| 239 | } | 239 | } |
| 240 | 240 | ||
| 241 | /* | 241 | /* |
diff --git a/mm/percpu.c b/mm/percpu.c index 014bab65e0ff..d39e2f4e335c 100644 --- a/mm/percpu.c +++ b/mm/percpu.c | |||
| @@ -1591,7 +1591,7 @@ int __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai, | |||
| 1591 | if (cpu == NR_CPUS) | 1591 | if (cpu == NR_CPUS) |
| 1592 | continue; | 1592 | continue; |
| 1593 | 1593 | ||
| 1594 | PCPU_SETUP_BUG_ON(cpu > nr_cpu_ids); | 1594 | PCPU_SETUP_BUG_ON(cpu >= nr_cpu_ids); |
| 1595 | PCPU_SETUP_BUG_ON(!cpu_possible(cpu)); | 1595 | PCPU_SETUP_BUG_ON(!cpu_possible(cpu)); |
| 1596 | PCPU_SETUP_BUG_ON(unit_map[cpu] != UINT_MAX); | 1596 | PCPU_SETUP_BUG_ON(unit_map[cpu] != UINT_MAX); |
| 1597 | 1597 | ||
