about summary refs log tree commit diff stats
diff options
context:
space:
mode:
authorPhilipp Hachtmann <phacht@linux.vnet.ibm.com>2014-04-07 12:25:23 -0400
committerMartin Schwidefsky <schwidefsky@de.ibm.com>2014-05-20 02:58:42 -0400
commit6c8cd5bbda7e6be166cf2e2dd4be5890193e17ac (patch)
tree0245d1a206b04c2cd2b5b4914dfb696205673861
parent5b3f683e694a835f5dfdab06102be1a50604c3b7 (diff)
s390/spinlock: optimize spinlock code sequence
Use lowcore constant to improve the code generated for spinlocks. [ Martin Schwidefsky: patch breakdown and code beautification ] Signed-off-by: Philipp Hachtmann <phacht@linux.vnet.ibm.com> Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
-rw-r--r--arch/s390/include/asm/lowcore.h5
-rw-r--r--arch/s390/include/asm/spinlock.h15
-rw-r--r--arch/s390/kernel/setup.c4
-rw-r--r--arch/s390/kernel/smp.c3
-rw-r--r--arch/s390/lib/spinlock.c4
5 files changed, 21 insertions, 10 deletions
diff --git a/arch/s390/include/asm/lowcore.h b/arch/s390/include/asm/lowcore.h
index bbf8141408cd..3b476eb92f20 100644
--- a/arch/s390/include/asm/lowcore.h
+++ b/arch/s390/include/asm/lowcore.h
@@ -139,7 +139,7 @@ struct _lowcore {
139 __u32 percpu_offset; /* 0x02f0 */ 139 __u32 percpu_offset; /* 0x02f0 */
140 __u32 machine_flags; /* 0x02f4 */ 140 __u32 machine_flags; /* 0x02f4 */
141 __u32 ftrace_func; /* 0x02f8 */ 141 __u32 ftrace_func; /* 0x02f8 */
142 __u8 pad_0x02fc[0x0300-0x02fc]; /* 0x02fc */ 142 __u32 spinlock_lockval; /* 0x02fc */
143 143
144 /* Interrupt response block */ 144 /* Interrupt response block */
145 __u8 irb[64]; /* 0x0300 */ 145 __u8 irb[64]; /* 0x0300 */
@@ -285,7 +285,8 @@ struct _lowcore {
285 __u64 machine_flags; /* 0x0388 */ 285 __u64 machine_flags; /* 0x0388 */
286 __u64 ftrace_func; /* 0x0390 */ 286 __u64 ftrace_func; /* 0x0390 */
287 __u64 gmap; /* 0x0398 */ 287 __u64 gmap; /* 0x0398 */
288 __u8 pad_0x03a0[0x0400-0x03a0]; /* 0x03a0 */ 288 __u32 spinlock_lockval; /* 0x03a0 */
289 __u8 pad_0x03a4[0x0400-0x03a4]; /* 0x03a4 */
289 290
290 /* Interrupt response block. */ 291 /* Interrupt response block. */
291 __u8 irb[64]; /* 0x0400 */ 292 __u8 irb[64]; /* 0x0400 */
diff --git a/arch/s390/include/asm/spinlock.h b/arch/s390/include/asm/spinlock.h
index b60212a02d08..5a0b2882ad48 100644
--- a/arch/s390/include/asm/spinlock.h
+++ b/arch/s390/include/asm/spinlock.h
@@ -11,6 +11,8 @@
11 11
12#include <linux/smp.h> 12#include <linux/smp.h>
13 13
14#define SPINLOCK_LOCKVAL (S390_lowcore.spinlock_lockval)
15
14extern int spin_retry; 16extern int spin_retry;
15 17
16static inline int 18static inline int
@@ -40,6 +42,11 @@ int arch_spin_trylock_retry(arch_spinlock_t *);
40void arch_spin_relax(arch_spinlock_t *); 42void arch_spin_relax(arch_spinlock_t *);
41void arch_spin_lock_wait_flags(arch_spinlock_t *, unsigned long flags); 43void arch_spin_lock_wait_flags(arch_spinlock_t *, unsigned long flags);
42 44
45static inline u32 arch_spin_lockval(int cpu)
46{
47 return ~cpu;
48}
49
43static inline int arch_spin_value_unlocked(arch_spinlock_t lock) 50static inline int arch_spin_value_unlocked(arch_spinlock_t lock)
44{ 51{
45 return lock.lock == 0; 52 return lock.lock == 0;
@@ -52,16 +59,12 @@ static inline int arch_spin_is_locked(arch_spinlock_t *lp)
52 59
53static inline int arch_spin_trylock_once(arch_spinlock_t *lp) 60static inline int arch_spin_trylock_once(arch_spinlock_t *lp)
54{ 61{
55 unsigned int new = ~smp_processor_id(); 62 return _raw_compare_and_swap(&lp->lock, 0, SPINLOCK_LOCKVAL);
56
57 return _raw_compare_and_swap(&lp->lock, 0, new);
58} 63}
59 64
60static inline int arch_spin_tryrelease_once(arch_spinlock_t *lp) 65static inline int arch_spin_tryrelease_once(arch_spinlock_t *lp)
61{ 66{
62 unsigned int old = ~smp_processor_id(); 67 return _raw_compare_and_swap(&lp->lock, SPINLOCK_LOCKVAL, 0);
63
64 return _raw_compare_and_swap(&lp->lock, old, 0);
65} 68}
66 69
67static inline void arch_spin_lock(arch_spinlock_t *lp) 70static inline void arch_spin_lock(arch_spinlock_t *lp)
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index 1f5536c2fd02..7c5b05fa2194 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -373,6 +373,10 @@ static void __init setup_lowcore(void)
373 mem_assign_absolute(S390_lowcore.restart_source, lc->restart_source); 373 mem_assign_absolute(S390_lowcore.restart_source, lc->restart_source);
374 mem_assign_absolute(S390_lowcore.restart_psw, lc->restart_psw); 374 mem_assign_absolute(S390_lowcore.restart_psw, lc->restart_psw);
375 375
376#ifdef CONFIG_SMP
377 lc->spinlock_lockval = arch_spin_lockval(0);
378#endif
379
376 set_prefix((u32)(unsigned long) lc); 380 set_prefix((u32)(unsigned long) lc);
377 lowcore_ptr[0] = lc; 381 lowcore_ptr[0] = lc;
378} 382}
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index 86e65ec3422b..1f0b474041c4 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -170,6 +170,7 @@ static int pcpu_alloc_lowcore(struct pcpu *pcpu, int cpu)
170 lc->panic_stack = pcpu->panic_stack + PAGE_SIZE 170 lc->panic_stack = pcpu->panic_stack + PAGE_SIZE
171 - STACK_FRAME_OVERHEAD - sizeof(struct pt_regs); 171 - STACK_FRAME_OVERHEAD - sizeof(struct pt_regs);
172 lc->cpu_nr = cpu; 172 lc->cpu_nr = cpu;
173 lc->spinlock_lockval = arch_spin_lockval(cpu);
173#ifndef CONFIG_64BIT 174#ifndef CONFIG_64BIT
174 if (MACHINE_HAS_IEEE) { 175 if (MACHINE_HAS_IEEE) {
175 lc->extended_save_area_addr = get_zeroed_page(GFP_KERNEL); 176 lc->extended_save_area_addr = get_zeroed_page(GFP_KERNEL);
@@ -226,6 +227,7 @@ static void pcpu_prepare_secondary(struct pcpu *pcpu, int cpu)
226 cpumask_set_cpu(cpu, mm_cpumask(&init_mm)); 227 cpumask_set_cpu(cpu, mm_cpumask(&init_mm));
227 atomic_inc(&init_mm.context.attach_count); 228 atomic_inc(&init_mm.context.attach_count);
228 lc->cpu_nr = cpu; 229 lc->cpu_nr = cpu;
230 lc->spinlock_lockval = arch_spin_lockval(cpu);
229 lc->percpu_offset = __per_cpu_offset[cpu]; 231 lc->percpu_offset = __per_cpu_offset[cpu];
230 lc->kernel_asce = S390_lowcore.kernel_asce; 232 lc->kernel_asce = S390_lowcore.kernel_asce;
231 lc->machine_flags = S390_lowcore.machine_flags; 233 lc->machine_flags = S390_lowcore.machine_flags;
@@ -809,6 +811,7 @@ void __init smp_cpus_done(unsigned int max_cpus)
809void __init smp_setup_processor_id(void) 811void __init smp_setup_processor_id(void)
810{ 812{
811 S390_lowcore.cpu_nr = 0; 813 S390_lowcore.cpu_nr = 0;
814 S390_lowcore.spinlock_lockval = arch_spin_lockval(0);
812} 815}
813 816
814/* 817/*
diff --git a/arch/s390/lib/spinlock.c b/arch/s390/lib/spinlock.c
index 4a3b33b2dbb9..3ca9de4d9cb9 100644
--- a/arch/s390/lib/spinlock.c
+++ b/arch/s390/lib/spinlock.c
@@ -27,7 +27,7 @@ __setup("spin_retry=", spin_retry_setup);
27void arch_spin_lock_wait(arch_spinlock_t *lp) 27void arch_spin_lock_wait(arch_spinlock_t *lp)
28{ 28{
29 int count = spin_retry; 29 int count = spin_retry;
30 unsigned int cpu = ~smp_processor_id(); 30 unsigned int cpu = SPINLOCK_LOCKVAL;
31 unsigned int owner; 31 unsigned int owner;
32 32
33 while (1) { 33 while (1) {
@@ -54,7 +54,7 @@ EXPORT_SYMBOL(arch_spin_lock_wait);
54void arch_spin_lock_wait_flags(arch_spinlock_t *lp, unsigned long flags) 54void arch_spin_lock_wait_flags(arch_spinlock_t *lp, unsigned long flags)
55{ 55{
56 int count = spin_retry; 56 int count = spin_retry;
57 unsigned int cpu = ~smp_processor_id(); 57 unsigned int cpu = SPINLOCK_LOCKVAL;
58 unsigned int owner; 58 unsigned int owner;
59 59
60 local_irq_restore(flags); 60 local_irq_restore(flags);