author		Martin Schwidefsky <schwidefsky@de.ibm.com>	2005-07-27 14:44:57 -0400
committer	Linus Torvalds <torvalds@g5.osdl.org>		2005-07-27 19:26:04 -0400
commit		951f22d5b1f0eaae35dafc669e3774a0c2084d10 (patch)
tree		66c0131b576dadb98026da11d624df453c4c9a7c /include/asm-s390
parent		8449d003f323ca7a00eec38905d984ba5ec83a29 (diff)
[PATCH] s390: spin lock retry
Split the spin lock and r/w lock implementation into a single try, which is done inline, and an out-of-line function that repeatedly tries to get the lock before doing the cpu_relax(). Add a system control to set the number of retries before a cpu is yielded.

The reason for the spin lock retry is that the diagnose 0x44 that is used to give up the virtual cpu is quite expensive. For spin locks that are held only for a short period of time, the cost of the diagnose outweighs the savings for spin locks that are held for a longer time. The default retry count is 1000.

Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
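As orientation for the diff below: the fast path becomes a single inline compare-and-swap, and the slow path is an out-of-line helper that retries a configurable number of times before giving up the virtual cpu. The helpers are only declared extern in this header; their implementation lies outside this diffstat. The sketch below is illustrative, not the patch itself: the names spin_retry and _diag44() are assumptions standing in for the sysctl-controlled retry count and the diagnose 0x44 yield.

int spin_retry = 1000;	/* assumed: the sysctl-controlled retry count */

void _raw_spin_lock_wait(spinlock_t *lp, unsigned int pc)
{
	int count;

	for (;;) {
		/* spin a while before paying for the expensive diagnose */
		for (count = spin_retry; count > 0; count--)
			if (_raw_compare_and_swap(&lp->lock, 0, pc) == 0)
				return;		/* lock acquired */
		_diag44();	/* assumed helper: yield the virtual cpu */
	}
}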
Diffstat (limited to 'include/asm-s390')
-rw-r--r--	include/asm-s390/lowcore.h	4
-rw-r--r--	include/asm-s390/processor.h	5
-rw-r--r--	include/asm-s390/spinlock.h	252
3 files changed, 83 insertions, 178 deletions
diff --git a/include/asm-s390/lowcore.h b/include/asm-s390/lowcore.h
index 76b5b19c0ae2..afe6a9f9b0ae 100644
--- a/include/asm-s390/lowcore.h
+++ b/include/asm-s390/lowcore.h
@@ -90,7 +90,6 @@
 #define __LC_SYSTEM_TIMER		0x278
 #define __LC_LAST_UPDATE_CLOCK		0x280
 #define __LC_STEAL_CLOCK		0x288
-#define __LC_DIAG44_OPCODE		0x290
 #define __LC_KERNEL_STACK		0xD40
 #define __LC_THREAD_INFO		0xD48
 #define __LC_ASYNC_STACK		0xD50
@@ -286,8 +285,7 @@ struct _lowcore
 	__u64	system_timer;		/* 0x278 */
 	__u64	last_update_clock;	/* 0x280 */
 	__u64	steal_clock;		/* 0x288 */
-	__u32	diag44_opcode;		/* 0x290 */
-	__u8	pad8[0xc00-0x294];	/* 0x294 */
+	__u8	pad8[0xc00-0x290];	/* 0x290 */
 	/* System info area */
 	__u64	save_area[16];		/* 0xc00 */
 	__u8	pad9[0xd40-0xc80];	/* 0xc80 */
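The widened pad8 keeps every lowcore field after offset 0x290 at its architected address once diag44_opcode is removed; save_area, for example, must stay at 0xc00. A compile-time check along these lines (illustrative only, not part of the patch) makes the padding arithmetic explicit:

#include <stddef.h>
#include <asm/lowcore.h>

/* Not from the patch: verify that dropping diag44_opcode and widening
 * pad8 leaves the absolute lowcore offsets untouched. */
_Static_assert(offsetof(struct _lowcore, save_area) == 0xc00,
	       "save_area must remain at lowcore offset 0xc00");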
diff --git a/include/asm-s390/processor.h b/include/asm-s390/processor.h
index 8bd14de69e35..4ec652ebb3b1 100644
--- a/include/asm-s390/processor.h
+++ b/include/asm-s390/processor.h
@@ -203,7 +203,10 @@ unsigned long get_wchan(struct task_struct *p);
 # define cpu_relax()	asm volatile ("diag 0,0,68" : : : "memory")
 #else /* __s390x__ */
 # define cpu_relax() \
-	asm volatile ("ex 0,%0" : : "i" (__LC_DIAG44_OPCODE) : "memory")
+	do { \
+		if (MACHINE_HAS_DIAG44) \
+			asm volatile ("diag 0,0,68" : : : "memory"); \
+	} while (0)
 #endif /* __s390x__ */
 
 /*
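On 64-bit, cpu_relax() no longer executes the diagnose opcode stored in the lowcore through an "ex" instruction; it issues diagnose 0x44 directly, but only when MACHINE_HAS_DIAG44 reports that the machine supports it. Callers are unaffected; a typical busy-wait loop (illustrative, not from the patch) still reads:

/* Illustration only: pollers call cpu_relax() as before; whether that
 * actually yields the virtual cpu is now decided at run time. */
static void wait_for_unlock(spinlock_t *lp)
{
	while (spin_is_locked(lp))
		cpu_relax();
}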
diff --git a/include/asm-s390/spinlock.h b/include/asm-s390/spinlock.h
index 53cc736b9820..8ff10300f7ee 100644
--- a/include/asm-s390/spinlock.h
+++ b/include/asm-s390/spinlock.h
@@ -11,21 +11,16 @@
 #ifndef __ASM_SPINLOCK_H
 #define __ASM_SPINLOCK_H
 
-#ifdef __s390x__
-/*
- * Grmph, take care of %&#! user space programs that include
- * asm/spinlock.h. The diagnose is only available in kernel
- * context.
- */
-#ifdef __KERNEL__
-#include <asm/lowcore.h>
-#define __DIAG44_INSN "ex"
-#define __DIAG44_OPERAND __LC_DIAG44_OPCODE
-#else
-#define __DIAG44_INSN "#"
-#define __DIAG44_OPERAND 0
-#endif
-#endif /* __s390x__ */
+static inline int
+_raw_compare_and_swap(volatile unsigned int *lock,
+		      unsigned int old, unsigned int new)
+{
+	asm volatile ("cs %0,%3,0(%4)"
+		      : "=d" (old), "=m" (*lock)
+		      : "0" (old), "d" (new), "a" (lock), "m" (*lock)
+		      : "cc", "memory" );
+	return old;
+}
 
 /*
  * Simple spin lock operations. There are two variants, one clears IRQ's
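_raw_compare_and_swap wraps the s390 cs (compare and swap) instruction: it atomically stores new into *lock only if *lock still equals old, and returns the value it actually found, so callers compare the return value with old to see whether the swap took place. A rough portable equivalent (illustration only, using a GCC builtin rather than the inline assembly above):

/* Illustration only: same calling convention as the cs-based helper. */
static inline int compare_and_swap_sketch(volatile unsigned int *lock,
					  unsigned int old, unsigned int new)
{
	/* returns the previous value of *lock; equal to old on success */
	return __sync_val_compare_and_swap(lock, old, new);
}

An acquire attempt therefore reads as _raw_compare_and_swap(&lp->lock, 0, pc) == 0: success means the lock word was free and now records the new owner.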
@@ -41,58 +36,35 @@ typedef struct {
 #endif
 } __attribute__ ((aligned (4))) spinlock_t;
 
 #define SPIN_LOCK_UNLOCKED	(spinlock_t) { 0 }
 #define spin_lock_init(lp)	do { (lp)->lock = 0; } while(0)
 #define spin_unlock_wait(lp)	do { barrier(); } while(((volatile spinlock_t *)(lp))->lock)
 #define spin_is_locked(x)	((x)->lock != 0)
 #define _raw_spin_lock_flags(lock, flags) _raw_spin_lock(lock)
 
-extern inline void _raw_spin_lock(spinlock_t *lp)
+extern void _raw_spin_lock_wait(spinlock_t *lp, unsigned int pc);
+extern int _raw_spin_trylock_retry(spinlock_t *lp, unsigned int pc);
+
+static inline void _raw_spin_lock(spinlock_t *lp)
 {
-#ifndef __s390x__
-	unsigned int reg1, reg2;
-	__asm__ __volatile__("    bras  %0,1f\n"
-			   "0:  diag  0,0,68\n"
-			   "1:  slr   %1,%1\n"
-			   "    cs    %1,%0,0(%3)\n"
-			   "    jl    0b\n"
-			   : "=&d" (reg1), "=&d" (reg2), "=m" (lp->lock)
-			   : "a" (&lp->lock), "m" (lp->lock)
-			   : "cc", "memory" );
-#else /* __s390x__ */
-	unsigned long reg1, reg2;
-	__asm__ __volatile__("    bras  %1,1f\n"
-			   "0:  " __DIAG44_INSN " 0,%4\n"
-			   "1:  slr   %0,%0\n"
-			   "    cs    %0,%1,0(%3)\n"
-			   "    jl    0b\n"
-			   : "=&d" (reg1), "=&d" (reg2), "=m" (lp->lock)
-			   : "a" (&lp->lock), "i" (__DIAG44_OPERAND),
-			     "m" (lp->lock) : "cc", "memory" );
-#endif /* __s390x__ */
+	unsigned long pc = (unsigned long) __builtin_return_address(0);
+
+	if (unlikely(_raw_compare_and_swap(&lp->lock, 0, pc) != 0))
+		_raw_spin_lock_wait(lp, pc);
 }
 
-extern inline int _raw_spin_trylock(spinlock_t *lp)
+static inline int _raw_spin_trylock(spinlock_t *lp)
 {
-	unsigned long reg;
-	unsigned int result;
-
-	__asm__ __volatile__("    basr  %1,0\n"
-			   "0:  cs    %0,%1,0(%3)"
-			   : "=d" (result), "=&d" (reg), "=m" (lp->lock)
-			   : "a" (&lp->lock), "m" (lp->lock), "0" (0)
-			   : "cc", "memory" );
-	return !result;
+	unsigned long pc = (unsigned long) __builtin_return_address(0);
+
+	if (likely(_raw_compare_and_swap(&lp->lock, 0, pc) == 0))
+		return 1;
+	return _raw_spin_trylock_retry(lp, pc);
 }
 
-extern inline void _raw_spin_unlock(spinlock_t *lp)
+static inline void _raw_spin_unlock(spinlock_t *lp)
 {
-	unsigned int old;
-
-	__asm__ __volatile__("cs %0,%3,0(%4)"
-			     : "=d" (old), "=m" (lp->lock)
-			     : "0" (lp->lock), "d" (0), "a" (lp)
-			     : "cc", "memory" );
+	_raw_compare_and_swap(&lp->lock, lp->lock, 0);
 }
 
 /*
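Note that the rewritten spinlock stores the acquirer's return address, taken from __builtin_return_address(0), as the non-zero lock value: a held lock records who took it, and spin_is_locked() still only has to test for non-zero. A small illustrative usage (not from the patch):

/* Illustration only. */
static void lock_value_example(spinlock_t *lp)
{
	_raw_spin_lock(lp);	/* lp->lock now holds this caller's pc */
	/* spin_is_locked(lp) is true here: any non-zero value means held */
	_raw_spin_unlock(lp);	/* lp->lock is reset to 0 */
}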
@@ -106,7 +78,7 @@ extern inline void _raw_spin_unlock(spinlock_t *lp)
  * read-locks.
  */
 typedef struct {
-	volatile unsigned long lock;
+	volatile unsigned int lock;
 	volatile unsigned long owner_pc;
 #ifdef CONFIG_PREEMPT
 	unsigned int break_lock;
@@ -129,123 +101,55 @@ typedef struct {
  */
 #define write_can_lock(x) ((x)->lock == 0)
 
-#ifndef __s390x__
-#define _raw_read_lock(rw)   \
-	asm volatile("   l     2,0(%1)\n"   \
-		     "   j     1f\n"   \
-		     "0: diag  0,0,68\n"   \
-		     "1: la    2,0(2)\n"     /* clear high (=write) bit */ \
-		     "   la    3,1(2)\n"     /* one more reader */ \
-		     "   cs    2,3,0(%1)\n"  /* try to write new value */ \
-		     "   jl    0b"   \
-		     : "=m" ((rw)->lock) : "a" (&(rw)->lock),   \
-		       "m" ((rw)->lock) : "2", "3", "cc", "memory" )
-#else /* __s390x__ */
-#define _raw_read_lock(rw)   \
-	asm volatile("   lg    2,0(%1)\n"   \
-		     "   j     1f\n"   \
-		     "0: " __DIAG44_INSN " 0,%2\n"   \
-		     "1: nihh  2,0x7fff\n"   /* clear high (=write) bit */ \
-		     "   la    3,1(2)\n"     /* one more reader */ \
-		     "   csg   2,3,0(%1)\n"  /* try to write new value */ \
-		     "   jl    0b"   \
-		     : "=m" ((rw)->lock)   \
-		     : "a" (&(rw)->lock), "i" (__DIAG44_OPERAND),   \
-		       "m" ((rw)->lock) : "2", "3", "cc", "memory" )
-#endif /* __s390x__ */
-
-#ifndef __s390x__
-#define _raw_read_unlock(rw)   \
-	asm volatile("   l     2,0(%1)\n"   \
-		     "   j     1f\n"   \
-		     "0: diag  0,0,68\n"   \
-		     "1: lr    3,2\n"   \
-		     "   ahi   3,-1\n"       /* one less reader */ \
-		     "   cs    2,3,0(%1)\n"   \
-		     "   jl    0b"   \
-		     : "=m" ((rw)->lock) : "a" (&(rw)->lock),   \
-		       "m" ((rw)->lock) : "2", "3", "cc", "memory" )
-#else /* __s390x__ */
-#define _raw_read_unlock(rw)   \
-	asm volatile("   lg    2,0(%1)\n"   \
-		     "   j     1f\n"   \
-		     "0: " __DIAG44_INSN " 0,%2\n"   \
-		     "1: lgr   3,2\n"   \
-		     "   bctgr 3,0\n"        /* one less reader */ \
-		     "   csg   2,3,0(%1)\n"   \
-		     "   jl    0b"   \
-		     : "=m" ((rw)->lock)   \
-		     : "a" (&(rw)->lock), "i" (__DIAG44_OPERAND),   \
-		       "m" ((rw)->lock) : "2", "3", "cc", "memory" )
-#endif /* __s390x__ */
-
-#ifndef __s390x__
-#define _raw_write_lock(rw)   \
-	asm volatile("   lhi   3,1\n"   \
-		     "   sll   3,31\n"       /* new lock value = 0x80000000 */ \
-		     "   j     1f\n"   \
-		     "0: diag  0,0,68\n"   \
-		     "1: slr   2,2\n"        /* old lock value must be 0 */ \
-		     "   cs    2,3,0(%1)\n"   \
-		     "   jl    0b"   \
-		     : "=m" ((rw)->lock) : "a" (&(rw)->lock),   \
-		       "m" ((rw)->lock) : "2", "3", "cc", "memory" )
-#else /* __s390x__ */
-#define _raw_write_lock(rw)   \
-	asm volatile("   llihh 3,0x8000\n"   /* new lock value = 0x80...0 */ \
-		     "   j     1f\n"   \
-		     "0: " __DIAG44_INSN " 0,%2\n"   \
-		     "1: slgr  2,2\n"        /* old lock value must be 0 */ \
-		     "   csg   2,3,0(%1)\n"   \
-		     "   jl    0b"   \
-		     : "=m" ((rw)->lock)   \
-		     : "a" (&(rw)->lock), "i" (__DIAG44_OPERAND),   \
-		       "m" ((rw)->lock) : "2", "3", "cc", "memory" )
-#endif /* __s390x__ */
-
-#ifndef __s390x__
-#define _raw_write_unlock(rw)   \
-	asm volatile("   slr   3,3\n"        /* new lock value = 0 */ \
-		     "   j     1f\n"   \
-		     "0: diag  0,0,68\n"   \
-		     "1: lhi   2,1\n"   \
-		     "   sll   2,31\n"       /* old lock value must be 0x80000000 */ \
-		     "   cs    2,3,0(%1)\n"   \
-		     "   jl    0b"   \
-		     : "=m" ((rw)->lock) : "a" (&(rw)->lock),   \
-		       "m" ((rw)->lock) : "2", "3", "cc", "memory" )
-#else /* __s390x__ */
-#define _raw_write_unlock(rw)   \
-	asm volatile("   slgr  3,3\n"        /* new lock value = 0 */ \
-		     "   j     1f\n"   \
-		     "0: " __DIAG44_INSN " 0,%2\n"   \
-		     "1: llihh 2,0x8000\n"   /* old lock value must be 0x8..0 */\
-		     "   csg   2,3,0(%1)\n"   \
-		     "   jl    0b"   \
-		     : "=m" ((rw)->lock)   \
-		     : "a" (&(rw)->lock), "i" (__DIAG44_OPERAND),   \
-		       "m" ((rw)->lock) : "2", "3", "cc", "memory" )
-#endif /* __s390x__ */
-
-#define _raw_read_trylock(lock) generic_raw_read_trylock(lock)
-
-extern inline int _raw_write_trylock(rwlock_t *rw)
+extern void _raw_read_lock_wait(rwlock_t *lp);
+extern int _raw_read_trylock_retry(rwlock_t *lp);
+extern void _raw_write_lock_wait(rwlock_t *lp);
+extern int _raw_write_trylock_retry(rwlock_t *lp);
+
+static inline void _raw_read_lock(rwlock_t *rw)
+{
+	unsigned int old;
+	old = rw->lock & 0x7fffffffU;
+	if (_raw_compare_and_swap(&rw->lock, old, old + 1) != old)
+		_raw_read_lock_wait(rw);
+}
+
+static inline void _raw_read_unlock(rwlock_t *rw)
+{
+	unsigned int old, cmp;
+
+	old = rw->lock;
+	do {
+		cmp = old;
+		old = _raw_compare_and_swap(&rw->lock, old, old - 1);
+	} while (cmp != old);
+}
+
+static inline void _raw_write_lock(rwlock_t *rw)
+{
+	if (unlikely(_raw_compare_and_swap(&rw->lock, 0, 0x80000000) != 0))
+		_raw_write_lock_wait(rw);
+}
+
+static inline void _raw_write_unlock(rwlock_t *rw)
+{
+	_raw_compare_and_swap(&rw->lock, 0x80000000, 0);
+}
+
+static inline int _raw_read_trylock(rwlock_t *rw)
+{
+	unsigned int old;
+	old = rw->lock & 0x7fffffffU;
+	if (likely(_raw_compare_and_swap(&rw->lock, old, old + 1) == old))
+		return 1;
+	return _raw_read_trylock_retry(rw);
+}
+
+static inline int _raw_write_trylock(rwlock_t *rw)
 {
-	unsigned long result, reg;
-
-	__asm__ __volatile__(
-#ifndef __s390x__
-			     "   lhi  %1,1\n"
-			     "   sll  %1,31\n"
-			     "   cs   %0,%1,0(%3)"
-#else /* __s390x__ */
-			     "   llihh %1,0x8000\n"
-			     "0:  csg  %0,%1,0(%3)\n"
-#endif /* __s390x__ */
-			     : "=d" (result), "=&d" (reg), "=m" (rw->lock)
-			     : "a" (&rw->lock), "m" (rw->lock), "0" (0UL)
-			     : "cc", "memory" );
-	return result == 0;
+	if (likely(_raw_compare_and_swap(&rw->lock, 0, 0x80000000) == 0))
+		return 1;
+	return _raw_write_trylock_retry(rw);
 }
 
 #endif /* __ASM_SPINLOCK_H */
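All of the rwlock fast paths above rely on one encoding of the 32-bit lock word: bit 31 (0x80000000) is set while a writer holds the lock, and the low 31 bits count active readers. A compact illustration of a single optimistic reader attempt (not code from the patch; uses a GCC builtin in place of _raw_compare_and_swap):

/* Illustration only: one reader attempt against the encoding used by
 * _raw_read_lock() and _raw_read_trylock() above. */
static inline int reader_try_once(volatile unsigned int *lock)
{
	/* Mask off the write bit; if a writer really holds the lock the
	 * compare-and-swap fails and the caller takes the out-of-line
	 * retry path instead. */
	unsigned int old = *lock & 0x7fffffffU;

	return __sync_val_compare_and_swap(lock, old, old + 1) == old;
}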