author    Martin Schwidefsky <schwidefsky@de.ibm.com>    2006-10-01 02:27:45 -0400
committer Linus Torvalds <torvalds@g5.osdl.org>          2006-10-01 03:39:22 -0400
commit    3c1fcfe229e99752c74efb945a4a3f560be04204 (patch)
tree      44085d9b599e06a92426141811a6f712beac17aa
parent    cdc39363d33506b0e067d41fc91f89d186bdf7f7 (diff)
[PATCH] Directed yield: direct yield of spinlocks for s390.
Use the new diagnose 0x9c in the spinlock implementation for s390. It yields
the remaining timeslice of the virtual cpu that tries to acquire a lock to
the virtual cpu that is the current holder of the lock.

Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
Cc: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
-rw-r--r--  arch/s390/kernel/head31.S          | 11
-rw-r--r--  arch/s390/kernel/head64.S          | 11
-rw-r--r--  arch/s390/lib/spinlock.c           | 62
-rw-r--r--  include/asm-s390/setup.h           |  1
-rw-r--r--  include/asm-s390/spinlock.h        | 33
-rw-r--r--  include/asm-s390/spinlock_types.h  |  6
6 files changed, 88 insertions(+), 36 deletions(-)
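For readers who prefer the result over the diff, the slow path below is, in
essence, what the patch adds to arch/s390/lib/spinlock.c (a condensed
restatement, not a drop-in file; kernel-internal symbols such as
MACHINE_HAS_DIAG9C, __cpu_logical_map, smp_processor_id() and spin_retry are
assumed from the surrounding kernel headers):

/* Condensed sketch of the new slow path: the lock word holds ~cpu of the
 * owner (0 == unlocked).  After spin_retry failed attempts the spinner
 * issues diag 0x9c, handing its remaining timeslice to the owning vcpu. */
static inline void _raw_yield_cpu(int cpu)
{
	if (MACHINE_HAS_DIAG9C)		/* hypervisor supports directed yield */
		asm volatile("diag %0,0,0x9c" : : "d" (__cpu_logical_map[cpu]));
	else
		_raw_yield();		/* fall back to undirected diag 0x44 */
}

void _raw_spin_lock_wait(raw_spinlock_t *lp, unsigned int pc)
{
	int count = spin_retry;
	unsigned int cpu = ~smp_processor_id();

	while (1) {
		if (count-- <= 0) {
			unsigned int owner = lp->owner_cpu;
			if (owner != 0)
				_raw_yield_cpu(~owner);	/* yield to the holder */
			count = spin_retry;
		}
		if (__raw_spin_is_locked(lp))
			continue;
		if (_raw_compare_and_swap(&lp->owner_cpu, 0, cpu) == 0) {
			lp->owner_pc = pc;
			return;
		}
	}
}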
diff --git a/arch/s390/kernel/head31.S b/arch/s390/kernel/head31.S
index 1fa9fa1ca740..1b952a3664e2 100644
--- a/arch/s390/kernel/head31.S
+++ b/arch/s390/kernel/head31.S
@@ -254,6 +254,16 @@ startup_continue:
 	oi	3(%r12),0x80		# set IDTE flag
 .Lchkidte:
 
+#
+# find out if the diag 0x9c is available
+#
+	mvc	__LC_PGM_NEW_PSW(8),.Lpcdiag9c-.LPG1(%r13)
+	stap	__LC_CPUID+4		# store cpu address
+	lh	%r1,__LC_CPUID+4
+	diag	%r1,0,0x9c		# test diag 0x9c
+	oi	2(%r12),1		# set diag9c flag
+.Lchkdiag9c:
+
 	lpsw  .Lentry-.LPG1(13)		# jump to _stext in primary-space,
 					# virtual and never return ...
 	.align	8
@@ -281,6 +291,7 @@ startup_continue:
 .Lpccsp:.long	0x00080000,0x80000000 + .Lchkcsp
 .Lpcmvpg:.long	0x00080000,0x80000000 + .Lchkmvpg
 .Lpcidte:.long	0x00080000,0x80000000 + .Lchkidte
+.Lpcdiag9c:.long 0x00080000,0x80000000 + .Lchkdiag9c
 .Lmemsize:.long memory_size
 .Lmchunk:.long memory_chunk
 .Lmflags:.long machine_flags
diff --git a/arch/s390/kernel/head64.S b/arch/s390/kernel/head64.S
index 48998d50b00a..b30e5897cdf7 100644
--- a/arch/s390/kernel/head64.S
+++ b/arch/s390/kernel/head64.S
@@ -251,6 +251,17 @@ startup_continue:
 0:
 
 #
+# find out if the diag 0x9c is available
+#
+	la	%r1,0f-.LPG1(%r13)	# set program check address
+	stg	%r1,__LC_PGM_NEW_PSW+8
+	stap	__LC_CPUID+4		# store cpu address
+	lh	%r1,__LC_CPUID+4
+	diag	%r1,0,0x9c		# test diag 0x9c
+	oi	6(%r12),1		# set diag9c flag
+0:
+
+#
 # find out if we have the MVCOS instruction
 #
 	la	%r1,0f-.LPG1(%r13)	# set program check address
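The boot-time probes added above follow the usual s390 pattern: point the
program-check new PSW at a label past the flag-setting instruction, issue the
instruction being tested, and set the machine_flags bit only if execution falls
through without a program check. A rough user-space analogy of that
probe-and-flag idea, using SIGILL as the stand-in for the program check
(purely illustrative; probe_feature and try_insn are made-up names for this
sketch, not kernel APIs):

#include <setjmp.h>
#include <signal.h>

static sigjmp_buf probe_env;

static void probe_trap(int sig)
{
	(void)sig;
	siglongjmp(probe_env, 1);	/* like branching to .Lchkdiag9c / 0: */
}

/* Returns 1 if try_insn() ran without trapping, 0 otherwise. */
static int probe_feature(void (*try_insn)(void))
{
	struct sigaction sa, old;
	int available = 0;

	sa.sa_handler = probe_trap;	/* like loading __LC_PGM_NEW_PSW */
	sigemptyset(&sa.sa_mask);
	sa.sa_flags = 0;
	sigaction(SIGILL, &sa, &old);

	if (sigsetjmp(probe_env, 1) == 0) {
		try_insn();		/* like "diag %r1,0,0x9c" */
		available = 1;		/* like "oi ...,1   # set diag9c flag" */
	}

	sigaction(SIGILL, &old, NULL);	/* restore the previous handler */
	return available;
}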
diff --git a/arch/s390/lib/spinlock.c b/arch/s390/lib/spinlock.c
index b9b7958a226a..8d76403fcf89 100644
--- a/arch/s390/lib/spinlock.c
+++ b/arch/s390/lib/spinlock.c
@@ -24,57 +24,76 @@ static int __init spin_retry_setup(char *str)
 }
 __setup("spin_retry=", spin_retry_setup);
 
-static inline void
-_diag44(void)
+static inline void _raw_yield(void)
 {
-#ifdef CONFIG_64BIT
 	if (MACHINE_HAS_DIAG44)
-#endif
 		asm volatile("diag 0,0,0x44");
 }
 
-void
-_raw_spin_lock_wait(raw_spinlock_t *lp, unsigned int pc)
+static inline void _raw_yield_cpu(int cpu)
+{
+	if (MACHINE_HAS_DIAG9C)
+		asm volatile("diag %0,0,0x9c"
+			     : : "d" (__cpu_logical_map[cpu]));
+	else
+		_raw_yield();
+}
+
+void _raw_spin_lock_wait(raw_spinlock_t *lp, unsigned int pc)
 {
 	int count = spin_retry;
+	unsigned int cpu = ~smp_processor_id();
 
 	while (1) {
 		if (count-- <= 0) {
-			_diag44();
+			unsigned int owner = lp->owner_cpu;
+			if (owner != 0)
+				_raw_yield_cpu(~owner);
 			count = spin_retry;
 		}
 		if (__raw_spin_is_locked(lp))
 			continue;
-		if (_raw_compare_and_swap(&lp->lock, 0, pc) == 0)
+		if (_raw_compare_and_swap(&lp->owner_cpu, 0, cpu) == 0) {
+			lp->owner_pc = pc;
 			return;
+		}
 	}
 }
 EXPORT_SYMBOL(_raw_spin_lock_wait);
 
-int
-_raw_spin_trylock_retry(raw_spinlock_t *lp, unsigned int pc)
+int _raw_spin_trylock_retry(raw_spinlock_t *lp, unsigned int pc)
 {
-	int count = spin_retry;
+	unsigned int cpu = ~smp_processor_id();
+	int count;
 
-	while (count-- > 0) {
+	for (count = spin_retry; count > 0; count--) {
 		if (__raw_spin_is_locked(lp))
 			continue;
-		if (_raw_compare_and_swap(&lp->lock, 0, pc) == 0)
+		if (_raw_compare_and_swap(&lp->owner_cpu, 0, cpu) == 0) {
+			lp->owner_pc = pc;
 			return 1;
+		}
 	}
 	return 0;
 }
 EXPORT_SYMBOL(_raw_spin_trylock_retry);
 
-void
-_raw_read_lock_wait(raw_rwlock_t *rw)
+void _raw_spin_relax(raw_spinlock_t *lock)
+{
+	unsigned int cpu = lock->owner_cpu;
+	if (cpu != 0)
+		_raw_yield_cpu(~cpu);
+}
+EXPORT_SYMBOL(_raw_spin_relax);
+
+void _raw_read_lock_wait(raw_rwlock_t *rw)
 {
 	unsigned int old;
 	int count = spin_retry;
 
 	while (1) {
 		if (count-- <= 0) {
-			_diag44();
+			_raw_yield();
 			count = spin_retry;
 		}
 		if (!__raw_read_can_lock(rw))
@@ -86,8 +105,7 @@ _raw_read_lock_wait(raw_rwlock_t *rw)
 }
 EXPORT_SYMBOL(_raw_read_lock_wait);
 
-int
-_raw_read_trylock_retry(raw_rwlock_t *rw)
+int _raw_read_trylock_retry(raw_rwlock_t *rw)
 {
 	unsigned int old;
 	int count = spin_retry;
@@ -103,14 +121,13 @@ _raw_read_trylock_retry(raw_rwlock_t *rw)
 }
 EXPORT_SYMBOL(_raw_read_trylock_retry);
 
-void
-_raw_write_lock_wait(raw_rwlock_t *rw)
+void _raw_write_lock_wait(raw_rwlock_t *rw)
 {
 	int count = spin_retry;
 
 	while (1) {
 		if (count-- <= 0) {
-			_diag44();
+			_raw_yield();
 			count = spin_retry;
 		}
 		if (!__raw_write_can_lock(rw))
@@ -121,8 +138,7 @@ _raw_write_lock_wait(raw_rwlock_t *rw)
 }
 EXPORT_SYMBOL(_raw_write_lock_wait);
 
-int
-_raw_write_trylock_retry(raw_rwlock_t *rw)
+int _raw_write_trylock_retry(raw_rwlock_t *rw)
 {
 	int count = spin_retry;
 
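A small point that is easy to miss in the hunk above: the lock word stores the
bitwise complement of the logical CPU number, so that CPU 0 still produces a
non-zero value and 0 can keep meaning "unlocked"; the spinner complements the
stored value back before handing the owner to diag 0x9c. A tiny standalone
illustration of that encoding (encode_owner/decode_owner are hypothetical
names used only in this sketch):

#include <assert.h>

/* Mirrors the ~smp_processor_id() / ~owner pattern used above:
 * 0 means "unlocked", and any CPU id, including 0, encodes non-zero. */
static unsigned int encode_owner(unsigned int cpu)  { return ~cpu; }
static unsigned int decode_owner(unsigned int word) { return ~word; }

int main(void)
{
	unsigned int word = encode_owner(0);	/* CPU 0 takes the lock */
	assert(word != 0);			/* still reads as "locked" */
	assert(decode_owner(word) == 0);	/* and the owner is recoverable */
	return 0;
}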
diff --git a/include/asm-s390/setup.h b/include/asm-s390/setup.h
index f1959732b6fd..5d72eda8a11b 100644
--- a/include/asm-s390/setup.h
+++ b/include/asm-s390/setup.h
@@ -39,6 +39,7 @@ extern unsigned long machine_flags;
 #define MACHINE_IS_P390		(machine_flags & 4)
 #define MACHINE_HAS_MVPG	(machine_flags & 16)
 #define MACHINE_HAS_IDTE	(machine_flags & 128)
+#define MACHINE_HAS_DIAG9C	(machine_flags & 256)
 
 #ifndef __s390x__
 #define MACHINE_HAS_IEEE	(machine_flags & 2)
diff --git a/include/asm-s390/spinlock.h b/include/asm-s390/spinlock.h
index 5f00feaf1be6..6b78af16999b 100644
--- a/include/asm-s390/spinlock.h
+++ b/include/asm-s390/spinlock.h
@@ -13,6 +13,8 @@
 
 #if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ > 2)
 
+#include <linux/smp.h>
+
 static inline int
 _raw_compare_and_swap(volatile unsigned int *lock,
 		      unsigned int old, unsigned int new)
@@ -50,34 +52,46 @@ _raw_compare_and_swap(volatile unsigned int *lock,
  * (the type definitions are in asm/spinlock_types.h)
  */
 
-#define __raw_spin_is_locked(x) ((x)->lock != 0)
+#define __raw_spin_is_locked(x) ((x)->owner_cpu != 0)
 #define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock)
 #define __raw_spin_unlock_wait(lock) \
-	do { while (__raw_spin_is_locked(lock)) cpu_relax(); } while (0)
+	do { while (__raw_spin_is_locked(lock)) \
+		 _raw_spin_relax(lock); } while (0)
 
-extern void _raw_spin_lock_wait(raw_spinlock_t *lp, unsigned int pc);
-extern int _raw_spin_trylock_retry(raw_spinlock_t *lp, unsigned int pc);
+extern void _raw_spin_lock_wait(raw_spinlock_t *, unsigned int pc);
+extern int _raw_spin_trylock_retry(raw_spinlock_t *, unsigned int pc);
+extern void _raw_spin_relax(raw_spinlock_t *lock);
 
 static inline void __raw_spin_lock(raw_spinlock_t *lp)
 {
 	unsigned long pc = 1 | (unsigned long) __builtin_return_address(0);
-
-	if (unlikely(_raw_compare_and_swap(&lp->lock, 0, pc) != 0))
-		_raw_spin_lock_wait(lp, pc);
+	int old;
+
+	old = _raw_compare_and_swap(&lp->owner_cpu, 0, ~smp_processor_id());
+	if (likely(old == 0)) {
+		lp->owner_pc = pc;
+		return;
+	}
+	_raw_spin_lock_wait(lp, pc);
 }
 
 static inline int __raw_spin_trylock(raw_spinlock_t *lp)
 {
 	unsigned long pc = 1 | (unsigned long) __builtin_return_address(0);
+	int old;
 
-	if (likely(_raw_compare_and_swap(&lp->lock, 0, pc) == 0))
+	old = _raw_compare_and_swap(&lp->owner_cpu, 0, ~smp_processor_id());
+	if (likely(old == 0)) {
+		lp->owner_pc = pc;
 		return 1;
+	}
 	return _raw_spin_trylock_retry(lp, pc);
 }
 
 static inline void __raw_spin_unlock(raw_spinlock_t *lp)
 {
-	_raw_compare_and_swap(&lp->lock, lp->lock, 0);
+	lp->owner_pc = 0;
+	_raw_compare_and_swap(&lp->owner_cpu, lp->owner_cpu, 0);
 }
 
 /*
@@ -154,7 +168,6 @@ static inline int __raw_write_trylock(raw_rwlock_t *rw)
 	return _raw_write_trylock_retry(rw);
 }
 
-#define _raw_spin_relax(lock)	cpu_relax()
 #define _raw_read_relax(lock)	cpu_relax()
 #define _raw_write_relax(lock)	cpu_relax()
 
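Dropping the old "#define _raw_spin_relax(lock) cpu_relax()" matters beyond
this header: __raw_spin_unlock_wait() above now relaxes via _raw_spin_relax(),
and the generic out-of-line lock loops, which call _raw_spin_relax() between
trylock attempts, pick up the directed yield as well. Roughly the pattern is
the following (a sketch from memory of that generic contended-lock loop, not
code from this patch):

/* Sketch of a generic contended-lock loop that benefits from the change:
 * every relax step now yields to lp->owner_cpu instead of just pausing. */
static void spin_lock_outline(raw_spinlock_t *lp)
{
	for (;;) {
		if (__raw_spin_trylock(lp))
			return;
		while (__raw_spin_is_locked(lp))
			_raw_spin_relax(lp);	/* was cpu_relax(); now a directed yield */
	}
}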
diff --git a/include/asm-s390/spinlock_types.h b/include/asm-s390/spinlock_types.h
index f79a2216204f..b7ac13f7aa37 100644
--- a/include/asm-s390/spinlock_types.h
+++ b/include/asm-s390/spinlock_types.h
@@ -6,16 +6,16 @@
 #endif
 
 typedef struct {
-	volatile unsigned int lock;
+	volatile unsigned int owner_cpu;
+	volatile unsigned int owner_pc;
 } __attribute__ ((aligned (4))) raw_spinlock_t;
 
 #define __RAW_SPIN_LOCK_UNLOCKED	{ 0 }
 
 typedef struct {
 	volatile unsigned int lock;
-	volatile unsigned int owner_pc;
 } raw_rwlock_t;
 
-#define __RAW_RW_LOCK_UNLOCKED		{ 0, 0 }
+#define __RAW_RW_LOCK_UNLOCKED		{ 0 }
 
 #endif