path: root/include/asm-x86_64/semaphore.h
author    Linus Torvalds <torvalds@ppc970.osdl.org>    2005-04-16 18:20:36 -0400
committer Linus Torvalds <torvalds@ppc970.osdl.org>    2005-04-16 18:20:36 -0400
commit    1da177e4c3f41524e886b7f1b8a0c1fc7321cac2 (patch)
tree      0bba044c4ce775e45a88a51686b5d9f90697ea9d /include/asm-x86_64/semaphore.h
Linux-2.6.12-rc2 (tag: v2.6.12-rc2)
Initial git repository build. I'm not bothering with the full history, even though we have it. We can create a separate "historical" git archive of that later if we want to, and in the meantime it's about 3.2GB when imported into git - space that would just make the early git days unnecessarily complicated, when we don't have a lot of good infrastructure for it. Let it rip!
Diffstat (limited to 'include/asm-x86_64/semaphore.h')
-rw-r--r--   include/asm-x86_64/semaphore.h   196
1 files changed, 196 insertions, 0 deletions
diff --git a/include/asm-x86_64/semaphore.h b/include/asm-x86_64/semaphore.h
new file mode 100644
index 000000000000..f325e39bf3b9
--- /dev/null
+++ b/include/asm-x86_64/semaphore.h
@@ -0,0 +1,196 @@
#ifndef _X86_64_SEMAPHORE_H
#define _X86_64_SEMAPHORE_H

#include <linux/linkage.h>

#ifdef __KERNEL__

/*
 * SMP- and interrupt-safe semaphores.
 *
 * (C) Copyright 1996 Linus Torvalds
 *
 * Modified 1996-12-23 by Dave Grothe <dave@gcom.com> to fix bugs in
 *                     the original code and to make semaphore waits
 *                     interruptible so that processes waiting on
 *                     semaphores can be killed.
 * Modified 1999-02-14 by Andrea Arcangeli, split the sched.c helper
 *                     functions in asm/semaphore-helper.h while fixing a
 *                     potential and subtle race discovered by Ulrich Schmid
 *                     in down_interruptible(). Since I started to play here I
 *                     also implemented the `trylock' semaphore operation.
 *          1999-07-02 Artur Skawina <skawina@geocities.com>
 *                     Optimized "0(ecx)" -> "(ecx)" (the assembler does not
 *                     do this). Changed calling sequences from push/jmp to
 *                     traditional call/ret.
 * Modified 2001-01-01 Andreas Franck <afranck@gmx.de>
 *                     Some hacks to ensure compatibility with recent
 *                     GCC snapshots, to avoid stack corruption when compiling
 *                     with -fomit-frame-pointer. It is not clear whether this
 *                     will be fixed in GCC, as our previous implementation
 *                     was a bit dubious.
 *
 * If you would like to see an analysis of this implementation, please
 * ftp to gcom.com and download the file
 * /pub/linux/src/semaphore/semaphore-2.0.24.tar.gz.
 *
 */

#include <asm/system.h>
#include <asm/atomic.h>
#include <asm/rwlock.h>
#include <linux/wait.h>
#include <linux/rwsem.h>
#include <linux/stringify.h>

struct semaphore {
	atomic_t count;
	int sleepers;
	wait_queue_head_t wait;
};

#define __SEMAPHORE_INITIALIZER(name, n)			\
{								\
	.count		= ATOMIC_INIT(n),			\
	.sleepers	= 0,					\
	.wait		= __WAIT_QUEUE_HEAD_INITIALIZER((name).wait) \
}

#define __MUTEX_INITIALIZER(name) \
	__SEMAPHORE_INITIALIZER(name,1)

#define __DECLARE_SEMAPHORE_GENERIC(name,count) \
	struct semaphore name = __SEMAPHORE_INITIALIZER(name,count)

#define DECLARE_MUTEX(name) __DECLARE_SEMAPHORE_GENERIC(name,1)
#define DECLARE_MUTEX_LOCKED(name) __DECLARE_SEMAPHORE_GENERIC(name,0)

static inline void sema_init (struct semaphore *sem, int val)
{
/*
 *	*sem = (struct semaphore)__SEMAPHORE_INITIALIZER((*sem),val);
 *
 * i'd rather use the more flexible initialization above, but sadly
 * GCC 2.7.2.3 emits a bogus warning. EGCS doesn't. Oh well.
 */
	atomic_set(&sem->count, val);
	sem->sleepers = 0;
	init_waitqueue_head(&sem->wait);
}

static inline void init_MUTEX (struct semaphore *sem)
{
	sema_init(sem, 1);
}

static inline void init_MUTEX_LOCKED (struct semaphore *sem)
{
	sema_init(sem, 0);
}

asmlinkage void __down_failed(void /* special register calling convention */);
asmlinkage int __down_failed_interruptible(void /* params in registers */);
asmlinkage int __down_failed_trylock(void /* params in registers */);
asmlinkage void __up_wakeup(void /* special register calling convention */);

asmlinkage void __down(struct semaphore * sem);
asmlinkage int __down_interruptible(struct semaphore * sem);
asmlinkage int __down_trylock(struct semaphore * sem);
asmlinkage void __up(struct semaphore * sem);

/*
 * This is ugly, but we want the default case to fall through.
 * "__down_failed" is a special asm handler that calls the C
 * routine that actually waits. See arch/x86_64/kernel/semaphore.c
 */
static inline void down(struct semaphore * sem)
{
	might_sleep();

	__asm__ __volatile__(
		"# atomic down operation\n\t"
		LOCK "decl %0\n\t"	/* --sem->count */
		"js 2f\n"
		"1:\n"
		LOCK_SECTION_START("")
		"2:\tcall __down_failed\n\t"
		"jmp 1b\n"
		LOCK_SECTION_END
		:"=m" (sem->count)
		:"D" (sem)
		:"memory");
}

/*
 * Interruptible attempt to acquire a semaphore.  Returns zero if we
 * acquired it, or -EINTR if we were interrupted.
 */
static inline int down_interruptible(struct semaphore * sem)
{
	int result;

	might_sleep();

	__asm__ __volatile__(
		"# atomic interruptible down operation\n\t"
		LOCK "decl %1\n\t"	/* --sem->count */
		"js 2f\n\t"
		"xorl %0,%0\n"
		"1:\n"
		LOCK_SECTION_START("")
		"2:\tcall __down_failed_interruptible\n\t"
		"jmp 1b\n"
		LOCK_SECTION_END
		:"=a" (result), "=m" (sem->count)
		:"D" (sem)
		:"memory");
	return result;
}

/*
 * Non-blocking attempt to down() a semaphore.
 * Returns zero if we acquired it.
 */
static inline int down_trylock(struct semaphore * sem)
{
	int result;

	__asm__ __volatile__(
		"# atomic trylock down operation\n\t"
		LOCK "decl %1\n\t"	/* --sem->count */
		"js 2f\n\t"
		"xorl %0,%0\n"
		"1:\n"
		LOCK_SECTION_START("")
		"2:\tcall __down_failed_trylock\n\t"
		"jmp 1b\n"
		LOCK_SECTION_END
		:"=a" (result), "=m" (sem->count)
		:"D" (sem)
		:"memory","cc");
	return result;
}

/*
 * Note! This is subtle. We jump to wake people up only if
 * the semaphore was negative (== somebody was waiting on it).
 * The default case (no contention) will result in NO
 * jumps for both down() and up().
 */
static inline void up(struct semaphore * sem)
{
	__asm__ __volatile__(
		"# atomic up operation\n\t"
		LOCK "incl %0\n\t"	/* ++sem->count */
		"jle 2f\n"
		"1:\n"
		LOCK_SECTION_START("")
		"2:\tcall __up_wakeup\n\t"
		"jmp 1b\n"
		LOCK_SECTION_END
		:"=m" (sem->count)
		:"D" (sem)
		:"memory");
}
#endif /* __KERNEL__ */
#endif
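
For readers unfamiliar with the classic kernel semaphore API declared above, a minimal usage sketch follows. The lock and function names (mydev_lock, mydev_write) are made up for illustration and are not part of this commit; only the calls themselves (DECLARE_MUTEX, down_interruptible(), up()) come from this header.

/* Hypothetical usage sketch -- names are illustrative, not from this commit. */
#include <asm/semaphore.h>
#include <linux/errno.h>

static DECLARE_MUTEX(mydev_lock);	/* binary semaphore, count starts at 1 */

static int mydev_write(const char *buf, int len)
{
	/* Sleep until we hold the semaphore; a nonzero return means we
	 * were interrupted by a signal, so let the syscall be restarted. */
	if (down_interruptible(&mydev_lock))
		return -ERESTARTSYS;

	/* ... critical section protected by mydev_lock ... */

	up(&mydev_lock);		/* release; wakes one sleeper if any */
	return len;
}

As the header's own comments note, the uncontended case costs only the locked decrement in down() and the locked increment in up(); the calls to __down_failed and __up_wakeup are emitted into an out-of-line section via LOCK_SECTION_START, so no jumps are taken when there is no contention.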