aboutsummaryrefslogtreecommitdiffstats
path: root/include/asm-i386/semaphore.h
diff options
context:
space:
mode:
Diffstat (limited to 'include/asm-i386/semaphore.h')
-rw-r--r--include/asm-i386/semaphore.h194
1 files changed, 194 insertions, 0 deletions
diff --git a/include/asm-i386/semaphore.h b/include/asm-i386/semaphore.h
new file mode 100644
index 000000000000..ea563da63e24
--- /dev/null
+++ b/include/asm-i386/semaphore.h
@@ -0,0 +1,194 @@
1#ifndef _I386_SEMAPHORE_H
2#define _I386_SEMAPHORE_H
3
4#include <linux/linkage.h>
5
6#ifdef __KERNEL__
7
8/*
9 * SMP- and interrupt-safe semaphores..
10 *
11 * (C) Copyright 1996 Linus Torvalds
12 *
13 * Modified 1996-12-23 by Dave Grothe <dave@gcom.com> to fix bugs in
14 * the original code and to make semaphore waits
15 * interruptible so that processes waiting on
16 * semaphores can be killed.
17 * Modified 1999-02-14 by Andrea Arcangeli, split the sched.c helper
 *                     functions in asm/semaphore-helper.h while fixing a
19 * potential and subtle race discovered by Ulrich Schmid
20 * in down_interruptible(). Since I started to play here I
21 * also implemented the `trylock' semaphore operation.
22 * 1999-07-02 Artur Skawina <skawina@geocities.com>
23 * Optimized "0(ecx)" -> "(ecx)" (the assembler does not
24 * do this). Changed calling sequences from push/jmp to
25 * traditional call/ret.
26 * Modified 2001-01-01 Andreas Franck <afranck@gmx.de>
27 * Some hacks to ensure compatibility with recent
28 * GCC snapshots, to avoid stack corruption when compiling
29 * with -fomit-frame-pointer. It's not sure if this will
30 * be fixed in GCC, as our previous implementation was a
31 * bit dubious.
32 *
33 * If you would like to see an analysis of this implementation, please
34 * ftp to gcom.com and download the file
35 * /pub/linux/src/semaphore/semaphore-2.0.24.tar.gz.
36 *
37 */
38
39#include <asm/system.h>
40#include <asm/atomic.h>
41#include <linux/wait.h>
42#include <linux/rwsem.h>
43
/*
 * Counting semaphore. The fast paths below touch only ->count with a
 * single locked instruction; everything else is managed by the
 * out-of-line C slow path (arch/i386/kernel/semaphore.c).
 */
struct semaphore {
	atomic_t count;		/* >0: free; <=0: held, negative => waiters.
				 * MUST stay the first member: the inline asm
				 * passes &sem->count as the semaphore address. */
	int sleepers;		/* bookkeeping for the C slow path --
				 * NOTE(review): exact semantics live in
				 * semaphore.c, confirm there. */
	wait_queue_head_t wait;	/* tasks blocked in down()/down_interruptible() */
};
49
50
/*
 * __SEMAPHORE_INITIALIZER - compile-time initializer for a semaphore
 * object called @name with an initial count of @n.
 * (@name is needed so the embedded waitqueue head can refer to itself.)
 */
#define __SEMAPHORE_INITIALIZER(name, n)				\
{									\
	.count		= ATOMIC_INIT(n),				\
	.sleepers	= 0,						\
	.wait		= __WAIT_QUEUE_HEAD_INITIALIZER((name).wait)	\
}

/* Initializer for a semaphore used as a mutex: initial count of 1. */
#define __MUTEX_INITIALIZER(name) \
	__SEMAPHORE_INITIALIZER(name,1)

/* Define and statically initialize a semaphore in one statement. */
#define __DECLARE_SEMAPHORE_GENERIC(name,count) \
	struct semaphore name = __SEMAPHORE_INITIALIZER(name,count)

/* Declare a semaphore-based mutex, initially unlocked resp. locked. */
#define DECLARE_MUTEX(name) __DECLARE_SEMAPHORE_GENERIC(name,1)
#define DECLARE_MUTEX_LOCKED(name) __DECLARE_SEMAPHORE_GENERIC(name,0)
66
67static inline void sema_init (struct semaphore *sem, int val)
68{
69/*
70 * *sem = (struct semaphore)__SEMAPHORE_INITIALIZER((*sem),val);
71 *
72 * i'd rather use the more flexible initialization above, but sadly
73 * GCC 2.7.2.3 emits a bogus warning. EGCS doesn't. Oh well.
74 */
75 atomic_set(&sem->count, val);
76 sem->sleepers = 0;
77 init_waitqueue_head(&sem->wait);
78}
79
/* Initialize @sem for use as a mutex: starts out unlocked (count 1). */
static inline void init_MUTEX (struct semaphore *sem)
{
	sema_init(sem, 1);
}

/* Initialize @sem as a mutex that starts out locked (count 0). */
static inline void init_MUTEX_LOCKED (struct semaphore *sem)
{
	sema_init(sem, 0);
}
89
/*
 * Out-of-line slow-path entry points (assembly stubs that call into
 * arch/i386/kernel/semaphore.c). They use a non-standard register
 * calling convention: the inline asm below passes the semaphore
 * address in %eax ("lea %0,%%eax" before the call), and the
 * interruptible/trylock variants return their result in %eax.
 */
fastcall void __down_failed(void /* special register calling convention */);
fastcall int  __down_failed_interruptible(void /* params in registers */);
fastcall int  __down_failed_trylock(void /* params in registers */);
fastcall void __up_wakeup(void /* special register calling convention */);
94
/*
 * down - acquire the semaphore, sleeping until it is available.
 *
 * This is ugly, but we want the default case to fall through: the
 * uncontended path is one locked decrement and no taken branches.
 * "__down_failed" is a special asm handler that calls the C routine
 * that actually waits. See arch/i386/kernel/semaphore.c
 *
 * May sleep, so it must not be called from interrupt/atomic context.
 */
static inline void down(struct semaphore * sem)
{
	might_sleep();			/* debug check: we may block here */
	__asm__ __volatile__(
		"# atomic down operation\n\t"
		LOCK "decl %0\n\t"	/* --sem->count, SMP-atomic via LOCK */
		"js 2f\n"		/* went negative: contended, slow path */
		"1:\n"
		LOCK_SECTION_START("")	/* slow path kept out of line */
		"2:\tlea %0,%%eax\n\t"	/* &sem->count == sem (first member) in %eax */
		"call __down_failed\n\t"
		"jmp 1b\n"
		LOCK_SECTION_END
		:"=m" (sem->count)
		:
		:"memory","ax");	/* "ax": clobbered by the lea/call above */
}
117
/*
 * down_interruptible - acquire the semaphore, but let signals abort
 * the wait.
 *
 * Interruptible try to acquire a semaphore. If we obtained
 * it, return zero. If we were interrupted, returns -EINTR
 * (the slow path's return value comes back in %eax).
 */
static inline int down_interruptible(struct semaphore * sem)
{
	int result;

	might_sleep();			/* debug check: we may block here */
	__asm__ __volatile__(
		"# atomic interruptible down operation\n\t"
		LOCK "decl %1\n\t"	/* --sem->count */
		"js 2f\n\t"		/* went negative: contended, slow path */
		"xorl %0,%0\n"		/* uncontended: result = 0 */
		"1:\n"
		LOCK_SECTION_START("")
		"2:\tlea %1,%%eax\n\t"	/* semaphore address in %eax */
		"call __down_failed_interruptible\n\t"
		"jmp 1b\n"
		LOCK_SECTION_END
		:"=a" (result), "=m" (sem->count)	/* result bound to %eax */
		:
		:"memory");
	return result;
}
143
144/*
145 * Non-blockingly attempt to down() a semaphore.
146 * Returns zero if we acquired it
147 */
148static inline int down_trylock(struct semaphore * sem)
149{
150 int result;
151
152 __asm__ __volatile__(
153 "# atomic interruptible down operation\n\t"
154 LOCK "decl %1\n\t" /* --sem->count */
155 "js 2f\n\t"
156 "xorl %0,%0\n"
157 "1:\n"
158 LOCK_SECTION_START("")
159 "2:\tlea %1,%%eax\n\t"
160 "call __down_failed_trylock\n\t"
161 "jmp 1b\n"
162 LOCK_SECTION_END
163 :"=a" (result), "=m" (sem->count)
164 :
165 :"memory");
166 return result;
167}
168
/*
 * up - release the semaphore.
 *
 * Note! This is subtle. We jump to wake people up only if
 * the semaphore was negative (== somebody was waiting on it).
 * The default case (no contention) will result in NO
 * jumps for both down() and up().
 */
static inline void up(struct semaphore * sem)
{
	__asm__ __volatile__(
		"# atomic up operation\n\t"
		LOCK "incl %0\n\t"	/* ++sem->count */
		"jle 2f\n"		/* result <= 0: sleepers exist, wake one */
		"1:\n"
		LOCK_SECTION_START("")
		"2:\tlea %0,%%eax\n\t"	/* semaphore address in %eax */
		"call __up_wakeup\n\t"
		"jmp 1b\n"
		LOCK_SECTION_END
		/* NOTE(review): ".subsection 0" right after LOCK_SECTION_END
		 * (which should already switch back) looks redundant --
		 * confirm against the LOCK_SECTION_* macro definitions. */
		".subsection 0\n"
		:"=m" (sem->count)
		:
		:"memory","ax");	/* "ax": clobbered by the lea/call above */
}
192
193#endif
194#endif