Diffstat (limited to 'include/asm-x86/semaphore_64.h')
-rw-r--r--  include/asm-x86/semaphore_64.h  181
1 file changed, 181 insertions, 0 deletions
diff --git a/include/asm-x86/semaphore_64.h b/include/asm-x86/semaphore_64.h
new file mode 100644
index 000000000000..1194888536b9
--- /dev/null
+++ b/include/asm-x86/semaphore_64.h
@@ -0,0 +1,181 @@
#ifndef _X86_64_SEMAPHORE_H
#define _X86_64_SEMAPHORE_H

#include <linux/linkage.h>

#ifdef __KERNEL__

/*
 * SMP- and interrupt-safe semaphores..
 *
 * (C) Copyright 1996 Linus Torvalds
 *
 * Modified 1996-12-23 by Dave Grothe <dave@gcom.com> to fix bugs in
 *                     the original code and to make semaphore waits
 *                     interruptible so that processes waiting on
 *                     semaphores can be killed.
 * Modified 1999-02-14 by Andrea Arcangeli, split the sched.c helper
 *                     functions in asm/semaphore-helper.h while fixing a
 *                     potential and subtle race discovered by Ulrich Schmid
 *                     in down_interruptible(). Since I started to play here I
 *                     also implemented the `trylock' semaphore operation.
 *          1999-07-02 Artur Skawina <skawina@geocities.com>
 *                     Optimized "0(ecx)" -> "(ecx)" (the assembler does not
 *                     do this). Changed calling sequences from push/jmp to
 *                     traditional call/ret.
 * Modified 2001-01-01 Andreas Franck <afranck@gmx.de>
 *                     Some hacks to ensure compatibility with recent
 *                     GCC snapshots, to avoid stack corruption when compiling
 *                     with -fomit-frame-pointer. It is not clear whether this will
 *                     be fixed in GCC, as our previous implementation was a
 *                     bit dubious.
 *
 * If you would like to see an analysis of this implementation, please
 * ftp to gcom.com and download the file
 * /pub/linux/src/semaphore/semaphore-2.0.24.tar.gz.
 *
 */

#include <asm/system.h>
#include <asm/atomic.h>
#include <asm/rwlock.h>
#include <linux/wait.h>
#include <linux/rwsem.h>
#include <linux/stringify.h>

struct semaphore {
	atomic_t count;
	int sleepers;
	wait_queue_head_t wait;
};

#define __SEMAPHORE_INITIALIZER(name, n) \
{ \
	.count = ATOMIC_INIT(n), \
	.sleepers = 0, \
	.wait = __WAIT_QUEUE_HEAD_INITIALIZER((name).wait) \
}

#define __DECLARE_SEMAPHORE_GENERIC(name,count) \
	struct semaphore name = __SEMAPHORE_INITIALIZER(name,count)

#define DECLARE_MUTEX(name) __DECLARE_SEMAPHORE_GENERIC(name,1)
#define DECLARE_MUTEX_LOCKED(name) __DECLARE_SEMAPHORE_GENERIC(name,0)

static inline void sema_init (struct semaphore *sem, int val)
{
/*
 *	*sem = (struct semaphore)__SEMAPHORE_INITIALIZER((*sem),val);
 *
 * I'd rather use the more flexible initialization above, but sadly
 * GCC 2.7.2.3 emits a bogus warning. EGCS doesn't. Oh well.
 */
	atomic_set(&sem->count, val);
	sem->sleepers = 0;
	init_waitqueue_head(&sem->wait);
}

static inline void init_MUTEX (struct semaphore *sem)
{
	sema_init(sem, 1);
}

static inline void init_MUTEX_LOCKED (struct semaphore *sem)
{
	sema_init(sem, 0);
}

asmlinkage void __down_failed(void /* special register calling convention */);
asmlinkage int __down_failed_interruptible(void /* params in registers */);
asmlinkage int __down_failed_trylock(void /* params in registers */);
asmlinkage void __up_wakeup(void /* special register calling convention */);

asmlinkage void __down(struct semaphore * sem);
asmlinkage int __down_interruptible(struct semaphore * sem);
asmlinkage int __down_trylock(struct semaphore * sem);
asmlinkage void __up(struct semaphore * sem);

/*
 * This is ugly, but we want the default case to fall through.
 * "__down_failed" is a special asm handler that calls the C
 * routine that actually waits. See arch/x86_64/kernel/semaphore.c
 */
static inline void down(struct semaphore * sem)
{
	might_sleep();

	__asm__ __volatile__(
		"# atomic down operation\n\t"
		LOCK_PREFIX "decl %0\n\t" /* --sem->count */
		"jns 1f\n\t"
		"call __down_failed\n"
		"1:"
		:"=m" (sem->count)
		:"D" (sem)
		:"memory");
}

/*
 * Interruptible attempt to acquire a semaphore. If we acquire
 * it, return zero. If we are interrupted, return -EINTR.
 */
static inline int down_interruptible(struct semaphore * sem)
{
	int result;

	might_sleep();

	__asm__ __volatile__(
		"# atomic interruptible down operation\n\t"
		"xorl %0,%0\n\t"
		LOCK_PREFIX "decl %1\n\t" /* --sem->count */
		"jns 2f\n\t"
		"call __down_failed_interruptible\n"
		"2:\n"
		:"=&a" (result), "=m" (sem->count)
		:"D" (sem)
		:"memory");
	return result;
}

/*
 * Non-blockingly attempt to down() a semaphore.
 * Returns zero if we acquired it
 */
static inline int down_trylock(struct semaphore * sem)
{
	int result;

	__asm__ __volatile__(
150 "# atomic interruptible down operation\n\t"
151 "xorl %0,%0\n\t"
152 LOCK_PREFIX "decl %1\n\t" /* --sem->count */
153 "jns 2f\n\t"
154 "call __down_failed_trylock\n\t"
155 "2:\n"
156 :"=&a" (result), "=m" (sem->count)
157 :"D" (sem)
158 :"memory","cc");
159 return result;
160}
161
162/*
163 * Note! This is subtle. We jump to wake people up only if
164 * the semaphore was negative (== somebody was waiting on it).
165 * The default case (no contention) will result in NO
166 * jumps for both down() and up().
167 */
168static inline void up(struct semaphore * sem)
169{
170 __asm__ __volatile__(
171 "# atomic up operation\n\t"
172 LOCK_PREFIX "incl %0\n\t" /* ++sem->count */
173 "jg 1f\n\t"
174 "call __up_wakeup\n"
175 "1:"
176 :"=m" (sem->count)
177 :"D" (sem)
178 :"memory");
179}
180#endif /* __KERNEL__ */
181#endif
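
For readers less familiar with the inline assembly above: the fast paths of down() and up() are an atomic decrement/increment followed by a conditional call into the slow-path helpers. The sketch below is a rough C equivalent for illustration only; it is not part of the header and deliberately ignores the special register-preserving calling convention that __down_failed and __up_wakeup exist to provide, which is the reason the real code is written in assembly. The helper names down_c_sketch/up_c_sketch are invented.

/*
 * Illustrative C equivalent of the down()/up() fast paths above.
 * Not part of the header: it skips the register-preserving stubs
 * (__down_failed/__up_wakeup) and calls the C slow paths directly.
 */
static inline void down_c_sketch(struct semaphore *sem)
{
	/* LOCK decl; jns 1f  ==  take the slow path only if count went negative */
	if (atomic_dec_return(&sem->count) < 0)
		__down(sem);		/* slow path: sleep until up() wakes us */
}

static inline void up_c_sketch(struct semaphore *sem)
{
	/* LOCK incl; jg 1f  ==  wake someone unless count became positive */
	if (atomic_inc_return(&sem->count) <= 0)
		__up(sem);		/* slow path: wake one sleeper */
}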
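
A brief usage sketch may also help. Everything here is hypothetical (example_lock, example_buf, example_write() and example_poke() are invented for illustration, and the <asm/semaphore.h> include assumes the usual arch wrapper pulls this header in); it shows the typical pattern of declaring a mutex-style semaphore with DECLARE_MUTEX() and pairing down_interruptible()/down_trylock() with up().

#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <asm/semaphore.h>

/* Hypothetical example, not part of this change. */
static DECLARE_MUTEX(example_lock);	/* binary semaphore, count starts at 1 */
static char example_buf[64];

static ssize_t example_write(const char *data, size_t len)
{
	if (len > sizeof(example_buf))
		return -EINVAL;

	/* Sleep until the semaphore is ours; bail out if a signal arrives. */
	if (down_interruptible(&example_lock))
		return -EINTR;

	memcpy(example_buf, data, len);
	up(&example_lock);		/* release, waking any sleeper */
	return len;
}

/* Non-sleeping path: give up immediately if the lock is contended. */
static int example_poke(char c)
{
	if (down_trylock(&example_lock))	/* non-zero means not acquired */
		return -EBUSY;
	example_buf[0] = c;
	up(&example_lock);
	return 0;
}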