author     Becky Bruce <bgill@freescale.com>        2005-09-23 12:07:36 -0400
committer  Paul Mackerras <paulus@samba.org>        2005-09-25 08:38:46 -0400
commit     d6a4c847e43c851cc0ddf73087a730227223f989 (patch)
tree       4f1991e81f8c87296222145e456a682a41d6be8e /include/asm-powerpc/semaphore.h
parent     342e73b3d6fd1321d1eaaa6a935f267a300ceebd (diff)
[PATCH] powerpc: merge semaphore.h
Adopted the ppc64 version of semaphore.h.  The 32-bit version used smp_wmb(), but recent updates to atomic.h mean this is no longer required.  The 64-bit version made use of unlikely(), which has been retained in the combined version.

This patch requires the recent atomic.h patch.

Signed-off-by: Becky Bruce <becky.bruce@freescale.com>
Signed-off-by: Kumar Gala <kumar.gala@freescale.com>
Signed-off-by: Paul Mackerras <paulus@samba.org>
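As context for the fast-path design described above, here is a minimal, hypothetical caller of the API this header provides; the names my_driver_lock and my_driver_op are illustrative only and not part of the patch. In the uncontended case down_interruptible() and up() each reduce to a single atomic operation, which is why the slow-path branch keeps the unlikely() annotation.

/* Hypothetical caller, not part of this patch; names are illustrative. */
#include <linux/errno.h>
#include <asm/semaphore.h>

static DECLARE_MUTEX(my_driver_lock);	/* semaphore with count initialised to 1 */

static int my_driver_op(void)
{
	/* Fast path: one atomic_dec_return(); sleeps only under contention. */
	if (down_interruptible(&my_driver_lock))
		return -ERESTARTSYS;	/* woken by a signal, lock not taken */

	/* ... critical section ... */

	up(&my_driver_lock);		/* wakes a sleeper if any are waiting */
	return 0;
}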
Diffstat (limited to 'include/asm-powerpc/semaphore.h')
-rw-r--r--  include/asm-powerpc/semaphore.h  |  98
1 file changed, 98 insertions(+), 0 deletions(-)
diff --git a/include/asm-powerpc/semaphore.h b/include/asm-powerpc/semaphore.h
new file mode 100644
index 000000000000..fd42fe97158f
--- /dev/null
+++ b/include/asm-powerpc/semaphore.h
@@ -0,0 +1,98 @@
#ifndef _ASM_POWERPC_SEMAPHORE_H
#define _ASM_POWERPC_SEMAPHORE_H

/*
 * Remove spinlock-based RW semaphores; RW semaphore definitions are
 * now in rwsem.h and we use the generic lib/rwsem.c implementation.
 * Rework semaphores to use atomic_dec_if_positive.
 * -- Paul Mackerras (paulus@samba.org)
 */

#ifdef __KERNEL__

#include <asm/atomic.h>
#include <asm/system.h>
#include <linux/wait.h>
#include <linux/rwsem.h>

struct semaphore {
	/*
	 * Note that any negative value of count is equivalent to 0,
	 * but additionally indicates that some process(es) might be
	 * sleeping on `wait'.
	 */
	atomic_t count;
	wait_queue_head_t wait;
};

#define __SEMAPHORE_INITIALIZER(name, n)				\
{									\
	.count		= ATOMIC_INIT(n),				\
	.wait		= __WAIT_QUEUE_HEAD_INITIALIZER((name).wait)	\
}

#define __MUTEX_INITIALIZER(name)					\
	__SEMAPHORE_INITIALIZER(name, 1)

#define __DECLARE_SEMAPHORE_GENERIC(name, count)			\
	struct semaphore name = __SEMAPHORE_INITIALIZER(name,count)

#define DECLARE_MUTEX(name)		__DECLARE_SEMAPHORE_GENERIC(name, 1)
#define DECLARE_MUTEX_LOCKED(name)	__DECLARE_SEMAPHORE_GENERIC(name, 0)

static inline void sema_init (struct semaphore *sem, int val)
{
	atomic_set(&sem->count, val);
	init_waitqueue_head(&sem->wait);
}

static inline void init_MUTEX (struct semaphore *sem)
{
	sema_init(sem, 1);
}

static inline void init_MUTEX_LOCKED (struct semaphore *sem)
{
	sema_init(sem, 0);
}

extern void __down(struct semaphore * sem);
extern int __down_interruptible(struct semaphore * sem);
extern void __up(struct semaphore * sem);

static inline void down(struct semaphore * sem)
{
	might_sleep();

	/*
	 * Try to get the semaphore, take the slow path if we fail.
	 */
	if (unlikely(atomic_dec_return(&sem->count) < 0))
		__down(sem);
}

static inline int down_interruptible(struct semaphore * sem)
{
	int ret = 0;

	might_sleep();

	if (unlikely(atomic_dec_return(&sem->count) < 0))
		ret = __down_interruptible(sem);
	return ret;
}

static inline int down_trylock(struct semaphore * sem)
{
	return atomic_dec_if_positive(&sem->count) < 0;
}

static inline void up(struct semaphore * sem)
{
	if (unlikely(atomic_inc_return(&sem->count) <= 0))
		__up(sem);
}

#endif /* __KERNEL__ */

#endif /* _ASM_POWERPC_SEMAPHORE_H */
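Where sleeping is not allowed (for example in interrupt context), down_trylock() above provides the non-blocking variant: it returns 0 when the semaphore was acquired and non-zero when the count was already zero or negative. A short hypothetical sketch, reusing the illustrative my_driver_lock from the earlier example:

/* Hypothetical non-blocking path; my_driver_lock is illustrative only. */
static int my_driver_poll(void)
{
	if (down_trylock(&my_driver_lock))
		return -EBUSY;		/* already held; do not sleep */

	/* ... critical section ... */

	up(&my_driver_lock);
	return 0;
}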