author		Matthew Wilcox <matthew@wil.cx>			2008-03-07 21:55:58 -0500
committer	Matthew Wilcox <willy@linux.intel.com>		2008-04-17 10:42:34 -0400
commit		64ac24e738823161693bf791f87adc802cf529ff (patch)
tree		19c0b0cf314d4394ca580c05b86cdf874ce0a167 /include/asm-arm
parent		e48b3deee475134585eed03e7afebe4bf9e0dba9 (diff)
Generic semaphore implementation
Semaphores are no longer performance-critical, so a generic C implementation
is better for maintainability, debuggability and extensibility. Thanks to
Peter Zijlstra for fixing the lockdep warning. Thanks to Harvey Harrison for
pointing out that the unlikely() was unnecessary.

Signed-off-by: Matthew Wilcox <willy@linux.intel.com>
Acked-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'include/asm-arm')
-rw-r--r--	include/asm-arm/semaphore-helper.h	84
-rw-r--r--	include/asm-arm/semaphore.h		99
2 files changed, 1 insertion(+), 182 deletions(-)
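
For context, the generic implementation these per-architecture files are
converted to lives in include/linux/semaphore.h and kernel/semaphore.c,
added elsewhere in this commit. A simplified sketch of its shape (the
slow-path helpers __down()/__up() are only declared here so the fragment
is self-contained; details are illustrative rather than a verbatim copy):

#include <linux/list.h>
#include <linux/spinlock.h>

/* Simplified sketch of the generic semaphore; illustrative, not verbatim. */
struct semaphore {
	spinlock_t		lock;		/* protects count and wait_list */
	unsigned int		count;		/* resources still available */
	struct list_head	wait_list;	/* tasks blocked in down() */
};

void __down(struct semaphore *sem);		/* queue on wait_list and sleep */
void __up(struct semaphore *sem);		/* wake the first queued waiter */

void down(struct semaphore *sem)
{
	unsigned long flags;

	spin_lock_irqsave(&sem->lock, flags);
	if (sem->count > 0)
		sem->count--;			/* uncontended fast path */
	else
		__down(sem);			/* slow path: block until up() */
	spin_unlock_irqrestore(&sem->lock, flags);
}

void up(struct semaphore *sem)
{
	unsigned long flags;

	spin_lock_irqsave(&sem->lock, flags);
	if (list_empty(&sem->wait_list))
		sem->count++;			/* no waiters: just bank the count */
	else
		__up(sem);			/* hand the wakeup to a waiter */
	spin_unlock_irqrestore(&sem->lock, flags);
}

Every state change happens under one ordinary spinlock, which is what makes
the generic version easy to reason about compared with the per-architecture
atomic-plus-helper schemes deleted below.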
diff --git a/include/asm-arm/semaphore-helper.h b/include/asm-arm/semaphore-helper.h
deleted file mode 100644
index 1d7f1987edb9..000000000000
--- a/include/asm-arm/semaphore-helper.h
+++ /dev/null
@@ -1,84 +0,0 @@
-#ifndef ASMARM_SEMAPHORE_HELPER_H
-#define ASMARM_SEMAPHORE_HELPER_H
-
-/*
- * These two _must_ execute atomically wrt each other.
- */
-static inline void wake_one_more(struct semaphore * sem)
-{
-	unsigned long flags;
-
-	spin_lock_irqsave(&semaphore_wake_lock, flags);
-	if (atomic_read(&sem->count) <= 0)
-		sem->waking++;
-	spin_unlock_irqrestore(&semaphore_wake_lock, flags);
-}
-
-static inline int waking_non_zero(struct semaphore *sem)
-{
-	unsigned long flags;
-	int ret = 0;
-
-	spin_lock_irqsave(&semaphore_wake_lock, flags);
-	if (sem->waking > 0) {
-		sem->waking--;
-		ret = 1;
-	}
-	spin_unlock_irqrestore(&semaphore_wake_lock, flags);
-	return ret;
-}
-
-/*
- * waking non zero interruptible
- *	1	got the lock
- *	0	go to sleep
- *	-EINTR	interrupted
- *
- * We must undo the sem->count down_interruptible() increment while we are
- * protected by the spinlock in order to make this atomic_inc() with the
- * atomic_read() in wake_one_more(), otherwise we can race. -arca
- */
-static inline int waking_non_zero_interruptible(struct semaphore *sem,
-						struct task_struct *tsk)
-{
-	unsigned long flags;
-	int ret = 0;
-
-	spin_lock_irqsave(&semaphore_wake_lock, flags);
-	if (sem->waking > 0) {
-		sem->waking--;
-		ret = 1;
-	} else if (signal_pending(tsk)) {
-		atomic_inc(&sem->count);
-		ret = -EINTR;
-	}
-	spin_unlock_irqrestore(&semaphore_wake_lock, flags);
-	return ret;
-}
-
-/*
- * waking_non_zero_try_lock:
- *	1	failed to lock
- *	0	got the lock
- *
- * We must undo the sem->count down_interruptible() increment while we are
- * protected by the spinlock in order to make this atomic_inc() with the
- * atomic_read() in wake_one_more(), otherwise we can race. -arca
- */
-static inline int waking_non_zero_trylock(struct semaphore *sem)
-{
-	unsigned long flags;
-	int ret = 1;
-
-	spin_lock_irqsave(&semaphore_wake_lock, flags);
-	if (sem->waking <= 0)
-		atomic_inc(&sem->count);
-	else {
-		sem->waking--;
-		ret = 0;
-	}
-	spin_unlock_irqrestore(&semaphore_wake_lock, flags);
-	return ret;
-}
-
-#endif
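
The "-arca" comments above flag the subtle part of the old design: a sleeper
backing out of down_interruptible() must re-increment sem->count inside the
same semaphore_wake_lock region that up() uses for its atomic_read(), or a
concurrent up() could test a stale count and bank a wakeup nobody will ever
claim. A hedged userspace analogue of that handshake (hypothetical names,
with a pthread mutex standing in for semaphore_wake_lock):

#include <pthread.h>

/* Hypothetical userspace analogue of the deleted ARM helpers. */
struct waking_sem {
	int count;		/* plays the kernel's atomic_t count */
	int waking;		/* wakeups banked by up(), not yet claimed */
	pthread_mutex_t wake_lock;
};

/* Waker side: bank a wakeup only when a sleeper may exist (count <= 0). */
static void wake_one_more(struct waking_sem *s)
{
	pthread_mutex_lock(&s->wake_lock);
	if (s->count <= 0)
		s->waking++;
	pthread_mutex_unlock(&s->wake_lock);
}

/* Sleeper side: claim a banked wakeup, or back out after a signal.
 * The count increment happens under the same lock as the waker's
 * count test, so the two sides can never interleave badly. */
static int waking_non_zero_interruptible(struct waking_sem *s, int signalled)
{
	int ret = 0;

	pthread_mutex_lock(&s->wake_lock);
	if (s->waking > 0) {
		s->waking--;
		ret = 1;	/* got the semaphore */
	} else if (signalled) {
		s->count++;	/* undo down()'s earlier decrement */
		ret = -1;	/* caller returns -EINTR */
	}
	pthread_mutex_unlock(&s->wake_lock);
	return ret;
}

The generic implementation makes this bookkeeping unnecessary: with count
and the wait list under one lock, there is no separate "waking" credit to
hand off.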
diff --git a/include/asm-arm/semaphore.h b/include/asm-arm/semaphore.h
index 1c8b441f89e3..d9b2034ed1d2 100644
--- a/include/asm-arm/semaphore.h
+++ b/include/asm-arm/semaphore.h
@@ -1,98 +1 @@
-/*
- * linux/include/asm-arm/semaphore.h
- */
-#ifndef __ASM_ARM_SEMAPHORE_H
-#define __ASM_ARM_SEMAPHORE_H
-
-#include <linux/linkage.h>
-#include <linux/spinlock.h>
-#include <linux/wait.h>
-#include <linux/rwsem.h>
-
-#include <asm/atomic.h>
-#include <asm/locks.h>
-
-struct semaphore {
-	atomic_t count;
-	int sleepers;
-	wait_queue_head_t wait;
-};
-
-#define __SEMAPHORE_INIT(name, cnt)				\
-{								\
-	.count	= ATOMIC_INIT(cnt),				\
-	.wait	= __WAIT_QUEUE_HEAD_INITIALIZER((name).wait),	\
-}
-
-#define __DECLARE_SEMAPHORE_GENERIC(name,count)	\
-	struct semaphore name = __SEMAPHORE_INIT(name,count)
-
-#define DECLARE_MUTEX(name) __DECLARE_SEMAPHORE_GENERIC(name,1)
-
-static inline void sema_init(struct semaphore *sem, int val)
-{
-	atomic_set(&sem->count, val);
-	sem->sleepers = 0;
-	init_waitqueue_head(&sem->wait);
-}
-
-static inline void init_MUTEX(struct semaphore *sem)
-{
-	sema_init(sem, 1);
-}
-
-static inline void init_MUTEX_LOCKED(struct semaphore *sem)
-{
-	sema_init(sem, 0);
-}
-
-/*
- * special register calling convention
- */
-asmlinkage void __down_failed(void);
-asmlinkage int __down_interruptible_failed(void);
-asmlinkage int __down_trylock_failed(void);
-asmlinkage void __up_wakeup(void);
-
-extern void __down(struct semaphore * sem);
-extern int __down_interruptible(struct semaphore * sem);
-extern int __down_trylock(struct semaphore * sem);
-extern void __up(struct semaphore * sem);
-
-/*
- * This is ugly, but we want the default case to fall through.
- * "__down" is the actual routine that waits...
- */
-static inline void down(struct semaphore * sem)
-{
-	might_sleep();
-	__down_op(sem, __down_failed);
-}
-
-/*
- * This is ugly, but we want the default case to fall through.
- * "__down_interruptible" is the actual routine that waits...
- */
-static inline int down_interruptible (struct semaphore * sem)
-{
-	might_sleep();
-	return __down_op_ret(sem, __down_interruptible_failed);
-}
-
-static inline int down_trylock(struct semaphore *sem)
-{
-	return __down_op_ret(sem, __down_trylock_failed);
-}
-
-/*
- * Note! This is subtle. We jump to wake people up only if
- * the semaphore was negative (== somebody was waiting on it).
- * The default case (no contention) will result in NO
- * jumps for both down() and up().
- */
-static inline void up(struct semaphore * sem)
-{
-	__up_op(sem, __up_wakeup);
-}
-
-#endif
+#include <linux/semaphore.h>
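
Callers are unaffected by the conversion: sema_init(), down(),
down_interruptible(), down_trylock() and up() keep their signatures, now
supplied by linux/semaphore.h for every architecture. A minimal usage
sketch (hypothetical driver-style code, not part of this commit):

#include <linux/errno.h>
#include <linux/semaphore.h>

static DECLARE_MUTEX(my_sem);	/* hypothetical; a semaphore with count 1 */

static int my_do_work(void)
{
	/* Sleep until the semaphore is available, or a signal arrives. */
	if (down_interruptible(&my_sem))
		return -EINTR;

	/* ... critical section protected by my_sem ... */

	up(&my_sem);		/* release; wakes one waiter, if any */
	return 0;
}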