Diffstat (limited to 'include/asm-sh64/semaphore-helper.h')
 include/asm-sh64/semaphore-helper.h | 101 ++++++++++++++++++++++++++++++++++
 1 file changed, 101 insertions(+), 0 deletions(-)
diff --git a/include/asm-sh64/semaphore-helper.h b/include/asm-sh64/semaphore-helper.h
new file mode 100644
index 000000000000..fcfafe263e86
--- /dev/null
+++ b/include/asm-sh64/semaphore-helper.h
@@ -0,0 +1,101 @@
#ifndef __ASM_SH64_SEMAPHORE_HELPER_H
#define __ASM_SH64_SEMAPHORE_HELPER_H

/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * include/asm-sh64/semaphore-helper.h
 *
 * Copyright (C) 2000, 2001 Paolo Alberelli
 *
 */
#include <asm/errno.h>

/*
 * SMP- and interrupt-safe semaphore helper functions.
 *
 * (C) Copyright 1996 Linus Torvalds
 * (C) Copyright 1999 Andrea Arcangeli
 */

/*
 * These two _must_ execute atomically wrt each other.
 *
 * This is trivially done with load_locked/store_cond,
 * which we have. Let the rest of the losers suck eggs.
 */
static __inline__ void wake_one_more(struct semaphore * sem)
{
	atomic_inc((atomic_t *)&sem->sleepers);
}

/*
 * waking_non_zero:
 *	1	consumed a pending wakeup, we have the semaphore
 *	0	nothing pending, caller should go (back) to sleep
 */
static __inline__ int waking_non_zero(struct semaphore *sem)
{
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&semaphore_wake_lock, flags);
	if (sem->sleepers > 0) {
		sem->sleepers--;
		ret = 1;
	}
	spin_unlock_irqrestore(&semaphore_wake_lock, flags);
	return ret;
}

/*
 * waking_non_zero_interruptible:
 *	1	got the lock
 *	0	go to sleep
 *	-EINTR	interrupted
 *
 * We must undo the sem->count decrement done by down_interruptible() while
 * we are still protected by the spinlock, so that the undo is atomic with
 * respect to wake_one_more(); otherwise we can race. -arca
 */
static __inline__ int waking_non_zero_interruptible(struct semaphore *sem,
						    struct task_struct *tsk)
{
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&semaphore_wake_lock, flags);
	if (sem->sleepers > 0) {
		sem->sleepers--;
		ret = 1;
	} else if (signal_pending(tsk)) {
		atomic_inc(&sem->count);
		ret = -EINTR;
	}
	spin_unlock_irqrestore(&semaphore_wake_lock, flags);
	return ret;
}

/*
 * waking_non_zero_trylock:
 *	1	failed to lock
 *	0	got the lock
 *
 * We must undo the sem->count decrement done by down_trylock() while we are
 * still protected by the spinlock, so that the undo is atomic with respect
 * to wake_one_more(); otherwise we can race. -arca
 */
static __inline__ int waking_non_zero_trylock(struct semaphore *sem)
{
	unsigned long flags;
	int ret = 1;

	spin_lock_irqsave(&semaphore_wake_lock, flags);
	if (sem->sleepers <= 0)
		atomic_inc(&sem->count);
	else {
		sem->sleepers--;
		ret = 0;
	}
	spin_unlock_irqrestore(&semaphore_wake_lock, flags);
	return ret;
}

#endif /* __ASM_SH64_SEMAPHORE_HELPER_H */
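
For context, the following is a rough sketch of how an arch-level semaphore
implementation would typically consume these helpers from its slow paths. It
is illustrative only and is not the actual arch/sh64/kernel/semaphore.c: the
__up()/__down_interruptible() bodies, the wait-queue handling, and the
semaphore_wake_lock definition below are assumptions modeled on the classic
sleepers-based semaphore pattern.

/*
 * Illustrative sketch only -- one plausible consumer of the helpers above.
 */
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/spinlock.h>
#include <asm/semaphore.h>
#include <asm/semaphore-helper.h>

spinlock_t semaphore_wake_lock = SPIN_LOCK_UNLOCKED;

void __up(struct semaphore *sem)
{
	/* Record one pending wakeup, then wake an exclusive waiter. */
	wake_one_more(sem);
	wake_up(&sem->wait);
}

int __down_interruptible(struct semaphore *sem)
{
	int ret = 0;
	DECLARE_WAITQUEUE(wait, current);

	add_wait_queue_exclusive(&sem->wait, &wait);
	for (;;) {
		set_current_state(TASK_INTERRUPTIBLE);
		/*
		 *  1      -> consumed a pending wakeup, semaphore acquired
		 *  0      -> nothing pending, sleep and retry
		 *  -EINTR -> signal arrived; the helper already gave the
		 *            count back, so just report the error
		 */
		ret = waking_non_zero_interruptible(sem, current);
		if (ret)
			break;
		schedule();
	}
	set_current_state(TASK_RUNNING);
	remove_wait_queue(&sem->wait, &wait);

	return (ret == 1) ? 0 : ret;
}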