path: root/include/asm-m32r/semaphore.h
Diffstat (limited to 'include/asm-m32r/semaphore.h')
-rw-r--r--   include/asm-m32r/semaphore.h   205
1 files changed, 205 insertions, 0 deletions
diff --git a/include/asm-m32r/semaphore.h b/include/asm-m32r/semaphore.h
new file mode 100644
index 000000000000..53e3c60f21ec
--- /dev/null
+++ b/include/asm-m32r/semaphore.h
@@ -0,0 +1,205 @@
#ifndef _ASM_M32R_SEMAPHORE_H
#define _ASM_M32R_SEMAPHORE_H

#include <linux/linkage.h>

#ifdef __KERNEL__

/*
 * SMP- and interrupt-safe semaphores..
 *
 * Copyright (C) 1996 Linus Torvalds
 * Copyright (C) 2004 Hirokazu Takata <takata at linux-m32r.org>
 */

#include <linux/config.h>
#include <linux/wait.h>
#include <linux/rwsem.h>
#include <asm/assembler.h>
#include <asm/system.h>
#include <asm/atomic.h>

struct semaphore {
        atomic_t count;
        int sleepers;
        wait_queue_head_t wait;
};

#define __SEMAPHORE_INITIALIZER(name, n)                                \
{                                                                       \
        .count          = ATOMIC_INIT(n),                               \
        .sleepers       = 0,                                            \
        .wait           = __WAIT_QUEUE_HEAD_INITIALIZER((name).wait)    \
}

#define __MUTEX_INITIALIZER(name) \
        __SEMAPHORE_INITIALIZER(name,1)

#define __DECLARE_SEMAPHORE_GENERIC(name,count) \
        struct semaphore name = __SEMAPHORE_INITIALIZER(name,count)

#define DECLARE_MUTEX(name) __DECLARE_SEMAPHORE_GENERIC(name,1)
#define DECLARE_MUTEX_LOCKED(name) __DECLARE_SEMAPHORE_GENERIC(name,0)
static inline void sema_init (struct semaphore *sem, int val)
{
/*
 *      *sem = (struct semaphore)__SEMAPHORE_INITIALIZER((*sem),val);
 *
 * I'd rather use the more flexible initialization above, but sadly
 * GCC 2.7.2.3 emits a bogus warning.  EGCS doesn't.  Oh well.
 */
        atomic_set(&sem->count, val);
        sem->sleepers = 0;
        init_waitqueue_head(&sem->wait);
}

static inline void init_MUTEX (struct semaphore *sem)
{
        sema_init(sem, 1);
}

static inline void init_MUTEX_LOCKED (struct semaphore *sem)
{
        sema_init(sem, 0);
}
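
/*
 * Illustrative usage sketch (editor's addition, not part of the original
 * header): how callers typically declare and initialize semaphores with
 * the API above.  The example_* identifiers are hypothetical.
 */
#if 0   /* example only */
static DECLARE_MUTEX(example_static_sem);       /* count starts at 1 */

static struct semaphore example_dynamic_sem;

static void example_init(void)
{
        /* Counting semaphore: up to three holders at once. */
        sema_init(&example_dynamic_sem, 3);

        /* init_MUTEX(&example_dynamic_sem) is equivalent to
         * sema_init(&example_dynamic_sem, 1); init_MUTEX_LOCKED()
         * starts the count at 0, i.e. already "held". */
}
#endif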

asmlinkage void __down_failed(void /* special register calling convention */);
asmlinkage int __down_failed_interruptible(void /* params in registers */);
asmlinkage int __down_failed_trylock(void /* params in registers */);
asmlinkage void __up_wakeup(void /* special register calling convention */);

asmlinkage void __down(struct semaphore * sem);
asmlinkage int __down_interruptible(struct semaphore * sem);
asmlinkage int __down_trylock(struct semaphore * sem);
asmlinkage void __up(struct semaphore * sem);

/*
 * Atomically decrement the semaphore's count.  If it goes negative,
 * block the calling thread in the TASK_UNINTERRUPTIBLE state.
 */
static inline void down(struct semaphore * sem)
{
        unsigned long flags;
        long count;

        might_sleep();
        local_irq_save(flags);
        __asm__ __volatile__ (
                "# down                         \n\t"
                DCACHE_CLEAR("%0", "r4", "%1")
                M32R_LOCK" %0, @%1;             \n\t"
                "addi   %0, #-1;                \n\t"
                M32R_UNLOCK" %0, @%1;           \n\t"
                : "=&r" (count)
                : "r" (&sem->count)
                : "memory"
#ifdef CONFIG_CHIP_M32700_TS1
                , "r4"
#endif  /* CONFIG_CHIP_M32700_TS1 */
        );
        local_irq_restore(flags);

        if (unlikely(count < 0))
                __down(sem);
}
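
/*
 * Illustrative usage sketch (editor's addition, not part of the original
 * header): the canonical down()/up() pairing around a critical section.
 * example_lock and example_count are hypothetical.  Note that down() may
 * sleep, so it must not be called from interrupt or other atomic context.
 */
#if 0   /* example only */
static DECLARE_MUTEX(example_lock);
static int example_count;

static void example_increment(void)
{
        down(&example_lock);            /* sleeps if the count went negative */
        example_count++;                /* exclusive access here */
        up(&example_lock);              /* wakes a waiter if one was queued */
}
#endif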

/*
 * Interruptible attempt to acquire a semaphore.  If we obtained it,
 * return zero.  If we were interrupted, return -EINTR.
 */
static inline int down_interruptible(struct semaphore * sem)
{
        unsigned long flags;
        long count;
        int result = 0;

        might_sleep();
        local_irq_save(flags);
        __asm__ __volatile__ (
                "# down_interruptible           \n\t"
                DCACHE_CLEAR("%0", "r4", "%1")
                M32R_LOCK" %0, @%1;             \n\t"
                "addi   %0, #-1;                \n\t"
                M32R_UNLOCK" %0, @%1;           \n\t"
                : "=&r" (count)
                : "r" (&sem->count)
                : "memory"
#ifdef CONFIG_CHIP_M32700_TS1
                , "r4"
#endif  /* CONFIG_CHIP_M32700_TS1 */
        );
        local_irq_restore(flags);

        if (unlikely(count < 0))
                result = __down_interruptible(sem);

        return result;
}
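
/*
 * Illustrative usage sketch (editor's addition, not part of the original
 * header): checking the down_interruptible() return value.  example_dev
 * and its sem member are hypothetical.
 */
#if 0   /* example only */
static int example_do_io(struct example_dev *dev)
{
        int ret = down_interruptible(&dev->sem);

        if (ret)
                return ret;     /* -EINTR: a signal arrived while we slept */

        /* ... exclusive access to dev ... */

        up(&dev->sem);
        return 0;
}
#endif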

/*
 * Non-blockingly attempt to down() a semaphore.
 * Returns zero if we acquired it, and non-zero if it was not available.
 */
static inline int down_trylock(struct semaphore * sem)
{
        unsigned long flags;
        long count;
        int result = 0;

        local_irq_save(flags);
        __asm__ __volatile__ (
                "# down_trylock                 \n\t"
                DCACHE_CLEAR("%0", "r4", "%1")
                M32R_LOCK" %0, @%1;             \n\t"
                "addi   %0, #-1;                \n\t"
                M32R_UNLOCK" %0, @%1;           \n\t"
                : "=&r" (count)
                : "r" (&sem->count)
                : "memory"
#ifdef CONFIG_CHIP_M32700_TS1
                , "r4"
#endif  /* CONFIG_CHIP_M32700_TS1 */
        );
        local_irq_restore(flags);

        if (unlikely(count < 0))
                result = __down_trylock(sem);

        return result;
}
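
/*
 * Illustrative usage sketch (editor's addition, not part of the original
 * header): down_trylock() never sleeps, so it can be used where blocking
 * is not allowed.  example_stats_sem and example_dump_stats() are
 * hypothetical; -EBUSY assumes <linux/errno.h>.
 */
#if 0   /* example only */
static int example_dump_stats(void)
{
        if (down_trylock(&example_stats_sem))
                return -EBUSY;  /* non-zero: not available, give up instead of sleeping */

        /* ... snapshot the statistics ... */

        up(&example_stats_sem);
        return 0;
}
#endif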

/*
 * Note! This is subtle. We jump to wake people up only if
 * the semaphore was negative (== somebody was waiting on it).
 * The default case (no contention) will result in NO
 * jumps for both down() and up().
 */
static inline void up(struct semaphore * sem)
{
        unsigned long flags;
        long count;

        local_irq_save(flags);
        __asm__ __volatile__ (
                "# up                           \n\t"
                DCACHE_CLEAR("%0", "r4", "%1")
                M32R_LOCK" %0, @%1;             \n\t"
                "addi   %0, #1;                 \n\t"
                M32R_UNLOCK" %0, @%1;           \n\t"
                : "=&r" (count)
                : "r" (&sem->count)
                : "memory"
#ifdef CONFIG_CHIP_M32700_TS1
                , "r4"
#endif  /* CONFIG_CHIP_M32700_TS1 */
        );
        local_irq_restore(flags);

        if (unlikely(count <= 0))
                __up(sem);
}
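
/*
 * Illustrative usage sketch (editor's addition, not part of the original
 * header): counting use.  With the count initialized to 2, at most two
 * tasks are inside the section; a third down() drives the count negative
 * and sleeps in __down(), and the matching up() then sees count <= 0 and
 * calls __up() to wake it, the "somebody was waiting" case described
 * above.  example_pool_sem is hypothetical.
 */
#if 0   /* example only */
static struct semaphore example_pool_sem;       /* sema_init(&example_pool_sem, 2) at init time */

static void example_use_pool(void)
{
        down(&example_pool_sem);
        /* ... use one of the two pooled resources ... */
        up(&example_pool_sem);
}
#endif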

#endif  /* __KERNEL__ */

#endif  /* _ASM_M32R_SEMAPHORE_H */