author	Matthew Wilcox <matthew@wil.cx>	2008-03-07 21:55:58 -0500
committer	Matthew Wilcox <willy@linux.intel.com>	2008-04-17 10:42:34 -0400
commit	64ac24e738823161693bf791f87adc802cf529ff (patch)
tree	19c0b0cf314d4394ca580c05b86cdf874ce0a167 /include/asm-m32r
parent	e48b3deee475134585eed03e7afebe4bf9e0dba9 (diff)
Generic semaphore implementation
Semaphores are no longer performance-critical, so a generic C implementation is better for maintainability, debuggability and extensibility. Thanks to Peter Zijlstra for fixing the lockdep warning. Thanks to Harvey Harrison for pointing out that the unlikely() was unnecessary.

Signed-off-by: Matthew Wilcox <willy@linux.intel.com>
Acked-by: Ingo Molnar <mingo@elte.hu>
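For context, the generic implementation this series switches to drops the per-architecture atomic/asm fast paths in favour of a counter protected by a lock, with sleepers parked on a wait queue. The sketch below is an illustrative userspace analogue of that structure using POSIX threads; it is not a copy of the kernel's <linux/semaphore.h>, and the sem_sketch names are invented for the example.

/*
 * Userspace analogue of the "generic C semaphore" idea: a plain counter
 * protected by a lock, with sleepers parked on a wait primitive instead of
 * a per-architecture atomic fast path.  Illustrative sketch only.
 */
#include <pthread.h>

struct sem_sketch {
	pthread_mutex_t lock;	/* protects count; stands in for the spinlock */
	pthread_cond_t wait;	/* stands in for the kernel's wait list */
	unsigned int count;
};

static void sem_sketch_init(struct sem_sketch *s, unsigned int val)
{
	pthread_mutex_init(&s->lock, NULL);
	pthread_cond_init(&s->wait, NULL);
	s->count = val;
}

/* down(): sleep until a unit of the count is available, then consume it. */
static void sem_sketch_down(struct sem_sketch *s)
{
	pthread_mutex_lock(&s->lock);
	while (s->count == 0)
		pthread_cond_wait(&s->wait, &s->lock);
	s->count--;
	pthread_mutex_unlock(&s->lock);
}

/* down_trylock() convention: 0 on success, 1 if the count was unavailable. */
static int sem_sketch_down_trylock(struct sem_sketch *s)
{
	int failed;

	pthread_mutex_lock(&s->lock);
	failed = (s->count == 0);
	if (!failed)
		s->count--;
	pthread_mutex_unlock(&s->lock);
	return failed;
}

/* up(): release a unit and wake one sleeper, if any. */
static void sem_sketch_up(struct sem_sketch *s)
{
	pthread_mutex_lock(&s->lock);
	s->count++;
	pthread_cond_signal(&s->wait);
	pthread_mutex_unlock(&s->lock);
}

The kernel version keeps an explicit wait_list of sleeping tasks under a spinlock rather than using a condition variable, but the control flow of down()/down_trylock()/up() has the same shape, which is what lets one shared C file replace per-architecture code like the header removed below.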
Diffstat (limited to 'include/asm-m32r')
-rw-r--r--	include/asm-m32r/semaphore.h	145
1 file changed, 1 insertion, 144 deletions
diff --git a/include/asm-m32r/semaphore.h b/include/asm-m32r/semaphore.h
index b5bf95a6f2b4..d9b2034ed1d2 100644
--- a/include/asm-m32r/semaphore.h
+++ b/include/asm-m32r/semaphore.h
@@ -1,144 +1 @@
-#ifndef _ASM_M32R_SEMAPHORE_H
-#define _ASM_M32R_SEMAPHORE_H
-
-#include <linux/linkage.h>
-
-#ifdef __KERNEL__
-
-/*
- * SMP- and interrupt-safe semaphores..
- *
- * Copyright (C) 1996 Linus Torvalds
- * Copyright (C) 2004, 2006 Hirokazu Takata <takata at linux-m32r.org>
- */
-
-#include <linux/wait.h>
-#include <linux/rwsem.h>
-#include <asm/assembler.h>
-#include <asm/system.h>
-#include <asm/atomic.h>
-
-struct semaphore {
-	atomic_t count;
-	int sleepers;
-	wait_queue_head_t wait;
-};
-
-#define __SEMAPHORE_INITIALIZER(name, n)				\
-{									\
-	.count		= ATOMIC_INIT(n),				\
-	.sleepers	= 0,						\
-	.wait		= __WAIT_QUEUE_HEAD_INITIALIZER((name).wait)	\
-}
-
-#define __DECLARE_SEMAPHORE_GENERIC(name,count) \
-	struct semaphore name = __SEMAPHORE_INITIALIZER(name,count)
-
-#define DECLARE_MUTEX(name) __DECLARE_SEMAPHORE_GENERIC(name,1)
-
-static inline void sema_init (struct semaphore *sem, int val)
-{
-/*
- *	*sem = (struct semaphore)__SEMAPHORE_INITIALIZER((*sem),val);
- *
- * i'd rather use the more flexible initialization above, but sadly
- * GCC 2.7.2.3 emits a bogus warning. EGCS doesnt. Oh well.
- */
-	atomic_set(&sem->count, val);
-	sem->sleepers = 0;
-	init_waitqueue_head(&sem->wait);
-}
-
-static inline void init_MUTEX (struct semaphore *sem)
-{
-	sema_init(sem, 1);
-}
-
-static inline void init_MUTEX_LOCKED (struct semaphore *sem)
-{
-	sema_init(sem, 0);
-}
-
-asmlinkage void __down_failed(void /* special register calling convention */);
-asmlinkage int __down_failed_interruptible(void /* params in registers */);
-asmlinkage int __down_failed_trylock(void /* params in registers */);
-asmlinkage void __up_wakeup(void /* special register calling convention */);
-
-asmlinkage void __down(struct semaphore * sem);
-asmlinkage int __down_interruptible(struct semaphore * sem);
-asmlinkage int __down_trylock(struct semaphore * sem);
-asmlinkage void __up(struct semaphore * sem);
-
-/*
- * Atomically decrement the semaphore's count. If it goes negative,
- * block the calling thread in the TASK_UNINTERRUPTIBLE state.
- */
-static inline void down(struct semaphore * sem)
-{
-	might_sleep();
-	if (unlikely(atomic_dec_return(&sem->count) < 0))
-		__down(sem);
-}
-
-/*
- * Interruptible try to acquire a semaphore. If we obtained
- * it, return zero. If we were interrupted, returns -EINTR
- */
-static inline int down_interruptible(struct semaphore * sem)
-{
-	int result = 0;
-
-	might_sleep();
-	if (unlikely(atomic_dec_return(&sem->count) < 0))
-		result = __down_interruptible(sem);
-
-	return result;
-}
-
-/*
- * Non-blockingly attempt to down() a semaphore.
- * Returns zero if we acquired it
- */
-static inline int down_trylock(struct semaphore * sem)
-{
-	unsigned long flags;
-	long count;
-	int result = 0;
-
-	local_irq_save(flags);
-	__asm__ __volatile__ (
-		"# down_trylock			\n\t"
-		DCACHE_CLEAR("%0", "r4", "%1")
-		M32R_LOCK" %0, @%1;		\n\t"
-		"addi	%0, #-1;		\n\t"
-		M32R_UNLOCK" %0, @%1;		\n\t"
-		: "=&r" (count)
-		: "r" (&sem->count)
-		: "memory"
-#ifdef CONFIG_CHIP_M32700_TS1
-		, "r4"
-#endif	/* CONFIG_CHIP_M32700_TS1 */
-	);
-	local_irq_restore(flags);
-
-	if (unlikely(count < 0))
-		result = __down_trylock(sem);
-
-	return result;
-}
-
-/*
- * Note! This is subtle. We jump to wake people up only if
- * the semaphore was negative (== somebody was waiting on it).
- * The default case (no contention) will result in NO
- * jumps for both down() and up().
- */
-static inline void up(struct semaphore * sem)
-{
-	if (unlikely(atomic_inc_return(&sem->count) <= 0))
-		__up(sem);
-}
-
-#endif	/* __KERNEL__ */
-
-#endif	/* _ASM_M32R_SEMAPHORE_H */
+#include <linux/semaphore.h>
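The calling convention is unchanged by this conversion: down(), down_interruptible(), down_trylock() and up() keep the return-value semantics documented in the removed header; only their implementation moves out of per-architecture code. A hypothetical caller (the function and lock names below are invented for illustration) builds the same way before and after the change:

#include <linux/semaphore.h>

static DECLARE_MUTEX(example_lock);	/* binary semaphore, count starts at 1 */

static int example_update_shared_state(void)
{
	/* Sleep until the semaphore is ours; bail out if a signal arrives. */
	if (down_interruptible(&example_lock))
		return -EINTR;

	/* ... modify the state guarded by example_lock ... */

	up(&example_lock);
	return 0;
}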