diff options
Diffstat (limited to 'include/asm-powerpc/semaphore.h')
| -rw-r--r-- | include/asm-powerpc/semaphore.h | 95 |
1 files changed, 95 insertions, 0 deletions
diff --git a/include/asm-powerpc/semaphore.h b/include/asm-powerpc/semaphore.h new file mode 100644 index 000000000000..57369d2cadef --- /dev/null +++ b/include/asm-powerpc/semaphore.h | |||
| @@ -0,0 +1,95 @@ | |||
| 1 | #ifndef _ASM_POWERPC_SEMAPHORE_H | ||
| 2 | #define _ASM_POWERPC_SEMAPHORE_H | ||
| 3 | |||
| 4 | /* | ||
| 5 | * Remove spinlock-based RW semaphores; RW semaphore definitions are | ||
| 6 | * now in rwsem.h and we use the generic lib/rwsem.c implementation. | ||
| 7 | * Rework semaphores to use atomic_dec_if_positive. | ||
| 8 | * -- Paul Mackerras (paulus@samba.org) | ||
| 9 | */ | ||
| 10 | |||
| 11 | #ifdef __KERNEL__ | ||
| 12 | |||
| 13 | #include <asm/atomic.h> | ||
| 14 | #include <asm/system.h> | ||
| 15 | #include <linux/wait.h> | ||
| 16 | #include <linux/rwsem.h> | ||
| 17 | |||
/*
 * Counting semaphore.  `count' is the number of resources still
 * available; acquisition is a decrement, release an increment.
 */
struct semaphore {
	/*
	 * Note that any negative value of count is equivalent to 0,
	 * but additionally indicates that some process(es) might be
	 * sleeping on `wait'.
	 */
	atomic_t count;
	wait_queue_head_t wait;	/* tasks blocked waiting for the count to rise */
};
| 27 | |||
/*
 * Static initializer: count starts at `n', wait queue starts empty.
 * `name' must be the variable being initialized, so the wait-queue
 * head initializer can refer back to it.
 */
#define __SEMAPHORE_INITIALIZER(name, n)			\
{								\
	.count		= ATOMIC_INIT(n),			\
	.wait		= __WAIT_QUEUE_HEAD_INITIALIZER((name).wait) \
}
| 33 | |||
/* Define a statically-initialized semaphore with the given initial count. */
#define __DECLARE_SEMAPHORE_GENERIC(name, count) \
	struct semaphore name = __SEMAPHORE_INITIALIZER(name,count)
| 36 | |||
/* A mutex-style semaphore: one resource, initially available ... */
#define DECLARE_MUTEX(name)		__DECLARE_SEMAPHORE_GENERIC(name, 1)
/* ... or initially held (count 0), released by a later up(). */
#define DECLARE_MUTEX_LOCKED(name)	__DECLARE_SEMAPHORE_GENERIC(name, 0)
| 39 | |||
| 40 | static inline void sema_init (struct semaphore *sem, int val) | ||
| 41 | { | ||
| 42 | atomic_set(&sem->count, val); | ||
| 43 | init_waitqueue_head(&sem->wait); | ||
| 44 | } | ||
| 45 | |||
/* Initialize `sem' for use as an unlocked mutex (count 1). */
static inline void init_MUTEX (struct semaphore *sem)
{
	sema_init(sem, 1);
}
| 50 | |||
/* Initialize `sem' as an already-held mutex (count 0); up() unlocks it. */
static inline void init_MUTEX_LOCKED (struct semaphore *sem)
{
	sema_init(sem, 0);
}
| 55 | |||
/*
 * Contention slow paths, defined out of line elsewhere in the arch code
 * (presumably the powerpc semaphore implementation file -- confirm).
 * They sleep on / wake up sem->wait; the inline fast paths below only
 * call them when the atomic count operation indicates contention.
 */
extern void __down(struct semaphore * sem);
extern int __down_interruptible(struct semaphore * sem);
extern void __up(struct semaphore * sem);
| 59 | |||
| 60 | static inline void down(struct semaphore * sem) | ||
| 61 | { | ||
| 62 | might_sleep(); | ||
| 63 | |||
| 64 | /* | ||
| 65 | * Try to get the semaphore, take the slow path if we fail. | ||
| 66 | */ | ||
| 67 | if (unlikely(atomic_dec_return(&sem->count) < 0)) | ||
| 68 | __down(sem); | ||
| 69 | } | ||
| 70 | |||
| 71 | static inline int down_interruptible(struct semaphore * sem) | ||
| 72 | { | ||
| 73 | int ret = 0; | ||
| 74 | |||
| 75 | might_sleep(); | ||
| 76 | |||
| 77 | if (unlikely(atomic_dec_return(&sem->count) < 0)) | ||
| 78 | ret = __down_interruptible(sem); | ||
| 79 | return ret; | ||
| 80 | } | ||
| 81 | |||
| 82 | static inline int down_trylock(struct semaphore * sem) | ||
| 83 | { | ||
| 84 | return atomic_dec_if_positive(&sem->count) < 0; | ||
| 85 | } | ||
| 86 | |||
| 87 | static inline void up(struct semaphore * sem) | ||
| 88 | { | ||
| 89 | if (unlikely(atomic_inc_return(&sem->count) <= 0)) | ||
| 90 | __up(sem); | ||
| 91 | } | ||
| 92 | |||
| 93 | #endif /* __KERNEL__ */ | ||
| 94 | |||
| 95 | #endif /* _ASM_POWERPC_SEMAPHORE_H */ | ||
