Diffstat (limited to 'include/asm-sh')
-rw-r--r--  include/asm-sh/semaphore-helper.h |  89
-rw-r--r--  include/asm-sh/semaphore.h        | 116
2 files changed, 1 insertion, 204 deletions
diff --git a/include/asm-sh/semaphore-helper.h b/include/asm-sh/semaphore-helper.h
deleted file mode 100644
index bd8230c369ca..000000000000
--- a/include/asm-sh/semaphore-helper.h
+++ /dev/null
@@ -1,89 +0,0 @@
-#ifndef __ASM_SH_SEMAPHORE_HELPER_H
-#define __ASM_SH_SEMAPHORE_HELPER_H
-
-/*
- * SMP- and interrupt-safe semaphores helper functions.
- *
- * (C) Copyright 1996 Linus Torvalds
- * (C) Copyright 1999 Andrea Arcangeli
- */
-
-/*
- * These two _must_ execute atomically wrt each other.
- *
- * This is trivially done with load_locked/store_cond,
- * which we have. Let the rest of the losers suck eggs.
- */
-static __inline__ void wake_one_more(struct semaphore * sem)
-{
-	atomic_inc((atomic_t *)&sem->sleepers);
-}
-
-static __inline__ int waking_non_zero(struct semaphore *sem)
-{
-	unsigned long flags;
-	int ret = 0;
-
-	spin_lock_irqsave(&semaphore_wake_lock, flags);
-	if (sem->sleepers > 0) {
-		sem->sleepers--;
-		ret = 1;
-	}
-	spin_unlock_irqrestore(&semaphore_wake_lock, flags);
-	return ret;
-}
-
-/*
- * waking_non_zero_interruptible:
- *	1	got the lock
- *	0	go to sleep
- *	-EINTR	interrupted
- *
- * We must undo the sem->count down_interruptible() increment while we are
- * protected by the spinlock in order to make atomic this atomic_inc() with the
- * atomic_read() in wake_one_more(), otherwise we can race. -arca
- */
-static __inline__ int waking_non_zero_interruptible(struct semaphore *sem,
-						struct task_struct *tsk)
-{
-	unsigned long flags;
-	int ret = 0;
-
-	spin_lock_irqsave(&semaphore_wake_lock, flags);
-	if (sem->sleepers > 0) {
-		sem->sleepers--;
-		ret = 1;
-	} else if (signal_pending(tsk)) {
-		atomic_inc(&sem->count);
-		ret = -EINTR;
-	}
-	spin_unlock_irqrestore(&semaphore_wake_lock, flags);
-	return ret;
-}
-
-/*
- * waking_non_zero_trylock:
- *	1	failed to lock
- *	0	got the lock
- *
- * We must undo the sem->count down_trylock() increment while we are
- * protected by the spinlock in order to make atomic this atomic_inc() with the
- * atomic_read() in wake_one_more(), otherwise we can race. -arca
- */
-static __inline__ int waking_non_zero_trylock(struct semaphore *sem)
-{
-	unsigned long flags;
-	int ret = 1;
-
-	spin_lock_irqsave(&semaphore_wake_lock, flags);
-	if (sem->sleepers <= 0)
-		atomic_inc(&sem->count);
-	else {
-		sem->sleepers--;
-		ret = 0;
-	}
-	spin_unlock_irqrestore(&semaphore_wake_lock, flags);
-	return ret;
-}
-
-#endif /* __ASM_SH_SEMAPHORE_HELPER_H */
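
The helpers removed above implement a token handoff: up() publishes a "waking" token through wake_one_more(), and each sleeper races to claim one in waking_non_zero() under semaphore_wake_lock. Below is a minimal user-space model of that protocol, for illustration only: a pthread mutex stands in for the spinlock, C11 atomics for atomic_t, and a spin loop replaces the kernel's sleep on sem->wait. None of these names are kernel API.

/*
 * Hypothetical user-space sketch of the count/sleepers handoff.
 * Build with: cc -std=c11 -pthread sem_model.c
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

struct sem {
	atomic_int count;	/* >= 0: free slots; < 0: waiters */
	atomic_int sleepers;	/* "waking" tokens granted by up() */
};

static pthread_mutex_t wake_lock = PTHREAD_MUTEX_INITIALIZER;

static void wake_one_more(struct sem *s)
{
	atomic_fetch_add(&s->sleepers, 1);	/* grant one token */
}

static int waking_non_zero(struct sem *s)
{
	int ret = 0;

	pthread_mutex_lock(&wake_lock);		/* serialize claimants */
	if (atomic_load(&s->sleepers) > 0) {
		atomic_fetch_sub(&s->sleepers, 1);
		ret = 1;			/* token claimed: we hold the sem */
	}
	pthread_mutex_unlock(&wake_lock);
	return ret;
}

static void down(struct sem *s)
{
	if (atomic_fetch_sub(&s->count, 1) - 1 < 0)	/* count went negative */
		while (!waking_non_zero(s))
			;	/* the kernel sleeps on sem->wait here */
}

static void up(struct sem *s)
{
	if (atomic_fetch_add(&s->count, 1) + 1 <= 0)	/* a waiter exists */
		wake_one_more(s);
}

int main(void)
{
	struct sem s = { 1, 0 };	/* binary semaphore, available */

	down(&s);	/* fast path: count 1 -> 0, no contention */
	up(&s);		/* count 0 -> 1, nobody to wake */
	printf("count=%d sleepers=%d\n",
	       atomic_load(&s.count), atomic_load(&s.sleepers));
	return 0;
}

The two-variable scheme is the point: count tracks contention lock-free, while sleepers is only touched under the lock, which is why the comments above insist the count increment and the token check must be atomic with respect to each other.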
diff --git a/include/asm-sh/semaphore.h b/include/asm-sh/semaphore.h
index 9e5a37c4dce2..d9b2034ed1d2 100644
--- a/include/asm-sh/semaphore.h
+++ b/include/asm-sh/semaphore.h
@@ -1,115 +1 @@
-#ifndef __ASM_SH_SEMAPHORE_H
-#define __ASM_SH_SEMAPHORE_H
-
-#include <linux/linkage.h>
-
-#ifdef __KERNEL__
-/*
- * SMP- and interrupt-safe semaphores.
- *
- * (C) Copyright 1996 Linus Torvalds
- *
- * SuperH version by Niibe Yutaka
- *  (Currently no asm implementation but generic C code...)
- */
-
-#include <linux/spinlock.h>
-#include <linux/rwsem.h>
-#include <linux/wait.h>
-
-#include <asm/system.h>
-#include <asm/atomic.h>
-
-struct semaphore {
-	atomic_t count;
-	int sleepers;
-	wait_queue_head_t wait;
-};
-
-#define __SEMAPHORE_INITIALIZER(name, n) \
-{ \
-	.count = ATOMIC_INIT(n), \
-	.sleepers = 0, \
-	.wait = __WAIT_QUEUE_HEAD_INITIALIZER((name).wait) \
-}
-
-#define __DECLARE_SEMAPHORE_GENERIC(name,count) \
-	struct semaphore name = __SEMAPHORE_INITIALIZER(name,count)
-
-#define DECLARE_MUTEX(name) __DECLARE_SEMAPHORE_GENERIC(name,1)
-
-static inline void sema_init (struct semaphore *sem, int val)
-{
-/*
- *	*sem = (struct semaphore)__SEMAPHORE_INITIALIZER((*sem),val);
- *
- * i'd rather use the more flexible initialization above, but sadly
- * GCC 2.7.2.3 emits a bogus warning. EGCS doesn't. Oh well.
- */
-	atomic_set(&sem->count, val);
-	sem->sleepers = 0;
-	init_waitqueue_head(&sem->wait);
-}
-
-static inline void init_MUTEX (struct semaphore *sem)
-{
-	sema_init(sem, 1);
-}
-
-static inline void init_MUTEX_LOCKED (struct semaphore *sem)
-{
-	sema_init(sem, 0);
-}
-
-#if 0
-asmlinkage void __down_failed(void /* special register calling convention */);
-asmlinkage int __down_failed_interruptible(void /* params in registers */);
-asmlinkage int __down_failed_trylock(void /* params in registers */);
-asmlinkage void __up_wakeup(void /* special register calling convention */);
-#endif
-
-asmlinkage void __down(struct semaphore * sem);
-asmlinkage int __down_interruptible(struct semaphore * sem);
-asmlinkage int __down_trylock(struct semaphore * sem);
-asmlinkage void __up(struct semaphore * sem);
-
-extern spinlock_t semaphore_wake_lock;
-
-static inline void down(struct semaphore * sem)
-{
-	might_sleep();
-	if (atomic_dec_return(&sem->count) < 0)
-		__down(sem);
-}
-
-static inline int down_interruptible(struct semaphore * sem)
-{
-	int ret = 0;
-
-	might_sleep();
-	if (atomic_dec_return(&sem->count) < 0)
-		ret = __down_interruptible(sem);
-	return ret;
-}
-
-static inline int down_trylock(struct semaphore * sem)
-{
-	int ret = 0;
-
-	if (atomic_dec_return(&sem->count) < 0)
-		ret = __down_trylock(sem);
-	return ret;
-}
-
-/*
- * Note! This is subtle. We jump to wake people up only if
- * the semaphore was negative (== somebody was waiting on it).
- */
-static inline void up(struct semaphore * sem)
-{
-	if (atomic_inc_return(&sem->count) <= 0)
-		__up(sem);
-}
-
-#endif
-#endif /* __ASM_SH_SEMAPHORE_H */
+#include <linux/semaphore.h>
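
With the header reduced to that single #include, SuperH callers now get the generic, cross-architecture semaphore from linux/semaphore.h, and their code is unchanged. A minimal sketch of the surviving API as a caller sees it (the my_dev_* names are invented for illustration):

#include <linux/semaphore.h>
#include <linux/errno.h>

static struct semaphore my_dev_sem;	/* hypothetical driver-private semaphore */

static int my_dev_init(void)
{
	sema_init(&my_dev_sem, 1);	/* one holder at a time */
	return 0;
}

static int my_dev_op(void)
{
	/* Sleeps until the semaphore is available or a signal arrives. */
	if (down_interruptible(&my_dev_sem))
		return -ERESTARTSYS;
	/* ... critical section ... */
	up(&my_dev_sem);
	return 0;
}

down(), down_trylock(), down_interruptible() and up() keep their old semantics; what changes is only the implementation behind them, where the arch-specific atomic_dec_return() fast path and the __down()/__up() slow paths give way to the generic spinlock-based version shared by all architectures.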