diff options
author | Matthew Wilcox <matthew@wil.cx> | 2008-03-07 21:55:58 -0500 |
---|---|---|
committer | Matthew Wilcox <willy@linux.intel.com> | 2008-04-17 10:42:34 -0400 |
commit | 64ac24e738823161693bf791f87adc802cf529ff (patch) | |
tree | 19c0b0cf314d4394ca580c05b86cdf874ce0a167 /include/asm-parisc | |
parent | e48b3deee475134585eed03e7afebe4bf9e0dba9 (diff) |
Generic semaphore implementation
Semaphores are no longer performance-critical, so a generic C
implementation is better for maintainability, debuggability and
extensibility. Thanks to Peter Zijlstra for fixing the lockdep
warning. Thanks to Harvey Harrison for pointing out that the
unlikely() was unnecessary.
Signed-off-by: Matthew Wilcox <willy@linux.intel.com>
Acked-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'include/asm-parisc')
-rw-r--r-- | include/asm-parisc/semaphore-helper.h | 89 | ||||
-rw-r--r-- | include/asm-parisc/semaphore.h | 146 |
2 files changed, 1 insertion, 234 deletions
diff --git a/include/asm-parisc/semaphore-helper.h b/include/asm-parisc/semaphore-helper.h deleted file mode 100644 index 387f7c1277a2..000000000000 --- a/include/asm-parisc/semaphore-helper.h +++ /dev/null | |||
@@ -1,89 +0,0 @@ | |||
1 | #ifndef _ASM_PARISC_SEMAPHORE_HELPER_H | ||
2 | #define _ASM_PARISC_SEMAPHORE_HELPER_H | ||
3 | |||
4 | /* | ||
5 | * SMP- and interrupt-safe semaphores helper functions. | ||
6 | * | ||
7 | * (C) Copyright 1996 Linus Torvalds | ||
8 | * (C) Copyright 1999 Andrea Arcangeli | ||
9 | */ | ||
10 | |||
11 | /* | ||
12 | * These two _must_ execute atomically wrt each other. | ||
13 | * | ||
14 | * This is trivially done with load_locked/store_cond, | ||
15 | * which we have. Let the rest of the losers suck eggs. | ||
16 | */ | ||
17 | static __inline__ void wake_one_more(struct semaphore * sem) | ||
18 | { | ||
19 | atomic_inc((atomic_t *)&sem->waking); | ||
20 | } | ||
21 | |||
22 | static __inline__ int waking_non_zero(struct semaphore *sem) | ||
23 | { | ||
24 | unsigned long flags; | ||
25 | int ret = 0; | ||
26 | |||
27 | spin_lock_irqsave(&semaphore_wake_lock, flags); | ||
28 | if (sem->waking > 0) { | ||
29 | sem->waking--; | ||
30 | ret = 1; | ||
31 | } | ||
32 | spin_unlock_irqrestore(&semaphore_wake_lock, flags); | ||
33 | return ret; | ||
34 | } | ||
35 | |||
36 | /* | ||
37 | * waking_non_zero_interruptible: | ||
38 | * 1 got the lock | ||
39 | * 0 go to sleep | ||
40 | * -EINTR interrupted | ||
41 | * | ||
42 | * We must undo the sem->count down_interruptible() increment while we are | ||
43 | * protected by the spinlock in order to make atomic this atomic_inc() with the | ||
44 | * atomic_read() in wake_one_more(), otherwise we can race. -arca | ||
45 | */ | ||
46 | static __inline__ int waking_non_zero_interruptible(struct semaphore *sem, | ||
47 | struct task_struct *tsk) | ||
48 | { | ||
49 | unsigned long flags; | ||
50 | int ret = 0; | ||
51 | |||
52 | spin_lock_irqsave(&semaphore_wake_lock, flags); | ||
53 | if (sem->waking > 0) { | ||
54 | sem->waking--; | ||
55 | ret = 1; | ||
56 | } else if (signal_pending(tsk)) { | ||
57 | atomic_inc(&sem->count); | ||
58 | ret = -EINTR; | ||
59 | } | ||
60 | spin_unlock_irqrestore(&semaphore_wake_lock, flags); | ||
61 | return ret; | ||
62 | } | ||
63 | |||
64 | /* | ||
65 | * waking_non_zero_trylock: | ||
66 | * 1 failed to lock | ||
67 | * 0 got the lock | ||
68 | * | ||
69 | * We must undo the sem->count down_trylock() increment while we are | ||
70 | * protected by the spinlock in order to make atomic this atomic_inc() with the | ||
71 | * atomic_read() in wake_one_more(), otherwise we can race. -arca | ||
72 | */ | ||
73 | static __inline__ int waking_non_zero_trylock(struct semaphore *sem) | ||
74 | { | ||
75 | unsigned long flags; | ||
76 | int ret = 1; | ||
77 | |||
78 | spin_lock_irqsave(&semaphore_wake_lock, flags); | ||
79 | if (sem->waking <= 0) | ||
80 | atomic_inc(&sem->count); | ||
81 | else { | ||
82 | sem->waking--; | ||
83 | ret = 0; | ||
84 | } | ||
85 | spin_unlock_irqrestore(&semaphore_wake_lock, flags); | ||
86 | return ret; | ||
87 | } | ||
88 | |||
89 | #endif /* _ASM_PARISC_SEMAPHORE_HELPER_H */ | ||
diff --git a/include/asm-parisc/semaphore.h b/include/asm-parisc/semaphore.h index a16271cdc748..d9b2034ed1d2 100644 --- a/include/asm-parisc/semaphore.h +++ b/include/asm-parisc/semaphore.h | |||
@@ -1,145 +1 @@ | |||
1 | /* SMP- and interrupt-safe semaphores. | #include <linux/semaphore.h> | |
2 | * PA-RISC version by Matthew Wilcox | ||
3 | * | ||
4 | * Linux/PA-RISC Project (http://www.parisc-linux.org/) | ||
5 | * Copyright (C) 1996 Linus Torvalds | ||
6 | * Copyright (C) 1999-2001 Matthew Wilcox < willy at debian d0T org > | ||
7 | * Copyright (C) 2000 Grant Grundler < grundler a debian org > | ||
8 | * | ||
9 | * This program is free software; you can redistribute it and/or modify | ||
10 | * it under the terms of the GNU General Public License as published by | ||
11 | * the Free Software Foundation; either version 2 of the License, or | ||
12 | * (at your option) any later version. | ||
13 | * | ||
14 | * This program is distributed in the hope that it will be useful, | ||
15 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
16 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
17 | * GNU General Public License for more details. | ||
18 | * | ||
19 | * You should have received a copy of the GNU General Public License | ||
20 | * along with this program; if not, write to the Free Software | ||
21 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
22 | */ | ||
23 | |||
24 | #ifndef _ASM_PARISC_SEMAPHORE_H | ||
25 | #define _ASM_PARISC_SEMAPHORE_H | ||
26 | |||
27 | #include <linux/spinlock.h> | ||
28 | #include <linux/wait.h> | ||
29 | #include <linux/rwsem.h> | ||
30 | |||
31 | #include <asm/system.h> | ||
32 | |||
33 | /* | ||
34 | * The `count' is initialised to the number of people who are allowed to | ||
35 | * take the lock. (Normally we want a mutex, so this is `1'). if | ||
36 | * `count' is positive, the lock can be taken. if it's 0, no-one is | ||
37 | * waiting on it. if it's -1, at least one task is waiting. | ||
38 | */ | ||
39 | struct semaphore { | ||
40 | spinlock_t sentry; | ||
41 | int count; | ||
42 | wait_queue_head_t wait; | ||
43 | }; | ||
44 | |||
45 | #define __SEMAPHORE_INITIALIZER(name, n) \ | ||
46 | { \ | ||
47 | .sentry = SPIN_LOCK_UNLOCKED, \ | ||
48 | .count = n, \ | ||
49 | .wait = __WAIT_QUEUE_HEAD_INITIALIZER((name).wait) \ | ||
50 | } | ||
51 | |||
52 | #define __DECLARE_SEMAPHORE_GENERIC(name,count) \ | ||
53 | struct semaphore name = __SEMAPHORE_INITIALIZER(name,count) | ||
54 | |||
55 | #define DECLARE_MUTEX(name) __DECLARE_SEMAPHORE_GENERIC(name,1) | ||
56 | |||
57 | static inline void sema_init (struct semaphore *sem, int val) | ||
58 | { | ||
59 | *sem = (struct semaphore)__SEMAPHORE_INITIALIZER((*sem),val); | ||
60 | } | ||
61 | |||
62 | static inline void init_MUTEX (struct semaphore *sem) | ||
63 | { | ||
64 | sema_init(sem, 1); | ||
65 | } | ||
66 | |||
67 | static inline void init_MUTEX_LOCKED (struct semaphore *sem) | ||
68 | { | ||
69 | sema_init(sem, 0); | ||
70 | } | ||
71 | |||
72 | static inline int sem_getcount(struct semaphore *sem) | ||
73 | { | ||
74 | return sem->count; | ||
75 | } | ||
76 | |||
77 | asmlinkage void __down(struct semaphore * sem); | ||
78 | asmlinkage int __down_interruptible(struct semaphore * sem); | ||
79 | asmlinkage void __up(struct semaphore * sem); | ||
80 | |||
81 | /* Semaphores can be `tried' from irq context. So we have to disable | ||
82 | * interrupts while we're messing with the semaphore. Sorry. | ||
83 | */ | ||
84 | |||
85 | static inline void down(struct semaphore * sem) | ||
86 | { | ||
87 | might_sleep(); | ||
88 | spin_lock_irq(&sem->sentry); | ||
89 | if (sem->count > 0) { | ||
90 | sem->count--; | ||
91 | } else { | ||
92 | __down(sem); | ||
93 | } | ||
94 | spin_unlock_irq(&sem->sentry); | ||
95 | } | ||
96 | |||
97 | static inline int down_interruptible(struct semaphore * sem) | ||
98 | { | ||
99 | int ret = 0; | ||
100 | might_sleep(); | ||
101 | spin_lock_irq(&sem->sentry); | ||
102 | if (sem->count > 0) { | ||
103 | sem->count--; | ||
104 | } else { | ||
105 | ret = __down_interruptible(sem); | ||
106 | } | ||
107 | spin_unlock_irq(&sem->sentry); | ||
108 | return ret; | ||
109 | } | ||
110 | |||
111 | /* | ||
112 | * down_trylock returns 0 on success, 1 if we failed to get the lock. | ||
113 | * May not sleep, but must preserve irq state | ||
114 | */ | ||
115 | static inline int down_trylock(struct semaphore * sem) | ||
116 | { | ||
117 | unsigned long flags; | ||
118 | int count; | ||
119 | |||
120 | spin_lock_irqsave(&sem->sentry, flags); | ||
121 | count = sem->count - 1; | ||
122 | if (count >= 0) | ||
123 | sem->count = count; | ||
124 | spin_unlock_irqrestore(&sem->sentry, flags); | ||
125 | return (count < 0); | ||
126 | } | ||
127 | |||
128 | /* | ||
129 | * Note! This is subtle. We jump to wake people up only if | ||
130 | * the semaphore was negative (== somebody was waiting on it). | ||
131 | */ | ||
132 | static inline void up(struct semaphore * sem) | ||
133 | { | ||
134 | unsigned long flags; | ||
135 | |||
136 | spin_lock_irqsave(&sem->sentry, flags); | ||
137 | if (sem->count < 0) { | ||
138 | __up(sem); | ||
139 | } else { | ||
140 | sem->count++; | ||
141 | } | ||
142 | spin_unlock_irqrestore(&sem->sentry, flags); | ||
143 | } | ||
144 | |||
145 | #endif /* _ASM_PARISC_SEMAPHORE_H */ | ||