| field | value | date |
|---|---|---|
| author | Matthew Wilcox <matthew@wil.cx> | 2008-03-07 21:55:58 -0500 |
| committer | Matthew Wilcox <willy@linux.intel.com> | 2008-04-17 10:42:34 -0400 |
| commit | 64ac24e738823161693bf791f87adc802cf529ff | |
| tree | 19c0b0cf314d4394ca580c05b86cdf874ce0a167 /arch/xtensa | |
| parent | e48b3deee475134585eed03e7afebe4bf9e0dba9 | |
Generic semaphore implementation
Semaphores are no longer performance-critical, so a generic C
implementation is better for maintainability, debuggability and
extensibility. Thanks to Peter Zijlstra for fixing the lockdep
warning. Thanks to Harvey Harrison for pointing out that the
unlikely() was unnecessary.
Signed-off-by: Matthew Wilcox <willy@linux.intel.com>
Acked-by: Ingo Molnar <mingo@elte.hu>
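Nothing about the caller-facing API changes with this conversion; only the code behind down(), up(), down_interruptible() and down_trylock() moves from per-architecture files like the one deleted below into common code. As a rough illustration of that unchanged usage (a hypothetical module fragment, not part of this commit; the names demo_sem, demo_init and demo_exit are invented, and <linux/semaphore.h> is assumed as the header the generic implementation lives behind):

```c
/* Hypothetical module fragment for illustration only; demo_sem, demo_init
 * and demo_exit are made-up names and are not part of this commit. */
#include <linux/module.h>
#include <linux/semaphore.h>
#include <linux/errno.h>

static struct semaphore demo_sem;

static int __init demo_init(void)
{
        sema_init(&demo_sem, 1);                /* count of 1: mutex-style use */

        if (down_interruptible(&demo_sem))      /* sleep until the semaphore is free */
                return -EINTR;                  /* woken early by a signal */

        /* ... protected section ... */

        up(&demo_sem);                          /* release, waking one sleeper if any */
        return 0;
}

static void __exit demo_exit(void)
{
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");
```

Because the interface is stable, arch code such as arch/xtensa only has to stop building and exporting its private __down/__up helpers, which is exactly what the three hunks below do.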
Diffstat (limited to 'arch/xtensa')
| mode | file | lines changed |
|---|---|---|
| -rw-r--r-- | arch/xtensa/kernel/Makefile | 2 |
| -rw-r--r-- | arch/xtensa/kernel/semaphore.c | 226 |
| -rw-r--r-- | arch/xtensa/kernel/xtensa_ksyms.c | 9 |

3 files changed, 1 insertion, 236 deletions
```diff
diff --git a/arch/xtensa/kernel/Makefile b/arch/xtensa/kernel/Makefile
index f582d6a24ec2..7419dbccf027 100644
--- a/arch/xtensa/kernel/Makefile
+++ b/arch/xtensa/kernel/Makefile
@@ -5,7 +5,7 @@
 extra-y := head.o vmlinux.lds
 
 
-obj-y := align.o entry.o irq.o coprocessor.o process.o ptrace.o semaphore.o \
+obj-y := align.o entry.o irq.o coprocessor.o process.o ptrace.o \
         setup.o signal.o syscall.o time.o traps.o vectors.o platform.o \
         pci-dma.o init_task.o io.o
 
```
```diff
diff --git a/arch/xtensa/kernel/semaphore.c b/arch/xtensa/kernel/semaphore.c
deleted file mode 100644
index 995c6410ae10..000000000000
--- a/arch/xtensa/kernel/semaphore.c
+++ /dev/null
@@ -1,226 +0,0 @@
-/*
- * arch/xtensa/kernel/semaphore.c
- *
- * Generic semaphore code. Buyer beware. Do your own specific changes
- * in <asm/semaphore-helper.h>
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 2001 - 2005 Tensilica Inc.
- *
- * Joe Taylor <joe@tensilica.com, joetylr@yahoo.com>
- * Chris Zankel <chris@zankel.net>
- * Marc Gauthier <marc@tensilica.com, marc@alumni.uwaterloo.ca>
- * Kevin Chea
- */
-
-#include <linux/sched.h>
-#include <linux/wait.h>
-#include <linux/init.h>
-#include <asm/semaphore.h>
-#include <asm/errno.h>
-
-/*
- * These two _must_ execute atomically wrt each other.
- */
-
-static __inline__ void wake_one_more(struct semaphore * sem)
-{
-        atomic_inc((atomic_t *)&sem->sleepers);
-}
-
-static __inline__ int waking_non_zero(struct semaphore *sem)
-{
-        unsigned long flags;
-        int ret = 0;
-
-        spin_lock_irqsave(&semaphore_wake_lock, flags);
-        if (sem->sleepers > 0) {
-                sem->sleepers--;
-                ret = 1;
-        }
-        spin_unlock_irqrestore(&semaphore_wake_lock, flags);
-        return ret;
-}
-
-/*
- * waking_non_zero_interruptible:
- *      1       got the lock
- *      0       go to sleep
- *      -EINTR  interrupted
- *
- * We must undo the sem->count down_interruptible() increment while we are
- * protected by the spinlock in order to make atomic this atomic_inc() with the
- * atomic_read() in wake_one_more(), otherwise we can race. -arca
- */
-
-static __inline__ int waking_non_zero_interruptible(struct semaphore *sem,
-                                                struct task_struct *tsk)
-{
-        unsigned long flags;
-        int ret = 0;
-
-        spin_lock_irqsave(&semaphore_wake_lock, flags);
-        if (sem->sleepers > 0) {
-                sem->sleepers--;
-                ret = 1;
-        } else if (signal_pending(tsk)) {
-                atomic_inc(&sem->count);
-                ret = -EINTR;
-        }
-        spin_unlock_irqrestore(&semaphore_wake_lock, flags);
-        return ret;
-}
-
-/*
- * waking_non_zero_trylock:
- *      1       failed to lock
- *      0       got the lock
- *
- * We must undo the sem->count down_trylock() increment while we are
- * protected by the spinlock in order to make atomic this atomic_inc() with the
- * atomic_read() in wake_one_more(), otherwise we can race. -arca
- */
-
-static __inline__ int waking_non_zero_trylock(struct semaphore *sem)
-{
-        unsigned long flags;
-        int ret = 1;
-
-        spin_lock_irqsave(&semaphore_wake_lock, flags);
-        if (sem->sleepers <= 0)
-                atomic_inc(&sem->count);
-        else {
-                sem->sleepers--;
-                ret = 0;
-        }
-        spin_unlock_irqrestore(&semaphore_wake_lock, flags);
-        return ret;
-}
-
-DEFINE_SPINLOCK(semaphore_wake_lock);
-
-/*
- * Semaphores are implemented using a two-way counter:
- * The "count" variable is decremented for each process
- * that tries to sleep, while the "waking" variable is
- * incremented when the "up()" code goes to wake up waiting
- * processes.
- *
- * Notably, the inline "up()" and "down()" functions can
- * efficiently test if they need to do any extra work (up
- * needs to do something only if count was negative before
- * the increment operation.
- *
- * waking_non_zero() (from asm/semaphore.h) must execute
- * atomically.
- *
- * When __up() is called, the count was negative before
- * incrementing it, and we need to wake up somebody.
- *
- * This routine adds one to the count of processes that need to
- * wake up and exit. ALL waiting processes actually wake up but
- * only the one that gets to the "waking" field first will gate
- * through and acquire the semaphore. The others will go back
- * to sleep.
- *
- * Note that these functions are only called when there is
- * contention on the lock, and as such all this is the
- * "non-critical" part of the whole semaphore business. The
- * critical part is the inline stuff in <asm/semaphore.h>
- * where we want to avoid any extra jumps and calls.
- */
-
-void __up(struct semaphore *sem)
-{
-        wake_one_more(sem);
-        wake_up(&sem->wait);
-}
-
-/*
- * Perform the "down" function. Return zero for semaphore acquired,
- * return negative for signalled out of the function.
- *
- * If called from __down, the return is ignored and the wait loop is
- * not interruptible. This means that a task waiting on a semaphore
- * using "down()" cannot be killed until someone does an "up()" on
- * the semaphore.
- *
- * If called from __down_interruptible, the return value gets checked
- * upon return. If the return value is negative then the task continues
- * with the negative value in the return register (it can be tested by
- * the caller).
- *
- * Either form may be used in conjunction with "up()".
- *
- */
-
-#define DOWN_VAR \
-        struct task_struct *tsk = current; \
-        wait_queue_t wait; \
-        init_waitqueue_entry(&wait, tsk);
-
-#define DOWN_HEAD(task_state) \
-        \
-        \
-        tsk->state = (task_state); \
-        add_wait_queue(&sem->wait, &wait); \
-        \
-        /* \
-         * Ok, we're set up. sem->count is known to be less than zero \
-         * so we must wait. \
-         * \
-         * We can let go the lock for purposes of waiting. \
-         * We re-acquire it after awaking so as to protect \
-         * all semaphore operations. \
-         * \
-         * If "up()" is called before we call waking_non_zero() then \
-         * we will catch it right away. If it is called later then \
-         * we will have to go through a wakeup cycle to catch it. \
-         * \
-         * Multiple waiters contend for the semaphore lock to see \
-         * who gets to gate through and who has to wait some more. \
-         */ \
-        for (;;) {
-
-#define DOWN_TAIL(task_state) \
-                tsk->state = (task_state); \
-        } \
-        tsk->state = TASK_RUNNING; \
-        remove_wait_queue(&sem->wait, &wait);
-
-void __sched __down(struct semaphore * sem)
-{
-        DOWN_VAR
-        DOWN_HEAD(TASK_UNINTERRUPTIBLE)
-        if (waking_non_zero(sem))
-                break;
-        schedule();
-        DOWN_TAIL(TASK_UNINTERRUPTIBLE)
-}
-
-int __sched __down_interruptible(struct semaphore * sem)
-{
-        int ret = 0;
-        DOWN_VAR
-        DOWN_HEAD(TASK_INTERRUPTIBLE)
-
-        ret = waking_non_zero_interruptible(sem, tsk);
-        if (ret)
-        {
-                if (ret == 1)
-                        /* ret != 0 only if we get interrupted -arca */
-                        ret = 0;
-                break;
-        }
-        schedule();
-        DOWN_TAIL(TASK_INTERRUPTIBLE)
-        return ret;
-}
-
-int __down_trylock(struct semaphore * sem)
-{
-        return waking_non_zero_trylock(sem);
-}
```
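For contrast with the deleted file above, which spreads the bookkeeping across an atomic count manipulated in <asm/semaphore.h>, a "sleepers" counter, and the global semaphore_wake_lock, here is a minimal self-contained model of the shape a generic C semaphore can take: one lock, one count, one place for waiters to sleep. This is a userspace sketch using pthreads, not the kernel code introduced by this commit (which lies outside the arch/xtensa view shown here); the mutex and condition variable stand in for a kernel spinlock and wait queue, and all names are made up.

```c
/* Userspace model only, not kernel code.  Build with: cc -pthread demo.c */
#include <pthread.h>

struct toy_sem {
        pthread_mutex_t lock;   /* protects count */
        pthread_cond_t  wait;   /* where would-be holders sleep */
        unsigned int    count;  /* acquisitions still available */
};

static void toy_sem_init(struct toy_sem *s, unsigned int count)
{
        pthread_mutex_init(&s->lock, NULL);
        pthread_cond_init(&s->wait, NULL);
        s->count = count;
}

static void toy_down(struct toy_sem *s)
{
        pthread_mutex_lock(&s->lock);
        while (s->count == 0)                   /* nothing left: sleep until toy_up() */
                pthread_cond_wait(&s->wait, &s->lock);
        s->count--;
        pthread_mutex_unlock(&s->lock);
}

static void toy_up(struct toy_sem *s)
{
        pthread_mutex_lock(&s->lock);
        s->count++;
        pthread_cond_signal(&s->wait);          /* wake one sleeper, if any */
        pthread_mutex_unlock(&s->lock);
}

int main(void)
{
        struct toy_sem s;

        toy_sem_init(&s, 1);
        toy_down(&s);   /* acquire */
        toy_up(&s);     /* release */
        return 0;
}
```

The maintainability argument from the commit message is visible here: with everything kept under a single per-semaphore lock there is no separate "waking" count to keep in sync with a fast path, and none of the subtle races that the -arca comments above are working around.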
```diff
diff --git a/arch/xtensa/kernel/xtensa_ksyms.c b/arch/xtensa/kernel/xtensa_ksyms.c
index 60dbdb43fb4c..6e52cdd6166f 100644
--- a/arch/xtensa/kernel/xtensa_ksyms.c
+++ b/arch/xtensa/kernel/xtensa_ksyms.c
@@ -26,7 +26,6 @@
 #include <asm/io.h>
 #include <asm/page.h>
 #include <asm/pgalloc.h>
-#include <asm/semaphore.h>
 #ifdef CONFIG_BLK_DEV_FD
 #include <asm/floppy.h>
 #endif
@@ -71,14 +70,6 @@ EXPORT_SYMBOL(__umodsi3);
 EXPORT_SYMBOL(__udivdi3);
 EXPORT_SYMBOL(__umoddi3);
 
-/*
- * Semaphore operations
- */
-EXPORT_SYMBOL(__down);
-EXPORT_SYMBOL(__down_interruptible);
-EXPORT_SYMBOL(__down_trylock);
-EXPORT_SYMBOL(__up);
-
 #ifdef CONFIG_NET
 /*
  * Networking support
```
