| field | value | date |
|---|---|---|
| author | Matthew Wilcox <matthew@wil.cx> | 2008-03-07 21:55:58 -0500 |
| committer | Matthew Wilcox <willy@linux.intel.com> | 2008-04-17 10:42:34 -0400 |
| commit | 64ac24e738823161693bf791f87adc802cf529ff (patch) | |
| tree | 19c0b0cf314d4394ca580c05b86cdf874ce0a167 /lib | |
| parent | e48b3deee475134585eed03e7afebe4bf9e0dba9 (diff) | |
Generic semaphore implementation
Semaphores are no longer performance-critical, so a generic C
implementation is better for maintainability, debuggability and
extensibility. Thanks to Peter Zijlstra for fixing the lockdep
warning. Thanks to Harvey Harrison for pointing out that the
unlikely() was unnecessary.
Signed-off-by: Matthew Wilcox <willy@linux.intel.com>
Acked-by: Ingo Molnar <mingo@elte.hu>
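For context on what replaces the code deleted below: the generic implementation keeps a plain counter and a list of waiters behind a single spinlock, so one C source file serves every architecture. The sketch below shows roughly that shape; the field names follow the upstream include/linux/semaphore.h layout, but the bodies are simplified for illustration and are not the literal code added by this commit (the slow paths are only stubbed out here).

```c
#include <linux/spinlock.h>
#include <linux/list.h>

/* Generic semaphore sketch: one spinlock guards both the counter
 * and the list of sleeping tasks, so no per-arch assembly is needed. */
struct semaphore {
        spinlock_t       lock;
        unsigned int     count;
        struct list_head wait_list;
};

/* Slow paths (bodies omitted in this sketch): queue the caller on
 * wait_list and sleep, or hand the semaphore to the first waiter. */
static void __down(struct semaphore *sem);
static void __up(struct semaphore *sem);

void down(struct semaphore *sem)
{
        unsigned long flags;

        spin_lock_irqsave(&sem->lock, flags);
        if (sem->count > 0)
                sem->count--;   /* uncontended: take it immediately */
        else
                __down(sem);    /* contended: sleep on wait_list */
        spin_unlock_irqrestore(&sem->lock, flags);
}

void up(struct semaphore *sem)
{
        unsigned long flags;

        spin_lock_irqsave(&sem->lock, flags);
        if (list_empty(&sem->wait_list))
                sem->count++;   /* nobody waiting: just bump the count */
        else
                __up(sem);      /* wake the first waiter instead */
        spin_unlock_irqrestore(&sem->lock, flags);
}
```

Contrast this with the sleeper bookkeeping in the file deleted below: here all contention handling happens under one lock, which is what makes the generic version easier to debug and extend.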
Diffstat (limited to 'lib')
| mode | file | lines deleted |
|---|---|---|
| -rw-r--r-- | lib/Makefile | 1 |
| -rw-r--r-- | lib/semaphore-sleepers.c | 176 |
2 files changed, 0 insertions, 177 deletions
```diff
diff --git a/lib/Makefile b/lib/Makefile
index 23de261a4c83..28dba90d5020 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -29,7 +29,6 @@ obj-$(CONFIG_DEBUG_LOCKING_API_SELFTESTS) += locking-selftest.o
 obj-$(CONFIG_DEBUG_SPINLOCK) += spinlock_debug.o
 lib-$(CONFIG_RWSEM_GENERIC_SPINLOCK) += rwsem-spinlock.o
 lib-$(CONFIG_RWSEM_XCHGADD_ALGORITHM) += rwsem.o
-lib-$(CONFIG_SEMAPHORE_SLEEPERS) += semaphore-sleepers.o
 lib-$(CONFIG_GENERIC_FIND_NEXT_BIT) += find_next_bit.o
 obj-$(CONFIG_GENERIC_HWEIGHT) += hweight.o
 obj-$(CONFIG_LOCK_KERNEL) += kernel_lock.o
```
```diff
diff --git a/lib/semaphore-sleepers.c b/lib/semaphore-sleepers.c
deleted file mode 100644
index 0198782cdacb..000000000000
--- a/lib/semaphore-sleepers.c
+++ /dev/null
@@ -1,176 +0,0 @@
-/*
- * i386 and x86-64 semaphore implementation.
- *
- * (C) Copyright 1999 Linus Torvalds
- *
- * Portions Copyright 1999 Red Hat, Inc.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- *
- * rw semaphores implemented November 1999 by Benjamin LaHaise <bcrl@kvack.org>
- */
-#include <linux/sched.h>
-#include <linux/err.h>
-#include <linux/init.h>
-#include <asm/semaphore.h>
-
-/*
- * Semaphores are implemented using a two-way counter:
- * The "count" variable is decremented for each process
- * that tries to acquire the semaphore, while the "sleeping"
- * variable is a count of such acquires.
- *
- * Notably, the inline "up()" and "down()" functions can
- * efficiently test if they need to do any extra work (up
- * needs to do something only if count was negative before
- * the increment operation.
- *
- * "sleeping" and the contention routine ordering is protected
- * by the spinlock in the semaphore's waitqueue head.
- *
- * Note that these functions are only called when there is
- * contention on the lock, and as such all this is the
- * "non-critical" part of the whole semaphore business. The
- * critical part is the inline stuff in <asm/semaphore.h>
- * where we want to avoid any extra jumps and calls.
- */
-
-/*
- * Logic:
- *  - only on a boundary condition do we need to care. When we go
- *    from a negative count to a non-negative, we wake people up.
- *  - when we go from a non-negative count to a negative do we
- *    (a) synchronize with the "sleeper" count and (b) make sure
- *    that we're on the wakeup list before we synchronize so that
- *    we cannot lose wakeup events.
- */
-
-void __up(struct semaphore *sem)
-{
-        wake_up(&sem->wait);
-}
-
-void __sched __down(struct semaphore *sem)
-{
-        struct task_struct *tsk = current;
-        DECLARE_WAITQUEUE(wait, tsk);
-        unsigned long flags;
-
-        tsk->state = TASK_UNINTERRUPTIBLE;
-        spin_lock_irqsave(&sem->wait.lock, flags);
-        add_wait_queue_exclusive_locked(&sem->wait, &wait);
-
-        sem->sleepers++;
-        for (;;) {
-                int sleepers = sem->sleepers;
-
-                /*
-                 * Add "everybody else" into it. They aren't
-                 * playing, because we own the spinlock in
-                 * the wait_queue_head.
-                 */
-                if (!atomic_add_negative(sleepers - 1, &sem->count)) {
-                        sem->sleepers = 0;
-                        break;
-                }
-                sem->sleepers = 1;      /* us - see -1 above */
-                spin_unlock_irqrestore(&sem->wait.lock, flags);
-
-                schedule();
-
-                spin_lock_irqsave(&sem->wait.lock, flags);
-                tsk->state = TASK_UNINTERRUPTIBLE;
-        }
-        remove_wait_queue_locked(&sem->wait, &wait);
-        wake_up_locked(&sem->wait);
-        spin_unlock_irqrestore(&sem->wait.lock, flags);
-        tsk->state = TASK_RUNNING;
-}
-
-int __sched __down_interruptible(struct semaphore *sem)
-{
-        int retval = 0;
-        struct task_struct *tsk = current;
-        DECLARE_WAITQUEUE(wait, tsk);
-        unsigned long flags;
-
-        tsk->state = TASK_INTERRUPTIBLE;
-        spin_lock_irqsave(&sem->wait.lock, flags);
-        add_wait_queue_exclusive_locked(&sem->wait, &wait);
-
-        sem->sleepers++;
-        for (;;) {
-                int sleepers = sem->sleepers;
-
-                /*
-                 * With signals pending, this turns into
-                 * the trylock failure case - we won't be
-                 * sleeping, and we can't get the lock as
-                 * it has contention. Just correct the count
-                 * and exit.
-                 */
-                if (signal_pending(current)) {
-                        retval = -EINTR;
-                        sem->sleepers = 0;
-                        atomic_add(sleepers, &sem->count);
-                        break;
-                }
-
-                /*
-                 * Add "everybody else" into it. They aren't
-                 * playing, because we own the spinlock in
-                 * wait_queue_head. The "-1" is because we're
-                 * still hoping to get the semaphore.
-                 */
-                if (!atomic_add_negative(sleepers - 1, &sem->count)) {
-                        sem->sleepers = 0;
-                        break;
-                }
-                sem->sleepers = 1;      /* us - see -1 above */
-                spin_unlock_irqrestore(&sem->wait.lock, flags);
-
-                schedule();
-
-                spin_lock_irqsave(&sem->wait.lock, flags);
-                tsk->state = TASK_INTERRUPTIBLE;
-        }
-        remove_wait_queue_locked(&sem->wait, &wait);
-        wake_up_locked(&sem->wait);
-        spin_unlock_irqrestore(&sem->wait.lock, flags);
-
-        tsk->state = TASK_RUNNING;
-        return retval;
-}
-
-/*
- * Trylock failed - make sure we correct for
- * having decremented the count.
- *
- * We could have done the trylock with a
- * single "cmpxchg" without failure cases,
- * but then it wouldn't work on a 386.
- */
-int __down_trylock(struct semaphore *sem)
-{
-        int sleepers;
-        unsigned long flags;
-
-        spin_lock_irqsave(&sem->wait.lock, flags);
-        sleepers = sem->sleepers + 1;
-        sem->sleepers = 0;
-
-        /*
-         * Add "everybody else" and us into it. They aren't
-         * playing, because we own the spinlock in the
-         * wait_queue_head.
-         */
-        if (!atomic_add_negative(sleepers, &sem->count)) {
-                wake_up_locked(&sem->wait);
-        }
-
-        spin_unlock_irqrestore(&sem->wait.lock, flags);
-        return 1;
-}
```
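The subtlest part of the deleted slow path is the atomic_add_negative(sleepers - 1, &sem->count) bookkeeping. The following is a minimal, hypothetical userspace walk-through of just that arithmetic (plain ints and printf stand in for atomics, the waitqueue, and the scheduler); it only shows how the count and sleepers values recover under contention, not the real locking:

```c
/* Build: cc -o sleepers sleepers.c  -- illustrative demo, not kernel code. */
#include <stdio.h>
#include <stdbool.h>

static int count = 1;     /* semaphore count, as touched by the inline fast paths */
static int sleepers = 0;  /* the "sleepers" bookkeeping from __down() */

/* atomic_add_negative() stand-in: add and report whether the result is negative. */
static bool add_negative(int i)
{
        count += i;
        return count < 0;
}

/* One pass through the body of __down()'s for(;;) loop for one contended task.
 * Returns true if the task acquired the semaphore on this pass. */
static bool down_slow_pass(const char *who)
{
        int s = sleepers;

        if (!add_negative(s - 1)) {     /* fold the other sleepers back in */
                sleepers = 0;
                printf("%s acquires  (count=%d, sleepers=%d)\n", who, count, sleepers);
                return true;
        }
        sleepers = 1;                   /* only "us" still accounted for */
        printf("%s sleeps    (count=%d, sleepers=%d)\n", who, count, sleepers);
        return false;
}

int main(void)
{
        /* A takes the fast path: count 1 -> 0, no contention. */
        count--;
        printf("A acquires  (count=%d, sleepers=%d)\n", count, sleepers);

        /* B and C both fail the fast path (count goes 0 -> -1 -> -2)
         * and enter __down(), each bumping "sleepers" first. */
        count--; sleepers++; down_slow_pass("B");
        count--; sleepers++; down_slow_pass("C");

        /* A releases: count goes back up; a negative pre-increment count
         * means __up() wakes the first exclusive waiter (B). */
        count++;
        printf("A releases  (count=%d)\n", count);
        down_slow_pass("B");            /* B re-runs the loop body and wins */
        down_slow_pass("C");            /* C, woken by B's wake_up_locked(), goes back to sleep */

        /* B releases, C finally gets it, then releases. */
        count++; printf("B releases  (count=%d)\n", count);
        down_slow_pass("C");
        count++; printf("C releases  (count=%d)\n", count);
        return 0;
}
```

Run single-threaded, the demo ends with count back at 1 after A, B, and C have each held the semaphore once; keeping that invariant correct across __down(), __down_interruptible(), and __down_trylock() on every architecture is exactly the burden the generic version removes.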
