author     Chris Mason <chris.mason@oracle.com>    2009-01-14 11:29:31 -0500
committer  Ingo Molnar <mingo@elte.hu>             2009-01-14 13:03:54 -0500
commit     ac6e60ee405aa3bf718f7fe4cb01b7ee0b8877ec (patch)
tree       27d1ce0bab574835775d9897c8370b68f5502fff /kernel/mutex.c
parent     0d66bf6d3514b35eb6897629059443132992dbd7 (diff)
mutex: adaptive spinning, performance tweaks
Spin more aggressively. This is less fair but also markedly faster.

The numbers:

 * dbench 50 (higher is better):
    spin         1282MB/s
    v10           548MB/s
    v10 no wait  1868MB/s

 * 4k creates (numbers in files/second, higher is better):
    spin         avg 200.60  median 193.20  std 19.71  high 305.93  low 186.82
    v10          avg 180.94  median 175.28  std 13.91  high 229.31  low 168.73
    v10 no wait  avg 232.18  median 222.38  std 22.91  high 314.66  low 209.12

 * File stats (numbers in seconds, lower is better):
    spin         2.27s
    v10          5.1s
    v10 no wait  1.6s

( The source changes are smaller than they look; I just moved the
  need_resched checks in __mutex_lock_common after the cmpxchg. )

Signed-off-by: Chris Mason <chris.mason@oracle.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
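To make the reordering concrete, here is a minimal user-space C sketch of the loop shape this patch settles on: attempt the lock-word cmpxchg first, and only then decide whether to stop spinning. It is not the kernel code; struct toy_mutex, try_spin_acquire() and should_stop_spinning() are hypothetical stand-ins, the owner-spinning step (mutex_spin_on_owner()) is elided, and sched_yield() is a crude stand-in for cpu_relax().

/*
 * Illustrative sketch only, under the assumptions stated above --
 * not the kernel's __mutex_lock_common().
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <sched.h>

struct toy_mutex {
	atomic_int count;		/* 1 = unlocked, 0 = locked */
};

/*
 * Returns true if the lock was taken by spinning, false if the caller
 * should fall back to the slow (sleeping) path.
 */
static bool try_spin_acquire(struct toy_mutex *lock,
			     bool (*should_stop_spinning)(void))
{
	for (;;) {
		int unlocked = 1;

		/*
		 * Try to grab the lock first; this is the attempt the
		 * patch moves ahead of the bail-out checks.
		 */
		if (atomic_compare_exchange_strong(&lock->count, &unlocked, 0))
			return true;

		/* Analogue of the need_resched()/rt_task() bail-out. */
		if (should_stop_spinning())
			return false;

		/* Crude stand-in for cpu_relax(). */
		sched_yield();
	}
}

Trying the cmpxchg before the bail-out check means a spinner that has already paid for the cache line can still take an uncontended lock instead of dropping into the sleep path, which is consistent with the commit message's note about moving the need_resched checks after the cmpxchg.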
Diffstat (limited to 'kernel/mutex.c')
-rw-r--r--  kernel/mutex.c  20
1 file changed, 7 insertions(+), 13 deletions(-)
diff --git a/kernel/mutex.c b/kernel/mutex.c
index ff42e975590..5d79781394a 100644
--- a/kernel/mutex.c
+++ b/kernel/mutex.c
@@ -171,12 +171,6 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
 		struct thread_info *owner;
 
 		/*
-		 * If there are pending waiters, join them.
-		 */
-		if (!list_empty(&lock->wait_list))
-			break;
-
-		/*
 		 * If there's an owner, wait for it to either
 		 * release the lock or go to sleep.
 		 */
@@ -184,6 +178,13 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
 		if (owner && !mutex_spin_on_owner(lock, owner))
 			break;
 
+		if (atomic_cmpxchg(&lock->count, 1, 0) == 1) {
+			lock_acquired(&lock->dep_map, ip);
+			mutex_set_owner(lock);
+			preempt_enable();
+			return 0;
+		}
+
 		/*
 		 * When there's no owner, we might have preempted between the
 		 * owner acquiring the lock and setting the owner field. If
@@ -193,13 +194,6 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
 		if (!owner && (need_resched() || rt_task(task)))
 			break;
 
-		if (atomic_cmpxchg(&lock->count, 1, 0) == 1) {
-			lock_acquired(&lock->dep_map, ip);
-			mutex_set_owner(lock);
-			preempt_enable();
-			return 0;
-		}
-
 		/*
 		 * The cpu_relax() call is a compiler barrier which forces
 		 * everything in this loop to be re-loaded. We don't need