diff options
author		Peter Zijlstra <a.p.zijlstra@chello.nl>	2011-04-05 11:23:41 -0400
committer	Ingo Molnar <mingo@elte.hu>	2011-04-14 02:52:33 -0400
commit		c6eb3dda25892f1f974f5420f63e6721aab02f6f (patch)
tree		b9be3e193dcfeda3589832be10189085cde496c5 /kernel
parent		3ca7a440da394808571dad32d33d3bc0389982e6 (diff)
mutex: Use p->on_cpu for the adaptive spin
Since we now have p->on_cpu unconditionally available, use it to
re-implement mutex_spin_on_owner.
Requested-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Frank Rowand <frank.rowand@am.sony.com>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Nick Piggin <npiggin@kernel.dk>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Link: http://lkml.kernel.org/r/20110405152728.826338173@chello.nl
Diffstat (limited to 'kernel')
 kernel/mutex-debug.c |  2 +-
 kernel/mutex-debug.h |  2 +-
 kernel/mutex.c       |  2 +-
 kernel/mutex.h       |  2 +-
 kernel/sched.c       | 83 ++++++++++++++++++-----------------------
 5 files changed, 37 insertions(+), 54 deletions(-)
diff --git a/kernel/mutex-debug.c b/kernel/mutex-debug.c
index ec815a960b5d..73da83aff418 100644
--- a/kernel/mutex-debug.c
+++ b/kernel/mutex-debug.c
@@ -75,7 +75,7 @@ void debug_mutex_unlock(struct mutex *lock)
 		return;
 
 	DEBUG_LOCKS_WARN_ON(lock->magic != lock);
-	DEBUG_LOCKS_WARN_ON(lock->owner != current_thread_info());
+	DEBUG_LOCKS_WARN_ON(lock->owner != current);
 	DEBUG_LOCKS_WARN_ON(!lock->wait_list.prev && !lock->wait_list.next);
 	mutex_clear_owner(lock);
 }
diff --git a/kernel/mutex-debug.h b/kernel/mutex-debug.h
index 57d527a16f9d..0799fd3e4cfa 100644
--- a/kernel/mutex-debug.h
+++ b/kernel/mutex-debug.h
@@ -29,7 +29,7 @@ extern void debug_mutex_init(struct mutex *lock, const char *name,
 
 static inline void mutex_set_owner(struct mutex *lock)
 {
-	lock->owner = current_thread_info();
+	lock->owner = current;
 }
 
 static inline void mutex_clear_owner(struct mutex *lock)
diff --git a/kernel/mutex.c b/kernel/mutex.c
index c4195fa98900..fe4706cb0c5b 100644
--- a/kernel/mutex.c
+++ b/kernel/mutex.c
@@ -160,7 +160,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
 	 */
 
 	for (;;) {
-		struct thread_info *owner;
+		struct task_struct *owner;
 
 		/*
 		 * If we own the BKL, then don't spin. The owner of
diff --git a/kernel/mutex.h b/kernel/mutex.h
index 67578ca48f94..4115fbf83b12 100644
--- a/kernel/mutex.h
+++ b/kernel/mutex.h
@@ -19,7 +19,7 @@
 #ifdef CONFIG_SMP
 static inline void mutex_set_owner(struct mutex *lock)
 {
-	lock->owner = current_thread_info();
+	lock->owner = current;
 }
 
 static inline void mutex_clear_owner(struct mutex *lock)
diff --git a/kernel/sched.c b/kernel/sched.c
index cd2593e1a3ec..55cc50323ce1 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -4173,70 +4173,53 @@ need_resched:
 EXPORT_SYMBOL(schedule);
 
 #ifdef CONFIG_MUTEX_SPIN_ON_OWNER
-/*
- * Look out! "owner" is an entirely speculative pointer
- * access and not reliable.
- */
-int mutex_spin_on_owner(struct mutex *lock, struct thread_info *owner)
-{
-	unsigned int cpu;
-	struct rq *rq;
 
-	if (!sched_feat(OWNER_SPIN))
-		return 0;
+static inline bool owner_running(struct mutex *lock, struct task_struct *owner)
+{
+	bool ret = false;
 
-#ifdef CONFIG_DEBUG_PAGEALLOC
-	/*
-	 * Need to access the cpu field knowing that
-	 * DEBUG_PAGEALLOC could have unmapped it if
-	 * the mutex owner just released it and exited.
-	 */
-	if (probe_kernel_address(&owner->cpu, cpu))
-		return 0;
-#else
-	cpu = owner->cpu;
-#endif
+	rcu_read_lock();
+	if (lock->owner != owner)
+		goto fail;
 
 	/*
-	 * Even if the access succeeded (likely case),
-	 * the cpu field may no longer be valid.
+	 * Ensure we emit the owner->on_cpu, dereference _after_ checking
+	 * lock->owner still matches owner, if that fails, owner might
+	 * point to free()d memory, if it still matches, the rcu_read_lock()
+	 * ensures the memory stays valid.
 	 */
-	if (cpu >= nr_cpumask_bits)
-		return 0;
+	barrier();
 
-	/*
-	 * We need to validate that we can do a
-	 * get_cpu() and that we have the percpu area.
-	 */
-	if (!cpu_online(cpu))
-		return 0;
+	ret = owner->on_cpu;
+fail:
+	rcu_read_unlock();
 
-	rq = cpu_rq(cpu);
+	return ret;
+}
 
-	for (;;) {
-		/*
-		 * Owner changed, break to re-assess state.
-		 */
-		if (lock->owner != owner) {
-			/*
-			 * If the lock has switched to a different owner,
-			 * we likely have heavy contention. Return 0 to quit
-			 * optimistic spinning and not contend further:
-			 */
-			if (lock->owner)
-				return 0;
-			break;
-		}
+/*
+ * Look out! "owner" is an entirely speculative pointer
+ * access and not reliable.
+ */
+int mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner)
+{
+	if (!sched_feat(OWNER_SPIN))
+		return 0;
 
-		/*
-		 * Is that owner really running on that cpu?
-		 */
-		if (task_thread_info(rq->curr) != owner || need_resched())
+	while (owner_running(lock, owner)) {
+		if (need_resched())
 			return 0;
 
-		arch_mutex_cpu_relax();
-	}
+		arch_mutex_cpu_relax();
+	}
 
+	/*
+	 * If the owner changed to another task there is likely
+	 * heavy contention, stop spinning.
+	 */
+	if (lock->owner)
+		return 0;
+
 	return 1;
 }
 #endif