about summary refs log tree commit diff stats
path: root/kernel
diff options
context:
space:
mode:
authorIngo Molnar <mingo@elte.hu>2006-01-10 16:07:44 -0500
committerLinus Torvalds <torvalds@g5.osdl.org>2006-01-10 16:20:47 -0500
commit73165b88ffd29813bf73b331eaf90d3521443236 (patch)
tree224b510df182c5cba7b64fea6202ed9dd414835e /kernel
parent042c904c3e35e95ac911e8a2bf4097099b059e1a (diff)
[PATCH] fix i386 mutex fastpath on FRAME_POINTER && !DEBUG_MUTEXES
Call the mutex slowpath more conservatively - e.g. FRAME_POINTERS can
change the calling convention, in which case a direct branch to the
slowpath becomes illegal.

Bug found by Hugh Dickins.

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/mutex.c | 9 ---------
1 file changed, 0 insertions(+), 9 deletions(-)
diff --git a/kernel/mutex.c b/kernel/mutex.c
index 7eb960661441..d3dcb8b44bac 100644
--- a/kernel/mutex.c
+++ b/kernel/mutex.c
@@ -84,12 +84,6 @@ void fastcall __sched mutex_lock(struct mutex *lock)
 	/*
 	 * The locking fastpath is the 1->0 transition from
 	 * 'unlocked' into 'locked' state.
-	 *
-	 * NOTE: if asm/mutex.h is included, then some architectures
-	 * rely on mutex_lock() having _no other code_ here but this
-	 * fastpath. That allows the assembly fastpath to do
-	 * tail-merging optimizations. (If you want to put testcode
-	 * here, do it under #ifndef CONFIG_MUTEX_DEBUG.)
 	 */
 	__mutex_fastpath_lock(&lock->count, __mutex_lock_slowpath);
 }
@@ -115,8 +109,6 @@ void fastcall __sched mutex_unlock(struct mutex *lock)
 	/*
 	 * The unlocking fastpath is the 0->1 transition from 'locked'
 	 * into 'unlocked' state:
-	 *
-	 * NOTE: no other code must be here - see mutex_lock() .
 	 */
 	__mutex_fastpath_unlock(&lock->count, __mutex_unlock_slowpath);
 }
@@ -261,7 +253,6 @@ __mutex_lock_interruptible_slowpath(atomic_t *lock_count __IP_DECL__);
  */
 int fastcall __sched mutex_lock_interruptible(struct mutex *lock)
 {
-	/* NOTE: no other code must be here - see mutex_lock() */
 	return __mutex_fastpath_lock_retval
 			(&lock->count, __mutex_lock_interruptible_slowpath);
 }