 include/asm-i386/mutex.h | 16 ++++++++++++++--
 kernel/mutex.c           |  9 ---------
 2 files changed, 14 insertions(+), 11 deletions(-)
diff --git a/include/asm-i386/mutex.h b/include/asm-i386/mutex.h
index 4e5e3de1b9a6..c657d4b09f0a 100644
--- a/include/asm-i386/mutex.h
+++ b/include/asm-i386/mutex.h
@@ -28,7 +28,13 @@ do {								\
 								\
 	__asm__ __volatile__(					\
 		LOCK	"   decl (%%eax)	\n"		\
-			"   js "#fail_fn"	\n"		\
+			"   js 2f		\n"		\
+			"1:			\n"		\
+								\
+		LOCK_SECTION_START("")				\
+		"2:	call "#fail_fn"	\n"			\
+		"	jmp 1b		\n"			\
+		LOCK_SECTION_END				\
 								\
 		:"=a" (dummy)					\
 		: "a" (count)					\
@@ -78,7 +84,13 @@ do {								\
 								\
 	__asm__ __volatile__(					\
 		LOCK	"   incl (%%eax)	\n"		\
-			"   jle "#fail_fn"	\n"		\
+			"   jle 2f		\n"		\
+			"1:			\n"		\
+								\
+		LOCK_SECTION_START("")				\
+		"2:	call "#fail_fn"	\n"			\
+		"	jmp 1b		\n"			\
+		LOCK_SECTION_END				\
 								\
 		:"=a" (dummy)					\
 		: "a" (count)					\
diff --git a/kernel/mutex.c b/kernel/mutex.c
index 7eb960661441..d3dcb8b44bac 100644
--- a/kernel/mutex.c
+++ b/kernel/mutex.c
@@ -84,12 +84,6 @@ void fastcall __sched mutex_lock(struct mutex *lock)
 	/*
 	 * The locking fastpath is the 1->0 transition from
 	 * 'unlocked' into 'locked' state.
-	 *
-	 * NOTE: if asm/mutex.h is included, then some architectures
-	 * rely on mutex_lock() having _no other code_ here but this
-	 * fastpath. That allows the assembly fastpath to do
-	 * tail-merging optimizations. (If you want to put testcode
-	 * here, do it under #ifndef CONFIG_MUTEX_DEBUG.)
 	 */
 	__mutex_fastpath_lock(&lock->count, __mutex_lock_slowpath);
 }
@@ -115,8 +109,6 @@ void fastcall __sched mutex_unlock(struct mutex *lock)
 	/*
 	 * The unlocking fastpath is the 0->1 transition from 'locked'
 	 * into 'unlocked' state:
-	 *
-	 * NOTE: no other code must be here - see mutex_lock() .
 	 */
 	__mutex_fastpath_unlock(&lock->count, __mutex_unlock_slowpath);
 }
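
The deleted NOTE comments documented a restriction that the asm change above lifts. With the old "js "#fail_fn"" form the slowpath function was entered by a plain jump, so its ret returned directly to the caller of mutex_lock(); that only works if the C function contains nothing but the fastpath invocation (the tail-merging the comment describes). With the new stub, fail_fn is entered by a real call and control returns to label 1: inside the asm, so extra code around the fastpath can no longer break anything. Roughly, in generated code (hypothetical, with the count pointer in %eax as the asm's constraints require):

    # before: slowpath entered by a jump; its ret had to return
    # on mutex_lock()'s behalf
    	lock decl (%eax)
    	js   __mutex_lock_slowpath

    # after: slowpath entered by a call from the cold subsection
    	lock decl (%eax)
    	js   2f
    1:				# fastpath continues here
    	.subsection 1
    2:	call __mutex_lock_slowpath
    	jmp  1b
    	.previous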
@@ -261,7 +253,6 @@ __mutex_lock_interruptible_slowpath(atomic_t *lock_count __IP_DECL__);
  */
 int fastcall __sched mutex_lock_interruptible(struct mutex *lock)
 {
-	/* NOTE: no other code must be here - see mutex_lock() */
 	return __mutex_fastpath_lock_retval
 		(&lock->count, __mutex_lock_interruptible_slowpath);
 }
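
For reference, the counter protocol behind all three fastpaths: 1 means unlocked, 0 means locked, and a negative count means locked with possible waiters. That is why the lock fastpath tests the sign after the decrement (js) and the unlock fastpath tests for a result <= 0 after the increment (jle). A minimal userspace sketch of the same protocol in C11 atomics (illustration only: every name here is hypothetical, and the kernel slowpaths sleep on and wake a wait list instead of spinning):

    #include <stdatomic.h>
    #include <stdio.h>

    /* 1 = unlocked, 0 = locked, < 0 = locked, waiters may exist */
    static atomic_int count = 1;

    static void slowpath_lock(void)	/* stand-in for __mutex_lock_slowpath */
    {
    	int expected = 1;
    	/* the kernel slowpath sleeps on a wait list; spin for illustration */
    	while (!atomic_compare_exchange_weak(&count, &expected, 0))
    		expected = 1;
    }

    static void mutex_lock_sketch(void)
    {
    	/* fastpath: "lock decl" going negative means contention (js) */
    	if (atomic_fetch_sub(&count, 1) <= 0)
    		slowpath_lock();
    }

    static void mutex_unlock_sketch(void)
    {
    	/* fastpath: "lock incl" result <= 0 means waiters may exist (jle) */
    	if (atomic_fetch_add(&count, 1) < 0)
    		atomic_store(&count, 1);	/* stand-in for the unlock slowpath */
    }

    int main(void)
    {
    	mutex_lock_sketch();
    	printf("locked:   count = %d\n", atomic_load(&count));
    	mutex_unlock_sketch();
    	printf("unlocked: count = %d\n", atomic_load(&count));
    	return 0;
    }

Because only contenders ever push the count below zero, the common uncontended lock/unlock pair touches the counter with a single atomic op each way and never reaches the slow functions, which is exactly the property the asm fastpaths preserve.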