author    Maarten Lankhorst <maarten.lankhorst@canonical.com>  2013-06-20 07:31:05 -0400
committer Ingo Molnar <mingo@kernel.org>  2013-06-26 06:10:55 -0400
commit    a41b56efa70e060f650aeb54740aaf52044a1ead (patch)
tree      776921423ea47eeeb4451aa9f66c49535bf64048
parent    1e876e3b1a9df25bb04682b0d48aaa7e8ae1fc82 (diff)
arch: Make __mutex_fastpath_lock_retval return whether fastpath succeeded or not
This will allow me to call functions that have multiple arguments if
the fastpath fails. This is required to support ticket mutexes, because
they need to be able to pass an extra argument to the fail function.

Originally I duplicated the functions by adding
__mutex_fastpath_lock_retval_arg. That ended up being just a duplicate
of the existing function, so testing whether the fastpath succeeded
turned out to be the better approach.

This also cleaned up the reservation mutex patch somewhat, by making it
possible to call an atomic_set instead of an atomic_xchg, and by making
it easier to detect if the wrong unlock function was previously used.

Signed-off-by: Maarten Lankhorst <maarten.lankhorst@canonical.com>
Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: dri-devel@lists.freedesktop.org
Cc: linaro-mm-sig@lists.linaro.org
Cc: robclark@gmail.com
Cc: rostedt@goodmis.org
Cc: daniel@ffwll.ch
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Link: http://lkml.kernel.org/r/20130620113105.4001.83929.stgit@patser
Signed-off-by: Ingo Molnar <mingo@kernel.org>
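[Editor's note: a minimal userspace sketch of the calling convention this
patch enables -- NOT kernel code; the names fastpath_lock_retval,
lock_slowpath, lock_with_ctx and struct ctx are made up for illustration.
The old helper tail-called a fixed fail_fn(atomic_t *), so a slowpath
could never receive anything but the counter; returning 0/-1 instead lets
the caller invoke a slowpath with any extra arguments it needs.]

/* Illustrative model only; builds with any C11 compiler (cc -std=c11). */
#include <stdatomic.h>
#include <stdio.h>

struct ctx  { int id; };           /* stand-in for an acquire context   */
struct lock { atomic_int count; }; /* 1: unlocked, <= 0: locked/waited  */

/* New-style fastpath: report success (0) or failure (-1), no callback. */
static int fastpath_lock_retval(atomic_int *count)
{
	return (atomic_fetch_sub(count, 1) == 1) ? 0 : -1;
}

/* The slowpath may now take extra arguments directly. */
static int lock_slowpath(struct lock *l, struct ctx *c)
{
	(void)l;                   /* unused in this sketch */
	printf("slowpath entered for ctx %d\n", c->id);
	return 0;                  /* a real slowpath would block here */
}

static int lock_with_ctx(struct lock *l, struct ctx *c)
{
	if (fastpath_lock_retval(&l->count))
		return lock_slowpath(l, c); /* impossible with old fail_fn API */
	return 0;
}

int main(void)
{
	struct lock l;
	struct ctx  c = { .id = 1 };

	atomic_init(&l.count, 1);
	lock_with_ctx(&l, &c);     /* fastpath: count goes 1 -> 0       */
	lock_with_ctx(&l, &c);     /* contended: falls to the slowpath,
				      context argument intact           */
	return 0;
}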
 arch/ia64/include/asm/mutex.h    | 10
 arch/powerpc/include/asm/mutex.h | 10
 arch/sh/include/asm/mutex-llsc.h |  4
 arch/x86/include/asm/mutex_32.h  | 11
 arch/x86/include/asm/mutex_64.h  | 11
 include/asm-generic/mutex-dec.h  | 10
 include/asm-generic/mutex-null.h |  2
 include/asm-generic/mutex-xchg.h | 10
 kernel/mutex.c                   | 32
 9 files changed, 41 insertions(+), 59 deletions(-)
diff --git a/arch/ia64/include/asm/mutex.h b/arch/ia64/include/asm/mutex.h
index bed73a643a56..f41e66d65e31 100644
--- a/arch/ia64/include/asm/mutex.h
+++ b/arch/ia64/include/asm/mutex.h
@@ -29,17 +29,15 @@ __mutex_fastpath_lock(atomic_t *count, void (*fail_fn)(atomic_t *))
  * __mutex_fastpath_lock_retval - try to take the lock by moving the count
  * from 1 to a 0 value
  * @count: pointer of type atomic_t
- * @fail_fn: function to call if the original value was not 1
  *
- * Change the count from 1 to a value lower than 1, and call <fail_fn> if
- * it wasn't 1 originally. This function returns 0 if the fastpath succeeds,
- * or anything the slow path function returns.
+ * Change the count from 1 to a value lower than 1. This function returns 0
+ * if the fastpath succeeds, or -1 otherwise.
  */
 static inline int
-__mutex_fastpath_lock_retval(atomic_t *count, int (*fail_fn)(atomic_t *))
+__mutex_fastpath_lock_retval(atomic_t *count)
 {
 	if (unlikely(ia64_fetchadd4_acq(count, -1) != 1))
-		return fail_fn(count);
+		return -1;
 	return 0;
 }
 
diff --git a/arch/powerpc/include/asm/mutex.h b/arch/powerpc/include/asm/mutex.h
index 5399f7e18102..127ab23e1f6c 100644
--- a/arch/powerpc/include/asm/mutex.h
+++ b/arch/powerpc/include/asm/mutex.h
@@ -82,17 +82,15 @@ __mutex_fastpath_lock(atomic_t *count, void (*fail_fn)(atomic_t *))
  * __mutex_fastpath_lock_retval - try to take the lock by moving the count
  * from 1 to a 0 value
  * @count: pointer of type atomic_t
- * @fail_fn: function to call if the original value was not 1
  *
- * Change the count from 1 to a value lower than 1, and call <fail_fn> if
- * it wasn't 1 originally. This function returns 0 if the fastpath succeeds,
- * or anything the slow path function returns.
+ * Change the count from 1 to a value lower than 1. This function returns 0
+ * if the fastpath succeeds, or -1 otherwise.
  */
 static inline int
-__mutex_fastpath_lock_retval(atomic_t *count, int (*fail_fn)(atomic_t *))
+__mutex_fastpath_lock_retval(atomic_t *count)
 {
 	if (unlikely(__mutex_dec_return_lock(count) < 0))
-		return fail_fn(count);
+		return -1;
 	return 0;
 }
 
diff --git a/arch/sh/include/asm/mutex-llsc.h b/arch/sh/include/asm/mutex-llsc.h
index 090358a7e1bb..dad29b687bd3 100644
--- a/arch/sh/include/asm/mutex-llsc.h
+++ b/arch/sh/include/asm/mutex-llsc.h
@@ -37,7 +37,7 @@ __mutex_fastpath_lock(atomic_t *count, void (*fail_fn)(atomic_t *))
 }
 
 static inline int
-__mutex_fastpath_lock_retval(atomic_t *count, int (*fail_fn)(atomic_t *))
+__mutex_fastpath_lock_retval(atomic_t *count)
 {
 	int __done, __res;
 
@@ -51,7 +51,7 @@ __mutex_fastpath_lock_retval(atomic_t *count, int (*fail_fn)(atomic_t *))
51 : "t"); 51 : "t");
52 52
53 if (unlikely(!__done || __res != 0)) 53 if (unlikely(!__done || __res != 0))
54 __res = fail_fn(count); 54 __res = -1;
55 55
56 return __res; 56 return __res;
57} 57}
diff --git a/arch/x86/include/asm/mutex_32.h b/arch/x86/include/asm/mutex_32.h
index 03f90c8a5a7c..0208c3c2cbc6 100644
--- a/arch/x86/include/asm/mutex_32.h
+++ b/arch/x86/include/asm/mutex_32.h
@@ -42,17 +42,14 @@ do { \
  * __mutex_fastpath_lock_retval - try to take the lock by moving the count
  * from 1 to a 0 value
  * @count: pointer of type atomic_t
- * @fail_fn: function to call if the original value was not 1
  *
- * Change the count from 1 to a value lower than 1, and call <fail_fn> if it
- * wasn't 1 originally. This function returns 0 if the fastpath succeeds,
- * or anything the slow path function returns
+ * Change the count from 1 to a value lower than 1. This function returns 0
+ * if the fastpath succeeds, or -1 otherwise.
  */
-static inline int __mutex_fastpath_lock_retval(atomic_t *count,
-					       int (*fail_fn)(atomic_t *))
+static inline int __mutex_fastpath_lock_retval(atomic_t *count)
 {
 	if (unlikely(atomic_dec_return(count) < 0))
-		return fail_fn(count);
+		return -1;
 	else
 		return 0;
 }
diff --git a/arch/x86/include/asm/mutex_64.h b/arch/x86/include/asm/mutex_64.h
index 68a87b0f8e29..2c543fff241b 100644
--- a/arch/x86/include/asm/mutex_64.h
+++ b/arch/x86/include/asm/mutex_64.h
@@ -37,17 +37,14 @@ do { \
  * __mutex_fastpath_lock_retval - try to take the lock by moving the count
  * from 1 to a 0 value
  * @count: pointer of type atomic_t
- * @fail_fn: function to call if the original value was not 1
  *
- * Change the count from 1 to a value lower than 1, and call <fail_fn> if
- * it wasn't 1 originally. This function returns 0 if the fastpath succeeds,
- * or anything the slow path function returns
+ * Change the count from 1 to a value lower than 1. This function returns 0
+ * if the fastpath succeeds, or -1 otherwise.
  */
-static inline int __mutex_fastpath_lock_retval(atomic_t *count,
-					       int (*fail_fn)(atomic_t *))
+static inline int __mutex_fastpath_lock_retval(atomic_t *count)
 {
 	if (unlikely(atomic_dec_return(count) < 0))
-		return fail_fn(count);
+		return -1;
 	else
 		return 0;
 }
diff --git a/include/asm-generic/mutex-dec.h b/include/asm-generic/mutex-dec.h
index f104af7cf437..d4f9fb4e53df 100644
--- a/include/asm-generic/mutex-dec.h
+++ b/include/asm-generic/mutex-dec.h
@@ -28,17 +28,15 @@ __mutex_fastpath_lock(atomic_t *count, void (*fail_fn)(atomic_t *))
  * __mutex_fastpath_lock_retval - try to take the lock by moving the count
  * from 1 to a 0 value
  * @count: pointer of type atomic_t
- * @fail_fn: function to call if the original value was not 1
  *
- * Change the count from 1 to a value lower than 1, and call <fail_fn> if
- * it wasn't 1 originally. This function returns 0 if the fastpath succeeds,
- * or anything the slow path function returns.
+ * Change the count from 1 to a value lower than 1. This function returns 0
+ * if the fastpath succeeds, or -1 otherwise.
  */
 static inline int
-__mutex_fastpath_lock_retval(atomic_t *count, int (*fail_fn)(atomic_t *))
+__mutex_fastpath_lock_retval(atomic_t *count)
 {
 	if (unlikely(atomic_dec_return(count) < 0))
-		return fail_fn(count);
+		return -1;
 	return 0;
 }
 
diff --git a/include/asm-generic/mutex-null.h b/include/asm-generic/mutex-null.h
index e1bbbc72b6a2..61069ed334e2 100644
--- a/include/asm-generic/mutex-null.h
+++ b/include/asm-generic/mutex-null.h
@@ -11,7 +11,7 @@
 #define _ASM_GENERIC_MUTEX_NULL_H
 
 #define __mutex_fastpath_lock(count, fail_fn)		fail_fn(count)
-#define __mutex_fastpath_lock_retval(count, fail_fn)	fail_fn(count)
+#define __mutex_fastpath_lock_retval(count)		(-1)
 #define __mutex_fastpath_unlock(count, fail_fn)	fail_fn(count)
 #define __mutex_fastpath_trylock(count, fail_fn)	fail_fn(count)
 #define __mutex_slowpath_needs_to_unlock()		1
diff --git a/include/asm-generic/mutex-xchg.h b/include/asm-generic/mutex-xchg.h
index c04e0db8a2d6..f169ec064785 100644
--- a/include/asm-generic/mutex-xchg.h
+++ b/include/asm-generic/mutex-xchg.h
@@ -39,18 +39,16 @@ __mutex_fastpath_lock(atomic_t *count, void (*fail_fn)(atomic_t *))
  * __mutex_fastpath_lock_retval - try to take the lock by moving the count
  * from 1 to a 0 value
  * @count: pointer of type atomic_t
- * @fail_fn: function to call if the original value was not 1
  *
- * Change the count from 1 to a value lower than 1, and call <fail_fn> if it
- * wasn't 1 originally. This function returns 0 if the fastpath succeeds,
- * or anything the slow path function returns
+ * Change the count from 1 to a value lower than 1. This function returns 0
+ * if the fastpath succeeds, or -1 otherwise.
  */
 static inline int
-__mutex_fastpath_lock_retval(atomic_t *count, int (*fail_fn)(atomic_t *))
+__mutex_fastpath_lock_retval(atomic_t *count)
 {
 	if (unlikely(atomic_xchg(count, 0) != 1))
 		if (likely(atomic_xchg(count, -1) != 1))
-			return fail_fn(count);
+			return -1;
 	return 0;
 }
 
diff --git a/kernel/mutex.c b/kernel/mutex.c
index ad53a664f113..42f8dda2467b 100644
--- a/kernel/mutex.c
+++ b/kernel/mutex.c
@@ -494,10 +494,10 @@ __mutex_unlock_slowpath(atomic_t *lock_count)
  * mutex_lock_interruptible() and mutex_trylock().
  */
 static noinline int __sched
-__mutex_lock_killable_slowpath(atomic_t *lock_count);
+__mutex_lock_killable_slowpath(struct mutex *lock);
 
 static noinline int __sched
-__mutex_lock_interruptible_slowpath(atomic_t *lock_count);
+__mutex_lock_interruptible_slowpath(struct mutex *lock);
 
 /**
  * mutex_lock_interruptible - acquire the mutex, interruptible
@@ -515,12 +515,12 @@ int __sched mutex_lock_interruptible(struct mutex *lock)
 	int ret;
 
 	might_sleep();
-	ret = __mutex_fastpath_lock_retval
-			(&lock->count, __mutex_lock_interruptible_slowpath);
-	if (!ret)
+	ret = __mutex_fastpath_lock_retval(&lock->count);
+	if (likely(!ret)) {
 		mutex_set_owner(lock);
-
-	return ret;
+		return 0;
+	} else
+		return __mutex_lock_interruptible_slowpath(lock);
 }
 
 EXPORT_SYMBOL(mutex_lock_interruptible);
@@ -530,12 +530,12 @@ int __sched mutex_lock_killable(struct mutex *lock)
 	int ret;
 
 	might_sleep();
-	ret = __mutex_fastpath_lock_retval
-			(&lock->count, __mutex_lock_killable_slowpath);
-	if (!ret)
+	ret = __mutex_fastpath_lock_retval(&lock->count);
+	if (likely(!ret)) {
 		mutex_set_owner(lock);
-
-	return ret;
+		return 0;
+	} else
+		return __mutex_lock_killable_slowpath(lock);
 }
 EXPORT_SYMBOL(mutex_lock_killable);
 
@@ -548,18 +548,14 @@ __mutex_lock_slowpath(atomic_t *lock_count)
 }
 
 static noinline int __sched
-__mutex_lock_killable_slowpath(atomic_t *lock_count)
+__mutex_lock_killable_slowpath(struct mutex *lock)
 {
-	struct mutex *lock = container_of(lock_count, struct mutex, count);
-
 	return __mutex_lock_common(lock, TASK_KILLABLE, 0, NULL, _RET_IP_);
 }
 
 static noinline int __sched
-__mutex_lock_interruptible_slowpath(atomic_t *lock_count)
+__mutex_lock_interruptible_slowpath(struct mutex *lock)
 {
-	struct mutex *lock = container_of(lock_count, struct mutex, count);
-
 	return __mutex_lock_common(lock, TASK_INTERRUPTIBLE, 0, NULL, _RET_IP_);
 }
 #endif