author		Nicolas Pitre <nico@cam.org>	2006-02-08 16:19:38 -0500
committer	Russell King <rmk+kernel@arm.linux.org.uk>	2006-02-08 16:19:38 -0500
commit		365bf8ac6f5b3d3187cb39444fa87a5b38683ff4 (patch)
tree		6b0e8234ca75d6b5b155c652028a0506875099a9	/include/asm-arm/mutex.h
parent		5964eae835c3b98c69d338950651f7f414f96477 (diff)
[ARM] 3311/1: clean up include/asm-arm/mutex.h
Patch from Nicolas Pitre

Since:

	if (unlikely(__res || __ex_flag))

produces worse code on ARM than:

	if (unlikely(__res | __ex_flag))

I therefore made it more explicit:

	__res |= __ex_flag;
	if (unlikely(__res != 0))

so it is not seen as a typo again.

Also made everything static inline rather than macros for better readability
(both produce the same code after all).

And finally added missing \t from multi-line assembly code.

Signed-off-by: Nicolas Pitre <nico@cam.org>
Acked-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
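Editor's note: the check rewrite described in the message, shown in isolation with the variable names used in the file (a condensed excerpt of the change below, not code beyond what the patch touches):

	/* old form: logical OR, which the message says produces worse code on ARM */
	if (unlikely(__res || __ex_flag))
		fail_fn(count);

	/* new form: fold the strex failure flag into the result, then test once */
	__res |= __ex_flag;
	if (unlikely(__res != 0))
		fail_fn(count);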
Diffstat (limited to 'include/asm-arm/mutex.h')
-rw-r--r--	include/asm-arm/mutex.h	131
1 files changed, 65 insertions, 66 deletions
diff --git a/include/asm-arm/mutex.h b/include/asm-arm/mutex.h
index 6caa59f1f595..cb29d84e690d 100644
--- a/include/asm-arm/mutex.h
+++ b/include/asm-arm/mutex.h
@@ -23,72 +23,71 @@
  * simply bail out immediately through the slow path where the lock will be
  * reattempted until it succeeds.
  */
-#define __mutex_fastpath_lock(count, fail_fn)	\
-do {	\
-	int __ex_flag, __res;	\
-	\
-	typecheck(atomic_t *, count);	\
-	typecheck_fn(fastcall void (*)(atomic_t *), fail_fn);	\
-	\
-	__asm__ (	\
-		"ldrex	%0, [%2]	\n"	\
-		"sub	%0, %0, #1	\n"	\
-		"strex	%1, %0, [%2]	\n"	\
-	\
-		: "=&r" (__res), "=&r" (__ex_flag)	\
-		: "r" (&(count)->counter)	\
-		: "cc","memory" );	\
-	\
-	if (unlikely(__res || __ex_flag))	\
-		fail_fn(count);	\
-} while (0)
+static inline void
+__mutex_fastpath_lock(atomic_t *count, fastcall void (*fail_fn)(atomic_t *))
+{
+	int __ex_flag, __res;
+
+	__asm__ (
+
+		"ldrex	%0, [%2]	\n\t"
+		"sub	%0, %0, #1	\n\t"
+		"strex	%1, %0, [%2]	"
+
+		: "=&r" (__res), "=&r" (__ex_flag)
+		: "r" (&(count)->counter)
+		: "cc","memory" );
+
+	__res |= __ex_flag;
+	if (unlikely(__res != 0))
+		fail_fn(count);
+}
 
-#define __mutex_fastpath_lock_retval(count, fail_fn)	\
-({	\
-	int __ex_flag, __res;	\
-	\
-	typecheck(atomic_t *, count);	\
-	typecheck_fn(fastcall int (*)(atomic_t *), fail_fn);	\
-	\
-	__asm__ (	\
-		"ldrex	%0, [%2]	\n"	\
-		"sub	%0, %0, #1	\n"	\
-		"strex	%1, %0, [%2]	\n"	\
-	\
-		: "=&r" (__res), "=&r" (__ex_flag)	\
-		: "r" (&(count)->counter)	\
-		: "cc","memory" );	\
-	\
-	__res |= __ex_flag;	\
-	if (unlikely(__res != 0))	\
-		__res = fail_fn(count);	\
-	__res;	\
-})
+static inline int
+__mutex_fastpath_lock_retval(atomic_t *count, fastcall int (*fail_fn)(atomic_t *))
+{
+	int __ex_flag, __res;
+
+	__asm__ (
+
+		"ldrex	%0, [%2]	\n\t"
+		"sub	%0, %0, #1	\n\t"
+		"strex	%1, %0, [%2]	"
+
+		: "=&r" (__res), "=&r" (__ex_flag)
+		: "r" (&(count)->counter)
+		: "cc","memory" );
+
+	__res |= __ex_flag;
+	if (unlikely(__res != 0))
+		__res = fail_fn(count);
+	return __res;
+}
 
 /*
  * Same trick is used for the unlock fast path. However the original value,
  * rather than the result, is used to test for success in order to have
  * better generated assembly.
  */
-#define __mutex_fastpath_unlock(count, fail_fn)	\
-do {	\
-	int __ex_flag, __res, __orig;	\
-	\
-	typecheck(atomic_t *, count);	\
-	typecheck_fn(fastcall void (*)(atomic_t *), fail_fn);	\
-	\
-	__asm__ (	\
-		"ldrex	%0, [%3]	\n"	\
-		"add	%1, %0, #1	\n"	\
-		"strex	%2, %1, [%3]	\n"	\
-	\
-		: "=&r" (__orig), "=&r" (__res), "=&r" (__ex_flag)	\
-		: "r" (&(count)->counter)	\
-		: "cc","memory" );	\
-	\
-	if (unlikely(__orig || __ex_flag))	\
-		fail_fn(count);	\
-} while (0)
+static inline void
+__mutex_fastpath_unlock(atomic_t *count, fastcall void (*fail_fn)(atomic_t *))
+{
+	int __ex_flag, __res, __orig;
+
+	__asm__ (
+
+		"ldrex	%0, [%3]	\n\t"
+		"add	%1, %0, #1	\n\t"
+		"strex	%2, %1, [%3]	"
+
+		: "=&r" (__orig), "=&r" (__res), "=&r" (__ex_flag)
+		: "r" (&(count)->counter)
+		: "cc","memory" );
+
+	__orig |= __ex_flag;
+	if (unlikely(__orig != 0))
+		fail_fn(count);
+}
 
 /*
  * If the unlock was done on a contended lock, or if the unlock simply fails
@@ -110,12 +109,12 @@ __mutex_fastpath_trylock(atomic_t *count, int (*fail_fn)(atomic_t *))
 
 	__asm__ (
 
-		"1: ldrex	%0, [%3]	\n"
-		"subs	%1, %0, #1	\n"
-		"strexeq %2, %1, [%3]	\n"
-		"movlt	%0, #0		\n"
-		"cmpeq	%2, #0		\n"
-		"bgt	1b		\n"
+		"1: ldrex	%0, [%3]	\n\t"
+		"subs	%1, %0, #1	\n\t"
+		"strexeq %2, %1, [%3]	\n\t"
+		"movlt	%0, #0		\n\t"
+		"cmpeq	%2, #0		\n\t"
+		"bgt	1b		"
 
 		: "=&r" (__orig), "=&r" (__res), "=&r" (__ex_flag)
 		: "r" (&count->counter)