author	Linus Torvalds <torvalds@g5.osdl.org>	2006-01-09 20:31:38 -0500
committer	Linus Torvalds <torvalds@g5.osdl.org>	2006-01-09 20:31:38 -0500
commit	80c0531514516e43ae118ddf38424e06e5c3cb3c (patch)
tree	2eef8cf8fdf505b18f83078d1eb41167e98f5b54 /include/asm-arm
parent	a457aa6c2bdd743bbbffd3f9e4fdbd8c71f8af1b (diff)
parent	11b751ae8c8ca3fa24c85bd5a3e51dd9f95cda17 (diff)
Merge master.kernel.org:/pub/scm/linux/kernel/git/mingo/mutex-2.6
Diffstat (limited to 'include/asm-arm')
-rw-r--r--	include/asm-arm/atomic.h	  2
-rw-r--r--	include/asm-arm/mutex.h		128
2 files changed, 130 insertions(+), 0 deletions(-)
diff --git a/include/asm-arm/atomic.h b/include/asm-arm/atomic.h
index f72b63309bc5..3d7283d84405 100644
--- a/include/asm-arm/atomic.h
+++ b/include/asm-arm/atomic.h
@@ -175,6 +175,8 @@ static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
 
 #endif /* __LINUX_ARM_ARCH__ */
 
+#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
+
 static inline int atomic_add_unless(atomic_t *v, int a, int u)
 {
 	int c, old;
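The atomic_xchg() helper added above is what the xchg-based generic mutex fastpath (asm-generic/mutex-xchg.h, which the new ARM header below includes for pre-ARMv6 CPUs) builds on: a count of 1 means unlocked, so the fastpath swaps in 0 and bails to the slow path if the previous value was anything else. The following is only a simplified sketch of that approach, not the verbatim asm-generic code:

	/*
	 * Simplified sketch of an xchg-based lock fastpath built on
	 * atomic_xchg(); for illustration only.
	 */
	static inline void
	__mutex_fastpath_lock(atomic_t *count, void (*fail_fn)(atomic_t *))
	{
		if (unlikely(atomic_xchg(count, 0) != 1))
			fail_fn(count);		/* contended: take the slow path */
	}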
diff --git a/include/asm-arm/mutex.h b/include/asm-arm/mutex.h
new file mode 100644
index 000000000000..6caa59f1f595
--- /dev/null
+++ b/include/asm-arm/mutex.h
@@ -0,0 +1,128 @@
+/*
+ * include/asm-arm/mutex.h
+ *
+ * ARM optimized mutex locking primitives
+ *
+ * Please look into asm-generic/mutex-xchg.h for a formal definition.
+ */
+#ifndef _ASM_MUTEX_H
+#define _ASM_MUTEX_H
+
+#if __LINUX_ARM_ARCH__ < 6
+/* On pre-ARMv6 hardware the swp based implementation is the most efficient. */
+# include <asm-generic/mutex-xchg.h>
+#else
+
+/*
+ * Attempting to lock a mutex on ARMv6+ can be done with a bastardized
+ * atomic decrement (it is not a reliable atomic decrement but it satisfies
+ * the defined semantics for our purpose, while being smaller and faster
+ * than a real atomic decrement or atomic swap).  The idea is to attempt
+ * decrementing the lock value only once.  If once decremented it isn't zero,
+ * or if its store-back fails due to a dispute on the exclusive store, we
+ * simply bail out immediately through the slow path where the lock will be
+ * reattempted until it succeeds.
+ */
+#define __mutex_fastpath_lock(count, fail_fn)				\
+do {									\
+	int __ex_flag, __res;						\
+									\
+	typecheck(atomic_t *, count);					\
+	typecheck_fn(fastcall void (*)(atomic_t *), fail_fn);		\
+									\
+	__asm__ (							\
+		"ldrex	%0, [%2]	\n"				\
+		"sub	%0, %0, #1	\n"				\
+		"strex	%1, %0, [%2]	\n"				\
+									\
+		: "=&r" (__res), "=&r" (__ex_flag)			\
+		: "r" (&(count)->counter)				\
+		: "cc","memory" );					\
+									\
+	if (unlikely(__res || __ex_flag))				\
+		fail_fn(count);						\
+} while (0)
+
+#define __mutex_fastpath_lock_retval(count, fail_fn)			\
+({									\
+	int __ex_flag, __res;						\
+									\
+	typecheck(atomic_t *, count);					\
+	typecheck_fn(fastcall int (*)(atomic_t *), fail_fn);		\
+									\
+	__asm__ (							\
+		"ldrex	%0, [%2]	\n"				\
+		"sub	%0, %0, #1	\n"				\
+		"strex	%1, %0, [%2]	\n"				\
+									\
+		: "=&r" (__res), "=&r" (__ex_flag)			\
+		: "r" (&(count)->counter)				\
+		: "cc","memory" );					\
+									\
+	__res |= __ex_flag;						\
+	if (unlikely(__res != 0))					\
+		__res = fail_fn(count);					\
+	__res;								\
+})
+
+/*
+ * Same trick is used for the unlock fast path. However the original value,
+ * rather than the result, is used to test for success in order to have
+ * better generated assembly.
+ */
+#define __mutex_fastpath_unlock(count, fail_fn)				\
+do {									\
+	int __ex_flag, __res, __orig;					\
+									\
+	typecheck(atomic_t *, count);					\
+	typecheck_fn(fastcall void (*)(atomic_t *), fail_fn);		\
+									\
+	__asm__ (							\
+		"ldrex	%0, [%3]	\n"				\
+		"add	%1, %0, #1	\n"				\
+		"strex	%2, %1, [%3]	\n"				\
+									\
+		: "=&r" (__orig), "=&r" (__res), "=&r" (__ex_flag)	\
+		: "r" (&(count)->counter)				\
+		: "cc","memory" );					\
+									\
+	if (unlikely(__orig || __ex_flag))				\
+		fail_fn(count);						\
+} while (0)
+
+/*
+ * If the unlock was done on a contended lock, or if the unlock simply fails
+ * then the mutex remains locked.
+ */
+#define __mutex_slowpath_needs_to_unlock()	1
+
+/*
+ * For __mutex_fastpath_trylock we use another construct which could be
+ * described as a "single value cmpxchg".
+ *
+ * This provides the needed trylock semantics like cmpxchg would, but it is
+ * lighter and less generic than a true cmpxchg implementation.
+ */
+static inline int
+__mutex_fastpath_trylock(atomic_t *count, int (*fail_fn)(atomic_t *))
+{
+	int __ex_flag, __res, __orig;
+
+	__asm__ (
+
+		"1: ldrex	%0, [%3]	\n"
+		"subs	%1, %0, #1	\n"
+		"strexeq	%2, %1, [%3]	\n"
+		"movlt	%0, #0	\n"
+		"cmpeq	%2, #0	\n"
+		"bgt	1b	\n"
+
+		: "=&r" (__orig), "=&r" (__res), "=&r" (__ex_flag)
+		: "r" (&count->counter)
+		: "cc", "memory" );
+
+	return __orig;
+}
+
+#endif
+#endif
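For context, the fastpath macros in this header are not called directly by users; the generic mutex layer selects them and passes its slow-path handler as fail_fn. The following is only a rough sketch of that calling convention (simplified from the generic kernel/mutex.c of this period; __mutex_lock_slowpath and __mutex_unlock_slowpath belong to that generic code, not to this patch):

	/*
	 * Sketch only: roughly how the generic mutex layer is expected to
	 * invoke the per-architecture fastpath hooks defined above.
	 */
	void fastcall __sched mutex_lock(struct mutex *lock)
	{
		might_sleep();
		/* 1 -> 0 is the uncontended case; anything else takes the slow path */
		__mutex_fastpath_lock(&lock->count, __mutex_lock_slowpath);
	}

	void fastcall __sched mutex_unlock(struct mutex *lock)
	{
		/* 0 -> 1 is the uncontended case; pending waiters force the slow path */
		__mutex_fastpath_unlock(&lock->count, __mutex_unlock_slowpath);
	}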