Diffstat (limited to 'arch/powerpc/include/asm/mutex.h')
-rw-r--r--	arch/powerpc/include/asm/mutex.h	135
1 file changed, 130 insertions(+), 5 deletions(-)
diff --git a/arch/powerpc/include/asm/mutex.h b/arch/powerpc/include/asm/mutex.h
index 458c1f7fbc18..dabc01c727b8 100644
--- a/arch/powerpc/include/asm/mutex.h
+++ b/arch/powerpc/include/asm/mutex.h
@@ -1,9 +1,134 @@
 /*
- * Pull in the generic implementation for the mutex fastpath.
+ * Optimised mutex implementation of include/asm-generic/mutex-dec.h algorithm
+ */
+#ifndef _ASM_POWERPC_MUTEX_H
+#define _ASM_POWERPC_MUTEX_H
+
+static inline int __mutex_cmpxchg_lock(atomic_t *v, int old, int new)
+{
+	int t;
+
+	__asm__ __volatile__ (
+"1:	lwarx	%0,0,%1		# mutex trylock\n\
+	cmpw	0,%0,%2\n\
+	bne-	2f\n"
+	PPC405_ERR77(0,%1)
+"	stwcx.	%3,0,%1\n\
+	bne-	1b"
+	ISYNC_ON_SMP
+	"\n\
+2:"
+	: "=&r" (t)
+	: "r" (&v->counter), "r" (old), "r" (new)
+	: "cc", "memory");
+
+	return t;
+}
+
+static inline int __mutex_dec_return_lock(atomic_t *v)
+{
+	int t;
+
+	__asm__ __volatile__(
+"1:	lwarx	%0,0,%1		# mutex lock\n\
+	addic	%0,%0,-1\n"
+	PPC405_ERR77(0,%1)
+"	stwcx.	%0,0,%1\n\
+	bne-	1b"
+	ISYNC_ON_SMP
+	: "=&r" (t)
+	: "r" (&v->counter)
+	: "cc", "memory");
+
+	return t;
+}
+
+static inline int __mutex_inc_return_unlock(atomic_t *v)
+{
+	int t;
+
+	__asm__ __volatile__(
+	LWSYNC_ON_SMP
+"1:	lwarx	%0,0,%1		# mutex unlock\n\
+	addic	%0,%0,1\n"
+	PPC405_ERR77(0,%1)
+"	stwcx.	%0,0,%1 \n\
+	bne-	1b"
+	: "=&r" (t)
+	: "r" (&v->counter)
+	: "cc", "memory");
+
+	return t;
+}
+
+/**
+ * __mutex_fastpath_lock - try to take the lock by moving the count
+ *                         from 1 to a 0 value
+ * @count: pointer of type atomic_t
+ * @fail_fn: function to call if the original value was not 1
+ *
+ * Change the count from 1 to a value lower than 1, and call <fail_fn> if
+ * it wasn't 1 originally. This function MUST leave the value lower than
+ * 1 even when the "1" assertion wasn't true.
+ */
+static inline void
+__mutex_fastpath_lock(atomic_t *count, void (*fail_fn)(atomic_t *))
+{
+	if (unlikely(__mutex_dec_return_lock(count) < 0))
+		fail_fn(count);
+}
+
+/**
+ * __mutex_fastpath_lock_retval - try to take the lock by moving the count
+ *                                from 1 to a 0 value
+ * @count: pointer of type atomic_t
+ * @fail_fn: function to call if the original value was not 1
+ *
+ * Change the count from 1 to a value lower than 1, and call <fail_fn> if
+ * it wasn't 1 originally. This function returns 0 if the fastpath succeeds,
+ * or anything the slow path function returns.
+ */
+static inline int
+__mutex_fastpath_lock_retval(atomic_t *count, int (*fail_fn)(atomic_t *))
+{
+	if (unlikely(__mutex_dec_return_lock(count) < 0))
+		return fail_fn(count);
+	return 0;
+}
+
+/**
+ * __mutex_fastpath_unlock - try to promote the count from 0 to 1
+ * @count: pointer of type atomic_t
+ * @fail_fn: function to call if the original value was not 0
+ *
+ * Try to promote the count from 0 to 1. If it wasn't 0, call <fail_fn>.
+ * In the failure case, this function is allowed to either set the value to
+ * 1, or to set it to a value lower than 1.
+ */
+static inline void
+__mutex_fastpath_unlock(atomic_t *count, void (*fail_fn)(atomic_t *))
+{
+	if (unlikely(__mutex_inc_return_unlock(count) <= 0))
+		fail_fn(count);
+}
+
+#define __mutex_slowpath_needs_to_unlock()	1
+
+/**
+ * __mutex_fastpath_trylock - try to acquire the mutex, without waiting
+ *
+ * @count: pointer of type atomic_t
+ * @fail_fn: fallback function
  *
- * TODO: implement optimized primitives instead, or leave the generic
- * implementation in place, or pick the atomic_xchg() based generic
- * implementation. (see asm-generic/mutex-xchg.h for details)
+ * Change the count from 1 to 0, and return 1 (success), or if the count
+ * was not 1, then return 0 (failure).
  */
+static inline int
+__mutex_fastpath_trylock(atomic_t *count, int (*fail_fn)(atomic_t *))
+{
+	if (likely(__mutex_cmpxchg_lock(count, 1, 0) == 1))
+		return 1;
+	return 0;
+}
 
-#include <asm-generic/mutex-dec.h>
+#endif
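
Commentary (not part of the commit):

__mutex_cmpxchg_lock() is an open-coded atomic compare-and-exchange built
from PowerPC's load-reserve/store-conditional pair: lwarx loads the counter
and takes a reservation, cmpw branches to label 2 if the value is not 'old',
and stwcx. stores 'new' only if the reservation still holds, retrying on
failure. PPC405_ERR77() expands to an erratum workaround on affected 405
configurations and to nothing elsewhere, and ISYNC_ON_SMP sits only on the
success path. As a rough user-space analogue -- assuming a GCC/Clang
toolchain, with the hypothetical my_atomic_t and my_cmpxchg_lock standing in
for the kernel's atomic_t and __mutex_cmpxchg_lock -- the same semantics can
be sketched with the __atomic builtins, which emit a comparable
lwarx/stwcx. loop on PowerPC:

#include <stdio.h>

typedef struct { int counter; } my_atomic_t;	/* stand-in for atomic_t */

/* Returns the value found at v->counter, like the kernel routine:
 * 'old' on success, the conflicting value on failure. Acquire ordering
 * applies on success, mirroring the ISYNC_ON_SMP that the asm executes
 * only when the store-conditional goes through. */
static int my_cmpxchg_lock(my_atomic_t *v, int old, int new)
{
	int expected = old;

	__atomic_compare_exchange_n(&v->counter, &expected, new,
				    0 /* strong */,
				    __ATOMIC_ACQUIRE, __ATOMIC_RELAXED);
	return expected;
}

int main(void)
{
	my_atomic_t m = { .counter = 1 };	/* 1 == unlocked */

	/* Same shape as __mutex_fastpath_trylock(): move 1 -> 0. */
	if (my_cmpxchg_lock(&m, 1, 0) == 1)
		printf("trylock succeeded, counter = %d\n", m.counter);

	/* A second trylock fails and leaves the counter untouched. */
	if (my_cmpxchg_lock(&m, 1, 0) != 1)
		printf("trylock failed, counter = %d\n", m.counter);
	return 0;
}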
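The fastpaths implement the asm-generic/mutex-dec.h counter protocol: 1
means unlocked, 0 locked, and a negative count locked with possible waiters,
in which case the slowpath fail_fn is invoked. The barriers are asymmetric
on purpose: ISYNC_ON_SMP after a successful lock gives acquire ordering,
while LWSYNC_ON_SMP before the unlock's lwarx gives release ordering, both
cheaper than the full sync a generic atomic would pay. A hypothetical
user-space sketch of that protocol (the names are illustrative, and
contention handling is elided -- the real slowpath queues the task and
sleeps):

#include <stdio.h>

typedef struct { int counter; } my_atomic_t;	/* stand-in for atomic_t */

/* Toy slowpath: the kernel's fail_fn would queue the task and sleep;
 * here we only report that the fastpath lost. */
static void my_lock_slowpath(my_atomic_t *count)
{
	printf("contended (counter = %d), would enter slowpath\n",
	       count->counter);
}

/* Mirrors __mutex_fastpath_lock: decrement with acquire ordering
 * (ISYNC_ON_SMP in the patch); a result < 0 means the mutex was not
 * free, so punt to the slowpath. */
static void my_fastpath_lock(my_atomic_t *count,
			     void (*fail_fn)(my_atomic_t *))
{
	if (__atomic_sub_fetch(&count->counter, 1, __ATOMIC_ACQUIRE) < 0)
		fail_fn(count);
}

/* Mirrors __mutex_fastpath_unlock: increment with release ordering
 * (LWSYNC_ON_SMP in the patch); a result <= 0 means waiters may exist
 * and the kernel would run its unlock slowpath to wake one. */
static void my_fastpath_unlock(my_atomic_t *count)
{
	if (__atomic_add_fetch(&count->counter, 1, __ATOMIC_RELEASE) <= 0)
		printf("waiters may exist, would wake one\n");
}

int main(void)
{
	my_atomic_t m = { .counter = 1 };	/* 1 == unlocked */

	my_fastpath_lock(&m, my_lock_slowpath);		/* counter: 1 -> 0 */
	printf("locked, counter = %d\n", m.counter);
	my_fastpath_unlock(&m);				/* counter: 0 -> 1 */
	printf("unlocked, counter = %d\n", m.counter);
	return 0;
}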