Diffstat (limited to 'include/asm-generic')
-rw-r--r--  include/asm-generic/local.h       13
-rw-r--r--  include/asm-generic/mutex-dec.h   30
-rw-r--r--  include/asm-generic/mutex-xchg.h  33
3 files changed, 43 insertions, 33 deletions
diff --git a/include/asm-generic/local.h b/include/asm-generic/local.h
index de4614840c2c..9291c24f5819 100644
--- a/include/asm-generic/local.h
+++ b/include/asm-generic/local.h
@@ -7,8 +7,15 @@
 #include <asm/atomic.h>
 #include <asm/types.h>
 
-/* An unsigned long type for operations which are atomic for a single
- * CPU. Usually used in combination with per-cpu variables. */
+/*
+ * A signed long type for operations which are atomic for a single CPU.
+ * Usually used in combination with per-cpu variables.
+ *
+ * This is the default implementation, which uses atomic_long_t. Which is
+ * rather pointless. The whole point behind local_t is that some processors
+ * can perform atomic adds and subtracts in a manner which is atomic wrt IRQs
+ * running on this CPU. local_t allows exploitation of such capabilities.
+ */
 
 /* Implement in terms of atomics. */
 
@@ -20,7 +27,7 @@ typedef struct
 
 #define LOCAL_INIT(i)	{ ATOMIC_LONG_INIT(i) }
 
-#define local_read(l)	((unsigned long)atomic_long_read(&(l)->a))
+#define local_read(l)	atomic_long_read(&(l)->a)
 #define local_set(l,i)	atomic_long_set((&(l)->a),(i))
 #define local_inc(l)	atomic_long_inc(&(l)->a)
 #define local_dec(l)	atomic_long_dec(&(l)->a)
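
The rewritten comment above spells out why local_t exists: some processors can do adds and subtracts that are atomic with respect to IRQs on the local CPU, without the cost of a fully SMP-atomic operation. A minimal sketch of the usage pattern the comment describes follows; the per-cpu counter hits and its helpers are hypothetical, not part of this patch. Note that local_read() now returns a plain (signed) long, matching the reworded comment.

#include <linux/percpu.h>
#include <asm/local.h>

/* One IRQ-atomic counter per CPU; no lock, no bus-locked instruction. */
static DEFINE_PER_CPU(local_t, hits) = LOCAL_INIT(0);

static void count_hit(void)
{
	/* get_cpu_var() disables preemption so we stay on this CPU. */
	local_inc(&get_cpu_var(hits));
	put_cpu_var(hits);
}

static long total_hits(void)
{
	long sum = 0;
	int cpu;

	/* Cross-CPU reads are only approximate while counting continues. */
	for_each_online_cpu(cpu)
		sum += local_read(&per_cpu(hits, cpu));
	return sum;
}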
diff --git a/include/asm-generic/mutex-dec.h b/include/asm-generic/mutex-dec.h
index 40c6d1f86598..29c6ac34e236 100644
--- a/include/asm-generic/mutex-dec.h
+++ b/include/asm-generic/mutex-dec.h
@@ -17,13 +17,14 @@
  * it wasn't 1 originally. This function MUST leave the value lower than
  * 1 even when the "1" assertion wasn't true.
  */
-#define __mutex_fastpath_lock(count, fail_fn)			\
-do {								\
-	if (unlikely(atomic_dec_return(count) < 0))		\
-		fail_fn(count);					\
-	else							\
-		smp_mb();					\
-} while (0)
+static inline void
+__mutex_fastpath_lock(atomic_t *count, fastcall void (*fail_fn)(atomic_t *))
+{
+	if (unlikely(atomic_dec_return(count) < 0))
+		fail_fn(count);
+	else
+		smp_mb();
+}
 
 /**
  * __mutex_fastpath_lock_retval - try to take the lock by moving the count
@@ -36,7 +37,7 @@ do { \
  * or anything the slow path function returns.
  */
 static inline int
-__mutex_fastpath_lock_retval(atomic_t *count, int (*fail_fn)(atomic_t *))
+__mutex_fastpath_lock_retval(atomic_t *count, fastcall int (*fail_fn)(atomic_t *))
 {
 	if (unlikely(atomic_dec_return(count) < 0))
 		return fail_fn(count);
@@ -59,12 +60,13 @@ __mutex_fastpath_lock_retval(atomic_t *count, int (*fail_fn)(atomic_t *))
  * __mutex_slowpath_needs_to_unlock() macro needs to return 1, it needs
  * to return 0 otherwise.
  */
-#define __mutex_fastpath_unlock(count, fail_fn)			\
-do {								\
-	smp_mb();						\
-	if (unlikely(atomic_inc_return(count) <= 0))		\
-		fail_fn(count);					\
-} while (0)
+static inline void
+__mutex_fastpath_unlock(atomic_t *count, fastcall void (*fail_fn)(atomic_t *))
+{
+	smp_mb();
+	if (unlikely(atomic_inc_return(count) <= 0))
+		fail_fn(count);
+}
 
 #define __mutex_slowpath_needs_to_unlock()	1
 
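Turning these fastpath macros into inline functions lets the compiler type-check both arguments: count must really be an atomic_t pointer and fail_fn a fastcall function of the right signature, instead of whatever happened to expand inside the old do/while blocks. For context, a simplified sketch of how the generic mutex core drives these helpers; this is a rough approximation of the call sites in kernel/mutex.c, not code from this patch:

void fastcall mutex_lock(struct mutex *lock)
{
	might_sleep();
	/* Fastpath: one atomic dec; slowpath only when count goes negative. */
	__mutex_fastpath_lock(&lock->count, __mutex_lock_slowpath);
}

void fastcall mutex_unlock(struct mutex *lock)
{
	/* Fastpath: one atomic inc; slowpath only if waiters may exist. */
	__mutex_fastpath_unlock(&lock->count, __mutex_unlock_slowpath);
}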
diff --git a/include/asm-generic/mutex-xchg.h b/include/asm-generic/mutex-xchg.h
index 1d24f47e6c48..32a2100c1aeb 100644
--- a/include/asm-generic/mutex-xchg.h
+++ b/include/asm-generic/mutex-xchg.h
@@ -3,7 +3,7 @@
  *
  * Generic implementation of the mutex fastpath, based on xchg().
  *
- * NOTE: An xchg based implementation is less optimal than an atomic
+ * NOTE: An xchg based implementation might be less optimal than an atomic
  * decrement/increment based implementation. If your architecture
  * has a reasonable atomic dec/inc then you should probably use
  * asm-generic/mutex-dec.h instead, or you could open-code an
@@ -22,14 +22,14 @@
  * wasn't 1 originally. This function MUST leave the value lower than 1
  * even when the "1" assertion wasn't true.
  */
-#define __mutex_fastpath_lock(count, fail_fn)			\
-do {								\
-	if (unlikely(atomic_xchg(count, 0) != 1))		\
-		fail_fn(count);					\
-	else							\
-		smp_mb();					\
-} while (0)
-
+static inline void
+__mutex_fastpath_lock(atomic_t *count, fastcall void (*fail_fn)(atomic_t *))
+{
+	if (unlikely(atomic_xchg(count, 0) != 1))
+		fail_fn(count);
+	else
+		smp_mb();
+}
 
 /**
  * __mutex_fastpath_lock_retval - try to take the lock by moving the count
@@ -42,7 +42,7 @@ do { \
  * or anything the slow path function returns
  */
 static inline int
-__mutex_fastpath_lock_retval(atomic_t *count, int (*fail_fn)(atomic_t *))
+__mutex_fastpath_lock_retval(atomic_t *count, fastcall int (*fail_fn)(atomic_t *))
 {
 	if (unlikely(atomic_xchg(count, 0) != 1))
 		return fail_fn(count);
@@ -64,12 +64,13 @@ __mutex_fastpath_lock_retval(atomic_t *count, int (*fail_fn)(atomic_t *))
  * __mutex_slowpath_needs_to_unlock() macro needs to return 1, it needs
  * to return 0 otherwise.
  */
-#define __mutex_fastpath_unlock(count, fail_fn)			\
-do {								\
-	smp_mb();						\
-	if (unlikely(atomic_xchg(count, 1) != 0))		\
-		fail_fn(count);					\
-} while (0)
+static inline void
+__mutex_fastpath_unlock(atomic_t *count, fastcall void (*fail_fn)(atomic_t *))
+{
+	smp_mb();
+	if (unlikely(atomic_xchg(count, 1) != 0))
+		fail_fn(count);
+}
 
 #define __mutex_slowpath_needs_to_unlock()	0
 
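An architecture selects one of the two generic fastpaths through its own <asm/mutex.h> wrapper. A minimal sketch, assuming a hypothetical architecture ("foo") whose xchg() is cheaper than an atomic dec/inc:

/* include/asm-foo/mutex.h (hypothetical architecture) */
/*
 * No efficient atomic decrement/increment on this CPU, so take the
 * xchg()-based generic fastpath instead of asm-generic/mutex-dec.h.
 */
#include <asm-generic/mutex-xchg.h>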