author		Joe Perches <joe@perches.com>	2008-03-23 04:02:54 -0400
committer	Ingo Molnar <mingo@elte.hu>	2008-04-17 11:41:25 -0400
commit		2c4e8830414de84cc969a1f9685f4c48b91ca6e7 (patch)
tree		60d09c3a1f5f2b471cf3ae845a3e4d29e8bd20da
parent		b2347fad517f61553e03135db60def2392d9c2bc (diff)
include/asm-x86/mutex_64.h: checkpatch cleanups - formatting only
Signed-off-by: Joe Perches <joe@perches.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
-rw-r--r--	include/asm-x86/mutex_64.h	73
1 file changed, 34 insertions, 39 deletions
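
The change is purely a style pass to quiet checkpatch: the GNU-specific __asm__ __volatile__ spelling becomes the preferred asm volatile, the stringified #fail_fn macro argument gets a space on each side of the surrounding string literals, and the output/input/clobber lists are re-indented under the opening asm volatile(. As a minimal sketch of the spelling change only (the function names below are made up for illustration and are not part of the patch), both forms are accepted by gcc and emit the same instruction:

	/* older spelling, flagged by checkpatch */
	static inline void relax_old(void)
	{
		__asm__ __volatile__("rep; nop" ::: "memory");
	}

	/* spelling preferred after the cleanup */
	static inline void relax_new(void)
	{
		asm volatile("rep; nop" ::: "memory");
	}
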
diff --git a/include/asm-x86/mutex_64.h b/include/asm-x86/mutex_64.h
index 6c2949a3c677..f3fae9becb38 100644
--- a/include/asm-x86/mutex_64.h
+++ b/include/asm-x86/mutex_64.h
@@ -16,23 +16,21 @@
  *
  * Atomically decrements @v and calls <fail_fn> if the result is negative.
  */
 #define __mutex_fastpath_lock(v, fail_fn)			\
 do {								\
 	unsigned long dummy;					\
 								\
 	typecheck(atomic_t *, v);				\
 	typecheck_fn(void (*)(atomic_t *), fail_fn);		\
 								\
-	__asm__ __volatile__(					\
-		LOCK_PREFIX " decl (%%rdi)	\n"		\
-			" jns 1f		\n"		\
-			" call "#fail_fn"	\n"		\
-			"1:"					\
-								\
-		:"=D" (dummy)					\
-		: "D" (v)					\
-		: "rax", "rsi", "rdx", "rcx",			\
-		  "r8", "r9", "r10", "r11", "memory");		\
+	asm volatile(LOCK_PREFIX " decl (%%rdi)\n"		\
+		     " jns 1f \n"				\
+		     " call " #fail_fn "\n"			\
+		     "1:"					\
+		     : "=D" (dummy)				\
+		     : "D" (v)					\
+		     : "rax", "rsi", "rdx", "rcx",		\
+		       "r8", "r9", "r10", "r11", "memory");	\
 } while (0)

 /**
@@ -45,9 +43,8 @@ do { \
  * it wasn't 1 originally. This function returns 0 if the fastpath succeeds,
  * or anything the slow path function returns
  */
-static inline int
-__mutex_fastpath_lock_retval(atomic_t *count,
-			     int (*fail_fn)(atomic_t *))
+static inline int __mutex_fastpath_lock_retval(atomic_t *count,
+					       int (*fail_fn)(atomic_t *))
 {
 	if (unlikely(atomic_dec_return(count) < 0))
 		return fail_fn(count);
@@ -62,23 +59,21 @@ __mutex_fastpath_lock_retval(atomic_t *count,
  *
  * Atomically increments @v and calls <fail_fn> if the result is nonpositive.
  */
 #define __mutex_fastpath_unlock(v, fail_fn)			\
 do {								\
 	unsigned long dummy;					\
 								\
 	typecheck(atomic_t *, v);				\
 	typecheck_fn(void (*)(atomic_t *), fail_fn);		\
 								\
-	__asm__ __volatile__(					\
-		LOCK_PREFIX " incl (%%rdi)	\n"		\
-			" jg 1f			\n"		\
-			" call "#fail_fn"	\n"		\
-			"1: "					\
-								\
-		:"=D" (dummy)					\
-		: "D" (v)					\
-		: "rax", "rsi", "rdx", "rcx",			\
-		  "r8", "r9", "r10", "r11", "memory");		\
+	asm volatile(LOCK_PREFIX " incl (%%rdi)\n"		\
+		     " jg 1f\n"					\
+		     " call " #fail_fn "\n"			\
+		     "1:"					\
+		     : "=D" (dummy)				\
+		     : "D" (v)					\
+		     : "rax", "rsi", "rdx", "rcx",		\
+		       "r8", "r9", "r10", "r11", "memory");	\
 } while (0)

 #define __mutex_slowpath_needs_to_unlock()	1
@@ -93,8 +88,8 @@ do { \
  * if it wasn't 1 originally. [the fallback function is never used on
  * x86_64, because all x86_64 CPUs have a CMPXCHG instruction.]
  */
-static inline int
-__mutex_fastpath_trylock(atomic_t *count, int (*fail_fn)(atomic_t *))
+static inline int __mutex_fastpath_trylock(atomic_t *count,
+					   int (*fail_fn)(atomic_t *))
 {
 	if (likely(atomic_cmpxchg(count, 1, 0) == 1))
 		return 1;
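
For readers of the generic mutex code, the asm fastpaths above are the whole uncontended path: a single locked decl/incl on the counter, with a conditional jump straight past the slow-path call. A rough C rendering of the same contract (illustrative sketch only; the helper names are hypothetical and the counter follows the usual kernel mutex convention of 1 = unlocked, 0 = locked, negative = locked with possible waiters):

	static inline void fastpath_lock_sketch(atomic_t *v,
						void (*fail_fn)(atomic_t *))
	{
		/* "lock decl (%rdi); jns 1f" - take the slow path only if
		 * the count went negative, i.e. the mutex was already held */
		if (atomic_dec_return(v) < 0)
			fail_fn(v);
	}

	static inline void fastpath_unlock_sketch(atomic_t *v,
						  void (*fail_fn)(atomic_t *))
	{
		/* "lock incl (%rdi); jg 1f" - take the slow path unless the
		 * count became positive again, since waiters may be queued */
		if (atomic_inc_return(v) <= 0)
			fail_fn(v);
	}

The register clobber list in the macros is there because the slow-path call follows the normal x86_64 calling convention, so every caller-saved register must be assumed dead after it.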