author    | Joe Perches <joe@perches.com>            | 2008-03-23 04:02:53 -0400
committer | Ingo Molnar <mingo@elte.hu>              | 2008-04-17 11:41:25 -0400
commit    | b2347fad517f61553e03135db60def2392d9c2bc | (patch)
tree      | 8a3dadd51ec75850f0c30d2b1972135a9a12c967 | /include
parent    | 9969b4405469e12070c560ff27dbe587470fc945 | (diff)
include/asm-x86/mutex_32.h: checkpatch cleanups - formatting only
Signed-off-by: Joe Perches <joe@perches.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'include')
-rw-r--r-- | include/asm-x86/mutex_32.h | 64
1 file changed, 30 insertions(+), 34 deletions(-)
diff --git a/include/asm-x86/mutex_32.h b/include/asm-x86/mutex_32.h
index 9a6b3da25914..73e928ef5f03 100644
--- a/include/asm-x86/mutex_32.h
+++ b/include/asm-x86/mutex_32.h
@@ -21,22 +21,20 @@
  * wasn't 1 originally. This function MUST leave the value lower than 1
  * even when the "1" assertion wasn't true.
  */
 #define __mutex_fastpath_lock(count, fail_fn)				\
 do {									\
 	unsigned int dummy;						\
 									\
 	typecheck(atomic_t *, count);					\
 	typecheck_fn(void (*)(atomic_t *), fail_fn);			\
 									\
-	__asm__ __volatile__(						\
-		LOCK_PREFIX "   decl (%%eax)	\n"			\
-			"   jns 1f	\n"				\
-			"   call "#fail_fn"	\n"			\
-		"1:	\n"						\
-									\
-		:"=a" (dummy)						\
-		: "a" (count)						\
-		: "memory", "ecx", "edx");				\
+	asm volatile(LOCK_PREFIX "   decl (%%eax)\n"			\
+		     "   jns 1f\n"					\
+		     "   call " #fail_fn "\n"				\
+		     "1:\n"						\
+		     : "=a" (dummy)					\
+		     : "a" (count)					\
+		     : "memory", "ecx", "edx");				\
 } while (0)


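The change above is purely stylistic (asm volatile instead of __asm__ __volatile__, operand lines reflowed and stray blank continuations dropped), so the macro's contract is untouched: atomically decrement *count and branch to the slow path when the result goes negative. As a plain-C illustration only, a sketch along the lines of the generic decrement-based fallback, not code touched by this patch (the _sketch name is invented for illustration):

static inline void __mutex_fastpath_lock_sketch(atomic_t *count,
						void (*fail_fn)(atomic_t *))
{
	/* count is 1 when unlocked; a negative result means contention */
	if (unlikely(atomic_dec_return(count) < 0))
		fail_fn(count);		/* fall into the slow path */
}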
@@ -50,8 +48,8 @@ do {									\
  * wasn't 1 originally. This function returns 0 if the fastpath succeeds,
  * or anything the slow path function returns
  */
-static inline int
-__mutex_fastpath_lock_retval(atomic_t *count, int (*fail_fn)(atomic_t *))
+static inline int __mutex_fastpath_lock_retval(atomic_t *count,
+					       int (*fail_fn)(atomic_t *))
 {
 	if (unlikely(atomic_dec_return(count) < 0))
 		return fail_fn(count);
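Here only the prototype is re-wrapped; behaviour is identical. For context, a hedged sketch of how the generic mutex code of this era consumes the retval fastpath (the wrapper and slowpath names below are illustrative assumptions, not something this patch defines):

int mutex_lock_interruptible_sketch(struct mutex *lock)
{
	might_sleep();
	/* 0 on fastpath success, otherwise whatever the slow path returns */
	return __mutex_fastpath_lock_retval(&lock->count,
					    __mutex_lock_interruptible_slowpath);
}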
@@ -72,22 +70,20 @@ __mutex_fastpath_lock_retval(atomic_t *count, int (*fail_fn)(atomic_t *))
  * __mutex_slowpath_needs_to_unlock() macro needs to return 1, it needs
  * to return 0 otherwise.
  */
 #define __mutex_fastpath_unlock(count, fail_fn)				\
 do {									\
 	unsigned int dummy;						\
 									\
 	typecheck(atomic_t *, count);					\
 	typecheck_fn(void (*)(atomic_t *), fail_fn);			\
 									\
-	__asm__ __volatile__(						\
-		LOCK_PREFIX "   incl (%%eax)	\n"			\
-			"   jg 1f	\n"				\
-			"   call "#fail_fn"	\n"			\
-		"1:	\n"						\
-									\
-		:"=a" (dummy)						\
-		: "a" (count)						\
-		: "memory", "ecx", "edx");				\
+	asm volatile(LOCK_PREFIX "   incl (%%eax)\n"			\
+		     "   jg 1f\n"					\
+		     "   call " #fail_fn "\n"				\
+		     "1:\n"						\
+		     : "=a" (dummy)					\
+		     : "a" (count)					\
+		     : "memory", "ecx", "edx");				\
 } while (0)

 #define __mutex_slowpath_needs_to_unlock()	1
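As with the lock fastpath, only the asm formatting changes; the semantics stay: increment *count and, if the result is still not positive, waiters are queued and fail_fn must run (which is why __mutex_slowpath_needs_to_unlock() returns 1 here). A plain-C sketch of that contract, mirroring the generic decrement-based fallback rather than this file's asm, with an invented name:

static inline void __mutex_fastpath_unlock_sketch(atomic_t *count,
						  void (*fail_fn)(atomic_t *))
{
	/* a non-positive result after the increment means tasks are waiting */
	if (unlikely(atomic_inc_return(count) <= 0))
		fail_fn(count);		/* slow path wakes up a waiter */
}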
@@ -104,8 +100,8 @@ do {									\
  * Additionally, if the value was < 0 originally, this function must not leave
  * it to 0 on failure.
  */
-static inline int
-__mutex_fastpath_trylock(atomic_t *count, int (*fail_fn)(atomic_t *))
+static inline int __mutex_fastpath_trylock(atomic_t *count,
+					   int (*fail_fn)(atomic_t *))
 {
 	/*
 	 * We have two variants here. The cmpxchg based one is the best one
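The comment this hunk reaches into contrasts two trylock implementations; the cmpxchg-based one it prefers works roughly as follows (a hedged sketch with an invented name, not part of this diff; the real function also falls back to fail_fn where a native cmpxchg is not available):

static inline int __mutex_fastpath_trylock_sketch(atomic_t *count)
{
	/* take the lock only if count can be flipped from 1 (unlocked) to 0 */
	if (likely(atomic_cmpxchg(count, 1, 0) == 1))
		return 1;
	return 0;	/* a failed attempt never disturbs the count */
}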