| author | Paul Mundt <lethal@linux-sh.org> | 2008-12-07 21:25:50 -0500 |
|---|---|---|
| committer | Paul Mundt <lethal@linux-sh.org> | 2008-12-22 04:43:52 -0500 |
| commit | 77ba93a7ac5fb0d9338bffbf97c787b8efe00806 (patch) | |
| tree | 5446453de9b7317e0c5ca255e13e553843c30093 /arch/sh | |
| parent | c6f17cb2272121475c87592560534b157b17544e (diff) | |
sh: Fix up the SH-4A mutex fastpath semantics.
This fixes up the __mutex_fastpath_xxx() routines to match the semantics
noted in the comment. Previously these were looping rather than doing a
single pass, which is counter-intuitive, as the slow path takes care of
the looping for us in the event of contention.
Signed-off-by: Paul Mundt <lethal@linux-sh.org>
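
To make the intended fastpath semantics concrete, here is a minimal C model, not the SH-4A code from this patch; the names (model_atomic_t, model_fastpath_lock) and the use of C11 atomics are assumptions for illustration only. The point it shows is that the fastpath makes exactly one decrement attempt and hands every failure to the slow path via fail_fn(), rather than retrying in a loop itself.

```c
/* Illustrative single-pass model only -- not the kernel's SH-4A implementation. */
#include <stdatomic.h>

typedef struct { atomic_int counter; } model_atomic_t;  /* stand-in for atomic_t */

static inline void
model_fastpath_lock(model_atomic_t *count, void (*fail_fn)(model_atomic_t *))
{
        /* Exactly one decrement attempt: a clean 1 -> 0 transition takes the lock. */
        int res = atomic_fetch_sub_explicit(&count->counter, 1,
                                            memory_order_acquire) - 1;

        /* Any other result means contention; defer to the slow path, no retry here. */
        if (res != 0)
                fail_fn(count);
}
```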
Diffstat (limited to 'arch/sh')
-rw-r--r-- | arch/sh/include/asm/mutex-llsc.h | 37 |
1 file changed, 21 insertions, 16 deletions
diff --git a/arch/sh/include/asm/mutex-llsc.h b/arch/sh/include/asm/mutex-llsc.h
index 7c75af5e734b..a91990c6e8e5 100644
--- a/arch/sh/include/asm/mutex-llsc.h
+++ b/arch/sh/include/asm/mutex-llsc.h
@@ -21,16 +21,18 @@
 static inline void
 __mutex_fastpath_lock(atomic_t *count, void (*fail_fn)(atomic_t *))
 {
-        int __res;
+        int __ex_flag, __res;
 
         __asm__ __volatile__ (
-                "movli.l @%1, %0 \n"
-                "dt      %0      \n"
-                "movco.l %0, @%1 \n"
-                : "=&z" (__res)
+                "movli.l @%2, %0 \n"
+                "add     #-1, %0 \n"
+                "movco.l %0, @%2 \n"
+                "movt    %1      \n"
+                : "=&z" (__res), "=&r" (__ex_flag)
                 : "r" (&(count)->counter)
                 : "t");
 
+        __res |= !__ex_flag;
         if (unlikely(__res != 0))
                 fail_fn(count);
 }
@@ -38,16 +40,18 @@ __mutex_fastpath_lock(atomic_t *count, void (*fail_fn)(atomic_t *))
 static inline int
 __mutex_fastpath_lock_retval(atomic_t *count, int (*fail_fn)(atomic_t *))
 {
-        int __res;
+        int __ex_flag, __res;
 
         __asm__ __volatile__ (
-                "movli.l @%1, %0 \n"
-                "dt      %0      \n"
-                "movco.l %0, @%1 \n"
-                : "=&z" (__res)
+                "movli.l @%2, %0 \n"
+                "add     #-1, %0 \n"
+                "movco.l %0, @%2 \n"
+                "movt    %1      \n"
+                : "=&z" (__res), "=&r" (__ex_flag)
                 : "r" (&(count)->counter)
                 : "t");
 
+        __res |= !__ex_flag;
         if (unlikely(__res != 0))
                 __res = fail_fn(count);
 
@@ -57,18 +61,19 @@ __mutex_fastpath_lock_retval(atomic_t *count, int (*fail_fn)(atomic_t *))
 static inline void
 __mutex_fastpath_unlock(atomic_t *count, void (*fail_fn)(atomic_t *))
 {
-        int __res;
+        int __ex_flag, __res;
 
         __asm__ __volatile__ (
-                "1: movli.l @%1, %0 \n\t"
+                "movli.l @%2, %0 \n\t"
                 "add     #1, %0  \n\t"
-                "movco.l %0, @%1 \n\t"
-                "bf      1b      \n\t"
-                : "=&z" (__res)
+                "movco.l %0, @%2 \n\t"
+                "movt    %1      \n\t"
+                : "=&z" (__res), "=&r" (__ex_flag)
                 : "r" (&(count)->counter)
                 : "t");
 
-        if (unlikely(__res <= 0))
+        __res |= !__ex_flag;
+        if (unlikely(__res != 0))
                 fail_fn(count);
 }
 
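
For reference, the mechanics behind the change: movco.l sets the T bit only when the store-conditional actually commits, movt copies that bit into __ex_flag, and __res |= !__ex_flag then makes the fastpath report failure when the store did not commit, instead of branching back with bf as the old unlock path did. The sketch below models that logic in plain C, with a single compare-and-swap standing in for one movli.l/movco.l attempt; the names and the C11-atomics modelling are assumptions, not the actual kernel code.

```c
#include <stdatomic.h>
#include <stdbool.h>

typedef struct { atomic_int counter; } model_atomic_t;  /* stand-in for atomic_t */

static inline void
model_llsc_lock(model_atomic_t *count, void (*fail_fn)(model_atomic_t *))
{
        int old = atomic_load_explicit(&count->counter, memory_order_relaxed);
        int res = old - 1;                          /* mirrors "add #-1, %0" */

        /* One store-conditional attempt; ex_flag models the T bit read by "movt". */
        bool ex_flag = atomic_compare_exchange_strong(&count->counter, &old, res);

        res |= !ex_flag;        /* a failed store also routes to the slow path */
        if (res != 0)
                fail_fn(count); /* single pass: the slow path does any looping */
}
```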