author		Takashi Yoshii <yoshii.takashi@renesas.com>	2009-01-28 04:29:13 -0500
committer	Paul Mundt <lethal@linux-sh.org>	2009-01-28 21:56:03 -0500
commit		c20f326a62c046ee958c3aa584f183201adb229f
tree		1e8c72e5fd824d4182e14a21410c0e714e080dfe
parent		03f07876df2565321871a2dbf33c5c737df185df
sh: Fix up T-bit error handling in SH-4A mutex fastpath.
This corrects a deadlock encountered on ap325 in the cases where the
mutex is contended and we have to fall back on the slow path.
Signed-off-by: Takashi YOSHII <yoshii.takashi@renesas.com>
Signed-off-by: Kuninori Morimoto <morimoto.kuninori@renesas.com>
Signed-off-by: Paul Mundt <lethal@linux-sh.org>
 arch/sh/include/asm/mutex-llsc.h | 21 +++++++++------------
 1 file changed, 9 insertions(+), 12 deletions(-)
diff --git a/arch/sh/include/asm/mutex-llsc.h b/arch/sh/include/asm/mutex-llsc.h
index ee839ee58ac8..090358a7e1bb 100644
--- a/arch/sh/include/asm/mutex-llsc.h
+++ b/arch/sh/include/asm/mutex-llsc.h
@@ -21,38 +21,36 @@
 static inline void
 __mutex_fastpath_lock(atomic_t *count, void (*fail_fn)(atomic_t *))
 {
-	int __ex_flag, __res;
+	int __done, __res;
 
 	__asm__ __volatile__ (
 		"movli.l	@%2, %0	\n"
 		"add		#-1, %0	\n"
 		"movco.l	%0, @%2	\n"
 		"movt		%1	\n"
-		: "=&z" (__res), "=&r" (__ex_flag)
+		: "=&z" (__res), "=&r" (__done)
 		: "r" (&(count)->counter)
 		: "t");
 
-	__res |= !__ex_flag;
-	if (unlikely(__res != 0))
+	if (unlikely(!__done || __res != 0))
 		fail_fn(count);
 }
 
 static inline int
 __mutex_fastpath_lock_retval(atomic_t *count, int (*fail_fn)(atomic_t *))
 {
-	int __ex_flag, __res;
+	int __done, __res;
 
 	__asm__ __volatile__ (
 		"movli.l	@%2, %0	\n"
 		"add		#-1, %0	\n"
 		"movco.l	%0, @%2	\n"
 		"movt		%1	\n"
-		: "=&z" (__res), "=&r" (__ex_flag)
+		: "=&z" (__res), "=&r" (__done)
 		: "r" (&(count)->counter)
 		: "t");
 
-	__res |= !__ex_flag;
-	if (unlikely(__res != 0))
+	if (unlikely(!__done || __res != 0))
 		__res = fail_fn(count);
 
 	return __res;
@@ -61,19 +59,18 @@ __mutex_fastpath_lock_retval(atomic_t *count, int (*fail_fn)(atomic_t *))
 static inline void
 __mutex_fastpath_unlock(atomic_t *count, void (*fail_fn)(atomic_t *))
 {
-	int __ex_flag, __res;
+	int __done, __res;
 
 	__asm__ __volatile__ (
 		"movli.l	@%2, %0	\n\t"
 		"add		#1, %0	\n\t"
 		"movco.l	%0, @%2	\n\t"
 		"movt		%1	\n\t"
-		: "=&z" (__res), "=&r" (__ex_flag)
+		: "=&z" (__res), "=&r" (__done)
 		: "r" (&(count)->counter)
 		: "t");
 
-	__res |= !__ex_flag;
-	if (unlikely(__res <= 0))
+	if (unlikely(!__done || __res <= 0))
 		fail_fn(count);
 }
 
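Editorial note: the following is a minimal user-space sketch of the failure-check logic as read from the diff above, not kernel code. The function names and sample values are illustrative assumptions; the T bit copied out by movt is modelled as a plain int ("done"), and "res" stands for the counter value computed by the movli.l/add sequence.

#include <stdio.h>

/*
 * Illustrative model of the unlock fastpath test (assumed semantics):
 *   done - T bit from movt: 1 if movco.l stored, 0 if the reservation was lost
 *   res  - incremented counter value computed before the store attempt
 */

/* Pre-patch: fold the T bit into the result, then test the result. */
static int old_unlock_takes_slowpath(int res, int ex_flag)
{
	res |= !ex_flag;
	return res <= 0;
}

/* Post-patch: test the T bit and the counter value separately. */
static int new_unlock_takes_slowpath(int res, int done)
{
	return !done || res <= 0;
}

int main(void)
{
	/*
	 * Contended unlock whose movco.l fails: the add produced 0 (so there
	 * may be a waiter), but nothing was actually stored back.
	 */
	int res = 0, done = 0;

	printf("pre-patch  falls back: %d\n", old_unlock_takes_slowpath(res, done));
	printf("post-patch falls back: %d\n", new_unlock_takes_slowpath(res, done));
	return 0;
}

In this sample case the pre-patch test OR-s 1 into a result of 0 and ends up above zero, so fail_fn() is skipped even though the counter was never updated and a waiter may be pending; the post-patch test checks the T bit explicitly and falls back. This appears consistent with the contended-case deadlock the commit message describes, though the scenario chosen here is an assumption for illustration.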