Diffstat (limited to 'arch/mn10300')
-rw-r--r--  arch/mn10300/include/asm/atomic.h | 125
1 file changed, 42 insertions(+), 83 deletions(-)
diff --git a/arch/mn10300/include/asm/atomic.h b/arch/mn10300/include/asm/atomic.h
index cadeb1e2cdfc..5be655e83e70 100644
--- a/arch/mn10300/include/asm/atomic.h
+++ b/arch/mn10300/include/asm/atomic.h
@@ -33,7 +33,6 @@
  * @v: pointer of type atomic_t
  *
  * Atomically reads the value of @v.  Note that the guaranteed
- * useful range of an atomic_t is only 24 bits.
  */
 #define atomic_read(v)  (ACCESS_ONCE((v)->counter))
 
@@ -43,102 +42,62 @@
  * @i: required value
  *
  * Atomically sets the value of @v to @i.  Note that the guaranteed
- * useful range of an atomic_t is only 24 bits.
  */
 #define atomic_set(v, i) (((v)->counter) = (i))
 
-/**
- * atomic_add_return - add integer to atomic variable
- * @i: integer value to add
- * @v: pointer of type atomic_t
- *
- * Atomically adds @i to @v and returns the result
- * Note that the guaranteed useful range of an atomic_t is only 24 bits.
- */
-static inline int atomic_add_return(int i, atomic_t *v)
-{
-        int retval;
-#ifdef CONFIG_SMP
-        int status;
-
-        asm volatile(
-                "1:     mov     %4,(_AAR,%3)    \n"
-                "       mov     (_ADR,%3),%1    \n"
-                "       add     %5,%1           \n"
-                "       mov     %1,(_ADR,%3)    \n"
-                "       mov     (_ADR,%3),%0    \n"     /* flush */
-                "       mov     (_ASR,%3),%0    \n"
-                "       or      %0,%0           \n"
-                "       bne     1b              \n"
-                : "=&r"(status), "=&r"(retval), "=m"(v->counter)
-                : "a"(ATOMIC_OPS_BASE_ADDR), "r"(&v->counter), "r"(i)
-                : "memory", "cc");
-
-#else
-        unsigned long flags;
+#define ATOMIC_OP(op)                                                   \
+static inline void atomic_##op(int i, atomic_t *v)                      \
+{                                                                       \
+        int retval, status;                                             \
+                                                                        \
+        asm volatile(                                                   \
+                "1:     mov     %4,(_AAR,%3)    \n"                     \
+                "       mov     (_ADR,%3),%1    \n"                     \
+                "       " #op " %5,%1           \n"                     \
+                "       mov     %1,(_ADR,%3)    \n"                     \
+                "       mov     (_ADR,%3),%0    \n"     /* flush */     \
+                "       mov     (_ASR,%3),%0    \n"                     \
+                "       or      %0,%0           \n"                     \
+                "       bne     1b              \n"                     \
+                : "=&r"(status), "=&r"(retval), "=m"(v->counter)        \
+                : "a"(ATOMIC_OPS_BASE_ADDR), "r"(&v->counter), "r"(i)   \
+                : "memory", "cc");                                      \
+}
 
-        flags = arch_local_cli_save();
-        retval = v->counter;
-        retval += i;
-        v->counter = retval;
-        arch_local_irq_restore(flags);
-#endif
-        return retval;
+#define ATOMIC_OP_RETURN(op)                                            \
+static inline int atomic_##op##_return(int i, atomic_t *v)              \
+{                                                                       \
+        int retval, status;                                             \
+                                                                        \
+        asm volatile(                                                   \
+                "1:     mov     %4,(_AAR,%3)    \n"                     \
+                "       mov     (_ADR,%3),%1    \n"                     \
+                "       " #op " %5,%1           \n"                     \
+                "       mov     %1,(_ADR,%3)    \n"                     \
+                "       mov     (_ADR,%3),%0    \n"     /* flush */     \
+                "       mov     (_ASR,%3),%0    \n"                     \
+                "       or      %0,%0           \n"                     \
+                "       bne     1b              \n"                     \
+                : "=&r"(status), "=&r"(retval), "=m"(v->counter)        \
+                : "a"(ATOMIC_OPS_BASE_ADDR), "r"(&v->counter), "r"(i)   \
+                : "memory", "cc");                                      \
+        return retval;                                                  \
 }
 
-/**
- * atomic_sub_return - subtract integer from atomic variable
- * @i: integer value to subtract
- * @v: pointer of type atomic_t
- *
- * Atomically subtracts @i from @v and returns the result
- * Note that the guaranteed useful range of an atomic_t is only 24 bits.
- */
-static inline int atomic_sub_return(int i, atomic_t *v)
-{
-        int retval;
-#ifdef CONFIG_SMP
-        int status;
+#define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_OP_RETURN(op)
 
-        asm volatile(
-                "1:     mov     %4,(_AAR,%3)    \n"
-                "       mov     (_ADR,%3),%1    \n"
-                "       sub     %5,%1           \n"
-                "       mov     %1,(_ADR,%3)    \n"
-                "       mov     (_ADR,%3),%0    \n"     /* flush */
-                "       mov     (_ASR,%3),%0    \n"
-                "       or      %0,%0           \n"
-                "       bne     1b              \n"
-                : "=&r"(status), "=&r"(retval), "=m"(v->counter)
-                : "a"(ATOMIC_OPS_BASE_ADDR), "r"(&v->counter), "r"(i)
-                : "memory", "cc");
+ATOMIC_OPS(add)
+ATOMIC_OPS(sub)
 
-#else
-        unsigned long flags;
-        flags = arch_local_cli_save();
-        retval = v->counter;
-        retval -= i;
-        v->counter = retval;
-        arch_local_irq_restore(flags);
-#endif
-        return retval;
-}
+#undef ATOMIC_OPS
+#undef ATOMIC_OP_RETURN
+#undef ATOMIC_OP
 
 static inline int atomic_add_negative(int i, atomic_t *v)
 {
         return atomic_add_return(i, v) < 0;
 }
 
-static inline void atomic_add(int i, atomic_t *v)
-{
-        atomic_add_return(i, v);
-}
-
-static inline void atomic_sub(int i, atomic_t *v)
-{
-        atomic_sub_return(i, v);
-}
-
 static inline void atomic_inc(atomic_t *v)
 {
         atomic_add_return(1, v);
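
Note: ATOMIC_OPS(add) in the hunk above stamps out two functions at preprocessing time, with #op pasted into the function name and stringized into the asm template. A hand-written sketch of the expansion for "add" follows; the per-line comments are an editorial gloss on the _AAR/_ADR/_ASR atomic-operation-unit sequence and are not part of the patch:

        static inline void atomic_add(int i, atomic_t *v)
        {
                int retval, status;

                asm volatile(
                        "1:     mov     %4,(_AAR,%3)    \n"     /* point the atomic unit at v->counter */
                        "       mov     (_ADR,%3),%1    \n"     /* load the current value */
                        "       add     %5,%1           \n"     /* #op expanded to "add" */
                        "       mov     %1,(_ADR,%3)    \n"     /* write the new value back */
                        "       mov     (_ADR,%3),%0    \n"     /* flush */
                        "       mov     (_ASR,%3),%0    \n"     /* read the unit's status */
                        "       or      %0,%0           \n"     /* set condition codes from status */
                        "       bne     1b              \n"     /* non-zero status: retry the whole sequence */
                        : "=&r"(status), "=&r"(retval), "=m"(v->counter)
                        : "a"(ATOMIC_OPS_BASE_ADDR), "r"(&v->counter), "r"(i)
                        : "memory", "cc");
        }

atomic_add_return(i, v) expands to the same retry loop followed by "return retval;", so both variants share a single asm body and differ only in whether the updated value is handed back to the caller.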