-rw-r--r--   arch/frv/include/asm/atomic.h        | 119
-rw-r--r--   arch/frv/include/asm/atomic_defs.h   | 172
-rw-r--r--   arch/frv/include/asm/bitops.h        |  99
-rw-r--r--   arch/frv/kernel/dma.c                |   6
-rw-r--r--   arch/frv/kernel/frv_ksyms.c          |   5
-rw-r--r--   arch/frv/lib/Makefile                |   2
-rw-r--r--   arch/frv/lib/atomic-lib.c            |   7
-rw-r--r--   arch/frv/lib/atomic-ops.S            | 110
-rw-r--r--   arch/frv/lib/atomic64-ops.S          |  94
9 files changed, 259 insertions, 355 deletions
diff --git a/arch/frv/include/asm/atomic.h b/arch/frv/include/asm/atomic.h
index 102190a61d65..74d22454d7c6 100644
--- a/arch/frv/include/asm/atomic.h
+++ b/arch/frv/include/asm/atomic.h
@@ -15,7 +15,6 @@
 #define _ASM_ATOMIC_H
 
 #include <linux/types.h>
-#include <asm/spr-regs.h>
 #include <asm/cmpxchg.h>
 #include <asm/barrier.h>
 
@@ -23,6 +22,8 @@
 #error not SMP safe
 #endif
 
+#include <asm/atomic_defs.h>
+
 /*
  * Atomic operations that C can't guarantee us. Useful for
  * resource counting etc..
@@ -34,56 +35,26 @@
 #define atomic_read(v)		ACCESS_ONCE((v)->counter)
 #define atomic_set(v, i)	(((v)->counter) = (i))
 
-#ifndef CONFIG_FRV_OUTOFLINE_ATOMIC_OPS
-static inline int atomic_add_return(int i, atomic_t *v)
+static inline int atomic_inc_return(atomic_t *v)
 {
-	unsigned long val;
+	return __atomic_add_return(1, &v->counter);
+}
 
-	asm("0:						\n"
-	    "	orcc		gr0,gr0,gr0,icc3	\n"	/* set ICC3.Z */
-	    "	ckeq		icc3,cc7		\n"
-	    "	ld.p		%M0,%1			\n"	/* LD.P/ORCR must be atomic */
-	    "	orcr		cc7,cc7,cc3		\n"	/* set CC3 to true */
-	    "	add%I2		%1,%2,%1		\n"
-	    "	cst.p		%1,%M0		,cc3,#1	\n"
-	    "	corcc		gr29,gr29,gr0	,cc3,#1	\n"	/* clear ICC3.Z if store happens */
-	    "	beq		icc3,#0,0b		\n"
-	    : "+U"(v->counter), "=&r"(val)
-	    : "NPr"(i)
-	    : "memory", "cc7", "cc3", "icc3"
-	    );
+static inline int atomic_dec_return(atomic_t *v)
+{
+	return __atomic_sub_return(1, &v->counter);
+}
 
-	return val;
+static inline int atomic_add_return(int i, atomic_t *v)
+{
+	return __atomic_add_return(i, &v->counter);
 }
 
 static inline int atomic_sub_return(int i, atomic_t *v)
 {
-	unsigned long val;
-
-	asm("0:						\n"
-	    "	orcc		gr0,gr0,gr0,icc3	\n"	/* set ICC3.Z */
-	    "	ckeq		icc3,cc7		\n"
-	    "	ld.p		%M0,%1			\n"	/* LD.P/ORCR must be atomic */
-	    "	orcr		cc7,cc7,cc3		\n"	/* set CC3 to true */
-	    "	sub%I2		%1,%2,%1		\n"
-	    "	cst.p		%1,%M0		,cc3,#1	\n"
-	    "	corcc		gr29,gr29,gr0	,cc3,#1	\n"	/* clear ICC3.Z if store happens */
-	    "	beq		icc3,#0,0b		\n"
-	    : "+U"(v->counter), "=&r"(val)
-	    : "NPr"(i)
-	    : "memory", "cc7", "cc3", "icc3"
-	    );
-
-	return val;
+	return __atomic_sub_return(i, &v->counter);
 }
 
-#else
-
-extern int atomic_add_return(int i, atomic_t *v);
-extern int atomic_sub_return(int i, atomic_t *v);
-
-#endif
-
 static inline int atomic_add_negative(int i, atomic_t *v)
 {
 	return atomic_add_return(i, v) < 0;
@@ -101,17 +72,14 @@ static inline void atomic_sub(int i, atomic_t *v)
 
 static inline void atomic_inc(atomic_t *v)
 {
-	atomic_add_return(1, v);
+	atomic_inc_return(v);
 }
 
 static inline void atomic_dec(atomic_t *v)
 {
-	atomic_sub_return(1, v);
+	atomic_dec_return(v);
 }
 
-#define atomic_dec_return(v)		atomic_sub_return(1, (v))
-#define atomic_inc_return(v)		atomic_add_return(1, (v))
-
 #define atomic_sub_and_test(i,v)	(atomic_sub_return((i), (v)) == 0)
 #define atomic_dec_and_test(v)		(atomic_sub_return(1, (v)) == 0)
 #define atomic_inc_and_test(v)		(atomic_add_return(1, (v)) == 0)
@@ -120,18 +88,19 @@ static inline void atomic_dec(atomic_t *v)
  * 64-bit atomic ops
  */
 typedef struct {
-	volatile long long counter;
+	long long counter;
 } atomic64_t;
 
 #define ATOMIC64_INIT(i)	{ (i) }
 
-static inline long long atomic64_read(atomic64_t *v)
+static inline long long atomic64_read(const atomic64_t *v)
 {
 	long long counter;
 
 	asm("ldd%I1 %M1,%0"
 	    : "=e"(counter)
 	    : "m"(v->counter));
+
 	return counter;
 }
 
@@ -142,10 +111,25 @@ static inline void atomic64_set(atomic64_t *v, long long i)
 	    : "e"(i));
 }
 
-extern long long atomic64_inc_return(atomic64_t *v);
-extern long long atomic64_dec_return(atomic64_t *v);
-extern long long atomic64_add_return(long long i, atomic64_t *v);
-extern long long atomic64_sub_return(long long i, atomic64_t *v);
+static inline long long atomic64_inc_return(atomic64_t *v)
+{
+	return __atomic64_add_return(1, &v->counter);
+}
+
+static inline long long atomic64_dec_return(atomic64_t *v)
+{
+	return __atomic64_sub_return(1, &v->counter);
+}
+
+static inline long long atomic64_add_return(long long i, atomic64_t *v)
+{
+	return __atomic64_add_return(i, &v->counter);
+}
+
+static inline long long atomic64_sub_return(long long i, atomic64_t *v)
+{
+	return __atomic64_sub_return(i, &v->counter);
+}
 
 static inline long long atomic64_add_negative(long long i, atomic64_t *v)
 {
@@ -176,6 +160,7 @@ static inline void atomic64_dec(atomic64_t *v)
 #define atomic64_dec_and_test(v)	(atomic64_dec_return((v)) == 0)
 #define atomic64_inc_and_test(v)	(atomic64_inc_return((v)) == 0)
 
+
 #define atomic_cmpxchg(v, old, new)	(cmpxchg(&(v)->counter, old, new))
 #define atomic_xchg(v, new)		(xchg(&(v)->counter, new))
 #define atomic64_cmpxchg(v, old, new)	(__cmpxchg_64(old, new, &(v)->counter))
@@ -196,5 +181,33 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
 	return c;
 }
 
+#define ATOMIC_OP(op)						\
+static inline void atomic_##op(int i, atomic_t *v)		\
+{								\
+	(void)__atomic32_fetch_##op(i, &v->counter);		\
+}								\
+								\
+static inline void atomic64_##op(long long i, atomic64_t *v)	\
+{								\
+	(void)__atomic64_fetch_##op(i, &v->counter);		\
+}
+
+#define CONFIG_ARCH_HAS_ATOMIC_OR
+
+ATOMIC_OP(or)
+ATOMIC_OP(and)
+ATOMIC_OP(xor)
+
+#undef ATOMIC_OP
+
+static inline __deprecated void atomic_clear_mask(unsigned int mask, atomic_t *v)
+{
+	atomic_and(~mask, v);
+}
+
+static inline __deprecated void atomic_set_mask(unsigned int mask, atomic_t *v)
+{
+	atomic_or(mask, v);
+}
 
 #endif /* _ASM_ATOMIC_H */
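With this change atomic.h no longer carries any inline asm of its own: every operation funnels through the __atomic_*_return() and __atomic32/64_fetch_*() primitives that asm/atomic_defs.h (next file) either defines inline or declares extern. As a hand-expansion for illustration (not extra code in the patch), ATOMIC_OP(or) above generates:

	static inline void atomic_or(int i, atomic_t *v)
	{
		(void)__atomic32_fetch_or(i, &v->counter);
	}

	static inline void atomic64_or(long long i, atomic64_t *v)
	{
		(void)__atomic64_fetch_or(i, &v->counter);
	}

The (void) cast documents that the old value returned by the fetch primitive is deliberately discarded: atomic_or/and/xor are void operations, and the now-__deprecated atomic_clear_mask()/atomic_set_mask() are kept only as shims on top of them.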
diff --git a/arch/frv/include/asm/atomic_defs.h b/arch/frv/include/asm/atomic_defs.h
new file mode 100644
index 000000000000..36e126d2f801
--- /dev/null
+++ b/arch/frv/include/asm/atomic_defs.h
@@ -0,0 +1,172 @@
+
+#include <asm/spr-regs.h>
+
+#ifdef __ATOMIC_LIB__
+
+#ifdef CONFIG_FRV_OUTOFLINE_ATOMIC_OPS
+
+#define ATOMIC_QUALS
+#define ATOMIC_EXPORT(x)	EXPORT_SYMBOL(x)
+
+#else /* !OUTOFLINE && LIB */
+
+#define ATOMIC_OP_RETURN(op)
+#define ATOMIC_FETCH_OP(op)
+
+#endif /* OUTOFLINE */
+
+#else /* !__ATOMIC_LIB__ */
+
+#define ATOMIC_EXPORT(x)
+
+#ifdef CONFIG_FRV_OUTOFLINE_ATOMIC_OPS
+
+#define ATOMIC_OP_RETURN(op)						\
+extern int __atomic_##op##_return(int i, int *v);			\
+extern long long __atomic64_##op##_return(long long i, long long *v);
+
+#define ATOMIC_FETCH_OP(op)						\
+extern int __atomic32_fetch_##op(int i, int *v);			\
+extern long long __atomic64_fetch_##op(long long i, long long *v);
+
+#else /* !OUTOFLINE && !LIB */
+
+#define ATOMIC_QUALS	static inline
+
+#endif /* OUTOFLINE */
+#endif /* __ATOMIC_LIB__ */
+
+
+/*
+ * Note on the 64-bit inline asm variants...
+ *
+ * CSTD is a conditional instruction and needs a constrained memory reference.
+ * Normally 'U' provides the correct constraints for conditional instructions,
+ * and this is used for the 32-bit version; however, 'U' does not appear to
+ * work for 64-bit values (gcc-4.9).
+ *
+ * The exact constraint is that conditional instructions cannot deal with an
+ * immediate displacement in the memory reference, so what we do is read the
+ * address through a volatile cast into a local variable in order to ensure
+ * we _have_ to compute the correct address without displacement. This allows
+ * us to use the regular 'm' constraint for the memory address.
+ *
+ * Furthermore, the %Ln operand, which prints the low word register (r+1),
+ * really only works for registers; this means we cannot allow immediate
+ * values for the 64-bit versions -- like we do for the 32-bit ones.
+ *
+ */
+
+#ifndef ATOMIC_OP_RETURN
+#define ATOMIC_OP_RETURN(op)						\
+ATOMIC_QUALS int __atomic_##op##_return(int i, int *v)			\
+{									\
+	int val;							\
+									\
+	asm volatile(							\
+		"0:						\n"	\
+		"	orcc		gr0,gr0,gr0,icc3	\n"	\
+		"	ckeq		icc3,cc7		\n"	\
+		"	ld.p		%M0,%1			\n"	\
+		"	orcr		cc7,cc7,cc3		\n"	\
+		"	"#op"%I2	%1,%2,%1		\n"	\
+		"	cst.p		%1,%M0		,cc3,#1	\n"	\
+		"	corcc		gr29,gr29,gr0	,cc3,#1	\n"	\
+		"	beq		icc3,#0,0b		\n"	\
+		: "+U"(*v), "=&r"(val)					\
+		: "NPr"(i)						\
+		: "memory", "cc7", "cc3", "icc3"			\
+		);							\
+									\
+	return val;							\
+}									\
+ATOMIC_EXPORT(__atomic_##op##_return);					\
+									\
+ATOMIC_QUALS long long __atomic64_##op##_return(long long i, long long *v)	\
+{									\
+	long long *__v = READ_ONCE(v);					\
+	long long val;							\
+									\
+	asm volatile(							\
+		"0:						\n"	\
+		"	orcc		gr0,gr0,gr0,icc3	\n"	\
+		"	ckeq		icc3,cc7		\n"	\
+		"	ldd.p		%M0,%1			\n"	\
+		"	orcr		cc7,cc7,cc3		\n"	\
+		"	"#op"cc		%L1,%L2,%L1,icc0	\n"	\
+		"	"#op"x		%1,%2,%1,icc0		\n"	\
+		"	cstd.p		%1,%M0		,cc3,#1	\n"	\
+		"	corcc		gr29,gr29,gr0	,cc3,#1	\n"	\
+		"	beq		icc3,#0,0b		\n"	\
+		: "+m"(*__v), "=&e"(val)				\
+		: "e"(i)						\
+		: "memory", "cc7", "cc3", "icc0", "icc3"		\
+		);							\
+									\
+	return val;							\
+}									\
+ATOMIC_EXPORT(__atomic64_##op##_return);
+#endif
+
+#ifndef ATOMIC_FETCH_OP
+#define ATOMIC_FETCH_OP(op)						\
+ATOMIC_QUALS int __atomic32_fetch_##op(int i, int *v)			\
+{									\
+	int old, tmp;							\
+									\
+	asm volatile(							\
+		"0:						\n"	\
+		"	orcc		gr0,gr0,gr0,icc3	\n"	\
+		"	ckeq		icc3,cc7		\n"	\
+		"	ld.p		%M0,%1			\n"	\
+		"	orcr		cc7,cc7,cc3		\n"	\
+		"	"#op"%I3	%1,%3,%2		\n"	\
+		"	cst.p		%2,%M0		,cc3,#1	\n"	\
+		"	corcc		gr29,gr29,gr0	,cc3,#1	\n"	\
+		"	beq		icc3,#0,0b		\n"	\
+		: "+U"(*v), "=&r"(old), "=r"(tmp)			\
+		: "NPr"(i)						\
+		: "memory", "cc7", "cc3", "icc3"			\
+		);							\
+									\
+	return old;							\
+}									\
+ATOMIC_EXPORT(__atomic32_fetch_##op);					\
+									\
+ATOMIC_QUALS long long __atomic64_fetch_##op(long long i, long long *v)	\
+{									\
+	long long *__v = READ_ONCE(v);					\
+	long long old, tmp;						\
+									\
+	asm volatile(							\
+		"0:						\n"	\
+		"	orcc		gr0,gr0,gr0,icc3	\n"	\
+		"	ckeq		icc3,cc7		\n"	\
+		"	ldd.p		%M0,%1			\n"	\
+		"	orcr		cc7,cc7,cc3		\n"	\
+		"	"#op"		%L1,%L3,%L2		\n"	\
+		"	"#op"		%1,%3,%2		\n"	\
+		"	cstd.p		%2,%M0		,cc3,#1	\n"	\
+		"	corcc		gr29,gr29,gr0	,cc3,#1	\n"	\
+		"	beq		icc3,#0,0b		\n"	\
+		: "+m"(*__v), "=&e"(old), "=e"(tmp)			\
+		: "e"(i)						\
+		: "memory", "cc7", "cc3", "icc3"			\
+		);							\
+									\
+	return old;							\
+}									\
+ATOMIC_EXPORT(__atomic64_fetch_##op);
+#endif
+
+ATOMIC_FETCH_OP(or)
+ATOMIC_FETCH_OP(and)
+ATOMIC_FETCH_OP(xor)
+
+ATOMIC_OP_RETURN(add)
+ATOMIC_OP_RETURN(sub)
+
+#undef ATOMIC_FETCH_OP
+#undef ATOMIC_OP_RETURN
+#undef ATOMIC_QUALS
+#undef ATOMIC_EXPORT
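The conditionals at the top give this header three expansions. With inline atomics (the default), ATOMIC_QUALS is `static inline` and each macro emits a complete inline definition into the including translation unit. With CONFIG_FRV_OUTOFLINE_ATOMIC_OPS, ordinary includers instead get extern declarations only; reading the macros above, ATOMIC_OP_RETURN(add) reduces to:

	extern int __atomic_add_return(int i, int *v);
	extern long long __atomic64_add_return(long long i, long long *v);

Finally, when __ATOMIC_LIB__ is defined (by atomic-lib.c, added below), ATOMIC_QUALS expands to nothing and ATOMIC_EXPORT(x) to EXPORT_SYMBOL(x), so the very same bodies are emitted once as ordinary global functions and exported for modules.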
diff --git a/arch/frv/include/asm/bitops.h b/arch/frv/include/asm/bitops.h
index 96de220ef131..0df8e95e3715 100644
--- a/arch/frv/include/asm/bitops.h
+++ b/arch/frv/include/asm/bitops.h
@@ -25,109 +25,30 @@
 
 #include <asm-generic/bitops/ffz.h>
 
-#ifndef CONFIG_FRV_OUTOFLINE_ATOMIC_OPS
-static inline
-unsigned long atomic_test_and_ANDNOT_mask(unsigned long mask, volatile unsigned long *v)
-{
-	unsigned long old, tmp;
-
-	asm volatile(
-		"0:						\n"
-		"	orcc		gr0,gr0,gr0,icc3	\n"	/* set ICC3.Z */
-		"	ckeq		icc3,cc7		\n"
-		"	ld.p		%M0,%1			\n"	/* LD.P/ORCR are atomic */
-		"	orcr		cc7,cc7,cc3		\n"	/* set CC3 to true */
-		"	and%I3		%1,%3,%2		\n"
-		"	cst.p		%2,%M0		,cc3,#1	\n"	/* if store happens... */
-		"	corcc		gr29,gr29,gr0	,cc3,#1	\n"	/* ... clear ICC3.Z */
-		"	beq		icc3,#0,0b		\n"
-		: "+U"(*v), "=&r"(old), "=r"(tmp)
-		: "NPr"(~mask)
-		: "memory", "cc7", "cc3", "icc3"
-		);
-
-	return old;
-}
-
-static inline
-unsigned long atomic_test_and_OR_mask(unsigned long mask, volatile unsigned long *v)
-{
-	unsigned long old, tmp;
-
-	asm volatile(
-		"0:						\n"
-		"	orcc		gr0,gr0,gr0,icc3	\n"	/* set ICC3.Z */
-		"	ckeq		icc3,cc7		\n"
-		"	ld.p		%M0,%1			\n"	/* LD.P/ORCR are atomic */
-		"	orcr		cc7,cc7,cc3		\n"	/* set CC3 to true */
-		"	or%I3		%1,%3,%2		\n"
-		"	cst.p		%2,%M0		,cc3,#1	\n"	/* if store happens... */
-		"	corcc		gr29,gr29,gr0	,cc3,#1	\n"	/* ... clear ICC3.Z */
-		"	beq		icc3,#0,0b		\n"
-		: "+U"(*v), "=&r"(old), "=r"(tmp)
-		: "NPr"(mask)
-		: "memory", "cc7", "cc3", "icc3"
-		);
-
-	return old;
-}
-
-static inline
-unsigned long atomic_test_and_XOR_mask(unsigned long mask, volatile unsigned long *v)
-{
-	unsigned long old, tmp;
-
-	asm volatile(
-		"0:						\n"
-		"	orcc		gr0,gr0,gr0,icc3	\n"	/* set ICC3.Z */
-		"	ckeq		icc3,cc7		\n"
-		"	ld.p		%M0,%1			\n"	/* LD.P/ORCR are atomic */
-		"	orcr		cc7,cc7,cc3		\n"	/* set CC3 to true */
-		"	xor%I3		%1,%3,%2		\n"
-		"	cst.p		%2,%M0		,cc3,#1	\n"	/* if store happens... */
-		"	corcc		gr29,gr29,gr0	,cc3,#1	\n"	/* ... clear ICC3.Z */
-		"	beq		icc3,#0,0b		\n"
-		: "+U"(*v), "=&r"(old), "=r"(tmp)
-		: "NPr"(mask)
-		: "memory", "cc7", "cc3", "icc3"
-		);
-
-	return old;
-}
-
-#else
-
-extern unsigned long atomic_test_and_ANDNOT_mask(unsigned long mask, volatile unsigned long *v);
-extern unsigned long atomic_test_and_OR_mask(unsigned long mask, volatile unsigned long *v);
-extern unsigned long atomic_test_and_XOR_mask(unsigned long mask, volatile unsigned long *v);
-
-#endif
-
-#define atomic_clear_mask(mask, v)	atomic_test_and_ANDNOT_mask((mask), (v))
-#define atomic_set_mask(mask, v)	atomic_test_and_OR_mask((mask), (v))
+#include <asm/atomic.h>
 
 static inline int test_and_clear_bit(unsigned long nr, volatile void *addr)
 {
-	volatile unsigned long *ptr = addr;
-	unsigned long mask = 1UL << (nr & 31);
+	unsigned int *ptr = (void *)addr;
+	unsigned int mask = 1UL << (nr & 31);
 	ptr += nr >> 5;
-	return (atomic_test_and_ANDNOT_mask(mask, ptr) & mask) != 0;
+	return (__atomic32_fetch_and(~mask, ptr) & mask) != 0;
 }
 
 static inline int test_and_set_bit(unsigned long nr, volatile void *addr)
 {
-	volatile unsigned long *ptr = addr;
-	unsigned long mask = 1UL << (nr & 31);
+	unsigned int *ptr = (void *)addr;
+	unsigned int mask = 1UL << (nr & 31);
 	ptr += nr >> 5;
-	return (atomic_test_and_OR_mask(mask, ptr) & mask) != 0;
+	return (__atomic32_fetch_or(mask, ptr) & mask) != 0;
 }
 
 static inline int test_and_change_bit(unsigned long nr, volatile void *addr)
 {
-	volatile unsigned long *ptr = addr;
-	unsigned long mask = 1UL << (nr & 31);
+	unsigned int *ptr = (void *)addr;
+	unsigned int mask = 1UL << (nr & 31);
 	ptr += nr >> 5;
-	return (atomic_test_and_XOR_mask(mask, ptr) & mask) != 0;
+	return (__atomic32_fetch_xor(mask, ptr) & mask) != 0;
 }
 
 static inline void clear_bit(unsigned long nr, volatile void *addr)
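Each test_and_*_bit() helper splits the bit number into a 32-bit word index (nr >> 5) and an in-word mask (1 << (nr & 31)), applies the corresponding fetch primitive, and tests the mask bit in the returned old value. A quick worked example (hypothetical call, not part of the patch):

	unsigned int bitmap[2] = { 0, 0 };

	/* nr = 37: word index 37 >> 5 == 1, mask 1 << (37 & 31) == 0x20 */
	int was_set = test_and_set_bit(37, bitmap);
	/* was_set == 0, and bitmap[1] == 0x20 afterwards */

Note that test_and_clear_bit() hands ~mask to __atomic32_fetch_and() so the bit is cleared, but still tests mask itself against the returned old value.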
diff --git a/arch/frv/kernel/dma.c b/arch/frv/kernel/dma.c
index 156184e17e57..370dc9fa0b11 100644
--- a/arch/frv/kernel/dma.c
+++ b/arch/frv/kernel/dma.c
@@ -109,13 +109,13 @@ static struct frv_dma_channel frv_dma_channels[FRV_DMA_NCHANS] = {
 
 static DEFINE_RWLOCK(frv_dma_channels_lock);
 
-unsigned long frv_dma_inprogress;
+unsigned int frv_dma_inprogress;
 
 #define frv_clear_dma_inprogress(channel) \
-	atomic_clear_mask(1 << (channel), &frv_dma_inprogress);
+	(void)__atomic32_fetch_and(~(1 << (channel)), &frv_dma_inprogress);
 
 #define frv_set_dma_inprogress(channel) \
-	atomic_set_mask(1 << (channel), &frv_dma_inprogress);
+	(void)__atomic32_fetch_or(1 << (channel), &frv_dma_inprogress);
 
 /*****************************************************************************/
 /*
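Because atomic_clear_mask()/atomic_set_mask() now take an atomic_t (see atomic.h above), the DMA code calls the 32-bit fetch primitives directly on its plain flag word, and frv_dma_inprogress becomes unsigned int to match their operand size. Hand-expanded for illustration, frv_clear_dma_inprogress(3) becomes:

	(void)__atomic32_fetch_and(~(1 << 3), &frv_dma_inprogress);

i.e. bit 3 is cleared atomically and the returned old value is discarded.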
diff --git a/arch/frv/kernel/frv_ksyms.c b/arch/frv/kernel/frv_ksyms.c
index 86c516d96dcd..cdb4ce9960eb 100644
--- a/arch/frv/kernel/frv_ksyms.c
+++ b/arch/frv/kernel/frv_ksyms.c
@@ -58,11 +58,6 @@ EXPORT_SYMBOL(__outsl_ns);
 EXPORT_SYMBOL(__insl_ns);
 
 #ifdef CONFIG_FRV_OUTOFLINE_ATOMIC_OPS
-EXPORT_SYMBOL(atomic_test_and_ANDNOT_mask);
-EXPORT_SYMBOL(atomic_test_and_OR_mask);
-EXPORT_SYMBOL(atomic_test_and_XOR_mask);
-EXPORT_SYMBOL(atomic_add_return);
-EXPORT_SYMBOL(atomic_sub_return);
 EXPORT_SYMBOL(__xchg_32);
 EXPORT_SYMBOL(__cmpxchg_32);
 #endif
diff --git a/arch/frv/lib/Makefile b/arch/frv/lib/Makefile
index 4ff2fb1e6b16..970e8b4f1a02 100644
--- a/arch/frv/lib/Makefile
+++ b/arch/frv/lib/Makefile
@@ -5,4 +5,4 @@
 lib-y := \
 	__ashldi3.o __lshrdi3.o __muldi3.o __ashrdi3.o __negdi2.o __ucmpdi2.o \
 	checksum.o memcpy.o memset.o atomic-ops.o atomic64-ops.o \
-	outsl_ns.o outsl_sw.o insl_ns.o insl_sw.o cache.o
+	outsl_ns.o outsl_sw.o insl_ns.o insl_sw.o cache.o atomic-lib.o
diff --git a/arch/frv/lib/atomic-lib.c b/arch/frv/lib/atomic-lib.c
new file mode 100644
index 000000000000..4d1b887c248b
--- /dev/null
+++ b/arch/frv/lib/atomic-lib.c
@@ -0,0 +1,7 @@
+
+#include <linux/export.h>
+#include <asm/atomic.h>
+
+#define __ATOMIC_LIB__
+
+#include <asm/atomic_defs.h>
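This new translation unit is what makes the out-of-line configuration work: defining __ATOMIC_LIB__ before pulling in asm/atomic_defs.h selects the branch where ATOMIC_QUALS is empty and ATOMIC_EXPORT() is EXPORT_SYMBOL(), so the macro-generated bodies are compiled here exactly once and exported, replacing the hand-written routines deleted from atomic-ops.S and atomic64-ops.S below. A minimal standalone sketch of the same single-header trick, with hypothetical names and a placeholder body:

	/* ops.h: inline copies by default, real definitions when OPS_LIB is set */
	#ifdef OPS_LIB
	#define OPS_QUALS			/* one global definition */
	#else
	#define OPS_QUALS	static inline	/* a copy per includer */
	#endif

	OPS_QUALS int ops_add_return(int i, int *v)
	{
		return *v += i;			/* stands in for the asm loop */
	}

	/* ops-lib.c: the one file that provides the global symbols */
	#define OPS_LIB
	#include "ops.h"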
diff --git a/arch/frv/lib/atomic-ops.S b/arch/frv/lib/atomic-ops.S
index 5e9e6ab5dd0e..b7439a960b5b 100644
--- a/arch/frv/lib/atomic-ops.S
+++ b/arch/frv/lib/atomic-ops.S
@@ -19,116 +19,6 @@
 
 ###############################################################################
 #
-# unsigned long atomic_test_and_ANDNOT_mask(unsigned long mask, volatile unsigned long *v);
-#
-###############################################################################
-	.globl		atomic_test_and_ANDNOT_mask
-	.type		atomic_test_and_ANDNOT_mask,@function
-atomic_test_and_ANDNOT_mask:
-	not.p		gr8,gr10
-0:
-	orcc		gr0,gr0,gr0,icc3		/* set ICC3.Z */
-	ckeq		icc3,cc7
-	ld.p		@(gr9,gr0),gr8			/* LD.P/ORCR must be atomic */
-	orcr		cc7,cc7,cc3			/* set CC3 to true */
-	and		gr8,gr10,gr11
-	cst.p		gr11,@(gr9,gr0)		,cc3,#1
-	corcc		gr29,gr29,gr0		,cc3,#1	/* clear ICC3.Z if store happens */
-	beq		icc3,#0,0b
-	bralr
-
-	.size		atomic_test_and_ANDNOT_mask, .-atomic_test_and_ANDNOT_mask
-
-###############################################################################
-#
-# unsigned long atomic_test_and_OR_mask(unsigned long mask, volatile unsigned long *v);
-#
-###############################################################################
-	.globl		atomic_test_and_OR_mask
-	.type		atomic_test_and_OR_mask,@function
-atomic_test_and_OR_mask:
-	or.p		gr8,gr8,gr10
-0:
-	orcc		gr0,gr0,gr0,icc3		/* set ICC3.Z */
-	ckeq		icc3,cc7
-	ld.p		@(gr9,gr0),gr8			/* LD.P/ORCR must be atomic */
-	orcr		cc7,cc7,cc3			/* set CC3 to true */
-	or		gr8,gr10,gr11
-	cst.p		gr11,@(gr9,gr0)		,cc3,#1
-	corcc		gr29,gr29,gr0		,cc3,#1	/* clear ICC3.Z if store happens */
-	beq		icc3,#0,0b
-	bralr
-
-	.size		atomic_test_and_OR_mask, .-atomic_test_and_OR_mask
-
-###############################################################################
-#
-# unsigned long atomic_test_and_XOR_mask(unsigned long mask, volatile unsigned long *v);
-#
-###############################################################################
-	.globl		atomic_test_and_XOR_mask
-	.type		atomic_test_and_XOR_mask,@function
-atomic_test_and_XOR_mask:
-	or.p		gr8,gr8,gr10
-0:
-	orcc		gr0,gr0,gr0,icc3		/* set ICC3.Z */
-	ckeq		icc3,cc7
-	ld.p		@(gr9,gr0),gr8			/* LD.P/ORCR must be atomic */
-	orcr		cc7,cc7,cc3			/* set CC3 to true */
-	xor		gr8,gr10,gr11
-	cst.p		gr11,@(gr9,gr0)		,cc3,#1
-	corcc		gr29,gr29,gr0		,cc3,#1	/* clear ICC3.Z if store happens */
-	beq		icc3,#0,0b
-	bralr
-
-	.size		atomic_test_and_XOR_mask, .-atomic_test_and_XOR_mask
-
-###############################################################################
-#
-# int atomic_add_return(int i, atomic_t *v)
-#
-###############################################################################
-	.globl		atomic_add_return
-	.type		atomic_add_return,@function
-atomic_add_return:
-	or.p		gr8,gr8,gr10
-0:
-	orcc		gr0,gr0,gr0,icc3		/* set ICC3.Z */
-	ckeq		icc3,cc7
-	ld.p		@(gr9,gr0),gr8			/* LD.P/ORCR must be atomic */
-	orcr		cc7,cc7,cc3			/* set CC3 to true */
-	add		gr8,gr10,gr8
-	cst.p		gr8,@(gr9,gr0)		,cc3,#1
-	corcc		gr29,gr29,gr0		,cc3,#1	/* clear ICC3.Z if store happens */
-	beq		icc3,#0,0b
-	bralr
-
-	.size		atomic_add_return, .-atomic_add_return
-
-###############################################################################
-#
-# int atomic_sub_return(int i, atomic_t *v)
-#
-###############################################################################
-	.globl		atomic_sub_return
-	.type		atomic_sub_return,@function
-atomic_sub_return:
-	or.p		gr8,gr8,gr10
-0:
-	orcc		gr0,gr0,gr0,icc3		/* set ICC3.Z */
-	ckeq		icc3,cc7
-	ld.p		@(gr9,gr0),gr8			/* LD.P/ORCR must be atomic */
-	orcr		cc7,cc7,cc3			/* set CC3 to true */
-	sub		gr8,gr10,gr8
-	cst.p		gr8,@(gr9,gr0)		,cc3,#1
-	corcc		gr29,gr29,gr0		,cc3,#1	/* clear ICC3.Z if store happens */
-	beq		icc3,#0,0b
-	bralr
-
-	.size		atomic_sub_return, .-atomic_sub_return
-
-###############################################################################
-#
 # uint32_t __xchg_32(uint32_t i, uint32_t *v)
 #
 ###############################################################################
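All the routines deleted here (and their macro-generated replacements in atomic_defs.h) rely on the same FRV sequence: orcc sets ICC3.Z, ld.p together with orcr atomically loads the old value and arms CC3, cst.p stores the new value only while CC3 still holds, corcc clears ICC3.Z when the store actually happened, and beq loops otherwise; in effect an LL/SC-style retry loop, with any interruption between load and store suppressing the conditional store. A C analogue, for illustration only (the FRV code uses conditional stores, not compare-and-swap; __sync_bool_compare_and_swap is the GCC builtin):

	static inline int add_return_analogue(int i, int *v)
	{
		int old, new;

		do {
			old = *(volatile int *)v;	/* ld.p: fetch current value */
			new = old + i;			/* add: compute update */
		} while (!__sync_bool_compare_and_swap(v, old, new));
							/* cst.p failed: retry */
		return new;
	}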
diff --git a/arch/frv/lib/atomic64-ops.S b/arch/frv/lib/atomic64-ops.S
index b6194eeac127..c4c472308a33 100644
--- a/arch/frv/lib/atomic64-ops.S
+++ b/arch/frv/lib/atomic64-ops.S
@@ -20,100 +20,6 @@
 
 ###############################################################################
 #
-# long long atomic64_inc_return(atomic64_t *v)
-#
-###############################################################################
-	.globl		atomic64_inc_return
-	.type		atomic64_inc_return,@function
-atomic64_inc_return:
-	or.p		gr8,gr8,gr10
-0:
-	orcc		gr0,gr0,gr0,icc3		/* set ICC3.Z */
-	ckeq		icc3,cc7
-	ldd.p		@(gr10,gr0),gr8			/* LDD.P/ORCR must be atomic */
-	orcr		cc7,cc7,cc3			/* set CC3 to true */
-	addicc		gr9,#1,gr9,icc0
-	addxi		gr8,#0,gr8,icc0
-	cstd.p		gr8,@(gr10,gr0)		,cc3,#1
-	corcc		gr29,gr29,gr0		,cc3,#1	/* clear ICC3.Z if store happens */
-	beq		icc3,#0,0b
-	bralr
-
-	.size		atomic64_inc_return, .-atomic64_inc_return
-
-###############################################################################
-#
-# long long atomic64_dec_return(atomic64_t *v)
-#
-###############################################################################
-	.globl		atomic64_dec_return
-	.type		atomic64_dec_return,@function
-atomic64_dec_return:
-	or.p		gr8,gr8,gr10
-0:
-	orcc		gr0,gr0,gr0,icc3		/* set ICC3.Z */
-	ckeq		icc3,cc7
-	ldd.p		@(gr10,gr0),gr8			/* LDD.P/ORCR must be atomic */
-	orcr		cc7,cc7,cc3			/* set CC3 to true */
-	subicc		gr9,#1,gr9,icc0
-	subxi		gr8,#0,gr8,icc0
-	cstd.p		gr8,@(gr10,gr0)		,cc3,#1
-	corcc		gr29,gr29,gr0		,cc3,#1	/* clear ICC3.Z if store happens */
-	beq		icc3,#0,0b
-	bralr
-
-	.size		atomic64_dec_return, .-atomic64_dec_return
-
-###############################################################################
-#
-# long long atomic64_add_return(long long i, atomic64_t *v)
-#
-###############################################################################
-	.globl		atomic64_add_return
-	.type		atomic64_add_return,@function
-atomic64_add_return:
-	or.p		gr8,gr8,gr4
-	or		gr9,gr9,gr5
-0:
-	orcc		gr0,gr0,gr0,icc3		/* set ICC3.Z */
-	ckeq		icc3,cc7
-	ldd.p		@(gr10,gr0),gr8			/* LDD.P/ORCR must be atomic */
-	orcr		cc7,cc7,cc3			/* set CC3 to true */
-	addcc		gr9,gr5,gr9,icc0
-	addx		gr8,gr4,gr8,icc0
-	cstd.p		gr8,@(gr10,gr0)		,cc3,#1
-	corcc		gr29,gr29,gr0		,cc3,#1	/* clear ICC3.Z if store happens */
-	beq		icc3,#0,0b
-	bralr
-
-	.size		atomic64_add_return, .-atomic64_add_return
-
-###############################################################################
-#
-# long long atomic64_sub_return(long long i, atomic64_t *v)
-#
-###############################################################################
-	.globl		atomic64_sub_return
-	.type		atomic64_sub_return,@function
-atomic64_sub_return:
-	or.p		gr8,gr8,gr4
-	or		gr9,gr9,gr5
-0:
-	orcc		gr0,gr0,gr0,icc3		/* set ICC3.Z */
-	ckeq		icc3,cc7
-	ldd.p		@(gr10,gr0),gr8			/* LDD.P/ORCR must be atomic */
-	orcr		cc7,cc7,cc3			/* set CC3 to true */
-	subcc		gr9,gr5,gr9,icc0
-	subx		gr8,gr4,gr8,icc0
-	cstd.p		gr8,@(gr10,gr0)		,cc3,#1
-	corcc		gr29,gr29,gr0		,cc3,#1	/* clear ICC3.Z if store happens */
-	beq		icc3,#0,0b
-	bralr
-
-	.size		atomic64_sub_return, .-atomic64_sub_return
-
-###############################################################################
-#
 # uint64_t __xchg_64(uint64_t i, uint64_t *v)
 #
 ###############################################################################