author		Stefan Kristiansson <stefan.kristiansson@saunalahti.fi>	2014-05-13 15:30:56 -0400
committer	Stafford Horne <shorne@gmail.com>	2017-02-24 14:14:06 -0500
commit		bc19598f1dde267e5214e386b97bb647973275db (patch)
tree		fb35f0097281aa51ce3b629342f4a4287693a7de /arch/openrisc
parent		11595172537788f0007bfc16590aab18f2b9c40f (diff)
openrisc: add optimized atomic operations
Use the l.lwa and l.swa atomic instruction pair.

Most OpenRISC processor cores now provide these instructions. If the
instructions are not available, emulation is provided.
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Signed-off-by: Stefan Kristiansson <stefan.kristiansson@saunalahti.fi>
[shorne@gmail.com: remove OPENRISC_HAVE_INST_LWA_SWA config suggested by
Alan Cox https://lkml.org/lkml/2014/7/23/666]
[shorne@gmail.com: expand to implement all ops suggested by Peter
Zijlstra https://lkml.org/lkml/2017/2/20/317]
Signed-off-by: Stafford Horne <shorne@gmail.com>
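For reference, this is roughly what the ATOMIC_OP(add) case of the macro in
the diff below expands to. This sketch is illustration only, not part of the
patch, and the inline assembly only assembles for an OpenRISC target:

/* l.lwa loads v->counter and takes a reservation; l.swa stores only if
 * the reservation still holds, setting the flag on success; l.bnf
 * (branch if no flag) loops back to retry when the store failed.
 */
static inline void atomic_add(int i, atomic_t *v)
{
	int tmp;

	__asm__ __volatile__(
		"1:	l.lwa %0,0(%1)	\n"	/* load-linked v->counter */
		"	l.add %0,%0,%2	\n"	/* tmp = tmp + i */
		"	l.swa 0(%1),%0	\n"	/* store-conditional */
		"	l.bnf 1b	\n"	/* retry if the store failed */
		"	l.nop		\n"	/* branch delay slot */
		: "=&r"(tmp)
		: "r"(&v->counter), "r"(i)
		: "cc", "memory");
}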
Diffstat (limited to 'arch/openrisc')
-rw-r--r--	arch/openrisc/include/asm/Kbuild	1
-rw-r--r--	arch/openrisc/include/asm/atomic.h	126
2 files changed, 126 insertions(+), 1 deletion(-)
diff --git a/arch/openrisc/include/asm/Kbuild b/arch/openrisc/include/asm/Kbuild
index 15e6ed526453..1cedd6309fa6 100644
--- a/arch/openrisc/include/asm/Kbuild
+++ b/arch/openrisc/include/asm/Kbuild
@@ -1,7 +1,6 @@
 
 header-y += ucontext.h
 
-generic-y += atomic.h
 generic-y += auxvec.h
 generic-y += barrier.h
 generic-y += bitsperlong.h
diff --git a/arch/openrisc/include/asm/atomic.h b/arch/openrisc/include/asm/atomic.h
new file mode 100644
index 000000000000..146e1660f00e
--- /dev/null
+++ b/arch/openrisc/include/asm/atomic.h
@@ -0,0 +1,126 @@
+/*
+ * Copyright (C) 2014 Stefan Kristiansson <stefan.kristiansson@saunalahti.fi>
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2. This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ */
+
+#ifndef __ASM_OPENRISC_ATOMIC_H
+#define __ASM_OPENRISC_ATOMIC_H
+
+#include <linux/types.h>
+
+/* Atomically perform op with v->counter and i */
+#define ATOMIC_OP(op) \
+static inline void atomic_##op(int i, atomic_t *v) \
+{ \
+	int tmp; \
+ \
+	__asm__ __volatile__( \
+		"1:	l.lwa %0,0(%1)	\n" \
+		"	l." #op " %0,%0,%2	\n" \
+		"	l.swa 0(%1),%0	\n" \
+		"	l.bnf 1b	\n" \
+		"	l.nop	\n" \
+		: "=&r"(tmp) \
+		: "r"(&v->counter), "r"(i) \
+		: "cc", "memory"); \
+}
+
+/* Atomically perform op with v->counter and i, return the result */
+#define ATOMIC_OP_RETURN(op) \
+static inline int atomic_##op##_return(int i, atomic_t *v) \
+{ \
+	int tmp; \
+ \
+	__asm__ __volatile__( \
+		"1:	l.lwa %0,0(%1)	\n" \
+		"	l." #op " %0,%0,%2	\n" \
+		"	l.swa 0(%1),%0	\n" \
+		"	l.bnf 1b	\n" \
+		"	l.nop	\n" \
+		: "=&r"(tmp) \
+		: "r"(&v->counter), "r"(i) \
+		: "cc", "memory"); \
+ \
+	return tmp; \
+}
+
+/* Atomically perform op with v->counter and i, return orig v->counter */
+#define ATOMIC_FETCH_OP(op) \
+static inline int atomic_fetch_##op(int i, atomic_t *v) \
+{ \
+	int tmp, old; \
+ \
+	__asm__ __volatile__( \
+		"1:	l.lwa %0,0(%2)	\n" \
+		"	l." #op " %1,%0,%3	\n" \
+		"	l.swa 0(%2),%1	\n" \
+		"	l.bnf 1b	\n" \
+		"	l.nop	\n" \
+		: "=&r"(old), "=&r"(tmp) \
+		: "r"(&v->counter), "r"(i) \
+		: "cc", "memory"); \
+ \
+	return old; \
+}
+
+ATOMIC_OP_RETURN(add)
+ATOMIC_OP_RETURN(sub)
+
+ATOMIC_FETCH_OP(add)
+ATOMIC_FETCH_OP(sub)
+ATOMIC_FETCH_OP(and)
+ATOMIC_FETCH_OP(or)
+ATOMIC_FETCH_OP(xor)
+
+ATOMIC_OP(and)
+ATOMIC_OP(or)
+ATOMIC_OP(xor)
+
+#undef ATOMIC_FETCH_OP
+#undef ATOMIC_OP_RETURN
+#undef ATOMIC_OP
+
+#define atomic_add_return	atomic_add_return
+#define atomic_sub_return	atomic_sub_return
+#define atomic_fetch_add	atomic_fetch_add
+#define atomic_fetch_sub	atomic_fetch_sub
+#define atomic_fetch_and	atomic_fetch_and
+#define atomic_fetch_or		atomic_fetch_or
+#define atomic_fetch_xor	atomic_fetch_xor
+#define atomic_and		atomic_and
+#define atomic_or		atomic_or
+#define atomic_xor		atomic_xor
+
+/*
+ * Atomically add a to v->counter as long as v is not already u.
+ * Returns the original value at v->counter.
+ *
+ * This is often used through atomic_inc_not_zero()
+ */
+static inline int __atomic_add_unless(atomic_t *v, int a, int u)
+{
+	int old, tmp;
+
+	__asm__ __volatile__(
+		"1:	l.lwa %0, 0(%2)	\n"
+		"	l.sfeq %0, %4	\n"
+		"	l.bf 2f	\n"
+		"	l.add %1, %0, %3	\n"
+		"	l.swa 0(%2), %1	\n"
+		"	l.bnf 1b	\n"
+		"	l.nop	\n"
+		"2:	\n"
+		: "=&r"(old), "=&r"(tmp)
+		: "r"(&v->counter), "r"(a), "r"(u)
+		: "cc", "memory");
+
+	return old;
+}
+#define __atomic_add_unless	__atomic_add_unless
+
+#include <asm-generic/atomic.h>
+
+#endif /* __ASM_OPENRISC_ATOMIC_H */
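For context, here is a sketch (not part of the patch) of roughly how the
generic atomic layer of this kernel era built atomic_inc_not_zero() on top
of the __atomic_add_unless() hook defined above; treat the exact
generic-layer definitions as an assumption:

/* The add happened iff the value read back was not u, so comparing the
 * returned original value against u tells the caller whether it succeeded.
 */
static inline int atomic_add_unless(atomic_t *v, int a, int u)
{
	return __atomic_add_unless(v, a, u) != u;
}
#define atomic_inc_not_zero(v)	atomic_add_unless((v), 1, 0)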