Diffstat (limited to 'arch/arm64/include/asm/cmpxchg.h')
 -rw-r--r--  arch/arm64/include/asm/cmpxchg.h | 74
 1 file changed, 37 insertions(+), 37 deletions(-)
diff --git a/arch/arm64/include/asm/cmpxchg.h b/arch/arm64/include/asm/cmpxchg.h
index e0e65b069d9e..968b5cbfc260 100644
--- a/arch/arm64/include/asm/cmpxchg.h
+++ b/arch/arm64/include/asm/cmpxchg.h
@@ -29,39 +29,39 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size
 	switch (size) {
 	case 1:
 		asm volatile("// __xchg1\n"
-		"1:	ldaxrb	%w0, [%3]\n"
-		"	stlxrb	%w1, %w2, [%3]\n"
+		"1:	ldaxrb	%w0, %2\n"
+		"	stlxrb	%w1, %w3, %2\n"
 		"	cbnz	%w1, 1b\n"
-			: "=&r" (ret), "=&r" (tmp)
-			: "r" (x), "r" (ptr)
-			: "memory", "cc");
+			: "=&r" (ret), "=&r" (tmp), "+Q" (*(u8 *)ptr)
+			: "r" (x)
+			: "cc", "memory");
 		break;
 	case 2:
 		asm volatile("// __xchg2\n"
-		"1:	ldaxrh	%w0, [%3]\n"
-		"	stlxrh	%w1, %w2, [%3]\n"
+		"1:	ldaxrh	%w0, %2\n"
+		"	stlxrh	%w1, %w3, %2\n"
 		"	cbnz	%w1, 1b\n"
-			: "=&r" (ret), "=&r" (tmp)
-			: "r" (x), "r" (ptr)
-			: "memory", "cc");
+			: "=&r" (ret), "=&r" (tmp), "+Q" (*(u16 *)ptr)
+			: "r" (x)
+			: "cc", "memory");
 		break;
 	case 4:
 		asm volatile("// __xchg4\n"
-		"1:	ldaxr	%w0, [%3]\n"
-		"	stlxr	%w1, %w2, [%3]\n"
+		"1:	ldaxr	%w0, %2\n"
+		"	stlxr	%w1, %w3, %2\n"
 		"	cbnz	%w1, 1b\n"
-			: "=&r" (ret), "=&r" (tmp)
-			: "r" (x), "r" (ptr)
-			: "memory", "cc");
+			: "=&r" (ret), "=&r" (tmp), "+Q" (*(u32 *)ptr)
+			: "r" (x)
+			: "cc", "memory");
 		break;
 	case 8:
 		asm volatile("// __xchg8\n"
-		"1:	ldaxr	%0, [%3]\n"
-		"	stlxr	%w1, %2, [%3]\n"
+		"1:	ldaxr	%0, %2\n"
+		"	stlxr	%w1, %3, %2\n"
 		"	cbnz	%w1, 1b\n"
-			: "=&r" (ret), "=&r" (tmp)
-			: "r" (x), "r" (ptr)
-			: "memory", "cc");
+			: "=&r" (ret), "=&r" (tmp), "+Q" (*(u64 *)ptr)
+			: "r" (x)
+			: "cc", "memory");
 		break;
 	default:
 		BUILD_BUG();
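
The hunk above switches every exclusive sequence in __xchg() from passing the pointer in a general register ("r" (ptr), addressed as [%3]) to handing the compiler the pointed-to object itself as a "+Q" read/write memory operand. With "+Q" the assembler emits the base-register addressing itself, and the compiler knows exactly which object the asm reads and writes rather than inferring it from a pointer in a register. A minimal standalone sketch of the new-style 4-byte case follows; the function name xchg_u32 and the <stdint.h> types are illustrative, not the kernel's API:

#include <stdint.h>

/* Illustrative sketch, not the kernel's __xchg(): atomically swap *ptr
 * with x on arm64. The "+Q" operand tells the compiler this asm both
 * reads and writes *ptr through a single base register with no offset. */
static inline uint32_t xchg_u32(volatile uint32_t *ptr, uint32_t x)
{
	uint32_t ret;
	unsigned long tmp;

	asm volatile("// xchg_u32\n"
	"1:	ldaxr	%w0, %2\n"	/* load-acquire exclusive of *ptr */
	"	stlxr	%w1, %w3, %2\n"	/* store-release exclusive of x */
	"	cbnz	%w1, 1b\n"	/* nonzero status: we lost the
					   exclusive monitor, so retry */
	: "=&r" (ret), "=&r" (tmp), "+Q" (*ptr)
	: "r" (x)
	: "cc", "memory");

	return ret;
}

Note that %2 expands to the [xN] form the old code spelled by hand, which is why the assembly template no longer contains any brackets.
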
@@ -82,14 +82,14 @@ static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
 	case 1:
 		do {
 			asm volatile("// __cmpxchg1\n"
-			"	ldxrb	%w1, [%2]\n"
+			"	ldxrb	%w1, %2\n"
 			"	mov	%w0, #0\n"
 			"	cmp	%w1, %w3\n"
 			"	b.ne	1f\n"
-			"	stxrb	%w0, %w4, [%2]\n"
+			"	stxrb	%w0, %w4, %2\n"
 			"1:\n"
-				: "=&r" (res), "=&r" (oldval)
-				: "r" (ptr), "Ir" (old), "r" (new)
+				: "=&r" (res), "=&r" (oldval), "+Q" (*(u8 *)ptr)
+				: "Ir" (old), "r" (new)
 				: "cc");
 		} while (res);
 		break;
@@ -97,29 +97,29 @@ static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
 	case 2:
 		do {
 			asm volatile("// __cmpxchg2\n"
-			"	ldxrh	%w1, [%2]\n"
+			"	ldxrh	%w1, %2\n"
 			"	mov	%w0, #0\n"
 			"	cmp	%w1, %w3\n"
 			"	b.ne	1f\n"
-			"	stxrh	%w0, %w4, [%2]\n"
+			"	stxrh	%w0, %w4, %2\n"
 			"1:\n"
-				: "=&r" (res), "=&r" (oldval)
-				: "r" (ptr), "Ir" (old), "r" (new)
-				: "memory", "cc");
+				: "=&r" (res), "=&r" (oldval), "+Q" (*(u16 *)ptr)
+				: "Ir" (old), "r" (new)
+				: "cc");
 		} while (res);
 		break;
 
 	case 4:
 		do {
 			asm volatile("// __cmpxchg4\n"
-			"	ldxr	%w1, [%2]\n"
+			"	ldxr	%w1, %2\n"
 			"	mov	%w0, #0\n"
 			"	cmp	%w1, %w3\n"
 			"	b.ne	1f\n"
-			"	stxr	%w0, %w4, [%2]\n"
+			"	stxr	%w0, %w4, %2\n"
 			"1:\n"
-				: "=&r" (res), "=&r" (oldval)
-				: "r" (ptr), "Ir" (old), "r" (new)
+				: "=&r" (res), "=&r" (oldval), "+Q" (*(u32 *)ptr)
+				: "Ir" (old), "r" (new)
 				: "cc");
 		} while (res);
 		break;
@@ -127,14 +127,14 @@ static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
 	case 8:
 		do {
 			asm volatile("// __cmpxchg8\n"
-			"	ldxr	%1, [%2]\n"
+			"	ldxr	%1, %2\n"
 			"	mov	%w0, #0\n"
 			"	cmp	%1, %3\n"
 			"	b.ne	1f\n"
-			"	stxr	%w0, %4, [%2]\n"
+			"	stxr	%w0, %4, %2\n"
 			"1:\n"
-				: "=&r" (res), "=&r" (oldval)
-				: "r" (ptr), "Ir" (old), "r" (new)
+				: "=&r" (res), "=&r" (oldval), "+Q" (*(u64 *)ptr)
+				: "Ir" (old), "r" (new)
 				: "cc");
 		} while (res);
 		break;
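
The __cmpxchg() cases get the same conversion, and the case-2 hunk additionally drops the stray "memory" clobber that made it inconsistent with the other three sizes. A minimal standalone sketch of the new-style 4-byte compare-and-exchange, mirroring the hunks above (cmpxchg_u32 is an illustrative name, not the kernel interface):

#include <stdint.h>

/* Illustrative sketch, not the kernel's __cmpxchg(): if *ptr == old,
 * store new; either way return the value that was observed in *ptr. */
static inline uint32_t cmpxchg_u32(volatile uint32_t *ptr, uint32_t old,
				   uint32_t new)
{
	uint32_t oldval;
	unsigned long res;

	do {
		asm volatile("// cmpxchg_u32\n"
		"	ldxr	%w1, %2\n"	/* load exclusive of *ptr */
		"	mov	%w0, #0\n"	/* res = 0: assume success */
		"	cmp	%w1, %w3\n"
		"	b.ne	1f\n"		/* mismatch: skip the store;
						   res stays 0, loop exits */
		"	stxr	%w0, %w4, %2\n"	/* res = 1 if we lost the
						   exclusive monitor */
		"1:\n"
		: "=&r" (res), "=&r" (oldval), "+Q" (*ptr)
		: "Ir" (old), "r" (new)
		: "cc");
	} while (res);

	return oldval;	/* caller compares this against old */
}

Note that the compare-and-exchange uses plain ldxr/stxr rather than the acquire/release ldaxr/stlxr flavours seen in __xchg(), exactly as in the hunks above, and that the "Ir" constraint lets a suitable constant old be folded directly into the cmp instruction.
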