author	Michael S. Tsirkin <mst@redhat.com>	2016-01-08 02:23:58 -0500
committer	Michael S. Tsirkin <mst@redhat.com>	2016-01-12 13:47:02 -0500
commit	9e3f84ce416663c84a191cb3ead300fc1a4adadc (patch)
tree	8c4537b92a1498af2599787cfc8622cd24a0ab20 /arch/sh
parent	3226aad81aa670015a59e51458a0deb2d3bcb600 (diff)
sh: move xchg_cmpxchg to a header by itself
Looks like future sh variants will support a 4-byte cas, which will be
used to implement 1- and 2-byte xchg.

This is exactly what we do for llsc now; move the portable part of the
code into a separate header so it's easy to reuse.
Suggested-by: Rich Felker <dalias@libc.org>
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
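
For readers outside the kernel tree, here is a minimal user-space sketch of
the trick the patch factors out: emulating a 1-byte xchg with a 4-byte
compare-and-swap by splicing the byte into its naturally aligned containing
word. The GCC/Clang __atomic builtins stand in for the kernel's
__cmpxchg_u32 and READ_ONCE, and the names byte_xchg and the demo in main()
are hypothetical, not part of this patch.

#include <stdint.h>
#include <stdio.h>

/* Exchange one byte by CAS-ing the aligned 32-bit word that contains it:
 * read the word, splice the new byte in at the right bit offset, and
 * retry until the word was unchanged underneath us. */
static uint8_t byte_xchg(volatile uint8_t *ptr, uint8_t x)
{
	uintptr_t off = (uintptr_t)ptr % sizeof(uint32_t);
	volatile uint32_t *p = (volatile uint32_t *)((uintptr_t)ptr - off);
#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
	int bitoff = (sizeof(uint32_t) - 1 - off) * 8;
#else
	int bitoff = off * 8;
#endif
	uint32_t mask = 0xffu << bitoff;
	uint32_t oldv = __atomic_load_n(p, __ATOMIC_RELAXED);
	uint32_t newv;

	do {
		newv = (oldv & ~mask) | ((uint32_t)x << bitoff);
		/* On failure the builtin reloads *p into oldv for us. */
	} while (!__atomic_compare_exchange_n(p, &oldv, newv, 0,
					      __ATOMIC_SEQ_CST,
					      __ATOMIC_SEQ_CST));
	return (oldv & mask) >> bitoff;
}

int main(void)
{
	uint32_t word = 0x44332211;
	uint8_t *bytes = (uint8_t *)&word;
	/* On a little-endian machine bytes[2] holds 0x33. */
	uint8_t prev = byte_xchg(&bytes[2], 0xaa);

	printf("prev = 0x%02x, word = 0x%08x\n", prev, word);
	return 0;
}

The retry loop is the cost of the emulation: the CAS fails and the exchange
is retried whenever any byte of the containing word changes concurrently,
not just the byte being exchanged.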
Diffstat (limited to 'arch/sh')
-rw-r--r--	arch/sh/include/asm/cmpxchg-llsc.h	35
-rw-r--r--	arch/sh/include/asm/cmpxchg-xchg.h	51
2 files changed, 52 insertions(+), 34 deletions(-)
diff --git a/arch/sh/include/asm/cmpxchg-llsc.h b/arch/sh/include/asm/cmpxchg-llsc.h
index e754794e282f..fcfd32271bff 100644
--- a/arch/sh/include/asm/cmpxchg-llsc.h
+++ b/arch/sh/include/asm/cmpxchg-llsc.h
@@ -1,9 +1,6 @@
 #ifndef __ASM_SH_CMPXCHG_LLSC_H
 #define __ASM_SH_CMPXCHG_LLSC_H
 
-#include <linux/bitops.h>
-#include <asm/byteorder.h>
-
 static inline unsigned long xchg_u32(volatile u32 *m, unsigned long val)
 {
 	unsigned long retval;
@@ -50,36 +47,6 @@ __cmpxchg_u32(volatile u32 *m, unsigned long old, unsigned long new)
 	return retval;
 }
 
-static inline u32 __xchg_cmpxchg(volatile void *ptr, u32 x, int size)
-{
-	int off = (unsigned long)ptr % sizeof(u32);
-	volatile u32 *p = ptr - off;
-#ifdef __BIG_ENDIAN
-	int bitoff = (sizeof(u32) - 1 - off) * BITS_PER_BYTE;
-#else
-	int bitoff = off * BITS_PER_BYTE;
-#endif
-	u32 bitmask = ((0x1 << size * BITS_PER_BYTE) - 1) << bitoff;
-	u32 oldv, newv;
-	u32 ret;
-
-	do {
-		oldv = READ_ONCE(*p);
-		ret = (oldv & bitmask) >> bitoff;
-		newv = (oldv & ~bitmask) | (x << bitoff);
-	} while (__cmpxchg_u32(p, oldv, newv) != oldv);
-
-	return ret;
-}
-
-static inline unsigned long xchg_u16(volatile u16 *m, unsigned long val)
-{
-	return __xchg_cmpxchg(m, val, sizeof *m);
-}
-
-static inline unsigned long xchg_u8(volatile u8 *m, unsigned long val)
-{
-	return __xchg_cmpxchg(m, val, sizeof *m);
-}
+#include <asm/cmpxchg-xchg.h>
 
 #endif /* __ASM_SH_CMPXCHG_LLSC_H */
diff --git a/arch/sh/include/asm/cmpxchg-xchg.h b/arch/sh/include/asm/cmpxchg-xchg.h
new file mode 100644
index 000000000000..7219719c23a3
--- /dev/null
+++ b/arch/sh/include/asm/cmpxchg-xchg.h
@@ -0,0 +1,51 @@
+#ifndef __ASM_SH_CMPXCHG_XCHG_H
+#define __ASM_SH_CMPXCHG_XCHG_H
+
+/*
+ * Copyright (C) 2016 Red Hat, Inc.
+ * Author: Michael S. Tsirkin <mst@redhat.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2. See the
+ * file "COPYING" in the main directory of this archive for more details.
+ */
+#include <linux/bitops.h>
+#include <asm/byteorder.h>
+
+/*
+ * Portable implementations of 1 and 2 byte xchg using a 4 byte cmpxchg.
+ * Note: this header isn't self-contained: before including it, __cmpxchg_u32
+ * must be defined first.
+ */
+static inline u32 __xchg_cmpxchg(volatile void *ptr, u32 x, int size)
+{
+	int off = (unsigned long)ptr % sizeof(u32);
+	volatile u32 *p = ptr - off;
+#ifdef __BIG_ENDIAN
+	int bitoff = (sizeof(u32) - 1 - off) * BITS_PER_BYTE;
+#else
+	int bitoff = off * BITS_PER_BYTE;
+#endif
+	u32 bitmask = ((0x1 << size * BITS_PER_BYTE) - 1) << bitoff;
+	u32 oldv, newv;
+	u32 ret;
+
+	do {
+		oldv = READ_ONCE(*p);
+		ret = (oldv & bitmask) >> bitoff;
+		newv = (oldv & ~bitmask) | (x << bitoff);
+	} while (__cmpxchg_u32(p, oldv, newv) != oldv);
+
+	return ret;
+}
+
+static inline unsigned long xchg_u16(volatile u16 *m, unsigned long val)
+{
+	return __xchg_cmpxchg(m, val, sizeof *m);
+}
+
+static inline unsigned long xchg_u8(volatile u8 *m, unsigned long val)
+{
+	return __xchg_cmpxchg(m, val, sizeof *m);
+}
+
+#endif /* __ASM_SH_CMPXCHG_XCHG_H */
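
A quick sanity check of the mask arithmetic above, assuming a little-endian
CPU: for xchg_u16 on a u16 in the upper half of its word, off = 2, so
bitoff = 16 and bitmask = ((1 << 16) - 1) << 16 = 0xffff0000; the loop then
rewrites only the upper halfword while __cmpxchg_u32 atomically guards the
entire 4-byte word. Note that the helper is only instantiated with 1- and
2-byte sizes here (via xchg_u8 and xchg_u16), so the
0x1 << size * BITS_PER_BYTE expression never shifts a 32-bit int by the
full 32 bits.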