aboutsummaryrefslogtreecommitdiffstats
path: root/tools
diff options
context:
space:
mode:
authorWill Deacon <will.deacon@arm.com>2018-10-31 13:44:08 -0400
committerArnaldo Carvalho de Melo <acme@redhat.com>2018-11-01 09:07:43 -0400
commit51f5fd2e4615dcdc25cd7f9d19b7b27eb9ecdac7 (patch)
tree2c0a76d935b5d402b9c259963915c7c79729c2b3 /tools
parent29995d296e3e9ce4f9767963ecbef143ade26c36 (diff)
tools headers barrier: Fix arm64 tools build failure wrt smp_load_{acquire,release}
Cheers for reporting this. I managed to reproduce the build failure with gcc version 6.3.0 20170516 (Debian 6.3.0-18+deb9u1). The code in question is the arm64 versions of smp_load_acquire() and smp_store_release(). Unlike other architectures, these are not built around READ_ONCE() and WRITE_ONCE() since we have instructions we can use instead of fences. Bringing our macros up-to-date with those (i.e. tweaking the union initialisation and using the special "uXX_alias_t" types) appears to fix the issue for me. Committer notes: Testing it in the systems previously failing: # time dm android-ndk:r12b-arm \ android-ndk:r15c-arm \ debian:experimental-x-arm64 \ ubuntu:14.04.4-x-linaro-arm64 \ ubuntu:16.04-x-arm \ ubuntu:16.04-x-arm64 \ ubuntu:18.04-x-arm \ ubuntu:18.04-x-arm64 1 android-ndk:r12b-arm : Ok arm-linux-androideabi-gcc (GCC) 4.9.x 20150123 (prerelease) 2 android-ndk:r15c-arm : Ok arm-linux-androideabi-gcc (GCC) 4.9.x 20150123 (prerelease) 3 debian:experimental-x-arm64 : Ok aarch64-linux-gnu-gcc (Debian 8.2.0-7) 8.2.0 4 ubuntu:14.04.4-x-linaro-arm64 : Ok aarch64-linux-gnu-gcc (Linaro GCC 5.5-2017.10) 5.5.0 5 ubuntu:16.04-x-arm : Ok arm-linux-gnueabihf-gcc (Ubuntu/Linaro 5.4.0-6ubuntu1~16.04.9) 5.4.0 20160609 6 ubuntu:16.04-x-arm64 : Ok aarch64-linux-gnu-gcc (Ubuntu/Linaro 5.4.0-6ubuntu1~16.04.9) 5.4.0 20160609 7 ubuntu:18.04-x-arm : Ok arm-linux-gnueabihf-gcc (Ubuntu/Linaro 7.3.0-27ubuntu1~18.04) 7.3.0 8 ubuntu:18.04-x-arm64 : Ok aarch64-linux-gnu-gcc (Ubuntu/Linaro 7.3.0-27ubuntu1~18.04) 7.3.0 Reported-by: Arnaldo Carvalho de Melo <acme@redhat.com> Signed-off-by: Will Deacon <will.deacon@arm.com> Tested-by: Arnaldo Carvalho de Melo <acme@redhat.com> Tested-by: Daniel Borkmann <daniel@iogearbox.net> Cc: Peter Zijlstra <peterz@infradead.org> Link: http://lkml.kernel.org/r/20181031174408.GA27871@arm.com Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
Diffstat (limited to 'tools')
-rw-r--r--  tools/arch/arm64/include/asm/barrier.h  133
1 file changed, 67 insertions, 66 deletions
diff --git a/tools/arch/arm64/include/asm/barrier.h b/tools/arch/arm64/include/asm/barrier.h
index 12835ea0e417..378c051fa177 100644
--- a/tools/arch/arm64/include/asm/barrier.h
+++ b/tools/arch/arm64/include/asm/barrier.h
@@ -14,74 +14,75 @@
14#define wmb() asm volatile("dmb ishst" ::: "memory") 14#define wmb() asm volatile("dmb ishst" ::: "memory")
15#define rmb() asm volatile("dmb ishld" ::: "memory") 15#define rmb() asm volatile("dmb ishld" ::: "memory")
16 16
17#define smp_store_release(p, v) \ 17#define smp_store_release(p, v) \
18do { \ 18do { \
19 union { typeof(*p) __val; char __c[1]; } __u = \ 19 union { typeof(*p) __val; char __c[1]; } __u = \
20 { .__val = (__force typeof(*p)) (v) }; \ 20 { .__val = (v) }; \
21 \ 21 \
22 switch (sizeof(*p)) { \ 22 switch (sizeof(*p)) { \
23 case 1: \ 23 case 1: \
24 asm volatile ("stlrb %w1, %0" \ 24 asm volatile ("stlrb %w1, %0" \
25 : "=Q" (*p) \ 25 : "=Q" (*p) \
26 : "r" (*(__u8 *)__u.__c) \ 26 : "r" (*(__u8_alias_t *)__u.__c) \
27 : "memory"); \ 27 : "memory"); \
28 break; \ 28 break; \
29 case 2: \ 29 case 2: \
30 asm volatile ("stlrh %w1, %0" \ 30 asm volatile ("stlrh %w1, %0" \
31 : "=Q" (*p) \ 31 : "=Q" (*p) \
32 : "r" (*(__u16 *)__u.__c) \ 32 : "r" (*(__u16_alias_t *)__u.__c) \
33 : "memory"); \ 33 : "memory"); \
34 break; \ 34 break; \
35 case 4: \ 35 case 4: \
36 asm volatile ("stlr %w1, %0" \ 36 asm volatile ("stlr %w1, %0" \
37 : "=Q" (*p) \ 37 : "=Q" (*p) \
38 : "r" (*(__u32 *)__u.__c) \ 38 : "r" (*(__u32_alias_t *)__u.__c) \
39 : "memory"); \ 39 : "memory"); \
40 break; \ 40 break; \
41 case 8: \ 41 case 8: \
42 asm volatile ("stlr %1, %0" \ 42 asm volatile ("stlr %1, %0" \
43 : "=Q" (*p) \ 43 : "=Q" (*p) \
44 : "r" (*(__u64 *)__u.__c) \ 44 : "r" (*(__u64_alias_t *)__u.__c) \
45 : "memory"); \ 45 : "memory"); \
46 break; \ 46 break; \
47 default: \ 47 default: \
48 /* Only to shut up gcc ... */ \ 48 /* Only to shut up gcc ... */ \
49 mb(); \ 49 mb(); \
50 break; \ 50 break; \
51 } \ 51 } \
52} while (0) 52} while (0)
53 53
54#define smp_load_acquire(p) \ 54#define smp_load_acquire(p) \
55({ \ 55({ \
56 union { typeof(*p) __val; char __c[1]; } __u; \ 56 union { typeof(*p) __val; char __c[1]; } __u = \
57 \ 57 { .__c = { 0 } }; \
58 switch (sizeof(*p)) { \ 58 \
59 case 1: \ 59 switch (sizeof(*p)) { \
60 asm volatile ("ldarb %w0, %1" \ 60 case 1: \
61 : "=r" (*(__u8 *)__u.__c) \ 61 asm volatile ("ldarb %w0, %1" \
62 : "Q" (*p) : "memory"); \ 62 : "=r" (*(__u8_alias_t *)__u.__c) \
63 break; \ 63 : "Q" (*p) : "memory"); \
64 case 2: \ 64 break; \
65 asm volatile ("ldarh %w0, %1" \ 65 case 2: \
66 : "=r" (*(__u16 *)__u.__c) \ 66 asm volatile ("ldarh %w0, %1" \
67 : "Q" (*p) : "memory"); \ 67 : "=r" (*(__u16_alias_t *)__u.__c) \
68 break; \ 68 : "Q" (*p) : "memory"); \
69 case 4: \ 69 break; \
70 asm volatile ("ldar %w0, %1" \ 70 case 4: \
71 : "=r" (*(__u32 *)__u.__c) \ 71 asm volatile ("ldar %w0, %1" \
72 : "Q" (*p) : "memory"); \ 72 : "=r" (*(__u32_alias_t *)__u.__c) \
73 break; \ 73 : "Q" (*p) : "memory"); \
74 case 8: \ 74 break; \
75 asm volatile ("ldar %0, %1" \ 75 case 8: \
76 : "=r" (*(__u64 *)__u.__c) \ 76 asm volatile ("ldar %0, %1" \
77 : "Q" (*p) : "memory"); \ 77 : "=r" (*(__u64_alias_t *)__u.__c) \
78 break; \ 78 : "Q" (*p) : "memory"); \
79 default: \ 79 break; \
80 /* Only to shut up gcc ... */ \ 80 default: \
81 mb(); \ 81 /* Only to shut up gcc ... */ \
82 break; \ 82 mb(); \
83 } \ 83 break; \
84 __u.__val; \ 84 } \
85 __u.__val; \
85}) 86})
86 87
87#endif /* _TOOLS_LINUX_ASM_AARCH64_BARRIER_H */ 88#endif /* _TOOLS_LINUX_ASM_AARCH64_BARRIER_H */