path: root/arch/arm64/include
author	Linus Torvalds <torvalds@linux-foundation.org>	2014-01-20 13:23:08 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2014-01-20 13:23:08 -0500
commit	6ffbe7d1fabddc768724656f159759cae7818cd9 (patch)
tree	ece184db0c35bcd9606968303984b430c24b847f /arch/arm64/include
parent	897aea303fec0c24b2a21b8e29f45dc73a234555 (diff)
parent	63b1a81699c2a45c9f737419b1ec1da0ecf92812 (diff)
Merge branch 'core-locking-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull core locking changes from Ingo Molnar:

 - futex performance increases: larger hashes, smarter wakeups

 - mutex debugging improvements

 - lots of SMP ordering documentation updates

 - introduce the smp_load_acquire(), smp_store_release() primitives.
   (There are WIP patches that make use of them - not yet merged)

 - lockdep micro-optimizations

 - lockdep improvement: better cover IRQ contexts

 - liblockdep at last. We'll continue to monitor how useful this is

* 'core-locking-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (34 commits)
  futexes: Fix futex_hashsize initialization
  arch: Re-sort some Kbuild files to hopefully help avoid some conflicts
  futexes: Avoid taking the hb->lock if there's nothing to wake up
  futexes: Document multiprocessor ordering guarantees
  futexes: Increase hash table size for better performance
  futexes: Clean up various details
  arch: Introduce smp_load_acquire(), smp_store_release()
  arch: Clean up asm/barrier.h implementations using asm-generic/barrier.h
  arch: Move smp_mb__{before,after}_atomic_{inc,dec}.h into asm/atomic.h
  locking/doc: Rename LOCK/UNLOCK to ACQUIRE/RELEASE
  mutexes: Give more informative mutex warning in the !lock->owner case
  powerpc: Full barrier for smp_mb__after_unlock_lock()
  rcu: Apply smp_mb__after_unlock_lock() to preserve grace periods
  Documentation/memory-barriers.txt: Downgrade UNLOCK+BLOCK
  locking: Add an smp_mb__after_unlock_lock() for UNLOCK+BLOCK barrier
  Documentation/memory-barriers.txt: Document ACCESS_ONCE()
  Documentation/memory-barriers.txt: Prohibit speculative writes
  Documentation/memory-barriers.txt: Add long atomic examples to memory-barriers.txt
  Documentation/memory-barriers.txt: Add needed ACCESS_ONCE() calls to memory-barriers.txt
  Revert "smp/cpumask: Make CONFIG_CPUMASK_OFFSTACK=y usable without debug dependency"
  ...
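For context on the new primitives mentioned above, here is a minimal sketch of how smp_store_release()/smp_load_acquire() are intended to pair up in a producer/consumer handoff. This example is illustrative only and not part of the merge; the struct message, msg and msg_ready names are hypothetical.

	/*
	 * Illustrative producer/consumer handoff using the new primitives.
	 * All names here are made up for the example.
	 */
	struct message {
		int payload;
	};

	static struct message msg;
	static int msg_ready;

	/* Producer: fill in the payload, then publish it with release semantics. */
	static void publish(int value)
	{
		msg.payload = value;
		/* Orders the payload store before the flag store. */
		smp_store_release(&msg_ready, 1);
	}

	/* Consumer: observe the flag with acquire semantics before reading the payload. */
	static int consume(int *value)
	{
		/* Orders the flag load before the payload load. */
		if (!smp_load_acquire(&msg_ready))
			return 0;
		*value = msg.payload;
		return 1;
	}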
Diffstat (limited to 'arch/arm64/include')
-rw-r--r--	arch/arm64/include/asm/barrier.h	50
1 file changed, 50 insertions, 0 deletions
diff --git a/arch/arm64/include/asm/barrier.h b/arch/arm64/include/asm/barrier.h
index d4a63338a53c..78e20ba8806b 100644
--- a/arch/arm64/include/asm/barrier.h
+++ b/arch/arm64/include/asm/barrier.h
@@ -35,10 +35,60 @@
 #define smp_mb()	barrier()
 #define smp_rmb()	barrier()
 #define smp_wmb()	barrier()
+
+#define smp_store_release(p, v)						\
+do {									\
+	compiletime_assert_atomic_type(*p);				\
+	smp_mb();							\
+	ACCESS_ONCE(*p) = (v);						\
+} while (0)
+
+#define smp_load_acquire(p)						\
+({									\
+	typeof(*p) ___p1 = ACCESS_ONCE(*p);				\
+	compiletime_assert_atomic_type(*p);				\
+	smp_mb();							\
+	___p1;								\
+})
+
 #else
+
 #define smp_mb()	asm volatile("dmb ish" : : : "memory")
 #define smp_rmb()	asm volatile("dmb ishld" : : : "memory")
 #define smp_wmb()	asm volatile("dmb ishst" : : : "memory")
+
+#define smp_store_release(p, v)						\
+do {									\
+	compiletime_assert_atomic_type(*p);				\
+	switch (sizeof(*p)) {						\
+	case 4:								\
+		asm volatile ("stlr %w1, %0"				\
+				: "=Q" (*p) : "r" (v) : "memory");	\
+		break;							\
+	case 8:								\
+		asm volatile ("stlr %1, %0"				\
+				: "=Q" (*p) : "r" (v) : "memory");	\
+		break;							\
+	}								\
+} while (0)
+
+#define smp_load_acquire(p)						\
+({									\
+	typeof(*p) ___p1;						\
+	compiletime_assert_atomic_type(*p);				\
+	switch (sizeof(*p)) {						\
+	case 4:								\
+		asm volatile ("ldar %w0, %1"				\
+				: "=r" (___p1) : "Q" (*p) : "memory");	\
+		break;							\
+	case 8:								\
+		asm volatile ("ldar %0, %1"				\
+				: "=r" (___p1) : "Q" (*p) : "memory");	\
+		break;							\
+	}								\
+	___p1;								\
+})
+
 #endif
 
 #define read_barrier_depends()		do { } while(0)
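As a rough userspace analogue (an illustration only, not part of this patch), the same acquire/release pairing can be written with C11 atomics; a compiler targeting ARMv8 will typically lower the release store and acquire load to the same STLR/LDAR instructions used in the CONFIG_SMP branch above. The names payload and ready are made up for the example.

	#include <stdatomic.h>

	static int payload;
	static atomic_int ready;

	/* Release store: earlier writes are not reordered past it. */
	static void c11_publish(int value)
	{
		payload = value;
		atomic_store_explicit(&ready, 1, memory_order_release);
	}

	/* Acquire load: later reads are not reordered before it. */
	static int c11_consume(int *value)
	{
		if (!atomic_load_explicit(&ready, memory_order_acquire))
			return 0;
		*value = payload;
		return 1;
	}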