diff options
author | Linus Torvalds <torvalds@linux-foundation.org> | 2014-01-20 13:23:08 -0500 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2014-01-20 13:23:08 -0500 |
commit | 6ffbe7d1fabddc768724656f159759cae7818cd9 (patch) | |
tree | ece184db0c35bcd9606968303984b430c24b847f /arch/sparc | |
parent | 897aea303fec0c24b2a21b8e29f45dc73a234555 (diff) | |
parent | 63b1a81699c2a45c9f737419b1ec1da0ecf92812 (diff) |
Merge branch 'core-locking-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull core locking changes from Ingo Molnar:
- futex performance increases: larger hashes, smarter wakeups
- mutex debugging improvements
- lots of SMP ordering documentation updates
- introduce the smp_load_acquire(), smp_store_release() primitives.
(There are WIP patches that make use of them - not yet merged)
- lockdep micro-optimizations
- lockdep improvement: better cover IRQ contexts
- liblockdep at last. We'll continue to monitor how useful this is
* 'core-locking-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (34 commits)
futexes: Fix futex_hashsize initialization
arch: Re-sort some Kbuild files to hopefully help avoid some conflicts
futexes: Avoid taking the hb->lock if there's nothing to wake up
futexes: Document multiprocessor ordering guarantees
futexes: Increase hash table size for better performance
futexes: Clean up various details
arch: Introduce smp_load_acquire(), smp_store_release()
arch: Clean up asm/barrier.h implementations using asm-generic/barrier.h
arch: Move smp_mb__{before,after}_atomic_{inc,dec}.h into asm/atomic.h
locking/doc: Rename LOCK/UNLOCK to ACQUIRE/RELEASE
mutexes: Give more informative mutex warning in the !lock->owner case
powerpc: Full barrier for smp_mb__after_unlock_lock()
rcu: Apply smp_mb__after_unlock_lock() to preserve grace periods
Documentation/memory-barriers.txt: Downgrade UNLOCK+BLOCK
locking: Add an smp_mb__after_unlock_lock() for UNLOCK+BLOCK barrier
Documentation/memory-barriers.txt: Document ACCESS_ONCE()
Documentation/memory-barriers.txt: Prohibit speculative writes
Documentation/memory-barriers.txt: Add long atomic examples to memory-barriers.txt
Documentation/memory-barriers.txt: Add needed ACCESS_ONCE() calls to memory-barriers.txt
Revert "smp/cpumask: Make CONFIG_CPUMASK_OFFSTACK=y usable without debug dependency"
...
Diffstat (limited to 'arch/sparc')
-rw-r--r-- | arch/sparc/include/asm/barrier_32.h | 12 | ||||
-rw-r--r-- | arch/sparc/include/asm/barrier_64.h | 15 |
2 files changed, 17 insertions(+), 10 deletions(-)
diff --git a/arch/sparc/include/asm/barrier_32.h b/arch/sparc/include/asm/barrier_32.h index c1b76654ee76..ae69eda288f4 100644 --- a/arch/sparc/include/asm/barrier_32.h +++ b/arch/sparc/include/asm/barrier_32.h | |||
@@ -1,15 +1,7 @@ | |||
1 | #ifndef __SPARC_BARRIER_H | 1 | #ifndef __SPARC_BARRIER_H |
2 | #define __SPARC_BARRIER_H | 2 | #define __SPARC_BARRIER_H |
3 | 3 | ||
4 | /* XXX Change this if we ever use a PSO mode kernel. */ | 4 | #include <asm/processor.h> /* for nop() */ |
5 | #define mb() __asm__ __volatile__ ("" : : : "memory") | 5 | #include <asm-generic/barrier.h> |
6 | #define rmb() mb() | ||
7 | #define wmb() mb() | ||
8 | #define read_barrier_depends() do { } while(0) | ||
9 | #define set_mb(__var, __value) do { __var = __value; mb(); } while(0) | ||
10 | #define smp_mb() __asm__ __volatile__("":::"memory") | ||
11 | #define smp_rmb() __asm__ __volatile__("":::"memory") | ||
12 | #define smp_wmb() __asm__ __volatile__("":::"memory") | ||
13 | #define smp_read_barrier_depends() do { } while(0) | ||
14 | 6 | ||
15 | #endif /* !(__SPARC_BARRIER_H) */ | 7 | #endif /* !(__SPARC_BARRIER_H) */ |
diff --git a/arch/sparc/include/asm/barrier_64.h b/arch/sparc/include/asm/barrier_64.h index 95d45986f908..b5aad964558e 100644 --- a/arch/sparc/include/asm/barrier_64.h +++ b/arch/sparc/include/asm/barrier_64.h | |||
@@ -53,4 +53,19 @@ do { __asm__ __volatile__("ba,pt %%xcc, 1f\n\t" \ | |||
53 | 53 | ||
54 | #define smp_read_barrier_depends() do { } while(0) | 54 | #define smp_read_barrier_depends() do { } while(0) |
55 | 55 | ||
56 | #define smp_store_release(p, v) \ | ||
57 | do { \ | ||
58 | compiletime_assert_atomic_type(*p); \ | ||
59 | barrier(); \ | ||
60 | ACCESS_ONCE(*p) = (v); \ | ||
61 | } while (0) | ||
62 | |||
63 | #define smp_load_acquire(p) \ | ||
64 | ({ \ | ||
65 | typeof(*p) ___p1 = ACCESS_ONCE(*p); \ | ||
66 | compiletime_assert_atomic_type(*p); \ | ||
67 | barrier(); \ | ||
68 | ___p1; \ | ||
69 | }) | ||
70 | |||
56 | #endif /* !(__SPARC64_BARRIER_H) */ | 71 | #endif /* !(__SPARC64_BARRIER_H) */ |