author     Linus Torvalds <torvalds@linux-foundation.org>  2015-06-22 17:54:22 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2015-06-22 17:54:22 -0400
commit     1bf7067c6e173dc10411704db48338ed69c05565 (patch)
tree       06d731d9647c525fa598d03d7ec957ff9772ff40 /arch/sparc
parent     fc934d40178ad4e551a17e2733241d9f29fddd70 (diff)
parent     68722101ec3a0e179408a13708dd020e04f54aab (diff)
Merge branch 'locking-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull locking updates from Ingo Molnar:
"The main changes are:
- 'qspinlock' support, enabled on x86: queued spinlocks - these are
now the spinlock variant used by x86 as they outperform ticket
spinlocks in every category. (Waiman Long) (a sketch of the
queueing idea these build on follows this message)
- 'pvqspinlock' support on x86: paravirtualized variant of queued
spinlocks. (Waiman Long, Peter Zijlstra)
- 'qrwlock' support, enabled on x86: queued rwlocks. Similar to
queued spinlocks, they are now the variant used by x86:
CONFIG_ARCH_USE_QUEUED_SPINLOCKS=y
CONFIG_QUEUED_SPINLOCKS=y
CONFIG_ARCH_USE_QUEUED_RWLOCKS=y
CONFIG_QUEUED_RWLOCKS=y
- various lockdep fixlets
- various locking primitives cleanups, further WRITE_ONCE()
propagation"
* 'locking-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (24 commits)
locking/lockdep: Remove hard coded array size dependency
locking/qrwlock: Don't contend with readers when setting _QW_WAITING
lockdep: Do not break user-visible string
locking/arch: Rename set_mb() to smp_store_mb()
locking/arch: Add WRITE_ONCE() to set_mb()
rtmutex: Warn if trylock is called from hard/softirq context
arch: Remove __ARCH_HAVE_CMPXCHG
locking/rtmutex: Drop usage of __HAVE_ARCH_CMPXCHG
locking/qrwlock: Rename QUEUE_RWLOCK to QUEUED_RWLOCKS
locking/pvqspinlock: Rename QUEUED_SPINLOCK to QUEUED_SPINLOCKS
locking/pvqspinlock: Replace xchg() by the more descriptive set_mb()
locking/pvqspinlock, x86: Enable PV qspinlock for Xen
locking/pvqspinlock, x86: Enable PV qspinlock for KVM
locking/pvqspinlock, x86: Implement the paravirt qspinlock call patching
locking/pvqspinlock: Implement simple paravirt support for the qspinlock
locking/qspinlock: Revert to test-and-set on hypervisors
locking/qspinlock: Use a simple write to grab the lock
locking/qspinlock: Optimize for smaller NR_CPUS
locking/qspinlock: Extract out code snippets for the next patch
locking/qspinlock: Add pending bit
...
Diffstat (limited to 'arch/sparc')
-rw-r--r--  arch/sparc/include/asm/barrier_64.h  4
-rw-r--r--  arch/sparc/include/asm/cmpxchg_32.h  1
-rw-r--r--  arch/sparc/include/asm/cmpxchg_64.h  2
3 files changed, 2 insertions, 5 deletions
diff --git a/arch/sparc/include/asm/barrier_64.h b/arch/sparc/include/asm/barrier_64.h
index 76648941fea7..809941e33e12 100644
--- a/arch/sparc/include/asm/barrier_64.h
+++ b/arch/sparc/include/asm/barrier_64.h
@@ -40,8 +40,8 @@ do { __asm__ __volatile__("ba,pt %%xcc, 1f\n\t" \
 #define dma_rmb()	rmb()
 #define dma_wmb()	wmb()
 
-#define set_mb(__var, __value) \
-	do { __var = __value; membar_safe("#StoreLoad"); } while(0)
+#define smp_store_mb(__var, __value) \
+	do { WRITE_ONCE(__var, __value); membar_safe("#StoreLoad"); } while(0)
 
 #ifdef CONFIG_SMP
 #define smp_mb()	mb()
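
This hunk is the sparc side of the set_mb() -> smp_store_mb() rename.
For reference, the generic fallback the rename patches introduce (in
asm-generic/barrier.h) expresses the same contract as a compiler-safe
store followed by a full barrier, roughly:

/* Paraphrase of the generic definition, not the sparc one above:
 * store the value exactly once, then fence it against later accesses. */
#define smp_store_mb(var, value) \
	do { WRITE_ONCE(var, value); mb(); } while (0)

sparc64 can use the cheaper membar_safe("#StoreLoad") seen in the hunk
because its TSO memory model already orders everything except a store
followed by a load; the added WRITE_ONCE() is still needed so the
compiler can neither tear nor elide the store itself.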
diff --git a/arch/sparc/include/asm/cmpxchg_32.h b/arch/sparc/include/asm/cmpxchg_32.h
index d38b52dca216..83ffb83c5397 100644
--- a/arch/sparc/include/asm/cmpxchg_32.h
+++ b/arch/sparc/include/asm/cmpxchg_32.h
@@ -34,7 +34,6 @@ static inline unsigned long __xchg(unsigned long x, __volatile__ void * ptr, int
  *
  * Cribbed from <asm-parisc/atomic.h>
  */
-#define __HAVE_ARCH_CMPXCHG	1
 
 /* bug catcher for when unsupported size is used - won't link */
 void __cmpxchg_called_with_bad_pointer(void);
diff --git a/arch/sparc/include/asm/cmpxchg_64.h b/arch/sparc/include/asm/cmpxchg_64.h
index 0e1ed6cfbf68..faa2f61058c2 100644
--- a/arch/sparc/include/asm/cmpxchg_64.h
+++ b/arch/sparc/include/asm/cmpxchg_64.h
@@ -65,8 +65,6 @@ static inline unsigned long __xchg(unsigned long x, __volatile__ void * ptr,
 
 #include <asm-generic/cmpxchg-local.h>
 
-#define __HAVE_ARCH_CMPXCHG	1
-
 static inline unsigned long
 __cmpxchg_u32(volatile int *m, int old, int new)
 {
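
Both cmpxchg hunks above delete the old __HAVE_ARCH_CMPXCHG marker,
which this series retires treewide ("arch: Remove __ARCH_HAVE_CMPXCHG")
now that every architecture provides cmpxchg(). As a reminder of the
primitive's contract, here is an illustrative C11 model (not the kernel
macro; the name cmpxchg_model is made up for the example):

#include <stdatomic.h>

/* Atomically: if *ptr == old, set *ptr = new.  Either way, return the
 * value actually found in *ptr, so the caller can tell success
 * (return == old) from failure (return != old). */
static inline unsigned long
cmpxchg_model(_Atomic unsigned long *ptr, unsigned long old,
	      unsigned long new)
{
	unsigned long expected = old;

	atomic_compare_exchange_strong(ptr, &expected, new);
	return expected;
}

Typical callers loop: load the current value, compute an update, and
retry the exchange until the returned value matches the one they read.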