path: root/arch/m68k/include
author    Linus Torvalds <torvalds@linux-foundation.org>    2014-06-03 15:57:53 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>    2014-06-03 15:57:53 -0400
commit    776edb59317ada867dfcddde40b55648beeb0078 (patch)
tree      f6a6136374642323cfefd7d6399ea429f9018ade /arch/m68k/include
parent    59a3d4c3631e553357b7305dc09db1990aa6757c (diff)
parent    3cf2f34e1a3d4d5ff209d087925cf950e52f4805 (diff)
Merge branch 'locking-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip into next
Pull core locking updates from Ingo Molnar:
 "The main changes in this cycle were:

   - reduced/streamlined smp_mb__*() interface that allows more usecases
     and makes the existing ones less buggy, especially in rarer
     architectures

   - add rwsem implementation comments

   - bump up lockdep limits"

* 'locking-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (33 commits)
  rwsem: Add comments to explain the meaning of the rwsem's count field
  lockdep: Increase static allocations
  arch: Mass conversion of smp_mb__*()
  arch,doc: Convert smp_mb__*()
  arch,xtensa: Convert smp_mb__*()
  arch,x86: Convert smp_mb__*()
  arch,tile: Convert smp_mb__*()
  arch,sparc: Convert smp_mb__*()
  arch,sh: Convert smp_mb__*()
  arch,score: Convert smp_mb__*()
  arch,s390: Convert smp_mb__*()
  arch,powerpc: Convert smp_mb__*()
  arch,parisc: Convert smp_mb__*()
  arch,openrisc: Convert smp_mb__*()
  arch,mn10300: Convert smp_mb__*()
  arch,mips: Convert smp_mb__*()
  arch,metag: Convert smp_mb__*()
  arch,m68k: Convert smp_mb__*()
  arch,m32r: Convert smp_mb__*()
  arch,ia64: Convert smp_mb__*()
  ...
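The interface reduction described above replaces the operation-specific barriers
(smp_mb__before_atomic_dec(), smp_mb__after_atomic_inc(), smp_mb__before_clear_bit(), ...)
with a single pair, smp_mb__before_atomic() and smp_mb__after_atomic(). A minimal
caller-side sketch of the conversion; the function and counter names are illustrative,
not taken from this series:

    #include <linux/atomic.h>

    /* Old style: one barrier macro per operation family. */
    static void drop_ref_old(atomic_t *refs)
    {
    	smp_mb__before_atomic_dec();	/* order earlier stores before the RMW */
    	atomic_dec(refs);
    }

    /* New style: a single pair covers every atomic/bitop read-modify-write. */
    static void drop_ref_new(atomic_t *refs)
    {
    	smp_mb__before_atomic();	/* same ordering guarantee, one macro */
    	atomic_dec(refs);
    }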
Diffstat (limited to 'arch/m68k/include')
-rw-r--r--  arch/m68k/include/asm/atomic.h  8
-rw-r--r--  arch/m68k/include/asm/bitops.h  7
2 files changed, 2 insertions(+), 13 deletions(-)
diff --git a/arch/m68k/include/asm/atomic.h b/arch/m68k/include/asm/atomic.h
index f4e32de263a7..55695212a2ae 100644
--- a/arch/m68k/include/asm/atomic.h
+++ b/arch/m68k/include/asm/atomic.h
@@ -4,6 +4,7 @@
 #include <linux/types.h>
 #include <linux/irqflags.h>
 #include <asm/cmpxchg.h>
+#include <asm/barrier.h>
 
 /*
  * Atomic operations that C can't guarantee us. Useful for
@@ -209,11 +210,4 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
 	return c;
 }
 
-
-/* Atomic operations are already serializing */
-#define smp_mb__before_atomic_dec()	barrier()
-#define smp_mb__after_atomic_dec()	barrier()
-#define smp_mb__before_atomic_inc()	barrier()
-#define smp_mb__after_atomic_inc()	barrier()
-
 #endif /* __ARCH_M68K_ATOMIC __ */
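With the arch-specific macros removed, the new #include <asm/barrier.h> is what pulls
in the replacement definitions. A sketch of the generic fallbacks this relies on,
assuming the asm-generic/barrier.h arrangement of that cycle (exact guards may differ):

    /* Fallback definitions: a full SMP barrier unless the arch overrides them. */
    #ifndef smp_mb__before_atomic
    #define smp_mb__before_atomic()	smp_mb()
    #endif

    #ifndef smp_mb__after_atomic
    #define smp_mb__after_atomic()	smp_mb()
    #endif

On m68k, which does not support SMP, smp_mb() reduces to a compiler barrier, so dropping
the barrier()-based definitions should not change the generated code.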
diff --git a/arch/m68k/include/asm/bitops.h b/arch/m68k/include/asm/bitops.h
index c6baa913592a..b4a9b0d5928d 100644
--- a/arch/m68k/include/asm/bitops.h
+++ b/arch/m68k/include/asm/bitops.h
@@ -13,6 +13,7 @@
 #endif
 
 #include <linux/compiler.h>
+#include <asm/barrier.h>
 
 /*
  * Bit access functions vary across the ColdFire and 68k families.
@@ -67,12 +68,6 @@ static inline void bfset_mem_set_bit(int nr, volatile unsigned long *vaddr)
 #define __set_bit(nr, vaddr)	set_bit(nr, vaddr)
 
 
-/*
- * clear_bit() doesn't provide any barrier for the compiler.
- */
-#define smp_mb__before_clear_bit()	barrier()
-#define smp_mb__after_clear_bit()	barrier()
-
 static inline void bclr_reg_clear_bit(int nr, volatile unsigned long *vaddr)
 {
 	char *p = (char *)vaddr + (nr ^ 31) / 8;
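The bitops change mirrors the atomic.h one: smp_mb__before_clear_bit() and
smp_mb__after_clear_bit() disappear in favour of the same generic pair. A sketch of the
typical caller pattern around clear_bit(); the flag bit and function are made up for
illustration, not part of this patch:

    #include <linux/bitops.h>
    #include <linux/wait.h>

    #define MYDEV_BUSY	0	/* illustrative flag bit */

    static void mydev_release(unsigned long *flags)
    {
    	clear_bit(MYDEV_BUSY, flags);
    	smp_mb__after_atomic();		/* was smp_mb__after_clear_bit() */
    	wake_up_bit(flags, MYDEV_BUSY);	/* waiters must observe the cleared bit */
    }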