path: root/include/linux/spinlock.h
Diffstat (limited to 'include/linux/spinlock.h')
-rw-r--r--	include/linux/spinlock.h	14
1 file changed, 11 insertions(+), 3 deletions(-)
diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h
index 7d537ced949a..75f34949d9ab 100644
--- a/include/linux/spinlock.h
+++ b/include/linux/spinlock.h
@@ -117,9 +117,17 @@ do { \
 #endif /*arch_spin_is_contended*/
 #endif
 
-/* The lock does not imply full memory barrier. */
-#ifndef ARCH_HAS_SMP_MB_AFTER_LOCK
-static inline void smp_mb__after_lock(void) { smp_mb(); }
+/*
+ * Despite its name it doesn't necessarily has to be a full barrier.
+ * It should only guarantee that a STORE before the critical section
+ * can not be reordered with a LOAD inside this section.
+ * spin_lock() is the one-way barrier, this LOAD can not escape out
+ * of the region. So the default implementation simply ensures that
+ * a STORE can not move into the critical section, smp_wmb() should
+ * serialize it with another STORE done by spin_lock().
+ */
+#ifndef smp_mb__before_spinlock
+#define smp_mb__before_spinlock()	smp_wmb()
 #endif
 
 /**
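
For readers unfamiliar with the new primitive, a minimal sketch of the intended usage pattern follows. This is illustrative only: the identifiers cond, lock and waker() are made up for this example and do not appear in the patch; only smp_mb__before_spinlock() itself comes from the change above.

#include <linux/spinlock.h>

static int cond;			/* hypothetical flag, not from this patch */
static DEFINE_SPINLOCK(lock);		/* hypothetical lock, not from this patch */

static void waker(void)
{
	cond = 1;			/* STORE before the critical section */
	smp_mb__before_spinlock();	/* defaults to smp_wmb(): pairs the STORE
					 * above with the STORE done by spin_lock() */
	spin_lock(&lock);
	/*
	 * Per the comment added in the hunk above, a LOAD here cannot
	 * escape out of the critical section, and the barrier keeps the
	 * STORE to cond from being reordered with it.
	 */
	spin_unlock(&lock);
}

This matters for wakeup paths such as try_to_wake_up(), which must publish a condition (for example a pending-signal flag) before taking the target task's ->pi_lock and loading its state; without the barrier, that LOAD could be satisfied before the STORE became visible to the other CPU.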