author		Will Deacon <will.deacon@arm.com>	2019-02-22 07:59:59 -0500
committer	Will Deacon <will.deacon@arm.com>	2019-04-08 06:59:47 -0400
commit		60ca1e5a200cd294a12907fa36dece4241db4ab8 (patch)
tree		2dcd2e666b2b1cad4ce0c80f263cb122bdeebc1b
parent		fdcd06a8ab775cbe716ff893372bed580e4c8a1c (diff)
mmiowb: Hook up mmiowb helpers to spinlocks and generic I/O accessors
Removing explicit calls to mmiowb() from driver code means that we must
now call into the generic mmiowb_spin_{lock,unlock}() functions from the
core spinlock code. In order to elide barriers following critical sections
without any I/O writes, we also hook into the asm-generic I/O routines.

Acked-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Will Deacon <will.deacon@arm.com>
-rw-r--r--	include/asm-generic/io.h		 3
-rw-r--r--	include/linux/spinlock.h		11
-rw-r--r--	kernel/locking/spinlock_debug.c		 6
3 files changed, 17 insertions, 3 deletions
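For context, the helpers wired up here come from <asm/mmiowb.h>, added earlier in this series: acquiring a raw spinlock bumps a per-CPU nesting count, every non-relaxed MMIO write marks a barrier as pending, and releasing the lock issues mmiowb() only if a write actually happened while the lock was held. The sketch below paraphrases that tracking logic for illustration; it is not the verbatim kernel header, the per-CPU state is replaced by a plain variable, and kernel types/macros (u16, unlikely(), mmiowb()) are assumed to be available.

/* Illustrative sketch of the mmiowb tracking logic (paraphrased). The real
 * implementation keeps this state in a per-CPU variable reached through
 * __mmiowb_state(); a plain variable stands in for it here. */
struct mmiowb_state {
	u16	nesting_count;	/* spinlock nesting depth on this CPU */
	u16	mmiowb_pending;	/* non-zero if an MMIO write needs ordering */
};

static struct mmiowb_state ms;	/* stand-in for the per-CPU state */

/* Called from __io_aw(), i.e. after every non-relaxed MMIO write.
 * Recording the nesting count (rather than just 1) means writes issued
 * outside of any spinlock leave the flag at zero, so they never trigger
 * a barrier at a later unlock. */
static inline void mmiowb_set_pending(void)
{
	ms.mmiowb_pending = ms.nesting_count;
}

/* Called when a raw spinlock is acquired. */
static inline void mmiowb_spin_lock(void)
{
	ms.nesting_count++;
}

/* Called when a raw spinlock is released: only pay for mmiowb() if an
 * MMIO write was issued while the lock was held. */
static inline void mmiowb_spin_unlock(void)
{
	if (unlikely(ms.mmiowb_pending)) {
		ms.mmiowb_pending = 0;
		mmiowb();
	}
	ms.nesting_count--;
}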
diff --git a/include/asm-generic/io.h b/include/asm-generic/io.h
index 303871651f8a..bc490a746602 100644
--- a/include/asm-generic/io.h
+++ b/include/asm-generic/io.h
@@ -19,6 +19,7 @@
 #include <asm-generic/iomap.h>
 #endif
 
+#include <asm/mmiowb.h>
 #include <asm-generic/pci_iomap.h>
 
 #ifndef mmiowb
@@ -49,7 +50,7 @@
 
 /* serialize device access against a spin_unlock, usually handled there. */
 #ifndef __io_aw
-#define __io_aw()      barrier()
+#define __io_aw()      mmiowb_set_pending()
 #endif
 
 #ifndef __io_pbw
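The __io_aw() ("after write") hook changed above sits at the tail of the asm-generic MMIO write accessors, so redefining its default from barrier() to mmiowb_set_pending() makes every non-relaxed write record that a barrier may be needed at the next unlock. Roughly, the generic writel() wrapper in this header has the following shape (a simplified sketch, not a verbatim copy):

/* Simplified shape of the asm-generic writel() accessor. */
static inline void writel(u32 value, volatile void __iomem *addr)
{
	__io_bw();				/* ordering before the MMIO write */
	__raw_writel((u32 __force)__cpu_to_le32(value), addr);
	__io_aw();				/* now expands to mmiowb_set_pending() */
}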
diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h
index e089157dcf97..ed7c4d6b8235 100644
--- a/include/linux/spinlock.h
+++ b/include/linux/spinlock.h
@@ -57,6 +57,7 @@
 #include <linux/stringify.h>
 #include <linux/bottom_half.h>
 #include <asm/barrier.h>
+#include <asm/mmiowb.h>
 
 
 /*
@@ -178,6 +179,7 @@ static inline void do_raw_spin_lock(raw_spinlock_t *lock) __acquires(lock)
 {
 	__acquire(lock);
 	arch_spin_lock(&lock->raw_lock);
+	mmiowb_spin_lock();
 }
 
 #ifndef arch_spin_lock_flags
@@ -189,15 +191,22 @@ do_raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long *flags) __acquires(lock)
 {
 	__acquire(lock);
 	arch_spin_lock_flags(&lock->raw_lock, *flags);
+	mmiowb_spin_lock();
 }
 
 static inline int do_raw_spin_trylock(raw_spinlock_t *lock)
 {
-	return arch_spin_trylock(&(lock)->raw_lock);
+	int ret = arch_spin_trylock(&(lock)->raw_lock);
+
+	if (ret)
+		mmiowb_spin_lock();
+
+	return ret;
 }
 
 static inline void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock)
 {
+	mmiowb_spin_unlock();
 	arch_spin_unlock(&lock->raw_lock);
 	__release(lock);
 }
diff --git a/kernel/locking/spinlock_debug.c b/kernel/locking/spinlock_debug.c
index 9aa0fccd5d43..399669f7eba8 100644
--- a/kernel/locking/spinlock_debug.c
+++ b/kernel/locking/spinlock_debug.c
@@ -111,6 +111,7 @@ void do_raw_spin_lock(raw_spinlock_t *lock)
 {
 	debug_spin_lock_before(lock);
 	arch_spin_lock(&lock->raw_lock);
+	mmiowb_spin_lock();
 	debug_spin_lock_after(lock);
 }
 
@@ -118,8 +119,10 @@ int do_raw_spin_trylock(raw_spinlock_t *lock)
 {
 	int ret = arch_spin_trylock(&lock->raw_lock);
 
-	if (ret)
+	if (ret) {
+		mmiowb_spin_lock();
 		debug_spin_lock_after(lock);
+	}
 #ifndef CONFIG_SMP
 	/*
 	 * Must not happen on UP:
@@ -131,6 +134,7 @@ int do_raw_spin_trylock(raw_spinlock_t *lock)
 
 void do_raw_spin_unlock(raw_spinlock_t *lock)
 {
+	mmiowb_spin_unlock();
 	debug_spin_unlock(lock);
 	arch_spin_unlock(&lock->raw_lock);
 }
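Taken together, a driver that pokes its hardware under a spinlock no longer needs an explicit mmiowb() before the unlock. A hypothetical example (the foo_* names and FOO_DOORBELL register are made up purely for illustration):

/* Hypothetical driver snippet; foo_* names are illustrative only. */
static void foo_ring_doorbell(struct foo_dev *foo, u32 val)
{
	spin_lock(&foo->lock);			/* do_raw_spin_lock() -> mmiowb_spin_lock() */
	writel(val, foo->regs + FOO_DOORBELL);	/* __io_aw() -> mmiowb_set_pending() */
	/* no explicit mmiowb() required here any more */
	spin_unlock(&foo->lock);		/* do_raw_spin_unlock() -> mmiowb_spin_unlock(),
						 * which issues mmiowb() because a write is pending */
}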