-rw-r--r--  arch/riscv/Kconfig               1
-rw-r--r--  arch/riscv/include/asm/Kbuild    1
-rw-r--r--  arch/riscv/include/asm/io.h     15
-rw-r--r--  arch/riscv/include/asm/mmiowb.h 14
4 files changed, 17 insertions, 14 deletions
diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig
index eb56c82d8aa1..6e30e8126799 100644
--- a/arch/riscv/Kconfig
+++ b/arch/riscv/Kconfig
@@ -48,6 +48,7 @@ config RISCV
 	select RISCV_TIMER
 	select GENERIC_IRQ_MULTI_HANDLER
 	select ARCH_HAS_PTE_SPECIAL
+	select ARCH_HAS_MMIOWB
 	select HAVE_EBPF_JIT if 64BIT
 
 config MMU
diff --git a/arch/riscv/include/asm/Kbuild b/arch/riscv/include/asm/Kbuild
index 221cd2ec78a4..cccd12cf27d4 100644
--- a/arch/riscv/include/asm/Kbuild
+++ b/arch/riscv/include/asm/Kbuild
@@ -21,7 +21,6 @@ generic-y += kvm_para.h
 generic-y += local.h
 generic-y += local64.h
 generic-y += mm-arch-hooks.h
-generic-y += mmiowb.h
 generic-y += mutex.h
 generic-y += percpu.h
 generic-y += preempt.h
diff --git a/arch/riscv/include/asm/io.h b/arch/riscv/include/asm/io.h
index 1d9c1376dc64..744fd92e77bc 100644
--- a/arch/riscv/include/asm/io.h
+++ b/arch/riscv/include/asm/io.h
@@ -20,6 +20,7 @@
 #define _ASM_RISCV_IO_H
 
 #include <linux/types.h>
+#include <asm/mmiowb.h>
 
 extern void __iomem *ioremap(phys_addr_t offset, unsigned long size);
 
@@ -100,18 +101,6 @@ static inline u64 __raw_readq(const volatile void __iomem *addr)
 #endif
 
 /*
- * FIXME: I'm flip-flopping on whether or not we should keep this or enforce
- * the ordering with I/O on spinlocks like PowerPC does. The worry is that
- * drivers won't get this correct, but I also don't want to introduce a fence
- * into the lock code that otherwise only uses AMOs (and is essentially defined
- * by the ISA to be correct). For now I'm leaving this here: "o,w" is
- * sufficient to ensure that all writes to the device have completed before the
- * write to the spinlock is allowed to commit. I surmised this from reading
- * "ACQUIRES VS I/O ACCESSES" in memory-barriers.txt.
- */
-#define mmiowb()	__asm__ __volatile__ ("fence o,w" : : : "memory");
-
-/*
  * Unordered I/O memory access primitives. These are even more relaxed than
  * the relaxed versions, as they don't even order accesses between successive
  * operations to the I/O regions.
@@ -165,7 +154,7 @@ static inline u64 __raw_readq(const volatile void __iomem *addr)
 #define __io_br()	do {} while (0)
 #define __io_ar(v)	__asm__ __volatile__ ("fence i,r" : : : "memory");
 #define __io_bw()	__asm__ __volatile__ ("fence w,o" : : : "memory");
-#define __io_aw()	do {} while (0)
+#define __io_aw()	mmiowb_set_pending()
 
 #define readb(c)	({ u8  __v; __io_br(); __v = readb_cpu(c); __io_ar(__v); __v; })
 #define readw(c)	({ u16 __v; __io_br(); __v = readw_cpu(c); __io_ar(__v); __v; })
diff --git a/arch/riscv/include/asm/mmiowb.h b/arch/riscv/include/asm/mmiowb.h
new file mode 100644
index 000000000000..5d7e3a2b4e3b
--- /dev/null
+++ b/arch/riscv/include/asm/mmiowb.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef _ASM_RISCV_MMIOWB_H
+#define _ASM_RISCV_MMIOWB_H
+
+/*
+ * "o,w" is sufficient to ensure that all writes to the device have completed
+ * before the write to the spinlock is allowed to commit.
+ */
+#define mmiowb()	__asm__ __volatile__ ("fence o,w" : : : "memory");
+
+#include <asm-generic/mmiowb.h>
+
+#endif	/* ASM_RISCV_MMIOWB_H */
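
For context, below is a minimal, self-contained C model of the behaviour this patch opts into via ARCH_HAS_MMIOWB and <asm-generic/mmiowb.h>: each MMIO write accessor marks a write as pending through __io_aw()/mmiowb_set_pending(), and the spin-unlock path issues mmiowb() (the RISC-V "fence o,w") only when a write is actually pending. This is an illustrative sketch, not the kernel implementation; the names pending_writeout, device_write and spin_unlock_model are hypothetical, a printf stands in for the fence, and a single flag replaces the per-CPU, nesting-aware state the generic header really keeps.

/*
 * Simplified, single-CPU model of the ARCH_HAS_MMIOWB scheme.
 * Hypothetical names; the real kernel tracks this per CPU and
 * handles nested spinlocks.
 */
#include <stdbool.h>
#include <stdio.h>

static bool pending_writeout;           /* models mmiowb_state pending flag */

static void fence_o_w(void)             /* stands in for mmiowb(): fence o,w */
{
	printf("fence o,w\n");
}

static void device_write(void)          /* models a writel()-style accessor */
{
	/* ... MMIO store to the device ... */
	pending_writeout = true;        /* models __io_aw() -> mmiowb_set_pending() */
}

static void spin_unlock_model(void)     /* models the unlock-side hook */
{
	if (pending_writeout) {         /* fence only if MMIO happened in this section */
		pending_writeout = false;
		fence_o_w();
	}
	/* ... release the lock ... */
}

int main(void)
{
	device_write();                 /* MMIO write inside a critical section */
	spin_unlock_model();            /* emits the fence exactly once */
	spin_unlock_model();            /* no MMIO since the last unlock: no fence */
	return 0;
}

The point of the scheme is that uncontended, MMIO-free critical sections pay nothing: the fence is deferred to unlock time and skipped entirely when no device write occurred, instead of being baked into every lock operation or into every I/O accessor.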