aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--Documentation/devicetree/bindings/riscv/cpus.txt162
-rw-r--r--MAINTAINERS10
-rw-r--r--Makefile3
-rw-r--r--arch/riscv/Kconfig310
-rw-r--r--arch/riscv/Makefile72
-rw-r--r--arch/riscv/configs/defconfig0
-rw-r--r--arch/riscv/include/asm/Kbuild61
-rw-r--r--arch/riscv/include/asm/asm-offsets.h1
-rw-r--r--arch/riscv/include/asm/asm.h76
-rw-r--r--arch/riscv/include/asm/atomic.h375
-rw-r--r--arch/riscv/include/asm/barrier.h68
-rw-r--r--arch/riscv/include/asm/bitops.h218
-rw-r--r--arch/riscv/include/asm/bug.h88
-rw-r--r--arch/riscv/include/asm/cache.h22
-rw-r--r--arch/riscv/include/asm/cacheflush.h39
-rw-r--r--arch/riscv/include/asm/cmpxchg.h134
-rw-r--r--arch/riscv/include/asm/compat.h29
-rw-r--r--arch/riscv/include/asm/csr.h132
-rw-r--r--arch/riscv/include/asm/current.h45
-rw-r--r--arch/riscv/include/asm/delay.h28
-rw-r--r--arch/riscv/include/asm/dma-mapping.h38
-rw-r--r--arch/riscv/include/asm/elf.h84
-rw-r--r--arch/riscv/include/asm/hwcap.h37
-rw-r--r--arch/riscv/include/asm/io.h303
-rw-r--r--arch/riscv/include/asm/irq.h28
-rw-r--r--arch/riscv/include/asm/irqflags.h63
-rw-r--r--arch/riscv/include/asm/kprobes.h22
-rw-r--r--arch/riscv/include/asm/linkage.h20
-rw-r--r--arch/riscv/include/asm/mmu.h26
-rw-r--r--arch/riscv/include/asm/mmu_context.h69
-rw-r--r--arch/riscv/include/asm/page.h130
-rw-r--r--arch/riscv/include/asm/pci.h48
-rw-r--r--arch/riscv/include/asm/pgalloc.h124
-rw-r--r--arch/riscv/include/asm/pgtable-32.h25
-rw-r--r--arch/riscv/include/asm/pgtable-64.h84
-rw-r--r--arch/riscv/include/asm/pgtable-bits.h48
-rw-r--r--arch/riscv/include/asm/pgtable.h430
-rw-r--r--arch/riscv/include/asm/processor.h97
-rw-r--r--arch/riscv/include/asm/ptrace.h118
-rw-r--r--arch/riscv/include/asm/sbi.h100
-rw-r--r--arch/riscv/include/asm/smp.h52
-rw-r--r--arch/riscv/include/asm/spinlock.h151
-rw-r--r--arch/riscv/include/asm/spinlock_types.h33
-rw-r--r--arch/riscv/include/asm/string.h26
-rw-r--r--arch/riscv/include/asm/switch_to.h69
-rw-r--r--arch/riscv/include/asm/syscall.h102
-rw-r--r--arch/riscv/include/asm/thread_info.h94
-rw-r--r--arch/riscv/include/asm/timex.h59
-rw-r--r--arch/riscv/include/asm/tlb.h24
-rw-r--r--arch/riscv/include/asm/tlbflush.h64
-rw-r--r--arch/riscv/include/asm/uaccess.h513
-rw-r--r--arch/riscv/include/asm/unistd.h16
-rw-r--r--arch/riscv/include/asm/vdso.h41
-rw-r--r--arch/riscv/include/asm/word-at-a-time.h55
-rw-r--r--arch/riscv/include/uapi/asm/Kbuild27
-rw-r--r--arch/riscv/include/uapi/asm/auxvec.h24
-rw-r--r--arch/riscv/include/uapi/asm/bitsperlong.h25
-rw-r--r--arch/riscv/include/uapi/asm/byteorder.h23
-rw-r--r--arch/riscv/include/uapi/asm/elf.h83
-rw-r--r--arch/riscv/include/uapi/asm/hwcap.h36
-rw-r--r--arch/riscv/include/uapi/asm/ptrace.h90
-rw-r--r--arch/riscv/include/uapi/asm/sigcontext.h30
-rw-r--r--arch/riscv/include/uapi/asm/siginfo.h24
-rw-r--r--arch/riscv/include/uapi/asm/ucontext.h45
-rw-r--r--arch/riscv/kernel/.gitignore1
-rw-r--r--arch/riscv/kernel/Makefile33
-rw-r--r--arch/riscv/kernel/asm-offsets.c322
-rw-r--r--arch/riscv/kernel/cacheinfo.c105
-rw-r--r--arch/riscv/kernel/cpu.c108
-rw-r--r--arch/riscv/kernel/cpufeature.c61
-rw-r--r--arch/riscv/kernel/entry.S464
-rw-r--r--arch/riscv/kernel/head.S157
-rw-r--r--arch/riscv/kernel/irq.c39
-rw-r--r--arch/riscv/kernel/module.c217
-rw-r--r--arch/riscv/kernel/process.c129
-rw-r--r--arch/riscv/kernel/ptrace.c125
-rw-r--r--arch/riscv/kernel/reset.c36
-rw-r--r--arch/riscv/kernel/riscv_ksyms.c15
-rw-r--r--arch/riscv/kernel/setup.c257
-rw-r--r--arch/riscv/kernel/signal.c292
-rw-r--r--arch/riscv/kernel/smp.c110
-rw-r--r--arch/riscv/kernel/smpboot.c114
-rw-r--r--arch/riscv/kernel/stacktrace.c177
-rw-r--r--arch/riscv/kernel/sys_riscv.c49
-rw-r--r--arch/riscv/kernel/syscall_table.c25
-rw-r--r--arch/riscv/kernel/time.c61
-rw-r--r--arch/riscv/kernel/traps.c180
-rw-r--r--arch/riscv/kernel/vdso.c125
-rw-r--r--arch/riscv/kernel/vdso/.gitignore2
-rw-r--r--arch/riscv/kernel/vdso/Makefile63
-rw-r--r--arch/riscv/kernel/vdso/rt_sigreturn.S24
-rw-r--r--arch/riscv/kernel/vdso/vdso.S27
-rw-r--r--arch/riscv/kernel/vdso/vdso.lds.S77
-rw-r--r--arch/riscv/kernel/vmlinux.lds.S92
-rw-r--r--arch/riscv/lib/Makefile6
-rw-r--r--arch/riscv/lib/delay.c110
-rw-r--r--arch/riscv/lib/memcpy.S115
-rw-r--r--arch/riscv/lib/memset.S120
-rw-r--r--arch/riscv/lib/uaccess.S117
-rw-r--r--arch/riscv/lib/udivdi3.S38
-rw-r--r--arch/riscv/mm/Makefile4
-rw-r--r--arch/riscv/mm/extable.c37
-rw-r--r--arch/riscv/mm/fault.c282
-rw-r--r--arch/riscv/mm/init.c70
-rw-r--r--arch/riscv/mm/ioremap.c92
-rw-r--r--include/lib/libgcc.h43
-rw-r--r--lib/Kconfig18
-rw-r--r--lib/Makefile8
-rw-r--r--lib/ashldi3.c44
-rw-r--r--lib/ashrdi3.c46
-rw-r--r--lib/cmpdi2.c42
-rw-r--r--lib/lshrdi3.c45
-rw-r--r--lib/muldi3.c72
-rw-r--r--lib/ucmpdi2.c35
114 files changed, 10303 insertions, 1 deletions
diff --git a/Documentation/devicetree/bindings/riscv/cpus.txt b/Documentation/devicetree/bindings/riscv/cpus.txt
new file mode 100644
index 000000000000..adf7b7af5dc3
--- /dev/null
+++ b/Documentation/devicetree/bindings/riscv/cpus.txt
@@ -0,0 +1,162 @@
1===================
2RISC-V CPU Bindings
3===================
4
5The device tree allows describing the layout of CPUs in a system through
6the "cpus" node, which in turn contains a number of subnodes (i.e. "cpu")
7defining properties for every cpu.
8
9Bindings for CPU nodes follow the Devicetree Specification, available from:
10
11https://www.devicetree.org/specifications/
12
13with updates for 32-bit and 64-bit RISC-V systems provided in this document.
14
15===========
16Terminology
17===========
18
19This document uses some terminology common to the RISC-V community that is not
20widely used, the definitions of which are listed here:
21
22* hart: A hardware execution context, which contains all the state mandated by
23 the RISC-V ISA: a PC and some registers. This terminology is designed to
24 disambiguate software's view of execution contexts from any particular
25 microarchitectural implementation strategy. For example, my Intel laptop is
26 described as having one socket with two cores, each of which has two hyper
27 threads. Therefore this system has four harts.
28
29=====================================
30cpus and cpu node bindings definition
31=====================================
32
33The RISC-V architecture, in accordance with the Devicetree Specification,
34requires the cpus and cpu nodes to be present and contain the properties
35described below.
36
37- cpus node
38
39 Description: Container of cpu nodes
40
41 The node name must be "cpus".
42
43 A cpus node must define the following properties:
44
45 - #address-cells
46 Usage: required
47 Value type: <u32>
48 Definition: must be set to 1
49 - #size-cells
50 Usage: required
51 Value type: <u32>
52 Definition: must be set to 0
53
54- cpu node
55
56 Description: Describes a hart context
57
58 PROPERTIES
59
60 - device_type
61 Usage: required
62 Value type: <string>
63 Definition: must be "cpu"
64 - reg
65 Usage: required
66 Value type: <u32>
67 Definition: The hart ID of this CPU node
68 - compatible:
69 Usage: required
70 Value type: <stringlist>
71 Definition: must contain "riscv", may contain one of
72 "sifive,rocket0"
73 - mmu-type:
74 Usage: optional
75 Value type: <string>
76 Definition: Specifies the CPU's MMU type. Possible values are
77 "riscv,sv32"
78 "riscv,sv39"
79 "riscv,sv48"
80 - riscv,isa:
81 Usage: required
82 Value type: <string>
83 Definition: Contains the RISC-V ISA string of this hart. These
84 ISA strings are defined by the RISC-V ISA manual.
85
86Example: SiFive Freedom U540G Development Kit
87---------------------------------------------
88
89This system contains two harts: a hart marked as disabled that's used for
90low-level system tasks and should be ignored by Linux, and a second hart that
91Linux is allowed to run on.
92
93 cpus {
94 #address-cells = <1>;
95 #size-cells = <0>;
96 timebase-frequency = <1000000>;
97 cpu@0 {
98 clock-frequency = <1600000000>;
99 compatible = "sifive,rocket0", "riscv";
100 device_type = "cpu";
101 i-cache-block-size = <64>;
102 i-cache-sets = <128>;
103 i-cache-size = <16384>;
104 next-level-cache = <&L15 &L0>;
105 reg = <0>;
106 riscv,isa = "rv64imac";
107 status = "disabled";
108 L10: interrupt-controller {
109 #interrupt-cells = <1>;
110 compatible = "riscv,cpu-intc";
111 interrupt-controller;
112 };
113 };
114 cpu@1 {
115 clock-frequency = <1600000000>;
116 compatible = "sifive,rocket0", "riscv";
117 d-cache-block-size = <64>;
118 d-cache-sets = <64>;
119 d-cache-size = <32768>;
120 d-tlb-sets = <1>;
121 d-tlb-size = <32>;
122 device_type = "cpu";
123 i-cache-block-size = <64>;
124 i-cache-sets = <64>;
125 i-cache-size = <32768>;
126 i-tlb-sets = <1>;
127 i-tlb-size = <32>;
128 mmu-type = "riscv,sv39";
129 next-level-cache = <&L15 &L0>;
130 reg = <1>;
131 riscv,isa = "rv64imafdc";
132 status = "okay";
133 tlb-split;
134 L13: interrupt-controller {
135 #interrupt-cells = <1>;
136 compatible = "riscv,cpu-intc";
137 interrupt-controller;
138 };
139 };
140 };
141
142Example: Spike ISA Simulator with 1 Hart
143----------------------------------------
144
145This device tree matches the Spike ISA golden model as run with `spike -p1`.
146
147 cpus {
148 cpu@0 {
149 device_type = "cpu";
150 reg = <0x00000000>;
151 status = "okay";
152 compatible = "riscv";
153 riscv,isa = "rv64imafdc";
154 mmu-type = "riscv,sv48";
155 clock-frequency = <0x3b9aca00>;
156 interrupt-controller {
157 #interrupt-cells = <0x00000001>;
158 interrupt-controller;
159 compatible = "riscv,cpu-intc";
160 };
161 };
162 };
diff --git a/MAINTAINERS b/MAINTAINERS
index f6fed47d548e..82ed85135971 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -11534,6 +11534,16 @@ S: Maintained
11534F: drivers/mtd/nand/r852.c 11534F: drivers/mtd/nand/r852.c
11535F: drivers/mtd/nand/r852.h 11535F: drivers/mtd/nand/r852.h
11536 11536
11537RISC-V ARCHITECTURE
11538M: Palmer Dabbelt <palmer@sifive.com>
11539M: Albert Ou <albert@sifive.com>
11540L: patches@groups.riscv.org
11541T: git https://github.com/riscv/riscv-linux
11542S: Supported
11543F: arch/riscv/
11544K: riscv
11545N: riscv
11546
11537ROCCAT DRIVERS 11547ROCCAT DRIVERS
11538M: Stefan Achatz <erazor_de@users.sourceforge.net> 11548M: Stefan Achatz <erazor_de@users.sourceforge.net>
11539W: http://sourceforge.net/projects/roccat/ 11549W: http://sourceforge.net/projects/roccat/
diff --git a/Makefile b/Makefile
index 33cefc31124d..763ab35df12a 100644
--- a/Makefile
+++ b/Makefile
@@ -226,7 +226,8 @@ SUBARCH := $(shell uname -m | sed -e s/i.86/x86/ -e s/x86_64/x86/ \
226 -e s/arm.*/arm/ -e s/sa110/arm/ \ 226 -e s/arm.*/arm/ -e s/sa110/arm/ \
227 -e s/s390x/s390/ -e s/parisc64/parisc/ \ 227 -e s/s390x/s390/ -e s/parisc64/parisc/ \
228 -e s/ppc.*/powerpc/ -e s/mips.*/mips/ \ 228 -e s/ppc.*/powerpc/ -e s/mips.*/mips/ \
229 -e s/sh[234].*/sh/ -e s/aarch64.*/arm64/ ) 229 -e s/sh[234].*/sh/ -e s/aarch64.*/arm64/ \
230 -e s/riscv.*/riscv/)
230 231
231# Cross compiling and selecting different set of gcc/bin-utils 232# Cross compiling and selecting different set of gcc/bin-utils
232# --------------------------------------------------------------------------- 233# ---------------------------------------------------------------------------
diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig
new file mode 100644
index 000000000000..2c6adf12713a
--- /dev/null
+++ b/arch/riscv/Kconfig
@@ -0,0 +1,310 @@
1#
2# For a description of the syntax of this configuration file,
3# see Documentation/kbuild/kconfig-language.txt.
4#
5
6config RISCV
7 def_bool y
8 select OF
9 select OF_EARLY_FLATTREE
10 select OF_IRQ
11 select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE
12 select ARCH_WANT_FRAME_POINTERS
13 select CLONE_BACKWARDS
14 select COMMON_CLK
15 select GENERIC_CLOCKEVENTS
16 select GENERIC_CPU_DEVICES
17 select GENERIC_IRQ_SHOW
18 select GENERIC_PCI_IOMAP
19 select GENERIC_STRNCPY_FROM_USER
20 select GENERIC_STRNLEN_USER
21 select GENERIC_SMP_IDLE_THREAD
22 select GENERIC_ATOMIC64 if !64BIT || !RISCV_ISA_A
23 select ARCH_WANT_OPTIONAL_GPIOLIB
24 select HAVE_MEMBLOCK
25 select HAVE_DMA_API_DEBUG
26 select HAVE_DMA_CONTIGUOUS
27 select HAVE_GENERIC_DMA_COHERENT
28 select IRQ_DOMAIN
29 select NO_BOOTMEM
30 select RISCV_ISA_A if SMP
31 select SPARSE_IRQ
32 select SYSCTL_EXCEPTION_TRACE
33 select HAVE_ARCH_TRACEHOOK
34 select MODULES_USE_ELF_RELA if MODULES
35 select THREAD_INFO_IN_TASK
36 select RISCV_IRQ_INTC
37 select RISCV_TIMER
38
39config MMU
40 def_bool y
41
42# even on 32-bit, physical (and DMA) addresses are > 32-bits
43config ARCH_PHYS_ADDR_T_64BIT
44 def_bool y
45
46config ARCH_DMA_ADDR_T_64BIT
47 def_bool y
48
49config PAGE_OFFSET
50 hex
51 default 0xC0000000 if 32BIT && MAXPHYSMEM_2GB
52 default 0xffffffff80000000 if 64BIT && MAXPHYSMEM_2GB
53 default 0xffffffe000000000 if 64BIT && MAXPHYSMEM_128GB
54
55config STACKTRACE_SUPPORT
56 def_bool y
57
58config RWSEM_GENERIC_SPINLOCK
59 def_bool y
60
61config GENERIC_BUG
62 def_bool y
63 depends on BUG
64 select GENERIC_BUG_RELATIVE_POINTERS if 64BIT
65
66config GENERIC_BUG_RELATIVE_POINTERS
67 bool
68
69config GENERIC_CALIBRATE_DELAY
70 def_bool y
71
72config GENERIC_CSUM
73 def_bool y
74
75config GENERIC_HWEIGHT
76 def_bool y
77
78config PGTABLE_LEVELS
79 int
80 default 3 if 64BIT
81 default 2
82
83config HAVE_KPROBES
84 def_bool n
85
86config DMA_NOOP_OPS
87 def_bool y
88
89menu "Platform type"
90
91choice
92 prompt "Base ISA"
93 default ARCH_RV64I
94 help
95 This selects the base ISA that this kernel will traget and must match
96 the target platform.
97
98config ARCH_RV32I
99 bool "RV32I"
100 select CPU_SUPPORTS_32BIT_KERNEL
101 select 32BIT
102 select GENERIC_ASHLDI3
103 select GENERIC_ASHRDI3
104 select GENERIC_LSHRDI3
105
106config ARCH_RV64I
107 bool "RV64I"
108 select CPU_SUPPORTS_64BIT_KERNEL
109 select 64BIT
110
111endchoice
112
113# We must be able to map all physical memory into the kernel, but the compiler
114# is still a bit more efficient when generating code if it's setup in a manner
115# such that it can only map 2GiB of memory.
116choice
117 prompt "Kernel Code Model"
118 default CMODEL_MEDLOW if 32BIT
119 default CMODEL_MEDANY if 64BIT
120
121 config CMODEL_MEDLOW
122 bool "medium low code model"
123 config CMODEL_MEDANY
124 bool "medium any code model"
125endchoice
126
127choice
128 prompt "Maximum Physical Memory"
129 default MAXPHYSMEM_2GB if 32BIT
130 default MAXPHYSMEM_2GB if 64BIT && CMODEL_MEDLOW
131 default MAXPHYSMEM_128GB if 64BIT && CMODEL_MEDANY
132
133 config MAXPHYSMEM_2GB
134 bool "2GiB"
135 config MAXPHYSMEM_128GB
136 depends on 64BIT && CMODEL_MEDANY
137 bool "128GiB"
138endchoice
139
140
141config SMP
142 bool "Symmetric Multi-Processing"
143 help
144 This enables support for systems with more than one CPU. If
145 you say N here, the kernel will run on single and
146 multiprocessor machines, but will use only one CPU of a
147 multiprocessor machine. If you say Y here, the kernel will run
148 on many, but not all, single processor machines. On a single
149 processor machine, the kernel will run faster if you say N
150 here.
151
152 If you don't know what to do here, say N.
153
154config NR_CPUS
155 int "Maximum number of CPUs (2-32)"
156 range 2 32
157 depends on SMP
158 default "8"
159
160config CPU_SUPPORTS_32BIT_KERNEL
161 bool
162config CPU_SUPPORTS_64BIT_KERNEL
163 bool
164
165choice
166 prompt "CPU Tuning"
167 default TUNE_GENERIC
168
169config TUNE_GENERIC
170 bool "generic"
171
172endchoice
173
174config RISCV_ISA_C
175 bool "Emit compressed instructions when building Linux"
176 default y
177 help
178 Adds "C" to the ISA subsets that the toolchain is allowed to emit
179 when building Linux, which results in compressed instructions in the
180 Linux binary.
181
182 If you don't know what to do here, say Y.
183
184config RISCV_ISA_A
185 def_bool y
186
187endmenu
188
189menu "Kernel type"
190
191choice
192 prompt "Kernel code model"
193 default 64BIT
194
195config 32BIT
196 bool "32-bit kernel"
197 depends on CPU_SUPPORTS_32BIT_KERNEL
198 help
199 Select this option to build a 32-bit kernel.
200
201config 64BIT
202 bool "64-bit kernel"
203 depends on CPU_SUPPORTS_64BIT_KERNEL
204 help
205 Select this option to build a 64-bit kernel.
206
207endchoice
208
209source "mm/Kconfig"
210
211source "kernel/Kconfig.preempt"
212
213source "kernel/Kconfig.hz"
214
215endmenu
216
217menu "Bus support"
218
219config PCI
220 bool "PCI support"
221 select PCI_MSI
222 help
223 This feature enables support for PCI bus system. If you say Y
224 here, the kernel will include drivers and infrastructure code
225 to support PCI bus devices.
226
227 If you don't know what to do here, say Y.
228
229config PCI_DOMAINS
230 def_bool PCI
231
232config PCI_DOMAINS_GENERIC
233 def_bool PCI
234
235source "drivers/pci/Kconfig"
236
237endmenu
238
239source "init/Kconfig"
240
241source "kernel/Kconfig.freezer"
242
243menu "Executable file formats"
244
245source "fs/Kconfig.binfmt"
246
247endmenu
248
249menu "Power management options"
250
251source kernel/power/Kconfig
252
253endmenu
254
255source "net/Kconfig"
256
257source "drivers/Kconfig"
258
259source "fs/Kconfig"
260
261menu "Kernel hacking"
262
263config CMDLINE_BOOL
264 bool "Built-in kernel command line"
265 help
266 For most platforms, it is firmware or second stage bootloader
267 that by default specifies the kernel command line options.
268 However, it might be necessary or advantageous to either override
269 the default kernel command line or add a few extra options to it.
270 For such cases, this option allows hardcoding command line options
271 directly into the kernel.
272
273 For that, choose 'Y' here and fill in the extra boot parameters
274 in CONFIG_CMDLINE.
275
276 The built-in options will be concatenated to the default command
277 line if CMDLINE_OVERRIDE is set to 'N'. Otherwise, the default
278 command line will be ignored and replaced by the built-in string.
279
280config CMDLINE
281 string "Built-in kernel command string"
282 depends on CMDLINE_BOOL
283 default ""
284 help
285 Supply command-line options at build time by entering them here.
286
287config CMDLINE_OVERRIDE
288 bool "Built-in command line overrides bootloader arguments"
289 depends on CMDLINE_BOOL
290 help
291 Set this option to 'Y' to have the kernel ignore the bootloader
292 or firmware command line. Instead, the built-in command line
293 will be used exclusively.
294
295 If you don't know what to do here, say N.
296
297config EARLY_PRINTK
298 def_bool y
299
300source "lib/Kconfig.debug"
301
302config CMDLINE_BOOL
303 bool
304endmenu
305
306source "security/Kconfig"
307
308source "crypto/Kconfig"
309
310source "lib/Kconfig"
diff --git a/arch/riscv/Makefile b/arch/riscv/Makefile
new file mode 100644
index 000000000000..6719dd30ec5b
--- /dev/null
+++ b/arch/riscv/Makefile
@@ -0,0 +1,72 @@
1# This file is included by the global makefile so that you can add your own
2# architecture-specific flags and dependencies. Remember to have actions
3# for "archclean" and "archdep" for cleaning up and making dependencies for
4# this architecture
5#
6# This file is subject to the terms and conditions of the GNU General Public
7# License. See the file "COPYING" in the main directory of this archive
8# for more details.
9#
10
11LDFLAGS :=
12OBJCOPYFLAGS := -O binary
13LDFLAGS_vmlinux :=
14KBUILD_AFLAGS_MODULE += -fPIC
15KBUILD_CFLAGS_MODULE += -fPIC
16
17KBUILD_DEFCONFIG = defconfig
18
19export BITS
20ifeq ($(CONFIG_ARCH_RV64I),y)
21 BITS := 64
22 UTS_MACHINE := riscv64
23
24 KBUILD_CFLAGS += -mabi=lp64
25 KBUILD_AFLAGS += -mabi=lp64
26 KBUILD_MARCH = rv64im
27 LDFLAGS += -melf64lriscv
28else
29 BITS := 32
30 UTS_MACHINE := riscv32
31
32 KBUILD_CFLAGS += -mabi=ilp32
33 KBUILD_AFLAGS += -mabi=ilp32
34 KBUILD_MARCH = rv32im
35 LDFLAGS += -melf32lriscv
36endif
37
38KBUILD_CFLAGS += -Wall
39
40ifeq ($(CONFIG_RISCV_ISA_A),y)
41 KBUILD_ARCH_A = a
42endif
43ifeq ($(CONFIG_RISCV_ISA_C),y)
44 KBUILD_ARCH_C = c
45endif
46
47KBUILD_AFLAGS += -march=$(KBUILD_MARCH)$(KBUILD_ARCH_A)fd$(KBUILD_ARCH_C)
48
49KBUILD_CFLAGS += -march=$(KBUILD_MARCH)$(KBUILD_ARCH_A)$(KBUILD_ARCH_C)
50KBUILD_CFLAGS += -mno-save-restore
51KBUILD_CFLAGS += -DCONFIG_PAGE_OFFSET=$(CONFIG_PAGE_OFFSET)
52
53ifeq ($(CONFIG_CMODEL_MEDLOW),y)
54 KBUILD_CFLAGS += -mcmodel=medlow
55endif
56ifeq ($(CONFIG_CMODEL_MEDANY),y)
57 KBUILD_CFLAGS += -mcmodel=medany
58endif
59
60# GCC versions that support the "-mstrict-align" option default to allowing
61# unaligned accesses. While unaligned accesses are explicitly allowed in the
62# RISC-V ISA, they're emulated by machine mode traps on all extant
63# architectures. It's faster to have GCC emit only aligned accesses.
64KBUILD_CFLAGS += $(call cc-option,-mstrict-align)
65
66head-y := arch/riscv/kernel/head.o
67
68core-y += arch/riscv/kernel/ arch/riscv/mm/
69
70libs-y += arch/riscv/lib/
71
72all: vmlinux
diff --git a/arch/riscv/configs/defconfig b/arch/riscv/configs/defconfig
new file mode 100644
index 000000000000..e69de29bb2d1
--- /dev/null
+++ b/arch/riscv/configs/defconfig
diff --git a/arch/riscv/include/asm/Kbuild b/arch/riscv/include/asm/Kbuild
new file mode 100644
index 000000000000..18158be62a2b
--- /dev/null
+++ b/arch/riscv/include/asm/Kbuild
@@ -0,0 +1,61 @@
1generic-y += bugs.h
2generic-y += cacheflush.h
3generic-y += checksum.h
4generic-y += clkdev.h
5generic-y += cputime.h
6generic-y += device.h
7generic-y += div64.h
8generic-y += dma.h
9generic-y += dma-contiguous.h
10generic-y += emergency-restart.h
11generic-y += errno.h
12generic-y += exec.h
13generic-y += fb.h
14generic-y += fcntl.h
15generic-y += ftrace.h
16generic-y += futex.h
17generic-y += hardirq.h
18generic-y += hash.h
19generic-y += hw_irq.h
20generic-y += ioctl.h
21generic-y += ioctls.h
22generic-y += ipcbuf.h
23generic-y += irq_regs.h
24generic-y += irq_work.h
25generic-y += kdebug.h
26generic-y += kmap_types.h
27generic-y += kvm_para.h
28generic-y += local.h
29generic-y += mm-arch-hooks.h
30generic-y += mman.h
31generic-y += module.h
32generic-y += msgbuf.h
33generic-y += mutex.h
34generic-y += param.h
35generic-y += percpu.h
36generic-y += poll.h
37generic-y += posix_types.h
38generic-y += preempt.h
39generic-y += resource.h
40generic-y += scatterlist.h
41generic-y += sections.h
42generic-y += sembuf.h
43generic-y += setup.h
44generic-y += shmbuf.h
45generic-y += shmparam.h
46generic-y += signal.h
47generic-y += socket.h
48generic-y += sockios.h
49generic-y += stat.h
50generic-y += statfs.h
51generic-y += swab.h
52generic-y += termbits.h
53generic-y += termios.h
54generic-y += topology.h
55generic-y += trace_clock.h
56generic-y += types.h
57generic-y += unaligned.h
58generic-y += user.h
59generic-y += vga.h
60generic-y += vmlinux.lds.h
61generic-y += xor.h
diff --git a/arch/riscv/include/asm/asm-offsets.h b/arch/riscv/include/asm/asm-offsets.h
new file mode 100644
index 000000000000..d370ee36a182
--- /dev/null
+++ b/arch/riscv/include/asm/asm-offsets.h
@@ -0,0 +1 @@
#include <generated/asm-offsets.h>
diff --git a/arch/riscv/include/asm/asm.h b/arch/riscv/include/asm/asm.h
new file mode 100644
index 000000000000..6cbbb6a68d76
--- /dev/null
+++ b/arch/riscv/include/asm/asm.h
@@ -0,0 +1,76 @@
1/*
2 * Copyright (C) 2015 Regents of the University of California
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation, version 2.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 */
13
14#ifndef _ASM_RISCV_ASM_H
15#define _ASM_RISCV_ASM_H
16
17#ifdef __ASSEMBLY__
18#define __ASM_STR(x) x
19#else
20#define __ASM_STR(x) #x
21#endif
22
23#if __riscv_xlen == 64
24#define __REG_SEL(a, b) __ASM_STR(a)
25#elif __riscv_xlen == 32
26#define __REG_SEL(a, b) __ASM_STR(b)
27#else
28#error "Unexpected __riscv_xlen"
29#endif
30
31#define REG_L __REG_SEL(ld, lw)
32#define REG_S __REG_SEL(sd, sw)
33#define SZREG __REG_SEL(8, 4)
34#define LGREG __REG_SEL(3, 2)
35
36#if __SIZEOF_POINTER__ == 8
37#ifdef __ASSEMBLY__
38#define RISCV_PTR .dword
39#define RISCV_SZPTR 8
40#define RISCV_LGPTR 3
41#else
42#define RISCV_PTR ".dword"
43#define RISCV_SZPTR "8"
44#define RISCV_LGPTR "3"
45#endif
46#elif __SIZEOF_POINTER__ == 4
47#ifdef __ASSEMBLY__
48#define RISCV_PTR .word
49#define RISCV_SZPTR 4
50#define RISCV_LGPTR 2
51#else
52#define RISCV_PTR ".word"
53#define RISCV_SZPTR "4"
54#define RISCV_LGPTR "2"
55#endif
56#else
57#error "Unexpected __SIZEOF_POINTER__"
58#endif
59
60#if (__SIZEOF_INT__ == 4)
61#define INT __ASM_STR(.word)
62#define SZINT __ASM_STR(4)
63#define LGINT __ASM_STR(2)
64#else
65#error "Unexpected __SIZEOF_INT__"
66#endif
67
68#if (__SIZEOF_SHORT__ == 2)
69#define SHORT __ASM_STR(.half)
70#define SZSHORT __ASM_STR(2)
71#define LGSHORT __ASM_STR(1)
72#else
73#error "Unexpected __SIZEOF_SHORT__"
74#endif
75
76#endif /* _ASM_RISCV_ASM_H */
diff --git a/arch/riscv/include/asm/atomic.h b/arch/riscv/include/asm/atomic.h
new file mode 100644
index 000000000000..e2e37c57cbeb
--- /dev/null
+++ b/arch/riscv/include/asm/atomic.h
@@ -0,0 +1,375 @@
1/*
2 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
3 * Copyright (C) 2012 Regents of the University of California
4 * Copyright (C) 2017 SiFive
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public Licence
8 * as published by the Free Software Foundation; either version
9 * 2 of the Licence, or (at your option) any later version.
10 */
11
12#ifndef _ASM_RISCV_ATOMIC_H
13#define _ASM_RISCV_ATOMIC_H
14
15#ifdef CONFIG_GENERIC_ATOMIC64
16# include <asm-generic/atomic64.h>
17#else
18# if (__riscv_xlen < 64)
19# error "64-bit atomics require XLEN to be at least 64"
20# endif
21#endif
22
23#include <asm/cmpxchg.h>
24#include <asm/barrier.h>
25
26#define ATOMIC_INIT(i) { (i) }
27static __always_inline int atomic_read(const atomic_t *v)
28{
29 return READ_ONCE(v->counter);
30}
31static __always_inline void atomic_set(atomic_t *v, int i)
32{
33 WRITE_ONCE(v->counter, i);
34}
35
36#ifndef CONFIG_GENERIC_ATOMIC64
37#define ATOMIC64_INIT(i) { (i) }
38static __always_inline long atomic64_read(const atomic64_t *v)
39{
40 return READ_ONCE(v->counter);
41}
42static __always_inline void atomic64_set(atomic64_t *v, long i)
43{
44 WRITE_ONCE(v->counter, i);
45}
46#endif
47
48/*
49 * First, the atomic ops that have no ordering constraints and therefore don't
50 * have the AQ or RL bits set. These don't return anything, so there's only
51 * one version to worry about.
52 */
53#define ATOMIC_OP(op, asm_op, c_op, I, asm_type, c_type, prefix) \
54static __always_inline void atomic##prefix##_##op(c_type i, atomic##prefix##_t *v) \
55{ \
56 __asm__ __volatile__ ( \
57 "amo" #asm_op "." #asm_type " zero, %1, %0" \
58 : "+A" (v->counter) \
59 : "r" (I) \
60 : "memory"); \
61}
62
63#ifdef CONFIG_GENERIC_ATOMIC64
64#define ATOMIC_OPS(op, asm_op, c_op, I) \
65 ATOMIC_OP (op, asm_op, c_op, I, w, int, )
66#else
67#define ATOMIC_OPS(op, asm_op, c_op, I) \
68 ATOMIC_OP (op, asm_op, c_op, I, w, int, ) \
69 ATOMIC_OP (op, asm_op, c_op, I, d, long, 64)
70#endif
71
72ATOMIC_OPS(add, add, +, i)
73ATOMIC_OPS(sub, add, +, -i)
74ATOMIC_OPS(and, and, &, i)
75ATOMIC_OPS( or, or, |, i)
76ATOMIC_OPS(xor, xor, ^, i)
77
78#undef ATOMIC_OP
79#undef ATOMIC_OPS
80
81/*
82 * Atomic ops that have ordered, relaxed, acquire, and release variants.
83 * There are two flavors of these: the arithmetic ops have both fetch and return
84 * versions, while the logical ops only have fetch versions.
85 */
86#define ATOMIC_FETCH_OP(op, asm_op, c_op, I, asm_or, c_or, asm_type, c_type, prefix) \
87static __always_inline c_type atomic##prefix##_fetch_##op##c_or(c_type i, atomic##prefix##_t *v) \
88{ \
89 register c_type ret; \
90 __asm__ __volatile__ ( \
91 "amo" #asm_op "." #asm_type #asm_or " %1, %2, %0" \
92 : "+A" (v->counter), "=r" (ret) \
93 : "r" (I) \
94 : "memory"); \
95 return ret; \
96}
97
98#define ATOMIC_OP_RETURN(op, asm_op, c_op, I, asm_or, c_or, asm_type, c_type, prefix) \
99static __always_inline c_type atomic##prefix##_##op##_return##c_or(c_type i, atomic##prefix##_t *v) \
100{ \
101 return atomic##prefix##_fetch_##op##c_or(i, v) c_op I; \
102}
103
104#ifdef CONFIG_GENERIC_ATOMIC64
105#define ATOMIC_OPS(op, asm_op, c_op, I, asm_or, c_or) \
106 ATOMIC_FETCH_OP (op, asm_op, c_op, I, asm_or, c_or, w, int, ) \
107 ATOMIC_OP_RETURN(op, asm_op, c_op, I, asm_or, c_or, w, int, )
108#else
109#define ATOMIC_OPS(op, asm_op, c_op, I, asm_or, c_or) \
110 ATOMIC_FETCH_OP (op, asm_op, c_op, I, asm_or, c_or, w, int, ) \
111 ATOMIC_OP_RETURN(op, asm_op, c_op, I, asm_or, c_or, w, int, ) \
112 ATOMIC_FETCH_OP (op, asm_op, c_op, I, asm_or, c_or, d, long, 64) \
113 ATOMIC_OP_RETURN(op, asm_op, c_op, I, asm_or, c_or, d, long, 64)
114#endif
115
116ATOMIC_OPS(add, add, +, i, , _relaxed)
117ATOMIC_OPS(add, add, +, i, .aq , _acquire)
118ATOMIC_OPS(add, add, +, i, .rl , _release)
119ATOMIC_OPS(add, add, +, i, .aqrl, )
120
121ATOMIC_OPS(sub, add, +, -i, , _relaxed)
122ATOMIC_OPS(sub, add, +, -i, .aq , _acquire)
123ATOMIC_OPS(sub, add, +, -i, .rl , _release)
124ATOMIC_OPS(sub, add, +, -i, .aqrl, )
125
126#undef ATOMIC_OPS
127
128#ifdef CONFIG_GENERIC_ATOMIC64
129#define ATOMIC_OPS(op, asm_op, c_op, I, asm_or, c_or) \
130 ATOMIC_FETCH_OP(op, asm_op, c_op, I, asm_or, c_or, w, int, )
131#else
132#define ATOMIC_OPS(op, asm_op, c_op, I, asm_or, c_or) \
133 ATOMIC_FETCH_OP(op, asm_op, c_op, I, asm_or, c_or, w, int, ) \
134 ATOMIC_FETCH_OP(op, asm_op, c_op, I, asm_or, c_or, d, long, 64)
135#endif
136
137ATOMIC_OPS(and, and, &, i, , _relaxed)
138ATOMIC_OPS(and, and, &, i, .aq , _acquire)
139ATOMIC_OPS(and, and, &, i, .rl , _release)
140ATOMIC_OPS(and, and, &, i, .aqrl, )
141
142ATOMIC_OPS( or, or, |, i, , _relaxed)
143ATOMIC_OPS( or, or, |, i, .aq , _acquire)
144ATOMIC_OPS( or, or, |, i, .rl , _release)
145ATOMIC_OPS( or, or, |, i, .aqrl, )
146
147ATOMIC_OPS(xor, xor, ^, i, , _relaxed)
148ATOMIC_OPS(xor, xor, ^, i, .aq , _acquire)
149ATOMIC_OPS(xor, xor, ^, i, .rl , _release)
150ATOMIC_OPS(xor, xor, ^, i, .aqrl, )
151
152#undef ATOMIC_OPS
153
154#undef ATOMIC_FETCH_OP
155#undef ATOMIC_OP_RETURN
156
157/*
158 * The extra atomic operations that are constructed from one of the core
159 * AMO-based operations above (aside from sub, which is easier to fit above).
160 * These are required to perform a barrier, but they're OK this way because
161 * atomic_*_return is also required to perform a barrier.
162 */
163#define ATOMIC_OP(op, func_op, comp_op, I, c_type, prefix) \
164static __always_inline bool atomic##prefix##_##op(c_type i, atomic##prefix##_t *v) \
165{ \
166 return atomic##prefix##_##func_op##_return(i, v) comp_op I; \
167}
168
169#ifdef CONFIG_GENERIC_ATOMIC64
170#define ATOMIC_OPS(op, func_op, comp_op, I) \
171 ATOMIC_OP (op, func_op, comp_op, I, int, )
172#else
173#define ATOMIC_OPS(op, func_op, comp_op, I) \
174 ATOMIC_OP (op, func_op, comp_op, I, int, ) \
175 ATOMIC_OP (op, func_op, comp_op, I, long, 64)
176#endif
177
178ATOMIC_OPS(add_and_test, add, ==, 0)
179ATOMIC_OPS(sub_and_test, sub, ==, 0)
180ATOMIC_OPS(add_negative, add, <, 0)
181
182#undef ATOMIC_OP
183#undef ATOMIC_OPS
184
/* atomic_inc()/atomic_dec(): forward to the add-based primitive with a
 * constant argument I (+1 or -1). */
#define ATOMIC_OP(op, func_op, c_op, I, c_type, prefix)			\
static __always_inline void atomic##prefix##_##op(atomic##prefix##_t *v) \
{									\
	atomic##prefix##_##func_op(I, v);				\
}

/* fetch_inc/fetch_dec: return the value the counter held before the update. */
#define ATOMIC_FETCH_OP(op, func_op, c_op, I, c_type, prefix)		\
static __always_inline c_type atomic##prefix##_fetch_##op(atomic##prefix##_t *v) \
{									\
	return atomic##prefix##_fetch_##func_op(I, v);			\
}

/* inc_return/dec_return: old value combined with "c_op I" gives the new
 * value; ordering comes from the underlying fetch op. */
#define ATOMIC_OP_RETURN(op, asm_op, c_op, I, c_type, prefix)		\
static __always_inline c_type atomic##prefix##_##op##_return(atomic##prefix##_t *v) \
{									\
	return atomic##prefix##_fetch_##op(v) c_op I;			\
}

/* Only the int versions when the generic 64-bit fallback is in use. */
#ifdef CONFIG_GENERIC_ATOMIC64
#define ATOMIC_OPS(op, asm_op, c_op, I)					\
	ATOMIC_OP       (op, asm_op, c_op, I,  int,   )			\
	ATOMIC_FETCH_OP (op, asm_op, c_op, I,  int,   )			\
	ATOMIC_OP_RETURN(op, asm_op, c_op, I,  int,   )
#else
#define ATOMIC_OPS(op, asm_op, c_op, I)					\
	ATOMIC_OP       (op, asm_op, c_op, I,  int,   )			\
	ATOMIC_FETCH_OP (op, asm_op, c_op, I,  int,   )			\
	ATOMIC_OP_RETURN(op, asm_op, c_op, I,  int,   )			\
	ATOMIC_OP       (op, asm_op, c_op, I, long, 64)			\
	ATOMIC_FETCH_OP (op, asm_op, c_op, I, long, 64)			\
	ATOMIC_OP_RETURN(op, asm_op, c_op, I, long, 64)
#endif

ATOMIC_OPS(inc, add, +,  1)
ATOMIC_OPS(dec, add, +, -1)

#undef ATOMIC_OPS
#undef ATOMIC_OP
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
225
/* inc_and_test/dec_and_test: true iff the counter reaches zero.  Built on
 * the *_return forms, so they carry the same (full) ordering. */
#define ATOMIC_OP(op, func_op, comp_op, I, prefix)			\
static __always_inline bool atomic##prefix##_##op(atomic##prefix##_t *v) \
{									\
	return atomic##prefix##_##func_op##_return(v) comp_op I;	\
}

ATOMIC_OP(inc_and_test, inc, ==, 0,   )
ATOMIC_OP(dec_and_test, dec, ==, 0,   )
#ifndef CONFIG_GENERIC_ATOMIC64
ATOMIC_OP(inc_and_test, inc, ==, 0, 64)
ATOMIC_OP(dec_and_test, dec, ==, 0, 64)
#endif

#undef ATOMIC_OP
240
/* This is required to provide a barrier on success. */
/*
 * Atomically add @a to @v unless @v currently equals @u.  Returns the
 * value @v held before any update; callers compare the result with @u
 * to learn whether the add took place.  The lr/sc pair carries .aqrl,
 * so the successful path is fully ordered.
 */
static __always_inline int __atomic_add_unless(atomic_t *v, int a, int u)
{
	int prev, rc;

	__asm__ __volatile__ (
		"0:\n\t"
		"lr.w.aqrl %[p], %[c]\n\t"
		"beq %[p], %[u], 1f\n\t"	/* counter is @u: skip the store */
		"add %[rc], %[p], %[a]\n\t"
		"sc.w.aqrl %[rc], %[rc], %[c]\n\t"
		"bnez %[rc], 0b\n\t"		/* sc failed: retry from the load */
		"1:"
		: [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter)
		: [a]"r" (a), [u]"r" (u)
		: "memory");
	return prev;
}
259
#ifndef CONFIG_GENERIC_ATOMIC64
/*
 * 64-bit counterpart of __atomic_add_unless(): add @a to @v unless it
 * equals @u, returning the old value.  Fully ordered on success via
 * lr.d.aqrl/sc.d.aqrl.
 */
static __always_inline long __atomic64_add_unless(atomic64_t *v, long a, long u)
{
	long prev, rc;

	__asm__ __volatile__ (
		"0:\n\t"
		"lr.d.aqrl %[p], %[c]\n\t"
		"beq %[p], %[u], 1f\n\t"	/* counter is @u: skip the store */
		"add %[rc], %[p], %[a]\n\t"
		"sc.d.aqrl %[rc], %[rc], %[c]\n\t"
		"bnez %[rc], 0b\n\t"		/* sc failed: retry */
		"1:"
		: [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter)
		: [a]"r" (a), [u]"r" (u)
		: "memory");
	return prev;
}

/* Kernel API form: non-zero iff the add was actually performed. */
static __always_inline int atomic64_add_unless(atomic64_t *v, long a, long u)
{
	return __atomic64_add_unless(v, a, u) != u;
}
#endif
284
/*
 * The extra atomic operations that are constructed from one of the core
 * LR/SC-based operations above.
 */
/* Increment @v unless it is zero.  __atomic_add_unless() returns the old
 * value, which is only zero when the increment was skipped, so a non-zero
 * result means the increment happened. */
static __always_inline int atomic_inc_not_zero(atomic_t *v)
{
	return __atomic_add_unless(v, 1, 0);
}

#ifndef CONFIG_GENERIC_ATOMIC64
/* 64-bit variant; atomic64_add_unless() already folds to 0/1. */
static __always_inline long atomic64_inc_not_zero(atomic64_t *v)
{
	return atomic64_add_unless(v, 1, 0);
}
#endif
300
/*
 * atomic_{cmp,}xchg is required to have exactly the same ordering semantics as
 * {cmp,}xchg and the operations that return, so they need a barrier.  We just
 * use the other implementations directly.
 */
/* Generates atomic{,64}_cmpxchg{,_acquire,_release,_relaxed}() and the
 * matching xchg variants by forwarding to __cmpxchg()/__xchg() with the
 * right access width and AMO ordering suffix. */
#define ATOMIC_OP(c_t, prefix, c_or, size, asm_or)			\
static __always_inline c_t atomic##prefix##_cmpxchg##c_or(atomic##prefix##_t *v, c_t o, c_t n) \
{									\
	return __cmpxchg(&(v->counter), o, n, size, asm_or, asm_or);	\
}									\
static __always_inline c_t atomic##prefix##_xchg##c_or(atomic##prefix##_t *v, c_t n) \
{									\
	return __xchg(n, &(v->counter), size, asm_or);			\
}

#ifdef CONFIG_GENERIC_ATOMIC64
#define ATOMIC_OPS(c_or, asm_or)					\
	ATOMIC_OP( int,   , c_or, 4, asm_or)
#else
#define ATOMIC_OPS(c_or, asm_or)					\
	ATOMIC_OP( int,   , c_or, 4, asm_or)				\
	ATOMIC_OP(long, 64, c_or, 8, asm_or)
#endif

ATOMIC_OPS(        , .aqrl)	/* fully ordered (default) */
ATOMIC_OPS(_acquire, .aq)
ATOMIC_OPS(_release, .rl)
ATOMIC_OPS(_relaxed, )

#undef ATOMIC_OPS
#undef ATOMIC_OP
332
/*
 * Atomically subtract @offset from @v only if the result stays
 * non-negative.  Returns (old - offset) in all cases: when the result
 * would be negative the store is skipped (bltz branch), so a negative
 * return tells the caller the counter was left untouched.  lr/sc carry
 * .aqrl, giving full ordering on the successful path.
 */
static __always_inline int atomic_sub_if_positive(atomic_t *v, int offset)
{
	int prev, rc;

	__asm__ __volatile__ (
		"0:\n\t"
		"lr.w.aqrl %[p], %[c]\n\t"
		"sub %[rc], %[p], %[o]\n\t"
		"bltz %[rc], 1f\n\t"		/* would go negative: bail out */
		"sc.w.aqrl %[rc], %[rc], %[c]\n\t"
		"bnez %[rc], 0b\n\t"		/* sc failed: retry */
		"1:"
		: [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter)
		: [o]"r" (offset)
		: "memory");
	return prev - offset;
}

#define atomic_dec_if_positive(v)	atomic_sub_if_positive(v, 1)
352
353#ifndef CONFIG_GENERIC_ATOMIC64
354static __always_inline long atomic64_sub_if_positive(atomic64_t *v, int offset)
355{
356 long prev, rc;
357
358 __asm__ __volatile__ (
359 "0:\n\t"
360 "lr.d.aqrl %[p], %[c]\n\t"
361 "sub %[rc], %[p], %[o]\n\t"
362 "bltz %[rc], 1f\n\t"
363 "sc.d.aqrl %[rc], %[rc], %[c]\n\t"
364 "bnez %[rc], 0b\n\t"
365 "1:"
366 : [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter)
367 : [o]"r" (offset)
368 : "memory");
369 return prev - offset;
370}
371
372#define atomic64_dec_if_positive(v) atomic64_sub_if_positive(v, 1)
373#endif
374
375#endif /* _ASM_RISCV_ATOMIC_H */
diff --git a/arch/riscv/include/asm/barrier.h b/arch/riscv/include/asm/barrier.h
new file mode 100644
index 000000000000..183534b7c39b
--- /dev/null
+++ b/arch/riscv/include/asm/barrier.h
@@ -0,0 +1,68 @@
1/*
2 * Based on arch/arm/include/asm/barrier.h
3 *
4 * Copyright (C) 2012 ARM Ltd.
5 * Copyright (C) 2013 Regents of the University of California
6 * Copyright (C) 2017 SiFive
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program. If not, see <http://www.gnu.org/licenses/>.
19 */
20
21#ifndef _ASM_RISCV_BARRIER_H
22#define _ASM_RISCV_BARRIER_H
23
24#ifndef __ASSEMBLY__
25
#define nop()		__asm__ __volatile__ ("nop")

/* Emit a RISC-V fence with explicit predecessor/successor sets
 * (i = device input, o = device output, r = memory read, w = memory write). */
#define RISCV_FENCE(p, s) \
	__asm__ __volatile__ ("fence " #p "," #s : : : "memory")

/* These barriers need to enforce ordering on both devices or memory. */
#define mb()		RISCV_FENCE(iorw,iorw)
#define rmb()		RISCV_FENCE(ir,ir)
#define wmb()		RISCV_FENCE(ow,ow)

/* These barriers do not need to enforce ordering on devices, just memory. */
#define smp_mb()	RISCV_FENCE(rw,rw)
#define smp_rmb()	RISCV_FENCE(r,r)
#define smp_wmb()	RISCV_FENCE(w,w)
40
/*
 * These fences exist to enforce ordering around the relaxed AMOs.  The
 * documentation defines that
 * "
 *     atomic_fetch_add();
 *   is equivalent to:
 *     smp_mb__before_atomic();
 *     atomic_fetch_add_relaxed();
 *     smp_mb__after_atomic();
 * "
 * So we emit full fences on both sides.
 *
 * NOTE: these were previously spelled "__smb_mb__*" (smb/smp typo).
 * asm-generic/barrier.h only picks up hooks named __smp_mb__before_atomic
 * and __smp_mb__after_atomic, so the misspelled defines were dead and the
 * generic fallbacks were silently used instead.
 */
#define __smp_mb__before_atomic()	smp_mb()
#define __smp_mb__after_atomic()	smp_mb()
55
/*
 * These barriers prevent accesses performed outside a spinlock from being moved
 * inside a spinlock.  Since RISC-V sets the aq/rl bits on our spinlocks, which
 * only enforce release consistency, we need full fences here.
 *
 * NOTE: these were previously spelled "smb_mb__*" (smb/smp typo); the core
 * kernel only looks for smp_mb__before_spinlock(), so the misspelled
 * defines were never used.
 */
#define smp_mb__before_spinlock()	smp_mb()
#define smp_mb__after_spinlock()	smp_mb()
63
64#include <asm-generic/barrier.h>
65
66#endif /* __ASSEMBLY__ */
67
68#endif /* _ASM_RISCV_BARRIER_H */
diff --git a/arch/riscv/include/asm/bitops.h b/arch/riscv/include/asm/bitops.h
new file mode 100644
index 000000000000..7c281ef1d583
--- /dev/null
+++ b/arch/riscv/include/asm/bitops.h
@@ -0,0 +1,218 @@
1/*
2 * Copyright (C) 2012 Regents of the University of California
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation, version 2.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 */
13
14#ifndef _ASM_RISCV_BITOPS_H
15#define _ASM_RISCV_BITOPS_H
16
17#ifndef _LINUX_BITOPS_H
18#error "Only <linux/bitops.h> can be included directly"
19#endif /* _LINUX_BITOPS_H */
20
21#include <linux/compiler.h>
22#include <linux/irqflags.h>
23#include <asm/barrier.h>
24#include <asm/bitsperlong.h>
25
26#ifndef smp_mb__before_clear_bit
27#define smp_mb__before_clear_bit() smp_mb()
28#define smp_mb__after_clear_bit() smp_mb()
29#endif /* smp_mb__before_clear_bit */
30
31#include <asm-generic/bitops/__ffs.h>
32#include <asm-generic/bitops/ffz.h>
33#include <asm-generic/bitops/fls.h>
34#include <asm-generic/bitops/__fls.h>
35#include <asm-generic/bitops/fls64.h>
36#include <asm-generic/bitops/find.h>
37#include <asm-generic/bitops/sched.h>
38#include <asm-generic/bitops/ffs.h>
39
40#include <asm-generic/bitops/hweight.h>
41
42#if (BITS_PER_LONG == 64)
43#define __AMO(op) "amo" #op ".d"
44#elif (BITS_PER_LONG == 32)
45#define __AMO(op) "amo" #op ".w"
46#else
47#error "Unexpected BITS_PER_LONG"
48#endif
49
50#define __test_and_op_bit_ord(op, mod, nr, addr, ord) \
51({ \
52 unsigned long __res, __mask; \
53 __mask = BIT_MASK(nr); \
54 __asm__ __volatile__ ( \
55 __AMO(op) #ord " %0, %2, %1" \
56 : "=r" (__res), "+A" (addr[BIT_WORD(nr)]) \
57 : "r" (mod(__mask)) \
58 : "memory"); \
59 ((__res & __mask) != 0); \
60})
61
62#define __op_bit_ord(op, mod, nr, addr, ord) \
63 __asm__ __volatile__ ( \
64 __AMO(op) #ord " zero, %1, %0" \
65 : "+A" (addr[BIT_WORD(nr)]) \
66 : "r" (mod(BIT_MASK(nr))) \
67 : "memory");
68
69#define __test_and_op_bit(op, mod, nr, addr) \
70 __test_and_op_bit_ord(op, mod, nr, addr, )
71#define __op_bit(op, mod, nr, addr) \
72 __op_bit_ord(op, mod, nr, addr, )
73
74/* Bitmask modifiers */
75#define __NOP(x) (x)
76#define __NOT(x) (~(x))
77
/**
 * test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * Atomic AMO-based read-modify-write; the ordering actually provided is
 * whatever suffix __test_and_op_bit() applies to the AMO.
 */
static inline int test_and_set_bit(int nr, volatile unsigned long *addr)
{
	return __test_and_op_bit(or, __NOP, nr, addr);
}

/**
 * test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * Atomic AMO-based read-modify-write (AND with the inverted mask);
 * ordering comes from __test_and_op_bit().
 */
static inline int test_and_clear_bit(int nr, volatile unsigned long *addr)
{
	return __test_and_op_bit(and, __NOT, nr, addr);
}

/**
 * test_and_change_bit - Change a bit and return its old value
 * @nr: Bit to change
 * @addr: Address to count from
 *
 * Atomic AMO-based read-modify-write.  NOTE(review): the generic kernel
 * contract says test_and_*_bit() implies a full memory barrier; confirm
 * the AMO ordering suffix used by __test_and_op_bit() actually provides
 * that.
 */
static inline int test_and_change_bit(int nr, volatile unsigned long *addr)
{
	return __test_and_op_bit(xor, __NOP, nr, addr);
}
114
/**
 * set_bit - Atomically set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * Implemented as a single AMO: atomic, but with no ordering guarantee of
 * its own (pair with the smp_mb__* helpers when ordering matters).
 *
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static inline void set_bit(int nr, volatile unsigned long *addr)
{
	__op_bit(or, __NOP, nr, addr);
}

/**
 * clear_bit - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * Atomic AMO AND with the inverted mask (__NOT); no ordering guarantee
 * by itself.
 */
static inline void clear_bit(int nr, volatile unsigned long *addr)
{
	__op_bit(and, __NOT, nr, addr);
}

/**
 * change_bit - Toggle a bit in memory
 * @nr: Bit to change
 * @addr: Address to start counting from
 *
 * Atomic AMO XOR; no ordering guarantee by itself.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static inline void change_bit(int nr, volatile unsigned long *addr)
{
	__op_bit(xor, __NOP, nr, addr);
}
159
/**
 * test_and_set_bit_lock - Set a bit and return its old value, for lock
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * Atomic with acquire ordering (.aq AMO), which is exactly what taking a
 * bit lock requires.  It can be used to implement bit locks.
 */
static inline int test_and_set_bit_lock(
	unsigned long nr, volatile unsigned long *addr)
{
	return __test_and_op_bit_ord(or, __NOP, nr, addr, .aq);
}

/**
 * clear_bit_unlock - Clear a bit in memory, for unlock
 * @nr: the bit to clear
 * @addr: the address to start counting from
 *
 * Atomic with release ordering (.rl AMO), matching lock-release
 * semantics.
 */
static inline void clear_bit_unlock(
	unsigned long nr, volatile unsigned long *addr)
{
	__op_bit_ord(and, __NOT, nr, addr, .rl);
}

/**
 * __clear_bit_unlock - Clear a bit in memory, for unlock
 * @nr: the bit to clear
 * @addr: the address to start counting from
 *
 * This operation is like clear_bit_unlock, however it is not atomic.
 * It does provide release barrier semantics so it can be used to unlock
 * a bit lock, however it would only be used if no other CPU can modify
 * any bits in the memory until the lock is released (a good example is
 * if the bit lock itself protects access to the other bits in the word).
 *
 * On RISC-V systems there seems to be no benefit to taking advantage of the
 * non-atomic property here: it's a lot more instructions and we still have to
 * provide release semantics anyway.
 */
static inline void __clear_bit_unlock(
	unsigned long nr, volatile unsigned long *addr)
{
	clear_bit_unlock(nr, addr);
}
207
208#undef __test_and_op_bit
209#undef __op_bit
210#undef __NOP
211#undef __NOT
212#undef __AMO
213
214#include <asm-generic/bitops/non-atomic.h>
215#include <asm-generic/bitops/le.h>
216#include <asm-generic/bitops/ext2-atomic.h>
217
218#endif /* _ASM_RISCV_BITOPS_H */
diff --git a/arch/riscv/include/asm/bug.h b/arch/riscv/include/asm/bug.h
new file mode 100644
index 000000000000..c3e13764a943
--- /dev/null
+++ b/arch/riscv/include/asm/bug.h
@@ -0,0 +1,88 @@
1/*
2 * Copyright (C) 2012 Regents of the University of California
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation, version 2.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 */
13
14#ifndef _ASM_RISCV_BUG_H
15#define _ASM_RISCV_BUG_H
16
17#include <linux/compiler.h>
18#include <linux/const.h>
19#include <linux/types.h>
20
21#include <asm/asm.h>
22
23#ifdef CONFIG_GENERIC_BUG
24#define __BUG_INSN _AC(0x00100073, UL) /* ebreak */
25
26#ifndef __ASSEMBLY__
27typedef u32 bug_insn_t;
28
29#ifdef CONFIG_GENERIC_BUG_RELATIVE_POINTERS
30#define __BUG_ENTRY_ADDR INT " 1b - 2b"
31#define __BUG_ENTRY_FILE INT " %0 - 2b"
32#else
33#define __BUG_ENTRY_ADDR RISCV_PTR " 1b"
34#define __BUG_ENTRY_FILE RISCV_PTR " %0"
35#endif
36
37#ifdef CONFIG_DEBUG_BUGVERBOSE
38#define __BUG_ENTRY \
39 __BUG_ENTRY_ADDR "\n\t" \
40 __BUG_ENTRY_FILE "\n\t" \
41 SHORT " %1"
42#else
43#define __BUG_ENTRY \
44 __BUG_ENTRY_ADDR
45#endif
46
/* Trap via ebreak and record a __bug_table entry (address plus, when
 * BUGVERBOSE, file/line via __BUG_ENTRY) for the trap handler to decode.
 * The .org directive pads each entry to sizeof(struct bug_entry). */
#define BUG()							\
do {								\
	__asm__ __volatile__ (					\
		"1:\n\t"					\
			"ebreak\n"				\
		".pushsection __bug_table,\"a\"\n\t"		\
		"2:\n\t"					\
			__BUG_ENTRY "\n\t"			\
			".org 2b + %2\n\t"			\
		".popsection"					\
		:						\
		: "i" (__FILE__), "i" (__LINE__),		\
		  "i" (sizeof(struct bug_entry)));		\
	unreachable();						\
} while (0)
62#endif /* !__ASSEMBLY__ */
63#else /* CONFIG_GENERIC_BUG */
64#ifndef __ASSEMBLY__
65#define BUG() \
66do { \
67 __asm__ __volatile__ ("ebreak\n"); \
68 unreachable(); \
69} while (0)
70#endif /* !__ASSEMBLY__ */
71#endif /* CONFIG_GENERIC_BUG */
72
73#define HAVE_ARCH_BUG
74
75#include <asm-generic/bug.h>
76
77#ifndef __ASSEMBLY__
78
79struct pt_regs;
80struct task_struct;
81
82extern void die(struct pt_regs *regs, const char *str);
83extern void do_trap(struct pt_regs *regs, int signo, int code,
84 unsigned long addr, struct task_struct *tsk);
85
86#endif /* !__ASSEMBLY__ */
87
88#endif /* _ASM_RISCV_BUG_H */
diff --git a/arch/riscv/include/asm/cache.h b/arch/riscv/include/asm/cache.h
new file mode 100644
index 000000000000..e8f0d1110d74
--- /dev/null
+++ b/arch/riscv/include/asm/cache.h
@@ -0,0 +1,22 @@
1/*
2 * Copyright (C) 2017 Chen Liqin <liqin.chen@sunplusct.com>
3 * Copyright (C) 2012 Regents of the University of California
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License
7 * as published by the Free Software Foundation, version 2.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 */
14
15#ifndef _ASM_RISCV_CACHE_H
16#define _ASM_RISCV_CACHE_H
17
18#define L1_CACHE_SHIFT 6
19
20#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
21
22#endif /* _ASM_RISCV_CACHE_H */
diff --git a/arch/riscv/include/asm/cacheflush.h b/arch/riscv/include/asm/cacheflush.h
new file mode 100644
index 000000000000..0595585013b0
--- /dev/null
+++ b/arch/riscv/include/asm/cacheflush.h
@@ -0,0 +1,39 @@
1/*
2 * Copyright (C) 2015 Regents of the University of California
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation, version 2.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 */
13
14#ifndef _ASM_RISCV_CACHEFLUSH_H
15#define _ASM_RISCV_CACHEFLUSH_H
16
17#include <asm-generic/cacheflush.h>
18
19#undef flush_icache_range
20#undef flush_icache_user_range
21
/* fence.i synchronizes this hart's instruction fetches with prior data
 * writes (needed after modifying code). */
static inline void local_flush_icache_all(void)
{
	asm volatile ("fence.i" ::: "memory");
}

#ifndef CONFIG_SMP

/* UP: a local fence.i suffices; the range arguments are ignored. */
#define flush_icache_range(start, end) local_flush_icache_all()
#define flush_icache_user_range(vma, pg, addr, len) local_flush_icache_all()

#else /* CONFIG_SMP */

/* SMP: ask the SBI to run fence.i on remote harts as well.  NOTE(review):
 * the 0 argument is presumably a NULL hart mask meaning "all harts" --
 * confirm against the SBI call definition. */
#define flush_icache_range(start, end) sbi_remote_fence_i(0)
#define flush_icache_user_range(vma, pg, addr, len) sbi_remote_fence_i(0)

#endif /* CONFIG_SMP */
38
39#endif /* _ASM_RISCV_CACHEFLUSH_H */
diff --git a/arch/riscv/include/asm/cmpxchg.h b/arch/riscv/include/asm/cmpxchg.h
new file mode 100644
index 000000000000..db249dbc7b97
--- /dev/null
+++ b/arch/riscv/include/asm/cmpxchg.h
@@ -0,0 +1,134 @@
1/*
2 * Copyright (C) 2014 Regents of the University of California
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation, version 2.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 */
13
14#ifndef _ASM_RISCV_CMPXCHG_H
15#define _ASM_RISCV_CMPXCHG_H
16
17#include <linux/bug.h>
18
19#include <asm/barrier.h>
20
/*
 * __xchg - atomically swap *ptr with @new, returning the old value.
 * @asm_or is the AMO ordering suffix (.aqrl, .aq, .rl, or empty for
 * relaxed).  Only 32- and 64-bit objects map onto amoswap; any other
 * size is a build-time error.
 */
#define __xchg(new, ptr, size, asm_or)				\
({								\
	__typeof__(ptr) __ptr = (ptr);				\
	__typeof__(new) __new = (new);				\
	__typeof__(*(ptr)) __ret;				\
	switch (size) {						\
	case 4:							\
		__asm__ __volatile__ (				\
			"amoswap.w" #asm_or " %0, %2, %1"	\
			: "=r" (__ret), "+A" (*__ptr)		\
			: "r" (__new)				\
			: "memory");				\
		break;						\
	case 8:							\
		__asm__ __volatile__ (				\
			"amoswap.d" #asm_or " %0, %2, %1"	\
			: "=r" (__ret), "+A" (*__ptr)		\
			: "r" (__new)				\
			: "memory");				\
		break;						\
	default:						\
		BUILD_BUG();					\
	}							\
	__ret;						\
})

/* Fully ordered swap, per the kernel's xchg() contract. */
#define xchg(ptr, x)    (__xchg((x), (ptr), sizeof(*(ptr)), .aqrl))

/* Width-checked convenience wrappers. */
#define xchg32(ptr, x)				\
({						\
	BUILD_BUG_ON(sizeof(*(ptr)) != 4);	\
	xchg((ptr), (x));			\
})

#define xchg64(ptr, x)				\
({						\
	BUILD_BUG_ON(sizeof(*(ptr)) != 8);	\
	xchg((ptr), (x));			\
})
60
/*
 * Atomic compare and exchange.  Compare OLD with MEM, if identical,
 * store NEW in MEM.  Return the initial value in MEM.  Success is
 * indicated by comparing RETURN with OLD.
 *
 * @lrb is the ordering suffix for the lr (load-reserved) instruction and
 * @scb the suffix for sc (store-conditional).  The suffixes were
 * previously applied to the opposite instructions; that was harmless only
 * because every current caller passes the same value for both, but they
 * are applied to the instructions matching their names here.
 */
#define __cmpxchg(ptr, old, new, size, lrb, scb)		\
({								\
	__typeof__(ptr) __ptr = (ptr);				\
	__typeof__(*(ptr)) __old = (old);			\
	__typeof__(*(ptr)) __new = (new);			\
	__typeof__(*(ptr)) __ret;				\
	register unsigned int __rc;				\
	switch (size) {						\
	case 4:							\
		__asm__ __volatile__ (				\
		"0:"						\
			"lr.w" #lrb " %0, %2\n"			\
			"bne %0, %z3, 1f\n"			\
			"sc.w" #scb " %1, %z4, %2\n"		\
			"bnez %1, 0b\n"				\
		"1:"						\
			: "=&r" (__ret), "=&r" (__rc), "+A" (*__ptr) \
			: "rJ" (__old), "rJ" (__new)		\
			: "memory");				\
		break;						\
	case 8:							\
		__asm__ __volatile__ (				\
		"0:"						\
			"lr.d" #lrb " %0, %2\n"			\
			"bne %0, %z3, 1f\n"			\
			"sc.d" #scb " %1, %z4, %2\n"		\
			"bnez %1, 0b\n"				\
		"1:"						\
			: "=&r" (__ret), "=&r" (__rc), "+A" (*__ptr) \
			: "rJ" (__old), "rJ" (__new)		\
			: "memory");				\
		break;						\
	default:						\
		BUILD_BUG();					\
	}							\
	__ret;							\
})
103
104#define cmpxchg(ptr, o, n) \
105 (__cmpxchg((ptr), (o), (n), sizeof(*(ptr)), .aqrl, .aqrl))
106
107#define cmpxchg_local(ptr, o, n) \
108 (__cmpxchg((ptr), (o), (n), sizeof(*(ptr)), , ))
109
110#define cmpxchg32(ptr, o, n) \
111({ \
112 BUILD_BUG_ON(sizeof(*(ptr)) != 4); \
113 cmpxchg((ptr), (o), (n)); \
114})
115
116#define cmpxchg32_local(ptr, o, n) \
117({ \
118 BUILD_BUG_ON(sizeof(*(ptr)) != 4); \
119 cmpxchg_local((ptr), (o), (n)); \
120})
121
122#define cmpxchg64(ptr, o, n) \
123({ \
124 BUILD_BUG_ON(sizeof(*(ptr)) != 8); \
125 cmpxchg((ptr), (o), (n)); \
126})
127
128#define cmpxchg64_local(ptr, o, n) \
129({ \
130 BUILD_BUG_ON(sizeof(*(ptr)) != 8); \
131 cmpxchg_local((ptr), (o), (n)); \
132})
133
134#endif /* _ASM_RISCV_CMPXCHG_H */
diff --git a/arch/riscv/include/asm/compat.h b/arch/riscv/include/asm/compat.h
new file mode 100644
index 000000000000..044aecff8854
--- /dev/null
+++ b/arch/riscv/include/asm/compat.h
@@ -0,0 +1,29 @@
1/*
2 * Copyright (C) 2012 ARM Ltd.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public License
14 * along with this program. If not, see <http://www.gnu.org/licenses/>.
15 */
16#ifndef __ASM_COMPAT_H
17#define __ASM_COMPAT_H
18#ifdef CONFIG_COMPAT
19
20#if defined(CONFIG_64BIT)
21#define COMPAT_UTS_MACHINE "riscv64\0\0"
22#elif defined(CONFIG_32BIT)
23#define COMPAT_UTS_MACHINE "riscv32\0\0"
24#else
25#error "Unknown RISC-V base ISA"
26#endif
27
28#endif /*CONFIG_COMPAT*/
29#endif /*__ASM_COMPAT_H*/
diff --git a/arch/riscv/include/asm/csr.h b/arch/riscv/include/asm/csr.h
new file mode 100644
index 000000000000..0d64bc9f4f91
--- /dev/null
+++ b/arch/riscv/include/asm/csr.h
@@ -0,0 +1,132 @@
1/*
2 * Copyright (C) 2015 Regents of the University of California
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation, version 2.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 */
13
14#ifndef _ASM_RISCV_CSR_H
15#define _ASM_RISCV_CSR_H
16
17#include <linux/const.h>
18
19/* Status register flags */
20#define SR_IE _AC(0x00000002, UL) /* Interrupt Enable */
21#define SR_PIE _AC(0x00000020, UL) /* Previous IE */
22#define SR_PS _AC(0x00000100, UL) /* Previously Supervisor */
23#define SR_SUM _AC(0x00040000, UL) /* Supervisor may access User Memory */
24
25#define SR_FS _AC(0x00006000, UL) /* Floating-point Status */
26#define SR_FS_OFF _AC(0x00000000, UL)
27#define SR_FS_INITIAL _AC(0x00002000, UL)
28#define SR_FS_CLEAN _AC(0x00004000, UL)
29#define SR_FS_DIRTY _AC(0x00006000, UL)
30
31#define SR_XS _AC(0x00018000, UL) /* Extension Status */
32#define SR_XS_OFF _AC(0x00000000, UL)
33#define SR_XS_INITIAL _AC(0x00008000, UL)
34#define SR_XS_CLEAN _AC(0x00010000, UL)
35#define SR_XS_DIRTY _AC(0x00018000, UL)
36
37#ifndef CONFIG_64BIT
38#define SR_SD _AC(0x80000000, UL) /* FS/XS dirty */
39#else
40#define SR_SD _AC(0x8000000000000000, UL) /* FS/XS dirty */
41#endif
42
43/* SPTBR flags */
44#if __riscv_xlen == 32
45#define SPTBR_PPN _AC(0x003FFFFF, UL)
46#define SPTBR_MODE_32 _AC(0x80000000, UL)
47#define SPTBR_MODE SPTBR_MODE_32
48#else
49#define SPTBR_PPN _AC(0x00000FFFFFFFFFFF, UL)
50#define SPTBR_MODE_39 _AC(0x8000000000000000, UL)
51#define SPTBR_MODE SPTBR_MODE_39
52#endif
53
54/* Interrupt Enable and Interrupt Pending flags */
55#define SIE_SSIE _AC(0x00000002, UL) /* Software Interrupt Enable */
56#define SIE_STIE _AC(0x00000020, UL) /* Timer Interrupt Enable */
57
58#define EXC_INST_MISALIGNED 0
59#define EXC_INST_ACCESS 1
60#define EXC_BREAKPOINT 3
61#define EXC_LOAD_ACCESS 5
62#define EXC_STORE_ACCESS 7
63#define EXC_SYSCALL 8
64#define EXC_INST_PAGE_FAULT 12
65#define EXC_LOAD_PAGE_FAULT 13
66#define EXC_STORE_PAGE_FAULT 15
67
68#ifndef __ASSEMBLY__
69
/* Atomically swap a CSR with @val, returning the old CSR value. */
#define csr_swap(csr, val)					\
({								\
	unsigned long __v = (unsigned long)(val);		\
	__asm__ __volatile__ ("csrrw %0, " #csr ", %1"		\
			      : "=r" (__v) : "rK" (__v)		\
			      : "memory");			\
	__v;							\
})

/* Read a CSR. */
#define csr_read(csr)						\
({								\
	register unsigned long __v;				\
	__asm__ __volatile__ ("csrr %0, " #csr			\
			      : "=r" (__v) :			\
			      : "memory");			\
	__v;							\
})

/* Write @val to a CSR; the old value is discarded. */
#define csr_write(csr, val)					\
({								\
	unsigned long __v = (unsigned long)(val);		\
	__asm__ __volatile__ ("csrw " #csr ", %0"		\
			      : : "rK" (__v)			\
			      : "memory");			\
})

/* Set the bits in @val and return the CSR's previous value. */
#define csr_read_set(csr, val)					\
({								\
	unsigned long __v = (unsigned long)(val);		\
	__asm__ __volatile__ ("csrrs %0, " #csr ", %1"		\
			      : "=r" (__v) : "rK" (__v)		\
			      : "memory");			\
	__v;							\
})

/* Set the bits in @val without reading back. */
#define csr_set(csr, val)					\
({								\
	unsigned long __v = (unsigned long)(val);		\
	__asm__ __volatile__ ("csrs " #csr ", %0"		\
			      : : "rK" (__v)			\
			      : "memory");			\
})

/* Clear the bits in @val and return the CSR's previous value. */
#define csr_read_clear(csr, val)				\
({								\
	unsigned long __v = (unsigned long)(val);		\
	__asm__ __volatile__ ("csrrc %0, " #csr ", %1"		\
			      : "=r" (__v) : "rK" (__v)		\
			      : "memory");			\
	__v;							\
})

/* Clear the bits in @val without reading back. */
#define csr_clear(csr, val)					\
({								\
	unsigned long __v = (unsigned long)(val);		\
	__asm__ __volatile__ ("csrc " #csr ", %0"		\
			      : : "rK" (__v)			\
			      : "memory");			\
})
129
130#endif /* __ASSEMBLY__ */
131
132#endif /* _ASM_RISCV_CSR_H */
diff --git a/arch/riscv/include/asm/current.h b/arch/riscv/include/asm/current.h
new file mode 100644
index 000000000000..2cf6336ef600
--- /dev/null
+++ b/arch/riscv/include/asm/current.h
@@ -0,0 +1,45 @@
1/*
2 * Based on arm/arm64/include/asm/current.h
3 *
4 * Copyright (C) 2016 ARM
5 * Copyright (C) 2017 SiFive
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License
9 * as published by the Free Software Foundation, version 2.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 */
16
17
18#ifndef __ASM_CURRENT_H
19#define __ASM_CURRENT_H
20
21#include <linux/bug.h>
22#include <linux/compiler.h>
23
24#ifndef __ASSEMBLY__
25
26struct task_struct;
27
28/*
29 * This only works because "struct thread_info" is at offset 0 from "struct
30 * task_struct". This constraint seems to be necessary on other architectures
31 * as well, but __switch_to enforces it. We can't check TASK_TI here because
32 * <asm/asm-offsets.h> includes this, and I can't get the definition of "struct
33 * task_struct" here due to some header ordering problems.
34 */
/* The current task pointer lives permanently in register "tp" (set up at
 * context switch), so reading it is a single register move. */
static __always_inline struct task_struct *get_current(void)
{
	register struct task_struct *tp __asm__("tp");
	return tp;
}

#define current get_current()
42
43#endif /* __ASSEMBLY__ */
44
45#endif /* __ASM_CURRENT_H */
diff --git a/arch/riscv/include/asm/delay.h b/arch/riscv/include/asm/delay.h
new file mode 100644
index 000000000000..cbb0c9eb96cb
--- /dev/null
+++ b/arch/riscv/include/asm/delay.h
@@ -0,0 +1,28 @@
1/*
2 * Copyright (C) 2009 Chen Liqin <liqin.chen@sunplusct.com>
3 * Copyright (C) 2016 Regents of the University of California
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License
7 * as published by the Free Software Foundation, version 2.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 */
14
15#ifndef _ASM_RISCV_DELAY_H
16#define _ASM_RISCV_DELAY_H
17
18extern unsigned long riscv_timebase;
19
20#define udelay udelay
21extern void udelay(unsigned long usecs);
22
23#define ndelay ndelay
24extern void ndelay(unsigned long nsecs);
25
26extern void __delay(unsigned long cycles);
27
28#endif /* _ASM_RISCV_DELAY_H */
diff --git a/arch/riscv/include/asm/dma-mapping.h b/arch/riscv/include/asm/dma-mapping.h
new file mode 100644
index 000000000000..3eec1000196d
--- /dev/null
+++ b/arch/riscv/include/asm/dma-mapping.h
@@ -0,0 +1,38 @@
1/*
2 * Copyright (C) 2003-2004 Hewlett-Packard Co
3 * David Mosberger-Tang <davidm@hpl.hp.com>
4 * Copyright (C) 2012 ARM Ltd.
5 * Copyright (C) 2016 SiFive, Inc.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program. If not, see <http://www.gnu.org/licenses/>.
18 */
19#ifndef __ASM_RISCV_DMA_MAPPING_H
20#define __ASM_RISCV_DMA_MAPPING_H
21
22/* Use ops->dma_mapping_error (if it exists) or assume success */
23// #undef DMA_ERROR_CODE
24
/* Always hand back the no-op DMA ops regardless of bus.  NOTE(review):
 * this presumes all supported platforms are DMA-coherent -- confirm
 * before enabling non-coherent hardware. */
static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
{
	return &dma_noop_ops;
}

/* True iff the whole range [addr, addr + size) fits under the device's
 * DMA mask; a missing mask means the device cannot DMA at all. */
static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
{
	if (!dev->dma_mask)
		return false;

	return addr + size - 1 <= *dev->dma_mask;
}
37
38#endif /* __ASM_RISCV_DMA_MAPPING_H */
diff --git a/arch/riscv/include/asm/elf.h b/arch/riscv/include/asm/elf.h
new file mode 100644
index 000000000000..a1ef503d616e
--- /dev/null
+++ b/arch/riscv/include/asm/elf.h
@@ -0,0 +1,84 @@
1/*
2 * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com>
3 * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
4 * Copyright (C) 2012 Regents of the University of California
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 */
11
12#ifndef _ASM_RISCV_ELF_H
13#define _ASM_RISCV_ELF_H
14
15#include <uapi/asm/elf.h>
16#include <asm/auxvec.h>
17#include <asm/byteorder.h>
18
19/* TODO: Move definition into include/uapi/linux/elf-em.h */
20#define EM_RISCV 0xF3
21
22/*
23 * These are used to set parameters in the core dumps.
24 */
25#define ELF_ARCH EM_RISCV
26
27#ifdef CONFIG_64BIT
28#define ELF_CLASS ELFCLASS64
29#else
30#define ELF_CLASS ELFCLASS32
31#endif
32
33#if defined(__LITTLE_ENDIAN)
34#define ELF_DATA ELFDATA2LSB
35#elif defined(__BIG_ENDIAN)
36#define ELF_DATA ELFDATA2MSB
37#else
38#error "Unknown endianness"
39#endif
40
41/*
42 * This is used to ensure we don't load something for the wrong architecture.
43 */
44#define elf_check_arch(x) ((x)->e_machine == EM_RISCV)
45
46#define CORE_DUMP_USE_REGSET
47#define ELF_EXEC_PAGESIZE (PAGE_SIZE)
48
49/*
50 * This is the location that an ET_DYN program is loaded if exec'ed. Typical
51 * use of this is to invoke "./ld.so someprog" to test out a new version of
52 * the loader. We need to make sure that it is out of the way of the program
53 * that it will "exec", and that there is sufficient room for the brk.
54 */
55#define ELF_ET_DYN_BASE ((TASK_SIZE / 3) * 2)
56
57/*
58 * This yields a mask that user programs can use to figure out what
59 * instruction set this CPU supports. This could be done in user space,
60 * but it's not easy, and we've already done it here.
61 */
62#define ELF_HWCAP (elf_hwcap)
63extern unsigned long elf_hwcap;
64
65/*
66 * This yields a string that ld.so will use to load implementation
67 * specific libraries for optimization. This is more specific in
68 * intent than poking at uname or /proc/cpuinfo.
69 */
70#define ELF_PLATFORM (NULL)
71
72#define ARCH_DLINFO \
73do { \
74 NEW_AUX_ENT(AT_SYSINFO_EHDR, \
75 (elf_addr_t)current->mm->context.vdso); \
76} while (0)
77
78
79#define ARCH_HAS_SETUP_ADDITIONAL_PAGES
80struct linux_binprm;
81extern int arch_setup_additional_pages(struct linux_binprm *bprm,
82 int uses_interp);
83
84#endif /* _ASM_RISCV_ELF_H */
diff --git a/arch/riscv/include/asm/hwcap.h b/arch/riscv/include/asm/hwcap.h
new file mode 100644
index 000000000000..8a4ed7bbcbea
--- /dev/null
+++ b/arch/riscv/include/asm/hwcap.h
@@ -0,0 +1,37 @@
1/*
2 * Copied from arch/arm64/include/asm/hwcap.h
3 *
4 * Copyright (C) 2012 ARM Ltd.
5 * Copyright (C) 2017 SiFive
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program. If not, see <http://www.gnu.org/licenses/>.
18 */
19#ifndef __ASM_HWCAP_H
20#define __ASM_HWCAP_H
21
22#include <uapi/asm/hwcap.h>
23
24#ifndef __ASSEMBLY__
25/*
26 * This yields a mask that user programs can use to figure out what
27 * instruction set this cpu supports.
28 */
29#define ELF_HWCAP (elf_hwcap)
30
31enum {
32 CAP_HWCAP = 1,
33};
34
35extern unsigned long elf_hwcap;
36#endif
37#endif
diff --git a/arch/riscv/include/asm/io.h b/arch/riscv/include/asm/io.h
new file mode 100644
index 000000000000..c1f32cfcc79b
--- /dev/null
+++ b/arch/riscv/include/asm/io.h
@@ -0,0 +1,303 @@
1/*
2 * {read,write}{b,w,l,q} based on arch/arm64/include/asm/io.h
3 * which was based on arch/arm/include/io.h
4 *
5 * Copyright (C) 1996-2000 Russell King
6 * Copyright (C) 2012 ARM Ltd.
7 * Copyright (C) 2014 Regents of the University of California
8 *
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License
11 * as published by the Free Software Foundation, version 2.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 */
18
19#ifndef _ASM_RISCV_IO_H
20#define _ASM_RISCV_IO_H
21
22#ifdef CONFIG_MMU
23
24extern void __iomem *ioremap(phys_addr_t offset, unsigned long size);
25
26/*
27 * The RISC-V ISA doesn't yet specify how to query or modify PMAs, so we can't
28 * change the properties of memory regions. This should be fixed by the
29 * upcoming platform spec.
30 */
31#define ioremap_nocache(addr, size) ioremap((addr), (size))
32#define ioremap_wc(addr, size) ioremap((addr), (size))
33#define ioremap_wt(addr, size) ioremap((addr), (size))
34
35extern void iounmap(void __iomem *addr);
36
37#endif /* CONFIG_MMU */
38
39/* Generic IO read/write. These perform native-endian accesses. */
40#define __raw_writeb __raw_writeb
41static inline void __raw_writeb(u8 val, volatile void __iomem *addr)
42{
43 asm volatile("sb %0, 0(%1)" : : "r" (val), "r" (addr));
44}
45
46#define __raw_writew __raw_writew
47static inline void __raw_writew(u16 val, volatile void __iomem *addr)
48{
49 asm volatile("sh %0, 0(%1)" : : "r" (val), "r" (addr));
50}
51
52#define __raw_writel __raw_writel
53static inline void __raw_writel(u32 val, volatile void __iomem *addr)
54{
55 asm volatile("sw %0, 0(%1)" : : "r" (val), "r" (addr));
56}
57
58#ifdef CONFIG_64BIT
59#define __raw_writeq __raw_writeq
60static inline void __raw_writeq(u64 val, volatile void __iomem *addr)
61{
62 asm volatile("sd %0, 0(%1)" : : "r" (val), "r" (addr));
63}
64#endif
65
66#define __raw_readb __raw_readb
67static inline u8 __raw_readb(const volatile void __iomem *addr)
68{
69 u8 val;
70
71 asm volatile("lb %0, 0(%1)" : "=r" (val) : "r" (addr));
72 return val;
73}
74
75#define __raw_readw __raw_readw
76static inline u16 __raw_readw(const volatile void __iomem *addr)
77{
78 u16 val;
79
80 asm volatile("lh %0, 0(%1)" : "=r" (val) : "r" (addr));
81 return val;
82}
83
84#define __raw_readl __raw_readl
85static inline u32 __raw_readl(const volatile void __iomem *addr)
86{
87 u32 val;
88
89 asm volatile("lw %0, 0(%1)" : "=r" (val) : "r" (addr));
90 return val;
91}
92
93#ifdef CONFIG_64BIT
94#define __raw_readq __raw_readq
95static inline u64 __raw_readq(const volatile void __iomem *addr)
96{
97 u64 val;
98
99 asm volatile("ld %0, 0(%1)" : "=r" (val) : "r" (addr));
100 return val;
101}
102#endif
103
104/*
105 * FIXME: I'm flip-flopping on whether or not we should keep this or enforce
106 * the ordering with I/O on spinlocks like PowerPC does. The worry is that
107 * drivers won't get this correct, but I also don't want to introduce a fence
108 * into the lock code that otherwise only uses AMOs (and is essentially defined
109 * by the ISA to be correct). For now I'm leaving this here: "o,w" is
110 * sufficient to ensure that all writes to the device have completed before the
111 * write to the spinlock is allowed to commit. I surmised this from reading
112 * "ACQUIRES VS I/O ACCESSES" in memory-barriers.txt.
113 */
114#define mmiowb() __asm__ __volatile__ ("fence o,w" : : : "memory");
115
116/*
117 * Unordered I/O memory access primitives. These are even more relaxed than
118 * the relaxed versions, as they don't even order accesses between successive
119 * operations to the I/O regions.
120 */
121#define readb_cpu(c) ({ u8 __r = __raw_readb(c); __r; })
122#define readw_cpu(c) ({ u16 __r = le16_to_cpu((__force __le16)__raw_readw(c)); __r; })
123#define readl_cpu(c) ({ u32 __r = le32_to_cpu((__force __le32)__raw_readl(c)); __r; })
124
125#define writeb_cpu(v,c) ((void)__raw_writeb((v),(c)))
126#define writew_cpu(v,c) ((void)__raw_writew((__force u16)cpu_to_le16(v),(c)))
127#define writel_cpu(v,c) ((void)__raw_writel((__force u32)cpu_to_le32(v),(c)))
128
129#ifdef CONFIG_64BIT
130#define readq_cpu(c) ({ u64 __r = le64_to_cpu((__force __le64)__raw_readq(c)); __r; })
131#define writeq_cpu(v,c) ((void)__raw_writeq((__force u64)cpu_to_le64(v),(c)))
132#endif
133
134/*
135 * Relaxed I/O memory access primitives. These follow the Device memory
136 * ordering rules but do not guarantee any ordering relative to Normal memory
137 * accesses. These are defined to order the indicated access (either a read or
138 * write) with all other I/O memory accesses. Since the platform specification
139 * defines that all I/O regions are strongly ordered on channel 2, no explicit
140 * fences are required to enforce this ordering.
141 */
142/* FIXME: These are now the same as asm-generic */
143#define __io_rbr() do {} while (0)
144#define __io_rar() do {} while (0)
145#define __io_rbw() do {} while (0)
146#define __io_raw() do {} while (0)
147
148#define readb_relaxed(c) ({ u8 __v; __io_rbr(); __v = readb_cpu(c); __io_rar(); __v; })
149#define readw_relaxed(c) ({ u16 __v; __io_rbr(); __v = readw_cpu(c); __io_rar(); __v; })
150#define readl_relaxed(c) ({ u32 __v; __io_rbr(); __v = readl_cpu(c); __io_rar(); __v; })
151
152#define writeb_relaxed(v,c) ({ __io_rbw(); writeb_cpu((v),(c)); __io_raw(); })
153#define writew_relaxed(v,c) ({ __io_rbw(); writew_cpu((v),(c)); __io_raw(); })
154#define writel_relaxed(v,c) ({ __io_rbw(); writel_cpu((v),(c)); __io_raw(); })
155
156#ifdef CONFIG_64BIT
157#define readq_relaxed(c) ({ u64 __v; __io_rbr(); __v = readq_cpu(c); __io_rar(); __v; })
158#define writeq_relaxed(v,c) ({ __io_rbw(); writeq_cpu((v),(c)); __io_raw(); })
159#endif
160
161/*
162 * I/O memory access primitives. Reads are ordered relative to any
163 * following Normal memory access. Writes are ordered relative to any prior
164 * Normal memory access. The memory barriers here are necessary as RISC-V
165 * doesn't define any ordering between the memory space and the I/O space.
166 */
167#define __io_br() do {} while (0)
168#define __io_ar() __asm__ __volatile__ ("fence i,r" : : : "memory");
169#define __io_bw() __asm__ __volatile__ ("fence w,o" : : : "memory");
170#define __io_aw() do {} while (0)
171
172#define readb(c) ({ u8 __v; __io_br(); __v = readb_cpu(c); __io_ar(); __v; })
173#define readw(c) ({ u16 __v; __io_br(); __v = readw_cpu(c); __io_ar(); __v; })
174#define readl(c) ({ u32 __v; __io_br(); __v = readl_cpu(c); __io_ar(); __v; })
175
176#define writeb(v,c) ({ __io_bw(); writeb_cpu((v),(c)); __io_aw(); })
177#define writew(v,c) ({ __io_bw(); writew_cpu((v),(c)); __io_aw(); })
178#define writel(v,c) ({ __io_bw(); writel_cpu((v),(c)); __io_aw(); })
179
180#ifdef CONFIG_64BIT
181#define readq(c) ({ u64 __v; __io_br(); __v = readq_cpu(c); __io_ar(); __v; })
182#define writeq(v,c) ({ __io_bw(); writeq_cpu((v),(c)); __io_aw(); })
183#endif
184
185/*
186 * Emulation routines for the port-mapped IO space used by some PCI drivers.
187 * These are defined as being "fully synchronous", but also "not guaranteed to
188 * be fully ordered with respect to other memory and I/O operations". We're
189 * going to be on the safe side here and just make them:
190 * - Fully ordered WRT each other, by bracketing them with two fences. The
191 * outer set contains both I/O so inX is ordered with outX, while the inner just
192 * needs the type of the access (I for inX and O for outX).
193 * - Ordered in the same manner as readX/writeX WRT memory by subsuming their
194 * fences.
195 * - Ordered WRT timer reads, so udelay and friends don't get elided by the
196 * implementation.
197 * Note that there is no way to actually enforce that outX is a non-posted
198 * operation on RISC-V, but hopefully the timer ordering constraint is
199 * sufficient to ensure this works sanely on controllers that support I/O
200 * writes.
201 */
202#define __io_pbr() __asm__ __volatile__ ("fence io,i" : : : "memory");
203#define __io_par() __asm__ __volatile__ ("fence i,ior" : : : "memory");
204#define __io_pbw() __asm__ __volatile__ ("fence iow,o" : : : "memory");
205#define __io_paw() __asm__ __volatile__ ("fence o,io" : : : "memory");
206
207#define inb(c) ({ u8 __v; __io_pbr(); __v = readb_cpu((void*)(PCI_IOBASE + (c))); __io_par(); __v; })
208#define inw(c) ({ u16 __v; __io_pbr(); __v = readw_cpu((void*)(PCI_IOBASE + (c))); __io_par(); __v; })
209#define inl(c) ({ u32 __v; __io_pbr(); __v = readl_cpu((void*)(PCI_IOBASE + (c))); __io_par(); __v; })
210
211#define outb(v,c) ({ __io_pbw(); writeb_cpu((v),(void*)(PCI_IOBASE + (c))); __io_paw(); })
212#define outw(v,c) ({ __io_pbw(); writew_cpu((v),(void*)(PCI_IOBASE + (c))); __io_paw(); })
213#define outl(v,c) ({ __io_pbw(); writel_cpu((v),(void*)(PCI_IOBASE + (c))); __io_paw(); })
214
215#ifdef CONFIG_64BIT
216#define inq(c) ({ u64 __v; __io_pbr(); __v = readq_cpu((void*)(PCI_IOBASE + (c))); __io_par(); __v; }) /* offset by PCI_IOBASE like inb/inw/inl */
217#define outq(v,c) ({ __io_pbw(); writeq_cpu((v),(void*)(PCI_IOBASE + (c))); __io_paw(); }) /* offset by PCI_IOBASE like outb/outw/outl */
218#endif
219
220/*
221 * Accesses from a single hart to a single I/O address must be ordered. This
222 * allows us to use the raw read macros, but we still need to fence before and
223 * after the block to ensure ordering WRT other macros. These are defined to
224 * perform host-endian accesses so we use __raw instead of __cpu.
225 */
226#define __io_reads_ins(port, ctype, len, bfence, afence) \
227 static inline void __ ## port ## len(const volatile void __iomem *addr, \
228 void *buffer, \
229 unsigned int count) \
230 { \
231 bfence; \
232 if (count) { \
233 ctype *buf = buffer; \
234 \
235 do { \
236 ctype x = __raw_read ## len(addr); \
237 *buf++ = x; \
238 } while (--count); \
239 } \
240 afence; \
241 }
242
243#define __io_writes_outs(port, ctype, len, bfence, afence) \
244	static inline void __ ## port ## len(volatile void __iomem *addr, \
245					     const void *buffer, \
246					     unsigned int count) \
247	{ \
248		bfence; \
249		if (count) { \
250			const ctype *buf = buffer; \
251 \
252			do { \
253				__raw_write ## len(*buf++, addr); /* size-matched store (was __raw_writeq) */ \
254			} while (--count); \
255		} \
256		afence; \
257	}
258
259__io_reads_ins(reads, u8, b, __io_br(), __io_ar())
260__io_reads_ins(reads, u16, w, __io_br(), __io_ar())
261__io_reads_ins(reads, u32, l, __io_br(), __io_ar())
262#define readsb(addr, buffer, count) __readsb(addr, buffer, count)
263#define readsw(addr, buffer, count) __readsw(addr, buffer, count)
264#define readsl(addr, buffer, count) __readsl(addr, buffer, count)
265
266__io_reads_ins(ins, u8, b, __io_pbr(), __io_par())
267__io_reads_ins(ins, u16, w, __io_pbr(), __io_par())
268__io_reads_ins(ins, u32, l, __io_pbr(), __io_par())
269#define insb(addr, buffer, count) __insb((void __iomem *)addr, buffer, count)
270#define insw(addr, buffer, count) __insw((void __iomem *)addr, buffer, count)
271#define insl(addr, buffer, count) __insl((void __iomem *)addr, buffer, count)
272
273__io_writes_outs(writes, u8, b, __io_bw(), __io_aw())
274__io_writes_outs(writes, u16, w, __io_bw(), __io_aw())
275__io_writes_outs(writes, u32, l, __io_bw(), __io_aw())
276#define writesb(addr, buffer, count) __writesb(addr, buffer, count)
277#define writesw(addr, buffer, count) __writesw(addr, buffer, count)
278#define writesl(addr, buffer, count) __writesl(addr, buffer, count)
279
280__io_writes_outs(outs, u8, b, __io_pbw(), __io_paw())
281__io_writes_outs(outs, u16, w, __io_pbw(), __io_paw())
282__io_writes_outs(outs, u32, l, __io_pbw(), __io_paw())
283#define outsb(addr, buffer, count) __outsb((void __iomem *)addr, buffer, count)
284#define outsw(addr, buffer, count) __outsw((void __iomem *)addr, buffer, count)
285#define outsl(addr, buffer, count) __outsl((void __iomem *)addr, buffer, count)
286
287#ifdef CONFIG_64BIT
288__io_reads_ins(reads, u64, q, __io_br(), __io_ar())
289#define readsq(addr, buffer, count) __readsq(addr, buffer, count)
290
291__io_reads_ins(ins, u64, q, __io_pbr(), __io_par())
292#define insq(addr, buffer, count) __insq((void __iomem *)addr, buffer, count)
293
294__io_writes_outs(writes, u64, q, __io_bw(), __io_aw())
295#define writesq(addr, buffer, count) __writesq(addr, buffer, count)
296
297__io_writes_outs(outs, u64, q, __io_pbw(), __io_paw()) /* port-write fences (was __io_pbr, a read fence) */
298#define outsq(addr, buffer, count) __outsq((void __iomem *)addr, buffer, count)
299#endif
300
301#include <asm-generic/io.h>
302
303#endif /* _ASM_RISCV_IO_H */
diff --git a/arch/riscv/include/asm/irq.h b/arch/riscv/include/asm/irq.h
new file mode 100644
index 000000000000..4dee9d4c13c0
--- /dev/null
+++ b/arch/riscv/include/asm/irq.h
@@ -0,0 +1,28 @@
1/*
2 * Copyright (C) 2012 Regents of the University of California
3 * Copyright (C) 2017 SiFive
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License
7 * as published by the Free Software Foundation, version 2.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 */
14
15#ifndef _ASM_RISCV_IRQ_H
16#define _ASM_RISCV_IRQ_H
17
18#define NR_IRQS 0
19
20#define INTERRUPT_CAUSE_SOFTWARE 1
21#define INTERRUPT_CAUSE_TIMER 5
22#define INTERRUPT_CAUSE_EXTERNAL 9
23
24void riscv_timer_interrupt(void);
25
26#include <asm-generic/irq.h>
27
28#endif /* _ASM_RISCV_IRQ_H */
diff --git a/arch/riscv/include/asm/irqflags.h b/arch/riscv/include/asm/irqflags.h
new file mode 100644
index 000000000000..6fdc860d7f84
--- /dev/null
+++ b/arch/riscv/include/asm/irqflags.h
@@ -0,0 +1,63 @@
1/*
2 * Copyright (C) 2012 Regents of the University of California
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation, version 2.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 */
13
14
15#ifndef _ASM_RISCV_IRQFLAGS_H
16#define _ASM_RISCV_IRQFLAGS_H
17
18#include <asm/processor.h>
19#include <asm/csr.h>
20
21/* read interrupt enabled status */
22static inline unsigned long arch_local_save_flags(void)
23{
24 return csr_read(sstatus);
25}
26
27/* unconditionally enable interrupts */
28static inline void arch_local_irq_enable(void)
29{
30 csr_set(sstatus, SR_IE);
31}
32
33/* unconditionally disable interrupts */
34static inline void arch_local_irq_disable(void)
35{
36 csr_clear(sstatus, SR_IE);
37}
38
39/* get status and disable interrupts */
40static inline unsigned long arch_local_irq_save(void)
41{
42 return csr_read_clear(sstatus, SR_IE);
43}
44
45/* test flags */
46static inline int arch_irqs_disabled_flags(unsigned long flags)
47{
48 return !(flags & SR_IE);
49}
50
51/* test hardware interrupt enable bit */
52static inline int arch_irqs_disabled(void)
53{
54 return arch_irqs_disabled_flags(arch_local_save_flags());
55}
56
57/* set interrupt enabled status */
58static inline void arch_local_irq_restore(unsigned long flags)
59{
60 csr_set(sstatus, flags & SR_IE);
61}
62
63#endif /* _ASM_RISCV_IRQFLAGS_H */
diff --git a/arch/riscv/include/asm/kprobes.h b/arch/riscv/include/asm/kprobes.h
new file mode 100644
index 000000000000..c7eb010d1528
--- /dev/null
+++ b/arch/riscv/include/asm/kprobes.h
@@ -0,0 +1,22 @@
1/*
2 * Copied from arch/arm64/include/asm/kprobes.h
3 *
4 * Copyright (C) 2013 Linaro Limited
5 * Copyright (C) 2017 SiFive
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * General Public License for more details.
15 */
16
17#ifndef _RISCV_KPROBES_H
18#define _RISCV_KPROBES_H
19
20#include <asm-generic/kprobes.h>
21
22#endif /* _RISCV_KPROBES_H */
diff --git a/arch/riscv/include/asm/linkage.h b/arch/riscv/include/asm/linkage.h
new file mode 100644
index 000000000000..b7b304ca89c4
--- /dev/null
+++ b/arch/riscv/include/asm/linkage.h
@@ -0,0 +1,20 @@
1/*
2 * Copyright (C) 2015 Regents of the University of California
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation, version 2.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 */
13
14#ifndef _ASM_RISCV_LINKAGE_H
15#define _ASM_RISCV_LINKAGE_H
16
17#define __ALIGN .balign 4
18#define __ALIGN_STR ".balign 4"
19
20#endif /* _ASM_RISCV_LINKAGE_H */
diff --git a/arch/riscv/include/asm/mmu.h b/arch/riscv/include/asm/mmu.h
new file mode 100644
index 000000000000..66805cba9a27
--- /dev/null
+++ b/arch/riscv/include/asm/mmu.h
@@ -0,0 +1,26 @@
1/*
2 * Copyright (C) 2012 Regents of the University of California
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation, version 2.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 */
13
14
15#ifndef _ASM_RISCV_MMU_H
16#define _ASM_RISCV_MMU_H
17
18#ifndef __ASSEMBLY__
19
20typedef struct {
21 void *vdso;
22} mm_context_t;
23
24#endif /* __ASSEMBLY__ */
25
26#endif /* _ASM_RISCV_MMU_H */
diff --git a/arch/riscv/include/asm/mmu_context.h b/arch/riscv/include/asm/mmu_context.h
new file mode 100644
index 000000000000..de1fc1631fc4
--- /dev/null
+++ b/arch/riscv/include/asm/mmu_context.h
@@ -0,0 +1,69 @@
1/*
2 * Copyright (C) 2012 Regents of the University of California
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation, version 2.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 */
13
14#ifndef _ASM_RISCV_MMU_CONTEXT_H
15#define _ASM_RISCV_MMU_CONTEXT_H
16
17#include <asm-generic/mm_hooks.h>
18
19#include <linux/mm.h>
20#include <linux/sched.h>
21#include <asm/tlbflush.h>
22
23static inline void enter_lazy_tlb(struct mm_struct *mm,
24 struct task_struct *task)
25{
26}
27
28/* Initialize context-related info for a new mm_struct */
29static inline int init_new_context(struct task_struct *task,
30 struct mm_struct *mm)
31{
32 return 0;
33}
34
35static inline void destroy_context(struct mm_struct *mm)
36{
37}
38
39static inline pgd_t *current_pgdir(void)
40{
41 return pfn_to_virt(csr_read(sptbr) & SPTBR_PPN);
42}
43
44static inline void set_pgdir(pgd_t *pgd)
45{
46 csr_write(sptbr, virt_to_pfn(pgd) | SPTBR_MODE);
47}
48
49static inline void switch_mm(struct mm_struct *prev,
50 struct mm_struct *next, struct task_struct *task)
51{
52 if (likely(prev != next)) {
53 set_pgdir(next->pgd);
54 local_flush_tlb_all();
55 }
56}
57
58static inline void activate_mm(struct mm_struct *prev,
59 struct mm_struct *next)
60{
61 switch_mm(prev, next, NULL);
62}
63
64static inline void deactivate_mm(struct task_struct *task,
65 struct mm_struct *mm)
66{
67}
68
69#endif /* _ASM_RISCV_MMU_CONTEXT_H */
diff --git a/arch/riscv/include/asm/page.h b/arch/riscv/include/asm/page.h
new file mode 100644
index 000000000000..06cfbb3aacbb
--- /dev/null
+++ b/arch/riscv/include/asm/page.h
@@ -0,0 +1,130 @@
1/*
2 * Copyright (C) 2009 Chen Liqin <liqin.chen@sunplusct.com>
3 * Copyright (C) 2012 Regents of the University of California
4 * Copyright (C) 2017 SiFive
5 * Copyright (C) 2017 XiaojingZhu <zhuxiaoj@ict.ac.cn>
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License
9 * as published by the Free Software Foundation, version 2.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 */
16
17#ifndef _ASM_RISCV_PAGE_H
18#define _ASM_RISCV_PAGE_H
19
20#include <linux/pfn.h>
21#include <linux/const.h>
22
23#define PAGE_SHIFT (12)
24#define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT)
25#define PAGE_MASK (~(PAGE_SIZE - 1))
26
27/*
28 * PAGE_OFFSET -- the first address of the first page of memory.
29 * When not using MMU this corresponds to the first free page in
30 * physical memory (aligned on a page boundary).
31 */
32#define PAGE_OFFSET _AC(CONFIG_PAGE_OFFSET, UL)
33
34#define KERN_VIRT_SIZE (-PAGE_OFFSET)
35
36#ifndef __ASSEMBLY__
37
38#define PAGE_UP(addr) (((addr)+((PAGE_SIZE)-1))&(~((PAGE_SIZE)-1)))
39#define PAGE_DOWN(addr) ((addr)&(~((PAGE_SIZE)-1)))
40
41/* align addr on a size boundary - adjust address up/down if needed */
42#define _ALIGN_UP(addr, size) (((addr)+((size)-1))&(~((size)-1)))
43#define _ALIGN_DOWN(addr, size) ((addr)&(~((size)-1)))
44
45/* align addr on a size boundary - adjust address up if needed */
46#define _ALIGN(addr, size) _ALIGN_UP(addr, size)
47
48#define clear_page(pgaddr) memset((pgaddr), 0, PAGE_SIZE)
49#define copy_page(to, from) memcpy((to), (from), PAGE_SIZE)
50
51#define clear_user_page(pgaddr, vaddr, page) memset((pgaddr), 0, PAGE_SIZE)
52#define copy_user_page(vto, vfrom, vaddr, topg) \
53 memcpy((vto), (vfrom), PAGE_SIZE)
54
55/*
56 * Use struct definitions to apply C type checking
57 */
58
59/* Page Global Directory entry */
60typedef struct {
61 unsigned long pgd;
62} pgd_t;
63
64/* Page Table entry */
65typedef struct {
66 unsigned long pte;
67} pte_t;
68
69typedef struct {
70 unsigned long pgprot;
71} pgprot_t;
72
73typedef struct page *pgtable_t;
74
75#define pte_val(x) ((x).pte)
76#define pgd_val(x) ((x).pgd)
77#define pgprot_val(x) ((x).pgprot)
78
79#define __pte(x) ((pte_t) { (x) })
80#define __pgd(x) ((pgd_t) { (x) })
81#define __pgprot(x) ((pgprot_t) { (x) })
82
83#ifdef CONFIG_64BIT /* fix: was CONFIG_64BITS, a symbol Kconfig never defines, so 64-bit builds got the 32-bit format */
84#define PTE_FMT "%016lx"
85#else
86#define PTE_FMT "%08lx"
87#endif
88
89extern unsigned long va_pa_offset;
90extern unsigned long pfn_base;
91
92extern unsigned long max_low_pfn;
93extern unsigned long min_low_pfn;
94
95#define __pa(x) ((unsigned long)(x) - va_pa_offset)
96#define __va(x) ((void *)((unsigned long) (x) + va_pa_offset))
97
98#define phys_to_pfn(phys) (PFN_DOWN(phys))
99#define pfn_to_phys(pfn) (PFN_PHYS(pfn))
100
101#define virt_to_pfn(vaddr) (phys_to_pfn(__pa(vaddr)))
102#define pfn_to_virt(pfn) (__va(pfn_to_phys(pfn)))
103
104#define virt_to_page(vaddr) (pfn_to_page(virt_to_pfn(vaddr)))
105#define page_to_virt(page) (pfn_to_virt(page_to_pfn(page)))
106
107#define page_to_phys(page) (pfn_to_phys(page_to_pfn(page)))
108#define page_to_bus(page) (page_to_phys(page))
109#define phys_to_page(paddr) (pfn_to_page(phys_to_pfn(paddr)))
110
111#define pfn_valid(pfn) \
112 (((pfn) >= pfn_base) && (((pfn)-pfn_base) < max_mapnr))
113
114#define ARCH_PFN_OFFSET (pfn_base)
115
116#endif /* __ASSEMBLY__ */
117
118#define virt_addr_valid(vaddr) (pfn_valid(virt_to_pfn(vaddr)))
119
120#define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | \
121 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
122
123#include <asm-generic/memory_model.h>
124#include <asm-generic/getorder.h>
125
126/* vDSO support */
127/* We do define AT_SYSINFO_EHDR but don't use the gate mechanism */
128#define __HAVE_ARCH_GATE_AREA
129
130#endif /* _ASM_RISCV_PAGE_H */
diff --git a/arch/riscv/include/asm/pci.h b/arch/riscv/include/asm/pci.h
new file mode 100644
index 000000000000..0f2fc9ef20fc
--- /dev/null
+++ b/arch/riscv/include/asm/pci.h
@@ -0,0 +1,48 @@
1/*
2 * Copyright (C) 2016 SiFive
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation, version 2.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 */
13
14#ifndef __ASM_RISCV_PCI_H
15#define __ASM_RISCV_PCI_H
16
17#include <linux/types.h>
18#include <linux/slab.h>
19#include <linux/dma-mapping.h>
20
21#include <asm/io.h>
22
23#define PCIBIOS_MIN_IO 0
24#define PCIBIOS_MIN_MEM 0
25
26/* RISC-V shim does not initialize PCI bus */
27#define pcibios_assign_all_busses() 1
28
29/* We do not have an IOMMU */
30#define PCI_DMA_BUS_IS_PHYS 1
31
32extern int isa_dma_bridge_buggy;
33
34#ifdef CONFIG_PCI
35static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel)
36{
37 /* no legacy IRQ on risc-v */
38 return -ENODEV;
39}
40
41static inline int pci_proc_domain(struct pci_bus *bus)
42{
43 /* always show the domain in /proc */
44 return 1;
45}
46#endif /* CONFIG_PCI */
47
48#endif /* __ASM_RISCV_PCI_H */
diff --git a/arch/riscv/include/asm/pgalloc.h b/arch/riscv/include/asm/pgalloc.h
new file mode 100644
index 000000000000..a79ed5faff3a
--- /dev/null
+++ b/arch/riscv/include/asm/pgalloc.h
@@ -0,0 +1,124 @@
1/*
2 * Copyright (C) 2009 Chen Liqin <liqin.chen@sunplusct.com>
3 * Copyright (C) 2012 Regents of the University of California
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License
7 * as published by the Free Software Foundation, version 2.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 */
14
15#ifndef _ASM_RISCV_PGALLOC_H
16#define _ASM_RISCV_PGALLOC_H
17
18#include <linux/mm.h>
19#include <asm/tlb.h>
20
21static inline void pmd_populate_kernel(struct mm_struct *mm,
22 pmd_t *pmd, pte_t *pte)
23{
24 unsigned long pfn = virt_to_pfn(pte);
25
26 set_pmd(pmd, __pmd((pfn << _PAGE_PFN_SHIFT) | _PAGE_TABLE));
27}
28
29static inline void pmd_populate(struct mm_struct *mm,
30 pmd_t *pmd, pgtable_t pte)
31{
32 unsigned long pfn = virt_to_pfn(page_address(pte));
33
34 set_pmd(pmd, __pmd((pfn << _PAGE_PFN_SHIFT) | _PAGE_TABLE));
35}
36
37#ifndef __PAGETABLE_PMD_FOLDED
38static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
39{
40 unsigned long pfn = virt_to_pfn(pmd);
41
42 set_pud(pud, __pud((pfn << _PAGE_PFN_SHIFT) | _PAGE_TABLE));
43}
44#endif /* __PAGETABLE_PMD_FOLDED */
45
46#define pmd_pgtable(pmd) pmd_page(pmd)
47
48static inline pgd_t *pgd_alloc(struct mm_struct *mm)
49{
50 pgd_t *pgd;
51
52 pgd = (pgd_t *)__get_free_page(GFP_KERNEL);
53 if (likely(pgd != NULL)) {
54 memset(pgd, 0, USER_PTRS_PER_PGD * sizeof(pgd_t));
55 /* Copy kernel mappings */
56 memcpy(pgd + USER_PTRS_PER_PGD,
57 init_mm.pgd + USER_PTRS_PER_PGD,
58 (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
59 }
60 return pgd;
61}
62
63static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
64{
65 free_page((unsigned long)pgd);
66}
67
68#ifndef __PAGETABLE_PMD_FOLDED
69
70static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
71{
72 return (pmd_t *)__get_free_page(
73 GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_ZERO);
74}
75
76static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
77{
78 free_page((unsigned long)pmd);
79}
80
81#define __pmd_free_tlb(tlb, pmd, addr) pmd_free((tlb)->mm, pmd)
82
83#endif /* __PAGETABLE_PMD_FOLDED */
84
85static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
86 unsigned long address)
87{
88 return (pte_t *)__get_free_page(
89 GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_ZERO);
90}
91
92static inline struct page *pte_alloc_one(struct mm_struct *mm,
93 unsigned long address)
94{
95 struct page *pte;
96
97 pte = alloc_page(GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_ZERO);
98 if (likely(pte != NULL))
99 pgtable_page_ctor(pte);
100 return pte;
101}
102
103static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
104{
105 free_page((unsigned long)pte);
106}
107
108static inline void pte_free(struct mm_struct *mm, pgtable_t pte)
109{
110 pgtable_page_dtor(pte);
111 __free_page(pte);
112}
113
114#define __pte_free_tlb(tlb, pte, buf) \
115do { \
116 pgtable_page_dtor(pte); \
117 tlb_remove_page((tlb), pte); \
118} while (0)
119
120static inline void check_pgt_cache(void)
121{
122}
123
124#endif /* _ASM_RISCV_PGALLOC_H */
diff --git a/arch/riscv/include/asm/pgtable-32.h b/arch/riscv/include/asm/pgtable-32.h
new file mode 100644
index 000000000000..d61974b74182
--- /dev/null
+++ b/arch/riscv/include/asm/pgtable-32.h
@@ -0,0 +1,25 @@
1/*
2 * Copyright (C) 2012 Regents of the University of California
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation, version 2.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 */
13
14#ifndef _ASM_RISCV_PGTABLE_32_H
15#define _ASM_RISCV_PGTABLE_32_H
16
17#include <asm-generic/pgtable-nopmd.h>
18#include <linux/const.h>
19
20/* Size of region mapped by a page global directory */
21#define PGDIR_SHIFT 22
22#define PGDIR_SIZE (_AC(1, UL) << PGDIR_SHIFT)
23#define PGDIR_MASK (~(PGDIR_SIZE - 1))
24
25#endif /* _ASM_RISCV_PGTABLE_32_H */
diff --git a/arch/riscv/include/asm/pgtable-64.h b/arch/riscv/include/asm/pgtable-64.h
new file mode 100644
index 000000000000..7aa0ea9bd8bb
--- /dev/null
+++ b/arch/riscv/include/asm/pgtable-64.h
@@ -0,0 +1,84 @@
1/*
2 * Copyright (C) 2012 Regents of the University of California
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation, version 2.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 */
13
14#ifndef _ASM_RISCV_PGTABLE_64_H
15#define _ASM_RISCV_PGTABLE_64_H
16
17#include <linux/const.h>
18
19#define PGDIR_SHIFT 30
20/* Size of region mapped by a page global directory */
21#define PGDIR_SIZE (_AC(1, UL) << PGDIR_SHIFT)
22#define PGDIR_MASK (~(PGDIR_SIZE - 1))
23
24#define PMD_SHIFT 21
25/* Size of region mapped by a page middle directory */
26#define PMD_SIZE (_AC(1, UL) << PMD_SHIFT)
27#define PMD_MASK (~(PMD_SIZE - 1))
28
29/* Page Middle Directory entry */
30typedef struct {
31 unsigned long pmd;
32} pmd_t;
33
34#define pmd_val(x) ((x).pmd)
35#define __pmd(x) ((pmd_t) { (x) })
36
37#define PTRS_PER_PMD (PAGE_SIZE / sizeof(pmd_t))
38
39static inline int pud_present(pud_t pud)
40{
41 return (pud_val(pud) & _PAGE_PRESENT);
42}
43
44static inline int pud_none(pud_t pud)
45{
46 return (pud_val(pud) == 0);
47}
48
49static inline int pud_bad(pud_t pud)
50{
51 return !pud_present(pud);
52}
53
54static inline void set_pud(pud_t *pudp, pud_t pud)
55{
56 *pudp = pud;
57}
58
59static inline void pud_clear(pud_t *pudp)
60{
61 set_pud(pudp, __pud(0));
62}
63
64static inline unsigned long pud_page_vaddr(pud_t pud)
65{
66 return (unsigned long)pfn_to_virt(pud_val(pud) >> _PAGE_PFN_SHIFT);
67}
68
69#define pmd_index(addr) (((addr) >> PMD_SHIFT) & (PTRS_PER_PMD - 1))
70
71static inline pmd_t *pmd_offset(pud_t *pud, unsigned long addr)
72{
73 return (pmd_t *)pud_page_vaddr(*pud) + pmd_index(addr);
74}
75
76static inline pmd_t pfn_pmd(unsigned long pfn, pgprot_t prot)
77{
78 return __pmd((pfn << _PAGE_PFN_SHIFT) | pgprot_val(prot));
79}
80
81#define pmd_ERROR(e) \
82 pr_err("%s:%d: bad pmd %016lx.\n", __FILE__, __LINE__, pmd_val(e))
83
84#endif /* _ASM_RISCV_PGTABLE_64_H */
diff --git a/arch/riscv/include/asm/pgtable-bits.h b/arch/riscv/include/asm/pgtable-bits.h
new file mode 100644
index 000000000000..997ddbb1d370
--- /dev/null
+++ b/arch/riscv/include/asm/pgtable-bits.h
@@ -0,0 +1,48 @@
1/*
2 * Copyright (C) 2012 Regents of the University of California
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation, version 2.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 */
13
14#ifndef _ASM_RISCV_PGTABLE_BITS_H
15#define _ASM_RISCV_PGTABLE_BITS_H
16
17/*
18 * PTE format:
19 * | XLEN-1 10 | 9 8 | 7 | 6 | 5 | 4 | 3 | 2 | 1 | 0
20 * PFN reserved for SW D A G U X W R V
21 */
22
23#define _PAGE_ACCESSED_OFFSET 6
24
25#define _PAGE_PRESENT (1 << 0)
26#define _PAGE_READ (1 << 1) /* Readable */
27#define _PAGE_WRITE (1 << 2) /* Writable */
28#define _PAGE_EXEC (1 << 3) /* Executable */
29#define _PAGE_USER (1 << 4) /* User */
30#define _PAGE_GLOBAL (1 << 5) /* Global */
31#define _PAGE_ACCESSED (1 << 6) /* Set by hardware on any access */
32#define _PAGE_DIRTY (1 << 7) /* Set by hardware on any write */
33#define _PAGE_SOFT (1 << 8) /* Reserved for software */
34
35#define _PAGE_SPECIAL _PAGE_SOFT
36#define _PAGE_TABLE _PAGE_PRESENT
37
38#define _PAGE_PFN_SHIFT 10
39
40/* Set of bits to preserve across pte_modify() */
41#define _PAGE_CHG_MASK (~(unsigned long)(_PAGE_PRESENT | _PAGE_READ | \
42 _PAGE_WRITE | _PAGE_EXEC | \
43 _PAGE_USER | _PAGE_GLOBAL))
44
45/* Advertise support for _PAGE_SPECIAL */
46#define __HAVE_ARCH_PTE_SPECIAL
47
48#endif /* _ASM_RISCV_PGTABLE_BITS_H */
diff --git a/arch/riscv/include/asm/pgtable.h b/arch/riscv/include/asm/pgtable.h
new file mode 100644
index 000000000000..3399257780b2
--- /dev/null
+++ b/arch/riscv/include/asm/pgtable.h
@@ -0,0 +1,430 @@
1/*
2 * Copyright (C) 2012 Regents of the University of California
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation, version 2.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 */
13
14#ifndef _ASM_RISCV_PGTABLE_H
15#define _ASM_RISCV_PGTABLE_H
16
17#include <linux/mmzone.h>
18
19#include <asm/pgtable-bits.h>
20
21#ifndef __ASSEMBLY__
22
23#ifdef CONFIG_MMU
24
25/* Page Upper Directory not used in RISC-V */
26#include <asm-generic/pgtable-nopud.h>
27#include <asm/page.h>
28#include <asm/tlbflush.h>
29#include <linux/mm_types.h>
30
31#ifdef CONFIG_64BIT
32#include <asm/pgtable-64.h>
33#else
34#include <asm/pgtable-32.h>
35#endif /* CONFIG_64BIT */
36
37/* Number of entries in the page global directory */
38#define PTRS_PER_PGD (PAGE_SIZE / sizeof(pgd_t))
39/* Number of entries in the page table */
40#define PTRS_PER_PTE (PAGE_SIZE / sizeof(pte_t))
41
42/* Number of PGD entries that a user-mode program can use */
43#define USER_PTRS_PER_PGD (TASK_SIZE / PGDIR_SIZE)
44#define FIRST_USER_ADDRESS 0
45
46/* Page protection bits */
47#define _PAGE_BASE (_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_USER)
48
49#define PAGE_NONE __pgprot(0)
50#define PAGE_READ __pgprot(_PAGE_BASE | _PAGE_READ)
51#define PAGE_WRITE __pgprot(_PAGE_BASE | _PAGE_READ | _PAGE_WRITE)
52#define PAGE_EXEC __pgprot(_PAGE_BASE | _PAGE_EXEC)
53#define PAGE_READ_EXEC __pgprot(_PAGE_BASE | _PAGE_READ | _PAGE_EXEC)
54#define PAGE_WRITE_EXEC __pgprot(_PAGE_BASE | _PAGE_READ | \
55 _PAGE_EXEC | _PAGE_WRITE)
56
57#define PAGE_COPY PAGE_READ
58#define PAGE_COPY_EXEC PAGE_EXEC
59#define PAGE_COPY_READ_EXEC PAGE_READ_EXEC
60#define PAGE_SHARED PAGE_WRITE
61#define PAGE_SHARED_EXEC PAGE_WRITE_EXEC
62
63#define _PAGE_KERNEL (_PAGE_READ \
64 | _PAGE_WRITE \
65 | _PAGE_PRESENT \
66 | _PAGE_ACCESSED \
67 | _PAGE_DIRTY)
68
69#define PAGE_KERNEL __pgprot(_PAGE_KERNEL)
70#define PAGE_KERNEL_EXEC __pgprot(_PAGE_KERNEL | _PAGE_EXEC)
71
72extern pgd_t swapper_pg_dir[];
73
74/* MAP_PRIVATE permissions: xwr (copy-on-write) */
75#define __P000 PAGE_NONE
76#define __P001 PAGE_READ
77#define __P010 PAGE_COPY
78#define __P011 PAGE_COPY
79#define __P100 PAGE_EXEC
80#define __P101 PAGE_READ_EXEC
81#define __P110 PAGE_COPY_EXEC
82#define __P111 PAGE_COPY_READ_EXEC
83
84/* MAP_SHARED permissions: xwr */
85#define __S000 PAGE_NONE
86#define __S001 PAGE_READ
87#define __S010 PAGE_SHARED
88#define __S011 PAGE_SHARED
89#define __S100 PAGE_EXEC
90#define __S101 PAGE_READ_EXEC
91#define __S110 PAGE_SHARED_EXEC
92#define __S111 PAGE_SHARED_EXEC
93
94/*
95 * ZERO_PAGE is a global shared page that is always zero,
96 * used for zero-mapped memory areas, etc.
97 */
98extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
99#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
100
101static inline int pmd_present(pmd_t pmd)
102{
103 return (pmd_val(pmd) & _PAGE_PRESENT);
104}
105
106static inline int pmd_none(pmd_t pmd)
107{
108 return (pmd_val(pmd) == 0);
109}
110
111static inline int pmd_bad(pmd_t pmd)
112{
113 return !pmd_present(pmd);
114}
115
116static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
117{
118 *pmdp = pmd;
119}
120
121static inline void pmd_clear(pmd_t *pmdp)
122{
123 set_pmd(pmdp, __pmd(0));
124}
125
126
127static inline pgd_t pfn_pgd(unsigned long pfn, pgprot_t prot)
128{
129 return __pgd((pfn << _PAGE_PFN_SHIFT) | pgprot_val(prot));
130}
131
132#define pgd_index(addr) (((addr) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1))
133
134/* Locate an entry in the page global directory */
135static inline pgd_t *pgd_offset(const struct mm_struct *mm, unsigned long addr)
136{
137 return mm->pgd + pgd_index(addr);
138}
139/* Locate an entry in the kernel page global directory */
140#define pgd_offset_k(addr) pgd_offset(&init_mm, (addr))
141
142static inline struct page *pmd_page(pmd_t pmd)
143{
144 return pfn_to_page(pmd_val(pmd) >> _PAGE_PFN_SHIFT);
145}
146
147static inline unsigned long pmd_page_vaddr(pmd_t pmd)
148{
149 return (unsigned long)pfn_to_virt(pmd_val(pmd) >> _PAGE_PFN_SHIFT);
150}
151
152/* Yields the page frame number (PFN) of a page table entry */
153static inline unsigned long pte_pfn(pte_t pte)
154{
155 return (pte_val(pte) >> _PAGE_PFN_SHIFT);
156}
157
158#define pte_page(x) pfn_to_page(pte_pfn(x))
159
160/* Constructs a page table entry */
161static inline pte_t pfn_pte(unsigned long pfn, pgprot_t prot)
162{
163 return __pte((pfn << _PAGE_PFN_SHIFT) | pgprot_val(prot));
164}
165
166static inline pte_t mk_pte(struct page *page, pgprot_t prot)
167{
168 return pfn_pte(page_to_pfn(page), prot);
169}
170
171#define pte_index(addr) (((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
172
173static inline pte_t *pte_offset_kernel(pmd_t *pmd, unsigned long addr)
174{
175 return (pte_t *)pmd_page_vaddr(*pmd) + pte_index(addr);
176}
177
178#define pte_offset_map(dir, addr) pte_offset_kernel((dir), (addr))
179#define pte_unmap(pte) ((void)(pte))
180
/*
 * Certain architectures need to do special things when PTEs within
 * a page table are directly modified.  Thus, the following hook is
 * made available: every PTE store in this file funnels through it.
 */
static inline void set_pte(pte_t *ptep, pte_t pteval)
{
	*ptep = pteval;
}

/* Install @pteval for @addr; RISC-V needs no per-mm work beyond the store. */
static inline void set_pte_at(struct mm_struct *mm,
	unsigned long addr, pte_t *ptep, pte_t pteval)
{
	set_pte(ptep, pteval);
}

/* Reset the PTE for @addr to the empty (not-present) value. */
static inline void pte_clear(struct mm_struct *mm,
	unsigned long addr, pte_t *ptep)
{
	set_pte_at(mm, addr, ptep, __pte(0));
}
202
203static inline int pte_present(pte_t pte)
204{
205 return (pte_val(pte) & _PAGE_PRESENT);
206}
207
208static inline int pte_none(pte_t pte)
209{
210 return (pte_val(pte) == 0);
211}
212
213/* static inline int pte_read(pte_t pte) */
214
215static inline int pte_write(pte_t pte)
216{
217 return pte_val(pte) & _PAGE_WRITE;
218}
219
220static inline int pte_huge(pte_t pte)
221{
222 return pte_present(pte)
223 && (pte_val(pte) & (_PAGE_READ | _PAGE_WRITE | _PAGE_EXEC));
224}
225
226/* static inline int pte_exec(pte_t pte) */
227
228static inline int pte_dirty(pte_t pte)
229{
230 return pte_val(pte) & _PAGE_DIRTY;
231}
232
233static inline int pte_young(pte_t pte)
234{
235 return pte_val(pte) & _PAGE_ACCESSED;
236}
237
238static inline int pte_special(pte_t pte)
239{
240 return pte_val(pte) & _PAGE_SPECIAL;
241}
242
243/* static inline pte_t pte_rdprotect(pte_t pte) */
244
245static inline pte_t pte_wrprotect(pte_t pte)
246{
247 return __pte(pte_val(pte) & ~(_PAGE_WRITE));
248}
249
250/* static inline pte_t pte_mkread(pte_t pte) */
251
252static inline pte_t pte_mkwrite(pte_t pte)
253{
254 return __pte(pte_val(pte) | _PAGE_WRITE);
255}
256
257/* static inline pte_t pte_mkexec(pte_t pte) */
258
259static inline pte_t pte_mkdirty(pte_t pte)
260{
261 return __pte(pte_val(pte) | _PAGE_DIRTY);
262}
263
264static inline pte_t pte_mkclean(pte_t pte)
265{
266 return __pte(pte_val(pte) & ~(_PAGE_DIRTY));
267}
268
269static inline pte_t pte_mkyoung(pte_t pte)
270{
271 return __pte(pte_val(pte) | _PAGE_ACCESSED);
272}
273
274static inline pte_t pte_mkold(pte_t pte)
275{
276 return __pte(pte_val(pte) & ~(_PAGE_ACCESSED));
277}
278
279static inline pte_t pte_mkspecial(pte_t pte)
280{
281 return __pte(pte_val(pte) | _PAGE_SPECIAL);
282}
283
284/* Modify page protection bits */
285static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
286{
287 return __pte((pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot));
288}
289
290#define pgd_ERROR(e) \
291 pr_err("%s:%d: bad pgd " PTE_FMT ".\n", __FILE__, __LINE__, pgd_val(e))
292
293
/* Commit new configuration to MMU hardware after a PTE update. */
static inline void update_mmu_cache(struct vm_area_struct *vma,
	unsigned long address, pte_t *ptep)
{
	/*
	 * The kernel assumes that TLBs don't cache invalid entries, but
	 * in RISC-V, SFENCE.VMA specifies an ordering constraint, not a
	 * cache flush; it is necessary even after writing invalid entries.
	 * Relying on flush_tlb_fix_spurious_fault would suffice, but
	 * the extra traps reduce performance.  So, eagerly SFENCE.VMA.
	 */
	local_flush_tlb_page(address);
}
307
#define __HAVE_ARCH_PTE_SAME
/* Two PTEs are the same iff every bit, including SW bits, matches. */
static inline int pte_same(pte_t pte_a, pte_t pte_b)
{
	return pte_val(pte_a) == pte_val(pte_b);
}
313
#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
/*
 * Update access/dirty/permission bits for a present PTE after a fault.
 * Returns true so the caller always invokes update_mmu_cache().
 */
static inline int ptep_set_access_flags(struct vm_area_struct *vma,
					unsigned long address, pte_t *ptep,
					pte_t entry, int dirty)
{
	if (!pte_same(*ptep, entry))
		set_pte_at(vma->vm_mm, address, ptep, entry);
	/*
	 * update_mmu_cache will unconditionally execute, handling both
	 * the case that the PTE changed and the spurious fault case.
	 */
	return true;
}
327
328#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
329static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
330 unsigned long address, pte_t *ptep)
331{
332 return __pte(atomic_long_xchg((atomic_long_t *)ptep, 0));
333}
334
#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
/*
 * Atomically clear the accessed bit and report whether it was set.
 * The plain read first avoids the atomic RMW when the bit is already
 * clear.
 */
static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
					    unsigned long address,
					    pte_t *ptep)
{
	if (!pte_young(*ptep))
		return 0;
	return test_and_clear_bit(_PAGE_ACCESSED_OFFSET, &pte_val(*ptep));
}
344
#define __HAVE_ARCH_PTEP_SET_WRPROTECT
/* Atomically clear the write bit without disturbing concurrent HW updates. */
static inline void ptep_set_wrprotect(struct mm_struct *mm,
				      unsigned long address, pte_t *ptep)
{
	atomic_long_and(~(unsigned long)_PAGE_WRITE, (atomic_long_t *)ptep);
}
351
#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
/* Clear the accessed bit; deliberately skips the TLB flush (see below). */
static inline int ptep_clear_flush_young(struct vm_area_struct *vma,
					 unsigned long address, pte_t *ptep)
{
	/*
	 * This comment is borrowed from x86, but applies equally to RISC-V:
	 *
	 * Clearing the accessed bit without a TLB flush
	 * doesn't cause data corruption. [ It could cause incorrect
	 * page aging and the (mistaken) reclaim of hot pages, but the
	 * chance of that should be relatively low. ]
	 *
	 * So as a performance optimization don't flush the TLB when
	 * clearing the accessed bit, it will eventually be flushed by
	 * a context switch or a VM operation anyway. [ In the rare
	 * event of it not getting flushed for a long time the delay
	 * shouldn't really matter because there's no real memory
	 * pressure for swapout to react to. ]
	 */
	return ptep_test_and_clear_young(vma, address, ptep);
}
373
374/*
375 * Encode and decode a swap entry
376 *
377 * Format of swap PTE:
378 * bit 0: _PAGE_PRESENT (zero)
379 * bit 1: reserved for future use (zero)
380 * bits 2 to 6: swap type
381 * bits 7 to XLEN-1: swap offset
382 */
383#define __SWP_TYPE_SHIFT 2
384#define __SWP_TYPE_BITS 5
385#define __SWP_TYPE_MASK ((1UL << __SWP_TYPE_BITS) - 1)
386#define __SWP_OFFSET_SHIFT (__SWP_TYPE_BITS + __SWP_TYPE_SHIFT)
387
388#define MAX_SWAPFILES_CHECK() \
389 BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > __SWP_TYPE_BITS)
390
391#define __swp_type(x) (((x).val >> __SWP_TYPE_SHIFT) & __SWP_TYPE_MASK)
392#define __swp_offset(x) ((x).val >> __SWP_OFFSET_SHIFT)
393#define __swp_entry(type, offset) ((swp_entry_t) \
394 { ((type) << __SWP_TYPE_SHIFT) | ((offset) << __SWP_OFFSET_SHIFT) })
395
396#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
397#define __swp_entry_to_pte(x) ((pte_t) { (x).val })
398
399#ifdef CONFIG_FLATMEM
400#define kern_addr_valid(addr) (1) /* FIXME */
401#endif
402
403extern void paging_init(void);
404
/* Hook called at boot; RISC-V keeps no page-table caches to set up. */
static inline void pgtable_cache_init(void)
{
	/* No page table caches to initialize */
}
409
410#endif /* CONFIG_MMU */
411
412#define VMALLOC_SIZE (KERN_VIRT_SIZE >> 1)
413#define VMALLOC_END (PAGE_OFFSET - 1)
414#define VMALLOC_START (PAGE_OFFSET - VMALLOC_SIZE)
415
416/*
417 * Task size is 0x40000000000 for RV64 or 0xb800000 for RV32.
418 * Note that PGDIR_SIZE must evenly divide TASK_SIZE.
419 */
420#ifdef CONFIG_64BIT
421#define TASK_SIZE (PGDIR_SIZE * PTRS_PER_PGD / 2)
422#else
423#define TASK_SIZE VMALLOC_START
424#endif
425
426#include <asm-generic/pgtable.h>
427
428#endif /* !__ASSEMBLY__ */
429
430#endif /* _ASM_RISCV_PGTABLE_H */
diff --git a/arch/riscv/include/asm/processor.h b/arch/riscv/include/asm/processor.h
new file mode 100644
index 000000000000..3fe4af8147d2
--- /dev/null
+++ b/arch/riscv/include/asm/processor.h
@@ -0,0 +1,97 @@
1/*
2 * Copyright (C) 2012 Regents of the University of California
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation, version 2.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 */
13
14#ifndef _ASM_RISCV_PROCESSOR_H
15#define _ASM_RISCV_PROCESSOR_H
16
17#include <linux/const.h>
18
19#include <asm/ptrace.h>
20
21/*
22 * This decides where the kernel will search for a free chunk of vm
23 * space during mmap's.
24 */
25#define TASK_UNMAPPED_BASE PAGE_ALIGN(TASK_SIZE >> 1)
26
27#define STACK_TOP TASK_SIZE
28#define STACK_TOP_MAX STACK_TOP
29#define STACK_ALIGN 16
30
31#ifndef __ASSEMBLY__
32
33struct task_struct;
34struct pt_regs;
35
36/*
37 * Default implementation of macro that returns current
38 * instruction pointer ("program counter").
39 */
40#define current_text_addr() ({ __label__ _l; _l: &&_l; })
41
/* CPU-specific state of a task, saved/restored across context switch */
struct thread_struct {
	/* Callee-saved registers */
	unsigned long ra;		/* return address */
	unsigned long sp;		/* Kernel mode stack */
	unsigned long s[12];		/* s[0]: frame pointer */
	struct __riscv_d_ext_state fstate;	/* FP register file + fcsr */
};
50
51#define INIT_THREAD { \
52 .sp = sizeof(init_stack) + (long)&init_stack, \
53}
54
55#define task_pt_regs(tsk) \
56 ((struct pt_regs *)(task_stack_page(tsk) + THREAD_SIZE \
57 - ALIGN(sizeof(struct pt_regs), STACK_ALIGN)))
58
59#define KSTK_EIP(tsk) (task_pt_regs(tsk)->sepc)
60#define KSTK_ESP(tsk) (task_pt_regs(tsk)->sp)
61
62
63/* Do necessary setup to start up a newly executed thread. */
64extern void start_thread(struct pt_regs *regs,
65 unsigned long pc, unsigned long sp);
66
/* Free all resources held by a thread; nothing arch-specific to drop. */
static inline void release_thread(struct task_struct *dead_task)
{
}
71
72extern unsigned long get_wchan(struct task_struct *p);
73
74
/* Busy-wait hint: give the core something slow to chew on, then a
 * compiler barrier so spin loops re-read memory. */
static inline void cpu_relax(void)
{
#ifdef __riscv_muldiv
	int dummy;
	/* In lieu of a halt instruction, induce a long-latency stall. */
	__asm__ __volatile__ ("div %0, %0, zero" : "=r" (dummy));
#endif
	barrier();
}
84
/* Stall the hart until an interrupt becomes pending (WFI instruction). */
static inline void wait_for_interrupt(void)
{
	__asm__ __volatile__ ("wfi");
}
89
90struct device_node;
91extern int riscv_of_processor_hart(struct device_node *node);
92
93extern void riscv_fill_hwcap(void);
94
95#endif /* __ASSEMBLY__ */
96
97#endif /* _ASM_RISCV_PROCESSOR_H */
diff --git a/arch/riscv/include/asm/ptrace.h b/arch/riscv/include/asm/ptrace.h
new file mode 100644
index 000000000000..93b8956e25e4
--- /dev/null
+++ b/arch/riscv/include/asm/ptrace.h
@@ -0,0 +1,118 @@
1/*
2 * Copyright (C) 2012 Regents of the University of California
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation, version 2.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 */
13
14#ifndef _ASM_RISCV_PTRACE_H
15#define _ASM_RISCV_PTRACE_H
16
17#include <uapi/asm/ptrace.h>
18#include <asm/csr.h>
19
20#ifndef __ASSEMBLY__
21
/*
 * Register state saved on kernel entry (trap/syscall).
 * NOTE(review): the field order presumably must match the assembly
 * save/restore sequence and the asm-offsets constants — verify before
 * reordering anything here.
 */
struct pt_regs {
	unsigned long sepc;	/* PC at the time of the trap */
	unsigned long ra;
	unsigned long sp;
	unsigned long gp;
	unsigned long tp;
	unsigned long t0;
	unsigned long t1;
	unsigned long t2;
	unsigned long s0;
	unsigned long s1;
	unsigned long a0;
	unsigned long a1;
	unsigned long a2;
	unsigned long a3;
	unsigned long a4;
	unsigned long a5;
	unsigned long a6;
	unsigned long a7;
	unsigned long s2;
	unsigned long s3;
	unsigned long s4;
	unsigned long s5;
	unsigned long s6;
	unsigned long s7;
	unsigned long s8;
	unsigned long s9;
	unsigned long s10;
	unsigned long s11;
	unsigned long t3;
	unsigned long t4;
	unsigned long t5;
	unsigned long t6;
	/* Supervisor CSRs */
	unsigned long sstatus;
	unsigned long sbadaddr;
	unsigned long scause;
	/* a0 value before the syscall */
	unsigned long orig_a0;
};
62
63#ifdef CONFIG_64BIT
64#define REG_FMT "%016lx"
65#else
66#define REG_FMT "%08lx"
67#endif
68
69#define user_mode(regs) (((regs)->sstatus & SR_PS) == 0)
70
71
72/* Helpers for working with the instruction pointer */
73#define GET_IP(regs) ((regs)->sepc)
74#define SET_IP(regs, val) (GET_IP(regs) = (val))
75
76static inline unsigned long instruction_pointer(struct pt_regs *regs)
77{
78 return GET_IP(regs);
79}
80static inline void instruction_pointer_set(struct pt_regs *regs,
81 unsigned long val)
82{
83 SET_IP(regs, val);
84}
85
86#define profile_pc(regs) instruction_pointer(regs)
87
88/* Helpers for working with the user stack pointer */
89#define GET_USP(regs) ((regs)->sp)
90#define SET_USP(regs, val) (GET_USP(regs) = (val))
91
92static inline unsigned long user_stack_pointer(struct pt_regs *regs)
93{
94 return GET_USP(regs);
95}
96static inline void user_stack_pointer_set(struct pt_regs *regs,
97 unsigned long val)
98{
99 SET_USP(regs, val);
100}
101
102/* Helpers for working with the frame pointer */
103#define GET_FP(regs) ((regs)->s0)
104#define SET_FP(regs, val) (GET_FP(regs) = (val))
105
106static inline unsigned long frame_pointer(struct pt_regs *regs)
107{
108 return GET_FP(regs);
109}
110static inline void frame_pointer_set(struct pt_regs *regs,
111 unsigned long val)
112{
113 SET_FP(regs, val);
114}
115
116#endif /* __ASSEMBLY__ */
117
118#endif /* _ASM_RISCV_PTRACE_H */
diff --git a/arch/riscv/include/asm/sbi.h b/arch/riscv/include/asm/sbi.h
new file mode 100644
index 000000000000..b6bb10b92fe2
--- /dev/null
+++ b/arch/riscv/include/asm/sbi.h
@@ -0,0 +1,100 @@
1/*
2 * Copyright (C) 2015 Regents of the University of California
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation, version 2.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 */
13
14#ifndef _ASM_RISCV_SBI_H
15#define _ASM_RISCV_SBI_H
16
17#include <linux/types.h>
18
19#define SBI_SET_TIMER 0
20#define SBI_CONSOLE_PUTCHAR 1
21#define SBI_CONSOLE_GETCHAR 2
22#define SBI_CLEAR_IPI 3
23#define SBI_SEND_IPI 4
24#define SBI_REMOTE_FENCE_I 5
25#define SBI_REMOTE_SFENCE_VMA 6
26#define SBI_REMOTE_SFENCE_VMA_ASID 7
27#define SBI_SHUTDOWN 8
28
29#define SBI_CALL(which, arg0, arg1, arg2) ({ \
30 register uintptr_t a0 asm ("a0") = (uintptr_t)(arg0); \
31 register uintptr_t a1 asm ("a1") = (uintptr_t)(arg1); \
32 register uintptr_t a2 asm ("a2") = (uintptr_t)(arg2); \
33 register uintptr_t a7 asm ("a7") = (uintptr_t)(which); \
34 asm volatile ("ecall" \
35 : "+r" (a0) \
36 : "r" (a1), "r" (a2), "r" (a7) \
37 : "memory"); \
38 a0; \
39})
40
41/* Lazy implementations until SBI is finalized */
42#define SBI_CALL_0(which) SBI_CALL(which, 0, 0, 0)
43#define SBI_CALL_1(which, arg0) SBI_CALL(which, arg0, 0, 0)
44#define SBI_CALL_2(which, arg0, arg1) SBI_CALL(which, arg0, arg1, 0)
45
/* Write one character to the firmware console. */
static inline void sbi_console_putchar(int ch)
{
	SBI_CALL_1(SBI_CONSOLE_PUTCHAR, ch);
}

/* Read one character from the firmware console; returns whatever the
 * firmware leaves in a0 (presumably a negative code when no character
 * is pending — verify against the SBI implementation in use). */
static inline int sbi_console_getchar(void)
{
	return SBI_CALL_0(SBI_CONSOLE_GETCHAR);
}

/* Program the next timer interrupt.  On 32-bit the 64-bit deadline is
 * split across two argument registers, low word first. */
static inline void sbi_set_timer(uint64_t stime_value)
{
#if __riscv_xlen == 32
	SBI_CALL_2(SBI_SET_TIMER, stime_value, stime_value >> 32);
#else
	SBI_CALL_1(SBI_SET_TIMER, stime_value);
#endif
}

/* Ask the firmware to shut the machine down. */
static inline void sbi_shutdown(void)
{
	SBI_CALL_0(SBI_SHUTDOWN);
}

/* Clear any pending software interrupt (IPI) on the calling hart. */
static inline void sbi_clear_ipi(void)
{
	SBI_CALL_0(SBI_CLEAR_IPI);
}

/* Send a software interrupt to each hart set in @hart_mask. */
static inline void sbi_send_ipi(const unsigned long *hart_mask)
{
	SBI_CALL_1(SBI_SEND_IPI, hart_mask);
}

/* Execute FENCE.I on the harts in @hart_mask. */
static inline void sbi_remote_fence_i(const unsigned long *hart_mask)
{
	SBI_CALL_1(SBI_REMOTE_FENCE_I, hart_mask);
}
84
85static inline void sbi_remote_sfence_vma(const unsigned long *hart_mask,
86 unsigned long start,
87 unsigned long size)
88{
89 SBI_CALL_1(SBI_REMOTE_SFENCE_VMA, hart_mask);
90}
91
/*
 * Execute SFENCE.VMA with ASID on the harts in @hart_mask.
 * NOTE(review): @start, @size and @asid are currently dropped — the
 * SBI_CALL macro only marshals three arguments and this call needs
 * four, so the firmware presumably performs a full flush.  TODO:
 * extend SBI_CALL to four arguments and forward them; verify against
 * the finalized SBI spec.
 */
static inline void sbi_remote_sfence_vma_asid(const unsigned long *hart_mask,
					      unsigned long start,
					      unsigned long size,
					      unsigned long asid)
{
	SBI_CALL_1(SBI_REMOTE_SFENCE_VMA_ASID, hart_mask);
}
99
100#endif
diff --git a/arch/riscv/include/asm/smp.h b/arch/riscv/include/asm/smp.h
new file mode 100644
index 000000000000..85e4220839b0
--- /dev/null
+++ b/arch/riscv/include/asm/smp.h
@@ -0,0 +1,52 @@
1/*
2 * Copyright (C) 2012 Regents of the University of California
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation, version 2.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 */
13
14#ifndef _ASM_RISCV_SMP_H
15#define _ASM_RISCV_SMP_H
16
17/* This both needs asm-offsets.h and is used when generating it. */
18#ifndef GENERATING_ASM_OFFSETS
19#include <asm/asm-offsets.h>
20#endif
21
22#include <linux/cpumask.h>
23#include <linux/irqreturn.h>
24
25#ifdef CONFIG_SMP
26
27/* SMP initialization hook for setup_arch */
28void __init init_clockevent(void);
29
30/* SMP initialization hook for setup_arch */
31void __init setup_smp(void);
32
33/* Hook for the generic smp_call_function_many() routine. */
34void arch_send_call_function_ipi_mask(struct cpumask *mask);
35
36/* Hook for the generic smp_call_function_single() routine. */
37void arch_send_call_function_single_ipi(int cpu);
38
39/*
40 * This is particularly ugly: it appears we can't actually get the definition
41 * of task_struct here, but we need access to the CPU this task is running on.
42 * Instead of using C we're using asm-offsets.h to get the current processor
43 * ID.
44 */
45#define raw_smp_processor_id() (*((int*)((char*)get_current() + TASK_TI_CPU)))
46
47/* Interprocessor interrupt handler */
48irqreturn_t handle_ipi(void);
49
50#endif /* CONFIG_SMP */
51
52#endif /* _ASM_RISCV_SMP_H */
diff --git a/arch/riscv/include/asm/spinlock.h b/arch/riscv/include/asm/spinlock.h
new file mode 100644
index 000000000000..04c71d938afd
--- /dev/null
+++ b/arch/riscv/include/asm/spinlock.h
@@ -0,0 +1,151 @@
1/*
2 * Copyright (C) 2015 Regents of the University of California
3 * Copyright (C) 2017 SiFive
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License
7 * as published by the Free Software Foundation, version 2.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 */
14
15#ifndef _ASM_RISCV_SPINLOCK_H
16#define _ASM_RISCV_SPINLOCK_H
17
18#include <linux/kernel.h>
19#include <asm/current.h>
20
21/*
22 * Simple spin lock operations. These provide no fairness guarantees.
23 */
24
25/* FIXME: Replace this with a ticket lock, like MIPS. */
26
27#define arch_spin_is_locked(x) ((x)->lock != 0)
28
/*
 * Release the lock by atomically storing zero.  The ".rl" (release)
 * ordering keeps critical-section accesses from moving past the store.
 */
static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
	__asm__ __volatile__ (
		"amoswap.w.rl x0, x0, %0"
		: "=A" (lock->lock)
		:: "memory");
}

/*
 * Try to take the lock with one atomic swap of 1 into the lock word.
 * The ".aq" (acquire) ordering keeps critical-section accesses from
 * moving before the swap.  Returns non-zero iff the previous value
 * was 0 (i.e. we got the lock).
 */
static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
	int tmp = 1, busy;

	__asm__ __volatile__ (
		"amoswap.w.aq %0, %2, %1"
		: "=r" (busy), "+A" (lock->lock)
		: "r" (tmp)
		: "memory");

	return !busy;
}
49
50static inline void arch_spin_lock(arch_spinlock_t *lock)
51{
52 while (1) {
53 if (arch_spin_is_locked(lock))
54 continue;
55
56 if (arch_spin_trylock(lock))
57 break;
58 }
59}
60
/*
 * Busy-wait until the lock is observed free, without taking it.
 * smp_acquire__after_ctrl_dep() upgrades the final control-dependent
 * load to acquire ordering for the caller.
 */
static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
{
	smp_rmb();
	do {
		cpu_relax();
	} while (arch_spin_is_locked(lock));
	smp_acquire__after_ctrl_dep();
}
69
70/***********************************************************/
71
/*
 * Reader/writer lock word convention (from the asm below): positive
 * values count active readers, -1 marks a writer, 0 is free.
 *
 * Reader lock: spin while the word is negative (writer present),
 * otherwise LR/SC-increment the reader count.  The SC carries ".aq"
 * acquire ordering.
 */
static inline void arch_read_lock(arch_rwlock_t *lock)
{
	int tmp;

	__asm__ __volatile__(
		"1: lr.w %1, %0\n"
		" bltz %1, 1b\n"
		" addi %1, %1, 1\n"
		" sc.w.aq %1, %1, %0\n"
		" bnez %1, 1b\n"
		: "+A" (lock->lock), "=&r" (tmp)
		:: "memory");
}

/*
 * Writer lock: spin until the word is exactly 0 (no readers, no
 * writer), then LR/SC-store -1 to claim exclusive ownership.
 */
static inline void arch_write_lock(arch_rwlock_t *lock)
{
	int tmp;

	__asm__ __volatile__(
		"1: lr.w %1, %0\n"
		" bnez %1, 1b\n"
		" li %1, -1\n"
		" sc.w.aq %1, %1, %0\n"
		" bnez %1, 1b\n"
		: "+A" (lock->lock), "=&r" (tmp)
		:: "memory");
}
99
/*
 * One-shot reader lock: bail out (branch to the trailing label) if a
 * writer holds the word, retry only on SC failure.  Returns non-zero
 * on success.
 */
static inline int arch_read_trylock(arch_rwlock_t *lock)
{
	int busy;

	__asm__ __volatile__(
		"1: lr.w %1, %0\n"
		" bltz %1, 1f\n"
		" addi %1, %1, 1\n"
		" sc.w.aq %1, %1, %0\n"
		" bnez %1, 1b\n"
		"1:\n"
		: "+A" (lock->lock), "=&r" (busy)
		:: "memory");

	return !busy;
}

/*
 * One-shot writer lock: bail out if the word is non-zero (held in any
 * mode), retry only on SC failure.  Returns non-zero on success.
 */
static inline int arch_write_trylock(arch_rwlock_t *lock)
{
	int busy;

	__asm__ __volatile__(
		"1: lr.w %1, %0\n"
		" bnez %1, 1f\n"
		" li %1, -1\n"
		" sc.w.aq %1, %1, %0\n"
		" bnez %1, 1b\n"
		"1:\n"
		: "+A" (lock->lock), "=&r" (busy)
		:: "memory");

	return !busy;
}
133
/* Drop a reader: atomically decrement the reader count with release
 * ordering so critical-section accesses cannot move past it. */
static inline void arch_read_unlock(arch_rwlock_t *lock)
{
	__asm__ __volatile__(
		"amoadd.w.rl x0, %1, %0"
		: "+A" (lock->lock)
		: "r" (-1)
		: "memory");
}

/* Drop the writer: atomically store 0 with release ordering. */
static inline void arch_write_unlock(arch_rwlock_t *lock)
{
	__asm__ __volatile__ (
		"amoswap.w.rl x0, x0, %0"
		: "=A" (lock->lock)
		:: "memory");
}
150
151#endif /* _ASM_RISCV_SPINLOCK_H */
diff --git a/arch/riscv/include/asm/spinlock_types.h b/arch/riscv/include/asm/spinlock_types.h
new file mode 100644
index 000000000000..83ac4ac9e2ac
--- /dev/null
+++ b/arch/riscv/include/asm/spinlock_types.h
@@ -0,0 +1,33 @@
1/*
2 * Copyright (C) 2015 Regents of the University of California
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation, version 2.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 */
13
14#ifndef _ASM_RISCV_SPINLOCK_TYPES_H
15#define _ASM_RISCV_SPINLOCK_TYPES_H
16
17#ifndef __LINUX_SPINLOCK_TYPES_H
18# error "please don't include this file directly"
19#endif
20
/* Ticketless test-and-set spinlock: 0 = unlocked, non-zero = locked. */
21typedef struct {
22	volatile unsigned int lock;
23} arch_spinlock_t;
24
25#define __ARCH_SPIN_LOCK_UNLOCKED	{ 0 }
26
/* Reader/writer lock word: 0 = free, >0 = reader count, -1 = writer. */
27typedef struct {
28	volatile unsigned int lock;
29} arch_rwlock_t;
30
31#define __ARCH_RW_LOCK_UNLOCKED		{ 0 }
32
33#endif
diff --git a/arch/riscv/include/asm/string.h b/arch/riscv/include/asm/string.h
new file mode 100644
index 000000000000..9210fcf4ff52
--- /dev/null
+++ b/arch/riscv/include/asm/string.h
@@ -0,0 +1,26 @@
1/*
2 * Copyright (C) 2013 Regents of the University of California
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation, version 2.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 */
13
14#ifndef _ASM_RISCV_STRING_H
15#define _ASM_RISCV_STRING_H
16
17#include <linux/types.h>
18#include <linux/linkage.h>
19
/*
 * Assembly implementations; the __HAVE_ARCH_* defines suppress the
 * generic C fallbacks in lib/string.c.
 */
20#define __HAVE_ARCH_MEMSET
21extern asmlinkage void *memset(void *, int, size_t);
22
23#define __HAVE_ARCH_MEMCPY
24extern asmlinkage void *memcpy(void *, const void *, size_t);
25
26#endif /* _ASM_RISCV_STRING_H */
diff --git a/arch/riscv/include/asm/switch_to.h b/arch/riscv/include/asm/switch_to.h
new file mode 100644
index 000000000000..dd6b05bff75b
--- /dev/null
+++ b/arch/riscv/include/asm/switch_to.h
@@ -0,0 +1,69 @@
1/*
2 * Copyright (C) 2012 Regents of the University of California
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation, version 2.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 */
13
14#ifndef _ASM_RISCV_SWITCH_TO_H
15#define _ASM_RISCV_SWITCH_TO_H
16
17#include <asm/processor.h>
18#include <asm/ptrace.h>
19#include <asm/csr.h>
20
21extern void __fstate_save(struct task_struct *save_to);
22extern void __fstate_restore(struct task_struct *restore_from);
23
24static inline void __fstate_clean(struct pt_regs *regs)
25{
26 regs->sstatus |= (regs->sstatus & ~(SR_FS)) | SR_FS_CLEAN;
27}
28
/*
 * Save @task's FP registers only if the hardware marked them dirty since
 * the last save, then mark the state clean.
 */
29static inline void fstate_save(struct task_struct *task,
30			       struct pt_regs *regs)
31{
32	if ((regs->sstatus & SR_FS) == SR_FS_DIRTY) {
33		__fstate_save(task);
34		__fstate_clean(regs);
35	}
36}
37
/*
 * Reload @task's FP registers unless its FP unit is disabled entirely
 * (SR_FS_OFF), then mark the state clean.
 */
38static inline void fstate_restore(struct task_struct *task,
39				  struct pt_regs *regs)
40{
41	if ((regs->sstatus & SR_FS) != SR_FS_OFF) {
42		__fstate_restore(task);
43		__fstate_clean(regs);
44	}
45}
46
/*
 * Non-GPR part of a context switch.  SR_SD is the summary-dirty bit, so
 * the save is only taken when some extension state (here: FP) is dirty.
 */
47static inline void __switch_to_aux(struct task_struct *prev,
48				   struct task_struct *next)
49{
50	struct pt_regs *regs;
51
52	regs = task_pt_regs(prev);
53	if (unlikely(regs->sstatus & SR_SD))
54		fstate_save(prev, regs);
55	fstate_restore(next, task_pt_regs(next));
56}
57
58extern struct task_struct *__switch_to(struct task_struct *,
59 struct task_struct *);
60
/*
 * Full context switch: FP state first (__switch_to_aux), then the GPR /
 * stack switch in assembly.  (last) receives the task we switched from,
 * as seen after control returns to the new stack.
 */
61#define switch_to(prev, next, last)			\
62do {							\
63	struct task_struct *__prev = (prev);		\
64	struct task_struct *__next = (next);		\
65	__switch_to_aux(__prev, __next);		\
66	((last) = __switch_to(__prev, __next));		\
67} while (0)
68
69#endif /* _ASM_RISCV_SWITCH_TO_H */
diff --git a/arch/riscv/include/asm/syscall.h b/arch/riscv/include/asm/syscall.h
new file mode 100644
index 000000000000..8d25f8904c00
--- /dev/null
+++ b/arch/riscv/include/asm/syscall.h
@@ -0,0 +1,102 @@
1/*
2 * Copyright (C) 2008-2009 Red Hat, Inc. All rights reserved.
3 * Copyright 2010 Tilera Corporation. All Rights Reserved.
4 * Copyright 2015 Regents of the University of California, Berkeley
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation, version 2.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * See asm-generic/syscall.h for descriptions of what we must do here.
16 */
17
18#ifndef _ASM_RISCV_SYSCALL_H
19#define _ASM_RISCV_SYSCALL_H
20
21#include <linux/sched.h>
22#include <linux/err.h>
23
24/* The array of function pointers for syscalls. */
25extern void *sys_call_table[];
26
27/*
28 * Only the low 32 bits of orig_r0 are meaningful, so we return int.
29 * This importantly ignores the high bits on 64-bit, so comparisons
30 * sign-extend the low 32 bits.
31 */
32static inline int syscall_get_nr(struct task_struct *task,
33				 struct pt_regs *regs)
34{
35	return regs->a7;
36}
37
/* Overwrite the syscall number (a7), e.g. from a tracer. */
38static inline void syscall_set_nr(struct task_struct *task,
39				  struct pt_regs *regs,
40				  int sysno)
41{
42	regs->a7 = sysno;
43}
44
/* Undo the a0 clobber so the syscall can be restarted with its
 * original first argument. */
45static inline void syscall_rollback(struct task_struct *task,
46				    struct pt_regs *regs)
47{
48	regs->a0 = regs->orig_a0;
49}
50
/* Return the negative errno in a0 if the syscall failed, else 0. */
51static inline long syscall_get_error(struct task_struct *task,
52				     struct pt_regs *regs)
53{
54	unsigned long error = regs->a0;
55
56	return IS_ERR_VALUE(error) ? error : 0;
57}
58
59static inline long syscall_get_return_value(struct task_struct *task,
60					    struct pt_regs *regs)
61{
62	return regs->a0;
63}
64
/* Store a result: a non-zero error wins over val (gcc ?: extension). */
65static inline void syscall_set_return_value(struct task_struct *task,
66					    struct pt_regs *regs,
67					    int error, long val)
68{
69	regs->a0 = (long) error ?: val;
70}
71
72static inline void syscall_get_arguments(struct task_struct *task,
73 struct pt_regs *regs,
74 unsigned int i, unsigned int n,
75 unsigned long *args)
76{
77 BUG_ON(i + n > 6);
78 if (i == 0) {
79 args[0] = regs->orig_a0;
80 args++;
81 i++;
82 n--;
83 }
84 memcpy(args, &regs->a1 + i * sizeof(regs->a1), n * sizeof(args[0]));
85}
86
87static inline void syscall_set_arguments(struct task_struct *task,
88 struct pt_regs *regs,
89 unsigned int i, unsigned int n,
90 const unsigned long *args)
91{
92 BUG_ON(i + n > 6);
93 if (i == 0) {
94 regs->orig_a0 = args[0];
95 args++;
96 i++;
97 n--;
98 }
99 memcpy(&regs->a1 + i * sizeof(regs->a1), args, n * sizeof(regs->a0));
100}
101
102#endif /* _ASM_RISCV_SYSCALL_H */
diff --git a/arch/riscv/include/asm/thread_info.h b/arch/riscv/include/asm/thread_info.h
new file mode 100644
index 000000000000..22c3536ed281
--- /dev/null
+++ b/arch/riscv/include/asm/thread_info.h
@@ -0,0 +1,94 @@
1/*
2 * Copyright (C) 2009 Chen Liqin <liqin.chen@sunplusct.com>
3 * Copyright (C) 2012 Regents of the University of California
4 * Copyright (C) 2017 SiFive
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation, version 2.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 */
15
16#ifndef _ASM_RISCV_THREAD_INFO_H
17#define _ASM_RISCV_THREAD_INFO_H
18
19#include <asm/page.h>
20#include <linux/const.h>
21
22/* thread information allocation */
/* Kernel stacks are 2^1 = 2 pages (8 KiB with 4 KiB pages). */
23#define THREAD_SIZE_ORDER	(1)
24#define THREAD_SIZE		(PAGE_SIZE << THREAD_SIZE_ORDER)
25
26#ifndef __ASSEMBLY__
27
28#include <asm/processor.h>
29#include <asm/csr.h>
30
31typedef unsigned long mm_segment_t;
32
33/*
34 * low level task data that entry.S needs immediate access to
35 * - this struct should fit entirely inside of one cache line
36 * - if the members of this struct changes, the assembly constants
37 * in asm-offsets.c must be updated accordingly
38 * - thread_info is included in task_struct at an offset of 0. This means that
39 * tp points to both thread_info and task_struct.
40 */
41struct thread_info {
42	unsigned long		flags;		/* low level flags */
43	int                     preempt_count;  /* 0=>preemptible, <0=>BUG */
	/* user-space address limit consulted by access_ok() */
44	mm_segment_t		addr_limit;
45	/*
46	 * These stack pointers are overwritten on every system call or
47	 * exception.  SP is also saved to the stack it can be recovered when
48	 * overwritten.
49	 */
50	long			kernel_sp;	/* Kernel stack pointer */
51	long			user_sp;	/* User stack pointer */
52	int			cpu;
53};
54
55/*
56 * macros/functions for gaining access to the thread information structure
57 *
58 * preempt_count needs to be 1 initially, until the scheduler is functional.
59 */
/* Static initializer for the boot task's thread_info. */
60#define INIT_THREAD_INFO(tsk)			\
61{						\
62	.flags		= 0,			\
63	.preempt_count	= INIT_PREEMPT_COUNT,	\
64	.addr_limit	= KERNEL_DS,		\
65}
66
/* The boot task's stack lives inside init_thread_union. */
67#define init_stack (init_thread_union.stack)
68
69#endif /* !__ASSEMBLY__ */
70
71/*
72 * thread information flags
73 * - these are process state flags that various assembly files may need to
74 * access
75 * - pending work-to-be-done flags are in lowest half-word
76 * - other flags in upper half-word(s)
77 */
78#define TIF_SYSCALL_TRACE	0	/* syscall trace active */
79#define TIF_NOTIFY_RESUME	1	/* callback before returning to user */
80#define TIF_SIGPENDING		2	/* signal pending */
81#define TIF_NEED_RESCHED	3	/* rescheduling necessary */
82#define TIF_RESTORE_SIGMASK	4	/* restore signal mask in do_signal() */
83#define TIF_MEMDIE		5	/* is terminating due to OOM killer */
84#define TIF_SYSCALL_TRACEPOINT  6       /* syscall tracepoint instrumentation */
85
86#define _TIF_SYSCALL_TRACE	(1 << TIF_SYSCALL_TRACE)
87#define _TIF_NOTIFY_RESUME	(1 << TIF_NOTIFY_RESUME)
88#define _TIF_SIGPENDING		(1 << TIF_SIGPENDING)
89#define _TIF_NEED_RESCHED	(1 << TIF_NEED_RESCHED)
90
/* Flags checked in assembly on the return-to-userspace path. */
91#define _TIF_WORK_MASK \
92	(_TIF_NOTIFY_RESUME | _TIF_SIGPENDING | _TIF_NEED_RESCHED)
93
94#endif /* _ASM_RISCV_THREAD_INFO_H */
diff --git a/arch/riscv/include/asm/timex.h b/arch/riscv/include/asm/timex.h
new file mode 100644
index 000000000000..3df4932d8964
--- /dev/null
+++ b/arch/riscv/include/asm/timex.h
@@ -0,0 +1,59 @@
1/*
2 * Copyright (C) 2012 Regents of the University of California
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation, version 2.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 */
13
14#ifndef _ASM_RISCV_TIMEX_H
15#define _ASM_RISCV_TIMEX_H
16
17#include <asm/param.h>
18
19typedef unsigned long cycles_t;
20
/* Read the low XLEN bits of the real-time counter (rdtime). */
21static inline cycles_t get_cycles(void)
22{
23	cycles_t n;
24
25	__asm__ __volatile__ (
26		"rdtime %0"
27		: "=r" (n));
28	return n;
29}
30
31#ifdef CONFIG_64BIT
32static inline uint64_t get_cycles64(void)
33{
34	return get_cycles();
35}
36#else
/*
 * On 32-bit, the 64-bit counter must be read in two halves.  The hi
 * half is re-read after lo and the sequence retried if it changed, so a
 * carry between the two reads cannot produce a torn value.
 */
37static inline uint64_t get_cycles64(void)
38{
39	u32 lo, hi, tmp;
40	__asm__ __volatile__ (
41		"1:\n"
42		"rdtimeh %0\n"
43		"rdtime %1\n"
44		"rdtimeh %2\n"
45		"bne %0, %2, 1b"
46		: "=&r" (hi), "=&r" (lo), "=&r" (tmp));
47	return ((u64)hi << 32) | lo;
48}
49#endif
50
51#define ARCH_HAS_READ_CURRENT_TIMER
52
/* Calibration hook for the delay loop; always succeeds here. */
53static inline int read_current_timer(unsigned long *timer_val)
54{
55	*timer_val = get_cycles();
56	return 0;
57}
58
59#endif /* _ASM_RISCV_TIMEX_H */
diff --git a/arch/riscv/include/asm/tlb.h b/arch/riscv/include/asm/tlb.h
new file mode 100644
index 000000000000..c229509288ea
--- /dev/null
+++ b/arch/riscv/include/asm/tlb.h
@@ -0,0 +1,24 @@
1/*
2 * Copyright (C) 2012 Regents of the University of California
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation, version 2.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 */
13
14#ifndef _ASM_RISCV_TLB_H
15#define _ASM_RISCV_TLB_H
16
17#include <asm-generic/tlb.h>
18
/* mmu_gather teardown: flush the whole mm rather than tracking ranges. */
19static inline void tlb_flush(struct mmu_gather *tlb)
20{
21	flush_tlb_mm(tlb->mm);
22}
23
24#endif /* _ASM_RISCV_TLB_H */
diff --git a/arch/riscv/include/asm/tlbflush.h b/arch/riscv/include/asm/tlbflush.h
new file mode 100644
index 000000000000..5ee4ae370b5e
--- /dev/null
+++ b/arch/riscv/include/asm/tlbflush.h
@@ -0,0 +1,64 @@
1/*
2 * Copyright (C) 2009 Chen Liqin <liqin.chen@sunplusct.com>
3 * Copyright (C) 2012 Regents of the University of California
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License
7 * as published by the Free Software Foundation, version 2.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 */
14
15#ifndef _ASM_RISCV_TLBFLUSH_H
16#define _ASM_RISCV_TLBFLUSH_H
17
18#ifdef CONFIG_MMU
19
20/* Flush entire local TLB */
/* sfence.vma with no operands invalidates all translations on this hart. */
21static inline void local_flush_tlb_all(void)
22{
23	__asm__ __volatile__ ("sfence.vma" : : : "memory");
24}
25
26/* Flush one page from local TLB */
27static inline void local_flush_tlb_page(unsigned long addr)
28{
29	__asm__ __volatile__ ("sfence.vma %0" : : "r" (addr) : "memory");
30}
31
#ifndef CONFIG_SMP

#define flush_tlb_all() local_flush_tlb_all()
#define flush_tlb_page(vma, addr) local_flush_tlb_page(addr)
#define flush_tlb_range(vma, start, end) local_flush_tlb_all()

#else /* CONFIG_SMP */

#include <asm/sbi.h>

/* hart_mask = 0 means "all harts" for the SBI remote-fence calls. */
#define flush_tlb_all() sbi_remote_sfence_vma(0, 0, -1)
/*
 * Fix: flushing one page must pass a one-page range.  The original
 * passed end = 0, so flush_tlb_range computed a size of (0 - addr),
 * which wraps to a near-2^XLEN value instead of PAGE_SIZE.
 */
#define flush_tlb_page(vma, addr) flush_tlb_range(vma, addr, (addr) + PAGE_SIZE)
#define flush_tlb_range(vma, start, end) \
	sbi_remote_sfence_vma(0, start, (end) - (start))

#endif /* CONFIG_SMP */
48
49/* Flush the TLB entries of the specified mm context */
/* No ASID support yet, so per-mm and kernel-range flushes are global. */
50static inline void flush_tlb_mm(struct mm_struct *mm)
51{
52	flush_tlb_all();
53}
54
55/* Flush a range of kernel pages */
56static inline void flush_tlb_kernel_range(unsigned long start,
57	unsigned long end)
58{
59	flush_tlb_all();
60}
61
62#endif /* CONFIG_MMU */
63
64#endif /* _ASM_RISCV_TLBFLUSH_H */
diff --git a/arch/riscv/include/asm/uaccess.h b/arch/riscv/include/asm/uaccess.h
new file mode 100644
index 000000000000..27b90d64814b
--- /dev/null
+++ b/arch/riscv/include/asm/uaccess.h
@@ -0,0 +1,513 @@
1/*
2 * Copyright (C) 2012 Regents of the University of California
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation, version 2.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 * This file was copied from include/asm-generic/uaccess.h
14 */
15
16#ifndef _ASM_RISCV_UACCESS_H
17#define _ASM_RISCV_UACCESS_H
18
19/*
20 * User space memory access functions
21 */
22#include <linux/errno.h>
23#include <linux/compiler.h>
24#include <linux/thread_info.h>
25#include <asm/byteorder.h>
26#include <asm/asm.h>
27
/*
 * Toggle sstatus.SUM, which permits supervisor-mode loads/stores to
 * user pages; every user access below is bracketed by this pair.
 */
28#define __enable_user_access()							\
29	__asm__ __volatile__ ("csrs sstatus, %0" : : "r" (SR_SUM) : "memory")
30#define __disable_user_access()							\
31	__asm__ __volatile__ ("csrc sstatus, %0" : : "r" (SR_SUM) : "memory")
32
33/*
34 * The fs value determines whether argument validity checking should be
35 * performed or not. If get_fs() == USER_DS, checking is performed, with
36 * get_fs() == KERNEL_DS, checking is bypassed.
37 *
38 * For historical reasons, these macros are grossly misnamed.
39 */
40
/* KERNEL_DS disables the range check; USER_DS caps it at TASK_SIZE. */
41#define KERNEL_DS	(~0UL)
42#define USER_DS		(TASK_SIZE)
43
44#define get_ds()	(KERNEL_DS)
45#define get_fs()	(current_thread_info()->addr_limit)
46
/* Per-thread: stores the limit consulted by __access_ok(). */
47static inline void set_fs(mm_segment_t fs)
48{
49	current_thread_info()->addr_limit = fs;
50}
51
52#define segment_eq(a, b) ((a) == (b))
53
54#define user_addr_max()	(get_fs())
55
56
57#define VERIFY_READ 0
58#define VERIFY_WRITE 1
59
60/**
61 * access_ok: - Checks if a user space pointer is valid
62 * @type: Type of access: %VERIFY_READ or %VERIFY_WRITE. Note that
63 * %VERIFY_WRITE is a superset of %VERIFY_READ - if it is safe
64 * to write to a block, it is always safe to read from it.
65 * @addr: User space pointer to start of block to check
66 * @size: Size of block to check
67 *
68 * Context: User context only. This function may sleep.
69 *
70 * Checks if a pointer to a block of memory in user space is valid.
71 *
72 * Returns true (nonzero) if the memory block may be valid, false (zero)
73 * if it is definitely invalid.
74 *
75 * Note that, depending on architecture, this function probably just
76 * checks that the pointer is in the user space range - after calling
77 * this function, memory access functions may still return -EFAULT.
78 */
79#define access_ok(type, addr, size) ({					\
80	__chk_user_ptr(addr);						\
81	likely(__access_ok((unsigned long __force)(addr), (size)));	\
82})
83
84/*
85 * Ensure that the range [addr, addr+size) is within the process's
86 * address space
87 */
/*
 * Written as (size <= fs) && (addr <= fs - size) rather than
 * (addr + size <= fs) so that addr + size cannot wrap around.
 */
88static inline int __access_ok(unsigned long addr, unsigned long size)
89{
90	const mm_segment_t fs = get_fs();
91
92	return (size <= fs) && (addr <= (fs - size));
93}
94
95/*
96 * The exception table consists of pairs of addresses: the first is the
97 * address of an instruction that is allowed to fault, and the second is
98 * the address at which the program should continue. No registers are
99 * modified, so it is entirely up to the continuation code to figure out
100 * what to do.
101 *
102 * All the routines below use bits of fixup code that are out of line
103 * with the main instruction path. This means when everything is well,
104 * we don't even have to jump over them. Further, they do not intrude
105 * on our cache or tlb entries.
106 */
107
/* One fixup pair: faulting instruction address -> recovery address. */
108struct exception_table_entry {
109	unsigned long insn, fixup;
110};
111
112extern int fixup_exception(struct pt_regs *state);
113
/*
114 * Word indices of the most/least significant halves of a 64-bit value
115 * when it is split across two 32-bit loads/stores (see __get_user_8).
116 */
114#if defined(__LITTLE_ENDIAN)
115#define __MSW	1
116#define __LSW	0
117#elif defined(__BIG_ENDIAN)
118#define __MSW	0
119#define	__LSW	1
120#else
121#error "Unknown endianness"
122#endif
123
124/*
125 * The "__xxx" versions of the user access functions do not verify the address
126 * space - it must have been done previously with a separate "access_ok()"
127 * call.
128 */
129
/*
 * Load one value of the given width (insn = lb/lh/lw/ld) from user
 * memory with SUM enabled.  On a fault, the fixup at 3: sets err to
 * -EFAULT, zeroes the result, and resumes at 2:; the __ex_table entry
 * maps the access at 1: to that fixup.
 */
130#ifdef CONFIG_MMU
131#define __get_user_asm(insn, x, ptr, err)			\
132do {								\
133	uintptr_t __tmp;					\
134	__typeof__(x) __x;					\
135	__enable_user_access();					\
136	__asm__ __volatile__ (					\
137		"1:\n"						\
138		"	" insn " %1, %3\n"			\
139		"2:\n"						\
140		"	.section .fixup,\"ax\"\n"		\
141		"	.balign 4\n"				\
142		"3:\n"						\
143		"	li %0, %4\n"				\
144		"	li %1, 0\n"				\
145		"	jump 2b, %2\n"				\
146		"	.previous\n"				\
147		"	.section __ex_table,\"a\"\n"		\
148		"	.balign " RISCV_SZPTR "\n"		\
149		"	" RISCV_PTR " 1b, 3b\n"			\
150		"	.previous"				\
151		: "+r" (err), "=&r" (__x), "=r" (__tmp)		\
152		: "m" (*(ptr)), "i" (-EFAULT));			\
153	__disable_user_access();				\
154	(x) = __x;						\
155} while (0)
156#endif /* CONFIG_MMU */
157
/*
 * 64-bit user load: a single ld on RV64; on RV32, two lw accesses into
 * the endianness-selected halves (__LSW/__MSW), with both mapped to the
 * same fixup so either faulting word yields -EFAULT and a zero result.
 */
158#ifdef CONFIG_64BIT
159#define __get_user_8(x, ptr, err) \
160	__get_user_asm("ld", x, ptr, err)
161#else /* !CONFIG_64BIT */
162#ifdef CONFIG_MMU
163#define __get_user_8(x, ptr, err)				\
164do {								\
165	u32 __user *__ptr = (u32 __user *)(ptr);		\
166	u32 __lo, __hi;						\
167	uintptr_t __tmp;					\
168	__enable_user_access();					\
169	__asm__ __volatile__ (					\
170		"1:\n"						\
171		"	lw %1, %4\n"				\
172		"2:\n"						\
173		"	lw %2, %5\n"				\
174		"3:\n"						\
175		"	.section .fixup,\"ax\"\n"		\
176		"	.balign 4\n"				\
177		"4:\n"						\
178		"	li %0, %6\n"				\
179		"	li %1, 0\n"				\
180		"	li %2, 0\n"				\
181		"	jump 3b, %3\n"				\
182		"	.previous\n"				\
183		"	.section __ex_table,\"a\"\n"		\
184		"	.balign " RISCV_SZPTR "\n"		\
185		"	" RISCV_PTR " 1b, 4b\n"			\
186		"	" RISCV_PTR " 2b, 4b\n"			\
187		"	.previous"				\
188		: "+r" (err), "=&r" (__lo), "=r" (__hi),	\
189			"=r" (__tmp)				\
190		: "m" (__ptr[__LSW]), "m" (__ptr[__MSW]),	\
191			"i" (-EFAULT));				\
192	__disable_user_access();				\
193	(x) = (__typeof__(x))((__typeof__((x)-(x)))(		\
194		(((u64)__hi << 32) | __lo)));			\
195} while (0)
196#endif /* CONFIG_MMU */
197#endif /* CONFIG_64BIT */
198
199
200/**
201 * __get_user: - Get a simple variable from user space, with less checking.
202 * @x: Variable to store result.
203 * @ptr: Source address, in user space.
204 *
205 * Context: User context only. This function may sleep.
206 *
207 * This macro copies a single simple variable from user space to kernel
208 * space. It supports simple types like char and int, but not larger
209 * data types like structures or arrays.
210 *
211 * @ptr must have pointer-to-simple-variable type, and the result of
212 * dereferencing @ptr must be assignable to @x without a cast.
213 *
214 * Caller must check the pointer with access_ok() before calling this
215 * function.
216 *
217 * Returns zero on success, or -EFAULT on error.
218 * On error, the variable @x is set to zero.
219 */
/* Dispatch on the pointee's size; on fault the fixup leaves (x) == 0
 * and __gu_err == -EFAULT. */
220#define __get_user(x, ptr)					\
221({								\
222	register long __gu_err = 0;				\
223	const __typeof__(*(ptr)) __user *__gu_ptr = (ptr);	\
224	__chk_user_ptr(__gu_ptr);				\
225	switch (sizeof(*__gu_ptr)) {				\
226	case 1:							\
227		__get_user_asm("lb", (x), __gu_ptr, __gu_err);	\
228		break;						\
229	case 2:							\
230		__get_user_asm("lh", (x), __gu_ptr, __gu_err);	\
231		break;						\
232	case 4:							\
233		__get_user_asm("lw", (x), __gu_ptr, __gu_err);	\
234		break;						\
235	case 8:							\
236		__get_user_8((x), __gu_ptr, __gu_err);		\
237		break;						\
238	default:						\
239		BUILD_BUG();					\
240	}							\
241	__gu_err;						\
242})
243
244/**
245 * get_user: - Get a simple variable from user space.
246 * @x: Variable to store result.
247 * @ptr: Source address, in user space.
248 *
249 * Context: User context only. This function may sleep.
250 *
251 * This macro copies a single simple variable from user space to kernel
252 * space. It supports simple types like char and int, but not larger
253 * data types like structures or arrays.
254 *
255 * @ptr must have pointer-to-simple-variable type, and the result of
256 * dereferencing @ptr must be assignable to @x without a cast.
257 *
258 * Returns zero on success, or -EFAULT on error.
259 * On error, the variable @x is set to zero.
260 */
/* Range-checked variant: a bad pointer yields (x) = 0 and -EFAULT
 * without touching user memory. */
261#define get_user(x, ptr)					\
262({								\
263	const __typeof__(*(ptr)) __user *__p = (ptr);		\
264	might_fault();						\
265	access_ok(VERIFY_READ, __p, sizeof(*__p)) ?		\
266		__get_user((x), __p) :				\
267		((x) = 0, -EFAULT);				\
268})
269
270
/*
 * Store one value (insn = sb/sh/sw/sd) to user memory with SUM enabled.
 * %z3 emits x0 when the value is the constant 0 ("rJ" constraint).  A
 * fault lands in the 3: fixup, which sets err = -EFAULT and resumes.
 */
271#ifdef CONFIG_MMU
272#define __put_user_asm(insn, x, ptr, err)			\
273do {								\
274	uintptr_t __tmp;					\
275	__typeof__(*(ptr)) __x = x;				\
276	__enable_user_access();					\
277	__asm__ __volatile__ (					\
278		"1:\n"						\
279		"	" insn " %z3, %2\n"			\
280		"2:\n"						\
281		"	.section .fixup,\"ax\"\n"		\
282		"	.balign 4\n"				\
283		"3:\n"						\
284		"	li %0, %4\n"				\
285		"	jump 2b, %1\n"				\
286		"	.previous\n"				\
287		"	.section __ex_table,\"a\"\n"		\
288		"	.balign " RISCV_SZPTR "\n"		\
289		"	" RISCV_PTR " 1b, 3b\n"			\
290		"	.previous"				\
291		: "+r" (err), "=r" (__tmp), "=m" (*(ptr))	\
292		: "rJ" (__x), "i" (-EFAULT));			\
293	__disable_user_access();				\
294} while (0)
295#endif /* CONFIG_MMU */
296
297
/*
 * 64-bit user store: single sd on RV64; on RV32, two sw stores to the
 * endianness-selected word slots, both covered by the same fixup.
 */
298#ifdef CONFIG_64BIT
299#define __put_user_8(x, ptr, err) \
300	__put_user_asm("sd", x, ptr, err)
301#else /* !CONFIG_64BIT */
302#ifdef CONFIG_MMU
303#define __put_user_8(x, ptr, err)				\
304do {								\
305	u32 __user *__ptr = (u32 __user *)(ptr);		\
306	u64 __x = (__typeof__((x)-(x)))(x);			\
307	uintptr_t __tmp;					\
308	__enable_user_access();					\
309	__asm__ __volatile__ (					\
310		"1:\n"						\
311		"	sw %z4, %2\n"				\
312		"2:\n"						\
313		"	sw %z5, %3\n"				\
314		"3:\n"						\
315		"	.section .fixup,\"ax\"\n"		\
316		"	.balign 4\n"				\
317		"4:\n"						\
318		"	li %0, %6\n"				\
319		"	jump 2b, %1\n"				\
320		"	.previous\n"				\
321		"	.section __ex_table,\"a\"\n"		\
322		"	.balign " RISCV_SZPTR "\n"		\
323		"	" RISCV_PTR " 1b, 4b\n"			\
324		"	" RISCV_PTR " 2b, 4b\n"			\
325		"	.previous"				\
326		: "+r" (err), "=r" (__tmp),			\
327			"=m" (__ptr[__LSW]),			\
328			"=m" (__ptr[__MSW])			\
329		: "rJ" (__x), "rJ" (__x >> 32), "i" (-EFAULT));	\
330	__disable_user_access();				\
331} while (0)
332#endif /* CONFIG_MMU */
333#endif /* CONFIG_64BIT */
334
335
336/**
337 * __put_user: - Write a simple value into user space, with less checking.
338 * @x: Value to copy to user space.
339 * @ptr: Destination address, in user space.
340 *
341 * Context: User context only. This function may sleep.
342 *
343 * This macro copies a single simple value from kernel space to user
344 * space. It supports simple types like char and int, but not larger
345 * data types like structures or arrays.
346 *
347 * @ptr must have pointer-to-simple-variable type, and @x must be assignable
348 * to the result of dereferencing @ptr.
349 *
350 * Caller must check the pointer with access_ok() before calling this
351 * function.
352 *
353 * Returns zero on success, or -EFAULT on error.
354 */
/* Size-dispatched store; __pu_err is 0 on success, -EFAULT on fault. */
355#define __put_user(x, ptr)					\
356({								\
357	register long __pu_err = 0;				\
358	__typeof__(*(ptr)) __user *__gu_ptr = (ptr);		\
359	__chk_user_ptr(__gu_ptr);				\
360	switch (sizeof(*__gu_ptr)) {				\
361	case 1:							\
362		__put_user_asm("sb", (x), __gu_ptr, __pu_err);	\
363		break;						\
364	case 2:							\
365		__put_user_asm("sh", (x), __gu_ptr, __pu_err);	\
366		break;						\
367	case 4:							\
368		__put_user_asm("sw", (x), __gu_ptr, __pu_err);	\
369		break;						\
370	case 8:							\
371		__put_user_8((x), __gu_ptr, __pu_err);		\
372		break;						\
373	default:						\
374		BUILD_BUG();					\
375	}							\
376	__pu_err;						\
377})
378
379/**
380 * put_user: - Write a simple value into user space.
381 * @x: Value to copy to user space.
382 * @ptr: Destination address, in user space.
383 *
384 * Context: User context only. This function may sleep.
385 *
386 * This macro copies a single simple value from kernel space to user
387 * space. It supports simple types like char and int, but not larger
388 * data types like structures or arrays.
389 *
390 * @ptr must have pointer-to-simple-variable type, and @x must be assignable
391 * to the result of dereferencing @ptr.
392 *
393 * Returns zero on success, or -EFAULT on error.
394 */
/* Range-checked variant: returns -EFAULT without storing if the
 * destination fails access_ok(). */
395#define put_user(x, ptr)					\
396({								\
397	__typeof__(*(ptr)) __user *__p = (ptr);			\
398	might_fault();						\
399	access_ok(VERIFY_WRITE, __p, sizeof(*__p)) ?		\
400		__put_user((x), __p) :				\
401		-EFAULT;					\
402})
403
404
/*
 * One assembly routine serves both copy directions; by convention it
 * returns the number of bytes that could NOT be copied (0 on success).
 */
405extern unsigned long __must_check __copy_user(void __user *to,
406	const void __user *from, unsigned long n);
407
408static inline unsigned long
409raw_copy_from_user(void *to, const void __user *from, unsigned long n)
410{
411	return __copy_user(to, from, n);
412}
413
414static inline unsigned long
415raw_copy_to_user(void __user *to, const void *from, unsigned long n)
416{
417	return __copy_user(to, from, n);
418}
419
420extern long strncpy_from_user(char *dest, const char __user *src, long count);
421
422extern long __must_check strlen_user(const char __user *str);
423extern long __must_check strnlen_user(const char __user *str, long n);
424
425extern
426unsigned long __must_check __clear_user(void __user *addr, unsigned long n);
427
/* Returns the number of bytes left unzeroed: n if access_ok() fails. */
428static inline
429unsigned long __must_check clear_user(void __user *to, unsigned long n)
430{
431	might_fault();
432	return access_ok(VERIFY_WRITE, to, n) ?
433		__clear_user(to, n) : n;
434}
435
436/*
437 * Atomic compare-and-exchange, but with a fixup for userspace faults. Faults
438 * will set "err" to -EFAULT, while successful accesses return the previous
439 * value.
440 */
/*
 * LR/SC compare-and-exchange against a user address, with SUM enabled
 * and a fixup that reports -EFAULT instead of oopsing.  lrb/scb are
 * pasted ordering suffixes (e.g. .aq / .rl) for the lr/sc instructions.
 *
 * NOTE(review): the __ex_table entry maps label 1b — the instruction
 * *after* the LR/SC sequence — to the fixup, rather than the lr/sc
 * accesses at 0b themselves; confirm faulting accesses are actually
 * covered.  (This macro appears to have no callers in the port.)
 */
441#ifdef CONFIG_MMU
442#define __cmpxchg_user(ptr, old, new, err, size, lrb, scb)	\
443({								\
444	__typeof__(ptr) __ptr = (ptr);				\
445	__typeof__(*(ptr)) __old = (old);			\
446	__typeof__(*(ptr)) __new = (new);			\
447	__typeof__(*(ptr)) __ret;				\
448	__typeof__(err) __err = 0;				\
449	register unsigned int __rc;				\
450	__enable_user_access();					\
451	switch (size) {						\
452	case 4:							\
453		__asm__ __volatile__ (				\
454		"0:\n"						\
455		"	lr.w" #scb " %[ret], %[ptr]\n"		\
456		"	bne          %[ret], %z[old], 1f\n"	\
457		"	sc.w" #lrb " %[rc], %z[new], %[ptr]\n"	\
458		"	bnez         %[rc], 0b\n"		\
459		"1:\n"						\
460		".section .fixup,\"ax\"\n"			\
461		".balign 4\n"					\
462		"2:\n"						\
463		"	li %[err], %[efault]\n"			\
464		"	jump 1b, %[rc]\n"			\
465		".previous\n"					\
466		".section __ex_table,\"a\"\n"			\
467		".balign " RISCV_SZPTR "\n"			\
468		"	" RISCV_PTR " 1b, 2b\n"			\
469		".previous\n"					\
470			: [ret] "=&r" (__ret),			\
471			  [rc] "=&r" (__rc),			\
472			  [ptr] "+A" (*__ptr),			\
473			  [err] "=&r" (__err)			\
474			: [old] "rJ" (__old),			\
475			  [new] "rJ" (__new),			\
476			  [efault] "i" (-EFAULT));		\
477		break;						\
478	case 8:							\
479		__asm__ __volatile__ (				\
480		"0:\n"						\
481		"	lr.d" #scb " %[ret], %[ptr]\n"		\
482		"	bne          %[ret], %z[old], 1f\n"	\
483		"	sc.d" #lrb " %[rc], %z[new], %[ptr]\n"	\
484		"	bnez         %[rc], 0b\n"		\
485		"1:\n"						\
486		".section .fixup,\"ax\"\n"			\
487		".balign 4\n"					\
488		"2:\n"						\
489		"	li %[err], %[efault]\n"			\
490		"	jump 1b, %[rc]\n"			\
491		".previous\n"					\
492		".section __ex_table,\"a\"\n"			\
493		".balign " RISCV_SZPTR "\n"			\
494		"	" RISCV_PTR " 1b, 2b\n"			\
495		".previous\n"					\
496			: [ret] "=&r" (__ret),			\
497			  [rc] "=&r" (__rc),			\
498			  [ptr] "+A" (*__ptr),			\
499			  [err] "=&r" (__err)			\
500			: [old] "rJ" (__old),			\
501			  [new] "rJ" (__new),			\
502			  [efault] "i" (-EFAULT));		\
503		break;						\
504	default:						\
505		BUILD_BUG();					\
506	}							\
507	__disable_user_access();				\
508	(err) = __err;						\
509	__ret;							\
510})
511#endif /* CONFIG_MMU */
512
513#endif /* _ASM_RISCV_UACCESS_H */
diff --git a/arch/riscv/include/asm/unistd.h b/arch/riscv/include/asm/unistd.h
new file mode 100644
index 000000000000..9f250ed007cd
--- /dev/null
+++ b/arch/riscv/include/asm/unistd.h
@@ -0,0 +1,16 @@
1/*
2 * Copyright (C) 2012 Regents of the University of California
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation, version 2.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 */
13
/* Opt in to the generic clone syscall; syscall numbers come from uapi. */
14#define __ARCH_HAVE_MMU
15#define __ARCH_WANT_SYS_CLONE
16#include <uapi/asm/unistd.h>
diff --git a/arch/riscv/include/asm/vdso.h b/arch/riscv/include/asm/vdso.h
new file mode 100644
index 000000000000..602f61257553
--- /dev/null
+++ b/arch/riscv/include/asm/vdso.h
@@ -0,0 +1,41 @@
1/*
2 * Copyright (C) 2012 ARM Limited
3 * Copyright (C) 2014 Regents of the University of California
4 * Copyright (C) 2017 SiFive
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program. If not, see <http://www.gnu.org/licenses/>.
17 */
18
19#ifndef _ASM_RISCV_VDSO_H
20#define _ASM_RISCV_VDSO_H
21
22#include <linux/types.h>
23
24struct vdso_data {
25};
26
27/*
28 * The VDSO symbols are mapped into Linux so we can just use regular symbol
29 * addressing to get their offsets in userspace. The symbols are mapped at an
30 * offset of 0, but since the linker must support setting weak undefined
31 * symbols to the absolute address 0 it also happens to support other low
32 * addresses even when the code model suggests those low addresses would not
33 * otherwise be available.
34 */
/*
 * Return the userspace address of vDSO symbol @name within the vDSO
 * mapping that starts at @base.  The vDSO is linked at address 0, so the
 * weak symbol __vdso_<name> resolves to the symbol's offset from the
 * start of the mapping; adding @base yields the user-visible address.
 */
#define VDSO_SYMBOL(base, name) \
({ \
	extern const char __vdso_##name[]; \
	(void __user *)((unsigned long)(base) + __vdso_##name); \
})
40
41#endif /* _ASM_RISCV_VDSO_H */
diff --git a/arch/riscv/include/asm/word-at-a-time.h b/arch/riscv/include/asm/word-at-a-time.h
new file mode 100644
index 000000000000..aa6238791d3e
--- /dev/null
+++ b/arch/riscv/include/asm/word-at-a-time.h
@@ -0,0 +1,55 @@
1/*
2 * Copyright (C) 2012 Regents of the University of California
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation, version 2.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 * Derived from arch/x86/include/asm/word-at-a-time.h
13 */
14
15#ifndef _ASM_RISCV_WORD_AT_A_TIME_H
16#define _ASM_RISCV_WORD_AT_A_TIME_H
17
18
19#include <linux/kernel.h>
20
/*
 * Constants used by the word-at-a-time zero-byte search:
 * one_bits  = 0x01 repeated in every byte,
 * high_bits = 0x80 repeated in every byte
 * (see REPEAT_BYTE() in the initializer below).
 */
struct word_at_a_time {
	const unsigned long one_bits, high_bits;
};

#define WORD_AT_A_TIME_CONSTANTS { REPEAT_BYTE(0x01), REPEAT_BYTE(0x80) }
26
27static inline unsigned long has_zero(unsigned long val,
28 unsigned long *bits, const struct word_at_a_time *c)
29{
30 unsigned long mask = ((val - c->one_bits) & ~val) & c->high_bits;
31 *bits = mask;
32 return mask;
33}
34
/*
 * Prepare the raw mask from has_zero() for create_zero_mask().  With the
 * subtraction-based detection used here the mask is already in the needed
 * form, so this simply returns @bits unchanged; @val and @c are unused
 * (they exist to match the generic word-at-a-time interface).
 */
static inline unsigned long prep_zero_mask(unsigned long val,
	unsigned long bits, const struct word_at_a_time *c)
{
	return bits;
}
40
/*
 * Turn the zero-byte detection mask into a byte mask: every byte below
 * the first detected zero byte becomes 0xff, the rest become 0x00.
 *
 * (bits - 1) & ~bits keeps exactly the bit positions below the lowest
 * set bit; shifting right by 7 moves each byte's high bit down so whole
 * low bytes read as 0xff.
 */
static inline unsigned long create_zero_mask(unsigned long bits)
{
	unsigned long below_lowest = (bits - 1) & ~bits;

	return below_lowest >> 7;
}
46
/* Byte index of the first zero byte, given the mask from create_zero_mask(). */
static inline unsigned long find_zero(unsigned long mask)
{
	/*
	 * fls64() returns one plus the index of the most-significant set
	 * bit (0 for an empty mask); dividing by 8 converts that bit
	 * count into a little-endian byte index.
	 */
	return fls64(mask) >> 3;
}
51
52/* The mask we created is directly usable as a bytemask */
53#define zero_bytemask(mask) (mask)
54
55#endif /* _ASM_RISCV_WORD_AT_A_TIME_H */
diff --git a/arch/riscv/include/uapi/asm/Kbuild b/arch/riscv/include/uapi/asm/Kbuild
new file mode 100644
index 000000000000..5ded96b06352
--- /dev/null
+++ b/arch/riscv/include/uapi/asm/Kbuild
@@ -0,0 +1,27 @@
# UAPI Header export list
include include/uapi/asm-generic/Kbuild.asm

# These exported headers have no RISC-V-specific content, so the
# asm-generic versions are used directly (kept alphabetized).
generic-y += errno.h
generic-y += fcntl.h
generic-y += ioctl.h
generic-y += ioctls.h
generic-y += ipcbuf.h
generic-y += mman.h
generic-y += msgbuf.h
generic-y += param.h
generic-y += poll.h
generic-y += posix_types.h
generic-y += resource.h
generic-y += sembuf.h
generic-y += setup.h
generic-y += shmbuf.h
generic-y += signal.h
generic-y += socket.h
generic-y += sockios.h
generic-y += stat.h
generic-y += statfs.h
generic-y += swab.h
generic-y += termbits.h
generic-y += termios.h
generic-y += types.h
generic-y += unistd.h
diff --git a/arch/riscv/include/uapi/asm/auxvec.h b/arch/riscv/include/uapi/asm/auxvec.h
new file mode 100644
index 000000000000..1376515547cd
--- /dev/null
+++ b/arch/riscv/include/uapi/asm/auxvec.h
@@ -0,0 +1,24 @@
1/*
2 * Copyright (C) 2012 ARM Ltd.
3 * Copyright (C) 2015 Regents of the University of California
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program. If not, see <http://www.gnu.org/licenses/>.
16 */
17
18#ifndef _UAPI_ASM_RISCV_AUXVEC_H
19#define _UAPI_ASM_RISCV_AUXVEC_H
20
21/* vDSO location */
22#define AT_SYSINFO_EHDR 33
23
24#endif /* _UAPI_ASM_RISCV_AUXVEC_H */
diff --git a/arch/riscv/include/uapi/asm/bitsperlong.h b/arch/riscv/include/uapi/asm/bitsperlong.h
new file mode 100644
index 000000000000..0b3cb52fd29d
--- /dev/null
+++ b/arch/riscv/include/uapi/asm/bitsperlong.h
@@ -0,0 +1,25 @@
1/*
2 * Copyright (C) 2012 ARM Ltd.
3 * Copyright (C) 2015 Regents of the University of California
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program. If not, see <http://www.gnu.org/licenses/>.
16 */
17
18#ifndef _UAPI_ASM_RISCV_BITSPERLONG_H
19#define _UAPI_ASM_RISCV_BITSPERLONG_H
20
21#define __BITS_PER_LONG (__SIZEOF_POINTER__ * 8)
22
23#include <asm-generic/bitsperlong.h>
24
25#endif /* _UAPI_ASM_RISCV_BITSPERLONG_H */
diff --git a/arch/riscv/include/uapi/asm/byteorder.h b/arch/riscv/include/uapi/asm/byteorder.h
new file mode 100644
index 000000000000..4ca38af2cd32
--- /dev/null
+++ b/arch/riscv/include/uapi/asm/byteorder.h
@@ -0,0 +1,23 @@
1/*
2 * Copyright (C) 2012 ARM Ltd.
3 * Copyright (C) 2015 Regents of the University of California
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program. If not, see <http://www.gnu.org/licenses/>.
16 */
17
18#ifndef _UAPI_ASM_RISCV_BYTEORDER_H
19#define _UAPI_ASM_RISCV_BYTEORDER_H
20
21#include <linux/byteorder/little_endian.h>
22
23#endif /* _UAPI_ASM_RISCV_BYTEORDER_H */
diff --git a/arch/riscv/include/uapi/asm/elf.h b/arch/riscv/include/uapi/asm/elf.h
new file mode 100644
index 000000000000..a510edfa8226
--- /dev/null
+++ b/arch/riscv/include/uapi/asm/elf.h
@@ -0,0 +1,83 @@
1/*
2 * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com>
3 * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
4 * Copyright (C) 2012 Regents of the University of California
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 */
11
12#ifndef _UAPI_ASM_ELF_H
13#define _UAPI_ASM_ELF_H
14
15#include <asm/ptrace.h>
16
17/* ELF register definitions */
18typedef unsigned long elf_greg_t;
19typedef struct user_regs_struct elf_gregset_t;
20#define ELF_NGREG (sizeof(elf_gregset_t) / sizeof(elf_greg_t))
21
22typedef union __riscv_fp_state elf_fpregset_t;
23
24#define ELF_RISCV_R_SYM(r_info) ((r_info) >> 32)
25#define ELF_RISCV_R_TYPE(r_info) ((r_info) & 0xffffffff)
26
27/*
28 * RISC-V relocation types
29 */
30
31/* Relocation types used by the dynamic linker */
32#define R_RISCV_NONE 0
33#define R_RISCV_32 1
34#define R_RISCV_64 2
35#define R_RISCV_RELATIVE 3
36#define R_RISCV_COPY 4
37#define R_RISCV_JUMP_SLOT 5
38#define R_RISCV_TLS_DTPMOD32 6
39#define R_RISCV_TLS_DTPMOD64 7
40#define R_RISCV_TLS_DTPREL32 8
41#define R_RISCV_TLS_DTPREL64 9
42#define R_RISCV_TLS_TPREL32 10
43#define R_RISCV_TLS_TPREL64 11
44
45/* Relocation types not used by the dynamic linker */
46#define R_RISCV_BRANCH 16
47#define R_RISCV_JAL 17
48#define R_RISCV_CALL 18
49#define R_RISCV_CALL_PLT 19
50#define R_RISCV_GOT_HI20 20
51#define R_RISCV_TLS_GOT_HI20 21
52#define R_RISCV_TLS_GD_HI20 22
53#define R_RISCV_PCREL_HI20 23
54#define R_RISCV_PCREL_LO12_I 24
55#define R_RISCV_PCREL_LO12_S 25
56#define R_RISCV_HI20 26
57#define R_RISCV_LO12_I 27
58#define R_RISCV_LO12_S 28
59#define R_RISCV_TPREL_HI20 29
60#define R_RISCV_TPREL_LO12_I 30
61#define R_RISCV_TPREL_LO12_S 31
62#define R_RISCV_TPREL_ADD 32
63#define R_RISCV_ADD8 33
64#define R_RISCV_ADD16 34
65#define R_RISCV_ADD32 35
66#define R_RISCV_ADD64 36
67#define R_RISCV_SUB8 37
68#define R_RISCV_SUB16 38
69#define R_RISCV_SUB32 39
70#define R_RISCV_SUB64 40
71#define R_RISCV_GNU_VTINHERIT 41
72#define R_RISCV_GNU_VTENTRY 42
73#define R_RISCV_ALIGN 43
74#define R_RISCV_RVC_BRANCH 44
75#define R_RISCV_RVC_JUMP 45
76#define R_RISCV_LUI 46
77#define R_RISCV_GPREL_I 47
78#define R_RISCV_GPREL_S 48
79#define R_RISCV_TPREL_I 49
80#define R_RISCV_TPREL_S 50
81#define R_RISCV_RELAX 51
82
83#endif /* _UAPI_ASM_ELF_H */
diff --git a/arch/riscv/include/uapi/asm/hwcap.h b/arch/riscv/include/uapi/asm/hwcap.h
new file mode 100644
index 000000000000..f333221c9ab2
--- /dev/null
+++ b/arch/riscv/include/uapi/asm/hwcap.h
@@ -0,0 +1,36 @@
1/*
2 * Copied from arch/arm64/include/asm/hwcap.h
3 *
4 * Copyright (C) 2012 ARM Ltd.
5 * Copyright (C) 2017 SiFive
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program. If not, see <http://www.gnu.org/licenses/>.
18 */
19#ifndef __UAPI_ASM_HWCAP_H
20#define __UAPI_ASM_HWCAP_H
21
22/*
23 * Linux saves the floating-point registers according to the ISA Linux is
24 * executing on, as opposed to the ISA the user program is compiled for. This
25 * is necessary for a handful of esoteric use cases: for example, userspace
26 * threading libraries must be able to examine the actual machine state in
27 * order to fully reconstruct the state of a thread.
28 */
29#define COMPAT_HWCAP_ISA_I (1 << ('I' - 'A'))
30#define COMPAT_HWCAP_ISA_M (1 << ('M' - 'A'))
31#define COMPAT_HWCAP_ISA_A (1 << ('A' - 'A'))
32#define COMPAT_HWCAP_ISA_F (1 << ('F' - 'A'))
33#define COMPAT_HWCAP_ISA_D (1 << ('D' - 'A'))
34#define COMPAT_HWCAP_ISA_C (1 << ('C' - 'A'))
35
36#endif
diff --git a/arch/riscv/include/uapi/asm/ptrace.h b/arch/riscv/include/uapi/asm/ptrace.h
new file mode 100644
index 000000000000..1a9e4cdd37e2
--- /dev/null
+++ b/arch/riscv/include/uapi/asm/ptrace.h
@@ -0,0 +1,90 @@
1/*
2 * Copyright (C) 2012 Regents of the University of California
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation, version 2.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 */
13
14#ifndef _UAPI_ASM_RISCV_PTRACE_H
15#define _UAPI_ASM_RISCV_PTRACE_H
16
17#ifndef __ASSEMBLY__
18
19#include <linux/types.h>
20
21/*
22 * User-mode register state for core dumps, ptrace, sigcontext
23 *
24 * This decouples struct pt_regs from the userspace ABI.
25 * struct user_regs_struct must form a prefix of struct pt_regs.
26 */
struct user_regs_struct {
	unsigned long pc;	/* program counter */
	unsigned long ra;	/* x1: return address */
	unsigned long sp;	/* x2: stack pointer */
	unsigned long gp;	/* x3: global pointer */
	unsigned long tp;	/* x4: thread pointer */
	/* Register names below follow the RISC-V psABI mnemonics:
	 * tN = caller-saved temporaries, sN = callee-saved, aN = arguments. */
	unsigned long t0;
	unsigned long t1;
	unsigned long t2;
	unsigned long s0;	/* s0/fp: frame pointer */
	unsigned long s1;
	unsigned long a0;
	unsigned long a1;
	unsigned long a2;
	unsigned long a3;
	unsigned long a4;
	unsigned long a5;
	unsigned long a6;
	unsigned long a7;
	unsigned long s2;
	unsigned long s3;
	unsigned long s4;
	unsigned long s5;
	unsigned long s6;
	unsigned long s7;
	unsigned long s8;
	unsigned long s9;
	unsigned long s10;
	unsigned long s11;
	unsigned long t3;
	unsigned long t4;
	unsigned long t5;
	unsigned long t6;
};
61
/* F-extension state: 32 single-precision FP registers plus the FP CSR. */
struct __riscv_f_ext_state {
	__u32 f[32];
	__u32 fcsr;
};
66
/* D-extension state: 32 double-precision FP registers plus the FP CSR. */
struct __riscv_d_ext_state {
	__u64 f[32];
	__u32 fcsr;
};
71
/* Q-extension state: 32 quad-precision FP registers (two __u64 each). */
struct __riscv_q_ext_state {
	__u64 f[64] __attribute__((aligned(16)));
	__u32 fcsr;
	/*
	 * Reserved for expansion of sigcontext structure.  Currently zeroed
	 * upon signal, and must be zero upon sigreturn.
	 */
	__u32 reserved[3];
};
81
/*
 * All supported floating-point state layouts overlaid; which member is
 * valid depends on the FP ISA the kernel is running with (Linux saves FP
 * state according to the ISA it executes on, per asm/hwcap.h).
 */
union __riscv_fp_state {
	struct __riscv_f_ext_state f;
	struct __riscv_d_ext_state d;
	struct __riscv_q_ext_state q;
};
87
88#endif /* __ASSEMBLY__ */
89
90#endif /* _UAPI_ASM_RISCV_PTRACE_H */
diff --git a/arch/riscv/include/uapi/asm/sigcontext.h b/arch/riscv/include/uapi/asm/sigcontext.h
new file mode 100644
index 000000000000..ed7372b277fa
--- /dev/null
+++ b/arch/riscv/include/uapi/asm/sigcontext.h
@@ -0,0 +1,30 @@
1/*
2 * Copyright (C) 2012 Regents of the University of California
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation, version 2.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 */
13
14#ifndef _UAPI_ASM_RISCV_SIGCONTEXT_H
15#define _UAPI_ASM_RISCV_SIGCONTEXT_H
16
17#include <asm/ptrace.h>
18
19/*
20 * Signal context structure
21 *
22 * This contains the context saved before a signal handler is invoked;
23 * it is restored by sys_sigreturn / sys_rt_sigreturn.
24 */
struct sigcontext {
	struct user_regs_struct sc_regs;	/* general-purpose registers */
	union __riscv_fp_state sc_fpregs;	/* floating-point state */
};
29
30#endif /* _UAPI_ASM_RISCV_SIGCONTEXT_H */
diff --git a/arch/riscv/include/uapi/asm/siginfo.h b/arch/riscv/include/uapi/asm/siginfo.h
new file mode 100644
index 000000000000..f96849aac662
--- /dev/null
+++ b/arch/riscv/include/uapi/asm/siginfo.h
@@ -0,0 +1,24 @@
1/*
2 * Copyright (C) 2012 ARM Ltd.
3 * Copyright (C) 2016 SiFive, Inc.
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program. If not, see <http://www.gnu.org/licenses/>.
16 */
17#ifndef __ASM_SIGINFO_H
18#define __ASM_SIGINFO_H
19
20#define __ARCH_SI_PREAMBLE_SIZE (__SIZEOF_POINTER__ == 4 ? 12 : 16)
21
22#include <asm-generic/siginfo.h>
23
24#endif
diff --git a/arch/riscv/include/uapi/asm/ucontext.h b/arch/riscv/include/uapi/asm/ucontext.h
new file mode 100644
index 000000000000..1fae8b1697e0
--- /dev/null
+++ b/arch/riscv/include/uapi/asm/ucontext.h
@@ -0,0 +1,45 @@
1/*
2 * Copyright (C) 2012 ARM Ltd.
3 * Copyright (C) 2017 SiFive, Inc.
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program. If not, see <http://www.gnu.org/licenses/>.
16 *
17 * This file was copied from arch/arm64/include/uapi/asm/ucontext.h
18 */
19#ifndef _UAPI__ASM_UCONTEXT_H
20#define _UAPI__ASM_UCONTEXT_H
21
22#include <linux/types.h>
23
struct ucontext {
	unsigned long uc_flags;
	struct ucontext *uc_link;
	stack_t uc_stack;
	sigset_t uc_sigmask;
	/*
	 * There's some padding here to allow sigset_t to be expanded in the
	 * future.  Though this is unlikely, other architectures put
	 * uc_sigmask at the end of this structure and explicitly state it
	 * can be expanded, so we didn't want to box ourselves in here.
	 */
	__u8 __unused[1024 / 8 - sizeof(sigset_t)];
	/*
	 * We can't put uc_sigmask at the end of this structure because we
	 * need to be able to expand sigcontext in the future.  For example,
	 * the vector ISA extension will almost certainly add ISA state.  We
	 * want to ensure all user-visible ISA state can be saved and
	 * restored via a ucontext, so we're putting this at the end in order
	 * to allow for infinite extensibility.  Since we know this will be
	 * extended and we assume sigset_t won't be extended an extreme
	 * amount, we're prioritizing this.
	 */
	struct sigcontext uc_mcontext;
};
44
45#endif /* _UAPI__ASM_UCONTEXT_H */
diff --git a/arch/riscv/kernel/.gitignore b/arch/riscv/kernel/.gitignore
new file mode 100644
index 000000000000..b51634f6a7cd
--- /dev/null
+++ b/arch/riscv/kernel/.gitignore
@@ -0,0 +1 @@
/vmlinux.lds
diff --git a/arch/riscv/kernel/Makefile b/arch/riscv/kernel/Makefile
new file mode 100644
index 000000000000..ab8baf7bd142
--- /dev/null
+++ b/arch/riscv/kernel/Makefile
@@ -0,0 +1,33 @@
#
# Makefile for the RISC-V Linux kernel
#

# Built for the build but not part of the kernel's obj-y link list.
extra-y += head.o
extra-y += vmlinux.lds

obj-y	+= cpu.o
obj-y	+= cpufeature.o
obj-y	+= entry.o
obj-y	+= irq.o
obj-y	+= process.o
obj-y	+= ptrace.o
obj-y	+= reset.o
obj-y	+= setup.o
obj-y	+= signal.o
obj-y	+= syscall_table.o
obj-y	+= sys_riscv.o
obj-y	+= time.o
obj-y	+= traps.o
obj-y	+= riscv_ksyms.o
obj-y	+= stacktrace.o
obj-y	+= vdso.o
obj-y	+= cacheinfo.o
obj-y	+= vdso/

# setup.o is built with the medany code model; NOTE(review): presumably
# required for early-boot code addressing -- confirm against arch docs.
CFLAGS_setup.o := -mcmodel=medany

obj-$(CONFIG_SMP)	+= smpboot.o
obj-$(CONFIG_SMP)	+= smp.o
obj-$(CONFIG_MODULES)	+= module.o

# The spurious trailing "clean:" target (a rule with no recipe) was
# removed: kbuild provides the clean machinery itself, and an empty
# target here can shadow it.
diff --git a/arch/riscv/kernel/asm-offsets.c b/arch/riscv/kernel/asm-offsets.c
new file mode 100644
index 000000000000..6a92a2fe198e
--- /dev/null
+++ b/arch/riscv/kernel/asm-offsets.c
@@ -0,0 +1,322 @@
1/*
2 * Copyright (C) 2012 Regents of the University of California
3 * Copyright (C) 2017 SiFive
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License
7 * as published by the Free Software Foundation, version 2.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 */
14
15#define GENERATING_ASM_OFFSETS
16
17#include <linux/kbuild.h>
18#include <linux/sched.h>
19#include <asm/thread_info.h>
20#include <asm/ptrace.h>
21
22void asm_offsets(void)
23{
24 OFFSET(TASK_THREAD_RA, task_struct, thread.ra);
25 OFFSET(TASK_THREAD_SP, task_struct, thread.sp);
26 OFFSET(TASK_THREAD_S0, task_struct, thread.s[0]);
27 OFFSET(TASK_THREAD_S1, task_struct, thread.s[1]);
28 OFFSET(TASK_THREAD_S2, task_struct, thread.s[2]);
29 OFFSET(TASK_THREAD_S3, task_struct, thread.s[3]);
30 OFFSET(TASK_THREAD_S4, task_struct, thread.s[4]);
31 OFFSET(TASK_THREAD_S5, task_struct, thread.s[5]);
32 OFFSET(TASK_THREAD_S6, task_struct, thread.s[6]);
33 OFFSET(TASK_THREAD_S7, task_struct, thread.s[7]);
34 OFFSET(TASK_THREAD_S8, task_struct, thread.s[8]);
35 OFFSET(TASK_THREAD_S9, task_struct, thread.s[9]);
36 OFFSET(TASK_THREAD_S10, task_struct, thread.s[10]);
37 OFFSET(TASK_THREAD_S11, task_struct, thread.s[11]);
38 OFFSET(TASK_THREAD_SP, task_struct, thread.sp);
39 OFFSET(TASK_STACK, task_struct, stack);
40 OFFSET(TASK_TI, task_struct, thread_info);
41 OFFSET(TASK_TI_FLAGS, task_struct, thread_info.flags);
42 OFFSET(TASK_TI_KERNEL_SP, task_struct, thread_info.kernel_sp);
43 OFFSET(TASK_TI_USER_SP, task_struct, thread_info.user_sp);
44 OFFSET(TASK_TI_CPU, task_struct, thread_info.cpu);
45
46 OFFSET(TASK_THREAD_F0, task_struct, thread.fstate.f[0]);
47 OFFSET(TASK_THREAD_F1, task_struct, thread.fstate.f[1]);
48 OFFSET(TASK_THREAD_F2, task_struct, thread.fstate.f[2]);
49 OFFSET(TASK_THREAD_F3, task_struct, thread.fstate.f[3]);
50 OFFSET(TASK_THREAD_F4, task_struct, thread.fstate.f[4]);
51 OFFSET(TASK_THREAD_F5, task_struct, thread.fstate.f[5]);
52 OFFSET(TASK_THREAD_F6, task_struct, thread.fstate.f[6]);
53 OFFSET(TASK_THREAD_F7, task_struct, thread.fstate.f[7]);
54 OFFSET(TASK_THREAD_F8, task_struct, thread.fstate.f[8]);
55 OFFSET(TASK_THREAD_F9, task_struct, thread.fstate.f[9]);
56 OFFSET(TASK_THREAD_F10, task_struct, thread.fstate.f[10]);
57 OFFSET(TASK_THREAD_F11, task_struct, thread.fstate.f[11]);
58 OFFSET(TASK_THREAD_F12, task_struct, thread.fstate.f[12]);
59 OFFSET(TASK_THREAD_F13, task_struct, thread.fstate.f[13]);
60 OFFSET(TASK_THREAD_F14, task_struct, thread.fstate.f[14]);
61 OFFSET(TASK_THREAD_F15, task_struct, thread.fstate.f[15]);
62 OFFSET(TASK_THREAD_F16, task_struct, thread.fstate.f[16]);
63 OFFSET(TASK_THREAD_F17, task_struct, thread.fstate.f[17]);
64 OFFSET(TASK_THREAD_F18, task_struct, thread.fstate.f[18]);
65 OFFSET(TASK_THREAD_F19, task_struct, thread.fstate.f[19]);
66 OFFSET(TASK_THREAD_F20, task_struct, thread.fstate.f[20]);
67 OFFSET(TASK_THREAD_F21, task_struct, thread.fstate.f[21]);
68 OFFSET(TASK_THREAD_F22, task_struct, thread.fstate.f[22]);
69 OFFSET(TASK_THREAD_F23, task_struct, thread.fstate.f[23]);
70 OFFSET(TASK_THREAD_F24, task_struct, thread.fstate.f[24]);
71 OFFSET(TASK_THREAD_F25, task_struct, thread.fstate.f[25]);
72 OFFSET(TASK_THREAD_F26, task_struct, thread.fstate.f[26]);
73 OFFSET(TASK_THREAD_F27, task_struct, thread.fstate.f[27]);
74 OFFSET(TASK_THREAD_F28, task_struct, thread.fstate.f[28]);
75 OFFSET(TASK_THREAD_F29, task_struct, thread.fstate.f[29]);
76 OFFSET(TASK_THREAD_F30, task_struct, thread.fstate.f[30]);
77 OFFSET(TASK_THREAD_F31, task_struct, thread.fstate.f[31]);
78 OFFSET(TASK_THREAD_FCSR, task_struct, thread.fstate.fcsr);
79
80 DEFINE(PT_SIZE, sizeof(struct pt_regs));
81 OFFSET(PT_SEPC, pt_regs, sepc);
82 OFFSET(PT_RA, pt_regs, ra);
83 OFFSET(PT_FP, pt_regs, s0);
84 OFFSET(PT_S0, pt_regs, s0);
85 OFFSET(PT_S1, pt_regs, s1);
86 OFFSET(PT_S2, pt_regs, s2);
87 OFFSET(PT_S3, pt_regs, s3);
88 OFFSET(PT_S4, pt_regs, s4);
89 OFFSET(PT_S5, pt_regs, s5);
90 OFFSET(PT_S6, pt_regs, s6);
91 OFFSET(PT_S7, pt_regs, s7);
92 OFFSET(PT_S8, pt_regs, s8);
93 OFFSET(PT_S9, pt_regs, s9);
94 OFFSET(PT_S10, pt_regs, s10);
95 OFFSET(PT_S11, pt_regs, s11);
96 OFFSET(PT_SP, pt_regs, sp);
97 OFFSET(PT_TP, pt_regs, tp);
98 OFFSET(PT_A0, pt_regs, a0);
99 OFFSET(PT_A1, pt_regs, a1);
100 OFFSET(PT_A2, pt_regs, a2);
101 OFFSET(PT_A3, pt_regs, a3);
102 OFFSET(PT_A4, pt_regs, a4);
103 OFFSET(PT_A5, pt_regs, a5);
104 OFFSET(PT_A6, pt_regs, a6);
105 OFFSET(PT_A7, pt_regs, a7);
106 OFFSET(PT_T0, pt_regs, t0);
107 OFFSET(PT_T1, pt_regs, t1);
108 OFFSET(PT_T2, pt_regs, t2);
109 OFFSET(PT_T3, pt_regs, t3);
110 OFFSET(PT_T4, pt_regs, t4);
111 OFFSET(PT_T5, pt_regs, t5);
112 OFFSET(PT_T6, pt_regs, t6);
113 OFFSET(PT_GP, pt_regs, gp);
114 OFFSET(PT_ORIG_A0, pt_regs, orig_a0);
115 OFFSET(PT_SSTATUS, pt_regs, sstatus);
116 OFFSET(PT_SBADADDR, pt_regs, sbadaddr);
117 OFFSET(PT_SCAUSE, pt_regs, scause);
118
119 /*
120 * THREAD_{F,X}* might be larger than a S-type offset can handle, but
121 * these are used in performance-sensitive assembly so we can't resort
122 * to loading the long immediate every time.
123 */
124 DEFINE(TASK_THREAD_RA_RA,
125 offsetof(struct task_struct, thread.ra)
126 - offsetof(struct task_struct, thread.ra)
127 );
128 DEFINE(TASK_THREAD_SP_RA,
129 offsetof(struct task_struct, thread.sp)
130 - offsetof(struct task_struct, thread.ra)
131 );
132 DEFINE(TASK_THREAD_S0_RA,
133 offsetof(struct task_struct, thread.s[0])
134 - offsetof(struct task_struct, thread.ra)
135 );
136 DEFINE(TASK_THREAD_S1_RA,
137 offsetof(struct task_struct, thread.s[1])
138 - offsetof(struct task_struct, thread.ra)
139 );
140 DEFINE(TASK_THREAD_S2_RA,
141 offsetof(struct task_struct, thread.s[2])
142 - offsetof(struct task_struct, thread.ra)
143 );
144 DEFINE(TASK_THREAD_S3_RA,
145 offsetof(struct task_struct, thread.s[3])
146 - offsetof(struct task_struct, thread.ra)
147 );
148 DEFINE(TASK_THREAD_S4_RA,
149 offsetof(struct task_struct, thread.s[4])
150 - offsetof(struct task_struct, thread.ra)
151 );
152 DEFINE(TASK_THREAD_S5_RA,
153 offsetof(struct task_struct, thread.s[5])
154 - offsetof(struct task_struct, thread.ra)
155 );
156 DEFINE(TASK_THREAD_S6_RA,
157 offsetof(struct task_struct, thread.s[6])
158 - offsetof(struct task_struct, thread.ra)
159 );
160 DEFINE(TASK_THREAD_S7_RA,
161 offsetof(struct task_struct, thread.s[7])
162 - offsetof(struct task_struct, thread.ra)
163 );
164 DEFINE(TASK_THREAD_S8_RA,
165 offsetof(struct task_struct, thread.s[8])
166 - offsetof(struct task_struct, thread.ra)
167 );
168 DEFINE(TASK_THREAD_S9_RA,
169 offsetof(struct task_struct, thread.s[9])
170 - offsetof(struct task_struct, thread.ra)
171 );
172 DEFINE(TASK_THREAD_S10_RA,
173 offsetof(struct task_struct, thread.s[10])
174 - offsetof(struct task_struct, thread.ra)
175 );
176 DEFINE(TASK_THREAD_S11_RA,
177 offsetof(struct task_struct, thread.s[11])
178 - offsetof(struct task_struct, thread.ra)
179 );
180
181 DEFINE(TASK_THREAD_F0_F0,
182 offsetof(struct task_struct, thread.fstate.f[0])
183 - offsetof(struct task_struct, thread.fstate.f[0])
184 );
185 DEFINE(TASK_THREAD_F1_F0,
186 offsetof(struct task_struct, thread.fstate.f[1])
187 - offsetof(struct task_struct, thread.fstate.f[0])
188 );
189 DEFINE(TASK_THREAD_F2_F0,
190 offsetof(struct task_struct, thread.fstate.f[2])
191 - offsetof(struct task_struct, thread.fstate.f[0])
192 );
193 DEFINE(TASK_THREAD_F3_F0,
194 offsetof(struct task_struct, thread.fstate.f[3])
195 - offsetof(struct task_struct, thread.fstate.f[0])
196 );
197 DEFINE(TASK_THREAD_F4_F0,
198 offsetof(struct task_struct, thread.fstate.f[4])
199 - offsetof(struct task_struct, thread.fstate.f[0])
200 );
201 DEFINE(TASK_THREAD_F5_F0,
202 offsetof(struct task_struct, thread.fstate.f[5])
203 - offsetof(struct task_struct, thread.fstate.f[0])
204 );
205 DEFINE(TASK_THREAD_F6_F0,
206 offsetof(struct task_struct, thread.fstate.f[6])
207 - offsetof(struct task_struct, thread.fstate.f[0])
208 );
209 DEFINE(TASK_THREAD_F7_F0,
210 offsetof(struct task_struct, thread.fstate.f[7])
211 - offsetof(struct task_struct, thread.fstate.f[0])
212 );
213 DEFINE(TASK_THREAD_F8_F0,
214 offsetof(struct task_struct, thread.fstate.f[8])
215 - offsetof(struct task_struct, thread.fstate.f[0])
216 );
217 DEFINE(TASK_THREAD_F9_F0,
218 offsetof(struct task_struct, thread.fstate.f[9])
219 - offsetof(struct task_struct, thread.fstate.f[0])
220 );
221 DEFINE(TASK_THREAD_F10_F0,
222 offsetof(struct task_struct, thread.fstate.f[10])
223 - offsetof(struct task_struct, thread.fstate.f[0])
224 );
225 DEFINE(TASK_THREAD_F11_F0,
226 offsetof(struct task_struct, thread.fstate.f[11])
227 - offsetof(struct task_struct, thread.fstate.f[0])
228 );
229 DEFINE(TASK_THREAD_F12_F0,
230 offsetof(struct task_struct, thread.fstate.f[12])
231 - offsetof(struct task_struct, thread.fstate.f[0])
232 );
233 DEFINE(TASK_THREAD_F13_F0,
234 offsetof(struct task_struct, thread.fstate.f[13])
235 - offsetof(struct task_struct, thread.fstate.f[0])
236 );
237 DEFINE(TASK_THREAD_F14_F0,
238 offsetof(struct task_struct, thread.fstate.f[14])
239 - offsetof(struct task_struct, thread.fstate.f[0])
240 );
241 DEFINE(TASK_THREAD_F15_F0,
242 offsetof(struct task_struct, thread.fstate.f[15])
243 - offsetof(struct task_struct, thread.fstate.f[0])
244 );
245 DEFINE(TASK_THREAD_F16_F0,
246 offsetof(struct task_struct, thread.fstate.f[16])
247 - offsetof(struct task_struct, thread.fstate.f[0])
248 );
249 DEFINE(TASK_THREAD_F17_F0,
250 offsetof(struct task_struct, thread.fstate.f[17])
251 - offsetof(struct task_struct, thread.fstate.f[0])
252 );
253 DEFINE(TASK_THREAD_F18_F0,
254 offsetof(struct task_struct, thread.fstate.f[18])
255 - offsetof(struct task_struct, thread.fstate.f[0])
256 );
257 DEFINE(TASK_THREAD_F19_F0,
258 offsetof(struct task_struct, thread.fstate.f[19])
259 - offsetof(struct task_struct, thread.fstate.f[0])
260 );
261 DEFINE(TASK_THREAD_F20_F0,
262 offsetof(struct task_struct, thread.fstate.f[20])
263 - offsetof(struct task_struct, thread.fstate.f[0])
264 );
265 DEFINE(TASK_THREAD_F21_F0,
266 offsetof(struct task_struct, thread.fstate.f[21])
267 - offsetof(struct task_struct, thread.fstate.f[0])
268 );
269 DEFINE(TASK_THREAD_F22_F0,
270 offsetof(struct task_struct, thread.fstate.f[22])
271 - offsetof(struct task_struct, thread.fstate.f[0])
272 );
273 DEFINE(TASK_THREAD_F23_F0,
274 offsetof(struct task_struct, thread.fstate.f[23])
275 - offsetof(struct task_struct, thread.fstate.f[0])
276 );
277 DEFINE(TASK_THREAD_F24_F0,
278 offsetof(struct task_struct, thread.fstate.f[24])
279 - offsetof(struct task_struct, thread.fstate.f[0])
280 );
281 DEFINE(TASK_THREAD_F25_F0,
282 offsetof(struct task_struct, thread.fstate.f[25])
283 - offsetof(struct task_struct, thread.fstate.f[0])
284 );
285 DEFINE(TASK_THREAD_F26_F0,
286 offsetof(struct task_struct, thread.fstate.f[26])
287 - offsetof(struct task_struct, thread.fstate.f[0])
288 );
289 DEFINE(TASK_THREAD_F27_F0,
290 offsetof(struct task_struct, thread.fstate.f[27])
291 - offsetof(struct task_struct, thread.fstate.f[0])
292 );
293 DEFINE(TASK_THREAD_F28_F0,
294 offsetof(struct task_struct, thread.fstate.f[28])
295 - offsetof(struct task_struct, thread.fstate.f[0])
296 );
297 DEFINE(TASK_THREAD_F29_F0,
298 offsetof(struct task_struct, thread.fstate.f[29])
299 - offsetof(struct task_struct, thread.fstate.f[0])
300 );
301 DEFINE(TASK_THREAD_F30_F0,
302 offsetof(struct task_struct, thread.fstate.f[30])
303 - offsetof(struct task_struct, thread.fstate.f[0])
304 );
305 DEFINE(TASK_THREAD_F31_F0,
306 offsetof(struct task_struct, thread.fstate.f[31])
307 - offsetof(struct task_struct, thread.fstate.f[0])
308 );
309 DEFINE(TASK_THREAD_FCSR_F0,
310 offsetof(struct task_struct, thread.fstate.fcsr)
311 - offsetof(struct task_struct, thread.fstate.f[0])
312 );
313
314 /* The assembler needs access to THREAD_SIZE as well. */
315 DEFINE(ASM_THREAD_SIZE, THREAD_SIZE);
316
317 /*
318 * We allocate a pt_regs on the stack when entering the kernel. This
319 * ensures the alignment is sane.
320 */
321 DEFINE(PT_SIZE_ON_STACK, ALIGN(sizeof(struct pt_regs), STACK_ALIGN));
322}
diff --git a/arch/riscv/kernel/cacheinfo.c b/arch/riscv/kernel/cacheinfo.c
new file mode 100644
index 000000000000..10ed2749e246
--- /dev/null
+++ b/arch/riscv/kernel/cacheinfo.c
@@ -0,0 +1,105 @@
1/*
2 * Copyright (C) 2017 SiFive
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation, version 2.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 */
13
14#include <linux/cacheinfo.h>
15#include <linux/cpu.h>
16#include <linux/of.h>
17#include <linux/of_device.h>
18
19static void ci_leaf_init(struct cacheinfo *this_leaf,
20 struct device_node *node,
21 enum cache_type type, unsigned int level)
22{
23 this_leaf->of_node = node;
24 this_leaf->level = level;
25 this_leaf->type = type;
26 /* not a sector cache */
27 this_leaf->physical_line_partition = 1;
28 /* TODO: Add to DTS */
29 this_leaf->attributes =
30 CACHE_WRITE_BACK
31 | CACHE_READ_ALLOCATE
32 | CACHE_WRITE_ALLOCATE;
33}
34
35static int __init_cache_level(unsigned int cpu)
36{
37 struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
38 struct device_node *np = of_cpu_device_node_get(cpu);
39 int levels = 0, leaves = 0, level;
40
41 if (of_property_read_bool(np, "cache-size"))
42 ++leaves;
43 if (of_property_read_bool(np, "i-cache-size"))
44 ++leaves;
45 if (of_property_read_bool(np, "d-cache-size"))
46 ++leaves;
47 if (leaves > 0)
48 levels = 1;
49
50 while ((np = of_find_next_cache_node(np))) {
51 if (!of_device_is_compatible(np, "cache"))
52 break;
53 if (of_property_read_u32(np, "cache-level", &level))
54 break;
55 if (level <= levels)
56 break;
57 if (of_property_read_bool(np, "cache-size"))
58 ++leaves;
59 if (of_property_read_bool(np, "i-cache-size"))
60 ++leaves;
61 if (of_property_read_bool(np, "d-cache-size"))
62 ++leaves;
63 levels = level;
64 }
65
66 this_cpu_ci->num_levels = levels;
67 this_cpu_ci->num_leaves = leaves;
68 return 0;
69}
70
71static int __populate_cache_leaves(unsigned int cpu)
72{
73 struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
74 struct cacheinfo *this_leaf = this_cpu_ci->info_list;
75 struct device_node *np = of_cpu_device_node_get(cpu);
76 int levels = 1, level = 1;
77
78 if (of_property_read_bool(np, "cache-size"))
79 ci_leaf_init(this_leaf++, np, CACHE_TYPE_UNIFIED, level);
80 if (of_property_read_bool(np, "i-cache-size"))
81 ci_leaf_init(this_leaf++, np, CACHE_TYPE_INST, level);
82 if (of_property_read_bool(np, "d-cache-size"))
83 ci_leaf_init(this_leaf++, np, CACHE_TYPE_DATA, level);
84
85 while ((np = of_find_next_cache_node(np))) {
86 if (!of_device_is_compatible(np, "cache"))
87 break;
88 if (of_property_read_u32(np, "cache-level", &level))
89 break;
90 if (level <= levels)
91 break;
92 if (of_property_read_bool(np, "cache-size"))
93 ci_leaf_init(this_leaf++, np, CACHE_TYPE_UNIFIED, level);
94 if (of_property_read_bool(np, "i-cache-size"))
95 ci_leaf_init(this_leaf++, np, CACHE_TYPE_INST, level);
96 if (of_property_read_bool(np, "d-cache-size"))
97 ci_leaf_init(this_leaf++, np, CACHE_TYPE_DATA, level);
98 levels = level;
99 }
100
101 return 0;
102}
103
104DEFINE_SMP_CALL_CACHE_FUNCTION(init_cache_level)
105DEFINE_SMP_CALL_CACHE_FUNCTION(populate_cache_leaves)
diff --git a/arch/riscv/kernel/cpu.c b/arch/riscv/kernel/cpu.c
new file mode 100644
index 000000000000..ca6c81e54e37
--- /dev/null
+++ b/arch/riscv/kernel/cpu.c
@@ -0,0 +1,108 @@
1/*
2 * Copyright (C) 2012 Regents of the University of California
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation, version 2.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 */
13
14#include <linux/init.h>
15#include <linux/seq_file.h>
16#include <linux/of.h>
17
18/* Return -1 if not a valid hart */
19int riscv_of_processor_hart(struct device_node *node)
20{
21 const char *isa, *status;
22 u32 hart;
23
24 if (!of_device_is_compatible(node, "riscv")) {
25 pr_warn("Found incompatible CPU\n");
26 return -(ENODEV);
27 }
28
29 if (of_property_read_u32(node, "reg", &hart)) {
30 pr_warn("Found CPU without hart ID\n");
31 return -(ENODEV);
32 }
33 if (hart >= NR_CPUS) {
34 pr_info("Found hart ID %d, which is above NR_CPUs. Disabling this hart\n", hart);
35 return -(ENODEV);
36 }
37
38 if (of_property_read_string(node, "status", &status)) {
39 pr_warn("CPU with hartid=%d has no \"status\" property\n", hart);
40 return -(ENODEV);
41 }
42 if (strcmp(status, "okay")) {
43 pr_info("CPU with hartid=%d has a non-okay status of \"%s\"\n", hart, status);
44 return -(ENODEV);
45 }
46
47 if (of_property_read_string(node, "riscv,isa", &isa)) {
48 pr_warn("CPU with hartid=%d has no \"riscv,isa\" property\n", hart);
49 return -(ENODEV);
50 }
51 if (isa[0] != 'r' || isa[1] != 'v') {
52 pr_warn("CPU with hartid=%d has an invalid ISA of \"%s\"\n", hart, isa);
53 return -(ENODEV);
54 }
55
56 return hart;
57}
58
59#ifdef CONFIG_PROC_FS
60
61static void *c_start(struct seq_file *m, loff_t *pos)
62{
63 *pos = cpumask_next(*pos - 1, cpu_online_mask);
64 if ((*pos) < nr_cpu_ids)
65 return (void *)(uintptr_t)(1 + *pos);
66 return NULL;
67}
68
69static void *c_next(struct seq_file *m, void *v, loff_t *pos)
70{
71 (*pos)++;
72 return c_start(m, pos);
73}
74
/* Nothing to release at the end of a /proc/cpuinfo sequence. */
static void c_stop(struct seq_file *m, void *v)
{
}
78
79static int c_show(struct seq_file *m, void *v)
80{
81 unsigned long hart_id = (unsigned long)v - 1;
82 struct device_node *node = of_get_cpu_node(hart_id, NULL);
83 const char *compat, *isa, *mmu;
84
85 seq_printf(m, "hart\t: %lu\n", hart_id);
86 if (!of_property_read_string(node, "riscv,isa", &isa)
87 && isa[0] == 'r'
88 && isa[1] == 'v')
89 seq_printf(m, "isa\t: %s\n", isa);
90 if (!of_property_read_string(node, "mmu-type", &mmu)
91 && !strncmp(mmu, "riscv,", 6))
92 seq_printf(m, "mmu\t: %s\n", mmu+6);
93 if (!of_property_read_string(node, "compatible", &compat)
94 && strcmp(compat, "riscv"))
95 seq_printf(m, "uarch\t: %s\n", compat);
96 seq_puts(m, "\n");
97
98 return 0;
99}
100
101const struct seq_operations cpuinfo_op = {
102 .start = c_start,
103 .next = c_next,
104 .stop = c_stop,
105 .show = c_show
106};
107
108#endif /* CONFIG_PROC_FS */
diff --git a/arch/riscv/kernel/cpufeature.c b/arch/riscv/kernel/cpufeature.c
new file mode 100644
index 000000000000..17011a870044
--- /dev/null
+++ b/arch/riscv/kernel/cpufeature.c
@@ -0,0 +1,61 @@
1/*
2 * Copied from arch/arm64/kernel/cpufeature.c
3 *
4 * Copyright (C) 2015 ARM Ltd.
5 * Copyright (C) 2017 SiFive
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program. If not, see <http://www.gnu.org/licenses/>.
18 */
19
20#include <linux/of.h>
21#include <asm/processor.h>
22#include <asm/hwcap.h>
23
24unsigned long elf_hwcap __read_mostly;
25
26void riscv_fill_hwcap(void)
27{
28 struct device_node *node;
29 const char *isa;
30 size_t i;
31 static unsigned long isa2hwcap[256] = {0};
32
33 isa2hwcap['i'] = isa2hwcap['I'] = COMPAT_HWCAP_ISA_I;
34 isa2hwcap['m'] = isa2hwcap['M'] = COMPAT_HWCAP_ISA_M;
35 isa2hwcap['a'] = isa2hwcap['A'] = COMPAT_HWCAP_ISA_A;
36 isa2hwcap['f'] = isa2hwcap['F'] = COMPAT_HWCAP_ISA_F;
37 isa2hwcap['d'] = isa2hwcap['D'] = COMPAT_HWCAP_ISA_D;
38 isa2hwcap['c'] = isa2hwcap['C'] = COMPAT_HWCAP_ISA_C;
39
40 elf_hwcap = 0;
41
42 /*
43 * We don't support running Linux on hertergenous ISA systems. For
44 * now, we just check the ISA of the first processor.
45 */
46 node = of_find_node_by_type(NULL, "cpu");
47 if (!node) {
48 pr_warning("Unable to find \"cpu\" devicetree entry");
49 return;
50 }
51
52 if (of_property_read_string(node, "riscv,isa", &isa)) {
53 pr_warning("Unable to find \"riscv,isa\" devicetree entry");
54 return;
55 }
56
57 for (i = 0; i < strlen(isa); ++i)
58 elf_hwcap |= isa2hwcap[(unsigned char)(isa[i])];
59
60 pr_info("elf_hwcap is 0x%lx", elf_hwcap);
61}
diff --git a/arch/riscv/kernel/entry.S b/arch/riscv/kernel/entry.S
new file mode 100644
index 000000000000..20ee86f782a9
--- /dev/null
+++ b/arch/riscv/kernel/entry.S
@@ -0,0 +1,464 @@
1/*
2 * Copyright (C) 2012 Regents of the University of California
3 * Copyright (C) 2017 SiFive
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License
7 * as published by the Free Software Foundation, version 2.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 */
14
15#include <linux/init.h>
16#include <linux/linkage.h>
17
18#include <asm/asm.h>
19#include <asm/csr.h>
20#include <asm/unistd.h>
21#include <asm/thread_info.h>
22#include <asm/asm-offsets.h>
23
24 .text
25 .altmacro
26
27/*
28 * Prepares to enter a system call or exception by saving all registers to the
29 * stack.
30 */
31 .macro SAVE_ALL
32 LOCAL _restore_kernel_tpsp
33 LOCAL _save_context
34
35 /*
36 * If coming from userspace, preserve the user thread pointer and load
37 * the kernel thread pointer. If we came from the kernel, sscratch
38 * will contain 0, and we should continue on the current TP.
39 */
40 csrrw tp, sscratch, tp
41 bnez tp, _save_context
42
43_restore_kernel_tpsp:
44 csrr tp, sscratch
45 REG_S sp, TASK_TI_KERNEL_SP(tp)
46_save_context:
47 REG_S sp, TASK_TI_USER_SP(tp)
48 REG_L sp, TASK_TI_KERNEL_SP(tp)
49 addi sp, sp, -(PT_SIZE_ON_STACK)
50 REG_S x1, PT_RA(sp)
51 REG_S x3, PT_GP(sp)
52 REG_S x5, PT_T0(sp)
53 REG_S x6, PT_T1(sp)
54 REG_S x7, PT_T2(sp)
55 REG_S x8, PT_S0(sp)
56 REG_S x9, PT_S1(sp)
57 REG_S x10, PT_A0(sp)
58 REG_S x11, PT_A1(sp)
59 REG_S x12, PT_A2(sp)
60 REG_S x13, PT_A3(sp)
61 REG_S x14, PT_A4(sp)
62 REG_S x15, PT_A5(sp)
63 REG_S x16, PT_A6(sp)
64 REG_S x17, PT_A7(sp)
65 REG_S x18, PT_S2(sp)
66 REG_S x19, PT_S3(sp)
67 REG_S x20, PT_S4(sp)
68 REG_S x21, PT_S5(sp)
69 REG_S x22, PT_S6(sp)
70 REG_S x23, PT_S7(sp)
71 REG_S x24, PT_S8(sp)
72 REG_S x25, PT_S9(sp)
73 REG_S x26, PT_S10(sp)
74 REG_S x27, PT_S11(sp)
75 REG_S x28, PT_T3(sp)
76 REG_S x29, PT_T4(sp)
77 REG_S x30, PT_T5(sp)
78 REG_S x31, PT_T6(sp)
79
80 /*
81 * Disable FPU to detect illegal usage of
82 * floating point in kernel space
83 */
84 li t0, SR_FS
85
86 REG_L s0, TASK_TI_USER_SP(tp)
87 csrrc s1, sstatus, t0
88 csrr s2, sepc
89 csrr s3, sbadaddr
90 csrr s4, scause
91 csrr s5, sscratch
92 REG_S s0, PT_SP(sp)
93 REG_S s1, PT_SSTATUS(sp)
94 REG_S s2, PT_SEPC(sp)
95 REG_S s3, PT_SBADADDR(sp)
96 REG_S s4, PT_SCAUSE(sp)
97 REG_S s5, PT_TP(sp)
98 .endm
99
100/*
101 * Prepares to return from a system call or exception by restoring all
102 * registers from the stack.
103 */
104 .macro RESTORE_ALL
105 REG_L a0, PT_SSTATUS(sp)
106 REG_L a2, PT_SEPC(sp)
107 csrw sstatus, a0
108 csrw sepc, a2
109
110 REG_L x1, PT_RA(sp)
111 REG_L x3, PT_GP(sp)
112 REG_L x4, PT_TP(sp)
113 REG_L x5, PT_T0(sp)
114 REG_L x6, PT_T1(sp)
115 REG_L x7, PT_T2(sp)
116 REG_L x8, PT_S0(sp)
117 REG_L x9, PT_S1(sp)
118 REG_L x10, PT_A0(sp)
119 REG_L x11, PT_A1(sp)
120 REG_L x12, PT_A2(sp)
121 REG_L x13, PT_A3(sp)
122 REG_L x14, PT_A4(sp)
123 REG_L x15, PT_A5(sp)
124 REG_L x16, PT_A6(sp)
125 REG_L x17, PT_A7(sp)
126 REG_L x18, PT_S2(sp)
127 REG_L x19, PT_S3(sp)
128 REG_L x20, PT_S4(sp)
129 REG_L x21, PT_S5(sp)
130 REG_L x22, PT_S6(sp)
131 REG_L x23, PT_S7(sp)
132 REG_L x24, PT_S8(sp)
133 REG_L x25, PT_S9(sp)
134 REG_L x26, PT_S10(sp)
135 REG_L x27, PT_S11(sp)
136 REG_L x28, PT_T3(sp)
137 REG_L x29, PT_T4(sp)
138 REG_L x30, PT_T5(sp)
139 REG_L x31, PT_T6(sp)
140
141 REG_L x2, PT_SP(sp)
142 .endm
143
144ENTRY(handle_exception)
145 SAVE_ALL
146
147 /*
148 * Set sscratch register to 0, so that if a recursive exception
149 * occurs, the exception vector knows it came from the kernel
150 */
151 csrw sscratch, x0
152
153 /* Load the global pointer */
154.option push
155.option norelax
156 la gp, __global_pointer$
157.option pop
158
159 la ra, ret_from_exception
160 /*
161 * MSB of cause differentiates between
162 * interrupts and exceptions
163 */
164 bge s4, zero, 1f
165
166 /* Handle interrupts */
167 slli a0, s4, 1
168 srli a0, a0, 1
169 move a1, sp /* pt_regs */
170 tail do_IRQ
1711:
172 /* Handle syscalls */
173 li t0, EXC_SYSCALL
174 beq s4, t0, handle_syscall
175
176 /* Handle other exceptions */
177 slli t0, s4, RISCV_LGPTR
178 la t1, excp_vect_table
179 la t2, excp_vect_table_end
180 move a0, sp /* pt_regs */
181 add t0, t1, t0
182 /* Check if exception code lies within bounds */
183 bgeu t0, t2, 1f
184 REG_L t0, 0(t0)
185 jr t0
1861:
187 tail do_trap_unknown
188
189handle_syscall:
190 /* save the initial A0 value (needed in signal handlers) */
191 REG_S a0, PT_ORIG_A0(sp)
192 /*
193 * Advance SEPC to avoid executing the original
194 * scall instruction on sret
195 */
196 addi s2, s2, 0x4
197 REG_S s2, PT_SEPC(sp)
198 /* System calls run with interrupts enabled */
199 csrs sstatus, SR_IE
200 /* Trace syscalls, but only if requested by the user. */
201 REG_L t0, TASK_TI_FLAGS(tp)
202 andi t0, t0, _TIF_SYSCALL_TRACE
203 bnez t0, handle_syscall_trace_enter
204check_syscall_nr:
205 /* Check to make sure we don't jump to a bogus syscall number. */
206 li t0, __NR_syscalls
207 la s0, sys_ni_syscall
208 /* Syscall number held in a7 */
209 bgeu a7, t0, 1f
210 la s0, sys_call_table
211 slli t0, a7, RISCV_LGPTR
212 add s0, s0, t0
213 REG_L s0, 0(s0)
2141:
215 jalr s0
216
217ret_from_syscall:
218 /* Set user a0 to kernel a0 */
219 REG_S a0, PT_A0(sp)
220 /* Trace syscalls, but only if requested by the user. */
221 REG_L t0, TASK_TI_FLAGS(tp)
222 andi t0, t0, _TIF_SYSCALL_TRACE
223 bnez t0, handle_syscall_trace_exit
224
225ret_from_exception:
226 REG_L s0, PT_SSTATUS(sp)
227 csrc sstatus, SR_IE
228 andi s0, s0, SR_PS
229 bnez s0, restore_all
230
231resume_userspace:
232 /* Interrupts must be disabled here so flags are checked atomically */
233 REG_L s0, TASK_TI_FLAGS(tp) /* current_thread_info->flags */
234 andi s1, s0, _TIF_WORK_MASK
235 bnez s1, work_pending
236
237 /* Save unwound kernel stack pointer in thread_info */
238 addi s0, sp, PT_SIZE_ON_STACK
239 REG_S s0, TASK_TI_KERNEL_SP(tp)
240
241 /*
242 * Save TP into sscratch, so we can find the kernel data structures
243 * again.
244 */
245 csrw sscratch, tp
246
247restore_all:
248 RESTORE_ALL
249 sret
250
251work_pending:
252 /* Enter slow path for supplementary processing */
253 la ra, ret_from_exception
254 andi s1, s0, _TIF_NEED_RESCHED
255 bnez s1, work_resched
256work_notifysig:
257 /* Handle pending signals and notify-resume requests */
258 csrs sstatus, SR_IE /* Enable interrupts for do_notify_resume() */
259 move a0, sp /* pt_regs */
260 move a1, s0 /* current_thread_info->flags */
261 tail do_notify_resume
262work_resched:
263 tail schedule
264
265/* Slow paths for ptrace. */
266handle_syscall_trace_enter:
267 move a0, sp
268 call do_syscall_trace_enter
269 REG_L a0, PT_A0(sp)
270 REG_L a1, PT_A1(sp)
271 REG_L a2, PT_A2(sp)
272 REG_L a3, PT_A3(sp)
273 REG_L a4, PT_A4(sp)
274 REG_L a5, PT_A5(sp)
275 REG_L a6, PT_A6(sp)
276 REG_L a7, PT_A7(sp)
277 j check_syscall_nr
278handle_syscall_trace_exit:
279 move a0, sp
280 call do_syscall_trace_exit
281 j ret_from_exception
282
283END(handle_exception)
284
/*
 * Entry point for a newly forked user task: finish the scheduler
 * bookkeeping, then fall into ret_from_exception via ra to return to
 * userspace.
 */
ENTRY(ret_from_fork)
	la ra, ret_from_exception
	tail schedule_tail
ENDPROC(ret_from_fork)
289
/*
 * Entry point for a new kernel thread: finish scheduler bookkeeping,
 * then invoke fn(arg) with fn in s0 and arg in s1 (set up by
 * copy_thread).  If fn returns, ra sends us through ret_from_exception.
 */
ENTRY(ret_from_kernel_thread)
	call schedule_tail
	/* Call fn(arg) */
	la ra, ret_from_exception
	move a0, s1
	jr s0
ENDPROC(ret_from_kernel_thread)
297
298
/*
 * Integer register context switch
 * The callee-saved registers must be saved and restored.
 *
 * a0: previous task_struct (must be preserved across the switch)
 * a1: next task_struct
 *
 * The value of a0 and a1 must be preserved by this function, as that's how
 * arguments are passed to schedule_tail.
 */
ENTRY(__switch_to)
	/* a3/a4 = &prev->thread.ra / &next->thread.ra */
	li    a4,  TASK_THREAD_RA
	add   a3, a0, a4
	add   a4, a1, a4
	/* Save callee-saved context into prev->thread */
	REG_S ra,  TASK_THREAD_RA_RA(a3)
	REG_S sp,  TASK_THREAD_SP_RA(a3)
	REG_S s0,  TASK_THREAD_S0_RA(a3)
	REG_S s1,  TASK_THREAD_S1_RA(a3)
	REG_S s2,  TASK_THREAD_S2_RA(a3)
	REG_S s3,  TASK_THREAD_S3_RA(a3)
	REG_S s4,  TASK_THREAD_S4_RA(a3)
	REG_S s5,  TASK_THREAD_S5_RA(a3)
	REG_S s6,  TASK_THREAD_S6_RA(a3)
	REG_S s7,  TASK_THREAD_S7_RA(a3)
	REG_S s8,  TASK_THREAD_S8_RA(a3)
	REG_S s9,  TASK_THREAD_S9_RA(a3)
	REG_S s10, TASK_THREAD_S10_RA(a3)
	REG_S s11, TASK_THREAD_S11_RA(a3)
	/* Restore context from next->thread */
	REG_L ra,  TASK_THREAD_RA_RA(a4)
	REG_L sp,  TASK_THREAD_SP_RA(a4)
	REG_L s0,  TASK_THREAD_S0_RA(a4)
	REG_L s1,  TASK_THREAD_S1_RA(a4)
	REG_L s2,  TASK_THREAD_S2_RA(a4)
	REG_L s3,  TASK_THREAD_S3_RA(a4)
	REG_L s4,  TASK_THREAD_S4_RA(a4)
	REG_L s5,  TASK_THREAD_S5_RA(a4)
	REG_L s6,  TASK_THREAD_S6_RA(a4)
	REG_L s7,  TASK_THREAD_S7_RA(a4)
	REG_L s8,  TASK_THREAD_S8_RA(a4)
	REG_L s9,  TASK_THREAD_S9_RA(a4)
	REG_L s10, TASK_THREAD_S10_RA(a4)
	REG_L s11, TASK_THREAD_S11_RA(a4)
	/* Swap the CPU entry around. */
	lw a3, TASK_TI_CPU(a0)
	lw a4, TASK_TI_CPU(a1)
	sw a3, TASK_TI_CPU(a1)
	sw a4, TASK_TI_CPU(a0)
	/* tp must point at the next task so get_current() works. */
#if TASK_TI != 0
#error "TASK_TI != 0: tp will contain a 'struct thread_info', not a 'struct task_struct' so get_current() won't work."
	addi tp, a1, TASK_TI
#else
	move tp, a1
#endif
	ret
ENDPROC(__switch_to)
356
357ENTRY(__fstate_save)
358 li a2, TASK_THREAD_F0
359 add a0, a0, a2
360 li t1, SR_FS
361 csrs sstatus, t1
362 frcsr t0
363 fsd f0, TASK_THREAD_F0_F0(a0)
364 fsd f1, TASK_THREAD_F1_F0(a0)
365 fsd f2, TASK_THREAD_F2_F0(a0)
366 fsd f3, TASK_THREAD_F3_F0(a0)
367 fsd f4, TASK_THREAD_F4_F0(a0)
368 fsd f5, TASK_THREAD_F5_F0(a0)
369 fsd f6, TASK_THREAD_F6_F0(a0)
370 fsd f7, TASK_THREAD_F7_F0(a0)
371 fsd f8, TASK_THREAD_F8_F0(a0)
372 fsd f9, TASK_THREAD_F9_F0(a0)
373 fsd f10, TASK_THREAD_F10_F0(a0)
374 fsd f11, TASK_THREAD_F11_F0(a0)
375 fsd f12, TASK_THREAD_F12_F0(a0)
376 fsd f13, TASK_THREAD_F13_F0(a0)
377 fsd f14, TASK_THREAD_F14_F0(a0)
378 fsd f15, TASK_THREAD_F15_F0(a0)
379 fsd f16, TASK_THREAD_F16_F0(a0)
380 fsd f17, TASK_THREAD_F17_F0(a0)
381 fsd f18, TASK_THREAD_F18_F0(a0)
382 fsd f19, TASK_THREAD_F19_F0(a0)
383 fsd f20, TASK_THREAD_F20_F0(a0)
384 fsd f21, TASK_THREAD_F21_F0(a0)
385 fsd f22, TASK_THREAD_F22_F0(a0)
386 fsd f23, TASK_THREAD_F23_F0(a0)
387 fsd f24, TASK_THREAD_F24_F0(a0)
388 fsd f25, TASK_THREAD_F25_F0(a0)
389 fsd f26, TASK_THREAD_F26_F0(a0)
390 fsd f27, TASK_THREAD_F27_F0(a0)
391 fsd f28, TASK_THREAD_F28_F0(a0)
392 fsd f29, TASK_THREAD_F29_F0(a0)
393 fsd f30, TASK_THREAD_F30_F0(a0)
394 fsd f31, TASK_THREAD_F31_F0(a0)
395 sw t0, TASK_THREAD_FCSR_F0(a0)
396 csrc sstatus, t1
397 ret
398ENDPROC(__fstate_save)
399
400ENTRY(__fstate_restore)
401 li a2, TASK_THREAD_F0
402 add a0, a0, a2
403 li t1, SR_FS
404 lw t0, TASK_THREAD_FCSR_F0(a0)
405 csrs sstatus, t1
406 fld f0, TASK_THREAD_F0_F0(a0)
407 fld f1, TASK_THREAD_F1_F0(a0)
408 fld f2, TASK_THREAD_F2_F0(a0)
409 fld f3, TASK_THREAD_F3_F0(a0)
410 fld f4, TASK_THREAD_F4_F0(a0)
411 fld f5, TASK_THREAD_F5_F0(a0)
412 fld f6, TASK_THREAD_F6_F0(a0)
413 fld f7, TASK_THREAD_F7_F0(a0)
414 fld f8, TASK_THREAD_F8_F0(a0)
415 fld f9, TASK_THREAD_F9_F0(a0)
416 fld f10, TASK_THREAD_F10_F0(a0)
417 fld f11, TASK_THREAD_F11_F0(a0)
418 fld f12, TASK_THREAD_F12_F0(a0)
419 fld f13, TASK_THREAD_F13_F0(a0)
420 fld f14, TASK_THREAD_F14_F0(a0)
421 fld f15, TASK_THREAD_F15_F0(a0)
422 fld f16, TASK_THREAD_F16_F0(a0)
423 fld f17, TASK_THREAD_F17_F0(a0)
424 fld f18, TASK_THREAD_F18_F0(a0)
425 fld f19, TASK_THREAD_F19_F0(a0)
426 fld f20, TASK_THREAD_F20_F0(a0)
427 fld f21, TASK_THREAD_F21_F0(a0)
428 fld f22, TASK_THREAD_F22_F0(a0)
429 fld f23, TASK_THREAD_F23_F0(a0)
430 fld f24, TASK_THREAD_F24_F0(a0)
431 fld f25, TASK_THREAD_F25_F0(a0)
432 fld f26, TASK_THREAD_F26_F0(a0)
433 fld f27, TASK_THREAD_F27_F0(a0)
434 fld f28, TASK_THREAD_F28_F0(a0)
435 fld f29, TASK_THREAD_F29_F0(a0)
436 fld f30, TASK_THREAD_F30_F0(a0)
437 fld f31, TASK_THREAD_F31_F0(a0)
438 fscsr t0
439 csrc sstatus, t1
440 ret
441ENDPROC(__fstate_restore)
442
443
444 .section ".rodata"
445 /* Exception vector table */
446ENTRY(excp_vect_table)
447 RISCV_PTR do_trap_insn_misaligned
448 RISCV_PTR do_trap_insn_fault
449 RISCV_PTR do_trap_insn_illegal
450 RISCV_PTR do_trap_break
451 RISCV_PTR do_trap_load_misaligned
452 RISCV_PTR do_trap_load_fault
453 RISCV_PTR do_trap_store_misaligned
454 RISCV_PTR do_trap_store_fault
455 RISCV_PTR do_trap_ecall_u /* system call, gets intercepted */
456 RISCV_PTR do_trap_ecall_s
457 RISCV_PTR do_trap_unknown
458 RISCV_PTR do_trap_ecall_m
459 RISCV_PTR do_page_fault /* instruction page fault */
460 RISCV_PTR do_page_fault /* load page fault */
461 RISCV_PTR do_trap_unknown
462 RISCV_PTR do_page_fault /* store page fault */
463excp_vect_table_end:
464END(excp_vect_table)
diff --git a/arch/riscv/kernel/head.S b/arch/riscv/kernel/head.S
new file mode 100644
index 000000000000..76af908f87c1
--- /dev/null
+++ b/arch/riscv/kernel/head.S
@@ -0,0 +1,157 @@
1/*
2 * Copyright (C) 2012 Regents of the University of California
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation, version 2.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 */
13
14#include <asm/thread_info.h>
15#include <asm/asm-offsets.h>
16#include <asm/asm.h>
17#include <linux/init.h>
18#include <linux/linkage.h>
19#include <asm/thread_info.h>
20#include <asm/page.h>
21#include <asm/csr.h>
22
23__INIT
24ENTRY(_start)
25 /* Mask all interrupts */
26 csrw sie, zero
27
28 /* Load the global pointer */
29.option push
30.option norelax
31 la gp, __global_pointer$
32.option pop
33
34 /*
35 * Disable FPU to detect illegal usage of
36 * floating point in kernel space
37 */
38 li t0, SR_FS
39 csrc sstatus, t0
40
41 /* Pick one hart to run the main boot sequence */
42 la a3, hart_lottery
43 li a2, 1
44 amoadd.w a3, a2, (a3)
45 bnez a3, .Lsecondary_start
46
47 /* Save hart ID and DTB physical address */
48 mv s0, a0
49 mv s1, a1
50
51 /* Initialize page tables and relocate to virtual addresses */
52 la sp, init_thread_union + THREAD_SIZE
53 call setup_vm
54 call relocate
55
56 /* Restore C environment */
57 la tp, init_task
58 sw s0, TASK_TI_CPU(tp)
59
60 la sp, init_thread_union
61 li a0, ASM_THREAD_SIZE
62 add sp, sp, a0
63
64 /* Start the kernel */
65 mv a0, s0
66 mv a1, s1
67 call sbi_save
68 tail start_kernel
69
70relocate:
71 /* Relocate return address */
72 li a1, PAGE_OFFSET
73 la a0, _start
74 sub a1, a1, a0
75 add ra, ra, a1
76
 77	/* Point stvec to virtual address of instruction after sptbr write */
78 la a0, 1f
79 add a0, a0, a1
80 csrw stvec, a0
81
82 /* Compute sptbr for kernel page tables, but don't load it yet */
83 la a2, swapper_pg_dir
84 srl a2, a2, PAGE_SHIFT
85 li a1, SPTBR_MODE
86 or a2, a2, a1
87
88 /*
89 * Load trampoline page directory, which will cause us to trap to
90 * stvec if VA != PA, or simply fall through if VA == PA
91 */
92 la a0, trampoline_pg_dir
93 srl a0, a0, PAGE_SHIFT
94 or a0, a0, a1
95 sfence.vma
96 csrw sptbr, a0
971:
98 /* Set trap vector to spin forever to help debug */
99 la a0, .Lsecondary_park
100 csrw stvec, a0
101
102 /* Reload the global pointer */
103.option push
104.option norelax
105 la gp, __global_pointer$
106.option pop
107
108 /* Switch to kernel page tables */
109 csrw sptbr, a2
110
111 ret
112
113.Lsecondary_start:
114#ifdef CONFIG_SMP
115 li a1, CONFIG_NR_CPUS
116 bgeu a0, a1, .Lsecondary_park
117
118 /* Set trap vector to spin forever to help debug */
119 la a3, .Lsecondary_park
120 csrw stvec, a3
121
122 slli a3, a0, LGREG
123 la a1, __cpu_up_stack_pointer
124 la a2, __cpu_up_task_pointer
125 add a1, a3, a1
126 add a2, a3, a2
127
128 /*
129 * This hart didn't win the lottery, so we wait for the winning hart to
130 * get far enough along the boot process that it should continue.
131 */
132.Lwait_for_cpu_up:
133 /* FIXME: We should WFI to save some energy here. */
134 REG_L sp, (a1)
135 REG_L tp, (a2)
136 beqz sp, .Lwait_for_cpu_up
137 beqz tp, .Lwait_for_cpu_up
138 fence
139
140 /* Enable virtual memory and relocate to virtual address */
141 call relocate
142
143 tail smp_callin
144#endif
145
146.Lsecondary_park:
147 /* We lack SMP support or have too many harts, so park this hart */
148 wfi
149 j .Lsecondary_park
150END(_start)
151
152__PAGE_ALIGNED_BSS
153 /* Empty zero page */
154 .balign PAGE_SIZE
155ENTRY(empty_zero_page)
156 .fill (empty_zero_page + PAGE_SIZE) - ., 1, 0x00
157END(empty_zero_page)
diff --git a/arch/riscv/kernel/irq.c b/arch/riscv/kernel/irq.c
new file mode 100644
index 000000000000..328718e8026e
--- /dev/null
+++ b/arch/riscv/kernel/irq.c
@@ -0,0 +1,39 @@
1/*
2 * Copyright (C) 2012 Regents of the University of California
3 * Copyright (C) 2017 SiFive
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License
7 * as published by the Free Software Foundation, version 2.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 */
14
15#include <linux/interrupt.h>
16#include <linux/irqchip.h>
17#include <linux/irqdomain.h>
18
19#ifdef CONFIG_RISCV_INTC
20#include <linux/irqchip/irq-riscv-intc.h>
21#endif
22
23void __init init_IRQ(void)
24{
25 irqchip_init();
26}
27
28asmlinkage void __irq_entry do_IRQ(unsigned int cause, struct pt_regs *regs)
29{
30#ifdef CONFIG_RISCV_INTC
31 /*
32 * FIXME: We don't want a direct call to riscv_intc_irq here. The plan
33 * is to put an IRQ domain here and let the interrupt controller
34 * register with that, but I poked around the arm64 code a bit and
35 * there might be a better way to do it (ie, something fully generic).
36 */
37 riscv_intc_irq(cause, regs);
38#endif
39}
diff --git a/arch/riscv/kernel/module.c b/arch/riscv/kernel/module.c
new file mode 100644
index 000000000000..e0f05034fc21
--- /dev/null
+++ b/arch/riscv/kernel/module.c
@@ -0,0 +1,217 @@
1/*
2 * This program is free software; you can redistribute it and/or modify
3 * it under the terms of the GNU General Public License as published by
4 * the Free Software Foundation; either version 2 of the License, or
5 * (at your option) any later version.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 *
12 * Copyright (C) 2017 Zihao Yu
13 */
14
15#include <linux/elf.h>
16#include <linux/err.h>
17#include <linux/errno.h>
18#include <linux/moduleloader.h>
19
20static int apply_r_riscv_64_rela(struct module *me, u32 *location, Elf_Addr v)
21{
22 *(u64 *)location = v;
23 return 0;
24}
25
26static int apply_r_riscv_branch_rela(struct module *me, u32 *location,
27 Elf_Addr v)
28{
29 s64 offset = (void *)v - (void *)location;
30 u32 imm12 = (offset & 0x1000) << (31 - 12);
31 u32 imm11 = (offset & 0x800) >> (11 - 7);
32 u32 imm10_5 = (offset & 0x7e0) << (30 - 10);
33 u32 imm4_1 = (offset & 0x1e) << (11 - 4);
34
35 *location = (*location & 0x1fff07f) | imm12 | imm11 | imm10_5 | imm4_1;
36 return 0;
37}
38
39static int apply_r_riscv_jal_rela(struct module *me, u32 *location,
40 Elf_Addr v)
41{
42 s64 offset = (void *)v - (void *)location;
43 u32 imm20 = (offset & 0x100000) << (31 - 20);
44 u32 imm19_12 = (offset & 0xff000);
45 u32 imm11 = (offset & 0x800) << (20 - 11);
46 u32 imm10_1 = (offset & 0x7fe) << (30 - 10);
47
48 *location = (*location & 0xfff) | imm20 | imm19_12 | imm11 | imm10_1;
49 return 0;
50}
51
52static int apply_r_riscv_pcrel_hi20_rela(struct module *me, u32 *location,
53 Elf_Addr v)
54{
55 s64 offset = (void *)v - (void *)location;
56 s32 hi20;
57
58 if (offset != (s32)offset) {
59 pr_err(
60 "%s: target %016llx can not be addressed by the 32-bit offset from PC = %p\n",
61 me->name, v, location);
62 return -EINVAL;
63 }
64
65 hi20 = (offset + 0x800) & 0xfffff000;
66 *location = (*location & 0xfff) | hi20;
67 return 0;
68}
69
70static int apply_r_riscv_pcrel_lo12_i_rela(struct module *me, u32 *location,
71 Elf_Addr v)
72{
73 /*
74 * v is the lo12 value to fill. It is calculated before calling this
75 * handler.
76 */
77 *location = (*location & 0xfffff) | ((v & 0xfff) << 20);
78 return 0;
79}
80
81static int apply_r_riscv_pcrel_lo12_s_rela(struct module *me, u32 *location,
82 Elf_Addr v)
83{
84 /*
85 * v is the lo12 value to fill. It is calculated before calling this
86 * handler.
87 */
88 u32 imm11_5 = (v & 0xfe0) << (31 - 11);
89 u32 imm4_0 = (v & 0x1f) << (11 - 4);
90
91 *location = (*location & 0x1fff07f) | imm11_5 | imm4_0;
92 return 0;
93}
94
95static int apply_r_riscv_call_plt_rela(struct module *me, u32 *location,
96 Elf_Addr v)
97{
98 s64 offset = (void *)v - (void *)location;
99 s32 fill_v = offset;
100 u32 hi20, lo12;
101
102 if (offset != fill_v) {
103 pr_err(
104 "%s: target %016llx can not be addressed by the 32-bit offset from PC = %p\n",
105 me->name, v, location);
106 return -EINVAL;
107 }
108
109 hi20 = (offset + 0x800) & 0xfffff000;
110 lo12 = (offset - hi20) & 0xfff;
111 *location = (*location & 0xfff) | hi20;
112 *(location + 1) = (*(location + 1) & 0xfffff) | (lo12 << 20);
113 return 0;
114}
115
116static int apply_r_riscv_relax_rela(struct module *me, u32 *location,
117 Elf_Addr v)
118{
119 return 0;
120}
121
122static int (*reloc_handlers_rela[]) (struct module *me, u32 *location,
123 Elf_Addr v) = {
124 [R_RISCV_64] = apply_r_riscv_64_rela,
125 [R_RISCV_BRANCH] = apply_r_riscv_branch_rela,
126 [R_RISCV_JAL] = apply_r_riscv_jal_rela,
127 [R_RISCV_PCREL_HI20] = apply_r_riscv_pcrel_hi20_rela,
128 [R_RISCV_PCREL_LO12_I] = apply_r_riscv_pcrel_lo12_i_rela,
129 [R_RISCV_PCREL_LO12_S] = apply_r_riscv_pcrel_lo12_s_rela,
130 [R_RISCV_CALL_PLT] = apply_r_riscv_call_plt_rela,
131 [R_RISCV_RELAX] = apply_r_riscv_relax_rela,
132};
133
134int apply_relocate_add(Elf_Shdr *sechdrs, const char *strtab,
135 unsigned int symindex, unsigned int relsec,
136 struct module *me)
137{
138 Elf_Rela *rel = (void *) sechdrs[relsec].sh_addr;
139 int (*handler)(struct module *me, u32 *location, Elf_Addr v);
140 Elf_Sym *sym;
141 u32 *location;
142 unsigned int i, type;
143 Elf_Addr v;
144 int res;
145
146 pr_debug("Applying relocate section %u to %u\n", relsec,
147 sechdrs[relsec].sh_info);
148
149 for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
150 /* This is where to make the change */
151 location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
152 + rel[i].r_offset;
153 /* This is the symbol it is referring to */
154 sym = (Elf_Sym *)sechdrs[symindex].sh_addr
155 + ELF_RISCV_R_SYM(rel[i].r_info);
156 if (IS_ERR_VALUE(sym->st_value)) {
157 /* Ignore unresolved weak symbol */
158 if (ELF_ST_BIND(sym->st_info) == STB_WEAK)
159 continue;
160 pr_warning("%s: Unknown symbol %s\n",
161 me->name, strtab + sym->st_name);
162 return -ENOENT;
163 }
164
165 type = ELF_RISCV_R_TYPE(rel[i].r_info);
166
167 if (type < ARRAY_SIZE(reloc_handlers_rela))
168 handler = reloc_handlers_rela[type];
169 else
170 handler = NULL;
171
172 if (!handler) {
173 pr_err("%s: Unknown relocation type %u\n",
174 me->name, type);
175 return -EINVAL;
176 }
177
178 v = sym->st_value + rel[i].r_addend;
179
180 if (type == R_RISCV_PCREL_LO12_I || type == R_RISCV_PCREL_LO12_S) {
181 unsigned int j;
182
183 for (j = 0; j < sechdrs[relsec].sh_size / sizeof(*rel); j++) {
184 u64 hi20_loc =
185 sechdrs[sechdrs[relsec].sh_info].sh_addr
186 + rel[j].r_offset;
187 /* Find the corresponding HI20 PC-relative relocation entry */
188 if (hi20_loc == sym->st_value) {
189 Elf_Sym *hi20_sym =
190 (Elf_Sym *)sechdrs[symindex].sh_addr
191 + ELF_RISCV_R_SYM(rel[j].r_info);
192 u64 hi20_sym_val =
193 hi20_sym->st_value
194 + rel[j].r_addend;
195 /* Calculate lo12 */
196 s64 offset = hi20_sym_val - hi20_loc;
197 s32 hi20 = (offset + 0x800) & 0xfffff000;
198 s32 lo12 = offset - hi20;
199 v = lo12;
200 break;
201 }
202 }
203 if (j == sechdrs[relsec].sh_size / sizeof(*rel)) {
204 pr_err(
205 "%s: Can not find HI20 PC-relative relocation information\n",
206 me->name);
207 return -EINVAL;
208 }
209 }
210
211 res = handler(me, location, v);
212 if (res)
213 return res;
214 }
215
216 return 0;
217}
diff --git a/arch/riscv/kernel/process.c b/arch/riscv/kernel/process.c
new file mode 100644
index 000000000000..0d90dcc1fbd3
--- /dev/null
+++ b/arch/riscv/kernel/process.c
@@ -0,0 +1,129 @@
1/*
2 * Copyright (C) 2009 Sunplus Core Technology Co., Ltd.
3 * Chen Liqin <liqin.chen@sunplusct.com>
4 * Lennox Wu <lennox.wu@sunplusct.com>
5 * Copyright (C) 2012 Regents of the University of California
6 * Copyright (C) 2017 SiFive
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, see the file COPYING, or write
20 * to the Free Software Foundation, Inc.,
21 */
22
23#include <linux/kernel.h>
24#include <linux/sched.h>
25#include <linux/sched/task_stack.h>
26#include <linux/tick.h>
27#include <linux/ptrace.h>
28
29#include <asm/unistd.h>
30#include <asm/uaccess.h>
31#include <asm/processor.h>
32#include <asm/csr.h>
33#include <asm/string.h>
34#include <asm/switch_to.h>
35
36extern asmlinkage void ret_from_fork(void);
37extern asmlinkage void ret_from_kernel_thread(void);
38
/*
 * Idle-loop body for this hart: stall until an interrupt is pending,
 * then re-enable interrupts (the generic idle loop calls us with irqs
 * disabled and expects them enabled on return).
 */
void arch_cpu_idle(void)
{
	wait_for_interrupt();
	local_irq_enable();
}
44
/*
 * Dump the full GPR set and the relevant supervisor CSRs from a saved
 * trap frame, three registers per line, for oops/debug output.
 */
void show_regs(struct pt_regs *regs)
{
	show_regs_print_info(KERN_DEFAULT);

	pr_cont("sepc: " REG_FMT " ra : " REG_FMT " sp : " REG_FMT "\n",
		regs->sepc, regs->ra, regs->sp);
	pr_cont(" gp : " REG_FMT " tp : " REG_FMT " t0 : " REG_FMT "\n",
		regs->gp, regs->tp, regs->t0);
	pr_cont(" t1 : " REG_FMT " t2 : " REG_FMT " s0 : " REG_FMT "\n",
		regs->t1, regs->t2, regs->s0);
	pr_cont(" s1 : " REG_FMT " a0 : " REG_FMT " a1 : " REG_FMT "\n",
		regs->s1, regs->a0, regs->a1);
	pr_cont(" a2 : " REG_FMT " a3 : " REG_FMT " a4 : " REG_FMT "\n",
		regs->a2, regs->a3, regs->a4);
	pr_cont(" a5 : " REG_FMT " a6 : " REG_FMT " a7 : " REG_FMT "\n",
		regs->a5, regs->a6, regs->a7);
	pr_cont(" s2 : " REG_FMT " s3 : " REG_FMT " s4 : " REG_FMT "\n",
		regs->s2, regs->s3, regs->s4);
	pr_cont(" s5 : " REG_FMT " s6 : " REG_FMT " s7 : " REG_FMT "\n",
		regs->s5, regs->s6, regs->s7);
	pr_cont(" s8 : " REG_FMT " s9 : " REG_FMT " s10: " REG_FMT "\n",
		regs->s8, regs->s9, regs->s10);
	pr_cont(" s11: " REG_FMT " t3 : " REG_FMT " t4 : " REG_FMT "\n",
		regs->s11, regs->t3, regs->t4);
	pr_cont(" t5 : " REG_FMT " t6 : " REG_FMT "\n",
		regs->t5, regs->t6);

	/* Supervisor CSRs captured at trap entry. */
	pr_cont("sstatus: " REG_FMT " sbadaddr: " REG_FMT " scause: " REG_FMT "\n",
		regs->sstatus, regs->sbadaddr, regs->scause);
}
75
/*
 * Initialize the register frame for a thread entering user space after
 * exec: user mode with interrupts enabled, FPU in its initial state,
 * pc/sp pointing at the new image's entry point and stack.
 */
void start_thread(struct pt_regs *regs, unsigned long pc,
	unsigned long sp)
{
	regs->sstatus = SR_PIE /* User mode, irqs on */ | SR_FS_INITIAL;
	regs->sepc = pc;
	regs->sp = sp;
	set_fs(USER_DS);	/* restore the user address-space limit */
}
84
/*
 * Called on exec: drop per-thread state inherited from the old image.
 * Only the FPU context needs resetting here.
 */
void flush_thread(void)
{
	/*
	 * Reset FPU context
	 *	frm: round to nearest, ties to even (IEEE default)
	 *	fflags: accrued exceptions cleared
	 */
	memset(&current->thread.fstate, 0, sizeof(current->thread.fstate));
}
94
/*
 * Duplicate a task_struct for fork.  Flush the live FPU registers into
 * src's thread struct first so the copied task inherits a coherent
 * floating-point context.
 */
int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
{
	fstate_save(src, task_pt_regs(src));
	*dst = *src;
	return 0;
}
101
/*
 * Set up the child's saved register state for clone/fork/kernel_thread.
 * p->thread holds the kernel context that __switch_to() restores the
 * first time the child is scheduled.
 */
int copy_thread(unsigned long clone_flags, unsigned long usp,
	unsigned long arg, struct task_struct *p)
{
	struct pt_regs *childregs = task_pt_regs(p);

	/* p->thread holds context to be restored by __switch_to() */
	if (unlikely(p->flags & PF_KTHREAD)) {
		/* Kernel thread */
		const register unsigned long gp __asm__ ("gp");
		memset(childregs, 0, sizeof(struct pt_regs));
		/* Kernel threads share the kernel's global pointer. */
		childregs->gp = gp;
		childregs->sstatus = SR_PS | SR_PIE; /* Supervisor, irqs on */

		/* s[0]/s[1] carry fn/arg for ret_from_kernel_thread. */
		p->thread.ra = (unsigned long)ret_from_kernel_thread;
		p->thread.s[0] = usp; /* fn */
		p->thread.s[1] = arg;
	} else {
		*childregs = *(current_pt_regs());
		if (usp) /* User fork */
			childregs->sp = usp;
		if (clone_flags & CLONE_SETTLS)
			/*
			 * NOTE(review): TLS pointer is taken from a5 —
			 * presumably the clone() tls argument register in
			 * this ABI; confirm against the syscall wrapper.
			 */
			childregs->tp = childregs->a5;
		childregs->a0 = 0; /* Return value of fork() */
		p->thread.ra = (unsigned long)ret_from_fork;
	}
	p->thread.sp = (unsigned long)childregs; /* kernel sp */
	return 0;
}
diff --git a/arch/riscv/kernel/ptrace.c b/arch/riscv/kernel/ptrace.c
new file mode 100644
index 000000000000..ba3e80712797
--- /dev/null
+++ b/arch/riscv/kernel/ptrace.c
@@ -0,0 +1,125 @@
1/*
2 * Copyright 2010 Tilera Corporation. All Rights Reserved.
3 * Copyright 2015 Regents of the University of California
4 * Copyright 2017 SiFive
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation, version 2.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * Copied from arch/tile/kernel/ptrace.c
16 */
17
18#include <asm/ptrace.h>
19#include <asm/syscall.h>
20#include <asm/thread_info.h>
21#include <linux/ptrace.h>
22#include <linux/elf.h>
23#include <linux/regset.h>
24#include <linux/sched.h>
25#include <linux/sched/task_stack.h>
26#include <linux/tracehook.h>
27#include <trace/events/syscalls.h>
28
29enum riscv_regset {
30 REGSET_X,
31};
32
/*
 * regset "get": copy the tracee's saved pt_regs (the NT_PRSTATUS
 * general-purpose register block) out to a kernel or user buffer.
 */
static int riscv_gpr_get(struct task_struct *target,
			 const struct user_regset *regset,
			 unsigned int pos, unsigned int count,
			 void *kbuf, void __user *ubuf)
{
	struct pt_regs *regs;

	regs = task_pt_regs(target);
	return user_regset_copyout(&pos, &count, &kbuf, &ubuf, regs, 0, -1);
}
43
44static int riscv_gpr_set(struct task_struct *target,
45 const struct user_regset *regset,
46 unsigned int pos, unsigned int count,
47 const void *kbuf, const void __user *ubuf)
48{
49 int ret;
50 struct pt_regs *regs;
51
52 regs = task_pt_regs(target);
53 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &regs, 0, -1);
54 return ret;
55}
56
57
/* Native regsets exported via ptrace/core dumps: GPRs only for now. */
static const struct user_regset riscv_user_regset[] = {
	[REGSET_X] = {
		.core_note_type = NT_PRSTATUS,
		.n = ELF_NGREG,
		.size = sizeof(elf_greg_t),
		.align = sizeof(elf_greg_t),
		.get = &riscv_gpr_get,
		.set = &riscv_gpr_set,
	},
};

/* Regset view handed back by task_user_regset_view(). */
static const struct user_regset_view riscv_user_native_view = {
	.name = "riscv",
	.e_machine = EM_RISCV,
	.regsets = riscv_user_regset,
	.n = ARRAY_SIZE(riscv_user_regset),
};
75
/* All RISC-V tasks use the single native regset view defined above. */
const struct user_regset_view *task_user_regset_view(struct task_struct *task)
{
	return &riscv_user_native_view;
}

/* Detach-time hook: stop reporting this child's syscalls. */
void ptrace_disable(struct task_struct *child)
{
	clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
}
85
/*
 * Architecture hook for ptrace requests.  No RISC-V specific requests
 * are handled yet, so everything is forwarded to the generic code.
 */
long arch_ptrace(struct task_struct *child, long request,
		 unsigned long addr, unsigned long data)
{
	return ptrace_request(child, request, addr, data);
}
99
/*
 * Allows PTRACE_SYSCALL to work.  These are called from entry.S in
 * {handle,ret_from}_syscall.
 */
void do_syscall_trace_enter(struct pt_regs *regs)
{
	/* A nonzero report return means the tracer wants the syscall skipped. */
	if (test_thread_flag(TIF_SYSCALL_TRACE))
		if (tracehook_report_syscall_entry(regs))
			syscall_set_nr(current, regs, -1);

#ifdef CONFIG_HAVE_SYSCALL_TRACEPOINTS
	if (test_thread_flag(TIF_SYSCALL_TRACEPOINT))
		trace_sys_enter(regs, syscall_get_nr(current, regs));
#endif
}
115
116void do_syscall_trace_exit(struct pt_regs *regs)
117{
118 if (test_thread_flag(TIF_SYSCALL_TRACE))
119 tracehook_report_syscall_exit(regs, 0);
120
121#ifdef CONFIG_HAVE_SYSCALL_TRACEPOINTS
122 if (test_thread_flag(TIF_SYSCALL_TRACEPOINT))
123 trace_sys_exit(regs, regs->regs[0]);
124#endif
125}
diff --git a/arch/riscv/kernel/reset.c b/arch/riscv/kernel/reset.c
new file mode 100644
index 000000000000..2a53d26ffdd6
--- /dev/null
+++ b/arch/riscv/kernel/reset.c
@@ -0,0 +1,36 @@
1/*
2 * Copyright (C) 2012 Regents of the University of California
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation, version 2.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 */
13
14#include <linux/reboot.h>
15#include <linux/export.h>
16#include <asm/sbi.h>
17
18void (*pm_power_off)(void) = machine_power_off;
19EXPORT_SYMBOL(pm_power_off);
20
/* Reboot: run the registered restart handlers, spin if none took effect. */
void machine_restart(char *cmd)
{
	do_kernel_restart(cmd);
	while (1);
}

/* Halt is implemented as a power-off on this platform. */
void machine_halt(void)
{
	machine_power_off();
}

/* Ask the SBI firmware to shut the machine down; spin if it returns. */
void machine_power_off(void)
{
	sbi_shutdown();
	while (1);
}
diff --git a/arch/riscv/kernel/riscv_ksyms.c b/arch/riscv/kernel/riscv_ksyms.c
new file mode 100644
index 000000000000..23cc81ec9e94
--- /dev/null
+++ b/arch/riscv/kernel/riscv_ksyms.c
@@ -0,0 +1,15 @@
1/*
2 * Copyright (C) 2017 Zihao Yu
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 */
8
9#include <linux/export.h>
10#include <linux/uaccess.h>
11
12/*
13 * Assembly functions that may be used (directly or indirectly) by modules
14 */
15EXPORT_SYMBOL(__copy_user);
diff --git a/arch/riscv/kernel/setup.c b/arch/riscv/kernel/setup.c
new file mode 100644
index 000000000000..de7db114c315
--- /dev/null
+++ b/arch/riscv/kernel/setup.c
@@ -0,0 +1,257 @@
1/*
2 * Copyright (C) 2009 Sunplus Core Technology Co., Ltd.
3 * Chen Liqin <liqin.chen@sunplusct.com>
4 * Lennox Wu <lennox.wu@sunplusct.com>
5 * Copyright (C) 2012 Regents of the University of California
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, see the file COPYING, or write
19 * to the Free Software Foundation, Inc.,
20 */
21
22#include <linux/init.h>
23#include <linux/mm.h>
24#include <linux/memblock.h>
25#include <linux/sched.h>
26#include <linux/initrd.h>
27#include <linux/console.h>
28#include <linux/screen_info.h>
29#include <linux/of_fdt.h>
30#include <linux/of_platform.h>
31#include <linux/sched/task.h>
32
33#include <asm/setup.h>
34#include <asm/sections.h>
35#include <asm/pgtable.h>
36#include <asm/smp.h>
37#include <asm/sbi.h>
38#include <asm/tlbflush.h>
39#include <asm/thread_info.h>
40
41#ifdef CONFIG_HVC_RISCV_SBI
42#include <asm/hvc_riscv_sbi.h>
43#endif
44
45#ifdef CONFIG_DUMMY_CONSOLE
46struct screen_info screen_info = {
47 .orig_video_lines = 30,
48 .orig_video_cols = 80,
49 .orig_video_mode = 0,
50 .orig_video_ega_bx = 0,
51 .orig_video_isVGA = 1,
52 .orig_video_points = 8
53};
54#endif
55
56#ifdef CONFIG_CMDLINE_BOOL
57static char __initdata builtin_cmdline[COMMAND_LINE_SIZE] = CONFIG_CMDLINE;
58#endif /* CONFIG_CMDLINE_BOOL */
59
60unsigned long va_pa_offset;
61unsigned long pfn_base;
62
63/* The lucky hart to first increment this variable will boot the other cores */
64atomic_t hart_lottery;
65
66#ifdef CONFIG_BLK_DEV_INITRD
/*
 * Locate the initramfs (either linked into the kernel image or handed
 * over by the bootloader), sanity-check it against the end of memory,
 * and reserve its pages so the allocator leaves it alone.
 */
static void __init setup_initrd(void)
{
	extern char __initramfs_start[];
	extern unsigned long __initramfs_size;
	unsigned long size;

	/* A built-in initramfs takes precedence over bootloader values. */
	if (__initramfs_size > 0) {
		initrd_start = (unsigned long)(&__initramfs_start);
		initrd_end = initrd_start + __initramfs_size;
	}

	if (initrd_start >= initrd_end) {
		printk(KERN_INFO "initrd not found or empty");
		goto disable;
	}
	if (__pa(initrd_end) > PFN_PHYS(max_low_pfn)) {
		printk(KERN_ERR "initrd extends beyond end of memory");
		goto disable;
	}

	size = initrd_end - initrd_start;
	memblock_reserve(__pa(initrd_start), size);
	initrd_below_start_ok = 1;

	printk(KERN_INFO "Initial ramdisk at: 0x%p (%lu bytes)\n",
		(void *)(initrd_start), size);
	return;
disable:
	/* pr_cont completes the deliberately un-terminated messages above. */
	pr_cont(" - disabling initrd\n");
	initrd_start = 0;
	initrd_end = 0;
}
99#endif /* CONFIG_BLK_DEV_INITRD */
100
101pgd_t swapper_pg_dir[PTRS_PER_PGD] __page_aligned_bss;
102pgd_t trampoline_pg_dir[PTRS_PER_PGD] __initdata __aligned(PAGE_SIZE);
103
104#ifndef __PAGETABLE_PMD_FOLDED
105#define NUM_SWAPPER_PMDS ((uintptr_t)-PAGE_OFFSET >> PGDIR_SHIFT)
106pmd_t swapper_pmd[PTRS_PER_PMD*((-PAGE_OFFSET)/PGDIR_SIZE)] __page_aligned_bss;
107pmd_t trampoline_pmd[PTRS_PER_PGD] __initdata __aligned(PAGE_SIZE);
108#endif
109
/*
 * Build the trampoline and swapper page tables that map the kernel's
 * load address up at PAGE_OFFSET.  Runs very early (called from head.S
 * before paging is enabled), so it only touches the statically
 * allocated tables declared above.
 */
asmlinkage void __init setup_vm(void)
{
	extern char _start;
	uintptr_t i;
	/* Physical address the kernel was loaded at. */
	uintptr_t pa = (uintptr_t) &_start;
	pgprot_t prot = __pgprot(pgprot_val(PAGE_KERNEL) | _PAGE_EXEC);

	va_pa_offset = PAGE_OFFSET - pa;
	pfn_base = PFN_DOWN(pa);

	/* Sanity check alignment and size */
	BUG_ON((PAGE_OFFSET % PGDIR_SIZE) != 0);
	BUG_ON((pa % (PAGE_SIZE * PTRS_PER_PTE)) != 0);

#ifndef __PAGETABLE_PMD_FOLDED
	/* Three-level case: pgd entries point at pmd pages, pmds at the kernel. */
	trampoline_pg_dir[(PAGE_OFFSET >> PGDIR_SHIFT) % PTRS_PER_PGD] =
		pfn_pgd(PFN_DOWN((uintptr_t)trampoline_pmd),
			__pgprot(_PAGE_TABLE));
	trampoline_pmd[0] = pfn_pmd(PFN_DOWN(pa), prot);

	for (i = 0; i < (-PAGE_OFFSET)/PGDIR_SIZE; ++i) {
		size_t o = (PAGE_OFFSET >> PGDIR_SHIFT) % PTRS_PER_PGD + i;
		swapper_pg_dir[o] =
			pfn_pgd(PFN_DOWN((uintptr_t)swapper_pmd) + i,
				__pgprot(_PAGE_TABLE));
	}
	for (i = 0; i < ARRAY_SIZE(swapper_pmd); i++)
		swapper_pmd[i] = pfn_pmd(PFN_DOWN(pa + i * PMD_SIZE), prot);
#else
	/* Folded case: map the kernel with PGDIR-sized pages directly. */
	trampoline_pg_dir[(PAGE_OFFSET >> PGDIR_SHIFT) % PTRS_PER_PGD] =
		pfn_pgd(PFN_DOWN(pa), prot);

	for (i = 0; i < (-PAGE_OFFSET)/PGDIR_SIZE; ++i) {
		size_t o = (PAGE_OFFSET >> PGDIR_SHIFT) % PTRS_PER_PGD + i;
		swapper_pg_dir[o] =
			pfn_pgd(PFN_DOWN(pa + i * PGDIR_SIZE), prot);
	}
#endif
}
149
/*
 * Early entry from boot: scan the device tree blob handed over by the
 * SBI firmware.  hartid is currently unused here.
 */
void __init sbi_save(unsigned int hartid, void *dtb)
{
	early_init_dt_scan(__va(dtb));
}
154
/*
 * Allow the user to manually add a memory region (in case DTS is broken);
 * "mem_end=nn[KkMmGg]"
 */
static int __init mem_end_override(char *p)
{
	resource_size_t base, end;

	if (!p)
		return -EINVAL;
	/* Region starts at the kernel's physical base... */
	base = (uintptr_t) __pa(PAGE_OFFSET);
	/* ...and ends at the parsed size, rounded down to a PMD boundary. */
	end = memparse(p, &p) & PMD_MASK;
	if (end == 0)
		return -EINVAL;
	memblock_add(base, end - base);
	return 0;
}
early_param("mem_end", mem_end_override);
173
/*
 * Register usable memory with memblock: reserve the kernel image, the
 * initrd and the FDT, and size the linear mapping.
 */
static void __init setup_bootmem(void)
{
	struct memblock_region *reg;
	phys_addr_t mem_size = 0;

	/* Find the memory region containing the kernel */
	for_each_memblock(memory, reg) {
		phys_addr_t vmlinux_end = __pa(_end);
		phys_addr_t end = reg->base + reg->size;

		if (reg->base <= vmlinux_end && vmlinux_end <= end) {
			/*
			 * Reserve from the start of the region to the end of
			 * the kernel
			 */
			memblock_reserve(reg->base, vmlinux_end - reg->base);
			/* Clamp to what fits in the linear mapping. */
			mem_size = min(reg->size, (phys_addr_t)-PAGE_OFFSET);
		}
	}
	BUG_ON(mem_size == 0);

	set_max_mapnr(PFN_DOWN(mem_size));
	max_low_pfn = pfn_base + PFN_DOWN(mem_size);

#ifdef CONFIG_BLK_DEV_INITRD
	setup_initrd();
#endif /* CONFIG_BLK_DEV_INITRD */

	early_init_fdt_reserve_self();
	early_init_fdt_scan_reserved_mem();
	memblock_allow_resize();
	memblock_dump_all();
}
207
/*
 * Main architecture setup: early console, command-line assembly, memory
 * and paging bring-up, device-tree unflattening, SMP discovery and ISA
 * capability probing.
 */
void __init setup_arch(char **cmdline_p)
{
#if defined(CONFIG_HVC_RISCV_SBI)
	/* Get an SBI-backed console registered as early as possible. */
	if (likely(early_console == NULL)) {
		early_console = &riscv_sbi_early_console_dev;
		register_console(early_console);
	}
#endif

#ifdef CONFIG_CMDLINE_BOOL
#ifdef CONFIG_CMDLINE_OVERRIDE
	strlcpy(boot_command_line, builtin_cmdline, COMMAND_LINE_SIZE);
#else
	if (builtin_cmdline[0] != '\0') {
		/* Append bootloader command line to built-in */
		strlcat(builtin_cmdline, " ", COMMAND_LINE_SIZE);
		strlcat(builtin_cmdline, boot_command_line, COMMAND_LINE_SIZE);
		strlcpy(boot_command_line, builtin_cmdline, COMMAND_LINE_SIZE);
	}
#endif /* CONFIG_CMDLINE_OVERRIDE */
#endif /* CONFIG_CMDLINE_BOOL */
	*cmdline_p = boot_command_line;

	parse_early_param();

	/* Describe the kernel image layout to the core mm. */
	init_mm.start_code = (unsigned long) _stext;
	init_mm.end_code = (unsigned long) _etext;
	init_mm.end_data = (unsigned long) _edata;
	init_mm.brk = (unsigned long) _end;

	setup_bootmem();
	paging_init();
	unflatten_device_tree();

#ifdef CONFIG_SMP
	setup_smp();
#endif

#ifdef CONFIG_DUMMY_CONSOLE
	conswitchp = &dummy_con;
#endif

	/* Probe which ISA extensions the boot hart supports. */
	riscv_fill_hwcap();
}
252
/* Populate platform devices from the device tree once drivers can bind. */
static int __init riscv_device_init(void)
{
	return of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
}
subsys_initcall_sync(riscv_device_init);
diff --git a/arch/riscv/kernel/signal.c b/arch/riscv/kernel/signal.c
new file mode 100644
index 000000000000..718d0c984ef0
--- /dev/null
+++ b/arch/riscv/kernel/signal.c
@@ -0,0 +1,292 @@
1/*
2 * Copyright (C) 2009 Sunplus Core Technology Co., Ltd.
3 * Chen Liqin <liqin.chen@sunplusct.com>
4 * Lennox Wu <lennox.wu@sunplusct.com>
5 * Copyright (C) 2012 Regents of the University of California
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, see the file COPYING, or write
19 * to the Free Software Foundation, Inc.,
20 */
21
22#include <linux/signal.h>
23#include <linux/uaccess.h>
24#include <linux/syscalls.h>
25#include <linux/tracehook.h>
26#include <linux/linkage.h>
27
28#include <asm/ucontext.h>
29#include <asm/vdso.h>
30#include <asm/switch_to.h>
31#include <asm/csr.h>
32
33#define DEBUG_SIG 0
34
35struct rt_sigframe {
36 struct siginfo info;
37 struct ucontext uc;
38};
39
40static long restore_d_state(struct pt_regs *regs,
41 struct __riscv_d_ext_state __user *state)
42{
43 long err;
44 err = __copy_from_user(&current->thread.fstate, state, sizeof(*state));
45 if (likely(!err))
46 fstate_restore(current, regs);
47 return err;
48}
49
/*
 * Flush the live FPU registers into the task struct, then copy the
 * D-extension state out to the user signal frame.
 */
static long save_d_state(struct pt_regs *regs,
	struct __riscv_d_ext_state __user *state)
{
	fstate_save(current, regs);
	return __copy_to_user(state, &current->thread.fstate, sizeof(*state));
}
56
/*
 * Restore GPRs and FPU state from a user sigcontext.  Any nonzero word
 * in the reserved Q-extension area is rejected so the layout can be
 * extended later without old kernels silently ignoring unknown state.
 */
static long restore_sigcontext(struct pt_regs *regs,
	struct sigcontext __user *sc)
{
	long err;
	size_t i;
	/* sc_regs is structured the same as the start of pt_regs */
	err = __copy_from_user(regs, &sc->sc_regs, sizeof(sc->sc_regs));
	if (unlikely(err))
		return err;
	/* Restore the floating-point state. */
	err = restore_d_state(regs, &sc->sc_fpregs.d);
	if (unlikely(err))
		return err;
	/* We support no other extension state at this time. */
	for (i = 0; i < ARRAY_SIZE(sc->sc_fpregs.q.reserved); i++) {
		u32 value;
		err = __get_user(value, &sc->sc_fpregs.q.reserved[i]);
		if (unlikely(err))
			break;
		if (value != 0)
			return -EINVAL;
	}
	return err;
}
81
/*
 * rt_sigreturn system call: unwind the signal frame pushed by
 * setup_rt_frame(), restoring the blocked-signal mask, register state
 * and alternate stack.  A corrupt frame kills the task with SIGSEGV.
 */
SYSCALL_DEFINE0(rt_sigreturn)
{
	struct pt_regs *regs = current_pt_regs();
	struct rt_sigframe __user *frame;
	struct task_struct *task;
	sigset_t set;

	/* Always make any pending restarted system calls return -EINTR */
	current->restart_block.fn = do_no_restart_syscall;

	/* The frame sits at the user stack pointer (see setup_rt_frame). */
	frame = (struct rt_sigframe __user *)regs->sp;

	if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
		goto badframe;

	if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set)))
		goto badframe;

	set_current_blocked(&set);

	if (restore_sigcontext(regs, &frame->uc.uc_mcontext))
		goto badframe;

	if (restore_altstack(&frame->uc.uc_stack))
		goto badframe;

	/* Return the restored a0, not a fresh syscall result. */
	return regs->a0;

badframe:
	task = current;
	if (show_unhandled_signals) {
		pr_info_ratelimited(
			"%s[%d]: bad frame in %s: frame=%p pc=%p sp=%p\n",
			task->comm, task_pid_nr(task), __func__,
			frame, (void *)regs->sepc, (void *)regs->sp);
	}
	force_sig(SIGSEGV, task);
	return 0;
}
121
/*
 * Save GPRs and FPU state into the user sigcontext; the reserved
 * Q-extension area is zeroed so restore_sigcontext() will accept it.
 */
static long setup_sigcontext(struct rt_sigframe __user *frame,
	struct pt_regs *regs)
{
	struct sigcontext __user *sc = &frame->uc.uc_mcontext;
	long err;
	size_t i;
	/* sc_regs is structured the same as the start of pt_regs */
	err = __copy_to_user(&sc->sc_regs, regs, sizeof(sc->sc_regs));
	/* Save the floating-point state. */
	err |= save_d_state(regs, &sc->sc_fpregs.d);
	/* We support no other extension state at this time. */
	for (i = 0; i < ARRAY_SIZE(sc->sc_fpregs.q.reserved); i++)
		err |= __put_user(0, &sc->sc_fpregs.q.reserved[i]);
	return err;
}
137
/*
 * Pick the user stack address for a new signal frame, honouring
 * SA_ONSTACK via sigsp(), and 16-byte align the result.
 */
static inline void __user *get_sigframe(struct ksignal *ksig,
	struct pt_regs *regs, size_t framesize)
{
	unsigned long sp;
	/* Default to using normal stack */
	sp = regs->sp;

	/*
	 * If we are on the alternate signal stack and would overflow it, don't.
	 * Return an always-bogus address instead so we will die with SIGSEGV.
	 */
	if (on_sig_stack(sp) && !likely(on_sig_stack(sp - framesize)))
		return (void __user __force *)(-1UL);

	/* This is the X/Open sanctioned signal stack switching. */
	sp = sigsp(sp, ksig) - framesize;

	/* Align the stack frame. */
	sp &= ~0xfUL;

	return (void __user *)sp;
}
160
161
/*
 * Push an rt_sigframe (siginfo + ucontext) onto the chosen user stack
 * and redirect the task's registers so it resumes in the handler, with
 * the vDSO sigreturn trampoline as the return address.
 */
static int setup_rt_frame(struct ksignal *ksig, sigset_t *set,
	struct pt_regs *regs)
{
	struct rt_sigframe __user *frame;
	long err = 0;

	frame = get_sigframe(ksig, regs, sizeof(*frame));
	if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
		return -EFAULT;

	err |= copy_siginfo_to_user(&frame->info, &ksig->info);

	/* Create the ucontext. */
	err |= __put_user(0, &frame->uc.uc_flags);
	err |= __put_user(NULL, &frame->uc.uc_link);
	err |= __save_altstack(&frame->uc.uc_stack, regs->sp);
	err |= setup_sigcontext(frame, regs);
	err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
	if (err)
		return -EFAULT;

	/* Set up to return from userspace. */
	regs->ra = (unsigned long)VDSO_SYMBOL(
		current->mm->context.vdso, rt_sigreturn);

	/*
	 * Set up registers for signal handler.
	 * Registers that we don't modify keep the value they had from
	 * user-space at the time we took the signal.
	 * We always pass siginfo and mcontext, regardless of SA_SIGINFO,
	 * since some things rely on this (e.g. glibc's debug/segfault.c).
	 */
	regs->sepc = (unsigned long)ksig->ka.sa.sa_handler;
	regs->sp = (unsigned long)frame;
	regs->a0 = ksig->sig;                     /* a0: signal number */
	regs->a1 = (unsigned long)(&frame->info); /* a1: siginfo pointer */
	regs->a2 = (unsigned long)(&frame->uc);   /* a2: ucontext pointer */

#if DEBUG_SIG
	pr_info("SIG deliver (%s:%d): sig=%d pc=%p ra=%p sp=%p\n",
		current->comm, task_pid_nr(current), ksig->sig,
		(void *)regs->sepc, (void *)regs->ra, frame);
#endif

	return 0;
}
208
/*
 * Deliver one signal: fix up in-progress syscall restarting, build the
 * user signal frame, and report the outcome to the signal core.
 */
static void handle_signal(struct ksignal *ksig, struct pt_regs *regs)
{
	sigset_t *oldset = sigmask_to_save();
	int ret;

	/* Are we from a system call? */
	if (regs->scause == EXC_SYSCALL) {
		/* If so, check system call restarting.. */
		switch (regs->a0) {
		case -ERESTART_RESTARTBLOCK:
		case -ERESTARTNOHAND:
			regs->a0 = -EINTR;
			break;

		case -ERESTARTSYS:
			if (!(ksig->ka.sa.sa_flags & SA_RESTART)) {
				regs->a0 = -EINTR;
				break;
			}
			/* fallthrough */
		case -ERESTARTNOINTR:
			/* Rewind sepc over the 4-byte ecall so it re-executes. */
			regs->a0 = regs->orig_a0;
			regs->sepc -= 0x4;
			break;
		}
	}

	/* Set up the stack frame */
	ret = setup_rt_frame(ksig, oldset, regs);

	signal_setup_done(ret, ksig, 0);
}
241
/*
 * Check for and deliver a pending signal; when none is delivered,
 * arrange for interrupted syscalls to restart and put the saved signal
 * mask back.
 */
static void do_signal(struct pt_regs *regs)
{
	struct ksignal ksig;

	if (get_signal(&ksig)) {
		/* Actually deliver the signal */
		handle_signal(&ksig, regs);
		return;
	}

	/* Did we come from a system call? */
	if (regs->scause == EXC_SYSCALL) {
		/* Restart the system call - no handlers present */
		switch (regs->a0) {
		case -ERESTARTNOHAND:
		case -ERESTARTSYS:
		case -ERESTARTNOINTR:
			regs->a0 = regs->orig_a0;
			regs->sepc -= 0x4;
			break;
		case -ERESTART_RESTARTBLOCK:
			/* Re-enter via restart_syscall(); a7 carries the nr. */
			regs->a0 = regs->orig_a0;
			regs->a7 = __NR_restart_syscall;
			regs->sepc -= 0x4;
			break;
		}
	}

	/*
	 * If there is no signal to deliver, we just put the saved
	 * sigmask back.
	 */
	restore_saved_sigmask();
}
276
/*
 * notification of userspace execution resumption
 * - triggered by the _TIF_WORK_MASK flags
 */
asmlinkage void do_notify_resume(struct pt_regs *regs,
	unsigned long thread_info_flags)
{
	/* Handle pending signal delivery */
	if (thread_info_flags & _TIF_SIGPENDING)
		do_signal(regs);

	if (thread_info_flags & _TIF_NOTIFY_RESUME) {
		clear_thread_flag(TIF_NOTIFY_RESUME);
		tracehook_notify_resume(regs);
	}
}
diff --git a/arch/riscv/kernel/smp.c b/arch/riscv/kernel/smp.c
new file mode 100644
index 000000000000..b4a71ec5906f
--- /dev/null
+++ b/arch/riscv/kernel/smp.c
@@ -0,0 +1,110 @@
1/*
2 * SMP initialisation and IPI support
3 * Based on arch/arm64/kernel/smp.c
4 *
5 * Copyright (C) 2012 ARM Ltd.
6 * Copyright (C) 2015 Regents of the University of California
7 * Copyright (C) 2017 SiFive
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License version 2 as
11 * published by the Free Software Foundation.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program. If not, see <http://www.gnu.org/licenses/>.
20 */
21
22#include <linux/interrupt.h>
23#include <linux/smp.h>
24#include <linux/sched.h>
25
26#include <asm/sbi.h>
27#include <asm/tlbflush.h>
28#include <asm/cacheflush.h>
29
30/* A collection of single bit ipi messages. */
31static struct {
32 unsigned long bits ____cacheline_aligned;
33} ipi_data[NR_CPUS] __cacheline_aligned;
34
35enum ipi_message_type {
36 IPI_RESCHEDULE,
37 IPI_CALL_FUNC,
38 IPI_MAX
39};
40
/*
 * Software-interrupt handler: consume this hart's pending IPI bits and
 * dispatch each message type, looping until no new bits arrive between
 * the xchg and the re-check.
 */
irqreturn_t handle_ipi(void)
{
	unsigned long *pending_ipis = &ipi_data[smp_processor_id()].bits;

	/* Clear pending IPI */
	csr_clear(sip, SIE_SSIE);

	while (true) {
		unsigned long ops;

		/* Order bit clearing and data access. */
		mb();

		/* Atomically take all currently pending message bits. */
		ops = xchg(pending_ipis, 0);
		if (ops == 0)
			return IRQ_HANDLED;

		if (ops & (1 << IPI_RESCHEDULE))
			scheduler_ipi();

		if (ops & (1 << IPI_CALL_FUNC))
			generic_smp_call_function_interrupt();

		/* Any bit at or above IPI_MAX means a corrupted message word. */
		BUG_ON((ops >> IPI_MAX) != 0);

		/* Order data access and bit testing. */
		mb();
	}

	return IRQ_HANDLED;
}
72
/*
 * Post a message bit for each destination hart, then fire a software
 * interrupt at them through the SBI.  The barriers order the bit stores
 * against the IPI delivery.
 */
static void
send_ipi_message(const struct cpumask *to_whom, enum ipi_message_type operation)
{
	int i;

	mb();
	for_each_cpu(i, to_whom)
		set_bit(operation, &ipi_data[i].bits);

	mb();
	sbi_send_ipi(cpumask_bits(to_whom));
}
85
/* smp_call_function backend: IPI a set of cpus. */
void arch_send_call_function_ipi_mask(struct cpumask *mask)
{
	send_ipi_message(mask, IPI_CALL_FUNC);
}

/* smp_call_function_single backend: IPI one cpu. */
void arch_send_call_function_single_ipi(int cpu)
{
	send_ipi_message(cpumask_of(cpu), IPI_CALL_FUNC);
}

/* Park the calling hart forever; used by smp_send_stop(). */
static void ipi_stop(void *unused)
{
	while (1)
		wait_for_interrupt();
}

/* Spin every online cpu (including this one) in ipi_stop(). */
void smp_send_stop(void)
{
	on_each_cpu(ipi_stop, NULL, 1);
}

/* Scheduler hook: poke a remote cpu to re-run its scheduler. */
void smp_send_reschedule(int cpu)
{
	send_ipi_message(cpumask_of(cpu), IPI_RESCHEDULE);
}
diff --git a/arch/riscv/kernel/smpboot.c b/arch/riscv/kernel/smpboot.c
new file mode 100644
index 000000000000..f741458c5a3f
--- /dev/null
+++ b/arch/riscv/kernel/smpboot.c
@@ -0,0 +1,114 @@
1/*
2 * SMP initialisation and IPI support
3 * Based on arch/arm64/kernel/smp.c
4 *
5 * Copyright (C) 2012 ARM Ltd.
6 * Copyright (C) 2015 Regents of the University of California
7 * Copyright (C) 2017 SiFive
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License version 2 as
11 * published by the Free Software Foundation.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 */
18
19#include <linux/module.h>
20#include <linux/init.h>
21#include <linux/kernel.h>
22#include <linux/mm.h>
23#include <linux/sched.h>
24#include <linux/kernel_stat.h>
25#include <linux/notifier.h>
26#include <linux/cpu.h>
27#include <linux/percpu.h>
28#include <linux/delay.h>
29#include <linux/err.h>
30#include <linux/irq.h>
31#include <linux/of.h>
32#include <linux/sched/task_stack.h>
33#include <asm/irq.h>
34#include <asm/mmu_context.h>
35#include <asm/tlbflush.h>
36#include <asm/sections.h>
37#include <asm/sbi.h>
38
39void *__cpu_up_stack_pointer[NR_CPUS];
40void *__cpu_up_task_pointer[NR_CPUS];
41
/* Nothing to do: the boot hart needs no extra SMP preparation. */
void __init smp_prepare_boot_cpu(void)
{
}

/* Nothing to do: secondary harts boot on their own (see __cpu_up). */
void __init smp_prepare_cpus(unsigned int max_cpus)
{
}
49
/*
 * Walk the device tree "cpu" nodes, marking every reported hart as
 * possible and present.  BUGs unless the boot hart itself is found
 * exactly once.
 */
void __init setup_smp(void)
{
	struct device_node *dn = NULL;
	int hart, im_okay_therefore_i_am = 0;

	while ((dn = of_find_node_by_type(dn, "cpu"))) {
		hart = riscv_of_processor_hart(dn);
		if (hart >= 0) {
			set_cpu_possible(hart, true);
			set_cpu_present(hart, true);
			if (hart == smp_processor_id()) {
				BUG_ON(im_okay_therefore_i_am);
				im_okay_therefore_i_am = 1;
			}
		}
	}

	BUG_ON(!im_okay_therefore_i_am);
}
69
/*
 * Bring one secondary hart online by publishing its stack and task
 * pointers, then wait for it to mark itself online (see smp_callin).
 */
int __cpu_up(unsigned int cpu, struct task_struct *tidle)
{
	tidle->thread_info.cpu = cpu;

	/*
	 * On RISC-V systems, all harts boot on their own accord.  Our _start
	 * selects the first hart to boot the kernel and causes the remainder
	 * of the harts to spin in a loop waiting for their stack pointer to be
	 * setup by that main hart.  Writing __cpu_up_stack_pointer signals to
	 * the spinning harts that they can continue the boot process.
	 */
	smp_mb();	/* publish all prior init before the release stores below */
	__cpu_up_stack_pointer[cpu] = task_stack_page(tidle) + THREAD_SIZE;
	__cpu_up_task_pointer[cpu] = tidle;

	while (!cpu_online(cpu))
		cpu_relax();

	return 0;
}
90
/* All cpus are up; nothing architecture-specific left to finish. */
void __init smp_cpus_done(unsigned int max_cpus)
{
}
94
/*
 * C entry point for a secondary processor.  Adopts init_mm, performs
 * per-hart init and enters the idle loop once marked online.
 */
asmlinkage void __init smp_callin(void)
{
	struct mm_struct *mm = &init_mm;

	/* All kernel threads share the same mm context. */
	atomic_inc(&mm->mm_count);	/* NOTE(review): later kernels spell this mmgrab() */
	current->active_mm = mm;

	trap_init();		/* presumably per-hart trap setup — see its definition */
	init_clockevent();
	notify_cpu_starting(smp_processor_id());
	set_cpu_online(smp_processor_id(), 1);
	local_flush_tlb_all();
	local_irq_enable();
	preempt_disable();
	cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
}
diff --git a/arch/riscv/kernel/stacktrace.c b/arch/riscv/kernel/stacktrace.c
new file mode 100644
index 000000000000..559aae781154
--- /dev/null
+++ b/arch/riscv/kernel/stacktrace.c
@@ -0,0 +1,177 @@
1/*
2 * Copyright (C) 2008 ARM Limited
3 * Copyright (C) 2014 Regents of the University of California
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 */
14
15#include <linux/export.h>
16#include <linux/kallsyms.h>
17#include <linux/sched.h>
18#include <linux/sched/debug.h>
19#include <linux/sched/task_stack.h>
20#include <linux/stacktrace.h>
21
22#ifdef CONFIG_FRAME_POINTER
23
/*
 * Frame record laid down by the function prologue; the unwinder reads it
 * at (fp - sizeof(struct stackframe)) — see walk_stackframe().
 */
struct stackframe {
	unsigned long fp;	/* caller's frame pointer */
	unsigned long ra;	/* return address into the caller */
};
28
/*
 * Frame-pointer based unwinder.  Invokes fn(pc, arg) for each frame,
 * stopping when fn returns true or when the chain leaves kernel text or
 * the current kernel stack.  Starting state comes from @regs if given,
 * from our own frame for the current task, or from the saved context of
 * a task blocked in __switch_to.
 */
static void notrace walk_stackframe(struct task_struct *task,
	struct pt_regs *regs, bool (*fn)(unsigned long, void *), void *arg)
{
	unsigned long fp, sp, pc;

	if (regs) {
		/* Unwind from an exception frame. */
		fp = GET_FP(regs);
		sp = GET_USP(regs);
		pc = GET_IP(regs);
	} else if (task == NULL || task == current) {
		/* Unwind ourselves, starting at this very function. */
		const register unsigned long current_sp __asm__ ("sp");
		fp = (unsigned long)__builtin_frame_address(0);
		sp = current_sp;
		pc = (unsigned long)walk_stackframe;
	} else {
		/* task blocked in __switch_to */
		fp = task->thread.s[0];
		sp = task->thread.sp;
		pc = task->thread.ra;
	}

	for (;;) {
		unsigned long low, high;
		struct stackframe *frame;

		/* Stop on a bogus PC or when the callback is satisfied. */
		if (unlikely(!__kernel_text_address(pc) || fn(pc, arg)))
			break;

		/* Validate frame pointer */
		low = sp + sizeof(struct stackframe);
		high = ALIGN(sp, THREAD_SIZE);	/* top of this kernel stack */
		if (unlikely(fp < low || fp > high || fp & 0x7))
			break;
		/* Unwind stack frame */
		frame = (struct stackframe *)fp - 1;	/* record sits just below fp */
		sp = fp;
		fp = frame->fp;
		/* Step back into the call insn (assumes a 4-byte call;
		 * NOTE(review): compressed (RVC) calls are 2 bytes — confirm). */
		pc = frame->ra - 0x4;
	}
}
69
70#else /* !CONFIG_FRAME_POINTER */
71
/*
 * Fallback unwinder for kernels built without frame pointers: linearly
 * scan the kernel stack and report every word that looks like a kernel
 * text address.  Inherently imprecise (may report stale return
 * addresses), but needs no frame records.
 */
static void notrace walk_stackframe(struct task_struct *task,
	struct pt_regs *regs, bool (*fn)(unsigned long, void *), void *arg)
{
	unsigned long sp, pc;
	unsigned long *ksp;

	if (regs) {
		sp = GET_USP(regs);
		pc = GET_IP(regs);
	} else if (task == NULL || task == current) {
		const register unsigned long current_sp __asm__ ("sp");
		sp = current_sp;
		pc = (unsigned long)walk_stackframe;
	} else {
		/* task blocked in __switch_to */
		sp = task->thread.sp;
		pc = task->thread.ra;
	}

	/* A misaligned sp means the saved context is garbage; bail out. */
	if (unlikely(sp & 0x7))
		return;

	ksp = (unsigned long *)sp;
	while (!kstack_end(ksp)) {
		/* Report only text addresses; stop when fn() says so. */
		if (__kernel_text_address(pc) && unlikely(fn(pc, arg)))
			break;
		/* Next candidate: the next stack word, adjusted into the call. */
		pc = (*ksp++) - 0x4;
	}
}
101
102#endif /* CONFIG_FRAME_POINTER */
103
104
/* walk_stackframe() callback: print one frame; never abort the walk. */
static bool print_trace_address(unsigned long pc, void *arg)
{
	print_ip_sym(pc);
	return false;
}
110
/* Dump a call trace for @task (or the current task if NULL); @sp is unused. */
void show_stack(struct task_struct *task, unsigned long *sp)
{
	pr_cont("Call Trace:\n");
	walk_stackframe(task, NULL, print_trace_address, NULL);
}
116
117
118static bool save_wchan(unsigned long pc, void *arg)
119{
120 if (!in_sched_functions(pc)) {
121 unsigned long *p = arg;
122 *p = pc;
123 return true;
124 }
125 return false;
126}
127
128unsigned long get_wchan(struct task_struct *task)
129{
130 unsigned long pc = 0;
131
132 if (likely(task && task != current && task->state != TASK_RUNNING))
133 walk_stackframe(task, NULL, save_wchan, &pc);
134 return pc;
135}
136
137
138#ifdef CONFIG_STACKTRACE
139
140static bool __save_trace(unsigned long pc, void *arg, bool nosched)
141{
142 struct stack_trace *trace = arg;
143
144 if (unlikely(nosched && in_sched_functions(pc)))
145 return false;
146 if (unlikely(trace->skip > 0)) {
147 trace->skip--;
148 return false;
149 }
150
151 trace->entries[trace->nr_entries++] = pc;
152 return (trace->nr_entries >= trace->max_entries);
153}
154
/* Unfiltered variant: record every frame (scheduler functions included). */
static bool save_trace(unsigned long pc, void *arg)
{
	return __save_trace(pc, arg, false);
}
159
160/*
161 * Save stack-backtrace addresses into a stack_trace buffer.
162 */
/*
 * Save stack-backtrace addresses into a stack_trace buffer.
 * The trace is terminated with ULONG_MAX when there is room, per the
 * stacktrace API convention of this kernel version.
 */
void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
{
	walk_stackframe(tsk, NULL, save_trace, trace);
	if (trace->nr_entries < trace->max_entries)
		trace->entries[trace->nr_entries++] = ULONG_MAX;
}
EXPORT_SYMBOL_GPL(save_stack_trace_tsk);
170
/* Save a backtrace of the current task (tsk == NULL means "current"). */
void save_stack_trace(struct stack_trace *trace)
{
	save_stack_trace_tsk(NULL, trace);
}
EXPORT_SYMBOL_GPL(save_stack_trace);
176
177#endif /* CONFIG_STACKTRACE */
diff --git a/arch/riscv/kernel/sys_riscv.c b/arch/riscv/kernel/sys_riscv.c
new file mode 100644
index 000000000000..4351be7d0533
--- /dev/null
+++ b/arch/riscv/kernel/sys_riscv.c
@@ -0,0 +1,49 @@
1/*
2 * Copyright (C) 2012 Regents of the University of California
3 * Copyright (C) 2014 Darius Rad <darius@bluespec.com>
4 * Copyright (C) 2017 SiFive
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation, version 2.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 */
15
16#include <linux/syscalls.h>
17#include <asm/cmpxchg.h>
18#include <asm/unistd.h>
19
20static long riscv_sys_mmap(unsigned long addr, unsigned long len,
21 unsigned long prot, unsigned long flags,
22 unsigned long fd, off_t offset,
23 unsigned long page_shift_offset)
24{
25 if (unlikely(offset & (~PAGE_MASK >> page_shift_offset)))
26 return -EINVAL;
27 return sys_mmap_pgoff(addr, len, prot, flags, fd,
28 offset >> (PAGE_SHIFT - page_shift_offset));
29}
30
#ifdef CONFIG_64BIT
/* 64-bit ABI: mmap takes a byte offset, which must be page aligned. */
SYSCALL_DEFINE6(mmap, unsigned long, addr, unsigned long, len,
	unsigned long, prot, unsigned long, flags,
	unsigned long, fd, off_t, offset)
{
	return riscv_sys_mmap(addr, len, prot, flags, fd, offset, 0);
}
#else
/* 32-bit ABI: mmap2 takes the offset in 4KiB units to widen its reach. */
SYSCALL_DEFINE6(mmap2, unsigned long, addr, unsigned long, len,
	unsigned long, prot, unsigned long, flags,
	unsigned long, fd, off_t, offset)
{
	/*
	 * Note that the shift for mmap2 is constant (12),
	 * regardless of PAGE_SIZE
	 */
	return riscv_sys_mmap(addr, len, prot, flags, fd, offset, 12);
}
#endif /* !CONFIG_64BIT */
diff --git a/arch/riscv/kernel/syscall_table.c b/arch/riscv/kernel/syscall_table.c
new file mode 100644
index 000000000000..4e30dc5fb593
--- /dev/null
+++ b/arch/riscv/kernel/syscall_table.c
@@ -0,0 +1,25 @@
1/*
2 * Copyright (C) 2009 Arnd Bergmann <arnd@arndb.de>
3 * Copyright (C) 2012 Regents of the University of California
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License
7 * as published by the Free Software Foundation, version 2.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 */
14
15#include <linux/linkage.h>
16#include <linux/syscalls.h>
17#include <asm-generic/syscalls.h>
18
/*
 * Redefine __SYSCALL so that re-including <asm/unistd.h> below expands
 * each syscall declaration into a designated initializer for its slot.
 */
#undef __SYSCALL
#define __SYSCALL(nr, call) [nr] = (call),

/* Syscall dispatch table; unimplemented numbers fall back to sys_ni_syscall. */
void *sys_call_table[__NR_syscalls] = {
	[0 ... __NR_syscalls - 1] = sys_ni_syscall,
#include <asm/unistd.h>
};
diff --git a/arch/riscv/kernel/time.c b/arch/riscv/kernel/time.c
new file mode 100644
index 000000000000..2463fcca719e
--- /dev/null
+++ b/arch/riscv/kernel/time.c
@@ -0,0 +1,61 @@
1/*
2 * Copyright (C) 2012 Regents of the University of California
3 * Copyright (C) 2017 SiFive
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License
7 * as published by the Free Software Foundation, version 2.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 */
14
15#include <linux/clocksource.h>
16#include <linux/clockchips.h>
17#include <linux/delay.h>
18
19#ifdef CONFIG_RISCV_TIMER
20#include <linux/timer_riscv.h>
21#endif
22
23#include <asm/sbi.h>
24
25unsigned long riscv_timebase;
26
27DECLARE_PER_CPU(struct clock_event_device, riscv_clock_event);
28
/* Called from the arch IRQ path on a supervisor timer interrupt. */
void riscv_timer_interrupt(void)
{
#ifdef CONFIG_RISCV_TIMER
	/*
	 * FIXME: This needs to be cleaned up along with the rest of the IRQ
	 * handling cleanup. See irq.c for more details.
	 */
	struct clock_event_device *evdev = this_cpu_ptr(&riscv_clock_event);

	/* Forward to the clockevent framework's registered handler. */
	evdev->event_handler(evdev);
#endif
}
41
/*
 * Per-hart clock event setup: probe DT timers and unmask the supervisor
 * timer interrupt (SIE.STIE).  Also called from smp_callin() for
 * secondary harts.
 */
void __init init_clockevent(void)
{
	timer_probe();
	csr_set(sie, SIE_STIE);
}
47
48void __init time_init(void)
49{
50 struct device_node *cpu;
51 u32 prop;
52
53 cpu = of_find_node_by_path("/cpus");
54 if (!cpu || of_property_read_u32(cpu, "timebase-frequency", &prop))
55 panic(KERN_WARNING "RISC-V system with no 'timebase-frequency' in DTS\n");
56 riscv_timebase = prop;
57
58 lpj_fine = riscv_timebase / HZ;
59
60 init_clockevent();
61}
diff --git a/arch/riscv/kernel/traps.c b/arch/riscv/kernel/traps.c
new file mode 100644
index 000000000000..93132cb59184
--- /dev/null
+++ b/arch/riscv/kernel/traps.c
@@ -0,0 +1,180 @@
1/*
2 * Copyright (C) 2012 Regents of the University of California
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation, version 2.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 */
13
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/sched/signal.h>
#include <linux/signal.h>
#include <linux/kdebug.h>
#include <linux/uaccess.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/irq.h>
#include <linux/string.h>

#include <asm/processor.h>
#include <asm/ptrace.h>
#include <asm/csr.h>
29
30int show_unhandled_signals = 1;
31
32extern asmlinkage void handle_exception(void);
33
34static DEFINE_SPINLOCK(die_lock);
35
/*
 * Kernel oops handler: serialize concurrent oopses, dump state, and
 * either kill the offending context or panic depending on configuration.
 * Does not return if we were in interrupt context or panic_on_oops is set.
 */
void die(struct pt_regs *regs, const char *str)
{
	static int die_counter;	/* distinguishes nested/repeated oopses */
	int ret;

	oops_enter();

	spin_lock_irq(&die_lock);
	console_verbose();
	bust_spinlocks(1);	/* let the dump reach the console */

	pr_emerg("%s [#%d]\n", str, ++die_counter);
	print_modules();
	show_regs(regs);

	/* Give debuggers (kgdb etc.) a chance to claim this oops. */
	ret = notify_die(DIE_OOPS, str, regs, 0, regs->scause, SIGSEGV);

	bust_spinlocks(0);
	add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
	spin_unlock_irq(&die_lock);
	oops_exit();

	if (in_interrupt())
		panic("Fatal exception in interrupt");
	if (panic_on_oops)
		panic("Fatal exception");
	if (ret != NOTIFY_STOP)
		do_exit(SIGSEGV);
}
65
66static inline void do_trap_siginfo(int signo, int code,
67 unsigned long addr, struct task_struct *tsk)
68{
69 siginfo_t info;
70
71 info.si_signo = signo;
72 info.si_errno = 0;
73 info.si_code = code;
74 info.si_addr = (void __user *)addr;
75 force_sig_info(signo, &info, tsk);
76}
77
/*
 * Deliver a trap-induced signal to @tsk, optionally logging a rate-limited
 * diagnostic for unhandled signals (controlled by show_unhandled_signals).
 */
void do_trap(struct pt_regs *regs, int signo, int code,
	unsigned long addr, struct task_struct *tsk)
{
	if (show_unhandled_signals && unhandled_signal(tsk, signo)
	    && printk_ratelimit()) {
		pr_info("%s[%d]: unhandled signal %d code 0x%x at 0x" REG_FMT,
			tsk->comm, task_pid_nr(tsk), signo, code, addr);
		/* Resolve the faulting PC to a VMA/file for the log line. */
		print_vma_addr(KERN_CONT " in ", GET_IP(regs));
		pr_cont("\n");
		show_regs(regs);
	}

	do_trap_siginfo(signo, code, addr, tsk);
}
92
93static void do_trap_error(struct pt_regs *regs, int signo, int code,
94 unsigned long addr, const char *str)
95{
96 if (user_mode(regs)) {
97 do_trap(regs, signo, code, addr, current);
98 } else {
99 if (!fixup_exception(regs))
100 die(regs, str);
101 }
102}
103
/*
 * Stamp out one asmlinkage trap handler per exception cause; each simply
 * funnels into do_trap_error() with its signal number, si_code and
 * human-readable description.  The entry points are referenced from the
 * exception vector in assembly.
 */
#define DO_ERROR_INFO(name, signo, code, str) \
asmlinkage void name(struct pt_regs *regs) \
{ \
	do_trap_error(regs, signo, code, regs->sepc, "Oops - " str); \
}

DO_ERROR_INFO(do_trap_unknown,
	SIGILL, ILL_ILLTRP, "unknown exception");
DO_ERROR_INFO(do_trap_insn_misaligned,
	SIGBUS, BUS_ADRALN, "instruction address misaligned");
DO_ERROR_INFO(do_trap_insn_fault,
	SIGSEGV, SEGV_ACCERR, "instruction access fault");
DO_ERROR_INFO(do_trap_insn_illegal,
	SIGILL, ILL_ILLOPC, "illegal instruction");
DO_ERROR_INFO(do_trap_load_misaligned,
	SIGBUS, BUS_ADRALN, "load address misaligned");
DO_ERROR_INFO(do_trap_load_fault,
	SIGSEGV, SEGV_ACCERR, "load access fault");
DO_ERROR_INFO(do_trap_store_misaligned,
	SIGBUS, BUS_ADRALN, "store (or AMO) address misaligned");
DO_ERROR_INFO(do_trap_store_fault,
	SIGSEGV, SEGV_ACCERR, "store (or AMO) access fault");
DO_ERROR_INFO(do_trap_ecall_u,
	SIGILL, ILL_ILLTRP, "environment call from U-mode");
DO_ERROR_INFO(do_trap_ecall_s,
	SIGILL, ILL_ILLTRP, "environment call from S-mode");
DO_ERROR_INFO(do_trap_ecall_m,
	SIGILL, ILL_ILLTRP, "environment call from M-mode");
132
/*
 * Breakpoint (ebreak) trap handler.  In the kernel, an ebreak may be a
 * BUG()/WARN() site; otherwise (and always for user mode) it is delivered
 * as SIGTRAP and execution resumes after the instruction.
 */
asmlinkage void do_trap_break(struct pt_regs *regs)
{
#ifdef CONFIG_GENERIC_BUG
	if (!user_mode(regs)) {
		enum bug_trap_type type;

		type = report_bug(regs->sepc, regs);
		switch (type) {
		case BUG_TRAP_TYPE_NONE:
			/* Not a BUG/WARN site; fall through to SIGTRAP below. */
			break;
		case BUG_TRAP_TYPE_WARN:
			/* WARN(): skip the ebreak and keep running. */
			regs->sepc += sizeof(bug_insn_t);
			return;
		case BUG_TRAP_TYPE_BUG:
			die(regs, "Kernel BUG");
		}
	}
#endif /* CONFIG_GENERIC_BUG */

	do_trap_siginfo(SIGTRAP, TRAP_BRKPT, regs->sepc, current);
	/* Resume after the trapping instruction (assumes the 4-byte ebreak;
	 * NOTE(review): compressed c.ebreak is 2 bytes — confirm). */
	regs->sepc += 0x4;
}
155
#ifdef CONFIG_GENERIC_BUG
/*
 * Used by report_bug(): decide whether @pc points at a genuine BUG trap
 * instruction in kernel space.
 */
int is_valid_bugaddr(unsigned long pc)
{
	bug_insn_t insn;

	/* Userspace addresses can never hold kernel BUG sites. */
	if (pc < PAGE_OFFSET)
		return 0;
	/* Fault-safe read — the address may be unmapped. */
	if (probe_kernel_address((bug_insn_t __user *)pc, insn))
		return 0;

	return insn == __BUG_INSN;
}
#endif /* CONFIG_GENERIC_BUG */
168
/*
 * Per-hart trap setup: point stvec at the common exception entry and
 * enable interrupts.  Also called from smp_callin() on secondary harts.
 */
void __init trap_init(void)
{
	/*
	 * Set sup0 scratch register to 0, indicating to exception vector
	 * that we are presently executing in the kernel
	 */
	csr_write(sscratch, 0);
	/* Set the exception vector address */
	csr_write(stvec, &handle_exception);
	/* Enable all interrupts */
	csr_write(sie, -1);
}
diff --git a/arch/riscv/kernel/vdso.c b/arch/riscv/kernel/vdso.c
new file mode 100644
index 000000000000..e8a178df8144
--- /dev/null
+++ b/arch/riscv/kernel/vdso.c
@@ -0,0 +1,125 @@
1/*
2 * Copyright (C) 2004 Benjamin Herrenschmidt, IBM Corp.
3 * <benh@kernel.crashing.org>
4 * Copyright (C) 2012 ARM Limited
5 * Copyright (C) 2015 Regents of the University of California
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program. If not, see <http://www.gnu.org/licenses/>.
18 */
19
20#include <linux/mm.h>
21#include <linux/slab.h>
22#include <linux/binfmts.h>
23#include <linux/err.h>
24
25#include <asm/vdso.h>
26
27extern char vdso_start[], vdso_end[];
28
29static unsigned int vdso_pages;
30static struct page **vdso_pagelist;
31
32/*
33 * The vDSO data page.
34 */
35static union {
36 struct vdso_data data;
37 u8 page[PAGE_SIZE];
38} vdso_data_store __page_aligned_data;
39struct vdso_data *vdso_data = &vdso_data_store.data;
40
/*
 * Build the page list for the vDSO mapping: the code pages from the
 * embedded vdso image, plus one trailing entry for the shared data page.
 * Returns 0 on success, -ENOMEM if the page list cannot be allocated.
 */
static int __init vdso_init(void)
{
	unsigned int i;

	vdso_pages = (vdso_end - vdso_start) >> PAGE_SHIFT;
	/* +1 slot for the vdso_data page appended after the code pages. */
	vdso_pagelist =
		kcalloc(vdso_pages + 1, sizeof(struct page *), GFP_KERNEL);
	if (unlikely(vdso_pagelist == NULL)) {
		pr_err("vdso: pagelist allocation failed\n");
		return -ENOMEM;
	}

	for (i = 0; i < vdso_pages; i++) {
		struct page *pg;

		pg = virt_to_page(vdso_start + (i << PAGE_SHIFT));
		/* Allow these kernel-image pages to be mapped to userspace. */
		ClearPageReserved(pg);
		vdso_pagelist[i] = pg;
	}
	vdso_pagelist[i] = virt_to_page(vdso_data);

	return 0;
}
arch_initcall(vdso_init);
65
/*
 * Map the vDSO (code pages + data page) into a new process's address
 * space at exec time.  Returns 0 on success or a negative errno.
 */
int arch_setup_additional_pages(struct linux_binprm *bprm,
	int uses_interp)
{
	struct mm_struct *mm = current->mm;
	unsigned long vdso_base, vdso_len;
	int ret;

	vdso_len = (vdso_pages + 1) << PAGE_SHIFT;

	down_write(&mm->mmap_sem);
	vdso_base = get_unmapped_area(NULL, 0, vdso_len, 0, 0);
	if (unlikely(IS_ERR_VALUE(vdso_base))) {
		ret = vdso_base;
		goto end;
	}

	/*
	 * Put vDSO base into mm struct. We need to do this before calling
	 * install_special_mapping or the perf counter mmap tracking code
	 * will fail to recognise it as a vDSO (since arch_vma_name fails).
	 */
	mm->context.vdso = (void *)vdso_base;

	ret = install_special_mapping(mm, vdso_base, vdso_len,
		(VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC),
		vdso_pagelist);

	if (unlikely(ret))
		mm->context.vdso = NULL;	/* roll back on failure */

end:
	up_write(&mm->mmap_sem);
	return ret;
}
100
101const char *arch_vma_name(struct vm_area_struct *vma)
102{
103 if (vma->vm_mm && (vma->vm_start == (long)vma->vm_mm->context.vdso))
104 return "[vdso]";
105 return NULL;
106}
107
108/*
109 * Function stubs to prevent linker errors when AT_SYSINFO_EHDR is defined
110 */
111
/* RISC-V has no gate area; stub required when AT_SYSINFO_EHDR is defined. */
int in_gate_area_no_mm(unsigned long addr)
{
	return 0;
}
116
/* RISC-V has no gate area; always reports "not in gate area". */
int in_gate_area(struct mm_struct *mm, unsigned long addr)
{
	return 0;
}
121
/* RISC-V has no gate area; there is no gate VMA to return. */
struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
{
	return NULL;
}
diff --git a/arch/riscv/kernel/vdso/.gitignore b/arch/riscv/kernel/vdso/.gitignore
new file mode 100644
index 000000000000..97c2d69d0289
--- /dev/null
+++ b/arch/riscv/kernel/vdso/.gitignore
@@ -0,0 +1,2 @@
1vdso.lds
2*.tmp
diff --git a/arch/riscv/kernel/vdso/Makefile b/arch/riscv/kernel/vdso/Makefile
new file mode 100644
index 000000000000..523d0a8ac8db
--- /dev/null
+++ b/arch/riscv/kernel/vdso/Makefile
@@ -0,0 +1,63 @@
# Copied from arch/tile/kernel/vdso/Makefile

# Symbols present in the vdso
vdso-syms = rt_sigreturn

# Files to link into the vdso
obj-vdso = $(patsubst %, %.o, $(vdso-syms))

# Build rules
targets := $(obj-vdso) vdso.so vdso.so.dbg vdso.lds vdso-dummy.o
obj-vdso := $(addprefix $(obj)/, $(obj-vdso))

obj-y += vdso.o vdso-syms.o
CPPFLAGS_vdso.lds += -P -C -U$(ARCH)

# Disable gcov profiling for VDSO code
GCOV_PROFILE := n

# Force dependency
$(obj)/vdso.o: $(obj)/vdso.so

# link rule for the .so file, .lds has to be first
# NOTE(review): this SYSCFLAGS assignment is overwritten by the one below
# before any recipe runs (make expands recipe variables lazily) — confirm
# whether $(c_flags) was intended to survive.
SYSCFLAGS_vdso.so.dbg = $(c_flags)
$(obj)/vdso.so.dbg: $(src)/vdso.lds $(obj-vdso) FORCE
	$(call if_changed,vdsold)

# We also create a special relocatable object that should mirror the symbol
# table and layout of the linked DSO. With ld -R we can then refer to
# these symbols in the kernel code rather than hand-coded addresses.

SYSCFLAGS_vdso.so.dbg = -shared -s -Wl,-soname=linux-vdso.so.1 \
	$(call cc-ldoption, -Wl$(comma)--hash-style=both)
$(obj)/vdso-dummy.o: $(src)/vdso.lds $(obj)/rt_sigreturn.o FORCE
	$(call if_changed,vdsold)

LDFLAGS_vdso-syms.o := -r -R
$(obj)/vdso-syms.o: $(obj)/vdso-dummy.o FORCE
	$(call if_changed,ld)

# strip rule for the .so file
$(obj)/%.so: OBJCOPYFLAGS := -S
$(obj)/%.so: $(obj)/%.so.dbg FORCE
	$(call if_changed,objcopy)

# actual build commands
# The DSO images are built using a special linker script
# Add -lgcc so rv32 gets static muldi3 and lshrdi3 definitions.
# Make sure only to export the intended __vdso_xxx symbol offsets.
quiet_cmd_vdsold = VDSOLD  $@
      cmd_vdsold = $(CC) $(KCFLAGS) -nostdlib $(SYSCFLAGS_$(@F)) \
                           -Wl,-T,$(filter-out FORCE,$^) -o $@.tmp -lgcc && \
                   $(CROSS_COMPILE)objcopy \
                           $(patsubst %, -G __vdso_%, $(vdso-syms)) $@.tmp $@

# install commands for the unstripped file
quiet_cmd_vdso_install = INSTALL $@
      cmd_vdso_install = cp $(obj)/$@.dbg $(MODLIB)/vdso/$@

vdso.so: $(obj)/vdso.so.dbg
	@mkdir -p $(MODLIB)/vdso
	$(call cmd,vdso_install)

vdso_install: vdso.so
diff --git a/arch/riscv/kernel/vdso/rt_sigreturn.S b/arch/riscv/kernel/vdso/rt_sigreturn.S
new file mode 100644
index 000000000000..f5aa3d72acfb
--- /dev/null
+++ b/arch/riscv/kernel/vdso/rt_sigreturn.S
@@ -0,0 +1,24 @@
1/*
2 * Copyright (C) 2014 Regents of the University of California
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation, version 2.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 */
13
14#include <linux/linkage.h>
15#include <asm/unistd.h>
16
	.text
/* Userspace signal trampoline: the kernel points ra here so that
 * returning from a signal handler issues the rt_sigreturn syscall. */
ENTRY(__vdso_rt_sigreturn)
	.cfi_startproc
	.cfi_signal_frame
	li a7, __NR_rt_sigreturn
	scall
	.cfi_endproc
ENDPROC(__vdso_rt_sigreturn)
diff --git a/arch/riscv/kernel/vdso/vdso.S b/arch/riscv/kernel/vdso/vdso.S
new file mode 100644
index 000000000000..7055de5f9174
--- /dev/null
+++ b/arch/riscv/kernel/vdso/vdso.S
@@ -0,0 +1,27 @@
1/*
2 * Copyright (C) 2014 Regents of the University of California
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation, version 2.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 */
13
14#include <linux/init.h>
15#include <linux/linkage.h>
16#include <asm/page.h>
17
	__PAGE_ALIGNED_DATA

	/* Embed the prebuilt vdso.so image into the kernel, page aligned,
	 * bracketed by the vdso_start/vdso_end symbols used by vdso.c. */
	.globl vdso_start, vdso_end
	.balign PAGE_SIZE
vdso_start:
	.incbin "arch/riscv/kernel/vdso/vdso.so"
	.balign PAGE_SIZE
vdso_end:

	.previous
diff --git a/arch/riscv/kernel/vdso/vdso.lds.S b/arch/riscv/kernel/vdso/vdso.lds.S
new file mode 100644
index 000000000000..8c9dce95c11d
--- /dev/null
+++ b/arch/riscv/kernel/vdso/vdso.lds.S
@@ -0,0 +1,77 @@
1/*
2 * Copyright (C) 2012 Regents of the University of California
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation, version 2.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 */
13
OUTPUT_ARCH(riscv)

SECTIONS
{
	. = SIZEOF_HEADERS;

	.hash		: { *(.hash) }			:text
	.gnu.hash	: { *(.gnu.hash) }
	.dynsym		: { *(.dynsym) }
	.dynstr		: { *(.dynstr) }
	.gnu.version	: { *(.gnu.version) }
	.gnu.version_d	: { *(.gnu.version_d) }
	.gnu.version_r	: { *(.gnu.version_r) }

	.note		: { *(.note.*) }		:text	:note
	.dynamic	: { *(.dynamic) }		:text	:dynamic

	.eh_frame_hdr	: { *(.eh_frame_hdr) }		:text	:eh_frame_hdr
	.eh_frame	: { KEEP (*(.eh_frame)) }	:text

	.rodata		: { *(.rodata .rodata.* .gnu.linkonce.r.*) }

	/*
	 * This linker script is used both with -r and with -shared.
	 * For the layouts to match, we need to skip more than enough
	 * space for the dynamic symbol table, etc. If this amount is
	 * insufficient, ld -shared will error; simply increase it here.
	 */
	. = 0x800;
	.text		: { *(.text .text.*) }		:text

	.data		: {
		*(.got.plt) *(.got)
		*(.data .data.* .gnu.linkonce.d.*)
		*(.dynbss)
		*(.bss .bss.* .gnu.linkonce.b.*)
	}
}

/*
 * We must supply the ELF program headers explicitly to get just one
 * PT_LOAD segment, and set the flags explicitly to make segments read-only.
 */
PHDRS
{
	text		PT_LOAD		FLAGS(5) FILEHDR PHDRS; /* PF_R|PF_X */
	dynamic		PT_DYNAMIC	FLAGS(4);		/* PF_R */
	note		PT_NOTE		FLAGS(4);		/* PF_R */
	eh_frame_hdr	PT_GNU_EH_FRAME;
}

/*
 * This controls what symbols we export from the DSO.
 */
VERSION
{
	LINUX_4.15 {
	global:
		__vdso_rt_sigreturn;
		__vdso_cmpxchg32;
		__vdso_cmpxchg64;
	local: *;
	};
}
diff --git a/arch/riscv/kernel/vmlinux.lds.S b/arch/riscv/kernel/vmlinux.lds.S
new file mode 100644
index 000000000000..ece84991609c
--- /dev/null
+++ b/arch/riscv/kernel/vmlinux.lds.S
@@ -0,0 +1,92 @@
1/*
2 * Copyright (C) 2012 Regents of the University of California
3 * Copyright (C) 2017 SiFive
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License
7 * as published by the Free Software Foundation, version 2.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 */
14
#define LOAD_OFFSET PAGE_OFFSET
/*
 * The section macros used below (HEAD_TEXT_SECTION, TEXT_TEXT,
 * PERCPU_SECTION, RO/RW_DATA_SECTION, ...) are provided by the generic
 * header — <asm/vmlinux.lds.h> does not exist.
 */
#include <asm-generic/vmlinux.lds.h>
#include <asm/page.h>
#include <asm/cache.h>
#include <asm/thread_info.h>

OUTPUT_ARCH(riscv)
ENTRY(_start)

jiffies = jiffies_64;

SECTIONS
{
	/* Beginning of code and text segment */
	. = LOAD_OFFSET;
	_start = .;
	__init_begin = .;
	HEAD_TEXT_SECTION
	INIT_TEXT_SECTION(PAGE_SIZE)
	INIT_DATA_SECTION(16)
	/* we have to discard exit text and such at runtime, not link time */
	.exit.text :
	{
		EXIT_TEXT
	}
	.exit.data :
	{
		EXIT_DATA
	}
	PERCPU_SECTION(L1_CACHE_BYTES)
	__init_end = .;

	.text : {
		_text = .;
		_stext = .;
		TEXT_TEXT
		SCHED_TEXT
		CPUIDLE_TEXT
		LOCK_TEXT
		KPROBES_TEXT
		ENTRY_TEXT
		IRQENTRY_TEXT
		*(.fixup)
		_etext = .;
	}

	/* Start of data section */
	_sdata = .;
	RO_DATA_SECTION(L1_CACHE_BYTES)
	.srodata : {
		*(.srodata*)
	}

	RW_DATA_SECTION(L1_CACHE_BYTES, PAGE_SIZE, THREAD_SIZE)
	.sdata : {
		/*
		 * gp points 0x800 past the start of .sdata so gp-relative
		 * addressing can reach +/-2KiB of small data around it.
		 */
		__global_pointer$ = . + 0x800;
		*(.sdata*)
		/* End of data section */
		_edata = .;
		*(.sbss*)
	}

	BSS_SECTION(0, 0, 0)

	EXCEPTION_TABLE(0x10)
	NOTES

	.rel.dyn : {
		*(.rel.dyn*)
	}

	_end = .;

	STABS_DEBUG
	DWARF_DEBUG

	DISCARDS
}
diff --git a/arch/riscv/lib/Makefile b/arch/riscv/lib/Makefile
new file mode 100644
index 000000000000..596c2ca40d63
--- /dev/null
+++ b/arch/riscv/lib/Makefile
@@ -0,0 +1,6 @@
# Arch library helpers: timed delays, string primitives, user copy.
lib-y += delay.o
lib-y += memcpy.o
lib-y += memset.o
lib-y += uaccess.o

# rv32 lacks native 64-bit divide; pull in the software implementation.
lib-$(CONFIG_32BIT) += udivdi3.o
diff --git a/arch/riscv/lib/delay.c b/arch/riscv/lib/delay.c
new file mode 100644
index 000000000000..1cc4ac3964b4
--- /dev/null
+++ b/arch/riscv/lib/delay.c
@@ -0,0 +1,110 @@
1/*
2 * Copyright (C) 2012 Regents of the University of California
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation, version 2.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 */
13
14#include <linux/delay.h>
15#include <linux/param.h>
16#include <linux/timex.h>
17#include <linux/export.h>
18
19/*
20 * This is copies from arch/arm/include/asm/delay.h
21 *
22 * Loop (or tick) based delay:
23 *
24 * loops = loops_per_jiffy * jiffies_per_sec * delay_us / us_per_sec
25 *
26 * where:
27 *
28 * jiffies_per_sec = HZ
29 * us_per_sec = 1000000
30 *
31 * Therefore the constant part is HZ / 1000000 which is a small
32 * fractional number. To make this usable with integer math, we
33 * scale up this constant by 2^31, perform the actual multiplication,
34 * and scale the result back down by 2^31 with a simple shift:
35 *
36 * loops = (loops_per_jiffy * delay_us * UDELAY_MULT) >> 31
37 *
38 * where:
39 *
40 * UDELAY_MULT = 2^31 * HZ / 1000000
41 * = (2^31 / 1000000) * HZ
42 * = 2147.483648 * HZ
43 * = 2147 * HZ + 483648 * HZ / 1000000
44 *
45 * 31 is the biggest scale shift value that won't overflow 32 bits for
46 * delay_us * UDELAY_MULT assuming HZ <= 1000 and delay_us <= 2000.
47 */
48#define MAX_UDELAY_US 2000
49#define MAX_UDELAY_HZ 1000
50#define UDELAY_MULT (2147UL * HZ + 483648UL * HZ / 1000000UL)
51#define UDELAY_SHIFT 31
52
53#if HZ > MAX_UDELAY_HZ
54#error "HZ > MAX_UDELAY_HZ"
55#endif
56
57/*
58 * RISC-V supports both UDELAY and NDELAY. This is largely the same as above,
59 * but with different constants. I added 10 bits to the shift to get this, but
60 * the result is that I need a 64-bit multiply, which is slow on 32-bit
61 * platforms.
62 *
63 * NDELAY_MULT = 2^41 * HZ / 1000000000
64 * = (2^41 / 1000000000) * HZ
65 * = 2199.02325555 * HZ
66 * = 2199 * HZ + 23255550 * HZ / 1000000000
67 *
68 * The maximum here is to avoid 64-bit overflow, but it isn't checked as it
69 * won't happen.
70 */
71#define MAX_NDELAY_NS (1ULL << 42)
72#define MAX_NDELAY_HZ MAX_UDELAY_HZ
73#define NDELAY_MULT ((unsigned long long)(2199ULL * HZ + 23255550ULL * HZ / 1000000000ULL))
74#define NDELAY_SHIFT 41
75
76#if HZ > MAX_NDELAY_HZ
77#error "HZ > MAX_NDELAY_HZ"
78#endif
79
80void __delay(unsigned long cycles)
81{
82 u64 t0 = get_cycles();
83
84 while ((unsigned long)(get_cycles() - t0) < cycles)
85 cpu_relax();
86}
87
88void udelay(unsigned long usecs)
89{
90 unsigned long ucycles = usecs * lpj_fine * UDELAY_MULT;
91
92 if (unlikely(usecs > MAX_UDELAY_US)) {
93 __delay((u64)usecs * riscv_timebase / 1000000ULL);
94 return;
95 }
96
97 __delay(ucycles >> UDELAY_SHIFT);
98}
99EXPORT_SYMBOL(udelay);
100
101void ndelay(unsigned long nsecs)
102{
103 /*
104 * This doesn't bother checking for overflow, as it won't happen (it's
105 * an hour) of delay.
106 */
107 unsigned long long ncycles = nsecs * lpj_fine * NDELAY_MULT;
108 __delay(ncycles >> NDELAY_SHIFT);
109}
110EXPORT_SYMBOL(ndelay);
diff --git a/arch/riscv/lib/memcpy.S b/arch/riscv/lib/memcpy.S
new file mode 100644
index 000000000000..80f9c1a5c598
--- /dev/null
+++ b/arch/riscv/lib/memcpy.S
@@ -0,0 +1,115 @@
1/*
2 * Copyright (C) 2013 Regents of the University of California
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation, version 2.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 */
13
14#include <linux/linkage.h>
15#include <asm/asm.h>
16
17/* void *memcpy(void *, const void *, size_t) */
/*
 * void *memcpy(void *dest, const void *src, size_t n)
 *
 * Register roles:
 *   a0 - original dest (preserved, returned)
 *   t6 - dest cursor
 *   a1 - src cursor
 *   a2 - remaining byte count
 *   a3 - loop bound / scratch
 */
ENTRY(memcpy)
	move t6, a0  /* Preserve return value */

	/* Defer to byte-oriented copy for small sizes */
	sltiu a3, a2, 128
	bnez a3, 4f
	/* Use word-oriented copy only if low-order bits match */
	andi a3, t6, SZREG-1
	andi a4, a1, SZREG-1
	bne a3, a4, 4f

	beqz a3, 2f  /* Skip if already aligned */
	/*
	 * Round to nearest double word-aligned address
	 * greater than or equal to start address
	 */
	andi a3, a1, ~(SZREG-1)
	addi a3, a3, SZREG
	/* Handle initial misalignment: byte-copy up to the alignment point */
	sub a4, a3, a1
1:
	lb a5, 0(a1)
	addi a1, a1, 1
	sb a5, 0(t6)
	addi t6, t6, 1
	bltu a1, a3, 1b
	sub a2, a2, a4  /* Update count */

2:
	/* Main loop: 16 registers' worth (16*SZREG bytes) per iteration */
	andi a4, a2, ~((16*SZREG)-1)
	beqz a4, 4f
	add a3, a1, a4
3:
	REG_L a4, 0(a1)
	REG_L a5, SZREG(a1)
	REG_L a6, 2*SZREG(a1)
	REG_L a7, 3*SZREG(a1)
	REG_L t0, 4*SZREG(a1)
	REG_L t1, 5*SZREG(a1)
	REG_L t2, 6*SZREG(a1)
	REG_L t3, 7*SZREG(a1)
	REG_L t4, 8*SZREG(a1)
	REG_L t5, 9*SZREG(a1)
	REG_S a4, 0(t6)
	REG_S a5, SZREG(t6)
	REG_S a6, 2*SZREG(t6)
	REG_S a7, 3*SZREG(t6)
	REG_S t0, 4*SZREG(t6)
	REG_S t1, 5*SZREG(t6)
	REG_S t2, 6*SZREG(t6)
	REG_S t3, 7*SZREG(t6)
	REG_S t4, 8*SZREG(t6)
	REG_S t5, 9*SZREG(t6)
	REG_L a4, 10*SZREG(a1)
	REG_L a5, 11*SZREG(a1)
	REG_L a6, 12*SZREG(a1)
	REG_L a7, 13*SZREG(a1)
	REG_L t0, 14*SZREG(a1)
	REG_L t1, 15*SZREG(a1)
	addi a1, a1, 16*SZREG
	REG_S a4, 10*SZREG(t6)
	REG_S a5, 11*SZREG(t6)
	REG_S a6, 12*SZREG(t6)
	REG_S a7, 13*SZREG(t6)
	REG_S t0, 14*SZREG(t6)
	REG_S t1, 15*SZREG(t6)
	addi t6, t6, 16*SZREG
	bltu a1, a3, 3b
	andi a2, a2, (16*SZREG)-1  /* Update count */

4:
	/* Handle trailing misalignment */
	beqz a2, 6f
	add a3, a1, a2

	/*
	 * Use word-oriented copy if co-aligned to word boundary.
	 * Including a3 in the test also guarantees the remainder is a
	 * multiple of 4, so loop 7 terminates exactly at a3.
	 */
	or a5, a1, t6
	or a5, a5, a3
	andi a5, a5, 3
	bnez a5, 5f
7:
	lw a4, 0(a1)
	addi a1, a1, 4
	sw a4, 0(t6)
	addi t6, t6, 4
	bltu a1, a3, 7b

	ret

5:
	/* Fallback: byte-at-a-time copy of the remainder */
	lb a4, 0(a1)
	addi a1, a1, 1
	sb a4, 0(t6)
	addi t6, t6, 1
	bltu a1, a3, 5b
6:
	ret
END(memcpy)
diff --git a/arch/riscv/lib/memset.S b/arch/riscv/lib/memset.S
new file mode 100644
index 000000000000..a790107cf4c9
--- /dev/null
+++ b/arch/riscv/lib/memset.S
@@ -0,0 +1,120 @@
1/*
2 * Copyright (C) 2013 Regents of the University of California
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation, version 2.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 */
13
14
15#include <linux/linkage.h>
16#include <asm/asm.h>
17
18/* void *memset(void *, int, size_t) */
/*
 * void *memset(void *dest, int c, size_t n)
 *
 * a0 - original dest (preserved, returned); t0 - cursor; a1 - fill
 * value (later broadcast to a full register); a2 - remaining count.
 */
ENTRY(memset)
	move t0, a0  /* Preserve return value */

	/* Defer to byte-oriented fill for small sizes */
	sltiu a3, a2, 16
	bnez a3, 4f

	/*
	 * Round to nearest XLEN-aligned address
	 * greater than or equal to start address
	 */
	addi a3, t0, SZREG-1
	andi a3, a3, ~(SZREG-1)
	beq a3, t0, 2f  /* Skip if already aligned */
	/* Handle initial misalignment with byte stores */
	sub a4, a3, t0
1:
	sb a1, 0(t0)
	addi t0, t0, 1
	bltu t0, a3, 1b
	sub a2, a2, a4  /* Update count */

2: /* Duff's device with 32 XLEN stores per iteration */
	/* Broadcast value into all bytes of a1 */
	andi a1, a1, 0xff
	slli a3, a1, 8
	or a1, a3, a1
	slli a3, a1, 16
	or a1, a3, a1
#ifdef CONFIG_64BIT
	slli a3, a1, 32
	or a1, a3, a1
#endif

	/* Calculate end address of the register-sized region */
	andi a4, a2, ~(SZREG-1)
	add a3, t0, a4

	andi a4, a4, 31*SZREG  /* Calculate remainder */
	beqz a4, 3f            /* Shortcut if no remainder */
	neg a4, a4
	addi a4, a4, 32*SZREG  /* Calculate initial offset */

	/* Adjust start address with offset so the loop ends exactly at a3 */
	sub t0, t0, a4

	/*
	 * Jump into loop body.
	 * Assumes 32-bit instruction lengths: each REG_S below is one
	 * 4-byte instruction, so the byte offset a4 (in units of SZREG
	 * per store) maps to a4 instruction bytes on rv32; on rv64 a
	 * store covers 8 data bytes, hence the offset is halved.
	 */
	la a5, 3f
#ifdef CONFIG_64BIT
	srli a4, a4, 1
#endif
	add a5, a5, a4
	jr a5
3:
	REG_S a1, 0(t0)
	REG_S a1, SZREG(t0)
	REG_S a1, 2*SZREG(t0)
	REG_S a1, 3*SZREG(t0)
	REG_S a1, 4*SZREG(t0)
	REG_S a1, 5*SZREG(t0)
	REG_S a1, 6*SZREG(t0)
	REG_S a1, 7*SZREG(t0)
	REG_S a1, 8*SZREG(t0)
	REG_S a1, 9*SZREG(t0)
	REG_S a1, 10*SZREG(t0)
	REG_S a1, 11*SZREG(t0)
	REG_S a1, 12*SZREG(t0)
	REG_S a1, 13*SZREG(t0)
	REG_S a1, 14*SZREG(t0)
	REG_S a1, 15*SZREG(t0)
	REG_S a1, 16*SZREG(t0)
	REG_S a1, 17*SZREG(t0)
	REG_S a1, 18*SZREG(t0)
	REG_S a1, 19*SZREG(t0)
	REG_S a1, 20*SZREG(t0)
	REG_S a1, 21*SZREG(t0)
	REG_S a1, 22*SZREG(t0)
	REG_S a1, 23*SZREG(t0)
	REG_S a1, 24*SZREG(t0)
	REG_S a1, 25*SZREG(t0)
	REG_S a1, 26*SZREG(t0)
	REG_S a1, 27*SZREG(t0)
	REG_S a1, 28*SZREG(t0)
	REG_S a1, 29*SZREG(t0)
	REG_S a1, 30*SZREG(t0)
	REG_S a1, 31*SZREG(t0)
	addi t0, t0, 32*SZREG
	bltu t0, a3, 3b
	andi a2, a2, SZREG-1  /* Update count */

4:
	/* Handle trailing misalignment with byte stores */
	beqz a2, 6f
	add a3, t0, a2
5:
	sb a1, 0(t0)
	addi t0, t0, 1
	bltu t0, a3, 5b
6:
	ret
END(memset)
diff --git a/arch/riscv/lib/uaccess.S b/arch/riscv/lib/uaccess.S
new file mode 100644
index 000000000000..58fb2877c865
--- /dev/null
+++ b/arch/riscv/lib/uaccess.S
@@ -0,0 +1,117 @@
1#include <linux/linkage.h>
2#include <asm/asm.h>
3#include <asm/csr.h>
4
5 .altmacro
6 .macro fixup op reg addr lbl
7 LOCAL _epc
8_epc:
9 \op \reg, \addr
10 .section __ex_table,"a"
11 .balign RISCV_SZPTR
12 RISCV_PTR _epc, \lbl
13 .previous
14 .endm
15
/*
 * unsigned long __copy_user(void *to, const void *from, unsigned long n)
 *
 * a0 - destination cursor, a1 - source cursor, a2 - byte count.
 * Returns 0 on success; on a faulting access the fixup handler at 10:
 * returns a nonzero residue.  User access is permitted by setting
 * sstatus.SUM for the duration of the copy.
 *
 * NOTE(review): a3 below is the *source* end address, but the shared
 * fixup returns a3 - a0 where a0 is the *destination* cursor; the
 * residue appears skewed by (src - dst) when the buffers differ —
 * confirm whether callers rely on an exact not-copied count.
 */
ENTRY(__copy_user)

	/* Enable access to user memory */
	li t6, SR_SUM
	csrs sstatus, t6

	add a3, a1, a2
	/* Use word-oriented copy only if low-order bits match */
	andi t0, a0, SZREG-1
	andi t1, a1, SZREG-1
	bne t0, t1, 2f

	addi t0, a1, SZREG-1
	andi t1, a3, ~(SZREG-1)
	andi t0, t0, ~(SZREG-1)
	/*
	 * a3: terminal address of source region
	 * t0: lowest XLEN-aligned address in source
	 * t1: highest XLEN-aligned address in source
	 */
	bgeu t0, t1, 2f  /* No aligned middle region at all */
	bltu a1, t0, 4f  /* Leading unaligned bytes first */
1:
	/* Word-at-a-time copy; each access covered by an extable entry */
	fixup REG_L, t2, (a1), 10f
	fixup REG_S, t2, (a0), 10f
	addi a1, a1, SZREG
	addi a0, a0, SZREG
	bltu a1, t1, 1b
2:
	bltu a1, a3, 5f

3:
	/* Disable access to user memory */
	csrc sstatus, t6
	li a0, 0
	ret
4: /* Edge case: unalignment (leading bytes up to t0) */
	fixup lbu, t2, (a1), 10f
	fixup sb, t2, (a0), 10f
	addi a1, a1, 1
	addi a0, a0, 1
	bltu a1, t0, 4b
	j 1b
5: /* Edge case: remainder (trailing bytes up to a3) */
	fixup lbu, t2, (a1), 10f
	fixup sb, t2, (a0), 10f
	addi a1, a1, 1
	addi a0, a0, 1
	bltu a1, a3, 5b
	j 3b
ENDPROC(__copy_user)
67
68
/*
 * unsigned long __clear_user(void *to, unsigned long n)
 *
 * Zero n bytes of user memory at a0.  Returns 0 on success; on a fault
 * the shared fixup returns a3 - a0, the number of bytes not cleared.
 * sstatus.SUM is set while the stores run.
 */
ENTRY(__clear_user)

	/* Enable access to user memory */
	li t6, SR_SUM
	csrs sstatus, t6

	add a3, a0, a1
	addi t0, a0, SZREG-1
	andi t1, a3, ~(SZREG-1)
	andi t0, t0, ~(SZREG-1)
	/*
	 * a3: terminal address of target region
	 * t0: lowest doubleword-aligned address in target region
	 * t1: highest doubleword-aligned address in target region
	 */
	bgeu t0, t1, 2f  /* No aligned middle region at all */
	bltu a0, t0, 4f  /* Leading unaligned bytes first */
1:
	/* Aligned word stores of zero */
	fixup REG_S, zero, (a0), 10f
	addi a0, a0, SZREG
	bltu a0, t1, 1b
2:
	bltu a0, a3, 5f

3:
	/* Disable access to user memory */
	csrc sstatus, t6
	li a0, 0
	ret
4: /* Edge case: unalignment (leading bytes up to t0) */
	fixup sb, zero, (a0), 10f
	addi a0, a0, 1
	bltu a0, t0, 4b
	j 1b
5: /* Edge case: remainder (trailing bytes up to a3) */
	fixup sb, zero, (a0), 10f
	addi a0, a0, 1
	bltu a0, a3, 5b
	j 3b
ENDPROC(__clear_user)
109
110 .section .fixup,"ax"
111 .balign 4
11210:
113 /* Disable access to user memory */
114 csrs sstatus, t6
115 sub a0, a3, a0
116 ret
117 .previous
diff --git a/arch/riscv/lib/udivdi3.S b/arch/riscv/lib/udivdi3.S
new file mode 100644
index 000000000000..cb01ae5b181a
--- /dev/null
+++ b/arch/riscv/lib/udivdi3.S
@@ -0,0 +1,38 @@
1/*
2 * Copyright (C) 2016-2017 Free Software Foundation, Inc.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation, version 2.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 */
13
/*
 * unsigned long long __udivdi3(unsigned long long a, unsigned long long b)
 *
 * Restoring shift-and-subtract division.  On entry a0 = dividend,
 * a1 = divisor; quotient is returned in a0.  Division by zero returns
 * all-ones (-1), matching the libgcc convention.
 */
	.globl __udivdi3
__udivdi3:
	mv a2, a1     /* a2 = working divisor */
	mv a1, a0     /* a1 = remainder, starts as the dividend */
	li a0, -1     /* preloaded divide-by-zero result */
	beqz a2, .L5
	li a3, 1      /* a3 = current quotient bit */
	bgeu a2, a1, .L2
.L1:
	/*
	 * Scale divisor (and bit mask) left until it reaches the
	 * dividend or its sign bit sets (blez catches the MSB).
	 */
	blez a2, .L2
	slli a2, a2, 1
	slli a3, a3, 1
	bgtu a1, a2, .L1
.L2:
	li a0, 0
.L3:
	/* Subtract the scaled divisor whenever it fits, set that bit */
	bltu a1, a2, .L4
	sub a1, a1, a2
	or a0, a0, a3
.L4:
	srli a3, a3, 1
	srli a2, a2, 1
	bnez a3, .L3
.L5:
	ret
diff --git a/arch/riscv/mm/Makefile b/arch/riscv/mm/Makefile
new file mode 100644
index 000000000000..81f7d9ce6d88
--- /dev/null
+++ b/arch/riscv/mm/Makefile
@@ -0,0 +1,4 @@
1obj-y += init.o
2obj-y += fault.o
3obj-y += extable.o
4obj-y += ioremap.o
diff --git a/arch/riscv/mm/extable.c b/arch/riscv/mm/extable.c
new file mode 100644
index 000000000000..11bb9417123b
--- /dev/null
+++ b/arch/riscv/mm/extable.c
@@ -0,0 +1,37 @@
1/*
2 * Copyright (C) 2009 Sunplus Core Technology Co., Ltd.
3 * Lennox Wu <lennox.wu@sunplusct.com>
4 * Chen Liqin <liqin.chen@sunplusct.com>
5 * Copyright (C) 2013 Regents of the University of California
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, see the file COPYING, or write
19 * to the Free Software Foundation, Inc.,
20 */
21
22
23#include <linux/extable.h>
24#include <linux/module.h>
25#include <linux/uaccess.h>
26
27int fixup_exception(struct pt_regs *regs)
28{
29 const struct exception_table_entry *fixup;
30
31 fixup = search_exception_tables(regs->sepc);
32 if (fixup) {
33 regs->sepc = fixup->fixup;
34 return 1;
35 }
36 return 0;
37}
diff --git a/arch/riscv/mm/fault.c b/arch/riscv/mm/fault.c
new file mode 100644
index 000000000000..df2ca3c65048
--- /dev/null
+++ b/arch/riscv/mm/fault.c
@@ -0,0 +1,282 @@
1/*
2 * Copyright (C) 2009 Sunplus Core Technology Co., Ltd.
3 * Lennox Wu <lennox.wu@sunplusct.com>
4 * Chen Liqin <liqin.chen@sunplusct.com>
5 * Copyright (C) 2012 Regents of the University of California
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, see the file COPYING, or write
19 * to the Free Software Foundation, Inc.,
20 */
21
22
23#include <linux/mm.h>
24#include <linux/kernel.h>
25#include <linux/interrupt.h>
26#include <linux/perf_event.h>
27#include <linux/signal.h>
28#include <linux/uaccess.h>
29
30#include <asm/pgalloc.h>
31#include <asm/ptrace.h>
32#include <asm/uaccess.h>
33
34/*
35 * This routine handles page faults. It determines the address and the
36 * problem, and then passes it off to one of the appropriate routines.
37 */
asmlinkage void do_page_fault(struct pt_regs *regs)
{
	struct task_struct *tsk;
	struct vm_area_struct *vma;
	struct mm_struct *mm;
	unsigned long addr, cause;
	unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
	int fault, code = SEGV_MAPERR;

	/* Trap cause and faulting virtual address as reported by hardware */
	cause = regs->scause;
	addr = regs->sbadaddr;

	tsk = current;
	mm = tsk->mm;

	/*
	 * Fault-in kernel-space virtual memory on-demand.
	 * The 'reference' page table is init_mm.pgd.
	 *
	 * NOTE! We MUST NOT take any locks for this case. We may
	 * be in an interrupt or a critical region, and should
	 * only copy the information from the master page table,
	 * nothing more.
	 */
	if (unlikely((addr >= VMALLOC_START) && (addr <= VMALLOC_END)))
		goto vmalloc_fault;

	/* Enable interrupts if they were enabled in the parent context. */
	/* NOTE(review): SR_PIE is used as the previous-interrupt-enable
	 * bit of the faulting context here — confirm against the
	 * privileged spec's sstatus layout for this kernel version. */
	if (likely(regs->sstatus & SR_PIE))
		local_irq_enable();

	/*
	 * If we're in an interrupt, have no user context, or are running
	 * in an atomic region, then we must not take the fault.
	 */
	if (unlikely(faulthandler_disabled() || !mm))
		goto no_context;

	if (user_mode(regs))
		flags |= FAULT_FLAG_USER;

	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, addr);

retry:
	down_read(&mm->mmap_sem);
	vma = find_vma(mm, addr);
	if (unlikely(!vma))
		goto bad_area;
	if (likely(vma->vm_start <= addr))
		goto good_area;
	if (unlikely(!(vma->vm_flags & VM_GROWSDOWN)))
		goto bad_area;
	if (unlikely(expand_stack(vma, addr)))
		goto bad_area;

	/*
	 * Ok, we have a good vm_area for this memory access, so
	 * we can handle it.
	 */
good_area:
	code = SEGV_ACCERR;

	/* Check the access type against the VMA's permissions */
	switch (cause) {
	case EXC_INST_PAGE_FAULT:
		if (!(vma->vm_flags & VM_EXEC))
			goto bad_area;
		break;
	case EXC_LOAD_PAGE_FAULT:
		if (!(vma->vm_flags & VM_READ))
			goto bad_area;
		break;
	case EXC_STORE_PAGE_FAULT:
		if (!(vma->vm_flags & VM_WRITE))
			goto bad_area;
		flags |= FAULT_FLAG_WRITE;
		break;
	default:
		panic("%s: unhandled cause %lu", __func__, cause);
	}

	/*
	 * If for any reason at all we could not handle the fault,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault.
	 */
	fault = handle_mm_fault(vma, addr, flags);

	/*
	 * If we need to retry but a fatal signal is pending, handle the
	 * signal first. We do not need to release the mmap_sem because it
	 * would already be released in __lock_page_or_retry in mm/filemap.c.
	 */
	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(tsk))
		return;

	if (unlikely(fault & VM_FAULT_ERROR)) {
		if (fault & VM_FAULT_OOM)
			goto out_of_memory;
		else if (fault & VM_FAULT_SIGBUS)
			goto do_sigbus;
		BUG();
	}

	/*
	 * Major/minor page fault accounting is only done on the
	 * initial attempt. If we go through a retry, it is extremely
	 * likely that the page will be found in page cache at that point.
	 */
	if (flags & FAULT_FLAG_ALLOW_RETRY) {
		if (fault & VM_FAULT_MAJOR) {
			tsk->maj_flt++;
			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ,
				      1, regs, addr);
		} else {
			tsk->min_flt++;
			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN,
				      1, regs, addr);
		}
		if (fault & VM_FAULT_RETRY) {
			/*
			 * Clear FAULT_FLAG_ALLOW_RETRY to avoid any risk
			 * of starvation.
			 */
			flags &= ~(FAULT_FLAG_ALLOW_RETRY);
			flags |= FAULT_FLAG_TRIED;

			/*
			 * No need to up_read(&mm->mmap_sem) as we would
			 * have already released it in __lock_page_or_retry
			 * in mm/filemap.c.
			 */
			goto retry;
		}
	}

	up_read(&mm->mmap_sem);
	return;

	/*
	 * Something tried to access memory that isn't in our memory map.
	 * Fix it, but check if it's kernel or user first.
	 */
bad_area:
	up_read(&mm->mmap_sem);
	/* User mode accesses just cause a SIGSEGV */
	if (user_mode(regs)) {
		do_trap(regs, SIGSEGV, code, addr, tsk);
		return;
	}

no_context:
	/* Are we prepared to handle this kernel fault? */
	if (fixup_exception(regs))
		return;

	/*
	 * Oops. The kernel tried to access some bad page. We'll have to
	 * terminate things with extreme prejudice.
	 */
	bust_spinlocks(1);
	pr_alert("Unable to handle kernel %s at virtual address " REG_FMT "\n",
		(addr < PAGE_SIZE) ? "NULL pointer dereference" :
		"paging request", addr);
	die(regs, "Oops");
	do_exit(SIGKILL);

	/*
	 * We ran out of memory, call the OOM killer, and return the userspace
	 * (which will retry the fault, or kill us if we got oom-killed).
	 */
out_of_memory:
	up_read(&mm->mmap_sem);
	if (!user_mode(regs))
		goto no_context;
	pagefault_out_of_memory();
	return;

do_sigbus:
	up_read(&mm->mmap_sem);
	/* Kernel mode? Handle exceptions or die */
	if (!user_mode(regs))
		goto no_context;
	do_trap(regs, SIGBUS, BUS_ADRERR, addr, tsk);
	return;

vmalloc_fault:
	{
		pgd_t *pgd, *pgd_k;
		pud_t *pud, *pud_k;
		p4d_t *p4d, *p4d_k;
		pmd_t *pmd, *pmd_k;
		pte_t *pte_k;
		int index;

		/* User faults in the vmalloc range are never legitimate */
		if (user_mode(regs))
			goto bad_area;

		/*
		 * Synchronize this task's top level page-table
		 * with the 'reference' page table.
		 *
		 * Do _not_ use "tsk->active_mm->pgd" here.
		 * We might be inside an interrupt in the middle
		 * of a task switch.
		 */
		index = pgd_index(addr);
		/* Active page table root is read back from the sptbr CSR */
		pgd = (pgd_t *)pfn_to_virt(csr_read(sptbr)) + index;
		pgd_k = init_mm.pgd + index;

		if (!pgd_present(*pgd_k))
			goto no_context;
		set_pgd(pgd, *pgd_k);

		p4d = p4d_offset(pgd, addr);
		p4d_k = p4d_offset(pgd_k, addr);
		if (!p4d_present(*p4d_k))
			goto no_context;

		pud = pud_offset(p4d, addr);
		pud_k = pud_offset(p4d_k, addr);
		if (!pud_present(*pud_k))
			goto no_context;

		/*
		 * Since the vmalloc area is global, it is unnecessary
		 * to copy individual PTEs
		 */
		pmd = pmd_offset(pud, addr);
		pmd_k = pmd_offset(pud_k, addr);
		if (!pmd_present(*pmd_k))
			goto no_context;
		set_pmd(pmd, *pmd_k);

		/*
		 * Make sure the actual PTE exists as well to
		 * catch kernel vmalloc-area accesses to non-mapped
		 * addresses. If we don't do this, this will just
		 * silently loop forever.
		 */
		pte_k = pte_offset_kernel(pmd_k, addr);
		if (!pte_present(*pte_k))
			goto no_context;
		return;
	}
}
diff --git a/arch/riscv/mm/init.c b/arch/riscv/mm/init.c
new file mode 100644
index 000000000000..9f4bee5e51fd
--- /dev/null
+++ b/arch/riscv/mm/init.c
@@ -0,0 +1,70 @@
1/*
2 * Copyright (C) 2012 Regents of the University of California
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation, version 2.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 */
13
14#include <linux/init.h>
15#include <linux/mm.h>
16#include <linux/bootmem.h>
17#include <linux/initrd.h>
18#include <linux/memblock.h>
19#include <linux/swap.h>
20
21#include <asm/tlbflush.h>
22#include <asm/sections.h>
23#include <asm/pgtable.h>
24#include <asm/io.h>
25
26static void __init zone_sizes_init(void)
27{
28 unsigned long zones_size[MAX_NR_ZONES];
29
30 memset(zones_size, 0, sizeof(zones_size));
31 zones_size[ZONE_NORMAL] = max_mapnr;
32 free_area_init_node(0, zones_size, pfn_base, NULL);
33}
34
35void setup_zero_page(void)
36{
37 memset((void *)empty_zero_page, 0, PAGE_SIZE);
38}
39
/*
 * Adopt the page table installed by early boot (its root PFN is read
 * back from the sptbr CSR) as init_mm's pgd, then prime the zero page
 * and the zone layout.
 */
void __init paging_init(void)
{
	init_mm.pgd = (pgd_t *)pfn_to_virt(csr_read(sptbr));

	setup_zero_page();
	local_flush_tlb_all();
	zone_sizes_init();
}
48
/*
 * Late memory initialization: hand all bootmem pages over to the buddy
 * allocator and print the resulting memory layout.
 */
void __init mem_init(void)
{
#ifdef CONFIG_FLATMEM
	BUG_ON(!mem_map);
#endif /* CONFIG_FLATMEM */

	high_memory = (void *)(__va(PFN_PHYS(max_low_pfn)));
	free_all_bootmem();

	mem_init_print_info(NULL);
}
60
/* Release __init text/data back to the page allocator (poison byte 0). */
void free_initmem(void)
{
	free_initmem_default(0);
}
65
#ifdef CONFIG_BLK_DEV_INITRD
/* Freeing initrd memory is deliberately a no-op on this port for now. */
void free_initrd_mem(unsigned long start, unsigned long end)
{
}
#endif /* CONFIG_BLK_DEV_INITRD */
diff --git a/arch/riscv/mm/ioremap.c b/arch/riscv/mm/ioremap.c
new file mode 100644
index 000000000000..e99194a4077e
--- /dev/null
+++ b/arch/riscv/mm/ioremap.c
@@ -0,0 +1,92 @@
1/*
2 * (C) Copyright 1995 1996 Linus Torvalds
3 * (C) Copyright 2012 Regents of the University of California
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License
7 * as published by the Free Software Foundation, version 2.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 */
14
15#include <linux/export.h>
16#include <linux/mm.h>
17#include <linux/vmalloc.h>
18#include <linux/io.h>
19
20#include <asm/pgtable.h>
21
22/*
23 * Remap an arbitrary physical address space into the kernel virtual
24 * address space. Needed when the kernel wants to access high addresses
25 * directly.
26 *
27 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
28 * have to convert them into an offset in a page-aligned mapping, but the
29 * caller shouldn't need to know that small detail.
30 */
31static void __iomem *__ioremap_caller(phys_addr_t addr, size_t size,
32 pgprot_t prot, void *caller)
33{
34 phys_addr_t last_addr;
35 unsigned long offset, vaddr;
36 struct vm_struct *area;
37
38 /* Disallow wrap-around or zero size */
39 last_addr = addr + size - 1;
40 if (!size || last_addr < addr)
41 return NULL;
42
43 /* Page-align mappings */
44 offset = addr & (~PAGE_MASK);
45 addr &= PAGE_MASK;
46 size = PAGE_ALIGN(size + offset);
47
48 area = get_vm_area_caller(size, VM_IOREMAP, caller);
49 if (!area)
50 return NULL;
51 vaddr = (unsigned long)area->addr;
52
53 if (ioremap_page_range(vaddr, vaddr + size, addr, prot)) {
54 free_vm_area(area);
55 return NULL;
56 }
57
58 return (void __iomem *)(vaddr + offset);
59}
60
61/*
62 * ioremap - map bus memory into CPU space
63 * @offset: bus address of the memory
64 * @size: size of the resource to map
65 *
66 * ioremap performs a platform specific sequence of operations to
67 * make bus memory CPU accessible via the readb/readw/readl/writeb/
68 * writew/writel functions and the other mmio helpers. The returned
69 * address is not guaranteed to be usable directly as a virtual
70 * address.
71 *
72 * Must be freed with iounmap.
73 */
74void __iomem *ioremap(phys_addr_t offset, unsigned long size)
75{
76 return __ioremap_caller(offset, size, PAGE_KERNEL,
77 __builtin_return_address(0));
78}
79EXPORT_SYMBOL(ioremap);
80
81
82/**
83 * iounmap - Free a IO remapping
84 * @addr: virtual address from ioremap_*
85 *
86 * Caller must ensure there is only one unmapping for the same pointer.
87 */
88void iounmap(void __iomem *addr)
89{
90 vunmap((void *)((unsigned long)addr & PAGE_MASK));
91}
92EXPORT_SYMBOL(iounmap);
diff --git a/include/lib/libgcc.h b/include/lib/libgcc.h
new file mode 100644
index 000000000000..32e1e0f4b2d0
--- /dev/null
+++ b/include/lib/libgcc.h
@@ -0,0 +1,43 @@
1/*
2 * include/lib/libgcc.h
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, see the file COPYING, or write
16 * to the Free Software Foundation, Inc.
17 */
18
#ifndef __LIB_LIBGCC_H
#define __LIB_LIBGCC_H

#include <asm/byteorder.h>

/* Machine-word-sized integer, mirroring libgcc's word_type. */
typedef int word_type __attribute__ ((mode (__word__)));

/*
 * The two 32-bit halves of a 64-bit value, ordered so the DWunion
 * below overlays them correctly for the target's byte order.
 */
#ifdef __BIG_ENDIAN
struct DWstruct {
	int high, low;
};
#elif defined(__LITTLE_ENDIAN)
struct DWstruct {
	int low, high;
};
#else
#error I feel sick.
#endif

/* View a 64-bit integer whole (ll) or as its high/low words (s). */
typedef union {
	struct DWstruct s;
	long long ll;
} DWunion;

#endif /* __LIB_LIBGCC_H */
diff --git a/lib/Kconfig b/lib/Kconfig
index b1445b22a6de..a2b6745324ab 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -587,3 +587,21 @@ config STRING_SELFTEST
587 bool "Test string functions" 587 bool "Test string functions"
588 588
589endmenu 589endmenu
590
591config GENERIC_ASHLDI3
592 bool
593
594config GENERIC_ASHRDI3
595 bool
596
597config GENERIC_LSHRDI3
598 bool
599
600config GENERIC_MULDI3
601 bool
602
603config GENERIC_CMPDI2
604 bool
605
606config GENERIC_UCMPDI2
607 bool
diff --git a/lib/Makefile b/lib/Makefile
index b8f2c16fccaa..136a0b254564 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -248,3 +248,11 @@ UBSAN_SANITIZE_ubsan.o := n
248obj-$(CONFIG_SBITMAP) += sbitmap.o 248obj-$(CONFIG_SBITMAP) += sbitmap.o
249 249
250obj-$(CONFIG_PARMAN) += parman.o 250obj-$(CONFIG_PARMAN) += parman.o
251
252# GCC library routines
253obj-$(CONFIG_GENERIC_ASHLDI3) += ashldi3.o
254obj-$(CONFIG_GENERIC_ASHRDI3) += ashrdi3.o
255obj-$(CONFIG_GENERIC_LSHRDI3) += lshrdi3.o
256obj-$(CONFIG_GENERIC_MULDI3) += muldi3.o
257obj-$(CONFIG_GENERIC_CMPDI2) += cmpdi2.o
258obj-$(CONFIG_GENERIC_UCMPDI2) += ucmpdi2.o
diff --git a/lib/ashldi3.c b/lib/ashldi3.c
new file mode 100644
index 000000000000..1b6087db95a5
--- /dev/null
+++ b/lib/ashldi3.c
@@ -0,0 +1,44 @@
1/*
2 * This program is free software; you can redistribute it and/or modify
3 * it under the terms of the GNU General Public License as published by
4 * the Free Software Foundation; either version 2 of the License, or
5 * (at your option) any later version.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 *
12 * You should have received a copy of the GNU General Public License
13 * along with this program; if not, see the file COPYING, or write
14 * to the Free Software Foundation, Inc.
15 */
16
17#include <linux/export.h>
18
19#include <lib/libgcc.h>
20
21long long notrace __ashldi3(long long u, word_type b)
22{
23 DWunion uu, w;
24 word_type bm;
25
26 if (b == 0)
27 return u;
28
29 uu.ll = u;
30 bm = 32 - b;
31
32 if (bm <= 0) {
33 w.s.low = 0;
34 w.s.high = (unsigned int) uu.s.low << -bm;
35 } else {
36 const unsigned int carries = (unsigned int) uu.s.low >> bm;
37
38 w.s.low = (unsigned int) uu.s.low << b;
39 w.s.high = ((unsigned int) uu.s.high << b) | carries;
40 }
41
42 return w.ll;
43}
44EXPORT_SYMBOL(__ashldi3);
diff --git a/lib/ashrdi3.c b/lib/ashrdi3.c
new file mode 100644
index 000000000000..2e67c97ac65a
--- /dev/null
+++ b/lib/ashrdi3.c
@@ -0,0 +1,46 @@
1/*
2 * This program is free software; you can redistribute it and/or modify
3 * it under the terms of the GNU General Public License as published by
4 * the Free Software Foundation; either version 2 of the License, or
5 * (at your option) any later version.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 *
12 * You should have received a copy of the GNU General Public License
13 * along with this program; if not, see the file COPYING, or write
14 * to the Free Software Foundation, Inc.
15 */
16
17#include <linux/export.h>
18
19#include <lib/libgcc.h>
20
21long long notrace __ashrdi3(long long u, word_type b)
22{
23 DWunion uu, w;
24 word_type bm;
25
26 if (b == 0)
27 return u;
28
29 uu.ll = u;
30 bm = 32 - b;
31
32 if (bm <= 0) {
33 /* w.s.high = 1..1 or 0..0 */
34 w.s.high =
35 uu.s.high >> 31;
36 w.s.low = uu.s.high >> -bm;
37 } else {
38 const unsigned int carries = (unsigned int) uu.s.high << bm;
39
40 w.s.high = uu.s.high >> b;
41 w.s.low = ((unsigned int) uu.s.low >> b) | carries;
42 }
43
44 return w.ll;
45}
46EXPORT_SYMBOL(__ashrdi3);
diff --git a/lib/cmpdi2.c b/lib/cmpdi2.c
new file mode 100644
index 000000000000..6d7ebf6c2b86
--- /dev/null
+++ b/lib/cmpdi2.c
@@ -0,0 +1,42 @@
1/*
2 * This program is free software; you can redistribute it and/or modify
3 * it under the terms of the GNU General Public License as published by
4 * the Free Software Foundation; either version 2 of the License, or
5 * (at your option) any later version.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 *
12 * You should have received a copy of the GNU General Public License
13 * along with this program; if not, see the file COPYING, or write
14 * to the Free Software Foundation, Inc.
15 */
16
17#include <linux/export.h>
18
19#include <lib/libgcc.h>
20
21word_type notrace __cmpdi2(long long a, long long b)
22{
23 const DWunion au = {
24 .ll = a
25 };
26 const DWunion bu = {
27 .ll = b
28 };
29
30 if (au.s.high < bu.s.high)
31 return 0;
32 else if (au.s.high > bu.s.high)
33 return 2;
34
35 if ((unsigned int) au.s.low < (unsigned int) bu.s.low)
36 return 0;
37 else if ((unsigned int) au.s.low > (unsigned int) bu.s.low)
38 return 2;
39
40 return 1;
41}
42EXPORT_SYMBOL(__cmpdi2);
diff --git a/lib/lshrdi3.c b/lib/lshrdi3.c
new file mode 100644
index 000000000000..8e845f4bb65f
--- /dev/null
+++ b/lib/lshrdi3.c
@@ -0,0 +1,45 @@
1/*
2 * lib/lshrdi3.c
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, see the file COPYING, or write
16 * to the Free Software Foundation, Inc.
17 */
18
19#include <linux/module.h>
20#include <lib/libgcc.h>
21
22long long notrace __lshrdi3(long long u, word_type b)
23{
24 DWunion uu, w;
25 word_type bm;
26
27 if (b == 0)
28 return u;
29
30 uu.ll = u;
31 bm = 32 - b;
32
33 if (bm <= 0) {
34 w.s.high = 0;
35 w.s.low = (unsigned int) uu.s.high >> -bm;
36 } else {
37 const unsigned int carries = (unsigned int) uu.s.high << bm;
38
39 w.s.high = (unsigned int) uu.s.high >> b;
40 w.s.low = ((unsigned int) uu.s.low >> b) | carries;
41 }
42
43 return w.ll;
44}
45EXPORT_SYMBOL(__lshrdi3);
diff --git a/lib/muldi3.c b/lib/muldi3.c
new file mode 100644
index 000000000000..88938543e10a
--- /dev/null
+++ b/lib/muldi3.c
@@ -0,0 +1,72 @@
1/*
2 * This program is free software; you can redistribute it and/or modify
3 * it under the terms of the GNU General Public License as published by
4 * the Free Software Foundation; either version 2 of the License, or
5 * (at your option) any later version.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 *
12 * You should have received a copy of the GNU General Public License
13 * along with this program; if not, see the file COPYING, or write
14 * to the Free Software Foundation, Inc.
15 */
16
17#include <linux/export.h>
18#include <lib/libgcc.h>
19
#define W_TYPE_SIZE 32

/*
 * Split a 32-bit word into 16-bit halves:
 * __ll_B is the weight of the upper half (2^16);
 * __ll_lowpart()/__ll_highpart() extract the low/high 16 bits.
 */
#define __ll_B ((unsigned long) 1 << (W_TYPE_SIZE / 2))
#define __ll_lowpart(t) ((unsigned long) (t) & (__ll_B - 1))
#define __ll_highpart(t) ((unsigned long) (t) >> (W_TYPE_SIZE / 2))

/* If we still don't have umul_ppmm, define it using plain C. */
/*
 * umul_ppmm(w1, w0, u, v): full 32x32 -> 64-bit unsigned multiply done
 * with four 16x16 partial products; w1 gets the high 32 bits of u*v,
 * w0 the low 32 bits.  The explicit carry check on __x1 propagates the
 * overflow of the two middle products into the high word.
 */
#if !defined(umul_ppmm)
#define umul_ppmm(w1, w0, u, v) \
	do { \
		unsigned long __x0, __x1, __x2, __x3; \
		unsigned short __ul, __vl, __uh, __vh; \
		\
		__ul = __ll_lowpart(u); \
		__uh = __ll_highpart(u); \
		__vl = __ll_lowpart(v); \
		__vh = __ll_highpart(v); \
		\
		__x0 = (unsigned long) __ul * __vl; \
		__x1 = (unsigned long) __ul * __vh; \
		__x2 = (unsigned long) __uh * __vl; \
		__x3 = (unsigned long) __uh * __vh; \
		\
		__x1 += __ll_highpart(__x0); /* this can't give carry */\
		__x1 += __x2; /* but this indeed can */ \
		if (__x1 < __x2) /* did we get it? */ \
			__x3 += __ll_B; /* yes, add it in the proper pos */ \
		\
		(w1) = __x3 + __ll_highpart(__x1); \
		(w0) = __ll_lowpart(__x1) * __ll_B + __ll_lowpart(__x0);\
	} while (0)
#endif

/*
 * __umulsidi3(u, v): 32x32 -> 64 unsigned multiply returning the full
 * product as a 64-bit value, built on umul_ppmm via a DWunion.
 */
#if !defined(__umulsidi3)
#define __umulsidi3(u, v) ({ \
	DWunion __w; \
	umul_ppmm(__w.s.high, __w.s.low, u, v); \
	__w.ll; \
})
#endif
60
61long long notrace __muldi3(long long u, long long v)
62{
63 const DWunion uu = {.ll = u};
64 const DWunion vv = {.ll = v};
65 DWunion w = {.ll = __umulsidi3(uu.s.low, vv.s.low)};
66
67 w.s.high += ((unsigned long) uu.s.low * (unsigned long) vv.s.high
68 + (unsigned long) uu.s.high * (unsigned long) vv.s.low);
69
70 return w.ll;
71}
72EXPORT_SYMBOL(__muldi3);
diff --git a/lib/ucmpdi2.c b/lib/ucmpdi2.c
new file mode 100644
index 000000000000..49a53505c8e3
--- /dev/null
+++ b/lib/ucmpdi2.c
@@ -0,0 +1,35 @@
1/*
2 * This program is free software; you can redistribute it and/or modify
3 * it under the terms of the GNU General Public License as published by
4 * the Free Software Foundation; either version 2 of the License, or
5 * (at your option) any later version.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 *
12 * You should have received a copy of the GNU General Public License
13 * along with this program; if not, see the file COPYING, or write
14 * to the Free Software Foundation, Inc.
15 */
16
17#include <linux/module.h>
18#include <lib/libgcc.h>
19
20word_type __ucmpdi2(unsigned long long a, unsigned long long b)
21{
22 const DWunion au = {.ll = a};
23 const DWunion bu = {.ll = b};
24
25 if ((unsigned int) au.s.high < (unsigned int) bu.s.high)
26 return 0;
27 else if ((unsigned int) au.s.high > (unsigned int) bu.s.high)
28 return 2;
29 if ((unsigned int) au.s.low < (unsigned int) bu.s.low)
30 return 0;
31 else if ((unsigned int) au.s.low > (unsigned int) bu.s.low)
32 return 2;
33 return 1;
34}
35EXPORT_SYMBOL(__ucmpdi2);