author		Linus Torvalds <torvalds@linux-foundation.org>	2019-05-19 12:56:36 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2019-05-19 12:56:36 -0400
commit		b0bb1269b9788a35af68587505d8df90498df75f (patch)
tree		d9cc2ef42c93947c59638f412d479b0fd36f7e9b
parent		72cf0b07418a9c8349aa9137194b1ccba6e54a9d (diff)
parent		8fef9900d43feb9d5017c72840966733085e3e82 (diff)
Merge tag 'riscv-for-linus-5.2-mw2' of git://git.kernel.org/pub/scm/linux/kernel/git/palmer/riscv-linux
Pull RISC-V updates from Palmer Dabbelt:
 "This contains an assortment of RISC-V related patches that I'd like to
  target for the 5.2 merge window. Most of the patches are cleanups, but
  there are a handful of user-visible changes:

   - The nosmp and nr_cpus command-line arguments are now supported,
     which work like normal.

   - The SBI console no longer installs itself as a preferred console;
     we rely on standard mechanisms (/chosen, command line, heuristics)
     instead.

   - sbi_remote_sfence_vma{,_asid} now pass their arguments along to
     the SBI call.

   - Modules now support BUG().

   - A missing sfence.vma during boot has been added. This bug only
     manifests during boot.

   - The arch/riscv support for SiFive's L2 cache controller has been
     merged, which should un-block the EDAC framework work.

  I've only tested this on QEMU again, as I didn't have time to get
  things running on the Unleashed. The latest master from this morning
  merges in cleanly and passes the tests as well"

* tag 'riscv-for-linus-5.2-mw2' of git://git.kernel.org/pub/scm/linux/kernel/git/palmer/riscv-linux: (31 commits)
  riscv: fix locking violation in page fault handler
  RISC-V: sifive_l2_cache: Add L2 cache controller driver for SiFive SoCs
  RISC-V: Add DT documentation for SiFive L2 Cache Controller
  RISC-V: Avoid using invalid intermediate translations
  riscv: Support BUG() in kernel module
  riscv: Add the support for c.ebreak check in is_valid_bugaddr()
  riscv: support trap-based WARN()
  riscv: fix sbi_remote_sfence_vma{,_asid}.
  riscv: move switch_mm to its own file
  riscv: move flush_icache_{all,mm} to cacheflush.c
  tty: Don't force RISCV SBI console as preferred console
  RISC-V: Access CSRs using CSR numbers
  RISC-V: Add interrupt related SCAUSE defines in asm/csr.h
  RISC-V: Use tabs to align macro values in asm/csr.h
  RISC-V: Fix minor checkpatch issues.
  RISC-V: Support nr_cpus command line option.
  RISC-V: Implement nosmp commandline option.
  RISC-V: Add RISC-V specific arch_match_cpu_phys_id
  riscv: vdso: drop unnecessary cc-ldoption
  riscv: call pm_power_off from machine_halt / machine_power_off
  ...
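The first bullet is exercised purely from the boot command line; both options
are parsed by generic kernel code, so they behave as on other architectures.
A hedged example (the console= and root= values are placeholders for whatever
the board actually uses):

	# Cap the number of possible CPUs at two:
	console=ttySIF0 root=/dev/mmcblk0p2 nr_cpus=2

	# Bring up only the boot hart, as if the system were uniprocessor:
	console=ttySIF0 root=/dev/mmcblk0p2 nosmp

nr_cpus= limits how many CPUs can ever be brought online (enforced in the
setup_smp() change in arch/riscv/kernel/smpboot.c below), while nosmp results
in max_cpus == 0, so smp_prepare_cpus() marks no secondary harts present.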
 Documentation/devicetree/bindings/riscv/sifive-l2-cache.txt |  51
 arch/riscv/Kconfig                                          |   6
 arch/riscv/Makefile                                         |   5
 arch/riscv/include/asm/Kbuild                               |   1
 arch/riscv/include/asm/bug.h                                |  35
 arch/riscv/include/asm/cacheflush.h                         |   2
 arch/riscv/include/asm/csr.h                                | 123
 arch/riscv/include/asm/elf.h                                |   6
 arch/riscv/include/asm/futex.h                              |  13
 arch/riscv/include/asm/irqflags.h                           |  10
 arch/riscv/include/asm/mmu_context.h                        |  59
 arch/riscv/include/asm/ptrace.h                             |  21
 arch/riscv/include/asm/sbi.h                                |  19
 arch/riscv/include/asm/sifive_l2_cache.h                    |  16
 arch/riscv/include/asm/thread_info.h                        |   4
 arch/riscv/include/asm/uaccess.h                            |  28
 arch/riscv/kernel/asm-offsets.c                             |   3
 arch/riscv/kernel/cpu.c                                     |   3
 arch/riscv/kernel/entry.S                                   |  22
 arch/riscv/kernel/head.S                                    |  33
 arch/riscv/kernel/irq.c                                     |  19
 arch/riscv/kernel/perf_event.c                              |   4
 arch/riscv/kernel/reset.c                                   |  15
 arch/riscv/kernel/setup.c                                   |   6
 arch/riscv/kernel/signal.c                                  |   6
 arch/riscv/kernel/smp.c                                     |  61
 arch/riscv/kernel/smpboot.c                                 |  22
 arch/riscv/kernel/stacktrace.c                              |  14
 arch/riscv/kernel/traps.c                                   |  30
 arch/riscv/kernel/vdso/Makefile                             |   2
 arch/riscv/mm/Makefile                                      |   2
 arch/riscv/mm/cacheflush.c                                  |  61
 arch/riscv/mm/context.c                                     |  69
 arch/riscv/mm/fault.c                                       |   9
 arch/riscv/mm/sifive_l2_cache.c                             | 175
 drivers/tty/hvc/hvc_riscv_sbi.c                             |   1
 36 files changed, 635 insertions, 321 deletions
diff --git a/Documentation/devicetree/bindings/riscv/sifive-l2-cache.txt b/Documentation/devicetree/bindings/riscv/sifive-l2-cache.txt
new file mode 100644
index 000000000000..73d8f19c3bd9
--- /dev/null
+++ b/Documentation/devicetree/bindings/riscv/sifive-l2-cache.txt
@@ -0,0 +1,51 @@
+SiFive L2 Cache Controller
+--------------------------
+The SiFive Level 2 Cache Controller is used to provide access to fast copies
+of memory for masters in a Core Complex. The Level 2 Cache Controller also
+acts as a directory-based coherency manager.
+All the properties in the ePAPR/DeviceTree specification apply to this platform.
+
+Required Properties:
+--------------------
+- compatible: Should be "sifive,fu540-c000-ccache" and "cache"
+
+- cache-block-size: Specifies the block size in bytes of the cache.
+  Should be 64
+
+- cache-level: Should be set to 2 for a level 2 cache
+
+- cache-sets: Specifies the number of associativity sets of the cache.
+  Should be 1024
+
+- cache-size: Specifies the size in bytes of the cache. Should be 2097152
+
+- cache-unified: Specifies the cache is a unified cache
+
+- interrupts: Must contain 3 entries (DirError, DataError and DataFail signals)
+
+- reg: Physical base address and size of L2 cache controller registers map
+
+Optional Properties:
+--------------------
+- next-level-cache: phandle to the next level cache if present.
+
+- memory-region: reference to the reserved-memory node for the L2 Loosely
+  Integrated Memory region. The reserved memory node should be defined as
+  per the bindings in reserved-memory.txt
+
+
+Example:
+
+	cache-controller@2010000 {
+		compatible = "sifive,fu540-c000-ccache", "cache";
+		cache-block-size = <64>;
+		cache-level = <2>;
+		cache-sets = <1024>;
+		cache-size = <2097152>;
+		cache-unified;
+		interrupt-parent = <&plic0>;
+		interrupts = <1 2 3>;
+		reg = <0x0 0x2010000 0x0 0x1000>;
+		next-level-cache = <&L25 &L40 &L36>;
+		memory-region = <&l2_lim>;
+	};
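The memory-region property in the example assumes a reserved-memory node
exists elsewhere in the tree. A hedged sketch of a matching node (the l2_lim
label comes from the example above; the address and size shown are
illustrative, not mandated by this binding):

	reserved-memory {
		#address-cells = <2>;
		#size-cells = <2>;
		ranges;

		/* L2 Loosely Integrated Memory carve-out */
		l2_lim: memory@8000000 {
			reg = <0x0 0x8000000 0x0 0x200000>;
			no-map;
		};
	};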
diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig
index e66745decea1..ee32c66e1af3 100644
--- a/arch/riscv/Kconfig
+++ b/arch/riscv/Kconfig
@@ -27,7 +27,7 @@ config RISCV
 	select GENERIC_STRNCPY_FROM_USER
 	select GENERIC_STRNLEN_USER
 	select GENERIC_SMP_IDLE_THREAD
-	select GENERIC_ATOMIC64 if !64BIT || !RISCV_ISA_A
+	select GENERIC_ATOMIC64 if !64BIT
 	select HAVE_ARCH_AUDITSYSCALL
 	select HAVE_MEMBLOCK_NODE_MAP
 	select HAVE_DMA_CONTIGUOUS
@@ -35,7 +35,6 @@ config RISCV
 	select HAVE_PERF_EVENTS
 	select HAVE_SYSCALL_TRACEPOINTS
 	select IRQ_DOMAIN
-	select RISCV_ISA_A if SMP
 	select SPARSE_IRQ
 	select SYSCTL_EXCEPTION_TRACE
 	select HAVE_ARCH_TRACEHOOK
@@ -195,9 +194,6 @@ config RISCV_ISA_C
 
 	  If you don't know what to do here, say Y.
 
-config RISCV_ISA_A
-	def_bool y
-
 menu "supported PMU type"
 	depends on PERF_EVENTS
 
diff --git a/arch/riscv/Makefile b/arch/riscv/Makefile
index c6342e638ef7..6b0741c9f348 100644
--- a/arch/riscv/Makefile
+++ b/arch/riscv/Makefile
@@ -39,9 +39,8 @@ endif
 KBUILD_CFLAGS += -Wall
 
 # ISA string setting
-riscv-march-$(CONFIG_ARCH_RV32I)	:= rv32im
-riscv-march-$(CONFIG_ARCH_RV64I)	:= rv64im
-riscv-march-$(CONFIG_RISCV_ISA_A)	:= $(riscv-march-y)a
+riscv-march-$(CONFIG_ARCH_RV32I)	:= rv32ima
+riscv-march-$(CONFIG_ARCH_RV64I)	:= rv64ima
 riscv-march-$(CONFIG_FPU)		:= $(riscv-march-y)fd
 riscv-march-$(CONFIG_RISCV_ISA_C)	:= $(riscv-march-y)c
 KBUILD_CFLAGS += -march=$(subst fd,,$(riscv-march-y))
diff --git a/arch/riscv/include/asm/Kbuild b/arch/riscv/include/asm/Kbuild
index cccd12cf27d4..5a7a19d9aa7f 100644
--- a/arch/riscv/include/asm/Kbuild
+++ b/arch/riscv/include/asm/Kbuild
@@ -4,6 +4,7 @@ generic-y += compat.h
 generic-y += cputime.h
 generic-y += device.h
 generic-y += div64.h
+generic-y += extable.h
 generic-y += dma.h
 generic-y += dma-contiguous.h
 generic-y += dma-mapping.h
diff --git a/arch/riscv/include/asm/bug.h b/arch/riscv/include/asm/bug.h
index bfc7f099ab1f..52a1fbdeab3b 100644
--- a/arch/riscv/include/asm/bug.h
+++ b/arch/riscv/include/asm/bug.h
@@ -21,7 +21,12 @@
 #include <asm/asm.h>
 
 #ifdef CONFIG_GENERIC_BUG
-#define __BUG_INSN	_AC(0x00100073, UL) /* ebreak */
+#define __INSN_LENGTH_MASK	_UL(0x3)
+#define __INSN_LENGTH_32	_UL(0x3)
+#define __COMPRESSED_INSN_MASK	_UL(0xffff)
+
+#define __BUG_INSN_32	_UL(0x00100073) /* ebreak */
+#define __BUG_INSN_16	_UL(0x9002) /* c.ebreak */
 
 #ifndef __ASSEMBLY__
 typedef u32 bug_insn_t;
@@ -38,38 +43,46 @@ typedef u32 bug_insn_t;
 #define __BUG_ENTRY				\
 	__BUG_ENTRY_ADDR "\n\t"			\
 	__BUG_ENTRY_FILE "\n\t"			\
-	RISCV_SHORT " %1"
+	RISCV_SHORT " %1\n\t"			\
+	RISCV_SHORT " %2"
 #else
 #define __BUG_ENTRY				\
-	__BUG_ENTRY_ADDR
+	__BUG_ENTRY_ADDR "\n\t"			\
+	RISCV_SHORT " %2"
 #endif
 
-#define BUG()						\
+#define __BUG_FLAGS(flags)				\
 do {							\
 	__asm__ __volatile__ (				\
 		"1:\n\t"				\
 			"ebreak\n"			\
-			".pushsection __bug_table,\"a\"\n\t"	\
+			".pushsection __bug_table,\"aw\"\n\t"	\
 		"2:\n\t"				\
 			__BUG_ENTRY "\n\t"		\
-			".org 2b + %2\n\t"		\
+			".org 2b + %3\n\t"		\
 		".popsection"				\
 		:					\
 		: "i" (__FILE__), "i" (__LINE__),	\
-		  "i" (sizeof(struct bug_entry)));	\
-	unreachable();					\
+		  "i" (flags),				\
+		  "i" (sizeof(struct bug_entry)));	\
 } while (0)
+
 #endif /* !__ASSEMBLY__ */
 #else /* CONFIG_GENERIC_BUG */
 #ifndef __ASSEMBLY__
-#define BUG()						\
-do {							\
+#define __BUG_FLAGS(flags) do {				\
 	__asm__ __volatile__ ("ebreak\n");		\
-	unreachable();					\
 } while (0)
 #endif /* !__ASSEMBLY__ */
 #endif /* CONFIG_GENERIC_BUG */
 
+#define BUG() do {					\
+	__BUG_FLAGS(0);					\
+	unreachable();					\
+} while (0)
+
+#define __WARN_FLAGS(flags) __BUG_FLAGS(BUGFLAG_WARNING|(flags))
+
 #define HAVE_ARCH_BUG
 
 #include <asm-generic/bug.h>
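Taken together, __BUG_FLAGS() means both BUG() and the new trap-based WARN()
compile down to a single ebreak plus a bug_table record, and the trap handler
decides what to do from the flags. A hedged sketch of ordinary call sites
(check_state() and its message are illustrative, not from this series):

	static int check_state(int val)
	{
		/*
		 * WARN() now traps via ebreak; do_trap_break() prints the
		 * report and advances sepc past the (possibly compressed)
		 * instruction, so execution continues afterwards.
		 */
		WARN(val < 0, "negative state %d\n", val);

		/* BUG() also traps, but the handler calls die() instead. */
		if (val == INT_MAX)
			BUG();
		return val;
	}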
diff --git a/arch/riscv/include/asm/cacheflush.h b/arch/riscv/include/asm/cacheflush.h
index 8f13074413a7..1f4ba68ab9aa 100644
--- a/arch/riscv/include/asm/cacheflush.h
+++ b/arch/riscv/include/asm/cacheflush.h
@@ -47,7 +47,7 @@ static inline void flush_dcache_page(struct page *page)
 
 #else /* CONFIG_SMP */
 
-#define flush_icache_all() sbi_remote_fence_i(NULL)
+void flush_icache_all(void);
 void flush_icache_mm(struct mm_struct *mm, bool local);
 
 #endif /* CONFIG_SMP */
diff --git a/arch/riscv/include/asm/csr.h b/arch/riscv/include/asm/csr.h
index 28a0d1cb374c..3c3c26c3a1f1 100644
--- a/arch/riscv/include/asm/csr.h
+++ b/arch/riscv/include/asm/csr.h
@@ -14,64 +14,95 @@
 #ifndef _ASM_RISCV_CSR_H
 #define _ASM_RISCV_CSR_H
 
+#include <asm/asm.h>
 #include <linux/const.h>
 
 /* Status register flags */
 #define SR_SIE		_AC(0x00000002, UL) /* Supervisor Interrupt Enable */
 #define SR_SPIE		_AC(0x00000020, UL) /* Previous Supervisor IE */
 #define SR_SPP		_AC(0x00000100, UL) /* Previously Supervisor */
-#define SR_SUM		_AC(0x00040000, UL) /* Supervisor may access User Memory */
+#define SR_SUM		_AC(0x00040000, UL) /* Supervisor User Memory Access */
 
 #define SR_FS		_AC(0x00006000, UL) /* Floating-point Status */
 #define SR_FS_OFF	_AC(0x00000000, UL)
 #define SR_FS_INITIAL	_AC(0x00002000, UL)
 #define SR_FS_CLEAN	_AC(0x00004000, UL)
 #define SR_FS_DIRTY	_AC(0x00006000, UL)
 
 #define SR_XS		_AC(0x00018000, UL) /* Extension Status */
 #define SR_XS_OFF	_AC(0x00000000, UL)
 #define SR_XS_INITIAL	_AC(0x00008000, UL)
 #define SR_XS_CLEAN	_AC(0x00010000, UL)
 #define SR_XS_DIRTY	_AC(0x00018000, UL)
 
 #ifndef CONFIG_64BIT
 #define SR_SD		_AC(0x80000000, UL) /* FS/XS dirty */
 #else
 #define SR_SD		_AC(0x8000000000000000, UL) /* FS/XS dirty */
 #endif
 
 /* SATP flags */
-#if __riscv_xlen == 32
+#ifndef CONFIG_64BIT
 #define SATP_PPN	_AC(0x003FFFFF, UL)
 #define SATP_MODE_32	_AC(0x80000000, UL)
 #define SATP_MODE	SATP_MODE_32
 #else
 #define SATP_PPN	_AC(0x00000FFFFFFFFFFF, UL)
 #define SATP_MODE_39	_AC(0x8000000000000000, UL)
 #define SATP_MODE	SATP_MODE_39
 #endif
 
-/* Interrupt Enable and Interrupt Pending flags */
-#define SIE_SSIE	_AC(0x00000002, UL) /* Software Interrupt Enable */
-#define SIE_STIE	_AC(0x00000020, UL) /* Timer Interrupt Enable */
-#define SIE_SEIE	_AC(0x00000200, UL) /* External Interrupt Enable */
-
-#define EXC_INST_MISALIGNED	0
-#define EXC_INST_ACCESS		1
-#define EXC_BREAKPOINT		3
-#define EXC_LOAD_ACCESS		5
-#define EXC_STORE_ACCESS	7
-#define EXC_SYSCALL		8
-#define EXC_INST_PAGE_FAULT	12
-#define EXC_LOAD_PAGE_FAULT	13
-#define EXC_STORE_PAGE_FAULT	15
+/* SCAUSE */
+#define SCAUSE_IRQ_FLAG		(_AC(1, UL) << (__riscv_xlen - 1))
+
+#define IRQ_U_SOFT		0
+#define IRQ_S_SOFT		1
+#define IRQ_M_SOFT		3
+#define IRQ_U_TIMER		4
+#define IRQ_S_TIMER		5
+#define IRQ_M_TIMER		7
+#define IRQ_U_EXT		8
+#define IRQ_S_EXT		9
+#define IRQ_M_EXT		11
+
+#define EXC_INST_MISALIGNED	0
+#define EXC_INST_ACCESS		1
+#define EXC_BREAKPOINT		3
+#define EXC_LOAD_ACCESS		5
+#define EXC_STORE_ACCESS	7
+#define EXC_SYSCALL		8
+#define EXC_INST_PAGE_FAULT	12
+#define EXC_LOAD_PAGE_FAULT	13
+#define EXC_STORE_PAGE_FAULT	15
+
+/* SIE (Interrupt Enable) and SIP (Interrupt Pending) flags */
+#define SIE_SSIE	(_AC(0x1, UL) << IRQ_S_SOFT)
+#define SIE_STIE	(_AC(0x1, UL) << IRQ_S_TIMER)
+#define SIE_SEIE	(_AC(0x1, UL) << IRQ_S_EXT)
+
+#define CSR_CYCLE	0xc00
+#define CSR_TIME	0xc01
+#define CSR_INSTRET	0xc02
+#define CSR_SSTATUS	0x100
+#define CSR_SIE		0x104
+#define CSR_STVEC	0x105
+#define CSR_SCOUNTEREN	0x106
+#define CSR_SSCRATCH	0x140
+#define CSR_SEPC	0x141
+#define CSR_SCAUSE	0x142
+#define CSR_STVAL	0x143
+#define CSR_SIP		0x144
+#define CSR_SATP	0x180
+#define CSR_CYCLEH	0xc80
+#define CSR_TIMEH	0xc81
+#define CSR_INSTRETH	0xc82
 
 #ifndef __ASSEMBLY__
 
 #define csr_swap(csr, val)					\
 ({								\
 	unsigned long __v = (unsigned long)(val);		\
-	__asm__ __volatile__ ("csrrw %0, " #csr ", %1"		\
+	__asm__ __volatile__ ("csrrw %0, " __ASM_STR(csr) ", %1"\
 			      : "=r" (__v) : "rK" (__v)		\
 			      : "memory");			\
 	__v;							\
@@ -80,7 +111,7 @@
 #define csr_read(csr)						\
 ({								\
 	register unsigned long __v;				\
-	__asm__ __volatile__ ("csrr %0, " #csr			\
+	__asm__ __volatile__ ("csrr %0, " __ASM_STR(csr)	\
 			      : "=r" (__v) :			\
 			      : "memory");			\
 	__v;							\
@@ -89,7 +120,7 @@
 #define csr_write(csr, val)					\
 ({								\
 	unsigned long __v = (unsigned long)(val);		\
-	__asm__ __volatile__ ("csrw " #csr ", %0"		\
+	__asm__ __volatile__ ("csrw " __ASM_STR(csr) ", %0"	\
 			      : : "rK" (__v)			\
 			      : "memory");			\
 })
@@ -97,7 +128,7 @@
 #define csr_read_set(csr, val)					\
 ({								\
 	unsigned long __v = (unsigned long)(val);		\
-	__asm__ __volatile__ ("csrrs %0, " #csr ", %1"		\
+	__asm__ __volatile__ ("csrrs %0, " __ASM_STR(csr) ", %1"\
 			      : "=r" (__v) : "rK" (__v)		\
 			      : "memory");			\
 	__v;							\
@@ -106,7 +137,7 @@
 #define csr_set(csr, val)					\
 ({								\
 	unsigned long __v = (unsigned long)(val);		\
-	__asm__ __volatile__ ("csrs " #csr ", %0"		\
+	__asm__ __volatile__ ("csrs " __ASM_STR(csr) ", %0"	\
 			      : : "rK" (__v)			\
 			      : "memory");			\
 })
@@ -114,7 +145,7 @@
 #define csr_read_clear(csr, val)				\
 ({								\
 	unsigned long __v = (unsigned long)(val);		\
-	__asm__ __volatile__ ("csrrc %0, " #csr ", %1"		\
+	__asm__ __volatile__ ("csrrc %0, " __ASM_STR(csr) ", %1"\
 			      : "=r" (__v) : "rK" (__v)		\
 			      : "memory");			\
 	__v;							\
@@ -123,7 +154,7 @@
 #define csr_clear(csr, val)					\
 ({								\
 	unsigned long __v = (unsigned long)(val);		\
-	__asm__ __volatile__ ("csrc " #csr ", %0"		\
+	__asm__ __volatile__ ("csrc " __ASM_STR(csr) ", %0"	\
 			      : : "rK" (__v)			\
 			      : "memory");			\
 })
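The practical effect of switching to __ASM_STR() is that a CSR is named by
number everywhere, so the same macros work with assemblers that predate the
privileged-spec register names. A hedged usage sketch mirroring the
irqflags.h conversion later in this diff (save_and_mask_irqs is an
illustrative name):

	/* CSR_SSTATUS expands to 0x100; no assembler name lookup needed. */
	static unsigned long save_and_mask_irqs(void)
	{
		/* Atomically read sstatus and clear SR_SIE, exactly as
		 * arch_local_irq_save() does. */
		return csr_read_clear(CSR_SSTATUS, SR_SIE);
	}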
diff --git a/arch/riscv/include/asm/elf.h b/arch/riscv/include/asm/elf.h
index 697fc23b0d5a..ce0cd7d77eb0 100644
--- a/arch/riscv/include/asm/elf.h
+++ b/arch/riscv/include/asm/elf.h
@@ -27,13 +27,7 @@
 #define ELF_CLASS	ELFCLASS32
 #endif
 
-#if defined(__LITTLE_ENDIAN)
 #define ELF_DATA	ELFDATA2LSB
-#elif defined(__BIG_ENDIAN)
-#define ELF_DATA	ELFDATA2MSB
-#else
-#error "Unknown endianness"
-#endif
 
 /*
  * This is used to ensure we don't load something for the wrong architecture.
diff --git a/arch/riscv/include/asm/futex.h b/arch/riscv/include/asm/futex.h
index 66641624d8a5..4ad6409c4647 100644
--- a/arch/riscv/include/asm/futex.h
+++ b/arch/riscv/include/asm/futex.h
@@ -7,18 +7,6 @@
 #ifndef _ASM_FUTEX_H
 #define _ASM_FUTEX_H
 
-#ifndef CONFIG_RISCV_ISA_A
-/*
- * Use the generic interrupt disabling versions if the A extension
- * is not supported.
- */
-#ifdef CONFIG_SMP
-#error "Can't support generic futex calls without A extension on SMP"
-#endif
-#include <asm-generic/futex.h>
-
-#else /* CONFIG_RISCV_ISA_A */
-
 #include <linux/futex.h>
 #include <linux/uaccess.h>
 #include <linux/errno.h>
@@ -124,5 +112,4 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
 	return ret;
 }
 
-#endif /* CONFIG_RISCV_ISA_A */
 #endif /* _ASM_FUTEX_H */
diff --git a/arch/riscv/include/asm/irqflags.h b/arch/riscv/include/asm/irqflags.h
index 07a3c6d5706f..1a69b3bcd371 100644
--- a/arch/riscv/include/asm/irqflags.h
+++ b/arch/riscv/include/asm/irqflags.h
@@ -21,25 +21,25 @@
 /* read interrupt enabled status */
 static inline unsigned long arch_local_save_flags(void)
 {
-	return csr_read(sstatus);
+	return csr_read(CSR_SSTATUS);
 }
 
 /* unconditionally enable interrupts */
 static inline void arch_local_irq_enable(void)
 {
-	csr_set(sstatus, SR_SIE);
+	csr_set(CSR_SSTATUS, SR_SIE);
 }
 
 /* unconditionally disable interrupts */
 static inline void arch_local_irq_disable(void)
 {
-	csr_clear(sstatus, SR_SIE);
+	csr_clear(CSR_SSTATUS, SR_SIE);
 }
 
 /* get status and disable interrupts */
 static inline unsigned long arch_local_irq_save(void)
 {
-	return csr_read_clear(sstatus, SR_SIE);
+	return csr_read_clear(CSR_SSTATUS, SR_SIE);
 }
 
 /* test flags */
@@ -57,7 +57,7 @@ static inline int arch_irqs_disabled(void)
 /* set interrupt enabled status */
 static inline void arch_local_irq_restore(unsigned long flags)
 {
-	csr_set(sstatus, flags & SR_SIE);
+	csr_set(CSR_SSTATUS, flags & SR_SIE);
 }
 
 #endif /* _ASM_RISCV_IRQFLAGS_H */
diff --git a/arch/riscv/include/asm/mmu_context.h b/arch/riscv/include/asm/mmu_context.h
index 336d60ec5698..bf4f097a9051 100644
--- a/arch/riscv/include/asm/mmu_context.h
+++ b/arch/riscv/include/asm/mmu_context.h
@@ -20,8 +20,6 @@
 
 #include <linux/mm.h>
 #include <linux/sched.h>
-#include <asm/tlbflush.h>
-#include <asm/cacheflush.h>
 
 static inline void enter_lazy_tlb(struct mm_struct *mm,
 	struct task_struct *task)
@@ -39,61 +37,8 @@ static inline void destroy_context(struct mm_struct *mm)
 {
 }
 
-/*
- * When necessary, performs a deferred icache flush for the given MM context,
- * on the local CPU.  RISC-V has no direct mechanism for instruction cache
- * shoot downs, so instead we send an IPI that informs the remote harts they
- * need to flush their local instruction caches.  To avoid pathologically slow
- * behavior in a common case (a bunch of single-hart processes on a many-hart
- * machine, ie 'make -j') we avoid the IPIs for harts that are not currently
- * executing a MM context and instead schedule a deferred local instruction
- * cache flush to be performed before execution resumes on each hart.  This
- * actually performs that local instruction cache flush, which implicitly only
- * refers to the current hart.
- */
-static inline void flush_icache_deferred(struct mm_struct *mm)
-{
-#ifdef CONFIG_SMP
-	unsigned int cpu = smp_processor_id();
-	cpumask_t *mask = &mm->context.icache_stale_mask;
-
-	if (cpumask_test_cpu(cpu, mask)) {
-		cpumask_clear_cpu(cpu, mask);
-		/*
-		 * Ensure the remote hart's writes are visible to this hart.
-		 * This pairs with a barrier in flush_icache_mm.
-		 */
-		smp_mb();
-		local_flush_icache_all();
-	}
-#endif
-}
-
-static inline void switch_mm(struct mm_struct *prev,
-	struct mm_struct *next, struct task_struct *task)
-{
-	if (likely(prev != next)) {
-		/*
-		 * Mark the current MM context as inactive, and the next as
-		 * active.  This is at least used by the icache flushing
-		 * routines in order to determine who should be flushed.
-		 */
-		unsigned int cpu = smp_processor_id();
-
-		cpumask_clear_cpu(cpu, mm_cpumask(prev));
-		cpumask_set_cpu(cpu, mm_cpumask(next));
-
-		/*
-		 * Use the old sptbr name instead of using the current satp
-		 * name to support binutils 2.29 which doesn't know about the
-		 * privileged ISA 1.10 yet.
-		 */
-		csr_write(sptbr, virt_to_pfn(next->pgd) | SATP_MODE);
-		local_flush_tlb_all();
-
-		flush_icache_deferred(next);
-	}
-}
+void switch_mm(struct mm_struct *prev, struct mm_struct *next,
+	       struct task_struct *task);
 
 static inline void activate_mm(struct mm_struct *prev,
 	struct mm_struct *next)
diff --git a/arch/riscv/include/asm/ptrace.h b/arch/riscv/include/asm/ptrace.h
index d35ec2f41381..9c867a4bac83 100644
--- a/arch/riscv/include/asm/ptrace.h
+++ b/arch/riscv/include/asm/ptrace.h
@@ -70,47 +70,38 @@ struct pt_regs {
 
 
 /* Helpers for working with the instruction pointer */
-#define GET_IP(regs) ((regs)->sepc)
-#define SET_IP(regs, val) (GET_IP(regs) = (val))
-
 static inline unsigned long instruction_pointer(struct pt_regs *regs)
 {
-	return GET_IP(regs);
+	return regs->sepc;
 }
 static inline void instruction_pointer_set(struct pt_regs *regs,
 					   unsigned long val)
 {
-	SET_IP(regs, val);
+	regs->sepc = val;
 }
 
 #define profile_pc(regs) instruction_pointer(regs)
 
 /* Helpers for working with the user stack pointer */
-#define GET_USP(regs) ((regs)->sp)
-#define SET_USP(regs, val) (GET_USP(regs) = (val))
-
 static inline unsigned long user_stack_pointer(struct pt_regs *regs)
 {
-	return GET_USP(regs);
+	return regs->sp;
 }
 static inline void user_stack_pointer_set(struct pt_regs *regs,
 					  unsigned long val)
 {
-	SET_USP(regs, val);
+	regs->sp = val;
 }
 
 /* Helpers for working with the frame pointer */
-#define GET_FP(regs) ((regs)->s0)
-#define SET_FP(regs, val) (GET_FP(regs) = (val))
-
 static inline unsigned long frame_pointer(struct pt_regs *regs)
 {
-	return GET_FP(regs);
+	return regs->s0;
 }
 static inline void frame_pointer_set(struct pt_regs *regs,
 				     unsigned long val)
 {
-	SET_FP(regs, val);
+	regs->s0 = val;
 }
 
 static inline unsigned long regs_return_value(struct pt_regs *regs)
diff --git a/arch/riscv/include/asm/sbi.h b/arch/riscv/include/asm/sbi.h
index b6bb10b92fe2..19f231615510 100644
--- a/arch/riscv/include/asm/sbi.h
+++ b/arch/riscv/include/asm/sbi.h
@@ -26,22 +26,27 @@
 #define SBI_REMOTE_SFENCE_VMA_ASID 7
 #define SBI_SHUTDOWN 8
 
-#define SBI_CALL(which, arg0, arg1, arg2) ({			\
+#define SBI_CALL(which, arg0, arg1, arg2, arg3) ({		\
 	register uintptr_t a0 asm ("a0") = (uintptr_t)(arg0);	\
 	register uintptr_t a1 asm ("a1") = (uintptr_t)(arg1);	\
 	register uintptr_t a2 asm ("a2") = (uintptr_t)(arg2);	\
+	register uintptr_t a3 asm ("a3") = (uintptr_t)(arg3);	\
 	register uintptr_t a7 asm ("a7") = (uintptr_t)(which);	\
 	asm volatile ("ecall"					\
 		      : "+r" (a0)				\
-		      : "r" (a1), "r" (a2), "r" (a7)		\
+		      : "r" (a1), "r" (a2), "r" (a3), "r" (a7)	\
 		      : "memory");				\
 	a0;							\
 })
 
 /* Lazy implementations until SBI is finalized */
-#define SBI_CALL_0(which) SBI_CALL(which, 0, 0, 0)
-#define SBI_CALL_1(which, arg0) SBI_CALL(which, arg0, 0, 0)
-#define SBI_CALL_2(which, arg0, arg1) SBI_CALL(which, arg0, arg1, 0)
+#define SBI_CALL_0(which) SBI_CALL(which, 0, 0, 0, 0)
+#define SBI_CALL_1(which, arg0) SBI_CALL(which, arg0, 0, 0, 0)
+#define SBI_CALL_2(which, arg0, arg1) SBI_CALL(which, arg0, arg1, 0, 0)
+#define SBI_CALL_3(which, arg0, arg1, arg2) \
+		SBI_CALL(which, arg0, arg1, arg2, 0)
+#define SBI_CALL_4(which, arg0, arg1, arg2, arg3) \
+		SBI_CALL(which, arg0, arg1, arg2, arg3)
 
 static inline void sbi_console_putchar(int ch)
 {
@@ -86,7 +91,7 @@ static inline void sbi_remote_sfence_vma(const unsigned long *hart_mask,
 					 unsigned long start,
 					 unsigned long size)
 {
-	SBI_CALL_1(SBI_REMOTE_SFENCE_VMA, hart_mask);
+	SBI_CALL_3(SBI_REMOTE_SFENCE_VMA, hart_mask, start, size);
 }
 
 static inline void sbi_remote_sfence_vma_asid(const unsigned long *hart_mask,
@@ -94,7 +99,7 @@ static inline void sbi_remote_sfence_vma_asid(const unsigned long *hart_mask,
 					      unsigned long size,
 					      unsigned long asid)
 {
-	SBI_CALL_1(SBI_REMOTE_SFENCE_VMA_ASID, hart_mask);
+	SBI_CALL_4(SBI_REMOTE_SFENCE_VMA_ASID, hart_mask, start, size, asid);
 }
 
 #endif
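For context, SBI_CALL() encodes the legacy SBI v0.1 convention: arguments in
a0-a3, the call number in a7, an ecall into the machine-mode firmware, and
the result back in a0. The bug being fixed is visible in the two hunks above:
SBI_CALL_1() silently discarded start, size and asid, so the firmware never
saw the requested range. A hedged sketch of a call site that now works as
intended (remote_flush_one_page is an illustrative name):

	/* Flush a single page from the TLBs of the harts in hart_mask. */
	static void remote_flush_one_page(const unsigned long *hart_mask,
					  unsigned long vaddr)
	{
		/* start and size now really travel in a1/a2. */
		sbi_remote_sfence_vma(hart_mask, vaddr, PAGE_SIZE);
	}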
diff --git a/arch/riscv/include/asm/sifive_l2_cache.h b/arch/riscv/include/asm/sifive_l2_cache.h
new file mode 100644
index 000000000000..04f6748fc50b
--- /dev/null
+++ b/arch/riscv/include/asm/sifive_l2_cache.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * SiFive L2 Cache Controller header file
+ *
+ */
+
+#ifndef _ASM_RISCV_SIFIVE_L2_CACHE_H
+#define _ASM_RISCV_SIFIVE_L2_CACHE_H
+
+extern int register_sifive_l2_error_notifier(struct notifier_block *nb);
+extern int unregister_sifive_l2_error_notifier(struct notifier_block *nb);
+
+#define SIFIVE_L2_ERR_TYPE_CE 0
+#define SIFIVE_L2_ERR_TYPE_UE 1
+
+#endif /* _ASM_RISCV_SIFIVE_L2_CACHE_H */
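This header is the hook that the EDAC work mentioned in the merge message
would consume: interested code subscribes to correctable (CE) and
uncorrectable (UE) error events raised by the driver added in
arch/riscv/mm/sifive_l2_cache.c below. A hedged sketch of a subscriber (the
my_* names are illustrative):

	#include <linux/notifier.h>
	#include <asm/sifive_l2_cache.h>

	static int my_l2_err_event(struct notifier_block *nb,
				   unsigned long event, void *ptr)
	{
		if (event == SIFIVE_L2_ERR_TYPE_UE)
			pr_err("uncorrectable L2 cache error\n");
		return NOTIFY_OK;
	}

	static struct notifier_block my_l2_err_nb = {
		.notifier_call = my_l2_err_event,
	};

	static int __init my_l2_init(void)
	{
		return register_sifive_l2_error_notifier(&my_l2_err_nb);
	}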
diff --git a/arch/riscv/include/asm/thread_info.h b/arch/riscv/include/asm/thread_info.h
index 1c9cc8389928..9c039870019b 100644
--- a/arch/riscv/include/asm/thread_info.h
+++ b/arch/riscv/include/asm/thread_info.h
@@ -28,7 +28,9 @@
 #include <asm/processor.h>
 #include <asm/csr.h>
 
-typedef unsigned long mm_segment_t;
+typedef struct {
+	unsigned long seg;
+} mm_segment_t;
 
 /*
  * low level task data that entry.S needs immediate access to
diff --git a/arch/riscv/include/asm/uaccess.h b/arch/riscv/include/asm/uaccess.h
index fb53a8089e76..b26f407be5c8 100644
--- a/arch/riscv/include/asm/uaccess.h
+++ b/arch/riscv/include/asm/uaccess.h
@@ -23,6 +23,7 @@
 #include <linux/compiler.h>
 #include <linux/thread_info.h>
 #include <asm/byteorder.h>
+#include <asm/extable.h>
 #include <asm/asm.h>
 
 #define __enable_user_access()						\
@@ -38,8 +39,10 @@
  * For historical reasons, these macros are grossly misnamed.
  */
 
-#define KERNEL_DS	(~0UL)
-#define USER_DS		(TASK_SIZE)
+#define MAKE_MM_SEG(s)	((mm_segment_t) { (s) })
+
+#define KERNEL_DS	MAKE_MM_SEG(~0UL)
+#define USER_DS		MAKE_MM_SEG(TASK_SIZE)
 
 #define get_fs()	(current_thread_info()->addr_limit)
 
@@ -48,9 +51,9 @@ static inline void set_fs(mm_segment_t fs)
 	current_thread_info()->addr_limit = fs;
 }
 
-#define segment_eq(a, b) ((a) == (b))
+#define segment_eq(a, b) ((a).seg == (b).seg)
 
-#define user_addr_max()	(get_fs())
+#define user_addr_max()	(get_fs().seg)
 
 
 /**
@@ -82,7 +85,7 @@ static inline int __access_ok(unsigned long addr, unsigned long size)
 {
 	const mm_segment_t fs = get_fs();
 
-	return (size <= fs) && (addr <= (fs - size));
+	return size <= fs.seg && addr <= fs.seg - size;
 }
 
 /*
@@ -98,21 +101,8 @@ static inline int __access_ok(unsigned long addr, unsigned long size)
 * on our cache or tlb entries.
 */
 
-struct exception_table_entry {
-	unsigned long insn, fixup;
-};
-
-extern int fixup_exception(struct pt_regs *state);
-
-#if defined(__LITTLE_ENDIAN)
-#define __MSW	1
 #define __LSW	0
-#elif defined(__BIG_ENDIAN)
-#define __MSW	0
-#define __LSW	1
-#else
-#error "Unknown endianness"
-#endif
+#define __MSW	1
 
 /*
  * The "__xxx" versions of the user access functions do not verify the address
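Wrapping mm_segment_t in a one-member struct is a standard type-safety idiom:
the raw limit can no longer be mixed with plain integers by accident, which
is why segment_eq() and user_addr_max() must now name .seg. A hedged,
self-contained illustration (seg_t stands in for mm_segment_t):

	typedef struct {
		unsigned long seg;
	} seg_t;

	#define MAKE_SEG(s)	((seg_t) { (s) })

	static int segs_equal(seg_t a, seg_t b)
	{
		/* 'a == b' does not compile for a struct type; the member
		 * has to be named explicitly, catching misuse at build time. */
		return a.seg == b.seg;
	}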
diff --git a/arch/riscv/kernel/asm-offsets.c b/arch/riscv/kernel/asm-offsets.c
index dac98348c6a3..578bb5efc085 100644
--- a/arch/riscv/kernel/asm-offsets.c
+++ b/arch/riscv/kernel/asm-offsets.c
@@ -312,9 +312,6 @@ void asm_offsets(void)
 		  - offsetof(struct task_struct, thread.fstate.f[0])
 	);
 
-	/* The assembler needs access to THREAD_SIZE as well. */
-	DEFINE(ASM_THREAD_SIZE, THREAD_SIZE);
-
 	/*
 	 * We allocate a pt_regs on the stack when entering the kernel. This
 	 * ensures the alignment is sane.
diff --git a/arch/riscv/kernel/cpu.c b/arch/riscv/kernel/cpu.c
index cf2fca12414a..c8d2a3223099 100644
--- a/arch/riscv/kernel/cpu.c
+++ b/arch/riscv/kernel/cpu.c
@@ -136,8 +136,7 @@ static void c_stop(struct seq_file *m, void *v)
 static int c_show(struct seq_file *m, void *v)
 {
 	unsigned long cpu_id = (unsigned long)v - 1;
-	struct device_node *node = of_get_cpu_node(cpuid_to_hartid_map(cpu_id),
-						   NULL);
+	struct device_node *node = of_get_cpu_node(cpu_id, NULL);
 	const char *compat, *isa, *mmu;
 
 	seq_printf(m, "processor\t: %lu\n", cpu_id);
diff --git a/arch/riscv/kernel/entry.S b/arch/riscv/kernel/entry.S
index fd9b57c8b4ce..1c1ecc238cfa 100644
--- a/arch/riscv/kernel/entry.S
+++ b/arch/riscv/kernel/entry.S
@@ -37,11 +37,11 @@
 	 * the kernel thread pointer.  If we came from the kernel, sscratch
 	 * will contain 0, and we should continue on the current TP.
 	 */
-	csrrw tp, sscratch, tp
+	csrrw tp, CSR_SSCRATCH, tp
 	bnez tp, _save_context
 
 _restore_kernel_tpsp:
-	csrr tp, sscratch
+	csrr tp, CSR_SSCRATCH
 	REG_S sp, TASK_TI_KERNEL_SP(tp)
 _save_context:
 	REG_S sp, TASK_TI_USER_SP(tp)
@@ -87,11 +87,11 @@ _save_context:
 	li t0, SR_SUM | SR_FS
 
 	REG_L s0, TASK_TI_USER_SP(tp)
-	csrrc s1, sstatus, t0
-	csrr s2, sepc
-	csrr s3, sbadaddr
-	csrr s4, scause
-	csrr s5, sscratch
+	csrrc s1, CSR_SSTATUS, t0
+	csrr s2, CSR_SEPC
+	csrr s3, CSR_STVAL
+	csrr s4, CSR_SCAUSE
+	csrr s5, CSR_SSCRATCH
 	REG_S s0, PT_SP(sp)
 	REG_S s1, PT_SSTATUS(sp)
 	REG_S s2, PT_SEPC(sp)
@@ -107,8 +107,8 @@ _save_context:
 .macro RESTORE_ALL
 	REG_L a0, PT_SSTATUS(sp)
 	REG_L a2, PT_SEPC(sp)
-	csrw sstatus, a0
-	csrw sepc, a2
+	csrw CSR_SSTATUS, a0
+	csrw CSR_SEPC, a2
 
 	REG_L x1, PT_RA(sp)
 	REG_L x3, PT_GP(sp)
@@ -155,7 +155,7 @@ ENTRY(handle_exception)
 	 * Set sscratch register to 0, so that if a recursive exception
 	 * occurs, the exception vector knows it came from the kernel
 	 */
-	csrw sscratch, x0
+	csrw CSR_SSCRATCH, x0
 
 	/* Load the global pointer */
 .option push
@@ -248,7 +248,7 @@ resume_userspace:
 	 * Save TP into sscratch, so we can find the kernel data structures
 	 * again.
 	 */
-	csrw sscratch, tp
+	csrw CSR_SSCRATCH, tp
 
 restore_all:
 	RESTORE_ALL
diff --git a/arch/riscv/kernel/head.S b/arch/riscv/kernel/head.S
index fe884cd69abd..370c66ce187a 100644
--- a/arch/riscv/kernel/head.S
+++ b/arch/riscv/kernel/head.S
@@ -23,7 +23,8 @@
 __INIT
 ENTRY(_start)
 	/* Mask all interrupts */
-	csrw sie, zero
+	csrw CSR_SIE, zero
+	csrw CSR_SIP, zero
 
 	/* Load the global pointer */
 .option push
@@ -68,14 +69,10 @@ clear_bss_done:
 	/* Restore C environment */
 	la tp, init_task
 	sw zero, TASK_TI_CPU(tp)
-
-	la sp, init_thread_union
-	li a0, ASM_THREAD_SIZE
-	add sp, sp, a0
+	la sp, init_thread_union + THREAD_SIZE
 
 	/* Start the kernel */
-	mv a0, s0
-	mv a1, s1
+	mv a0, s1
 	call parse_dtb
 	tail start_kernel
 
@@ -89,7 +86,7 @@ relocate:
 	/* Point stvec to virtual address of instruction after satp write */
 	la a0, 1f
 	add a0, a0, a1
-	csrw stvec, a0
+	csrw CSR_STVEC, a0
 
 	/* Compute satp for kernel page tables, but don't load it yet */
 	la a2, swapper_pg_dir
@@ -99,18 +96,20 @@ relocate:
 
 	/*
 	 * Load trampoline page directory, which will cause us to trap to
-	 * stvec if VA != PA, or simply fall through if VA == PA
+	 * stvec if VA != PA, or simply fall through if VA == PA.  We need a
+	 * full fence here because setup_vm() just wrote these PTEs and we need
+	 * to ensure the new translations are in use.
 	 */
 	la a0, trampoline_pg_dir
 	srl a0, a0, PAGE_SHIFT
 	or a0, a0, a1
 	sfence.vma
-	csrw sptbr, a0
+	csrw CSR_SATP, a0
 .align 2
 1:
 	/* Set trap vector to spin forever to help debug */
 	la a0, .Lsecondary_park
-	csrw stvec, a0
+	csrw CSR_STVEC, a0
 
 	/* Reload the global pointer */
 .option push
@@ -118,8 +117,14 @@ relocate:
 	la gp, __global_pointer$
 .option pop
 
-	/* Switch to kernel page tables */
-	csrw sptbr, a2
+	/*
+	 * Switch to kernel page tables.  A full fence is necessary in order to
+	 * avoid using the trampoline translations, which are only correct for
+	 * the first superpage.  Fetching the fence is guaranteed to work
+	 * because that first superpage is translated the same way.
+	 */
+	csrw CSR_SATP, a2
+	sfence.vma
 
 	ret
 
@@ -130,7 +135,7 @@ relocate:
 
 	/* Set trap vector to spin forever to help debug */
 	la a3, .Lsecondary_park
-	csrw stvec, a3
+	csrw CSR_STVEC, a3
 
 	slli a3, a0, LGREG
 	la a1, __cpu_up_stack_pointer
diff --git a/arch/riscv/kernel/irq.c b/arch/riscv/kernel/irq.c
index 48e6b7db83a1..6d8659388c49 100644
--- a/arch/riscv/kernel/irq.c
+++ b/arch/riscv/kernel/irq.c
@@ -14,17 +14,9 @@
 /*
  * Possible interrupt causes:
  */
-#define INTERRUPT_CAUSE_SOFTWARE	1
-#define INTERRUPT_CAUSE_TIMER		5
-#define INTERRUPT_CAUSE_EXTERNAL	9
-
-/*
- * The high order bit of the trap cause register is always set for
- * interrupts, which allows us to differentiate them from exceptions
- * quickly. The INTERRUPT_CAUSE_* macros don't contain that bit, so we
- * need to mask it off.
- */
-#define INTERRUPT_CAUSE_FLAG	(1UL << (__riscv_xlen - 1))
+#define INTERRUPT_CAUSE_SOFTWARE	IRQ_S_SOFT
+#define INTERRUPT_CAUSE_TIMER		IRQ_S_TIMER
+#define INTERRUPT_CAUSE_EXTERNAL	IRQ_S_EXT
 
 int arch_show_interrupts(struct seq_file *p, int prec)
 {
@@ -37,7 +29,7 @@ asmlinkage void __irq_entry do_IRQ(struct pt_regs *regs)
 	struct pt_regs *old_regs = set_irq_regs(regs);
 
 	irq_enter();
-	switch (regs->scause & ~INTERRUPT_CAUSE_FLAG) {
+	switch (regs->scause & ~SCAUSE_IRQ_FLAG) {
 	case INTERRUPT_CAUSE_TIMER:
 		riscv_timer_interrupt();
 		break;
@@ -54,7 +46,8 @@ asmlinkage void __irq_entry do_IRQ(struct pt_regs *regs)
 		handle_arch_irq(regs);
 		break;
 	default:
-		panic("unexpected interrupt cause");
+		pr_alert("unexpected interrupt cause 0x%lx", regs->scause);
+		BUG();
 	}
 	irq_exit();
 
diff --git a/arch/riscv/kernel/perf_event.c b/arch/riscv/kernel/perf_event.c
index 667ee70defea..91626d9ae5f2 100644
--- a/arch/riscv/kernel/perf_event.c
+++ b/arch/riscv/kernel/perf_event.c
@@ -185,10 +185,10 @@ static inline u64 read_counter(int idx)
 
 	switch (idx) {
 	case RISCV_PMU_CYCLE:
-		val = csr_read(cycle);
+		val = csr_read(CSR_CYCLE);
 		break;
 	case RISCV_PMU_INSTRET:
-		val = csr_read(instret);
+		val = csr_read(CSR_INSTRET);
 		break;
 	default:
 		WARN_ON_ONCE(idx < 0 || idx > RISCV_MAX_COUNTERS);
diff --git a/arch/riscv/kernel/reset.c b/arch/riscv/kernel/reset.c
index 2a53d26ffdd6..ed637aee514b 100644
--- a/arch/riscv/kernel/reset.c
+++ b/arch/riscv/kernel/reset.c
@@ -12,11 +12,15 @@
  */
 
 #include <linux/reboot.h>
-#include <linux/export.h>
 #include <asm/sbi.h>
 
-void (*pm_power_off)(void) = machine_power_off;
-EXPORT_SYMBOL(pm_power_off);
+static void default_power_off(void)
+{
+	sbi_shutdown();
+	while (1);
+}
+
+void (*pm_power_off)(void) = default_power_off;
 
 void machine_restart(char *cmd)
 {
@@ -26,11 +30,10 @@ void machine_restart(char *cmd)
 
 void machine_halt(void)
 {
-	machine_power_off();
+	pm_power_off();
 }
 
 void machine_power_off(void)
 {
-	sbi_shutdown();
-	while (1);
+	pm_power_off();
 }
diff --git a/arch/riscv/kernel/setup.c b/arch/riscv/kernel/setup.c
index 540a331d1376..d93bcce004e3 100644
--- a/arch/riscv/kernel/setup.c
+++ b/arch/riscv/kernel/setup.c
@@ -52,9 +52,11 @@ struct screen_info screen_info = {
 atomic_t hart_lottery;
 unsigned long boot_cpu_hartid;
 
-void __init parse_dtb(unsigned int hartid, void *dtb)
+void __init parse_dtb(phys_addr_t dtb_phys)
 {
-	if (early_init_dt_scan(__va(dtb)))
+	void *dtb = __va(dtb_phys);
+
+	if (early_init_dt_scan(dtb))
 		return;
 
 	pr_err("No DTB passed to the kernel\n");
diff --git a/arch/riscv/kernel/signal.c b/arch/riscv/kernel/signal.c
index 837e1646091a..804d6ee4f3c5 100644
--- a/arch/riscv/kernel/signal.c
+++ b/arch/riscv/kernel/signal.c
@@ -234,6 +234,9 @@ static void handle_signal(struct ksignal *ksig, struct pt_regs *regs)
 
 	/* Are we from a system call? */
 	if (regs->scause == EXC_SYSCALL) {
+		/* Avoid additional syscall restarting via ret_from_exception */
+		regs->scause = -1UL;
+
 		/* If so, check system call restarting.. */
 		switch (regs->a0) {
 		case -ERESTART_RESTARTBLOCK:
@@ -272,6 +275,9 @@ static void do_signal(struct pt_regs *regs)
 
 	/* Did we come from a system call? */
 	if (regs->scause == EXC_SYSCALL) {
+		/* Avoid additional syscall restarting via ret_from_exception */
+		regs->scause = -1UL;
+
 		/* Restart the system call - no handlers present */
 		switch (regs->a0) {
 		case -ERESTARTNOHAND:
diff --git a/arch/riscv/kernel/smp.c b/arch/riscv/kernel/smp.c
index 0c41d07ec281..b2537ffa855c 100644
--- a/arch/riscv/kernel/smp.c
+++ b/arch/riscv/kernel/smp.c
@@ -42,7 +42,7 @@ unsigned long __cpuid_to_hartid_map[NR_CPUS] = {
 
 void __init smp_setup_processor_id(void)
 {
-       cpuid_to_hartid_map(0) = boot_cpu_hartid;
+	cpuid_to_hartid_map(0) = boot_cpu_hartid;
 }
 
 /* A collection of single bit ipi messages.  */
@@ -53,7 +53,7 @@ static struct {
 
 int riscv_hartid_to_cpuid(int hartid)
 {
-	int i = -1;
+	int i;
 
 	for (i = 0; i < NR_CPUS; i++)
 		if (cpuid_to_hartid_map(i) == hartid)
@@ -70,6 +70,12 @@ void riscv_cpuid_to_hartid_mask(const struct cpumask *in, struct cpumask *out)
 	for_each_cpu(cpu, in)
 		cpumask_set_cpu(cpuid_to_hartid_map(cpu), out);
 }
+
+bool arch_match_cpu_phys_id(int cpu, u64 phys_id)
+{
+	return phys_id == cpuid_to_hartid_map(cpu);
+}
+
 /* Unsupported */
 int setup_profiling_timer(unsigned int multiplier)
 {
@@ -89,7 +95,7 @@ void riscv_software_interrupt(void)
 	unsigned long *stats = ipi_data[smp_processor_id()].stats;
 
 	/* Clear pending IPI */
-	csr_clear(sip, SIE_SSIE);
+	csr_clear(CSR_SIP, SIE_SSIE);
 
 	while (true) {
 		unsigned long ops;
@@ -199,52 +205,3 @@ void smp_send_reschedule(int cpu)
 	send_ipi_message(cpumask_of(cpu), IPI_RESCHEDULE);
 }
 
-/*
- * Performs an icache flush for the given MM context.  RISC-V has no direct
- * mechanism for instruction cache shoot downs, so instead we send an IPI that
- * informs the remote harts they need to flush their local instruction caches.
- * To avoid pathologically slow behavior in a common case (a bunch of
- * single-hart processes on a many-hart machine, ie 'make -j') we avoid the
- * IPIs for harts that are not currently executing a MM context and instead
- * schedule a deferred local instruction cache flush to be performed before
- * execution resumes on each hart.
- */
-void flush_icache_mm(struct mm_struct *mm, bool local)
-{
-	unsigned int cpu;
-	cpumask_t others, hmask, *mask;
-
-	preempt_disable();
-
-	/* Mark every hart's icache as needing a flush for this MM. */
-	mask = &mm->context.icache_stale_mask;
-	cpumask_setall(mask);
-	/* Flush this hart's I$ now, and mark it as flushed. */
-	cpu = smp_processor_id();
-	cpumask_clear_cpu(cpu, mask);
-	local_flush_icache_all();
-
-	/*
-	 * Flush the I$ of other harts concurrently executing, and mark them as
-	 * flushed.
-	 */
-	cpumask_andnot(&others, mm_cpumask(mm), cpumask_of(cpu));
-	local |= cpumask_empty(&others);
-	if (mm != current->active_mm || !local) {
-		cpumask_clear(&hmask);
-		riscv_cpuid_to_hartid_mask(&others, &hmask);
-		sbi_remote_fence_i(hmask.bits);
-	} else {
-		/*
-		 * It's assumed that at least one strongly ordered operation is
-		 * performed on this hart between setting a hart's cpumask bit
-		 * and scheduling this MM context on that hart.  Sending an SBI
-		 * remote message will do this, but in the case where no
-		 * messages are sent we still need to order this hart's writes
-		 * with flush_icache_deferred().
-		 */
-		smp_mb();
-	}
-
-	preempt_enable();
-}
diff --git a/arch/riscv/kernel/smpboot.c b/arch/riscv/kernel/smpboot.c
index eb533b5c2c8c..7a0b62252524 100644
--- a/arch/riscv/kernel/smpboot.c
+++ b/arch/riscv/kernel/smpboot.c
@@ -47,6 +47,17 @@ void __init smp_prepare_boot_cpu(void)
 
 void __init smp_prepare_cpus(unsigned int max_cpus)
 {
+	int cpuid;
+
+	/* This covers non-smp usecase mandated by "nosmp" option */
+	if (max_cpus == 0)
+		return;
+
+	for_each_possible_cpu(cpuid) {
+		if (cpuid == smp_processor_id())
+			continue;
+		set_cpu_present(cpuid, true);
+	}
 }
 
 void __init setup_smp(void)
@@ -73,12 +84,19 @@ void __init setup_smp(void)
 		}
 
 		cpuid_to_hartid_map(cpuid) = hart;
-		set_cpu_possible(cpuid, true);
-		set_cpu_present(cpuid, true);
 		cpuid++;
 	}
 
 	BUG_ON(!found_boot_cpu);
+
+	if (cpuid > nr_cpu_ids)
+		pr_warn("Total number of cpus [%d] is greater than nr_cpus option value [%d]\n",
+			cpuid, nr_cpu_ids);
+
+	for (cpuid = 1; cpuid < nr_cpu_ids; cpuid++) {
+		if (cpuid_to_hartid_map(cpuid) != INVALID_HARTID)
+			set_cpu_possible(cpuid, true);
+	}
 }
 
 int __cpu_up(unsigned int cpu, struct task_struct *tidle)
diff --git a/arch/riscv/kernel/stacktrace.c b/arch/riscv/kernel/stacktrace.c
index 4d403274c2e8..e80a5e8da119 100644
--- a/arch/riscv/kernel/stacktrace.c
+++ b/arch/riscv/kernel/stacktrace.c
@@ -33,9 +33,9 @@ static void notrace walk_stackframe(struct task_struct *task,
 	unsigned long fp, sp, pc;
 
 	if (regs) {
-		fp = GET_FP(regs);
-		sp = GET_USP(regs);
-		pc = GET_IP(regs);
+		fp = frame_pointer(regs);
+		sp = user_stack_pointer(regs);
+		pc = instruction_pointer(regs);
 	} else if (task == NULL || task == current) {
 		const register unsigned long current_sp __asm__ ("sp");
 		fp = (unsigned long)__builtin_frame_address(0);
@@ -64,12 +64,8 @@ static void notrace walk_stackframe(struct task_struct *task,
 		frame = (struct stackframe *)fp - 1;
 		sp = fp;
 		fp = frame->fp;
-#ifdef HAVE_FUNCTION_GRAPH_RET_ADDR_PTR
 		pc = ftrace_graph_ret_addr(current, NULL, frame->ra,
 					   (unsigned long *)(fp - 8));
-#else
-		pc = frame->ra - 0x4;
-#endif
 	}
 }
 
@@ -82,8 +78,8 @@ static void notrace walk_stackframe(struct task_struct *task,
 	unsigned long *ksp;
 
 	if (regs) {
-		sp = GET_USP(regs);
-		pc = GET_IP(regs);
+		sp = user_stack_pointer(regs);
+		pc = instruction_pointer(regs);
 	} else if (task == NULL || task == current) {
 		const register unsigned long current_sp __asm__ ("sp");
 		sp = current_sp;
diff --git a/arch/riscv/kernel/traps.c b/arch/riscv/kernel/traps.c
index 24a9333dda2c..3d1a651dc54c 100644
--- a/arch/riscv/kernel/traps.c
+++ b/arch/riscv/kernel/traps.c
@@ -70,7 +70,7 @@ void do_trap(struct pt_regs *regs, int signo, int code,
70 && printk_ratelimit()) { 70 && printk_ratelimit()) {
71 pr_info("%s[%d]: unhandled signal %d code 0x%x at 0x" REG_FMT, 71 pr_info("%s[%d]: unhandled signal %d code 0x%x at 0x" REG_FMT,
72 tsk->comm, task_pid_nr(tsk), signo, code, addr); 72 tsk->comm, task_pid_nr(tsk), signo, code, addr);
73 print_vma_addr(KERN_CONT " in ", GET_IP(regs)); 73 print_vma_addr(KERN_CONT " in ", instruction_pointer(regs));
74 pr_cont("\n"); 74 pr_cont("\n");
75 show_regs(regs); 75 show_regs(regs);
76 } 76 }
@@ -118,6 +118,17 @@ DO_ERROR_INFO(do_trap_ecall_s,
118DO_ERROR_INFO(do_trap_ecall_m, 118DO_ERROR_INFO(do_trap_ecall_m,
119 SIGILL, ILL_ILLTRP, "environment call from M-mode"); 119 SIGILL, ILL_ILLTRP, "environment call from M-mode");
120 120
121#ifdef CONFIG_GENERIC_BUG
122static inline unsigned long get_break_insn_length(unsigned long pc)
123{
124 bug_insn_t insn;
125
126 if (probe_kernel_address((bug_insn_t *)pc, insn))
127 return 0;
128 return (((insn & __INSN_LENGTH_MASK) == __INSN_LENGTH_32) ? 4UL : 2UL);
129}
130#endif /* CONFIG_GENERIC_BUG */
131
121asmlinkage void do_trap_break(struct pt_regs *regs) 132asmlinkage void do_trap_break(struct pt_regs *regs)
122{ 133{
123#ifdef CONFIG_GENERIC_BUG 134#ifdef CONFIG_GENERIC_BUG
@@ -129,8 +140,8 @@ asmlinkage void do_trap_break(struct pt_regs *regs)
129 case BUG_TRAP_TYPE_NONE: 140 case BUG_TRAP_TYPE_NONE:
130 break; 141 break;
131 case BUG_TRAP_TYPE_WARN: 142 case BUG_TRAP_TYPE_WARN:
132 regs->sepc += sizeof(bug_insn_t); 143 regs->sepc += get_break_insn_length(regs->sepc);
133 return; 144 break;
134 case BUG_TRAP_TYPE_BUG: 145 case BUG_TRAP_TYPE_BUG:
135 die(regs, "Kernel BUG"); 146 die(regs, "Kernel BUG");
136 } 147 }
@@ -145,11 +156,14 @@ int is_valid_bugaddr(unsigned long pc)
 {
 	bug_insn_t insn;
 
-	if (pc < PAGE_OFFSET)
+	if (pc < VMALLOC_START)
 		return 0;
 	if (probe_kernel_address((bug_insn_t *)pc, insn))
 		return 0;
-	return (insn == __BUG_INSN);
+	if ((insn & __INSN_LENGTH_MASK) == __INSN_LENGTH_32)
+		return (insn == __BUG_INSN_32);
+	else
+		return ((insn & __COMPRESSED_INSN_MASK) == __BUG_INSN_16);
 }
 #endif /* CONFIG_GENERIC_BUG */
 
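Both hunks above key off the RISC-V encoding rule that the two low bits of an instruction are 0b11 for a 32-bit instruction and anything else for a 16-bit compressed one. A standalone sketch of that decode, using the architectural opcodes (ebreak is 0x00100073, c.ebreak is 0x9002) rather than the kernel's asm/bug.h constants:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define INSN_LENGTH_MASK	0x3u		/* low 2 bits give the length class */
#define INSN_LENGTH_32		0x3u		/* 0b11 => 32-bit instruction */
#define COMPRESSED_INSN_MASK	0xffffu		/* compressed insns are one halfword */
#define BUG_INSN_32		0x00100073u	/* ebreak */
#define BUG_INSN_16		0x9002u		/* c.ebreak */

static bool is_break_insn(uint32_t insn)
{
	if ((insn & INSN_LENGTH_MASK) == INSN_LENGTH_32)
		return insn == BUG_INSN_32;
	return (insn & COMPRESSED_INSN_MASK) == BUG_INSN_16;
}

static unsigned long break_insn_length(uint32_t insn)
{
	return (insn & INSN_LENGTH_MASK) == INSN_LENGTH_32 ? 4 : 2;
}

int main(void)
{
	/* prints "1/4 1/2": both break forms match, with their sizes */
	printf("%d/%lu %d/%lu\n",
	       is_break_insn(0x00100073), break_insn_length(0x00100073),
	       is_break_insn(0x9002), break_insn_length(0x9002));
	return 0;
}

This is also why the WARN path now advances sepc by get_break_insn_length() instead of a fixed sizeof(bug_insn_t): a WARN() emitted as c.ebreak is only two bytes, and unconditionally skipping four would resume execution in the middle of the next instruction.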
@@ -159,9 +173,9 @@ void __init trap_init(void)
 	 * Set sup0 scratch register to 0, indicating to exception vector
 	 * that we are presently executing in the kernel
 	 */
-	csr_write(sscratch, 0);
+	csr_write(CSR_SSCRATCH, 0);
 	/* Set the exception vector address */
-	csr_write(stvec, &handle_exception);
+	csr_write(CSR_STVEC, &handle_exception);
 	/* Enable all interrupts */
-	csr_write(sie, -1);
+	csr_write(CSR_SIE, -1);
 }
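
This hunk is part of the "Access CSRs using CSR numbers" change from the merge: passing numeric CSR addresses instead of symbolic names keeps the csr_* macros working even with assemblers that lack the privileged-ISA register mnemonics. A minimal sketch of the mechanism, with the numeric values taken from the RISC-V privileged spec (the kernel's real macros live in asm/csr.h and asm/asm.h):

/* Sketch only; numeric CSR addresses per the privileged spec. */
#define CSR_SSCRATCH	0x140
#define CSR_STVEC	0x105
#define CSR_SIE		0x104

#define __ASM_STR(x)	#x

#define csr_write(csr, val)					\
({								\
	unsigned long __v = (unsigned long)(val);		\
	__asm__ __volatile__ ("csrw " __ASM_STR(csr) ", %0"	\
			      : : "rK" (__v)			\
			      : "memory");			\
})

Because the csr argument is macro-expanded before stringification, csr_write(CSR_SSCRATCH, 0) hands the assembler "csrw 0x140, %0", which any binutils version accepts.
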
diff --git a/arch/riscv/kernel/vdso/Makefile b/arch/riscv/kernel/vdso/Makefile
index fec62b24df89..b07b765f312a 100644
--- a/arch/riscv/kernel/vdso/Makefile
+++ b/arch/riscv/kernel/vdso/Makefile
@@ -36,7 +36,7 @@ $(obj)/vdso.so.dbg: $(src)/vdso.lds $(obj-vdso) FORCE
 # these symbols in the kernel code rather than hand-coded addresses.
 
 SYSCFLAGS_vdso.so.dbg = -shared -s -Wl,-soname=linux-vdso.so.1 \
-	$(call cc-ldoption, -Wl$(comma)--hash-style=both)
+	-Wl,--hash-style=both
 $(obj)/vdso-dummy.o: $(src)/vdso.lds $(obj)/rt_sigreturn.o FORCE
 	$(call if_changed,vdsold)
 
diff --git a/arch/riscv/mm/Makefile b/arch/riscv/mm/Makefile
index b68aac701803..8db569141485 100644
--- a/arch/riscv/mm/Makefile
+++ b/arch/riscv/mm/Makefile
@@ -9,3 +9,5 @@ obj-y += fault.o
 obj-y += extable.o
 obj-y += ioremap.o
 obj-y += cacheflush.o
+obj-y += context.o
+obj-y += sifive_l2_cache.o
diff --git a/arch/riscv/mm/cacheflush.c b/arch/riscv/mm/cacheflush.c
index 498c0a0814fe..497b7d07af0c 100644
--- a/arch/riscv/mm/cacheflush.c
+++ b/arch/riscv/mm/cacheflush.c
@@ -14,6 +14,67 @@
 #include <asm/pgtable.h>
 #include <asm/cacheflush.h>
 
+#ifdef CONFIG_SMP
+
+#include <asm/sbi.h>
+
+void flush_icache_all(void)
+{
+	sbi_remote_fence_i(NULL);
+}
+
+/*
+ * Performs an icache flush for the given MM context.  RISC-V has no direct
+ * mechanism for instruction cache shoot downs, so instead we send an IPI that
+ * informs the remote harts they need to flush their local instruction caches.
+ * To avoid pathologically slow behavior in a common case (a bunch of
+ * single-hart processes on a many-hart machine, ie 'make -j') we avoid the
+ * IPIs for harts that are not currently executing a MM context and instead
+ * schedule a deferred local instruction cache flush to be performed before
+ * execution resumes on each hart.
+ */
+void flush_icache_mm(struct mm_struct *mm, bool local)
+{
+	unsigned int cpu;
+	cpumask_t others, hmask, *mask;
+
+	preempt_disable();
+
+	/* Mark every hart's icache as needing a flush for this MM. */
+	mask = &mm->context.icache_stale_mask;
+	cpumask_setall(mask);
+	/* Flush this hart's I$ now, and mark it as flushed. */
+	cpu = smp_processor_id();
+	cpumask_clear_cpu(cpu, mask);
+	local_flush_icache_all();
+
+	/*
+	 * Flush the I$ of other harts concurrently executing, and mark them as
+	 * flushed.
+	 */
+	cpumask_andnot(&others, mm_cpumask(mm), cpumask_of(cpu));
+	local |= cpumask_empty(&others);
+	if (mm != current->active_mm || !local) {
+		cpumask_clear(&hmask);
+		riscv_cpuid_to_hartid_mask(&others, &hmask);
+		sbi_remote_fence_i(hmask.bits);
+	} else {
+		/*
+		 * It's assumed that at least one strongly ordered operation is
+		 * performed on this hart between setting a hart's cpumask bit
+		 * and scheduling this MM context on that hart.  Sending an SBI
+		 * remote message will do this, but in the case where no
+		 * messages are sent we still need to order this hart's writes
+		 * with flush_icache_deferred().
+		 */
+		smp_mb();
+	}
+
+	preempt_enable();
+}
+
+#endif /* CONFIG_SMP */
+
 void flush_icache_pte(pte_t pte)
 {
 	struct page *page = pte_page(pte);
diff --git a/arch/riscv/mm/context.c b/arch/riscv/mm/context.c
new file mode 100644
index 000000000000..89ceb3cbe218
--- /dev/null
+++ b/arch/riscv/mm/context.c
@@ -0,0 +1,69 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2012 Regents of the University of California
+ * Copyright (C) 2017 SiFive
+ */
+
+#include <linux/mm.h>
+#include <asm/tlbflush.h>
+#include <asm/cacheflush.h>
+
+/*
+ * When necessary, performs a deferred icache flush for the given MM context,
+ * on the local CPU.  RISC-V has no direct mechanism for instruction cache
+ * shoot downs, so instead we send an IPI that informs the remote harts they
+ * need to flush their local instruction caches.  To avoid pathologically slow
+ * behavior in a common case (a bunch of single-hart processes on a many-hart
+ * machine, ie 'make -j') we avoid the IPIs for harts that are not currently
+ * executing a MM context and instead schedule a deferred local instruction
+ * cache flush to be performed before execution resumes on each hart.  This
+ * actually performs that local instruction cache flush, which implicitly only
+ * refers to the current hart.
+ */
+static inline void flush_icache_deferred(struct mm_struct *mm)
+{
+#ifdef CONFIG_SMP
+	unsigned int cpu = smp_processor_id();
+	cpumask_t *mask = &mm->context.icache_stale_mask;
+
+	if (cpumask_test_cpu(cpu, mask)) {
+		cpumask_clear_cpu(cpu, mask);
+		/*
+		 * Ensure the remote hart's writes are visible to this hart.
+		 * This pairs with a barrier in flush_icache_mm.
+		 */
+		smp_mb();
+		local_flush_icache_all();
+	}
+
+#endif
+}
+
+void switch_mm(struct mm_struct *prev, struct mm_struct *next,
+	struct task_struct *task)
+{
+	unsigned int cpu;
+
+	if (unlikely(prev == next))
+		return;
+
+	/*
+	 * Mark the current MM context as inactive, and the next as
+	 * active.  This is at least used by the icache flushing
+	 * routines in order to determine who should be flushed.
+	 */
+	cpu = smp_processor_id();
+
+	cpumask_clear_cpu(cpu, mm_cpumask(prev));
+	cpumask_set_cpu(cpu, mm_cpumask(next));
+
+	/*
+	 * Use the old spbtr name instead of using the current satp
+	 * name to support binutils 2.29 which doesn't know about the
+	 * privileged ISA 1.10 yet.
+	 */
+	csr_write(sptbr, virt_to_pfn(next->pgd) | SATP_MODE);
+	local_flush_tlb_all();
+
+	flush_icache_deferred(next);
+}
diff --git a/arch/riscv/mm/fault.c b/arch/riscv/mm/fault.c
index 88401d5125bc..cec8be9e2d6a 100644
--- a/arch/riscv/mm/fault.c
+++ b/arch/riscv/mm/fault.c
@@ -229,8 +229,9 @@ vmalloc_fault:
 	pte_t *pte_k;
 	int index;
 
+	/* User mode accesses just cause a SIGSEGV */
 	if (user_mode(regs))
-		goto bad_area;
+		return do_trap(regs, SIGSEGV, code, addr, tsk);
 
 	/*
 	 * Synchronize this task's top level page-table
@@ -239,13 +240,9 @@ vmalloc_fault:
 	 * Do _not_ use "tsk->active_mm->pgd" here.
 	 * We might be inside an interrupt in the middle
 	 * of a task switch.
-	 *
-	 * Note: Use the old spbtr name instead of using the current
-	 * satp name to support binutils 2.29 which doesn't know about
-	 * the privileged ISA 1.10 yet.
 	 */
 	index = pgd_index(addr);
-	pgd = (pgd_t *)pfn_to_virt(csr_read(sptbr)) + index;
+	pgd = (pgd_t *)pfn_to_virt(csr_read(CSR_SATP)) + index;
 	pgd_k = init_mm.pgd + index;
 
 	if (!pgd_present(*pgd_k))
diff --git a/arch/riscv/mm/sifive_l2_cache.c b/arch/riscv/mm/sifive_l2_cache.c
new file mode 100644
index 000000000000..4eb64619b3f4
--- /dev/null
+++ b/arch/riscv/mm/sifive_l2_cache.c
@@ -0,0 +1,175 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * SiFive L2 cache controller Driver
+ *
+ * Copyright (C) 2018-2019 SiFive, Inc.
+ *
+ */
+#include <linux/debugfs.h>
+#include <linux/interrupt.h>
+#include <linux/of_irq.h>
+#include <linux/of_address.h>
+#include <asm/sifive_l2_cache.h>
+
+#define SIFIVE_L2_DIRECCFIX_LOW 0x100
+#define SIFIVE_L2_DIRECCFIX_HIGH 0x104
+#define SIFIVE_L2_DIRECCFIX_COUNT 0x108
+
+#define SIFIVE_L2_DATECCFIX_LOW 0x140
+#define SIFIVE_L2_DATECCFIX_HIGH 0x144
+#define SIFIVE_L2_DATECCFIX_COUNT 0x148
+
+#define SIFIVE_L2_DATECCFAIL_LOW 0x160
+#define SIFIVE_L2_DATECCFAIL_HIGH 0x164
+#define SIFIVE_L2_DATECCFAIL_COUNT 0x168
+
+#define SIFIVE_L2_CONFIG 0x00
+#define SIFIVE_L2_WAYENABLE 0x08
+#define SIFIVE_L2_ECCINJECTERR 0x40
+
+#define SIFIVE_L2_MAX_ECCINTR 3
+
+static void __iomem *l2_base;
+static int g_irq[SIFIVE_L2_MAX_ECCINTR];
+
+enum {
+	DIR_CORR = 0,
+	DATA_CORR,
+	DATA_UNCORR,
+};
+
+#ifdef CONFIG_DEBUG_FS
+static struct dentry *sifive_test;
+
+static ssize_t l2_write(struct file *file, const char __user *data,
+			size_t count, loff_t *ppos)
+{
+	unsigned int val;
+
+	if (kstrtouint_from_user(data, count, 0, &val))
+		return -EINVAL;
+	if ((val >= 0 && val < 0xFF) || (val >= 0x10000 && val < 0x100FF))
+		writel(val, l2_base + SIFIVE_L2_ECCINJECTERR);
+	else
+		return -EINVAL;
+	return count;
+}
+
+static const struct file_operations l2_fops = {
+	.owner = THIS_MODULE,
+	.open = simple_open,
+	.write = l2_write
+};
+
+static void setup_sifive_debug(void)
+{
+	sifive_test = debugfs_create_dir("sifive_l2_cache", NULL);
+
+	debugfs_create_file("sifive_debug_inject_error", 0200,
+			    sifive_test, NULL, &l2_fops);
+}
+#endif
+
+static void l2_config_read(void)
+{
+	u32 regval, val;
+
+	regval = readl(l2_base + SIFIVE_L2_CONFIG);
+	val = regval & 0xFF;
+	pr_info("L2CACHE: No. of Banks in the cache: %d\n", val);
+	val = (regval & 0xFF00) >> 8;
+	pr_info("L2CACHE: No. of ways per bank: %d\n", val);
+	val = (regval & 0xFF0000) >> 16;
+	pr_info("L2CACHE: Sets per bank: %llu\n", (uint64_t)1 << val);
+	val = (regval & 0xFF000000) >> 24;
+	pr_info("L2CACHE: Bytes per cache block: %llu\n", (uint64_t)1 << val);
+
+	regval = readl(l2_base + SIFIVE_L2_WAYENABLE);
+	pr_info("L2CACHE: Index of the largest way enabled: %d\n", regval);
+}
+
+static const struct of_device_id sifive_l2_ids[] = {
+	{ .compatible = "sifive,fu540-c000-ccache" },
+	{ /* end of table */ },
+};
+
+static ATOMIC_NOTIFIER_HEAD(l2_err_chain);
+
+int register_sifive_l2_error_notifier(struct notifier_block *nb)
+{
+	return atomic_notifier_chain_register(&l2_err_chain, nb);
+}
+EXPORT_SYMBOL_GPL(register_sifive_l2_error_notifier);
+
+int unregister_sifive_l2_error_notifier(struct notifier_block *nb)
+{
+	return atomic_notifier_chain_unregister(&l2_err_chain, nb);
+}
+EXPORT_SYMBOL_GPL(unregister_sifive_l2_error_notifier);
+
+static irqreturn_t l2_int_handler(int irq, void *device)
+{
+	unsigned int regval, add_h, add_l;
+
+	if (irq == g_irq[DIR_CORR]) {
+		add_h = readl(l2_base + SIFIVE_L2_DIRECCFIX_HIGH);
+		add_l = readl(l2_base + SIFIVE_L2_DIRECCFIX_LOW);
+		pr_err("L2CACHE: DirError @ 0x%08X.%08X\n", add_h, add_l);
+		regval = readl(l2_base + SIFIVE_L2_DIRECCFIX_COUNT);
+		atomic_notifier_call_chain(&l2_err_chain, SIFIVE_L2_ERR_TYPE_CE,
+					   "DirECCFix");
+	}
+	if (irq == g_irq[DATA_CORR]) {
+		add_h = readl(l2_base + SIFIVE_L2_DATECCFIX_HIGH);
+		add_l = readl(l2_base + SIFIVE_L2_DATECCFIX_LOW);
+		pr_err("L2CACHE: DataError @ 0x%08X.%08X\n", add_h, add_l);
+		regval = readl(l2_base + SIFIVE_L2_DATECCFIX_COUNT);
+		atomic_notifier_call_chain(&l2_err_chain, SIFIVE_L2_ERR_TYPE_CE,
+					   "DatECCFix");
+	}
+	if (irq == g_irq[DATA_UNCORR]) {
+		add_h = readl(l2_base + SIFIVE_L2_DATECCFAIL_HIGH);
+		add_l = readl(l2_base + SIFIVE_L2_DATECCFAIL_LOW);
+		pr_err("L2CACHE: DataFail @ 0x%08X.%08X\n", add_h, add_l);
+		regval = readl(l2_base + SIFIVE_L2_DATECCFAIL_COUNT);
+		atomic_notifier_call_chain(&l2_err_chain, SIFIVE_L2_ERR_TYPE_UE,
+					   "DatECCFail");
+	}
+
+	return IRQ_HANDLED;
+}
+
+int __init sifive_l2_init(void)
+{
+	struct device_node *np;
+	struct resource res;
+	int i, rc;
+
+	np = of_find_matching_node(NULL, sifive_l2_ids);
+	if (!np)
+		return -ENODEV;
+
+	if (of_address_to_resource(np, 0, &res))
+		return -ENODEV;
+
+	l2_base = ioremap(res.start, resource_size(&res));
+	if (!l2_base)
+		return -ENOMEM;
+
+	for (i = 0; i < SIFIVE_L2_MAX_ECCINTR; i++) {
+		g_irq[i] = irq_of_parse_and_map(np, i);
+		rc = request_irq(g_irq[i], l2_int_handler, 0, "l2_ecc", NULL);
+		if (rc) {
+			pr_err("L2CACHE: Could not request IRQ %d\n", g_irq[i]);
+			return rc;
+		}
+	}
+
+	l2_config_read();
+
+#ifdef CONFIG_DEBUG_FS
+	setup_sifive_debug();
+#endif
+	return 0;
+}
+device_initcall(sifive_l2_init);
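
The driver above exports a notifier chain instead of deciding error policy itself, which is what the merge text means by un-blocking the EDAC framework work. A hypothetical consumer of that chain (a sketch only: the EDAC glue is not part of this patch, and all my_l2_* names are invented here):

#include <linux/module.h>
#include <linux/notifier.h>
#include <asm/sifive_l2_cache.h>

/* Hypothetical client: log uncorrectable L2 ECC events.  "msg" is the
 * short string the interrupt handler above passes to
 * atomic_notifier_call_chain(). */
static int my_l2_err_event(struct notifier_block *nb,
			   unsigned long event, void *msg)
{
	if (event == SIFIVE_L2_ERR_TYPE_UE)
		pr_err("my_l2_client: uncorrectable L2 error: %s\n",
		       (char *)msg);
	return NOTIFY_OK;
}

static struct notifier_block my_l2_err_nb = {
	.notifier_call = my_l2_err_event,
};

static int __init my_l2_client_init(void)
{
	return register_sifive_l2_error_notifier(&my_l2_err_nb);
}
module_init(my_l2_client_init);

static void __exit my_l2_client_exit(void)
{
	unregister_sifive_l2_error_notifier(&my_l2_err_nb);
}
module_exit(my_l2_client_exit);

MODULE_LICENSE("GPL");

With CONFIG_DEBUG_FS enabled, the bounds check in l2_write() suggests errors can be injected for testing by writing a value in one of the two accepted ranges (0x0-0xFE or 0x10000-0x100FE) to /sys/kernel/debug/sifive_l2_cache/sifive_debug_inject_error.
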
diff --git a/drivers/tty/hvc/hvc_riscv_sbi.c b/drivers/tty/hvc/hvc_riscv_sbi.c
index 75155bde2b88..31f53fa77e4a 100644
--- a/drivers/tty/hvc/hvc_riscv_sbi.c
+++ b/drivers/tty/hvc/hvc_riscv_sbi.c
@@ -53,7 +53,6 @@ device_initcall(hvc_sbi_init);
 static int __init hvc_sbi_console_init(void)
 {
 	hvc_instantiate(0, 0, &hvc_sbi_ops);
-	add_preferred_console("hvc", 0, NULL);
 
 	return 0;
 }