author    Palmer Dabbelt <palmer@dabbelt.com>    2017-07-10 21:03:19 -0400
committer Palmer Dabbelt <palmer@dabbelt.com>    2017-09-26 18:26:45 -0400
commit    5d8544e2d0075a5f3c9a2cf27152354d54360da1 (patch)
tree      56169c3eaeff96fe1d8547c3bac7cfa658af9d60
parent    fab957c11efe2f405e08b9f0d080524bc2631428 (diff)
RISC-V: Generic library routines and assembly
This patch contains code that is more specific to the RISC-V ISA than
it is to Linux.  It contains string and math operations, C wrappers
for various assembly instructions, stack walking code, and uaccess.

Signed-off-by: Palmer Dabbelt <palmer@dabbelt.com>
-rw-r--r--  arch/riscv/include/asm/asm.h            |  76
-rw-r--r--  arch/riscv/include/asm/csr.h            | 132
-rw-r--r--  arch/riscv/include/asm/linkage.h        |  20
-rw-r--r--  arch/riscv/include/asm/string.h         |  26
-rw-r--r--  arch/riscv/include/asm/uaccess.h        | 513
-rw-r--r--  arch/riscv/include/asm/word-at-a-time.h |  55
-rw-r--r--  arch/riscv/kernel/stacktrace.c          | 177
-rw-r--r--  arch/riscv/lib/memcpy.S                 | 115
-rw-r--r--  arch/riscv/lib/memset.S                 | 120
-rw-r--r--  arch/riscv/lib/uaccess.S                | 117
-rw-r--r--  arch/riscv/lib/udivdi3.S                |  38
11 files changed, 1389 insertions(+), 0 deletions(-)
diff --git a/arch/riscv/include/asm/asm.h b/arch/riscv/include/asm/asm.h
new file mode 100644
index 000000000000..6cbbb6a68d76
--- /dev/null
+++ b/arch/riscv/include/asm/asm.h
@@ -0,0 +1,76 @@
+/*
+ * Copyright (C) 2015 Regents of the University of California
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, version 2.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _ASM_RISCV_ASM_H
+#define _ASM_RISCV_ASM_H
+
+#ifdef __ASSEMBLY__
+#define __ASM_STR(x)	x
+#else
+#define __ASM_STR(x)	#x
+#endif
+
+#if __riscv_xlen == 64
+#define __REG_SEL(a, b)	__ASM_STR(a)
+#elif __riscv_xlen == 32
+#define __REG_SEL(a, b)	__ASM_STR(b)
+#else
+#error "Unexpected __riscv_xlen"
+#endif
+
+#define REG_L		__REG_SEL(ld, lw)
+#define REG_S		__REG_SEL(sd, sw)
+#define SZREG		__REG_SEL(8, 4)
+#define LGREG		__REG_SEL(3, 2)
+
+#if __SIZEOF_POINTER__ == 8
+#ifdef __ASSEMBLY__
+#define RISCV_PTR	.dword
+#define RISCV_SZPTR	8
+#define RISCV_LGPTR	3
+#else
+#define RISCV_PTR	".dword"
+#define RISCV_SZPTR	"8"
+#define RISCV_LGPTR	"3"
+#endif
+#elif __SIZEOF_POINTER__ == 4
+#ifdef __ASSEMBLY__
+#define RISCV_PTR	.word
+#define RISCV_SZPTR	4
+#define RISCV_LGPTR	2
+#else
+#define RISCV_PTR	".word"
+#define RISCV_SZPTR	"4"
+#define RISCV_LGPTR	"2"
+#endif
+#else
+#error "Unexpected __SIZEOF_POINTER__"
+#endif
+
+#if (__SIZEOF_INT__ == 4)
+#define INT		__ASM_STR(.word)
+#define SZINT		__ASM_STR(4)
+#define LGINT		__ASM_STR(2)
+#else
+#error "Unexpected __SIZEOF_INT__"
+#endif
+
+#if (__SIZEOF_SHORT__ == 2)
+#define SHORT		__ASM_STR(.half)
+#define SZSHORT		__ASM_STR(2)
+#define LGSHORT		__ASM_STR(1)
+#else
+#error "Unexpected __SIZEOF_SHORT__"
+#endif
+
+#endif /* _ASM_RISCV_ASM_H */
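
For illustration, a minimal sketch of how the stringified C-side variants above are meant to be used: because __ASM_STR() stringifies its argument when included from C, REG_L pastes directly into an inline-assembly template and expands to "ld" on rv64 or "lw" on rv32. The function name below is hypothetical and not part of this patch:

	#include <asm/asm.h>

	static inline unsigned long load_xlen_word(const unsigned long *p)
	{
		unsigned long v;

		/* String-pastes to "ld %0, 0(%1)" (rv64) or "lw %0, 0(%1)" (rv32) */
		__asm__ (REG_L " %0, 0(%1)" : "=r" (v) : "r" (p));
		return v;
	}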
diff --git a/arch/riscv/include/asm/csr.h b/arch/riscv/include/asm/csr.h
new file mode 100644
index 000000000000..0d64bc9f4f91
--- /dev/null
+++ b/arch/riscv/include/asm/csr.h
@@ -0,0 +1,132 @@
+/*
+ * Copyright (C) 2015 Regents of the University of California
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, version 2.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _ASM_RISCV_CSR_H
+#define _ASM_RISCV_CSR_H
+
+#include <linux/const.h>
+
+/* Status register flags */
+#define SR_IE		_AC(0x00000002, UL) /* Interrupt Enable */
+#define SR_PIE		_AC(0x00000020, UL) /* Previous IE */
+#define SR_PS		_AC(0x00000100, UL) /* Previously Supervisor */
+#define SR_SUM		_AC(0x00040000, UL) /* Supervisor may access User Memory */
+
+#define SR_FS		_AC(0x00006000, UL) /* Floating-point Status */
+#define SR_FS_OFF	_AC(0x00000000, UL)
+#define SR_FS_INITIAL	_AC(0x00002000, UL)
+#define SR_FS_CLEAN	_AC(0x00004000, UL)
+#define SR_FS_DIRTY	_AC(0x00006000, UL)
+
+#define SR_XS		_AC(0x00018000, UL) /* Extension Status */
+#define SR_XS_OFF	_AC(0x00000000, UL)
+#define SR_XS_INITIAL	_AC(0x00008000, UL)
+#define SR_XS_CLEAN	_AC(0x00010000, UL)
+#define SR_XS_DIRTY	_AC(0x00018000, UL)
+
+#ifndef CONFIG_64BIT
+#define SR_SD		_AC(0x80000000, UL) /* FS/XS dirty */
+#else
+#define SR_SD		_AC(0x8000000000000000, UL) /* FS/XS dirty */
+#endif
+
+/* SPTBR flags */
+#if __riscv_xlen == 32
+#define SPTBR_PPN	_AC(0x003FFFFF, UL)
+#define SPTBR_MODE_32	_AC(0x80000000, UL)
+#define SPTBR_MODE	SPTBR_MODE_32
+#else
+#define SPTBR_PPN	_AC(0x00000FFFFFFFFFFF, UL)
+#define SPTBR_MODE_39	_AC(0x8000000000000000, UL)
+#define SPTBR_MODE	SPTBR_MODE_39
+#endif
+
+/* Interrupt Enable and Interrupt Pending flags */
+#define SIE_SSIE	_AC(0x00000002, UL) /* Software Interrupt Enable */
+#define SIE_STIE	_AC(0x00000020, UL) /* Timer Interrupt Enable */
+
+#define EXC_INST_MISALIGNED	0
+#define EXC_INST_ACCESS		1
+#define EXC_BREAKPOINT		3
+#define EXC_LOAD_ACCESS		5
+#define EXC_STORE_ACCESS	7
+#define EXC_SYSCALL		8
+#define EXC_INST_PAGE_FAULT	12
+#define EXC_LOAD_PAGE_FAULT	13
+#define EXC_STORE_PAGE_FAULT	15
+
+#ifndef __ASSEMBLY__
+
+#define csr_swap(csr, val) \
+({ \
+	unsigned long __v = (unsigned long)(val); \
+	__asm__ __volatile__ ("csrrw %0, " #csr ", %1" \
+			      : "=r" (__v) : "rK" (__v) \
+			      : "memory"); \
+	__v; \
+})
+
+#define csr_read(csr) \
+({ \
+	register unsigned long __v; \
+	__asm__ __volatile__ ("csrr %0, " #csr \
+			      : "=r" (__v) : \
+			      : "memory"); \
+	__v; \
+})
+
+#define csr_write(csr, val) \
+({ \
+	unsigned long __v = (unsigned long)(val); \
+	__asm__ __volatile__ ("csrw " #csr ", %0" \
+			      : : "rK" (__v) \
+			      : "memory"); \
+})
+
+#define csr_read_set(csr, val) \
+({ \
+	unsigned long __v = (unsigned long)(val); \
+	__asm__ __volatile__ ("csrrs %0, " #csr ", %1" \
+			      : "=r" (__v) : "rK" (__v) \
+			      : "memory"); \
+	__v; \
+})
+
+#define csr_set(csr, val) \
+({ \
+	unsigned long __v = (unsigned long)(val); \
+	__asm__ __volatile__ ("csrs " #csr ", %0" \
+			      : : "rK" (__v) \
+			      : "memory"); \
+})
+
+#define csr_read_clear(csr, val) \
+({ \
+	unsigned long __v = (unsigned long)(val); \
+	__asm__ __volatile__ ("csrrc %0, " #csr ", %1" \
+			      : "=r" (__v) : "rK" (__v) \
+			      : "memory"); \
+	__v; \
+})
+
+#define csr_clear(csr, val) \
+({ \
+	unsigned long __v = (unsigned long)(val); \
+	__asm__ __volatile__ ("csrc " #csr ", %0" \
+			      : : "rK" (__v) \
+			      : "memory"); \
+})
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* _ASM_RISCV_CSR_H */
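
For illustration, a minimal usage sketch of the accessors above (the function is hypothetical; sstatus and SR_SUM are defined in this patch). Note that the CSR name is pasted in via #csr, so it is passed as a bare token rather than a string:

	static inline unsigned long save_and_allow_user_access(void)
	{
		unsigned long old = csr_read(sstatus);	/* emits: csrr %0, sstatus */

		csr_set(sstatus, SR_SUM);		/* emits: csrs sstatus, %0 */
		return old;
	}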
diff --git a/arch/riscv/include/asm/linkage.h b/arch/riscv/include/asm/linkage.h
new file mode 100644
index 000000000000..b7b304ca89c4
--- /dev/null
+++ b/arch/riscv/include/asm/linkage.h
@@ -0,0 +1,20 @@
+/*
+ * Copyright (C) 2015 Regents of the University of California
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, version 2.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _ASM_RISCV_LINKAGE_H
+#define _ASM_RISCV_LINKAGE_H
+
+#define __ALIGN		.balign 4
+#define __ALIGN_STR	".balign 4"
+
+#endif /* _ASM_RISCV_LINKAGE_H */
diff --git a/arch/riscv/include/asm/string.h b/arch/riscv/include/asm/string.h
new file mode 100644
index 000000000000..9210fcf4ff52
--- /dev/null
+++ b/arch/riscv/include/asm/string.h
@@ -0,0 +1,26 @@
+/*
+ * Copyright (C) 2013 Regents of the University of California
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, version 2.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _ASM_RISCV_STRING_H
+#define _ASM_RISCV_STRING_H
+
+#include <linux/types.h>
+#include <linux/linkage.h>
+
+#define __HAVE_ARCH_MEMSET
+extern asmlinkage void *memset(void *, int, size_t);
+
+#define __HAVE_ARCH_MEMCPY
+extern asmlinkage void *memcpy(void *, const void *, size_t);
+
+#endif /* _ASM_RISCV_STRING_H */
diff --git a/arch/riscv/include/asm/uaccess.h b/arch/riscv/include/asm/uaccess.h
new file mode 100644
index 000000000000..27b90d64814b
--- /dev/null
+++ b/arch/riscv/include/asm/uaccess.h
@@ -0,0 +1,513 @@
+/*
+ * Copyright (C) 2012 Regents of the University of California
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, version 2.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * This file was copied from include/asm-generic/uaccess.h
+ */
+
+#ifndef _ASM_RISCV_UACCESS_H
+#define _ASM_RISCV_UACCESS_H
+
+/*
+ * User space memory access functions
+ */
+#include <linux/errno.h>
+#include <linux/compiler.h>
+#include <linux/thread_info.h>
+#include <asm/byteorder.h>
+#include <asm/asm.h>
+
+#define __enable_user_access() \
+	__asm__ __volatile__ ("csrs sstatus, %0" : : "r" (SR_SUM) : "memory")
+#define __disable_user_access() \
+	__asm__ __volatile__ ("csrc sstatus, %0" : : "r" (SR_SUM) : "memory")
+
+/*
+ * The fs value determines whether argument validity checking should be
+ * performed or not. If get_fs() == USER_DS, checking is performed, with
+ * get_fs() == KERNEL_DS, checking is bypassed.
+ *
+ * For historical reasons, these macros are grossly misnamed.
+ */
+
+#define KERNEL_DS	(~0UL)
+#define USER_DS		(TASK_SIZE)
+
+#define get_ds()	(KERNEL_DS)
+#define get_fs()	(current_thread_info()->addr_limit)
+
+static inline void set_fs(mm_segment_t fs)
+{
+	current_thread_info()->addr_limit = fs;
+}
+
+#define segment_eq(a, b) ((a) == (b))
+
+#define user_addr_max()	(get_fs())
+
+
+#define VERIFY_READ	0
+#define VERIFY_WRITE	1
+
+/**
+ * access_ok: - Checks if a user space pointer is valid
+ * @type: Type of access: %VERIFY_READ or %VERIFY_WRITE. Note that
+ *        %VERIFY_WRITE is a superset of %VERIFY_READ - if it is safe
+ *        to write to a block, it is always safe to read from it.
+ * @addr: User space pointer to start of block to check
+ * @size: Size of block to check
+ *
+ * Context: User context only. This function may sleep.
+ *
+ * Checks if a pointer to a block of memory in user space is valid.
+ *
+ * Returns true (nonzero) if the memory block may be valid, false (zero)
+ * if it is definitely invalid.
+ *
+ * Note that, depending on architecture, this function probably just
+ * checks that the pointer is in the user space range - after calling
+ * this function, memory access functions may still return -EFAULT.
+ */
+#define access_ok(type, addr, size) ({ \
+	__chk_user_ptr(addr); \
+	likely(__access_ok((unsigned long __force)(addr), (size))); \
+})
+
+/*
+ * Ensure that the range [addr, addr+size) is within the process's
+ * address space
+ */
+static inline int __access_ok(unsigned long addr, unsigned long size)
+{
+	const mm_segment_t fs = get_fs();
+
+	return (size <= fs) && (addr <= (fs - size));
+}
+
+/*
+ * The exception table consists of pairs of addresses: the first is the
+ * address of an instruction that is allowed to fault, and the second is
+ * the address at which the program should continue. No registers are
+ * modified, so it is entirely up to the continuation code to figure out
+ * what to do.
+ *
+ * All the routines below use bits of fixup code that are out of line
+ * with the main instruction path. This means when everything is well,
+ * we don't even have to jump over them. Further, they do not intrude
+ * on our cache or tlb entries.
+ */
+
+struct exception_table_entry {
+	unsigned long insn, fixup;
+};
+
+extern int fixup_exception(struct pt_regs *state);
+
+#if defined(__LITTLE_ENDIAN)
+#define __MSW	1
+#define __LSW	0
+#elif defined(__BIG_ENDIAN)
+#define __MSW	0
+#define __LSW	1
+#else
+#error "Unknown endianness"
+#endif
+
+/*
+ * The "__xxx" versions of the user access functions do not verify the address
+ * space - it must have been done previously with a separate "access_ok()"
+ * call.
+ */
+
+#ifdef CONFIG_MMU
+#define __get_user_asm(insn, x, ptr, err) \
+do { \
+	uintptr_t __tmp; \
+	__typeof__(x) __x; \
+	__enable_user_access(); \
+	__asm__ __volatile__ ( \
+		"1:\n" \
+		" " insn " %1, %3\n" \
+		"2:\n" \
+		" .section .fixup,\"ax\"\n" \
+		" .balign 4\n" \
+		"3:\n" \
+		" li %0, %4\n" \
+		" li %1, 0\n" \
+		" jump 2b, %2\n" \
+		" .previous\n" \
+		" .section __ex_table,\"a\"\n" \
+		" .balign " RISCV_SZPTR "\n" \
+		" " RISCV_PTR " 1b, 3b\n" \
+		" .previous" \
+		: "+r" (err), "=&r" (__x), "=r" (__tmp) \
+		: "m" (*(ptr)), "i" (-EFAULT)); \
+	__disable_user_access(); \
+	(x) = __x; \
+} while (0)
+#endif /* CONFIG_MMU */
+
+#ifdef CONFIG_64BIT
+#define __get_user_8(x, ptr, err) \
+	__get_user_asm("ld", x, ptr, err)
+#else /* !CONFIG_64BIT */
+#ifdef CONFIG_MMU
+#define __get_user_8(x, ptr, err) \
+do { \
+	u32 __user *__ptr = (u32 __user *)(ptr); \
+	u32 __lo, __hi; \
+	uintptr_t __tmp; \
+	__enable_user_access(); \
+	__asm__ __volatile__ ( \
+		"1:\n" \
+		" lw %1, %4\n" \
+		"2:\n" \
+		" lw %2, %5\n" \
+		"3:\n" \
+		" .section .fixup,\"ax\"\n" \
+		" .balign 4\n" \
+		"4:\n" \
+		" li %0, %6\n" \
+		" li %1, 0\n" \
+		" li %2, 0\n" \
+		" jump 3b, %3\n" \
+		" .previous\n" \
+		" .section __ex_table,\"a\"\n" \
+		" .balign " RISCV_SZPTR "\n" \
+		" " RISCV_PTR " 1b, 4b\n" \
+		" " RISCV_PTR " 2b, 4b\n" \
+		" .previous" \
+		: "+r" (err), "=&r" (__lo), "=r" (__hi), \
+		  "=r" (__tmp) \
+		: "m" (__ptr[__LSW]), "m" (__ptr[__MSW]), \
+		  "i" (-EFAULT)); \
+	__disable_user_access(); \
+	(x) = (__typeof__(x))((__typeof__((x)-(x)))( \
+		(((u64)__hi << 32) | __lo))); \
+} while (0)
+#endif /* CONFIG_MMU */
+#endif /* CONFIG_64BIT */
+
+
+/**
+ * __get_user: - Get a simple variable from user space, with less checking.
+ * @x:   Variable to store result.
+ * @ptr: Source address, in user space.
+ *
+ * Context: User context only. This function may sleep.
+ *
+ * This macro copies a single simple variable from user space to kernel
+ * space. It supports simple types like char and int, but not larger
+ * data types like structures or arrays.
+ *
+ * @ptr must have pointer-to-simple-variable type, and the result of
+ * dereferencing @ptr must be assignable to @x without a cast.
+ *
+ * Caller must check the pointer with access_ok() before calling this
+ * function.
+ *
+ * Returns zero on success, or -EFAULT on error.
+ * On error, the variable @x is set to zero.
+ */
+#define __get_user(x, ptr) \
+({ \
+	register long __gu_err = 0; \
+	const __typeof__(*(ptr)) __user *__gu_ptr = (ptr); \
+	__chk_user_ptr(__gu_ptr); \
+	switch (sizeof(*__gu_ptr)) { \
+	case 1: \
+		__get_user_asm("lb", (x), __gu_ptr, __gu_err); \
+		break; \
+	case 2: \
+		__get_user_asm("lh", (x), __gu_ptr, __gu_err); \
+		break; \
+	case 4: \
+		__get_user_asm("lw", (x), __gu_ptr, __gu_err); \
+		break; \
+	case 8: \
+		__get_user_8((x), __gu_ptr, __gu_err); \
+		break; \
+	default: \
+		BUILD_BUG(); \
+	} \
+	__gu_err; \
+})
+
+/**
+ * get_user: - Get a simple variable from user space.
+ * @x:   Variable to store result.
+ * @ptr: Source address, in user space.
+ *
+ * Context: User context only. This function may sleep.
+ *
+ * This macro copies a single simple variable from user space to kernel
+ * space. It supports simple types like char and int, but not larger
+ * data types like structures or arrays.
+ *
+ * @ptr must have pointer-to-simple-variable type, and the result of
+ * dereferencing @ptr must be assignable to @x without a cast.
+ *
+ * Returns zero on success, or -EFAULT on error.
+ * On error, the variable @x is set to zero.
+ */
+#define get_user(x, ptr) \
+({ \
+	const __typeof__(*(ptr)) __user *__p = (ptr); \
+	might_fault(); \
+	access_ok(VERIFY_READ, __p, sizeof(*__p)) ? \
+		__get_user((x), __p) : \
+		((x) = 0, -EFAULT); \
+})
+
+
+#ifdef CONFIG_MMU
+#define __put_user_asm(insn, x, ptr, err) \
+do { \
+	uintptr_t __tmp; \
+	__typeof__(*(ptr)) __x = x; \
+	__enable_user_access(); \
+	__asm__ __volatile__ ( \
+		"1:\n" \
+		" " insn " %z3, %2\n" \
+		"2:\n" \
+		" .section .fixup,\"ax\"\n" \
+		" .balign 4\n" \
+		"3:\n" \
+		" li %0, %4\n" \
+		" jump 2b, %1\n" \
+		" .previous\n" \
+		" .section __ex_table,\"a\"\n" \
+		" .balign " RISCV_SZPTR "\n" \
+		" " RISCV_PTR " 1b, 3b\n" \
+		" .previous" \
+		: "+r" (err), "=r" (__tmp), "=m" (*(ptr)) \
+		: "rJ" (__x), "i" (-EFAULT)); \
+	__disable_user_access(); \
+} while (0)
+#endif /* CONFIG_MMU */
+
+
+#ifdef CONFIG_64BIT
+#define __put_user_8(x, ptr, err) \
+	__put_user_asm("sd", x, ptr, err)
+#else /* !CONFIG_64BIT */
+#ifdef CONFIG_MMU
+#define __put_user_8(x, ptr, err) \
+do { \
+	u32 __user *__ptr = (u32 __user *)(ptr); \
+	u64 __x = (__typeof__((x)-(x)))(x); \
+	uintptr_t __tmp; \
+	__enable_user_access(); \
+	__asm__ __volatile__ ( \
+		"1:\n" \
+		" sw %z4, %2\n" \
+		"2:\n" \
+		" sw %z5, %3\n" \
+		"3:\n" \
+		" .section .fixup,\"ax\"\n" \
+		" .balign 4\n" \
+		"4:\n" \
+		" li %0, %6\n" \
+		" jump 2b, %1\n" \
+		" .previous\n" \
+		" .section __ex_table,\"a\"\n" \
+		" .balign " RISCV_SZPTR "\n" \
+		" " RISCV_PTR " 1b, 4b\n" \
+		" " RISCV_PTR " 2b, 4b\n" \
+		" .previous" \
+		: "+r" (err), "=r" (__tmp), \
+		  "=m" (__ptr[__LSW]), \
+		  "=m" (__ptr[__MSW]) \
+		: "rJ" (__x), "rJ" (__x >> 32), "i" (-EFAULT)); \
+	__disable_user_access(); \
+} while (0)
+#endif /* CONFIG_MMU */
+#endif /* CONFIG_64BIT */
+
+
+/**
+ * __put_user: - Write a simple value into user space, with less checking.
+ * @x:   Value to copy to user space.
+ * @ptr: Destination address, in user space.
+ *
+ * Context: User context only. This function may sleep.
+ *
+ * This macro copies a single simple value from kernel space to user
+ * space. It supports simple types like char and int, but not larger
+ * data types like structures or arrays.
+ *
+ * @ptr must have pointer-to-simple-variable type, and @x must be assignable
+ * to the result of dereferencing @ptr.
+ *
+ * Caller must check the pointer with access_ok() before calling this
+ * function.
+ *
+ * Returns zero on success, or -EFAULT on error.
+ */
+#define __put_user(x, ptr) \
+({ \
+	register long __pu_err = 0; \
+	__typeof__(*(ptr)) __user *__gu_ptr = (ptr); \
+	__chk_user_ptr(__gu_ptr); \
+	switch (sizeof(*__gu_ptr)) { \
+	case 1: \
+		__put_user_asm("sb", (x), __gu_ptr, __pu_err); \
+		break; \
+	case 2: \
+		__put_user_asm("sh", (x), __gu_ptr, __pu_err); \
+		break; \
+	case 4: \
+		__put_user_asm("sw", (x), __gu_ptr, __pu_err); \
+		break; \
+	case 8: \
+		__put_user_8((x), __gu_ptr, __pu_err); \
+		break; \
+	default: \
+		BUILD_BUG(); \
+	} \
+	__pu_err; \
+})
+
+/**
+ * put_user: - Write a simple value into user space.
+ * @x:   Value to copy to user space.
+ * @ptr: Destination address, in user space.
+ *
+ * Context: User context only. This function may sleep.
+ *
+ * This macro copies a single simple value from kernel space to user
+ * space. It supports simple types like char and int, but not larger
+ * data types like structures or arrays.
+ *
+ * @ptr must have pointer-to-simple-variable type, and @x must be assignable
+ * to the result of dereferencing @ptr.
+ *
+ * Returns zero on success, or -EFAULT on error.
+ */
+#define put_user(x, ptr) \
+({ \
+	__typeof__(*(ptr)) __user *__p = (ptr); \
+	might_fault(); \
+	access_ok(VERIFY_WRITE, __p, sizeof(*__p)) ? \
+		__put_user((x), __p) : \
+		-EFAULT; \
+})
+
+
+extern unsigned long __must_check __copy_user(void __user *to,
+	const void __user *from, unsigned long n);
+
+static inline unsigned long
+raw_copy_from_user(void *to, const void __user *from, unsigned long n)
+{
+	return __copy_user(to, from, n);
+}
+
+static inline unsigned long
+raw_copy_to_user(void __user *to, const void *from, unsigned long n)
+{
+	return __copy_user(to, from, n);
+}
+
+extern long strncpy_from_user(char *dest, const char __user *src, long count);
+
+extern long __must_check strlen_user(const char __user *str);
+extern long __must_check strnlen_user(const char __user *str, long n);
+
+extern
+unsigned long __must_check __clear_user(void __user *addr, unsigned long n);
+
+static inline
+unsigned long __must_check clear_user(void __user *to, unsigned long n)
+{
+	might_fault();
+	return access_ok(VERIFY_WRITE, to, n) ?
+		__clear_user(to, n) : n;
+}
+
+/*
+ * Atomic compare-and-exchange, but with a fixup for userspace faults. Faults
+ * will set "err" to -EFAULT, while successful accesses return the previous
+ * value.
+ */
+#ifdef CONFIG_MMU
+#define __cmpxchg_user(ptr, old, new, err, size, lrb, scb) \
+({ \
+	__typeof__(ptr) __ptr = (ptr); \
+	__typeof__(*(ptr)) __old = (old); \
+	__typeof__(*(ptr)) __new = (new); \
+	__typeof__(*(ptr)) __ret; \
+	__typeof__(err) __err = 0; \
+	register unsigned int __rc; \
+	__enable_user_access(); \
+	switch (size) { \
+	case 4: \
+		__asm__ __volatile__ ( \
+		"0:\n" \
+		" lr.w" #scb " %[ret], %[ptr]\n" \
+		" bne %[ret], %z[old], 1f\n" \
+		" sc.w" #lrb " %[rc], %z[new], %[ptr]\n" \
+		" bnez %[rc], 0b\n" \
+		"1:\n" \
+		".section .fixup,\"ax\"\n" \
+		".balign 4\n" \
+		"2:\n" \
+		" li %[err], %[efault]\n" \
+		" jump 1b, %[rc]\n" \
+		".previous\n" \
+		".section __ex_table,\"a\"\n" \
+		".balign " RISCV_SZPTR "\n" \
+		" " RISCV_PTR " 1b, 2b\n" \
+		".previous\n" \
+			: [ret] "=&r" (__ret), \
+			  [rc] "=&r" (__rc), \
+			  [ptr] "+A" (*__ptr), \
+			  [err] "=&r" (__err) \
+			: [old] "rJ" (__old), \
+			  [new] "rJ" (__new), \
+			  [efault] "i" (-EFAULT)); \
+		break; \
+	case 8: \
+		__asm__ __volatile__ ( \
+		"0:\n" \
+		" lr.d" #scb " %[ret], %[ptr]\n" \
+		" bne %[ret], %z[old], 1f\n" \
+		" sc.d" #lrb " %[rc], %z[new], %[ptr]\n" \
+		" bnez %[rc], 0b\n" \
+		"1:\n" \
+		".section .fixup,\"ax\"\n" \
+		".balign 4\n" \
+		"2:\n" \
+		" li %[err], %[efault]\n" \
+		" jump 1b, %[rc]\n" \
+		".previous\n" \
+		".section __ex_table,\"a\"\n" \
+		".balign " RISCV_SZPTR "\n" \
+		" " RISCV_PTR " 1b, 2b\n" \
+		".previous\n" \
+			: [ret] "=&r" (__ret), \
+			  [rc] "=&r" (__rc), \
+			  [ptr] "+A" (*__ptr), \
+			  [err] "=&r" (__err) \
+			: [old] "rJ" (__old), \
+			  [new] "rJ" (__new), \
+			  [efault] "i" (-EFAULT)); \
+		break; \
+	default: \
+		BUILD_BUG(); \
+	} \
+	__disable_user_access(); \
+	(err) = __err; \
+	__ret; \
+})
+#endif /* CONFIG_MMU */
+
+#endif /* _ASM_RISCV_UACCESS_H */
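
For illustration, a minimal sketch of the calling convention implemented above: get_user()/put_user() validate the pointer with access_ok() and return 0 or -EFAULT, while the __-prefixed variants assume the caller already did the check. The function below is hypothetical and not part of the patch:

	static long increment_user_counter(int __user *uptr)
	{
		int val;

		if (get_user(val, uptr))	/* access_ok() + fixup-protected load */
			return -EFAULT;
		return put_user(val + 1, uptr);	/* fixup-protected store */
	}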
diff --git a/arch/riscv/include/asm/word-at-a-time.h b/arch/riscv/include/asm/word-at-a-time.h
new file mode 100644
index 000000000000..aa6238791d3e
--- /dev/null
+++ b/arch/riscv/include/asm/word-at-a-time.h
@@ -0,0 +1,55 @@
+/*
+ * Copyright (C) 2012 Regents of the University of California
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, version 2.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * Derived from arch/x86/include/asm/word-at-a-time.h
+ */
+
+#ifndef _ASM_RISCV_WORD_AT_A_TIME_H
+#define _ASM_RISCV_WORD_AT_A_TIME_H
+
+
+#include <linux/kernel.h>
+
+struct word_at_a_time {
+	const unsigned long one_bits, high_bits;
+};
+
+#define WORD_AT_A_TIME_CONSTANTS { REPEAT_BYTE(0x01), REPEAT_BYTE(0x80) }
+
+static inline unsigned long has_zero(unsigned long val,
+	unsigned long *bits, const struct word_at_a_time *c)
+{
+	unsigned long mask = ((val - c->one_bits) & ~val) & c->high_bits;
+	*bits = mask;
+	return mask;
+}
+
+static inline unsigned long prep_zero_mask(unsigned long val,
+	unsigned long bits, const struct word_at_a_time *c)
+{
+	return bits;
+}
+
+static inline unsigned long create_zero_mask(unsigned long bits)
+{
+	bits = (bits - 1) & ~bits;
+	return bits >> 7;
+}
+
+static inline unsigned long find_zero(unsigned long mask)
+{
+	return fls64(mask) >> 3;
+}
+
+/* The mask we created is directly usable as a bytemask */
+#define zero_bytemask(mask) (mask)
+
+#endif /* _ASM_RISCV_WORD_AT_A_TIME_H */
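
For illustration of how the helpers above compose, a sketch of one word-at-a-time step that locates the first NUL byte in a little-endian word, in the style of the kernel's string-scanning users (the function name is hypothetical):

	static inline long first_nul_byte(unsigned long word)
	{
		const struct word_at_a_time c = WORD_AT_A_TIME_CONSTANTS;
		unsigned long bits;

		if (!has_zero(word, &bits, &c))
			return -1;		/* no zero byte in this word */
		bits = prep_zero_mask(word, bits, &c);
		return find_zero(create_zero_mask(bits));	/* byte index */
	}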
diff --git a/arch/riscv/kernel/stacktrace.c b/arch/riscv/kernel/stacktrace.c
new file mode 100644
index 000000000000..559aae781154
--- /dev/null
+++ b/arch/riscv/kernel/stacktrace.c
@@ -0,0 +1,177 @@
+/*
+ * Copyright (C) 2008 ARM Limited
+ * Copyright (C) 2014 Regents of the University of California
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/export.h>
+#include <linux/kallsyms.h>
+#include <linux/sched.h>
+#include <linux/sched/debug.h>
+#include <linux/sched/task_stack.h>
+#include <linux/stacktrace.h>
+
+#ifdef CONFIG_FRAME_POINTER
+
+struct stackframe {
+	unsigned long fp;
+	unsigned long ra;
+};
+
+static void notrace walk_stackframe(struct task_struct *task,
+	struct pt_regs *regs, bool (*fn)(unsigned long, void *), void *arg)
+{
+	unsigned long fp, sp, pc;
+
+	if (regs) {
+		fp = GET_FP(regs);
+		sp = GET_USP(regs);
+		pc = GET_IP(regs);
+	} else if (task == NULL || task == current) {
+		const register unsigned long current_sp __asm__ ("sp");
+		fp = (unsigned long)__builtin_frame_address(0);
+		sp = current_sp;
+		pc = (unsigned long)walk_stackframe;
+	} else {
+		/* task blocked in __switch_to */
+		fp = task->thread.s[0];
+		sp = task->thread.sp;
+		pc = task->thread.ra;
+	}
+
+	for (;;) {
+		unsigned long low, high;
+		struct stackframe *frame;
+
+		if (unlikely(!__kernel_text_address(pc) || fn(pc, arg)))
+			break;
+
+		/* Validate frame pointer */
+		low = sp + sizeof(struct stackframe);
+		high = ALIGN(sp, THREAD_SIZE);
+		if (unlikely(fp < low || fp > high || fp & 0x7))
+			break;
+		/* Unwind stack frame */
+		frame = (struct stackframe *)fp - 1;
+		sp = fp;
+		fp = frame->fp;
+		pc = frame->ra - 0x4;
+	}
+}
+
+#else /* !CONFIG_FRAME_POINTER */
+
+static void notrace walk_stackframe(struct task_struct *task,
+	struct pt_regs *regs, bool (*fn)(unsigned long, void *), void *arg)
+{
+	unsigned long sp, pc;
+	unsigned long *ksp;
+
+	if (regs) {
+		sp = GET_USP(regs);
+		pc = GET_IP(regs);
+	} else if (task == NULL || task == current) {
+		const register unsigned long current_sp __asm__ ("sp");
+		sp = current_sp;
+		pc = (unsigned long)walk_stackframe;
+	} else {
+		/* task blocked in __switch_to */
+		sp = task->thread.sp;
+		pc = task->thread.ra;
+	}
+
+	if (unlikely(sp & 0x7))
+		return;
+
+	ksp = (unsigned long *)sp;
+	while (!kstack_end(ksp)) {
+		if (__kernel_text_address(pc) && unlikely(fn(pc, arg)))
+			break;
+		pc = (*ksp++) - 0x4;
+	}
+}
+
+#endif /* CONFIG_FRAME_POINTER */
+
+
+static bool print_trace_address(unsigned long pc, void *arg)
+{
+	print_ip_sym(pc);
+	return false;
+}
+
+void show_stack(struct task_struct *task, unsigned long *sp)
+{
+	pr_cont("Call Trace:\n");
+	walk_stackframe(task, NULL, print_trace_address, NULL);
+}
+
+
+static bool save_wchan(unsigned long pc, void *arg)
+{
+	if (!in_sched_functions(pc)) {
+		unsigned long *p = arg;
+		*p = pc;
+		return true;
+	}
+	return false;
+}
+
+unsigned long get_wchan(struct task_struct *task)
+{
+	unsigned long pc = 0;
+
+	if (likely(task && task != current && task->state != TASK_RUNNING))
+		walk_stackframe(task, NULL, save_wchan, &pc);
+	return pc;
+}
+
+
+#ifdef CONFIG_STACKTRACE
+
+static bool __save_trace(unsigned long pc, void *arg, bool nosched)
+{
+	struct stack_trace *trace = arg;
+
+	if (unlikely(nosched && in_sched_functions(pc)))
+		return false;
+	if (unlikely(trace->skip > 0)) {
+		trace->skip--;
+		return false;
+	}
+
+	trace->entries[trace->nr_entries++] = pc;
+	return (trace->nr_entries >= trace->max_entries);
+}
+
+static bool save_trace(unsigned long pc, void *arg)
+{
+	return __save_trace(pc, arg, false);
+}
+
+/*
+ * Save stack-backtrace addresses into a stack_trace buffer.
+ */
+void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
+{
+	walk_stackframe(tsk, NULL, save_trace, trace);
+	if (trace->nr_entries < trace->max_entries)
+		trace->entries[trace->nr_entries++] = ULONG_MAX;
+}
+EXPORT_SYMBOL_GPL(save_stack_trace_tsk);
+
+void save_stack_trace(struct stack_trace *trace)
+{
+	save_stack_trace_tsk(NULL, trace);
+}
+EXPORT_SYMBOL_GPL(save_stack_trace);
+
+#endif /* CONFIG_STACKTRACE */
diff --git a/arch/riscv/lib/memcpy.S b/arch/riscv/lib/memcpy.S
new file mode 100644
index 000000000000..80f9c1a5c598
--- /dev/null
+++ b/arch/riscv/lib/memcpy.S
@@ -0,0 +1,115 @@
+/*
+ * Copyright (C) 2013 Regents of the University of California
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, version 2.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/linkage.h>
+#include <asm/asm.h>
+
+/* void *memcpy(void *, const void *, size_t) */
+ENTRY(memcpy)
+	move t6, a0			/* Preserve return value */
+
+	/* Defer to byte-oriented copy for small sizes */
+	sltiu a3, a2, 128
+	bnez a3, 4f
+	/* Use word-oriented copy only if low-order bits match */
+	andi a3, t6, SZREG-1
+	andi a4, a1, SZREG-1
+	bne a3, a4, 4f
+
+	beqz a3, 2f			/* Skip if already aligned */
+	/*
+	 * Round to nearest double word-aligned address
+	 * greater than or equal to start address
+	 */
+	andi a3, a1, ~(SZREG-1)
+	addi a3, a3, SZREG
+	/* Handle initial misalignment */
+	sub a4, a3, a1
+1:
+	lb a5, 0(a1)
+	addi a1, a1, 1
+	sb a5, 0(t6)
+	addi t6, t6, 1
+	bltu a1, a3, 1b
+	sub a2, a2, a4			/* Update count */
+
+2:
+	andi a4, a2, ~((16*SZREG)-1)
+	beqz a4, 4f
+	add a3, a1, a4
+3:
+	REG_L a4, 0(a1)
+	REG_L a5, SZREG(a1)
+	REG_L a6, 2*SZREG(a1)
+	REG_L a7, 3*SZREG(a1)
+	REG_L t0, 4*SZREG(a1)
+	REG_L t1, 5*SZREG(a1)
+	REG_L t2, 6*SZREG(a1)
+	REG_L t3, 7*SZREG(a1)
+	REG_L t4, 8*SZREG(a1)
+	REG_L t5, 9*SZREG(a1)
+	REG_S a4, 0(t6)
+	REG_S a5, SZREG(t6)
+	REG_S a6, 2*SZREG(t6)
+	REG_S a7, 3*SZREG(t6)
+	REG_S t0, 4*SZREG(t6)
+	REG_S t1, 5*SZREG(t6)
+	REG_S t2, 6*SZREG(t6)
+	REG_S t3, 7*SZREG(t6)
+	REG_S t4, 8*SZREG(t6)
+	REG_S t5, 9*SZREG(t6)
+	REG_L a4, 10*SZREG(a1)
+	REG_L a5, 11*SZREG(a1)
+	REG_L a6, 12*SZREG(a1)
+	REG_L a7, 13*SZREG(a1)
+	REG_L t0, 14*SZREG(a1)
+	REG_L t1, 15*SZREG(a1)
+	addi a1, a1, 16*SZREG
+	REG_S a4, 10*SZREG(t6)
+	REG_S a5, 11*SZREG(t6)
+	REG_S a6, 12*SZREG(t6)
+	REG_S a7, 13*SZREG(t6)
+	REG_S t0, 14*SZREG(t6)
+	REG_S t1, 15*SZREG(t6)
+	addi t6, t6, 16*SZREG
+	bltu a1, a3, 3b
+	andi a2, a2, (16*SZREG)-1	/* Update count */
+
+4:
+	/* Handle trailing misalignment */
+	beqz a2, 6f
+	add a3, a1, a2
+
+	/* Use word-oriented copy if co-aligned to word boundary */
+	or a5, a1, t6
+	or a5, a5, a3
+	andi a5, a5, 3
+	bnez a5, 5f
+7:
+	lw a4, 0(a1)
+	addi a1, a1, 4
+	sw a4, 0(t6)
+	addi t6, t6, 4
+	bltu a1, a3, 7b
+
+	ret
+
+5:
+	lb a4, 0(a1)
+	addi a1, a1, 1
+	sb a4, 0(t6)
+	addi t6, t6, 1
+	bltu a1, a3, 5b
+6:
+	ret
+END(memcpy)
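
For reference, the control flow above restated as a C sketch (an approximation, not the kernel implementation: the assembly additionally unrolls the word loop sixteen registers deep and retries word-sized copies for a co-aligned tail):

	#include <stddef.h>
	#include <stdint.h>

	void *memcpy_sketch(void *dst, const void *src, size_t n)
	{
		unsigned char *d = dst;
		const unsigned char *s = src;

		/* Word copy only for large buffers whose low-order bits match */
		if (n >= 128 && ((uintptr_t)d & (sizeof(long) - 1)) ==
				((uintptr_t)s & (sizeof(long) - 1))) {
			while ((uintptr_t)s & (sizeof(long) - 1)) {	/* head bytes */
				*d++ = *s++;
				n--;
			}
			for (; n >= sizeof(long); n -= sizeof(long)) {	/* word body */
				*(long *)(void *)d = *(const long *)(const void *)s;
				d += sizeof(long);
				s += sizeof(long);
			}
		}
		while (n--)						/* tail bytes */
			*d++ = *s++;
		return dst;
	}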
diff --git a/arch/riscv/lib/memset.S b/arch/riscv/lib/memset.S
new file mode 100644
index 000000000000..a790107cf4c9
--- /dev/null
+++ b/arch/riscv/lib/memset.S
@@ -0,0 +1,120 @@
+/*
+ * Copyright (C) 2013 Regents of the University of California
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, version 2.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+
+#include <linux/linkage.h>
+#include <asm/asm.h>
+
+/* void *memset(void *, int, size_t) */
+ENTRY(memset)
+	move t0, a0			/* Preserve return value */
+
+	/* Defer to byte-oriented fill for small sizes */
+	sltiu a3, a2, 16
+	bnez a3, 4f
+
+	/*
+	 * Round to nearest XLEN-aligned address
+	 * greater than or equal to start address
+	 */
+	addi a3, t0, SZREG-1
+	andi a3, a3, ~(SZREG-1)
+	beq a3, t0, 2f			/* Skip if already aligned */
+	/* Handle initial misalignment */
+	sub a4, a3, t0
+1:
+	sb a1, 0(t0)
+	addi t0, t0, 1
+	bltu t0, a3, 1b
+	sub a2, a2, a4			/* Update count */
+
+2: /* Duff's device with 32 XLEN stores per iteration */
+	/* Broadcast value into all bytes */
+	andi a1, a1, 0xff
+	slli a3, a1, 8
+	or a1, a3, a1
+	slli a3, a1, 16
+	or a1, a3, a1
+#ifdef CONFIG_64BIT
+	slli a3, a1, 32
+	or a1, a3, a1
+#endif
+
+	/* Calculate end address */
+	andi a4, a2, ~(SZREG-1)
+	add a3, t0, a4
+
+	andi a4, a4, 31*SZREG		/* Calculate remainder */
+	beqz a4, 3f			/* Shortcut if no remainder */
+	neg a4, a4
+	addi a4, a4, 32*SZREG		/* Calculate initial offset */
+
+	/* Adjust start address with offset */
+	sub t0, t0, a4
+
+	/* Jump into loop body */
+	/* Assumes 32-bit instruction lengths */
+	la a5, 3f
+#ifdef CONFIG_64BIT
+	srli a4, a4, 1
+#endif
+	add a5, a5, a4
+	jr a5
+3:
+	REG_S a1, 0(t0)
+	REG_S a1, SZREG(t0)
+	REG_S a1, 2*SZREG(t0)
+	REG_S a1, 3*SZREG(t0)
+	REG_S a1, 4*SZREG(t0)
+	REG_S a1, 5*SZREG(t0)
+	REG_S a1, 6*SZREG(t0)
+	REG_S a1, 7*SZREG(t0)
+	REG_S a1, 8*SZREG(t0)
+	REG_S a1, 9*SZREG(t0)
+	REG_S a1, 10*SZREG(t0)
+	REG_S a1, 11*SZREG(t0)
+	REG_S a1, 12*SZREG(t0)
+	REG_S a1, 13*SZREG(t0)
+	REG_S a1, 14*SZREG(t0)
+	REG_S a1, 15*SZREG(t0)
+	REG_S a1, 16*SZREG(t0)
+	REG_S a1, 17*SZREG(t0)
+	REG_S a1, 18*SZREG(t0)
+	REG_S a1, 19*SZREG(t0)
+	REG_S a1, 20*SZREG(t0)
+	REG_S a1, 21*SZREG(t0)
+	REG_S a1, 22*SZREG(t0)
+	REG_S a1, 23*SZREG(t0)
+	REG_S a1, 24*SZREG(t0)
+	REG_S a1, 25*SZREG(t0)
+	REG_S a1, 26*SZREG(t0)
+	REG_S a1, 27*SZREG(t0)
+	REG_S a1, 28*SZREG(t0)
+	REG_S a1, 29*SZREG(t0)
+	REG_S a1, 30*SZREG(t0)
+	REG_S a1, 31*SZREG(t0)
+	addi t0, t0, 32*SZREG
+	bltu t0, a3, 3b
+	andi a2, a2, SZREG-1		/* Update count */
+
+4:
+	/* Handle trailing misalignment */
+	beqz a2, 6f
+	add a3, t0, a2
+5:
+	sb a1, 0(t0)
+	addi t0, t0, 1
+	bltu t0, a3, 5b
+6:
+	ret
+END(memset)
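
The value-broadcast step at label 2 above can be read as the following C sketch, which replicates the low byte into every byte lane of an XLEN-sized register (a sketch only; the CONFIG_64BIT guard in the assembly plays the role of the size test here):

	static unsigned long broadcast_byte(int c)
	{
		unsigned long v = (unsigned char)c;

		v |= v << 8;				/* 0x00cc -> 0xcccc */
		v |= v << 16;				/* -> 0xcccccccc */
		if (sizeof(v) == 8)
			v |= v << (sizeof(v) * 4);	/* fill the high word on rv64 */
		return v;
	}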
diff --git a/arch/riscv/lib/uaccess.S b/arch/riscv/lib/uaccess.S
new file mode 100644
index 000000000000..58fb2877c865
--- /dev/null
+++ b/arch/riscv/lib/uaccess.S
@@ -0,0 +1,117 @@
+#include <linux/linkage.h>
+#include <asm/asm.h>
+#include <asm/csr.h>
+
+	.altmacro
+	.macro fixup op reg addr lbl
+	LOCAL _epc
+_epc:
+	\op \reg, \addr
+	.section __ex_table,"a"
+	.balign RISCV_SZPTR
+	RISCV_PTR _epc, \lbl
+	.previous
+	.endm
+
+ENTRY(__copy_user)
+
+	/* Enable access to user memory */
+	li t6, SR_SUM
+	csrs sstatus, t6
+
+	add a3, a1, a2
+	/* Use word-oriented copy only if low-order bits match */
+	andi t0, a0, SZREG-1
+	andi t1, a1, SZREG-1
+	bne t0, t1, 2f
+
+	addi t0, a1, SZREG-1
+	andi t1, a3, ~(SZREG-1)
+	andi t0, t0, ~(SZREG-1)
+	/*
+	 * a3: terminal address of source region
+	 * t0: lowest XLEN-aligned address in source
+	 * t1: highest XLEN-aligned address in source
+	 */
+	bgeu t0, t1, 2f
+	bltu a1, t0, 4f
+1:
+	fixup REG_L, t2, (a1), 10f
+	fixup REG_S, t2, (a0), 10f
+	addi a1, a1, SZREG
+	addi a0, a0, SZREG
+	bltu a1, t1, 1b
+2:
+	bltu a1, a3, 5f
+
+3:
+	/* Disable access to user memory */
+	csrc sstatus, t6
+	li a0, 0
+	ret
+4: /* Edge case: unalignment */
+	fixup lbu, t2, (a1), 10f
+	fixup sb, t2, (a0), 10f
+	addi a1, a1, 1
+	addi a0, a0, 1
+	bltu a1, t0, 4b
+	j 1b
+5: /* Edge case: remainder */
+	fixup lbu, t2, (a1), 10f
+	fixup sb, t2, (a0), 10f
+	addi a1, a1, 1
+	addi a0, a0, 1
+	bltu a1, a3, 5b
+	j 3b
+ENDPROC(__copy_user)
+
+
+ENTRY(__clear_user)
+
+	/* Enable access to user memory */
+	li t6, SR_SUM
+	csrs sstatus, t6
+
+	add a3, a0, a1
+	addi t0, a0, SZREG-1
+	andi t1, a3, ~(SZREG-1)
+	andi t0, t0, ~(SZREG-1)
+	/*
+	 * a3: terminal address of target region
+	 * t0: lowest doubleword-aligned address in target region
+	 * t1: highest doubleword-aligned address in target region
+	 */
+	bgeu t0, t1, 2f
+	bltu a0, t0, 4f
+1:
+	fixup REG_S, zero, (a0), 10f
+	addi a0, a0, SZREG
+	bltu a0, t1, 1b
+2:
+	bltu a0, a3, 5f
+
+3:
+	/* Disable access to user memory */
+	csrc sstatus, t6
+	li a0, 0
+	ret
+4: /* Edge case: unalignment */
+	fixup sb, zero, (a0), 10f
+	addi a0, a0, 1
+	bltu a0, t0, 4b
+	j 1b
+5: /* Edge case: remainder */
+	fixup sb, zero, (a0), 10f
+	addi a0, a0, 1
+	bltu a0, a3, 5b
+	j 3b
+ENDPROC(__clear_user)
+
+	.section .fixup,"ax"
+	.balign 4
+10:
+	/* Disable access to user memory */
+	csrc sstatus, t6
+	sub a0, a3, a0
+	ret
+	.previous
diff --git a/arch/riscv/lib/udivdi3.S b/arch/riscv/lib/udivdi3.S
new file mode 100644
index 000000000000..cb01ae5b181a
--- /dev/null
+++ b/arch/riscv/lib/udivdi3.S
@@ -0,0 +1,38 @@
+/*
+ * Copyright (C) 2016-2017 Free Software Foundation, Inc.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, version 2.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+	.globl __udivdi3
+__udivdi3:
+	mv a2, a1
+	mv a1, a0
+	li a0, -1
+	beqz a2, .L5
+	li a3, 1
+	bgeu a2, a1, .L2
+.L1:
+	blez a2, .L2
+	slli a2, a2, 1
+	slli a3, a3, 1
+	bgtu a1, a2, .L1
+.L2:
+	li a0, 0
+.L3:
+	bltu a1, a2, .L4
+	sub a1, a1, a2
+	or a0, a0, a3
+.L4:
+	srli a3, a3, 1
+	srli a2, a2, 1
+	bnez a3, .L3
+.L5:
+	ret
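
For reference, the shift-and-subtract division the routine above implements, sketched in C (not the kernel code; note the all-ones return on division by zero, matching the "li a0, -1" path):

	unsigned long long udivdi3_sketch(unsigned long long num,
					  unsigned long long den)
	{
		unsigned long long quot = 0, bit = 1;

		if (den == 0)
			return ~0ULL;	/* divide-by-zero: return -1 */
		/* Scale the divisor up until it dominates the dividend */
		while (den < num && !(den >> 63)) {
			den <<= 1;
			bit <<= 1;
		}
		/* Peel off one quotient bit per step, highest first */
		while (bit) {
			if (num >= den) {
				num -= den;
				quot |= bit;
			}
			den >>= 1;
			bit >>= 1;
		}
		return quot;
	}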