Diffstat (limited to 'arch/m32r/lib')
-rw-r--r--  arch/m32r/lib/Makefile             |   7
-rw-r--r--  arch/m32r/lib/ashxdi3.S            | 297
-rw-r--r--  arch/m32r/lib/checksum.S           | 322
-rw-r--r--  arch/m32r/lib/csum_partial_copy.c  |  60
-rw-r--r--  arch/m32r/lib/delay.c              | 126
-rw-r--r--  arch/m32r/lib/getuser.S            |  88
-rw-r--r--  arch/m32r/lib/memcpy.S             |  95
-rw-r--r--  arch/m32r/lib/memset.S             | 181
-rw-r--r--  arch/m32r/lib/putuser.S            |  84
-rw-r--r--  arch/m32r/lib/strlen.S             | 120
-rw-r--r--  arch/m32r/lib/usercopy.c           | 391
11 files changed, 1771 insertions(+), 0 deletions(-)
diff --git a/arch/m32r/lib/Makefile b/arch/m32r/lib/Makefile
new file mode 100644
index 000000000000..e632d10c7d78
--- /dev/null
+++ b/arch/m32r/lib/Makefile
@@ -0,0 +1,7 @@
1#
2# Makefile for M32R-specific library files.
3#
4
5lib-y := checksum.o ashxdi3.o memset.o memcpy.o getuser.o \
6 putuser.o delay.o strlen.o usercopy.o csum_partial_copy.o
7
diff --git a/arch/m32r/lib/ashxdi3.S b/arch/m32r/lib/ashxdi3.S
new file mode 100644
index 000000000000..78effca9d97a
--- /dev/null
+++ b/arch/m32r/lib/ashxdi3.S
@@ -0,0 +1,297 @@
1/*
2 * linux/arch/m32r/lib/ashxdi3.S
3 *
4 * Copyright (C) 2001,2002 Hiroyuki Kondo, and Hirokazu Takata
5 *
6 */
7/* $Id$ */
8
9#include <linux/config.h>
10
11;
12; input (r0,r1) src
13; input r2 shift val
14; r3 scratch
15; output (r0,r1)
16;
17
18#ifdef CONFIG_ISA_DUAL_ISSUE
19
20#ifndef __LITTLE_ENDIAN__
21
22 .text
23 .align 4
24 .globl __ashrdi3
25__ashrdi3:
26 cmpz r2 || ldi r3, #32
27 jc r14 || cmpu r2, r3
28 bc 1f
29 ; case 32 =< shift
30 mv r1, r0 || srai r0, #31
31 addi r2, #-32
32 sra r1, r2
33 jmp r14
34 .fillinsn
351: ; case shift <32
36 mv r3, r0 || srl r1, r2
37 sra r0, r2 || neg r2, r2
38 sll r3, r2
39 or r1, r3 || jmp r14
40
41 .align 4
42 .globl __ashldi3
43 .globl __lshldi3
44__ashldi3:
45__lshldi3:
46 cmpz r2 || ldi r3, #32
47 jc r14 || cmpu r2, r3
48 bc 1f
49 ; case 32 =< shift
50 mv r0, r1 || addi r2, #-32
51 sll r0, r2 || ldi r1, #0
52 jmp r14
53 .fillinsn
541: ; case shift <32
55 mv r3, r1 || sll r0, r2
56 sll r1, r2 || neg r2, r2
57 srl r3, r2
58 or r0, r3 || jmp r14
59
60 .align 4
61 .globl __lshrdi3
62__lshrdi3:
63 cmpz r2 || ldi r3, #32
64 jc r14 || cmpu r2, r3
65 bc 1f
66 ; case 32 =< shift
67 mv r1, r0 || addi r2, #-32
68 ldi r0, #0 || srl r1, r2
69 jmp r14
70 .fillinsn
711: ; case shift <32
72 mv r3, r0 || srl r1, r2
73 srl r0, r2 || neg r2, r2
74 sll r3, r2
75 or r1, r3 || jmp r14
76
77#else /* LITTLE_ENDIAN */
78
79 .text
80 .align 4
81 .globl __ashrdi3
82__ashrdi3:
83 cmpz r2 || ldi r3, #32
84 jc r14 || cmpu r2, r3
85 bc 1f
86 ; case 32 =< shift
87 mv r0, r1 || srai r1, #31
88 addi r2, #-32
89 sra r0, r2
90 jmp r14
91 .fillinsn
921: ; case shift <32
93 mv r3, r1 || srl r0, r2
94 sra r1, r2 || neg r2, r2
95 sll r3, r2
96 or r0, r3 || jmp r14
97
98 .align 4
99 .globl __ashldi3
100 .globl __lshldi3
101__ashldi3:
102__lshldi3:
103 cmpz r2 || ldi r3, #32
104 jc r14 || cmpu r2, r3
105 bc 1f
106 ; case 32 =< shift
107 mv r1, r0 || addi r2, #-32
108 sll r1, r2 || ldi r0, #0
109 jmp r14
110 .fillinsn
1111: ; case shift <32
112 mv r3, r0 || sll r1, r2
113 sll r0, r2 || neg r2, r2
114 srl r3, r2
115 or r1, r3 || jmp r14
116
117 .align 4
118 .globl __lshrdi3
119__lshrdi3:
120 cmpz r2 || ldi r3, #32
121 jc r14 || cmpu r2, r3
122 bc 1f
123 ; case 32 =< shift
124 mv r0, r1 || addi r2, #-32
125 ldi r1, #0 || srl r0, r2
126 jmp r14
127 .fillinsn
1281: ; case shift <32
129 mv r3, r1 || srl r0, r2
130 srl r1, r2 || neg r2, r2
131 sll r3, r2
132 or r0, r3 || jmp r14
133
134#endif
135
136#else /* not CONFIG_ISA_DUAL_ISSUE */
137
138#ifndef __LITTLE_ENDIAN__
139
140 .text
141 .align 4
142 .globl __ashrdi3
143__ashrdi3:
144 beqz r2, 2f
145 cmpui r2, #32
146 bc 1f
147 ; case 32 =< shift
148 mv r1, r0
149 srai r0, #31
150 addi r2, #-32
151 sra r1, r2
152 jmp r14
153 .fillinsn
1541: ; case shift <32
155 mv r3, r0
156 srl r1, r2
157 sra r0, r2
158 neg r2, r2
159 sll r3, r2
160 or r1, r3
161 .fillinsn
1622:
163 jmp r14
164
165 .align 4
166 .globl __ashldi3
167 .globl __lshldi3
168__ashldi3:
169__lshldi3:
170 beqz r2, 2f
171 cmpui r2, #32
172 bc 1f
173 ; case 32 =< shift
174 mv r0, r1
175 addi r2, #-32
176 sll r0, r2
177 ldi r1, #0
178 jmp r14
179 .fillinsn
1801: ; case shift <32
181 mv r3, r1
182 sll r0, r2
183 sll r1, r2
184 neg r2, r2
185 srl r3, r2
186 or r0, r3
187 .fillinsn
1882:
189 jmp r14
190
191 .align 4
192 .globl __lshrdi3
193__lshrdi3:
194 beqz r2, 2f
195 cmpui r2, #32
196 bc 1f
197 ; case 32 =< shift
198 mv r1, r0
199 ldi r0, #0
200 addi r2, #-32
201 srl r1, r2
202 jmp r14
203 .fillinsn
2041: ; case shift <32
205 mv r3, r0
206 srl r1, r2
207 srl r0, r2
208 neg r2, r2
209 sll r3, r2
210 or r1, r3
211 .fillinsn
2122:
213 jmp r14
214
215#else
216
217 .text
218 .align 4
219 .globl __ashrdi3
220__ashrdi3:
221 beqz r2, 2f
222 cmpui r2, #32
223 bc 1f
224 ; case 32 =< shift
225 mv r0, r1
226 srai r1, #31
227 addi r2, #-32
228 sra r0, r2
229 jmp r14
230 .fillinsn
2311: ; case shift <32
232 mv r3, r1
233 srl r0, r2
234 sra r1, r2
235 neg r2, r2
236 sll r3, r2
237 or r0, r3
238 .fillinsn
2392:
240 jmp r14
241
242 .align 4
243 .globl __ashldi3
244 .globl __lshldi3
245__ashldi3:
246__lshldi3:
247 beqz r2, 2f
248 cmpui r2, #32
249 bc 1f
250 ; case 32 =< shift
251 mv r1, r0
252 addi r2, #-32
253 sll r1, r2
254 ldi r0, #0
255 jmp r14
256 .fillinsn
2571: ; case shift <32
258 mv r3, r0
259 sll r1, r2
260 sll r0, r2
261 neg r2, r2
262 srl r3, r2
263 or r1, r3
264 .fillinsn
2652:
266 jmp r14
267
268 .align 4
269 .globl __lshrdi3
270__lshrdi3:
271 beqz r2, 2f
272 cmpui r2, #32
273 bc 1f
274 ; case 32 =< shift
275 mv r0, r1
276 ldi r1, #0
277 addi r2, #-32
278 srl r0, r2
279 jmp r14
280 .fillinsn
2811: ; case shift <32
282 mv r3, r1
283 srl r0, r2
284 srl r1, r2
285 neg r2, r2
286 sll r3, r2
287 or r0, r3
288 .fillinsn
2892:
290 jmp r14
291
292#endif
293
294#endif /* not CONFIG_ISA_DUAL_ISSUE */
295
296 .end
297
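For readers following the dual-issue assembly above, here is a minimal C sketch of what __ashrdi3 computes, assuming the big-endian register pairing of the first block (r0 = high word, r1 = low word); the function and variable names are illustrative, not part of the patch, and the sketch assumes an arithmetic right shift on signed integers:

    #include <stdint.h>

    /* Illustrative sketch only: 64-bit arithmetic shift right built from
     * 32-bit halves, mirroring the two cases handled by __ashrdi3 above. */
    static uint64_t ashrdi3_sketch(int32_t hi, uint32_t lo, unsigned int shift)
    {
        if (shift == 0)                      /* cmpz r2; jc r14 */
            return ((uint64_t)(uint32_t)hi << 32) | lo;
        if (shift >= 32) {                   /* case 32 =< shift */
            lo = (uint32_t)(hi >> (shift - 32));
            hi >>= 31;                       /* sign fills the high word */
        } else {                             /* case shift < 32 */
            lo = (lo >> shift) | ((uint32_t)hi << (32 - shift));
            hi >>= shift;
        }
        return ((uint64_t)(uint32_t)hi << 32) | lo;
    }

The __ashldi3 and __lshrdi3 entries follow the same two-case split, differing only in shift direction and in filling the vacated word with zero instead of the sign.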
diff --git a/arch/m32r/lib/checksum.S b/arch/m32r/lib/checksum.S
new file mode 100644
index 000000000000..f6fc1bdb87e4
--- /dev/null
+++ b/arch/m32r/lib/checksum.S
@@ -0,0 +1,322 @@
1/*
2 * INET An implementation of the TCP/IP protocol suite for the LINUX
3 * operating system. INET is implemented using the BSD Socket
4 * interface as the means of communication with the user level.
5 *
6 * IP/TCP/UDP checksumming routines
7 *
8 * Authors: Jorge Cwik, <jorge@laser.satlink.net>
9 * Arnt Gulbrandsen, <agulbra@nvg.unit.no>
10 * Tom May, <ftom@netcom.com>
11 * Pentium Pro/II routines:
12 * Alexander Kjeldaas <astor@guardian.no>
13 * Finn Arne Gangstad <finnag@guardian.no>
14 * Lots of code moved from tcp.c and ip.c; see those files
15 * for more names.
16 *
17 * Changes: Ingo Molnar, converted csum_partial_copy() to 2.1 exception
18 * handling.
19 * Andi Kleen, add zeroing on error
20 * converted to pure assembler
21 * Hirokazu Takata,Hiroyuki Kondo rewrite for the m32r architecture.
22 *
23 * This program is free software; you can redistribute it and/or
24 * modify it under the terms of the GNU General Public License
25 * as published by the Free Software Foundation; either version
26 * 2 of the License, or (at your option) any later version.
27 */
28/* $Id$ */
29
30
31#include <linux/config.h>
32#include <linux/linkage.h>
33#include <asm/assembler.h>
34#include <asm/errno.h>
35
36/*
37 * computes a partial checksum, e.g. for TCP/UDP fragments
38 */
39
40/*
41unsigned int csum_partial(const unsigned char * buff, int len, unsigned int sum)
42 */
43
44
45#ifdef CONFIG_ISA_DUAL_ISSUE
46
47 /*
48 * Experiments with Ethernet and SLIP connections show that buff
49 * is aligned on either a 2-byte or 4-byte boundary. We get at
50 * least a twofold speedup on 486 and Pentium if it is 4-byte aligned.
51 * Fortunately, it is easy to convert 2-byte alignment to 4-byte
52 * alignment for the unrolled loop.
53 */
54
55 .text
56ENTRY(csum_partial)
57 ; Function args
58 ; r0: unsigned char *buff
59 ; r1: int len
60 ; r2: unsigned int sum
61
62 push r2 || ldi r2, #0
63 and3 r7, r0, #1 ; Check alignment.
64 beqz r7, 1f ; Jump if alignment is ok.
65 ; 1-byte mis aligned
66 ldub r4, @r0 || addi r0, #1
67 ; clear c-bit || Alignment uses up bytes.
68 cmp r0, r0 || addi r1, #-1
69 ldi r3, #0 || addx r2, r4
70 addx r2, r3
71 .fillinsn
721:
73 and3 r4, r0, #2 ; Check alignment.
74 beqz r4, 2f ; Jump if alignment is ok.
75 ; clear c-bit || Alignment uses up two bytes.
76 cmp r0, r0 || addi r1, #-2
77 bgtz r1, 1f ; Jump if we had at least two bytes.
78 bra 4f || addi r1, #2
79 .fillinsn ; len(r1) was < 2. Deal with it.
801:
81 ; 2-byte aligned
82 lduh r4, @r0 || ldi r3, #0
83 addx r2, r4 || addi r0, #2
84 addx r2, r3
85 .fillinsn
862:
87 ; 4-byte aligned
88 cmp r0, r0 ; clear c-bit
89 srl3 r6, r1, #5
90 beqz r6, 2f
91 .fillinsn
92
931: ld r3, @r0+
94 ld r4, @r0+ ; +4
95 ld r5, @r0+ ; +8
96 ld r3, @r0+ || addx r2, r3 ; +12
97 ld r4, @r0+ || addx r2, r4 ; +16
98 ld r5, @r0+ || addx r2, r5 ; +20
99 ld r3, @r0+ || addx r2, r3 ; +24
100 ld r4, @r0+ || addx r2, r4 ; +28
101 addx r2, r5 || addi r6, #-1
102 addx r2, r3
103 addx r2, r4
104 bnez r6, 1b
105
106 addx r2, r6 ; r6=0
107 cmp r0, r0 ; This clears c-bit
108 .fillinsn
1092: and3 r6, r1, #0x1c ; withdraw len
110 beqz r6, 4f
111 srli r6, #2
112 .fillinsn
113
1143: ld r4, @r0+ || addi r6, #-1
115 addx r2, r4
116 bnez r6, 3b
117
118 addx r2, r6 ; r6=0
119 cmp r0, r0 ; This clears c-bit
120 .fillinsn
1214: and3 r1, r1, #3
122 beqz r1, 7f ; if len == 0 goto end
123 and3 r6, r1, #2
124 beqz r6, 5f ; if len < 2 goto 5f(1byte)
125 lduh r4, @r0 || addi r0, #2
126 addi r1, #-2 || slli r4, #16
127 addx r2, r4
128 beqz r1, 6f
129 .fillinsn
1305: ldub r4, @r0 || ldi r1, #0
131#ifndef __LITTLE_ENDIAN__
132 slli r4, #8
133#endif
134 addx r2, r4
135 .fillinsn
1366: addx r2, r1
137 .fillinsn
1387:
139 and3 r0, r2, #0xffff
140 srli r2, #16
141 add r0, r2
142 srl3 r2, r0, #16
143 beqz r2, 1f
144 addi r0, #1
145 and3 r0, r0, #0xffff
146 .fillinsn
1471:
148 beqz r7, 1f ; swap the upper byte for the lower
149 and3 r2, r0, #0xff
150 srl3 r0, r0, #8
151 slli r2, #8
152 or r0, r2
153 .fillinsn
1541:
155 pop r2 || cmp r0, r0
156 addx r0, r2 || ldi r2, #0
157 addx r0, r2
158 jmp r14
159
160#else /* not CONFIG_ISA_DUAL_ISSUE */
161
162 /*
163 * Experiments with Ethernet and SLIP connections show that buff
164 * is aligned on either a 2-byte or 4-byte boundary. We get at
165 * least a twofold speedup on 486 and Pentium if it is 4-byte aligned.
166 * Fortunately, it is easy to convert 2-byte alignment to 4-byte
167 * alignment for the unrolled loop.
168 */
169
170 .text
171ENTRY(csum_partial)
172 ; Function args
173 ; r0: unsigned char *buff
174 ; r1: int len
175 ; r2: unsigned int sum
176
177 push r2
178 ldi r2, #0
179 and3 r7, r0, #1 ; Check alignment.
180 beqz r7, 1f ; Jump if alignment is ok.
181 ; 1-byte mis aligned
182 ldub r4, @r0
183 addi r0, #1
184 addi r1, #-1 ; Alignment uses up bytes.
185 cmp r0, r0 ; clear c-bit
186 ldi r3, #0
187 addx r2, r4
188 addx r2, r3
189 .fillinsn
1901:
191 and3 r4, r0, #2 ; Check alignment.
192 beqz r4, 2f ; Jump if alignment is ok.
193 addi r1, #-2 ; Alignment uses up two bytes.
194 cmp r0, r0 ; clear c-bit
195 bgtz r1, 1f ; Jump if we had at least two bytes.
196 addi r1, #2 ; len(r1) was < 2. Deal with it.
197 bra 4f
198 .fillinsn
1991:
200 ; 2-byte aligned
201 lduh r4, @r0
202 addi r0, #2
203 ldi r3, #0
204 addx r2, r4
205 addx r2, r3
206 .fillinsn
2072:
208 ; 4-byte aligned
209 cmp r0, r0 ; clear c-bit
210 srl3 r6, r1, #5
211 beqz r6, 2f
212 .fillinsn
213
2141: ld r3, @r0+
215 ld r4, @r0+ ; +4
216 ld r5, @r0+ ; +8
217 addx r2, r3
218 addx r2, r4
219 addx r2, r5
220 ld r3, @r0+ ; +12
221 ld r4, @r0+ ; +16
222 ld r5, @r0+ ; +20
223 addx r2, r3
224 addx r2, r4
225 addx r2, r5
226 ld r3, @r0+ ; +24
227 ld r4, @r0+ ; +28
228 addi r6, #-1
229 addx r2, r3
230 addx r2, r4
231 bnez r6, 1b
232 addx r2, r6 ; r6=0
233 cmp r0, r0 ; This clears c-bit
234 .fillinsn
235
2362: and3 r6, r1, #0x1c ; withdraw len
237 beqz r6, 4f
238 srli r6, #2
239 .fillinsn
240
2413: ld r4, @r0+
242 addi r6, #-1
243 addx r2, r4
244 bnez r6, 3b
245 addx r2, r6 ; r6=0
246 cmp r0, r0 ; This clears c-bit
247 .fillinsn
248
2494: and3 r1, r1, #3
250 beqz r1, 7f ; if len == 0 goto end
251 and3 r6, r1, #2
252 beqz r6, 5f ; if len < 2 goto 5f(1byte)
253
254 lduh r4, @r0
255 addi r0, #2
256 addi r1, #-2
257 slli r4, #16
258 addx r2, r4
259 beqz r1, 6f
260 .fillinsn
2615: ldub r4, @r0
262#ifndef __LITTLE_ENDIAN__
263 slli r4, #8
264#endif
265 addx r2, r4
266 .fillinsn
2676: ldi r5, #0
268 addx r2, r5
269 .fillinsn
2707:
271 and3 r0, r2, #0xffff
272 srli r2, #16
273 add r0, r2
274 srl3 r2, r0, #16
275 beqz r2, 1f
276 addi r0, #1
277 and3 r0, r0, #0xffff
278 .fillinsn
2791:
280 beqz r7, 1f
281 mv r2, r0
282 srl3 r0, r2, #8
283 and3 r2, r2, #0xff
284 slli r2, #8
285 or r0, r2
286 .fillinsn
2871:
288 pop r2
289 cmp r0, r0
290 addx r0, r2
291 ldi r2, #0
292 addx r0, r2
293 jmp r14
294
295#endif /* not CONFIG_ISA_DUAL_ISSUE */
296
297/*
298unsigned int csum_partial_copy_generic (const char *src, char *dst,
299 int len, int sum, int *src_err_ptr, int *dst_err_ptr)
300 */
301
302/*
303 * Copy from ds while checksumming, otherwise like csum_partial
304 *
305 * The macros SRC and DST specify the type of access for the instruction.
306 * thus we can call a custom exception handler for all access types.
307 *
308 * FIXME: could someone double-check whether I haven't mixed up some SRC and
309 * DST definitions? It's damn hard to trigger all cases. I hope I got
310 * them all but there's no guarantee.
311 */
312
313ENTRY(csum_partial_copy_generic)
314 nop
315 nop
316 nop
317 nop
318 jmp r14
319 nop
320 nop
321 nop
322
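As a reference for the epilogue of csum_partial above (the and3/srli/add sequence that reduces the 32-bit accumulator to 16 bits, followed by the carry fix-up), an equivalent fold in C might look like the sketch below; the helper name is ours, and only the fold is shown, not the byte swap applied when the buffer started odd-aligned:

    /* Illustrative sketch: fold a 32-bit ones'-complement accumulator to 16 bits. */
    static unsigned int csum_fold_sketch(unsigned int sum)
    {
        sum = (sum & 0xffff) + (sum >> 16);  /* add high half into low half  */
        sum += sum >> 16;                    /* fold back any carry produced */
        return sum & 0xffff;
    }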
diff --git a/arch/m32r/lib/csum_partial_copy.c b/arch/m32r/lib/csum_partial_copy.c
new file mode 100644
index 000000000000..c871b4606b07
--- /dev/null
+++ b/arch/m32r/lib/csum_partial_copy.c
@@ -0,0 +1,60 @@
1/*
2 * INET An implementation of the TCP/IP protocol suite for the LINUX
3 * operating system. INET is implemented using the BSD Socket
4 * interface as the means of communication with the user level.
5 *
6 * M32R specific IP/TCP/UDP checksumming routines
7 * (Some code taken from MIPS architecture)
8 *
9 * This file is subject to the terms and conditions of the GNU General Public
10 * License. See the file "COPYING" in the main directory of this archive
11 * for more details.
12 *
13 * Copyright (C) 1994, 1995 Waldorf Electronics GmbH
14 * Copyright (C) 1998, 1999 Ralf Baechle
15 * Copyright (C) 2001-2005 Hiroyuki Kondo, Hirokazu Takata
16 *
17 */
18
19#include <linux/module.h>
20#include <linux/types.h>
21
22#include <net/checksum.h>
23#include <asm/byteorder.h>
24#include <asm/string.h>
25#include <asm/uaccess.h>
26
27/*
28 * Copy while checksumming, otherwise like csum_partial
29 */
30unsigned int
31csum_partial_copy_nocheck (const unsigned char *src, unsigned char *dst,
32 int len, unsigned int sum)
33{
34 sum = csum_partial(src, len, sum);
35 memcpy(dst, src, len);
36
37 return sum;
38}
39EXPORT_SYMBOL(csum_partial_copy_nocheck);
40
41/*
42 * Copy from userspace and compute checksum. If we catch an exception
43 * then zero the rest of the buffer.
44 */
45unsigned int
46csum_partial_copy_from_user (const unsigned char __user *src,
47 unsigned char *dst,
48 int len, unsigned int sum, int *err_ptr)
49{
50 int missing;
51
52 missing = copy_from_user(dst, src, len);
53 if (missing) {
54 memset(dst + len - missing, 0, missing);
55 *err_ptr = -EFAULT;
56 }
57
58 return csum_partial(dst, len-missing, sum);
59}
60EXPORT_SYMBOL(csum_partial_copy_from_user);
diff --git a/arch/m32r/lib/delay.c b/arch/m32r/lib/delay.c
new file mode 100644
index 000000000000..fb29632c7eaa
--- /dev/null
+++ b/arch/m32r/lib/delay.c
@@ -0,0 +1,126 @@
1/*
2 * linux/arch/m32r/lib/delay.c
3 *
4 * Copyright (c) 2002 Hitoshi Yamamoto, Hirokazu Takata
5 * Copyright (c) 2004 Hirokazu Takata
6 */
7
8/* $Id$ */
9
10#include <linux/config.h>
11#include <linux/param.h>
12#ifdef CONFIG_SMP
13#include <linux/sched.h>
14#include <asm/current.h>
15#include <asm/smp.h>
16#endif /* CONFIG_SMP */
17#include <asm/processor.h>
18
19void __delay(unsigned long loops)
20{
21#ifdef CONFIG_ISA_DUAL_ISSUE
22 __asm__ __volatile__ (
23 "beqz %0, 2f \n\t"
24 "addi %0, #-1 \n\t"
25
26 " .fillinsn \n\t"
27 "1: \n\t"
28 "cmpz %0 || addi %0, #-1 \n\t"
29 "bc 2f || cmpz %0 \n\t"
30 "bc 2f || addi %0, #-1 \n\t"
31 "cmpz %0 || addi %0, #-1 \n\t"
32 "bc 2f || cmpz %0 \n\t"
33 "bnc 1b || addi %0, #-1 \n\t"
34 " .fillinsn \n\t"
35 "2: \n\t"
36 : "+r" (loops)
37 : "r" (0)
38 : "cbit"
39 );
40#else
41 __asm__ __volatile__ (
42 "beqz %0, 2f \n\t"
43 " .fillinsn \n\t"
44 "1: \n\t"
45 "addi %0, #-1 \n\t"
46 "blez %0, 2f \n\t"
47 "addi %0, #-1 \n\t"
48 "blez %0, 2f \n\t"
49 "addi %0, #-1 \n\t"
50 "blez %0, 2f \n\t"
51 "addi %0, #-1 \n\t"
52 "bgtz %0, 1b \n\t"
53 " .fillinsn \n\t"
54 "2: \n\t"
55 : "+r" (loops)
56 : "r" (0)
57 );
58#endif
59}
60
61void __const_udelay(unsigned long xloops)
62{
63#if defined(CONFIG_ISA_M32R2) && defined(CONFIG_ISA_DSP_LEVEL2)
64 /*
65 * loops [1] = (xloops >> 32) [sec] * loops_per_jiffy [1/jiffy]
66 * * HZ [jiffy/sec]
67 * = (xloops >> 32) [sec] * (loops_per_jiffy * HZ) [1/sec]
68 * = (((xloops * loops_per_jiffy) >> 32) * HZ) [1]
69 *
70 * NOTE:
71 * - '[]' depicts variable's dimension in the above equation.
72 * - "rac" instruction rounds the accumulator in word size.
73 */
74 __asm__ __volatile__ (
75 "srli %0, #1 \n\t"
76 "mulwhi %0, %1 ; a0 \n\t"
77 "mulwu1 %0, %1 ; a1 \n\t"
78 "sadd ; a0 += (a1 >> 16) \n\t"
79 "rac a0, a0, #1 \n\t"
80 "mvfacmi %0, a0 \n\t"
81 : "+r" (xloops)
82 : "r" (current_cpu_data.loops_per_jiffy)
83 : "a0", "a1"
84 );
85#elif defined(CONFIG_ISA_M32R2) || defined(CONFIG_ISA_M32R)
86 /*
87 * u64 ull;
88 * ull = (u64)xloops * (u64)current_cpu_data.loops_per_jiffy;
89 * xloops = (ull >> 32);
90 */
91 __asm__ __volatile__ (
92 "and3 r4, %0, #0xffff \n\t"
93 "and3 r5, %1, #0xffff \n\t"
94 "mul r4, r5 \n\t"
95 "srl3 r6, %0, #16 \n\t"
96 "srli r4, #16 \n\t"
97 "mul r5, r6 \n\t"
98 "add r4, r5 \n\t"
99 "and3 r5, %0, #0xffff \n\t"
100 "srl3 r6, %1, #16 \n\t"
101 "mul r5, r6 \n\t"
102 "add r4, r5 \n\t"
103 "srl3 r5, %0, #16 \n\t"
104 "srli r4, #16 \n\t"
105 "mul r5, r6 \n\t"
106 "add r4, r5 \n\t"
107 "mv %0, r4 \n\t"
108 : "+r" (xloops)
109 : "r" (current_cpu_data.loops_per_jiffy)
110 : "r4", "r5", "r6"
111 );
112#else
113#error unknown isa configuration
114#endif
115 __delay(xloops * HZ);
116}
117
118void __udelay(unsigned long usecs)
119{
120 __const_udelay(usecs * 0x000010c7); /* 2**32 / 1000000 (rounded up) */
121}
122
123void __ndelay(unsigned long nsecs)
124{
125 __const_udelay(nsecs * 0x00005); /* 2**32 / 1000000000 (rounded up) */
126}
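A quick check of the magic constants used in __udelay() and __ndelay() above (illustrative arithmetic, not part of the patch): __const_udelay() expects the delay as seconds scaled by 2^32, so

    2^32 / 10^6 = 4294.967296, rounded up to 4295 = 0x10c7   (microseconds)
    2^32 / 10^9 = 4.294967296, rounded up to 5    = 0x5      (nanoseconds)

and, per the comment in __const_udelay(), the resulting loop count is ((xloops * loops_per_jiffy) >> 32) * HZ.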
diff --git a/arch/m32r/lib/getuser.S b/arch/m32r/lib/getuser.S
new file mode 100644
index 000000000000..58a0db055c5c
--- /dev/null
+++ b/arch/m32r/lib/getuser.S
@@ -0,0 +1,88 @@
1/*
2 * __get_user functions.
3 *
4 * (C) Copyright 2001 Hirokazu Takata
5 *
6 * These functions have a non-standard call interface
7 * to make them more efficient, especially as they
8 * return an error value in addition to the "real"
9 * return value.
10 */
11
12#include <linux/config.h>
13
14/*
15 * __get_user_X
16 *
17 * Inputs: r0 contains the address
18 *
19 * Outputs: r0 is error code (0 or -EFAULT)
20 * r1 contains zero-extended value
21 *
22 * These functions should not modify any other registers,
23 * as they get called from within inline assembly.
24 */
25
26#ifdef CONFIG_ISA_DUAL_ISSUE
27
28 .text
29 .balign 4
30 .globl __get_user_1
31__get_user_1:
321: ldub r1, @r0 || ldi r0, #0
33 jmp r14
34
35 .balign 4
36 .globl __get_user_2
37__get_user_2:
382: lduh r1, @r0 || ldi r0, #0
39 jmp r14
40
41 .balign 4
42 .globl __get_user_4
43__get_user_4:
443: ld r1, @r0 || ldi r0, #0
45 jmp r14
46
47bad_get_user:
48 ldi r1, #0 || ldi r0, #-14
49 jmp r14
50
51#else /* not CONFIG_ISA_DUAL_ISSUE */
52
53 .text
54 .balign 4
55 .globl __get_user_1
56__get_user_1:
571: ldub r1, @r0
58 ldi r0, #0
59 jmp r14
60
61 .balign 4
62 .globl __get_user_2
63__get_user_2:
642: lduh r1, @r0
65 ldi r0, #0
66 jmp r14
67
68 .balign 4
69 .globl __get_user_4
70__get_user_4:
713: ld r1, @r0
72 ldi r0, #0
73 jmp r14
74
75bad_get_user:
76 ldi r1, #0
77 ldi r0, #-14
78 jmp r14
79
80#endif /* not CONFIG_ISA_DUAL_ISSUE */
81
82.section __ex_table,"a"
83 .long 1b,bad_get_user
84 .long 2b,bad_get_user
85 .long 3b,bad_get_user
86.previous
87
88 .end
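As a purely hypothetical illustration of the register contract described in the header comment (address in r0, error code back in r0, zero-extended value in r1, no other registers touched), a C caller could wrap __get_user_4 roughly as below; the wrapper name is ours and real kernels hide this behind the get_user() machinery:

    /* Hypothetical sketch only; not part of the patch. */
    static inline int get_user_4_sketch(unsigned int *val, const void *addr)
    {
        register unsigned long r0 __asm__("r0") = (unsigned long)addr;
        register unsigned long r1 __asm__("r1");

        __asm__ __volatile__("bl __get_user_4"      /* returns via jmp r14 */
                             : "+r" (r0), "=r" (r1)
                             :
                             : "r14", "memory");
        *val = r1;
        return r0;    /* 0 on success, -EFAULT (-14) on fault */
    }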
diff --git a/arch/m32r/lib/memcpy.S b/arch/m32r/lib/memcpy.S
new file mode 100644
index 000000000000..800898a2d2e3
--- /dev/null
+++ b/arch/m32r/lib/memcpy.S
@@ -0,0 +1,95 @@
1/*
2 * linux/arch/m32r/lib/memcpy.S
3 *
4 * Copyright (C) 2001 Hiroyuki Kondo, and Hirokazu Takata
5 * Copyright (C) 2004 Hirokazu Takata
6 *
7 * void *memcopy(void *dst, const void *src, int n);
8 *
9 * dst: r0
10 * src: r1
11 * n : r2
12 */
13/* $Id$ */
14
15
16 .text
17#include <linux/config.h>
18#include <linux/linkage.h>
19#include <asm/assembler.h>
20
21#ifdef CONFIG_ISA_DUAL_ISSUE
22
23 .text
24ENTRY(memcpy)
25memcopy:
26 mv r4, r0 || mv r7, r0
27 or r7, r1 || cmpz r2
28 jc r14 || cmpeq r0, r1 ; return if r2=0
29 jc r14 ; return if r0=r1
30
31 and3 r7, r7, #3
32 bnez r7, byte_copy
33 srl3 r3, r2, #2
34 and3 r2, r2, #3
35 beqz r3, byte_copy
36 addi r4, #-4
37word_copy:
38 ld r7, @r1+ || addi r3, #-1
39 st r7, @+r4 || cmpz r2
40 bnez r3, word_copy
41 addi r4, #4 || jc r14 ; return if r2=0
42#if defined(CONFIG_ISA_M32R2)
43byte_copy:
44 ldb r7, @r1 || addi r1, #1
45 addi r2, #-1 || stb r7, @r4+
46 bnez r2, byte_copy
47#elif defined(CONFIG_ISA_M32R)
48byte_copy:
49 ldb r7, @r1 || addi r1, #1
50 addi r2, #-1 || stb r7, @r4
51 addi r4, #1
52 bnez r2, byte_copy
53#else
54#error unknown isa configuration
55#endif
56end_memcopy:
57 jmp r14
58
59#else /* not CONFIG_ISA_DUAL_ISSUE */
60
61 .text
62ENTRY(memcpy)
63memcopy:
64 mv r4, r0
65 mv r7, r0
66 or r7, r1
67 beq r0, r1, end_memcopy
68 beqz r2, end_memcopy
69
70 and3 r7, r7, #3
71 bnez r7, byte_copy
72 srl3 r3, r2, #2
73 and3 r2, r2, #3
74 beqz r3, byte_copy
75 addi r4, #-4
76word_copy:
77 ld r7, @r1+
78 addi r3, #-1
79 st r7, @+r4
80 bnez r3, word_copy
81 beqz r2, end_memcopy
82 addi r4, #4
83byte_copy:
84 ldb r7, @r1
85 addi r1, #1
86 addi r2, #-1
87 stb r7, @r4
88 addi r4, #1
89 bnez r2, byte_copy
90end_memcopy:
91 jmp r14
92
93#endif /* not CONFIG_ISA_DUAL_ISSUE */
94
95 .end
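The copy strategy above, fall back to a byte loop when either pointer is misaligned, otherwise copy 4-byte words and finish the tail in bytes, corresponds roughly to the following C; a rough sketch with our own names, not part of the patch:

    #include <stddef.h>
    #include <stdint.h>

    /* Illustrative sketch of the word_copy/byte_copy split used by memcpy above. */
    static void *memcpy_sketch(void *dst, const void *src, size_t n)
    {
        unsigned char *d = dst;
        const unsigned char *s = src;

        if ((((uintptr_t)d | (uintptr_t)s) & 3) == 0) {
            for (; n >= 4; n -= 4, d += 4, s += 4)  /* aligned word copy   */
                *(uint32_t *)d = *(const uint32_t *)s;
        }
        while (n--)                                  /* tail or unaligned   */
            *d++ = *s++;
        return dst;
    }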
diff --git a/arch/m32r/lib/memset.S b/arch/m32r/lib/memset.S
new file mode 100644
index 000000000000..6e26df120acb
--- /dev/null
+++ b/arch/m32r/lib/memset.S
@@ -0,0 +1,181 @@
1/*
2 * linux/arch/m32r/lib/memset.S
3 *
4 * Copyright (C) 2001,2002 Hiroyuki Kondo, and Hirokazu Takata
5 * Copyright (C) 2004 Hirokazu Takata
6 *
7 * void *memset(void *dst, int val, int len);
8 *
9 * dst: r0
10 * val: r1
11 * len: r2
12 * ret: r0
13 *
14 */
15/* $Id$ */
16
17#include <linux/config.h>
18
19 .text
20 .global memset
21
22#ifdef CONFIG_ISA_DUAL_ISSUE
23
24 .align 4
25memset:
26 mv r4, r0 || cmpz r2
27 jc r14
28 cmpui r2, #16
29 bnc qword_align_check
30 cmpui r2, #4
31 bc byte_set
32word_align_check: /* len >= 4 */
33 and3 r3, r4, #3
34 beqz r3, word_set
35 addi r3, #-4
36 neg r3, r3 /* r3 = -(r3 - 4) */
37align_word:
38 stb r1, @r4 || addi r4, #1
39 addi r2, #-1 || addi r3, #-1
40 bnez r3, align_word
41 cmpui r2, #4
42 bc byte_set
43word_set:
44 and3 r1, r1, #0x00ff /* r1: abababab <-- ??????ab */
45 sll3 r3, r1, #8
46 or r1, r3 || addi r4, #-4
47 sll3 r3, r1, #16
48 or r1, r3 || addi r2, #-4
49word_set_loop:
50 st r1, @+r4 || addi r2, #-4
51 bgtz r2, word_set_loop
52 bnez r2, byte_set_wrap
53 st r1, @+r4
54 jmp r14
55
56qword_align_check: /* len >= 16 */
57 and3 r3, r4, #15
58 bnez r3, word_align_check
59qword_set:
60 and3 r1, r1, #0x00ff /* r1: abababab <-- ??????ab */
61 sll3 r3, r1, #8
62 or r1, r3 || addi r4, #-4
63 sll3 r3, r1, #16
64 or r1, r3 || ldi r5, #16
65qword_set_loop:
66 ld r3, @(4,r4) /* cache line allocate */
67 st r1, @+r4 || addi r2, #-16
68 st r1, @+r4 || cmpu r2, r5
69 st r1, @+r4
70 st r1, @+r4
71 bnc qword_set_loop || cmpz r2
72 jc r14
73set_remainder:
74 cmpui r2, #4
75 bc byte_set_wrap1
76 addi r2, #-4
77 bra word_set_loop
78
79byte_set_wrap:
80 addi r2, #4
81 cmpz r2
82 jc r14
83byte_set_wrap1:
84 addi r4, #4
85#if defined(CONFIG_ISA_M32R2)
86byte_set:
87 addi r2, #-1 || stb r1, @r4+
88 bnez r2, byte_set
89#elif defined(CONFIG_ISA_M32R)
90byte_set:
91 addi r2, #-1 || stb r1, @r4
92 addi r4, #1
93 bnez r2, byte_set
94#else
95#error unknown isa configuration
96#endif
97end_memset:
98 jmp r14
99
100#else /* not CONFIG_ISA_DUAL_ISSUE */
101
102 .align 4
103memset:
104 mv r4, r0
105 beqz r2, end_memset
106 cmpui r2, #16
107 bnc qword_align_check
108 cmpui r2, #4
109 bc byte_set
110word_align_check: /* len >= 4 */
111 and3 r3, r4, #3
112 beqz r3, word_set
113 addi r3, #-4
114 neg r3, r3 /* r3 = -(r3 - 4) */
115align_word:
116 stb r1, @r4
117 addi r4, #1
118 addi r2, #-1
119 addi r3, #-1
120 bnez r3, align_word
121 cmpui r2, #4
122 bc byte_set
123word_set:
124 and3 r1, r1, #0x00ff /* r1: abababab <-- ??????ab */
125 sll3 r3, r1, #8
126 or r1, r3
127 sll3 r3, r1, #16
128 or r1, r3
129 addi r2, #-4
130 addi r4, #-4
131word_set_loop:
132 st r1, @+r4
133 addi r2, #-4
134 bgtz r2, word_set_loop
135 bnez r2, byte_set_wrap
136 st r1, @+r4
137 jmp r14
138
139qword_align_check: /* len >= 16 */
140 and3 r3, r4, #15
141 bnez r3, word_align_check
142qword_set:
143 and3 r1, r1, #0x00ff /* r1: abababab <-- ??????ab */
144 sll3 r3, r1, #8
145 or r1, r3
146 sll3 r3, r1, #16
147 or r1, r3
148 addi r4, #-4
149qword_set_loop:
150 ld r3, @(4,r4) /* cache line allocate */
151 addi r2, #-16
152 st r1, @+r4
153 st r1, @+r4
154 cmpui r2, #16
155 st r1, @+r4
156 st r1, @+r4
157 bnc qword_set_loop
158 bnez r2, set_remainder
159 jmp r14
160set_remainder:
161 cmpui r2, #4
162 bc byte_set_wrap1
163 addi r2, #-4
164 bra word_set_loop
165
166byte_set_wrap:
167 addi r2, #4
168 beqz r2, end_memset
169byte_set_wrap1:
170 addi r4, #4
171byte_set:
172 addi r2, #-1
173 stb r1, @r4
174 addi r4, #1
175 bnez r2, byte_set
176end_memset:
177 jmp r14
178
179#endif /* not CONFIG_ISA_DUAL_ISSUE */
180
181 .end
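For reference, the fill-pattern replication performed in word_set/qword_set above ("r1: abababab <-- ??????ab") is, in C terms (illustrative, the helper name is ours):

    /* Illustrative sketch: replicate the low byte of val across all four bytes. */
    static unsigned int replicate_fill_byte_sketch(unsigned int val)
    {
        val &= 0x000000ff;   /* ??????ab -> 000000ab */
        val |= val << 8;     /* 000000ab -> 0000abab */
        val |= val << 16;    /* 0000abab -> abababab */
        return val;
    }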
diff --git a/arch/m32r/lib/putuser.S b/arch/m32r/lib/putuser.S
new file mode 100644
index 000000000000..218154cc3890
--- /dev/null
+++ b/arch/m32r/lib/putuser.S
@@ -0,0 +1,84 @@
1/*
2 * __put_user functions.
3 *
4 * (C) Copyright 1998 Linus Torvalds
5 * (C) Copyright 2001 Hirokazu Takata
6 *
7 * These functions have a non-standard call interface
8 * to make them more efficient.
9 */
10
11#include <linux/config.h>
12
13/*
14 * __put_user_X
15 *
16 * Inputs: r0 contains the address
17 * r1 contains the value
18 *
19 * Outputs: r0 is error code (0 or -EFAULT)
20 * r1 is corrupted (will contain "current_task").
21 *
22 * These functions should not modify any other registers,
23 * as they get called from within inline assembly.
24 */
25
26#ifdef CONFIG_ISA_DUAL_ISSUE
27
28 .text
29 .balign 4
30 .globl __put_user_1
31__put_user_1:
321: stb r1, @r0 || ldi r0, #0
33 jmp r14
34
35 .balign 4
36 .globl __put_user_2
37__put_user_2:
382: sth r1, @r0 || ldi r0, #0
39 jmp r14
40
41 .balign 4
42 .globl __put_user_4
43__put_user_4:
443: st r1, @r0 || ldi r0, #0
45 jmp r14
46
47bad_put_user:
48 ldi r0, #-14 || jmp r14
49
50#else /* not CONFIG_ISA_DUAL_ISSUE */
51
52 .text
53 .balign 4
54 .globl __put_user_1
55__put_user_1:
561: stb r1, @r0
57 ldi r0, #0
58 jmp r14
59
60 .balign 4
61 .globl __put_user_2
62__put_user_2:
632: sth r1, @r0
64 ldi r0, #0
65 jmp r14
66
67 .balign 4
68 .globl __put_user_4
69__put_user_4:
703: st r1, @r0
71 ldi r0, #0
72 jmp r14
73
74bad_put_user:
75 ldi r0, #-14
76 jmp r14
77
78#endif /* not CONFIG_ISA_DUAL_ISSUE */
79
80.section __ex_table,"a"
81 .long 1b,bad_put_user
82 .long 2b,bad_put_user
83 .long 3b,bad_put_user
84.previous
diff --git a/arch/m32r/lib/strlen.S b/arch/m32r/lib/strlen.S
new file mode 100644
index 000000000000..8d23cfbd600c
--- /dev/null
+++ b/arch/m32r/lib/strlen.S
@@ -0,0 +1,120 @@
1/*
2 * linux/arch/m32r/lib/strlen.S -- strlen code.
3 *
4 * Copyright (C) 2001 Hirokazu Takata
5 *
6 * size_t strlen(const char *s);
7 *
8 */
9/* $Id$ */
10
11
12#include <linux/config.h>
13#include <linux/linkage.h>
14#include <asm/assembler.h>
15
16#ifdef CONFIG_ISA_DUAL_ISSUE
17
18 .text
19ENTRY(strlen)
20 mv r6, r0 || ldi r2, #0
21 and3 r0, r0, #3
22 bnez r0, strlen_byte
23;
24strlen_word:
25 ld r0, @r6+
26;
27 seth r5, #high(0x01010101)
28 or3 r5, r5, #low(0x01010101)
29 sll3 r7, r5, #7
30strlen_word_loop:
31 ld r1, @r6+ || not r4, r0
32 sub r0, r5 || and r4, r7
33 and r4, r0
34 bnez r4, strlen_last_bytes
35 ld r0, @r6+ || not r4, r1
36 sub r1, r5 || and r4, r7
37 and r4, r1 || addi r2, #4
38 bnez r4, strlen_last_bytes
39 addi r2, #4 || bra.s strlen_word_loop
40
41 ; NOTE: If a null char. exists, return 0.
42 ; if ((x - 0x01010101) & ~x & 0x80808080)
43 ; return 0;
44;
45strlen_byte:
46 ldb r1, @r6 || addi r6, #1
47 beqz r1, strlen_exit
48 addi r2, #1 || bra.s strlen_byte
49;
50strlen_last_bytes:
51 ldi r0, #4 || addi r6, #-8
52;
53strlen_byte_loop:
54 ldb r1, @r6 || addi r6, #1
55 addi r0, #-1 || cmpz r1
56 bc.s strlen_exit || cmpz r0
57 addi r2, #1 || bnc.s strlen_byte_loop
58;
59strlen_exit:
60 mv r0, r2 || jmp r14
61
62#else /* not CONFIG_ISA_DUAL_ISSUE */
63
64 .text
65ENTRY(strlen)
66 mv r6, r0
67 ldi r2, #0
68 and3 r0, r0, #3
69 bnez r0, strlen_byte
70;
71strlen_word:
72 ld r0, @r6+
73;
74 seth r5, #high(0x01010101)
75 or3 r5, r5, #low(0x01010101)
76 sll3 r7, r5, #7
77strlen_word_loop:
78 ld r1, @r6+
79 not r4, r0 ; NOTE: If a null char. exists, return 0.
80 sub r0, r5 ; if ((x - 0x01010101) & ~x & 0x80808080)
81 and r4, r7 ; return 0;
82 and r4, r0
83 bnez r4, strlen_last_bytes
84 addi r2, #4
85;
86 ld r0, @r6+
87 not r4, r1 ; NOTE: If a null char. exists, return 0.
88 sub r1, r5 ; if ((x - 0x01010101) & ~x & 0x80808080)
89 and r4, r7 ; return 0;
90 and r4, r1
91 bnez r4, strlen_last_bytes
92 addi r2, #4
93 bra strlen_word_loop
94;
95strlen_byte:
96 ldb r1, @r6
97 addi r6, #1
98 beqz r1, strlen_exit
99 addi r2, #1
100 bra strlen_byte
101;
102strlen_last_bytes:
103 ldi r0, #4
104 addi r6, #-8
105;
106strlen_byte_loop:
107 ldb r1, @r6
108 addi r6, #1
109 addi r0, #-1
110 beqz r1, strlen_exit
111 addi r2, #1
112 bnez r0, strlen_byte_loop
113;
114strlen_exit:
115 mv r0, r2
116 jmp r14
117
118#endif /* not CONFIG_ISA_DUAL_ISSUE */
119
120 .end
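The word-at-a-time null test used in strlen_word_loop above relies on the zero-byte detection identity noted in the comments; a minimal C statement of it (illustrative, the helper name is ours):

    #include <stdint.h>

    /* Non-zero iff the 32-bit word x contains at least one zero byte. */
    static int word_has_zero_byte_sketch(uint32_t x)
    {
        return ((x - 0x01010101u) & ~x & 0x80808080u) != 0;
    }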
diff --git a/arch/m32r/lib/usercopy.c b/arch/m32r/lib/usercopy.c
new file mode 100644
index 000000000000..6c6855f1aa05
--- /dev/null
+++ b/arch/m32r/lib/usercopy.c
@@ -0,0 +1,391 @@
1/*
2 * User address space access functions.
3 * The non-inlined parts of asm-m32r/uaccess.h are here.
4 *
5 * Copyright 1997 Andi Kleen <ak@muc.de>
6 * Copyright 1997 Linus Torvalds
7 * Copyright 2001, 2002, 2004 Hirokazu Takata
8 */
9#include <linux/config.h>
10#include <linux/prefetch.h>
11#include <linux/string.h>
12#include <linux/thread_info.h>
13#include <asm/uaccess.h>
14
15unsigned long
16__generic_copy_to_user(void *to, const void *from, unsigned long n)
17{
18 prefetch(from);
19 if (access_ok(VERIFY_WRITE, to, n))
20 __copy_user(to,from,n);
21 return n;
22}
23
24unsigned long
25__generic_copy_from_user(void *to, const void *from, unsigned long n)
26{
27 prefetchw(to);
28 if (access_ok(VERIFY_READ, from, n))
29 __copy_user_zeroing(to,from,n);
30 else
31 memset(to, 0, n);
32 return n;
33}
34
35
36/*
37 * Copy a null terminated string from userspace.
38 */
39
40#ifdef CONFIG_ISA_DUAL_ISSUE
41
42#define __do_strncpy_from_user(dst,src,count,res) \
43do { \
44 int __d0, __d1, __d2; \
45 __asm__ __volatile__( \
46 " beqz %1, 2f\n" \
47 " .fillinsn\n" \
48 "0: ldb r14, @%3 || addi %3, #1\n" \
49 " stb r14, @%4 || addi %4, #1\n" \
50 " beqz r14, 1f\n" \
51 " addi %1, #-1\n" \
52 " bnez %1, 0b\n" \
53 " .fillinsn\n" \
54 "1: sub %0, %1\n" \
55 " .fillinsn\n" \
56 "2:\n" \
57 ".section .fixup,\"ax\"\n" \
58 " .balign 4\n" \
59 "3: seth r14, #high(2b)\n" \
60 " or3 r14, r14, #low(2b)\n" \
61 " jmp r14 || ldi %0, #%5\n" \
62 ".previous\n" \
63 ".section __ex_table,\"a\"\n" \
64 " .balign 4\n" \
65 " .long 0b,3b\n" \
66 ".previous" \
67 : "=r"(res), "=r"(count), "=&r" (__d0), "=&r" (__d1), \
68 "=&r" (__d2) \
69 : "i"(-EFAULT), "0"(count), "1"(count), "3"(src), \
70 "4"(dst) \
71 : "r14", "cbit", "memory"); \
72} while (0)
73
74#else /* not CONFIG_ISA_DUAL_ISSUE */
75
76#define __do_strncpy_from_user(dst,src,count,res) \
77do { \
78 int __d0, __d1, __d2; \
79 __asm__ __volatile__( \
80 " beqz %1, 2f\n" \
81 " .fillinsn\n" \
82 "0: ldb r14, @%3\n" \
83 " stb r14, @%4\n" \
84 " addi %3, #1\n" \
85 " addi %4, #1\n" \
86 " beqz r14, 1f\n" \
87 " addi %1, #-1\n" \
88 " bnez %1, 0b\n" \
89 " .fillinsn\n" \
90 "1: sub %0, %1\n" \
91 " .fillinsn\n" \
92 "2:\n" \
93 ".section .fixup,\"ax\"\n" \
94 " .balign 4\n" \
95 "3: ldi %0, #%5\n" \
96 " seth r14, #high(2b)\n" \
97 " or3 r14, r14, #low(2b)\n" \
98 " jmp r14\n" \
99 ".previous\n" \
100 ".section __ex_table,\"a\"\n" \
101 " .balign 4\n" \
102 " .long 0b,3b\n" \
103 ".previous" \
104 : "=r"(res), "=r"(count), "=&r" (__d0), "=&r" (__d1), \
105 "=&r" (__d2) \
106 : "i"(-EFAULT), "0"(count), "1"(count), "3"(src), \
107 "4"(dst) \
108 : "r14", "cbit", "memory"); \
109} while (0)
110
111#endif /* CONFIG_ISA_DUAL_ISSUE */
112
113long
114__strncpy_from_user(char *dst, const char *src, long count)
115{
116 long res;
117 __do_strncpy_from_user(dst, src, count, res);
118 return res;
119}
120
121long
122strncpy_from_user(char *dst, const char *src, long count)
123{
124 long res = -EFAULT;
125 if (access_ok(VERIFY_READ, src, 1))
126 __do_strncpy_from_user(dst, src, count, res);
127 return res;
128}
129
130
131/*
132 * Zero Userspace
133 */
134
135#ifdef CONFIG_ISA_DUAL_ISSUE
136
137#define __do_clear_user(addr,size) \
138do { \
139 int __dst, __c; \
140 __asm__ __volatile__( \
141 " beqz %1, 9f\n" \
142 " and3 r14, %0, #3\n" \
143 " bnez r14, 2f\n" \
144 " and3 r14, %1, #3\n" \
145 " bnez r14, 2f\n" \
146 " and3 %1, %1, #3\n" \
147 " beqz %2, 2f\n" \
148 " addi %0, #-4\n" \
149 " .fillinsn\n" \
150 "0: ; word clear \n" \
151 " st %6, @+%0 || addi %2, #-1\n" \
152 " bnez %2, 0b\n" \
153 " beqz %1, 9f\n" \
154 " .fillinsn\n" \
155 "2: ; byte clear \n" \
156 " stb %6, @%0 || addi %1, #-1\n" \
157 " addi %0, #1\n" \
158 " bnez %1, 2b\n" \
159 " .fillinsn\n" \
160 "9:\n" \
161 ".section .fixup,\"ax\"\n" \
162 " .balign 4\n" \
163 "4: slli %2, #2\n" \
164 " seth r14, #high(9b)\n" \
165 " or3 r14, r14, #low(9b)\n" \
166 " jmp r14 || add %1, %2\n" \
167 ".previous\n" \
168 ".section __ex_table,\"a\"\n" \
169 " .balign 4\n" \
170 " .long 0b,4b\n" \
171 " .long 2b,9b\n" \
172 ".previous\n" \
173 : "=&r"(__dst), "=&r"(size), "=&r"(__c) \
174 : "0"(addr), "1"(size), "2"(size / 4), "r"(0) \
175 : "r14", "cbit", "memory"); \
176} while (0)
177
178#else /* not CONFIG_ISA_DUAL_ISSUE */
179
180#define __do_clear_user(addr,size) \
181do { \
182 int __dst, __c; \
183 __asm__ __volatile__( \
184 " beqz %1, 9f\n" \
185 " and3 r14, %0, #3\n" \
186 " bnez r14, 2f\n" \
187 " and3 r14, %1, #3\n" \
188 " bnez r14, 2f\n" \
189 " and3 %1, %1, #3\n" \
190 " beqz %2, 2f\n" \
191 " addi %0, #-4\n" \
192 " .fillinsn\n" \
193 "0: st %6, @+%0 ; word clear \n" \
194 " addi %2, #-1\n" \
195 " bnez %2, 0b\n" \
196 " beqz %1, 9f\n" \
197 " .fillinsn\n" \
198 "2: stb %6, @%0 ; byte clear \n" \
199 " addi %1, #-1\n" \
200 " addi %0, #1\n" \
201 " bnez %1, 2b\n" \
202 " .fillinsn\n" \
203 "9:\n" \
204 ".section .fixup,\"ax\"\n" \
205 " .balign 4\n" \
206 "4: slli %2, #2\n" \
207 " add %1, %2\n" \
208 " seth r14, #high(9b)\n" \
209 " or3 r14, r14, #low(9b)\n" \
210 " jmp r14\n" \
211 ".previous\n" \
212 ".section __ex_table,\"a\"\n" \
213 " .balign 4\n" \
214 " .long 0b,4b\n" \
215 " .long 2b,9b\n" \
216 ".previous\n" \
217 : "=&r"(__dst), "=&r"(size), "=&r"(__c) \
218 : "0"(addr), "1"(size), "2"(size / 4), "r"(0) \
219 : "r14", "cbit", "memory"); \
220} while (0)
221
222#endif /* not CONFIG_ISA_DUAL_ISSUE */
223
224unsigned long
225clear_user(void *to, unsigned long n)
226{
227 if (access_ok(VERIFY_WRITE, to, n))
228 __do_clear_user(to, n);
229 return n;
230}
231
232unsigned long
233__clear_user(void *to, unsigned long n)
234{
235 __do_clear_user(to, n);
236 return n;
237}
238
239/*
240 * Return the size of a string (including the ending 0)
241 *
242 * Return 0 on exception, a value greater than N if too long
243 */
244
245#ifdef CONFIG_ISA_DUAL_ISSUE
246
247long strnlen_user(const char *s, long n)
248{
249 unsigned long mask = -__addr_ok(s);
250 unsigned long res;
251
252 __asm__ __volatile__(
253 " and %0, %5 || mv r1, %1\n"
254 " beqz %0, strnlen_exit\n"
255 " and3 r0, %1, #3\n"
256 " bnez r0, strnlen_byte_loop\n"
257 " cmpui %0, #4\n"
258 " bc strnlen_byte_loop\n"
259 "strnlen_word_loop:\n"
260 "0: ld r0, @%1+\n"
261 " pcmpbz r0\n"
262 " bc strnlen_last_bytes_fixup\n"
263 " addi %0, #-4\n"
264 " beqz %0, strnlen_exit\n"
265 " bgtz %0, strnlen_word_loop\n"
266 "strnlen_last_bytes:\n"
267 " mv %0, %4\n"
268 "strnlen_last_bytes_fixup:\n"
269 " addi %1, #-4\n"
270 "strnlen_byte_loop:\n"
271 "1: ldb r0, @%1 || addi %0, #-1\n"
272 " beqz r0, strnlen_exit\n"
273 " addi %1, #1\n"
274 " bnez %0, strnlen_byte_loop\n"
275 "strnlen_exit:\n"
276 " sub %1, r1\n"
277 " add3 %0, %1, #1\n"
278 " .fillinsn\n"
279 "9:\n"
280 ".section .fixup,\"ax\"\n"
281 " .balign 4\n"
282 "4: addi %1, #-4\n"
283 " .fillinsn\n"
284 "5: seth r1, #high(9b)\n"
285 " or3 r1, r1, #low(9b)\n"
286 " jmp r1 || ldi %0, #0\n"
287 ".previous\n"
288 ".section __ex_table,\"a\"\n"
289 " .balign 4\n"
290 " .long 0b,4b\n"
291 " .long 1b,5b\n"
292 ".previous"
293 : "=&r" (res), "=r" (s)
294 : "0" (n), "1" (s), "r" (n & 3), "r" (mask), "r"(0x01010101)
295 : "r0", "r1", "cbit");
296
297 /* NOTE: strnlen_user() algorithm:
298 * {
299 * char *p;
300 * for (p = s; n-- && *p != '\0'; ++p)
301 * ;
302 * return p - s + 1;
303 * }
304 */
305
306 /* NOTE: If a null char. exists, return 0.
307 * if ((x - 0x01010101) & ~x & 0x80808080)
308 * return 0;
309 */
310
311 return res & mask;
312}
313
314#else /* not CONFIG_ISA_DUAL_ISSUE */
315
316long strnlen_user(const char *s, long n)
317{
318 unsigned long mask = -__addr_ok(s);
319 unsigned long res;
320
321 __asm__ __volatile__(
322 " and %0, %5\n"
323 " mv r1, %1\n"
324 " beqz %0, strnlen_exit\n"
325 " and3 r0, %1, #3\n"
326 " bnez r0, strnlen_byte_loop\n"
327 " cmpui %0, #4\n"
328 " bc strnlen_byte_loop\n"
329 " sll3 r3, %6, #7\n"
330 "strnlen_word_loop:\n"
331 "0: ld r0, @%1+\n"
332 " not r2, r0\n"
333 " sub r0, %6\n"
334 " and r2, r3\n"
335 " and r2, r0\n"
336 " bnez r2, strnlen_last_bytes_fixup\n"
337 " addi %0, #-4\n"
338 " beqz %0, strnlen_exit\n"
339 " bgtz %0, strnlen_word_loop\n"
340 "strnlen_last_bytes:\n"
341 " mv %0, %4\n"
342 "strnlen_last_bytes_fixup:\n"
343 " addi %1, #-4\n"
344 "strnlen_byte_loop:\n"
345 "1: ldb r0, @%1\n"
346 " addi %0, #-1\n"
347 " beqz r0, strnlen_exit\n"
348 " addi %1, #1\n"
349 " bnez %0, strnlen_byte_loop\n"
350 "strnlen_exit:\n"
351 " sub %1, r1\n"
352 " add3 %0, %1, #1\n"
353 " .fillinsn\n"
354 "9:\n"
355 ".section .fixup,\"ax\"\n"
356 " .balign 4\n"
357 "4: addi %1, #-4\n"
358 " .fillinsn\n"
359 "5: ldi %0, #0\n"
360 " seth r1, #high(9b)\n"
361 " or3 r1, r1, #low(9b)\n"
362 " jmp r1\n"
363 ".previous\n"
364 ".section __ex_table,\"a\"\n"
365 " .balign 4\n"
366 " .long 0b,4b\n"
367 " .long 1b,5b\n"
368 ".previous"
369 : "=&r" (res), "=r" (s)
370 : "0" (n), "1" (s), "r" (n & 3), "r" (mask), "r"(0x01010101)
371 : "r0", "r1", "r2", "r3", "cbit");
372
373 /* NOTE: strnlen_user() algorithm:
374 * {
375 * char *p;
376 * for (p = s; n-- && *p != '\0'; ++p)
377 * ;
378 * return p - s + 1;
379 * }
380 */
381
382 /* NOTE: If a null char. exists, return 0.
383 * if ((x - 0x01010101) & ~x & 0x80808080)
384 * return 0;
385 */
386
387 return res & mask;
388}
389
390#endif /* CONFIG_ISA_DUAL_ISSUE */
391
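One subtlety in strnlen_user() above worth spelling out: mask = -__addr_ok(s) is either all ones or zero (assuming __addr_ok() yields 0 or 1, as a comparison result does), so the final "return res & mask" silently turns the computed length into 0 for a bad user pointer. A minimal sketch of that idiom (illustrative, not part of the patch):

    /* Illustrative sketch: pass res through when addr_ok is 1, force 0 when it is 0. */
    static unsigned long apply_addr_ok_mask_sketch(unsigned long res, int addr_ok)
    {
        unsigned long mask = -(unsigned long)addr_ok;  /* ~0UL if ok, 0UL otherwise */
        return res & mask;
    }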