Diffstat (limited to 'arch/sh/lib64')
-rw-r--r--   arch/sh/lib64/.gitignore         |   1
-rw-r--r--   arch/sh/lib64/Makefile           |  15
-rw-r--r--   arch/sh/lib64/c-checksum.c       | 214
-rw-r--r--   arch/sh/lib64/clear_page.S       |  54
-rw-r--r--   arch/sh/lib64/copy_page.S        |  89
-rw-r--r--   arch/sh/lib64/copy_user_memcpy.S | 217
-rw-r--r--   arch/sh/lib64/dbg.c              | 430
-rw-r--r--   arch/sh/lib64/memcpy.c           |  81
-rw-r--r--   arch/sh/lib64/panic.c            |  58
-rw-r--r--   arch/sh/lib64/udelay.c           |  56
10 files changed, 1215 insertions, 0 deletions
diff --git a/arch/sh/lib64/.gitignore b/arch/sh/lib64/.gitignore
new file mode 100644
index 000000000000..3508c2cb23c4
--- /dev/null
+++ b/arch/sh/lib64/.gitignore
@@ -0,0 +1 @@
syscalltab.h
diff --git a/arch/sh/lib64/Makefile b/arch/sh/lib64/Makefile
new file mode 100644
index 000000000000..9950966923a0
--- /dev/null
+++ b/arch/sh/lib64/Makefile
@@ -0,0 +1,15 @@
1 | # | ||
2 | # Makefile for the SH-5 specific library files.. | ||
3 | # | ||
4 | # Copyright (C) 2000, 2001 Paolo Alberelli | ||
5 | # Copyright (C) 2003 Paul Mundt | ||
6 | # | ||
7 | # This file is subject to the terms and conditions of the GNU General Public | ||
8 | # License. See the file "COPYING" in the main directory of this archive | ||
9 | # for more details. | ||
10 | # | ||
11 | |||
12 | # Panic should really be compiled as PIC | ||
13 | lib-y := udelay.o c-checksum.o dbg.o panic.o memcpy.o copy_user_memcpy.o \ | ||
14 | copy_page.o clear_page.o | ||
15 | |||
diff --git a/arch/sh/lib64/c-checksum.c b/arch/sh/lib64/c-checksum.c
new file mode 100644
index 000000000000..5dfbd8b5e558
--- /dev/null
+++ b/arch/sh/lib64/c-checksum.c
@@ -0,0 +1,214 @@
1 | /* | ||
2 | * arch/sh/lib64/c-checksum.c | ||
3 | * | ||
4 | * This file contains network checksum routines that are better done | ||
5 | * in an architecture-specific manner due to speed.. | ||
6 | */ | ||
7 | #include <linux/string.h> | ||
8 | #include <linux/errno.h> | ||
9 | #include <linux/kernel.h> | ||
10 | #include <linux/module.h> | ||
11 | #include <asm/byteorder.h> | ||
12 | #include <asm/uaccess.h> | ||
13 | |||
14 | static inline unsigned short from64to16(unsigned long long x) | ||
15 | { | ||
16 | /* add up 32-bit words for 33 bits */ | ||
17 | x = (x & 0xffffffff) + (x >> 32); | ||
18 | /* add up 16-bit and 17-bit words for 17+c bits */ | ||
19 | x = (x & 0xffff) + (x >> 16); | ||
20 | /* add up 16-bit and 2-bit for 16+c bit */ | ||
21 | x = (x & 0xffff) + (x >> 16); | ||
22 | /* add up carry.. */ | ||
23 | x = (x & 0xffff) + (x >> 16); | ||
24 | return x; | ||
25 | } | ||
26 | |||
27 | static inline unsigned short foldto16(unsigned long x) | ||
28 | { | ||
29 | /* add up 16-bit for 17 bits */ | ||
30 | x = (x & 0xffff) + (x >> 16); | ||
31 | /* add up carry.. */ | ||
32 | x = (x & 0xffff) + (x >> 16); | ||
33 | return x; | ||
34 | } | ||
35 | |||
36 | static inline unsigned short myfoldto16(unsigned long long x) | ||
37 | { | ||
38 | /* Fold down to 32-bits so we don't lose in the typedef-less | ||
39 | network stack. */ | ||
40 | /* 64 to 33 */ | ||
41 | x = (x & 0xffffffff) + (x >> 32); | ||
42 | /* 33 to 32 */ | ||
43 | x = (x & 0xffffffff) + (x >> 32); | ||
44 | |||
45 | /* add up 16-bit for 17 bits */ | ||
46 | x = (x & 0xffff) + (x >> 16); | ||
47 | /* add up carry.. */ | ||
48 | x = (x & 0xffff) + (x >> 16); | ||
49 | return x; | ||
50 | } | ||
51 | |||
52 | #define odd(x) ((x)&1) | ||
53 | #define U16(x) ntohs(x) | ||
54 | |||
55 | static unsigned long do_csum(const unsigned char *buff, int len) | ||
56 | { | ||
57 | int odd, count; | ||
58 | unsigned long result = 0; | ||
59 | |||
60 | pr_debug("do_csum buff %p, len %d (0x%x)\n", buff, len, len); | ||
61 | #ifdef DEBUG | ||
62 | for (i = 0; i < len; i++) { | ||
63 | if ((i % 26) == 0) | ||
64 | printk("\n"); | ||
65 | printk("%02X ", buff[i]); | ||
66 | } | ||
67 | #endif | ||
68 | |||
69 | if (len <= 0) | ||
70 | goto out; | ||
71 | |||
72 | odd = 1 & (unsigned long) buff; | ||
73 | if (odd) { | ||
74 | result = *buff << 8; | ||
75 | len--; | ||
76 | buff++; | ||
77 | } | ||
78 | count = len >> 1; /* nr of 16-bit words.. */ | ||
79 | if (count) { | ||
80 | if (2 & (unsigned long) buff) { | ||
81 | result += *(unsigned short *) buff; | ||
82 | count--; | ||
83 | len -= 2; | ||
84 | buff += 2; | ||
85 | } | ||
86 | count >>= 1; /* nr of 32-bit words.. */ | ||
87 | if (count) { | ||
88 | unsigned long carry = 0; | ||
89 | do { | ||
90 | unsigned long w = *(unsigned long *) buff; | ||
91 | buff += 4; | ||
92 | count--; | ||
93 | result += carry; | ||
94 | result += w; | ||
95 | carry = (w > result); | ||
96 | } while (count); | ||
97 | result += carry; | ||
98 | result = (result & 0xffff) + (result >> 16); | ||
99 | } | ||
100 | if (len & 2) { | ||
101 | result += *(unsigned short *) buff; | ||
102 | buff += 2; | ||
103 | } | ||
104 | } | ||
105 | if (len & 1) | ||
106 | result += *buff; | ||
107 | result = foldto16(result); | ||
108 | if (odd) | ||
109 | result = ((result >> 8) & 0xff) | ((result & 0xff) << 8); | ||
110 | |||
111 | pr_debug("\nCHECKSUM is 0x%lx\n", result); | ||
112 | |||
113 | out: | ||
114 | return result; | ||
115 | } | ||
116 | |||
117 | /* computes the checksum of a memory block at buff, length len, | ||
118 | and adds in "sum" (32-bit) */ | ||
119 | __wsum csum_partial(const void *buff, int len, __wsum sum) | ||
120 | { | ||
121 | unsigned long long result = do_csum(buff, len); | ||
122 | |||
123 | /* add in old sum, and carry.. */ | ||
124 | result += (__force u32)sum; | ||
125 | /* 32+c bits -> 32 bits */ | ||
126 | result = (result & 0xffffffff) + (result >> 32); | ||
127 | |||
128 | pr_debug("csum_partial, buff %p len %d sum 0x%x result=0x%016Lx\n", | ||
129 | buff, len, sum, result); | ||
130 | |||
131 | return (__force __wsum)result; | ||
132 | } | ||
133 | |||
134 | /* Copy while checksumming, otherwise like csum_partial. */ | ||
135 | __wsum | ||
136 | csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum) | ||
137 | { | ||
138 | sum = csum_partial(src, len, sum); | ||
139 | memcpy(dst, src, len); | ||
140 | |||
141 | return sum; | ||
142 | } | ||
143 | |||
144 | /* Copy from userspace and compute checksum. If we catch an exception | ||
145 | then zero the rest of the buffer. */ | ||
146 | __wsum | ||
147 | csum_partial_copy_from_user(const void __user *src, void *dst, int len, | ||
148 | __wsum sum, int *err_ptr) | ||
149 | { | ||
150 | int missing; | ||
151 | |||
152 | pr_debug | ||
153 | ("csum_partial_copy_from_user src %p, dest %p, len %d, sum %08x, err_ptr %p\n", | ||
154 | src, dst, len, sum, err_ptr); | ||
155 | missing = copy_from_user(dst, src, len); | ||
156 | pr_debug(" access_ok %d\n", __access_ok((unsigned long) src, len)); | ||
157 | pr_debug(" missing %d\n", missing); | ||
158 | if (missing) { | ||
159 | memset(dst + len - missing, 0, missing); | ||
160 | *err_ptr = -EFAULT; | ||
161 | } | ||
162 | |||
163 | return csum_partial(dst, len, sum); | ||
164 | } | ||
165 | |||
166 | /* Copy to userspace and compute checksum. */ | ||
167 | __wsum | ||
168 | csum_partial_copy_to_user(const unsigned char *src, unsigned char *dst, int len, | ||
169 | __wsum sum, int *err_ptr) | ||
170 | { | ||
171 | sum = csum_partial(src, len, sum); | ||
172 | |||
173 | if (copy_to_user(dst, src, len)) | ||
174 | *err_ptr = -EFAULT; | ||
175 | |||
176 | return sum; | ||
177 | } | ||
178 | |||
179 | /* | ||
180 | * This is a version of ip_compute_csum() optimized for IP headers, | ||
181 | * which always checksum on 4 octet boundaries. | ||
182 | */ | ||
183 | __sum16 ip_fast_csum(const void *iph, unsigned int ihl) | ||
184 | { | ||
185 | pr_debug("ip_fast_csum %p,%d\n", iph, ihl); | ||
186 | |||
187 | return (__force __sum16)~do_csum(iph, ihl * 4); | ||
188 | } | ||
189 | |||
190 | __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr, | ||
191 | unsigned short len, | ||
192 | unsigned short proto, __wsum sum) | ||
193 | { | ||
194 | unsigned long long result; | ||
195 | |||
196 | pr_debug("ntohs(0x%x)=0x%x\n", 0xdead, ntohs(0xdead)); | ||
197 | pr_debug("htons(0x%x)=0x%x\n", 0xdead, htons(0xdead)); | ||
198 | |||
199 | result = (__force u64) saddr + (__force u64) daddr + | ||
200 | (__force u64) sum + ((len + proto) << 8); | ||
201 | |||
202 | /* Fold down to 32-bits so we don't lose in the typedef-less | ||
203 | network stack. */ | ||
204 | /* 64 to 33 */ | ||
205 | result = (result & 0xffffffff) + (result >> 32); | ||
206 | /* 33 to 32 */ | ||
207 | result = (result & 0xffffffff) + (result >> 32); | ||
208 | |||
209 | pr_debug("%s saddr %x daddr %x len %x proto %x sum %x result %08Lx\n", | ||
210 | __FUNCTION__, saddr, daddr, len, proto, sum, result); | ||
211 | |||
212 | 	return (__force __wsum)result; | ||
213 | } | ||
214 | EXPORT_SYMBOL(csum_tcpudp_nofold); | ||
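The staged folds in this file (from64to16 and friends) reduce a wide checksum accumulation by repeatedly adding the high part back into the low part, so every carry is preserved: 64 bits down to 33, 33 to 32, then 32 down to 16 plus a final carry. A minimal user-space sketch of the same reduction, using a made-up 6-byte buffer purely for illustration:

#include <stdio.h>
#include <stdint.h>

/* Same staged reduction as from64to16() in the file above. */
static uint16_t fold64(uint64_t x)
{
	x = (x & 0xffffffffULL) + (x >> 32);	/* 64 -> 33 bits */
	x = (x & 0xffff) + (x >> 16);		/* 33 -> 17 + carry */
	x = (x & 0xffff) + (x >> 16);		/* 17 -> 16 + carry */
	x = (x & 0xffff) + (x >> 16);		/* absorb the last carry */
	return (uint16_t)x;
}

int main(void)
{
	/* Arbitrary example bytes; summed as little-endian 16-bit words. */
	uint8_t buf[6] = { 0x45, 0x00, 0x00, 0x3c, 0x1c, 0x46 };
	uint64_t sum = 0;
	int i;

	for (i = 0; i < 6; i += 2)
		sum += (uint64_t)buf[i] | ((uint64_t)buf[i + 1] << 8);

	printf("folded sum = 0x%04x\n", fold64(sum));
	return 0;
}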
diff --git a/arch/sh/lib64/clear_page.S b/arch/sh/lib64/clear_page.S
new file mode 100644
index 000000000000..007ab48ecc1c
--- /dev/null
+++ b/arch/sh/lib64/clear_page.S
@@ -0,0 +1,54 @@
1 | /* | ||
2 | Copyright 2003 Richard Curnow, SuperH (UK) Ltd. | ||
3 | |||
4 | This file is subject to the terms and conditions of the GNU General Public | ||
5 | License. See the file "COPYING" in the main directory of this archive | ||
6 | for more details. | ||
7 | |||
8 | Tight version of memset for the case of just clearing a page. It turns out | ||
9 | that having the allocos spaced out slightly due to the increment/branch | ||
10 | pair causes them to contend less for access to the cache. Similarly, | ||
11 | keeping the stores apart from the allocos causes less contention. => Do two | ||
12 | separate loops. Do multiple stores per loop to amortise the | ||
13 | increment/branch cost a little. | ||
14 | |||
15 | Parameters: | ||
16 | r2 : source effective address (start of page) | ||
17 | |||
18 | Always clears 4096 bytes. | ||
19 | |||
20 | Note : alloco guarded by synco to avoid TAKum03020 erratum | ||
21 | |||
22 | */ | ||
23 | |||
24 | .section .text..SHmedia32,"ax" | ||
25 | .little | ||
26 | |||
27 | .balign 8 | ||
28 | .global clear_page | ||
29 | clear_page: | ||
30 | pta/l 1f, tr1 | ||
31 | pta/l 2f, tr2 | ||
32 | ptabs/l r18, tr0 | ||
33 | |||
34 | movi 4096, r7 | ||
35 | add r2, r7, r7 | ||
36 | add r2, r63, r6 | ||
37 | 1: | ||
38 | alloco r6, 0 | ||
39 | synco ! TAKum03020 | ||
40 | addi r6, 32, r6 | ||
41 | bgt/l r7, r6, tr1 | ||
42 | |||
43 | add r2, r63, r6 | ||
44 | 2: | ||
45 | st.q r6, 0, r63 | ||
46 | st.q r6, 8, r63 | ||
47 | st.q r6, 16, r63 | ||
48 | st.q r6, 24, r63 | ||
49 | addi r6, 32, r6 | ||
50 | bgt/l r7, r6, tr2 | ||
51 | |||
52 | blink tr0, r63 | ||
53 | |||
54 | |||
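As the header comment explains, the routine makes two passes over the page: the first walks it in 32-byte cache-line steps issuing alloco (which claims a cache line for the address without fetching its old contents from memory), the second does the actual quadword stores, 32 bytes per iteration. A rough C rendering of that shape, with a hypothetical alloco_hint() stand-in since plain C has no equivalent of the alloco/synco pair:

/* Hypothetical stand-in for the SH-5 alloco/synco pair; a no-op here. */
static inline void alloco_hint(void *line)
{
	(void)line;
}

/* Always clears 4096 bytes, mirroring the two-loop structure above. */
void clear_page_sketch(void *page)
{
	char *p = (char *)page;
	char *end = p + 4096;
	unsigned long long *q;

	for (; p < end; p += 32)		/* pass 1: allocate cache lines */
		alloco_hint(p);

	for (q = (unsigned long long *)page; (char *)q < end; q += 4) {
		q[0] = 0;			/* pass 2: 32 bytes per iteration */
		q[1] = 0;
		q[2] = 0;
		q[3] = 0;
	}
}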
diff --git a/arch/sh/lib64/copy_page.S b/arch/sh/lib64/copy_page.S
new file mode 100644
index 000000000000..0ec6fca63b56
--- /dev/null
+++ b/arch/sh/lib64/copy_page.S
@@ -0,0 +1,89 @@
1 | /* | ||
2 | Copyright 2003 Richard Curnow, SuperH (UK) Ltd. | ||
3 | |||
4 | This file is subject to the terms and conditions of the GNU General Public | ||
5 | License. See the file "COPYING" in the main directory of this archive | ||
6 | for more details. | ||
7 | |||
8 | Tight version of memcpy for the case of just copying a page. | ||
9 | Prefetch strategy empirically optimised against RTL simulations | ||
10 | of SH5-101 cut2 eval chip with Cayman board DDR memory. | ||
11 | |||
12 | Parameters: | ||
13 | r2 : destination effective address (start of page) | ||
14 | r3 : source effective address (start of page) | ||
15 | |||
16 | Always copies 4096 bytes. | ||
17 | |||
18 | Points to review. | ||
19 | * Currently the prefetch is 4 lines ahead and the alloco is 2 lines ahead. | ||
20 | It seems like the prefetch needs to be at least 4 lines ahead to get | ||
21 | the data into the cache in time, and the allocos contend with outstanding | ||
22 | prefetches for the same cache set, so it's better to have the numbers | ||
23 | different. | ||
24 | */ | ||
25 | |||
26 | .section .text..SHmedia32,"ax" | ||
27 | .little | ||
28 | |||
29 | .balign 8 | ||
30 | .global copy_page | ||
31 | copy_page: | ||
32 | |||
33 | /* Copy 4096 bytes worth of data from r3 to r2. | ||
34 | Do prefetches 4 lines ahead. | ||
35 | Do alloco 2 lines ahead */ | ||
36 | |||
37 | pta 1f, tr1 | ||
38 | pta 2f, tr2 | ||
39 | pta 3f, tr3 | ||
40 | ptabs r18, tr0 | ||
41 | |||
42 | #if 0 | ||
43 | /* TAKum03020 */ | ||
44 | ld.q r3, 0x00, r63 | ||
45 | ld.q r3, 0x20, r63 | ||
46 | ld.q r3, 0x40, r63 | ||
47 | ld.q r3, 0x60, r63 | ||
48 | #endif | ||
49 | alloco r2, 0x00 | ||
50 | synco ! TAKum03020 | ||
51 | alloco r2, 0x20 | ||
52 | synco ! TAKum03020 | ||
53 | |||
54 | movi 3968, r6 | ||
55 | add r2, r6, r6 | ||
56 | addi r6, 64, r7 | ||
57 | addi r7, 64, r8 | ||
58 | sub r3, r2, r60 | ||
59 | addi r60, 8, r61 | ||
60 | addi r61, 8, r62 | ||
61 | addi r62, 8, r23 | ||
62 | addi r60, 0x80, r22 | ||
63 | |||
64 | /* Minimal code size. The extra branches inside the loop don't cost much | ||
65 | because they overlap with the time spent waiting for prefetches to | ||
66 | complete. */ | ||
67 | 1: | ||
68 | #if 0 | ||
69 | /* TAKum03020 */ | ||
70 | bge/u r2, r6, tr2 ! skip prefetch for last 4 lines | ||
71 | ldx.q r2, r22, r63 ! prefetch 4 lines hence | ||
72 | #endif | ||
73 | 2: | ||
74 | bge/u r2, r7, tr3 ! skip alloco for last 2 lines | ||
75 | alloco r2, 0x40 ! alloc destination line 2 lines ahead | ||
76 | synco ! TAKum03020 | ||
77 | 3: | ||
78 | ldx.q r2, r60, r36 | ||
79 | ldx.q r2, r61, r37 | ||
80 | ldx.q r2, r62, r38 | ||
81 | ldx.q r2, r23, r39 | ||
82 | st.q r2, 0, r36 | ||
83 | st.q r2, 8, r37 | ||
84 | st.q r2, 16, r38 | ||
85 | st.q r2, 24, r39 | ||
86 | addi r2, 32, r2 | ||
87 | bgt/l r8, r2, tr1 | ||
88 | |||
89 | blink tr0, r63 ! return | ||
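One detail worth noting in the loop above: only the destination pointer (r2) is advanced, and the source quadwords are fetched through ldx.q using the constant deltas set up before the loop (r60 = source - destination, then +8, +16, +24), which saves a second induction variable. The same addressing idea expressed in C, as a sketch only (the real routine additionally prefetches and pre-allocates destination cache lines):

/* Copies exactly 4096 bytes; both pages assumed 8-byte aligned. */
void copy_page_sketch(void *dst, const void *src)
{
	char *d = (char *)dst;
	char *end = d + 4096;
	long delta = (const char *)src - (char *)dst;	/* constant src - dst */

	for (; d < end; d += 32) {
		unsigned long long t0 = *(unsigned long long *)(d + delta);
		unsigned long long t1 = *(unsigned long long *)(d + delta + 8);
		unsigned long long t2 = *(unsigned long long *)(d + delta + 16);
		unsigned long long t3 = *(unsigned long long *)(d + delta + 24);

		*(unsigned long long *)(d + 0)  = t0;
		*(unsigned long long *)(d + 8)  = t1;
		*(unsigned long long *)(d + 16) = t2;
		*(unsigned long long *)(d + 24) = t3;
	}
}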
diff --git a/arch/sh/lib64/copy_user_memcpy.S b/arch/sh/lib64/copy_user_memcpy.S
new file mode 100644
index 000000000000..2a62816d2ddd
--- /dev/null
+++ b/arch/sh/lib64/copy_user_memcpy.S
@@ -0,0 +1,217 @@
1 | ! | ||
2 | ! Fast SH memcpy | ||
3 | ! | ||
4 | ! by Toshiyasu Morita (tm@netcom.com) | ||
5 | ! hacked by J"orn Rennecke (joern.rennecke@superh.com) ("o for o-umlaut) | ||
6 | ! SH5 code Copyright 2002 SuperH Ltd. | ||
7 | ! | ||
8 | ! Entry: ARG0: destination pointer | ||
9 | ! ARG1: source pointer | ||
10 | ! ARG2: byte count | ||
11 | ! | ||
12 | ! Exit: RESULT: destination pointer | ||
13 | ! any other registers in the range r0-r7: trashed | ||
14 | ! | ||
15 | ! Notes: Usually one wants to do small reads and write a longword, but | ||
16 | ! unfortunately it is difficult in some cases to concatenate bytes | ||
17 | ! into a longword on the SH, so this does a longword read and small | ||
18 | ! writes. | ||
19 | ! | ||
20 | ! This implementation makes two assumptions about how it is called: | ||
21 | ! | ||
22 | ! 1.: If the byte count is nonzero, the address of the last byte to be | ||
23 | ! copied is unsigned greater than the address of the first byte to | ||
24 | ! be copied. This could be easily swapped for a signed comparison, | ||
25 | ! but the algorithm used needs some comparison. | ||
26 | ! | ||
27 | ! 2.: When there are two or three bytes in the last word of an 11-or-more | ||
28 | ! bytes memory chunk to be copied, the rest of the word can be read | ||
29 | ! without side effects. | ||
30 | ! This could be easily changed by increasing the minimum size of | ||
31 | ! a fast memcpy and the amount subtracted from r7 before L_2l_loop be 2, | ||
32 | ! however, this would cost a few extra cycles on average. | ||
33 | ! For SHmedia, the assumption is that any quadword can be read in its | ||
34 | ! entirety if at least one byte is included in the copy. | ||
35 | |||
36 | /* Imported into Linux kernel by Richard Curnow. This is used to implement the | ||
37 | __copy_user function in the general case, so it has to be a distinct | ||
38 | function from intra-kernel memcpy to allow for exception fix-ups in the | ||
39 | event that the user pointer is bad somewhere in the copy (e.g. due to | ||
40 | running off the end of the vma). | ||
41 | |||
42 | Note, this algorithm will be slightly wasteful in the case where the source | ||
43 | and destination pointers are equally aligned, because the stlo/sthi pairs | ||
44 | could then be merged back into single stores. If there are a lot of cache | ||
45 | misses, this is probably offset by the stall lengths on the preloads. | ||
46 | |||
47 | */ | ||
48 | |||
49 | /* NOTE : Prefetches removed and allocos guarded by synco to avoid TAKum03020 | ||
50 | * erratum. The first two prefetches are nop-ed out to avoid upsetting the | ||
51 | * instruction counts used in the jump address calculation. | ||
52 | * */ | ||
53 | |||
54 | .section .text..SHmedia32,"ax" | ||
55 | .little | ||
56 | .balign 32 | ||
57 | .global copy_user_memcpy | ||
58 | .global copy_user_memcpy_end | ||
59 | copy_user_memcpy: | ||
60 | |||
61 | #define LDUAQ(P,O,D0,D1) ldlo.q P,O,D0; ldhi.q P,O+7,D1 | ||
62 | #define STUAQ(P,O,D0,D1) stlo.q P,O,D0; sthi.q P,O+7,D1 | ||
63 | #define LDUAL(P,O,D0,D1) ldlo.l P,O,D0; ldhi.l P,O+3,D1 | ||
64 | #define STUAL(P,O,D0,D1) stlo.l P,O,D0; sthi.l P,O+3,D1 | ||
65 | |||
66 | nop ! ld.b r3,0,r63 ! TAKum03020 | ||
67 | pta/l Large,tr0 | ||
68 | movi 25,r0 | ||
69 | bgeu/u r4,r0,tr0 | ||
70 | nsb r4,r0 | ||
71 | shlli r0,5,r0 | ||
72 | movi (L1-L0+63*32 + 1) & 0xffff,r1 | ||
73 | sub r1, r0, r0 | ||
74 | L0: ptrel r0,tr0 | ||
75 | add r2,r4,r5 | ||
76 | ptabs r18,tr1 | ||
77 | add r3,r4,r6 | ||
78 | blink tr0,r63 | ||
79 | |||
80 | /* Rearranged to make cut2 safe */ | ||
81 | .balign 8 | ||
82 | L4_7: /* 4..7 byte memcpy cntd. */ | ||
83 | stlo.l r2, 0, r0 | ||
84 | or r6, r7, r6 | ||
85 | sthi.l r5, -1, r6 | ||
86 | stlo.l r5, -4, r6 | ||
87 | blink tr1,r63 | ||
88 | |||
89 | .balign 8 | ||
90 | L1: /* 0 byte memcpy */ | ||
91 | nop | ||
92 | blink tr1,r63 | ||
93 | nop | ||
94 | nop | ||
95 | nop | ||
96 | nop | ||
97 | |||
98 | L2_3: /* 2 or 3 byte memcpy cntd. */ | ||
99 | st.b r5,-1,r6 | ||
100 | blink tr1,r63 | ||
101 | |||
102 | /* 1 byte memcpy */ | ||
103 | ld.b r3,0,r0 | ||
104 | st.b r2,0,r0 | ||
105 | blink tr1,r63 | ||
106 | |||
107 | L8_15: /* 8..15 byte memcpy cntd. */ | ||
108 | stlo.q r2, 0, r0 | ||
109 | or r6, r7, r6 | ||
110 | sthi.q r5, -1, r6 | ||
111 | stlo.q r5, -8, r6 | ||
112 | blink tr1,r63 | ||
113 | |||
114 | /* 2 or 3 byte memcpy */ | ||
115 | ld.b r3,0,r0 | ||
116 | nop ! ld.b r2,0,r63 ! TAKum03020 | ||
117 | ld.b r3,1,r1 | ||
118 | st.b r2,0,r0 | ||
119 | pta/l L2_3,tr0 | ||
120 | ld.b r6,-1,r6 | ||
121 | st.b r2,1,r1 | ||
122 | blink tr0, r63 | ||
123 | |||
124 | /* 4 .. 7 byte memcpy */ | ||
125 | LDUAL (r3, 0, r0, r1) | ||
126 | pta L4_7, tr0 | ||
127 | ldlo.l r6, -4, r7 | ||
128 | or r0, r1, r0 | ||
129 | sthi.l r2, 3, r0 | ||
130 | ldhi.l r6, -1, r6 | ||
131 | blink tr0, r63 | ||
132 | |||
133 | /* 8 .. 15 byte memcpy */ | ||
134 | LDUAQ (r3, 0, r0, r1) | ||
135 | pta L8_15, tr0 | ||
136 | ldlo.q r6, -8, r7 | ||
137 | or r0, r1, r0 | ||
138 | sthi.q r2, 7, r0 | ||
139 | ldhi.q r6, -1, r6 | ||
140 | blink tr0, r63 | ||
141 | |||
142 | /* 16 .. 24 byte memcpy */ | ||
143 | LDUAQ (r3, 0, r0, r1) | ||
144 | LDUAQ (r3, 8, r8, r9) | ||
145 | or r0, r1, r0 | ||
146 | sthi.q r2, 7, r0 | ||
147 | or r8, r9, r8 | ||
148 | sthi.q r2, 15, r8 | ||
149 | ldlo.q r6, -8, r7 | ||
150 | ldhi.q r6, -1, r6 | ||
151 | stlo.q r2, 8, r8 | ||
152 | stlo.q r2, 0, r0 | ||
153 | or r6, r7, r6 | ||
154 | sthi.q r5, -1, r6 | ||
155 | stlo.q r5, -8, r6 | ||
156 | blink tr1,r63 | ||
157 | |||
158 | Large: | ||
159 | ! ld.b r2, 0, r63 ! TAKum03020 | ||
160 | pta/l Loop_ua, tr1 | ||
161 | ori r3, -8, r7 | ||
162 | sub r2, r7, r22 | ||
163 | sub r3, r2, r6 | ||
164 | add r2, r4, r5 | ||
165 | ldlo.q r3, 0, r0 | ||
166 | addi r5, -16, r5 | ||
167 | movi 64+8, r27 ! could subtract r7 from that. | ||
168 | stlo.q r2, 0, r0 | ||
169 | sthi.q r2, 7, r0 | ||
170 | ldx.q r22, r6, r0 | ||
171 | bgtu/l r27, r4, tr1 | ||
172 | |||
173 | addi r5, -48, r27 | ||
174 | pta/l Loop_line, tr0 | ||
175 | addi r6, 64, r36 | ||
176 | addi r6, -24, r19 | ||
177 | addi r6, -16, r20 | ||
178 | addi r6, -8, r21 | ||
179 | |||
180 | Loop_line: | ||
181 | ! ldx.q r22, r36, r63 ! TAKum03020 | ||
182 | alloco r22, 32 | ||
183 | synco | ||
184 | addi r22, 32, r22 | ||
185 | ldx.q r22, r19, r23 | ||
186 | sthi.q r22, -25, r0 | ||
187 | ldx.q r22, r20, r24 | ||
188 | ldx.q r22, r21, r25 | ||
189 | stlo.q r22, -32, r0 | ||
190 | ldx.q r22, r6, r0 | ||
191 | sthi.q r22, -17, r23 | ||
192 | sthi.q r22, -9, r24 | ||
193 | sthi.q r22, -1, r25 | ||
194 | stlo.q r22, -24, r23 | ||
195 | stlo.q r22, -16, r24 | ||
196 | stlo.q r22, -8, r25 | ||
197 | bgeu r27, r22, tr0 | ||
198 | |||
199 | Loop_ua: | ||
200 | addi r22, 8, r22 | ||
201 | sthi.q r22, -1, r0 | ||
202 | stlo.q r22, -8, r0 | ||
203 | ldx.q r22, r6, r0 | ||
204 | bgtu/l r5, r22, tr1 | ||
205 | |||
206 | add r3, r4, r7 | ||
207 | ldlo.q r7, -8, r1 | ||
208 | sthi.q r22, 7, r0 | ||
209 | ldhi.q r7, -1, r7 | ||
210 | ptabs r18,tr1 | ||
211 | stlo.q r22, 0, r0 | ||
212 | or r1, r7, r1 | ||
213 | sthi.q r5, 15, r1 | ||
214 | stlo.q r5, 8, r1 | ||
215 | blink tr1, r63 | ||
216 | copy_user_memcpy_end: | ||
217 | nop | ||
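The LDUAQ/STUAQ macros above express an unaligned quadword access as a low-part/high-part instruction pair whose results are OR-combined (see the "or r0, r1, r0" after each LDUAQ/LDUAL use). The hardware semantics differ in detail, but the general idea of serving a misaligned 64-bit load from the two aligned quadwords that cover it can be sketched in C as below; the boundary caveat is exactly assumption 2 in the header comment, i.e. the aligned quadwords touched must be readable:

#include <stdint.h>

/* Little-endian sketch, analogous in spirit to the ldlo.q/ldhi.q pair.
 * May read up to 15 bytes of aligned storage around p, so the aligned
 * quadwords covering the value must be accessible (assumption 2 above).
 */
static uint64_t load_unaligned64(const void *p)
{
	uintptr_t a = (uintptr_t)p;
	const uint64_t *lo = (const uint64_t *)(a & ~(uintptr_t)7);
	unsigned int shift = (unsigned int)(a & 7) * 8;

	if (shift == 0)
		return lo[0];

	return (lo[0] >> shift) | (lo[1] << (64 - shift));
}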
diff --git a/arch/sh/lib64/dbg.c b/arch/sh/lib64/dbg.c
new file mode 100644
index 000000000000..75825ef6e084
--- /dev/null
+++ b/arch/sh/lib64/dbg.c
@@ -0,0 +1,430 @@
1 | /*-------------------------------------------------------------------------- | ||
2 | -- | ||
3 | -- Identity : Linux50 Debug Functions | ||
4 | -- | ||
5 | -- File : arch/sh/lib64/dbg.c | ||
6 | -- | ||
7 | -- Copyright 2000, 2001 STMicroelectronics Limited. | ||
8 | -- Copyright 2004 Richard Curnow (evt_debug etc) | ||
9 | -- | ||
10 | --------------------------------------------------------------------------*/ | ||
11 | #include <linux/types.h> | ||
12 | #include <linux/kernel.h> | ||
13 | #include <linux/sched.h> | ||
14 | #include <linux/mm.h> | ||
15 | #include <linux/fs.h> | ||
16 | #include <asm/mmu_context.h> | ||
17 | |||
18 | typedef u64 regType_t; | ||
19 | |||
20 | static regType_t getConfigReg(u64 id) | ||
21 | { | ||
22 | register u64 reg __asm__("r2"); | ||
23 | asm volatile ("getcfg %1, 0, %0":"=r" (reg):"r"(id)); | ||
24 | return (reg); | ||
25 | } | ||
26 | |||
27 | /* ======================================================================= */ | ||
28 | |||
29 | static char *szTab[] = { "4k", "64k", "1M", "512M" }; | ||
30 | static char *protTab[] = { "----", | ||
31 | "---R", | ||
32 | "--X-", | ||
33 | "--XR", | ||
34 | "-W--", | ||
35 | "-W-R", | ||
36 | "-WX-", | ||
37 | "-WXR", | ||
38 | "U---", | ||
39 | "U--R", | ||
40 | "U-X-", | ||
41 | "U-XR", | ||
42 | "UW--", | ||
43 | "UW-R", | ||
44 | "UWX-", | ||
45 | "UWXR" | ||
46 | }; | ||
47 | #define ITLB_BASE 0x00000000 | ||
48 | #define DTLB_BASE 0x00800000 | ||
49 | #define MAX_TLBs 64 | ||
50 | /* PTE High */ | ||
51 | #define GET_VALID(pte) ((pte) & 0x1) | ||
52 | #define GET_SHARED(pte) ((pte) & 0x2) | ||
53 | #define GET_ASID(pte) ((pte >> 2) & 0x0ff) | ||
54 | #define GET_EPN(pte) ((pte) & 0xfffff000) | ||
55 | |||
56 | /* PTE Low */ | ||
57 | #define GET_CBEHAVIOR(pte) ((pte) & 0x3) | ||
58 | #define GET_PAGE_SIZE(pte) szTab[((pte >> 3) & 0x3)] | ||
59 | #define GET_PROTECTION(pte) protTab[((pte >> 6) & 0xf)] | ||
60 | #define GET_PPN(pte) ((pte) & 0xfffff000) | ||
61 | |||
62 | #define PAGE_1K_MASK 0x00000000 | ||
63 | #define PAGE_4K_MASK 0x00000010 | ||
64 | #define PAGE_64K_MASK 0x00000080 | ||
65 | #define MMU_PAGESIZE_MASK (PAGE_64K_MASK | PAGE_4K_MASK) | ||
66 | #define PAGE_1MB_MASK MMU_PAGESIZE_MASK | ||
67 | #define PAGE_1K (1024) | ||
68 | #define PAGE_4K (1024 * 4) | ||
69 | #define PAGE_64K (1024 * 64) | ||
70 | #define PAGE_1MB (1024 * 1024) | ||
71 | |||
72 | #define HOW_TO_READ_TLB_CONTENT \ | ||
73 | "[ ID] PPN EPN ASID Share CB P.Size PROT.\n" | ||
74 | |||
75 | void print_single_tlb(unsigned long tlb, int single_print) | ||
76 | { | ||
77 | regType_t pteH; | ||
78 | regType_t pteL; | ||
79 | unsigned int valid, shared, asid, epn, cb, ppn; | ||
80 | char *pSize; | ||
81 | char *pProt; | ||
82 | |||
83 | /* | ||
84 | ** in case of single print <single_print> is true, this implies: | ||
85 | ** 1) print the TLB in any case also if NOT VALID | ||
86 | ** 2) print out the header | ||
87 | */ | ||
88 | |||
89 | pteH = getConfigReg(tlb); | ||
90 | valid = GET_VALID(pteH); | ||
91 | if (single_print) | ||
92 | printk(HOW_TO_READ_TLB_CONTENT); | ||
93 | else if (!valid) | ||
94 | return; | ||
95 | |||
96 | pteL = getConfigReg(tlb + 1); | ||
97 | |||
98 | shared = GET_SHARED(pteH); | ||
99 | asid = GET_ASID(pteH); | ||
100 | epn = GET_EPN(pteH); | ||
101 | cb = GET_CBEHAVIOR(pteL); | ||
102 | pSize = GET_PAGE_SIZE(pteL); | ||
103 | pProt = GET_PROTECTION(pteL); | ||
104 | ppn = GET_PPN(pteL); | ||
105 | printk("[%c%2ld] 0x%08x 0x%08x %03d %02x %02x %4s %s\n", | ||
106 | ((valid) ? ' ' : 'u'), ((tlb & 0x0ffff) / TLB_STEP), | ||
107 | ppn, epn, asid, shared, cb, pSize, pProt); | ||
108 | } | ||
109 | |||
110 | void print_dtlb(void) | ||
111 | { | ||
112 | int count; | ||
113 | unsigned long tlb; | ||
114 | |||
115 | printk(" ================= SH-5 D-TLBs Status ===================\n"); | ||
116 | printk(HOW_TO_READ_TLB_CONTENT); | ||
117 | tlb = DTLB_BASE; | ||
118 | for (count = 0; count < MAX_TLBs; count++, tlb += TLB_STEP) | ||
119 | print_single_tlb(tlb, 0); | ||
120 | printk | ||
121 | (" =============================================================\n"); | ||
122 | } | ||
123 | |||
124 | void print_itlb(void) | ||
125 | { | ||
126 | int count; | ||
127 | unsigned long tlb; | ||
128 | |||
129 | printk(" ================= SH-5 I-TLBs Status ===================\n"); | ||
130 | printk(HOW_TO_READ_TLB_CONTENT); | ||
131 | tlb = ITLB_BASE; | ||
132 | for (count = 0; count < MAX_TLBs; count++, tlb += TLB_STEP) | ||
133 | print_single_tlb(tlb, 0); | ||
134 | printk | ||
135 | (" =============================================================\n"); | ||
136 | } | ||
137 | |||
138 | /* ======================================================================= */ | ||
139 | |||
140 | #ifdef CONFIG_POOR_MANS_STRACE | ||
141 | |||
142 | #include "syscalltab.h" | ||
143 | |||
144 | struct ring_node { | ||
145 | int evt; | ||
146 | int ret_addr; | ||
147 | int event; | ||
148 | int tra; | ||
149 | int pid; | ||
150 | unsigned long sp; | ||
151 | unsigned long pc; | ||
152 | }; | ||
153 | |||
154 | static struct ring_node event_ring[16]; | ||
155 | static int event_ptr = 0; | ||
156 | |||
157 | struct stored_syscall_data { | ||
158 | int pid; | ||
159 | int syscall_number; | ||
160 | }; | ||
161 | |||
162 | #define N_STORED_SYSCALLS 16 | ||
163 | |||
164 | static struct stored_syscall_data stored_syscalls[N_STORED_SYSCALLS]; | ||
165 | static int syscall_next=0; | ||
166 | static int syscall_next_print=0; | ||
167 | |||
168 | void evt_debug(int evt, int ret_addr, int event, int tra, struct pt_regs *regs) | ||
169 | { | ||
170 | int syscallno = tra & 0xff; | ||
171 | unsigned long sp; | ||
172 | unsigned long stack_bottom; | ||
173 | int pid; | ||
174 | struct ring_node *rr; | ||
175 | |||
176 | pid = current->pid; | ||
177 | stack_bottom = (unsigned long) task_stack_page(current); | ||
178 | asm volatile("ori r15, 0, %0" : "=r" (sp)); | ||
179 | rr = event_ring + event_ptr; | ||
180 | rr->evt = evt; | ||
181 | rr->ret_addr = ret_addr; | ||
182 | rr->event = event; | ||
183 | rr->tra = tra; | ||
184 | rr->pid = pid; | ||
185 | rr->sp = sp; | ||
186 | rr->pc = regs->pc; | ||
187 | |||
188 | if (sp < stack_bottom + 3092) { | ||
189 | 		int i, j; | ||
190 | 		printk("evt_debug : stack underflow report\n"); | ||
191 | for (j=0, i = event_ptr; j<16; j++) { | ||
192 | rr = event_ring + i; | ||
193 | printk("evt=%08x event=%08x tra=%08x pid=%5d sp=%08lx pc=%08lx\n", | ||
194 | rr->evt, rr->event, rr->tra, rr->pid, rr->sp, rr->pc); | ||
195 | i--; | ||
196 | i &= 15; | ||
197 | } | ||
198 | panic("STACK UNDERFLOW\n"); | ||
199 | } | ||
200 | |||
201 | event_ptr = (event_ptr + 1) & 15; | ||
202 | |||
203 | if ((event == 2) && (evt == 0x160)) { | ||
204 | if (syscallno < NUM_SYSCALL_INFO_ENTRIES) { | ||
205 | /* Store the syscall information to print later. We | ||
206 | * can't print this now - currently we're running with | ||
207 | * SR.BL=1, so we can't take a tlbmiss (which could occur | ||
208 | * in the console drivers under printk). | ||
209 | * | ||
210 | * Just overwrite old entries on ring overflow - this | ||
211 | * is only for last-hope debugging. */ | ||
212 | stored_syscalls[syscall_next].pid = current->pid; | ||
213 | stored_syscalls[syscall_next].syscall_number = syscallno; | ||
214 | syscall_next++; | ||
215 | syscall_next &= (N_STORED_SYSCALLS - 1); | ||
216 | } | ||
217 | } | ||
218 | } | ||
219 | |||
220 | static void drain_syscalls(void) { | ||
221 | while (syscall_next_print != syscall_next) { | ||
222 | printk("Task %d: %s()\n", | ||
223 | stored_syscalls[syscall_next_print].pid, | ||
224 | syscall_info_table[stored_syscalls[syscall_next_print].syscall_number].name); | ||
225 | syscall_next_print++; | ||
226 | syscall_next_print &= (N_STORED_SYSCALLS - 1); | ||
227 | } | ||
228 | } | ||
229 | |||
230 | void evt_debug2(unsigned int ret) | ||
231 | { | ||
232 | drain_syscalls(); | ||
233 | printk("Task %d: syscall returns %08x\n", current->pid, ret); | ||
234 | } | ||
235 | |||
236 | void evt_debug_ret_from_irq(struct pt_regs *regs) | ||
237 | { | ||
238 | int pid; | ||
239 | struct ring_node *rr; | ||
240 | |||
241 | pid = current->pid; | ||
242 | rr = event_ring + event_ptr; | ||
243 | rr->evt = 0xffff; | ||
244 | rr->ret_addr = 0; | ||
245 | rr->event = 0; | ||
246 | rr->tra = 0; | ||
247 | rr->pid = pid; | ||
248 | rr->pc = regs->pc; | ||
249 | event_ptr = (event_ptr + 1) & 15; | ||
250 | } | ||
251 | |||
252 | void evt_debug_ret_from_exc(struct pt_regs *regs) | ||
253 | { | ||
254 | int pid; | ||
255 | struct ring_node *rr; | ||
256 | |||
257 | pid = current->pid; | ||
258 | rr = event_ring + event_ptr; | ||
259 | rr->evt = 0xfffe; | ||
260 | rr->ret_addr = 0; | ||
261 | rr->event = 0; | ||
262 | rr->tra = 0; | ||
263 | rr->pid = pid; | ||
264 | rr->pc = regs->pc; | ||
265 | event_ptr = (event_ptr + 1) & 15; | ||
266 | } | ||
267 | |||
268 | #endif /* CONFIG_POOR_MANS_STRACE */ | ||
269 | |||
270 | /* ======================================================================= */ | ||
271 | |||
272 | void show_excp_regs(char *from, int trapnr, int signr, struct pt_regs *regs) | ||
273 | { | ||
274 | |||
275 | unsigned long long ah, al, bh, bl, ch, cl; | ||
276 | |||
277 | printk("\n"); | ||
278 | printk("EXCEPTION - %s: task %d; Linux trap # %d; signal = %d\n", | ||
279 | ((from) ? from : "???"), current->pid, trapnr, signr); | ||
280 | |||
281 | asm volatile ("getcon " __EXPEVT ", %0":"=r"(ah)); | ||
282 | asm volatile ("getcon " __EXPEVT ", %0":"=r"(al)); | ||
283 | ah = (ah) >> 32; | ||
284 | al = (al) & 0xffffffff; | ||
285 | asm volatile ("getcon " __KCR1 ", %0":"=r"(bh)); | ||
286 | asm volatile ("getcon " __KCR1 ", %0":"=r"(bl)); | ||
287 | bh = (bh) >> 32; | ||
288 | bl = (bl) & 0xffffffff; | ||
289 | asm volatile ("getcon " __INTEVT ", %0":"=r"(ch)); | ||
290 | asm volatile ("getcon " __INTEVT ", %0":"=r"(cl)); | ||
291 | ch = (ch) >> 32; | ||
292 | cl = (cl) & 0xffffffff; | ||
293 | printk("EXPE: %08Lx%08Lx KCR1: %08Lx%08Lx INTE: %08Lx%08Lx\n", | ||
294 | ah, al, bh, bl, ch, cl); | ||
295 | |||
296 | asm volatile ("getcon " __PEXPEVT ", %0":"=r"(ah)); | ||
297 | asm volatile ("getcon " __PEXPEVT ", %0":"=r"(al)); | ||
298 | ah = (ah) >> 32; | ||
299 | al = (al) & 0xffffffff; | ||
300 | asm volatile ("getcon " __PSPC ", %0":"=r"(bh)); | ||
301 | asm volatile ("getcon " __PSPC ", %0":"=r"(bl)); | ||
302 | bh = (bh) >> 32; | ||
303 | bl = (bl) & 0xffffffff; | ||
304 | asm volatile ("getcon " __PSSR ", %0":"=r"(ch)); | ||
305 | asm volatile ("getcon " __PSSR ", %0":"=r"(cl)); | ||
306 | ch = (ch) >> 32; | ||
307 | cl = (cl) & 0xffffffff; | ||
308 | printk("PEXP: %08Lx%08Lx PSPC: %08Lx%08Lx PSSR: %08Lx%08Lx\n", | ||
309 | ah, al, bh, bl, ch, cl); | ||
310 | |||
311 | ah = (regs->pc) >> 32; | ||
312 | al = (regs->pc) & 0xffffffff; | ||
313 | bh = (regs->regs[18]) >> 32; | ||
314 | bl = (regs->regs[18]) & 0xffffffff; | ||
315 | ch = (regs->regs[15]) >> 32; | ||
316 | cl = (regs->regs[15]) & 0xffffffff; | ||
317 | printk("PC : %08Lx%08Lx LINK: %08Lx%08Lx SP : %08Lx%08Lx\n", | ||
318 | ah, al, bh, bl, ch, cl); | ||
319 | |||
320 | ah = (regs->sr) >> 32; | ||
321 | al = (regs->sr) & 0xffffffff; | ||
322 | asm volatile ("getcon " __TEA ", %0":"=r"(bh)); | ||
323 | asm volatile ("getcon " __TEA ", %0":"=r"(bl)); | ||
324 | bh = (bh) >> 32; | ||
325 | bl = (bl) & 0xffffffff; | ||
326 | asm volatile ("getcon " __KCR0 ", %0":"=r"(ch)); | ||
327 | asm volatile ("getcon " __KCR0 ", %0":"=r"(cl)); | ||
328 | ch = (ch) >> 32; | ||
329 | cl = (cl) & 0xffffffff; | ||
330 | printk("SR : %08Lx%08Lx TEA : %08Lx%08Lx KCR0: %08Lx%08Lx\n", | ||
331 | ah, al, bh, bl, ch, cl); | ||
332 | |||
333 | ah = (regs->regs[0]) >> 32; | ||
334 | al = (regs->regs[0]) & 0xffffffff; | ||
335 | bh = (regs->regs[1]) >> 32; | ||
336 | bl = (regs->regs[1]) & 0xffffffff; | ||
337 | ch = (regs->regs[2]) >> 32; | ||
338 | cl = (regs->regs[2]) & 0xffffffff; | ||
339 | printk("R0 : %08Lx%08Lx R1 : %08Lx%08Lx R2 : %08Lx%08Lx\n", | ||
340 | ah, al, bh, bl, ch, cl); | ||
341 | |||
342 | ah = (regs->regs[3]) >> 32; | ||
343 | al = (regs->regs[3]) & 0xffffffff; | ||
344 | bh = (regs->regs[4]) >> 32; | ||
345 | bl = (regs->regs[4]) & 0xffffffff; | ||
346 | ch = (regs->regs[5]) >> 32; | ||
347 | cl = (regs->regs[5]) & 0xffffffff; | ||
348 | printk("R3 : %08Lx%08Lx R4 : %08Lx%08Lx R5 : %08Lx%08Lx\n", | ||
349 | ah, al, bh, bl, ch, cl); | ||
350 | |||
351 | ah = (regs->regs[6]) >> 32; | ||
352 | al = (regs->regs[6]) & 0xffffffff; | ||
353 | bh = (regs->regs[7]) >> 32; | ||
354 | bl = (regs->regs[7]) & 0xffffffff; | ||
355 | ch = (regs->regs[8]) >> 32; | ||
356 | cl = (regs->regs[8]) & 0xffffffff; | ||
357 | printk("R6 : %08Lx%08Lx R7 : %08Lx%08Lx R8 : %08Lx%08Lx\n", | ||
358 | ah, al, bh, bl, ch, cl); | ||
359 | |||
360 | ah = (regs->regs[9]) >> 32; | ||
361 | al = (regs->regs[9]) & 0xffffffff; | ||
362 | bh = (regs->regs[10]) >> 32; | ||
363 | bl = (regs->regs[10]) & 0xffffffff; | ||
364 | ch = (regs->regs[11]) >> 32; | ||
365 | cl = (regs->regs[11]) & 0xffffffff; | ||
366 | printk("R9 : %08Lx%08Lx R10 : %08Lx%08Lx R11 : %08Lx%08Lx\n", | ||
367 | ah, al, bh, bl, ch, cl); | ||
368 | printk("....\n"); | ||
369 | |||
370 | ah = (regs->tregs[0]) >> 32; | ||
371 | al = (regs->tregs[0]) & 0xffffffff; | ||
372 | bh = (regs->tregs[1]) >> 32; | ||
373 | bl = (regs->tregs[1]) & 0xffffffff; | ||
374 | ch = (regs->tregs[2]) >> 32; | ||
375 | cl = (regs->tregs[2]) & 0xffffffff; | ||
376 | printk("T0 : %08Lx%08Lx T1 : %08Lx%08Lx T2 : %08Lx%08Lx\n", | ||
377 | ah, al, bh, bl, ch, cl); | ||
378 | printk("....\n"); | ||
379 | |||
380 | print_dtlb(); | ||
381 | print_itlb(); | ||
382 | } | ||
383 | |||
384 | /* ======================================================================= */ | ||
385 | |||
386 | /* | ||
387 | ** Depending on <base> scan the MMU, Data or Instruction side | ||
388 | ** looking for a valid mapping matching Eaddr & asid. | ||
389 | ** Return -1 if not found or the TLB id entry otherwise. | ||
390 | ** Note: it works only for 4k pages! | ||
391 | */ | ||
392 | static unsigned long | ||
393 | lookup_mmu_side(unsigned long base, unsigned long Eaddr, unsigned long asid) | ||
394 | { | ||
395 | regType_t pteH; | ||
396 | unsigned long epn; | ||
397 | int count; | ||
398 | |||
399 | epn = Eaddr & 0xfffff000; | ||
400 | |||
401 | for (count = 0; count < MAX_TLBs; count++, base += TLB_STEP) { | ||
402 | pteH = getConfigReg(base); | ||
403 | if (GET_VALID(pteH)) | ||
404 | if ((unsigned long) GET_EPN(pteH) == epn) | ||
405 | if ((unsigned long) GET_ASID(pteH) == asid) | ||
406 | break; | ||
407 | } | ||
408 | return ((unsigned long) ((count < MAX_TLBs) ? base : -1)); | ||
409 | } | ||
410 | |||
411 | unsigned long lookup_dtlb(unsigned long Eaddr) | ||
412 | { | ||
413 | unsigned long asid = get_asid(); | ||
414 | return (lookup_mmu_side((u64) DTLB_BASE, Eaddr, asid)); | ||
415 | } | ||
416 | |||
417 | unsigned long lookup_itlb(unsigned long Eaddr) | ||
418 | { | ||
419 | unsigned long asid = get_asid(); | ||
420 | return (lookup_mmu_side((u64) ITLB_BASE, Eaddr, asid)); | ||
421 | } | ||
422 | |||
423 | void print_page(struct page *page) | ||
424 | { | ||
425 | printk(" page[%p] -> index 0x%lx, count 0x%x, flags 0x%lx\n", | ||
426 | page, page->index, page_count(page), page->flags); | ||
427 | printk(" address_space = %p, pages =%ld\n", page->mapping, | ||
428 | page->mapping->nrpages); | ||
429 | |||
430 | } | ||
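The GET_* macros earlier in the file are plain bit-field extractions from the raw PTEH/PTEL configuration words. The stand-alone snippet below decodes a pair of made-up values with the same masks, just to show which bits land where; the PTEH/PTEL values are purely illustrative, not taken from real hardware:

#include <stdio.h>

#define GET_VALID(pte)	((pte) & 0x1)
#define GET_SHARED(pte)	((pte) & 0x2)
#define GET_ASID(pte)	(((pte) >> 2) & 0x0ff)
#define GET_EPN(pte)	((pte) & 0xfffff000)
#define GET_PPN(pte)	((pte) & 0xfffff000)

int main(void)
{
	unsigned long long pteH = 0x84011005ULL;	/* hypothetical PTEH */
	unsigned long long pteL = 0x0c021059ULL;	/* hypothetical PTEL */

	printf("valid=%llu shared=%llu asid=0x%02llx epn=0x%08llx ppn=0x%08llx\n",
	       GET_VALID(pteH), GET_SHARED(pteH) >> 1,
	       GET_ASID(pteH), GET_EPN(pteH), GET_PPN(pteL));
	return 0;
}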
diff --git a/arch/sh/lib64/memcpy.c b/arch/sh/lib64/memcpy.c
new file mode 100644
index 000000000000..fba436a92bfa
--- /dev/null
+++ b/arch/sh/lib64/memcpy.c
@@ -0,0 +1,81 @@
1 | /* | ||
2 | * Copyright (C) 2002 Mark Debbage (Mark.Debbage@superh.com) | ||
3 | * | ||
4 | * May be copied or modified under the terms of the GNU General Public | ||
5 | * License. See linux/COPYING for more information. | ||
6 | * | ||
7 | */ | ||
8 | |||
9 | #include <linux/types.h> | ||
10 | #include <asm/string.h> | ||
11 | |||
12 | // This is a simplistic optimization of memcpy to increase the | ||
13 | // granularity of access beyond one byte using aligned | ||
14 | // loads and stores. This is not an optimal implementation | ||
15 | // for SH-5 (especially with regard to prefetching and the cache), | ||
16 | // and a better version should be provided later ... | ||
17 | |||
18 | void *memcpy(void *dest, const void *src, size_t count) | ||
19 | { | ||
20 | char *d = (char *) dest, *s = (char *) src; | ||
21 | |||
22 | if (count >= 32) { | ||
23 | int i = 8 - (((unsigned long) d) & 0x7); | ||
24 | |||
25 | if (i != 8) | ||
26 | while (i-- && count--) { | ||
27 | *d++ = *s++; | ||
28 | } | ||
29 | |||
30 | if (((((unsigned long) d) & 0x7) == 0) && | ||
31 | ((((unsigned long) s) & 0x7) == 0)) { | ||
32 | while (count >= 32) { | ||
33 | unsigned long long t1, t2, t3, t4; | ||
34 | t1 = *(unsigned long long *) (s); | ||
35 | t2 = *(unsigned long long *) (s + 8); | ||
36 | t3 = *(unsigned long long *) (s + 16); | ||
37 | t4 = *(unsigned long long *) (s + 24); | ||
38 | *(unsigned long long *) (d) = t1; | ||
39 | *(unsigned long long *) (d + 8) = t2; | ||
40 | *(unsigned long long *) (d + 16) = t3; | ||
41 | *(unsigned long long *) (d + 24) = t4; | ||
42 | d += 32; | ||
43 | s += 32; | ||
44 | count -= 32; | ||
45 | } | ||
46 | while (count >= 8) { | ||
47 | *(unsigned long long *) d = | ||
48 | *(unsigned long long *) s; | ||
49 | d += 8; | ||
50 | s += 8; | ||
51 | count -= 8; | ||
52 | } | ||
53 | } | ||
54 | |||
55 | if (((((unsigned long) d) & 0x3) == 0) && | ||
56 | ((((unsigned long) s) & 0x3) == 0)) { | ||
57 | while (count >= 4) { | ||
58 | *(unsigned long *) d = *(unsigned long *) s; | ||
59 | d += 4; | ||
60 | s += 4; | ||
61 | count -= 4; | ||
62 | } | ||
63 | } | ||
64 | |||
65 | if (((((unsigned long) d) & 0x1) == 0) && | ||
66 | ((((unsigned long) s) & 0x1) == 0)) { | ||
67 | while (count >= 2) { | ||
68 | *(unsigned short *) d = *(unsigned short *) s; | ||
69 | d += 2; | ||
70 | s += 2; | ||
71 | count -= 2; | ||
72 | } | ||
73 | } | ||
74 | } | ||
75 | |||
76 | while (count--) { | ||
77 | *d++ = *s++; | ||
78 | } | ||
79 | |||
80 | 	return dest; | ||
81 | } | ||
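Since the routine has separate paths for 8-byte, 4-byte, 2-byte and single-byte copies, a quick user-space check against the C library across misaligned offsets and lengths exercises each of them. A sketch, assuming the function above is built into the test under the name sh64_memcpy() so it does not collide with libc's memcpy:

#include <stdio.h>
#include <string.h>

void *sh64_memcpy(void *dest, const void *src, size_t count);	/* routine above, renamed */

int main(void)
{
	static char src[256], dst[256], ref[256];
	size_t off, len;
	int i;

	for (i = 0; i < 256; i++)
		src[i] = (char)(i * 7 + 3);	/* arbitrary test pattern */

	for (off = 0; off < 8; off++) {
		for (len = 0; len + off < 200; len++) {
			memset(dst, 0, sizeof(dst));
			memset(ref, 0, sizeof(ref));
			memcpy(ref + off, src + off, len);
			sh64_memcpy(dst + off, src + off, len);
			if (memcmp(dst, ref, sizeof(dst)) != 0)
				printf("mismatch at off=%zu len=%zu\n", off, len);
		}
	}
	printf("done\n");
	return 0;
}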
diff --git a/arch/sh/lib64/panic.c b/arch/sh/lib64/panic.c
new file mode 100644
index 000000000000..ff559e2a96f7
--- /dev/null
+++ b/arch/sh/lib64/panic.c
@@ -0,0 +1,58 @@
1 | /* | ||
2 | * Copyright (C) 2003 Richard Curnow, SuperH UK Limited | ||
3 | * | ||
4 | * This file is subject to the terms and conditions of the GNU General Public | ||
5 | * License. See the file "COPYING" in the main directory of this archive | ||
6 | * for more details. | ||
7 | */ | ||
8 | |||
9 | #include <linux/kernel.h> | ||
10 | #include <asm/io.h> | ||
11 | #include <asm/cpu/registers.h> | ||
12 | |||
13 | /* THIS IS A PHYSICAL ADDRESS */ | ||
14 | #define HDSP2534_ADDR (0x04002100) | ||
15 | |||
16 | #ifdef CONFIG_SH_CAYMAN | ||
17 | |||
18 | static void poor_mans_delay(void) | ||
19 | { | ||
20 | int i; | ||
21 | for (i = 0; i < 2500000; i++) { | ||
22 | } /* poor man's delay */ | ||
23 | } | ||
24 | |||
25 | static void show_value(unsigned long x) | ||
26 | { | ||
27 | int i; | ||
28 | unsigned nibble; | ||
29 | for (i = 0; i < 8; i++) { | ||
30 | nibble = ((x >> (i * 4)) & 0xf); | ||
31 | |||
32 | ctrl_outb(nibble + ((nibble > 9) ? 55 : 48), | ||
33 | HDSP2534_ADDR + 0xe0 + ((7 - i) << 2)); | ||
34 | } | ||
35 | } | ||
36 | |||
37 | #endif | ||
38 | |||
39 | void | ||
40 | panic_handler(unsigned long panicPC, unsigned long panicSSR, | ||
41 | unsigned long panicEXPEVT) | ||
42 | { | ||
43 | #ifdef CONFIG_SH_CAYMAN | ||
44 | while (1) { | ||
45 | /* This piece of code displays the PC on the LED display */ | ||
46 | show_value(panicPC); | ||
47 | poor_mans_delay(); | ||
48 | show_value(panicSSR); | ||
49 | poor_mans_delay(); | ||
50 | show_value(panicEXPEVT); | ||
51 | poor_mans_delay(); | ||
52 | } | ||
53 | #endif | ||
54 | |||
55 | /* Never return from the panic handler */ | ||
56 | for (;;) ; | ||
57 | |||
58 | } | ||
diff --git a/arch/sh/lib64/udelay.c b/arch/sh/lib64/udelay.c
new file mode 100644
index 000000000000..23c7d17fb9f7
--- /dev/null
+++ b/arch/sh/lib64/udelay.c
@@ -0,0 +1,56 @@
1 | /* | ||
2 | * arch/sh/lib64/udelay.c | ||
3 | * | ||
4 | * Delay routines, using a pre-computed "loops_per_jiffy" value. | ||
5 | * | ||
6 | * Copyright (C) 2000, 2001 Paolo Alberelli | ||
7 | * Copyright (C) 2003, 2004 Paul Mundt | ||
8 | * | ||
9 | * This file is subject to the terms and conditions of the GNU General Public | ||
10 | * License. See the file "COPYING" in the main directory of this archive | ||
11 | * for more details. | ||
12 | */ | ||
13 | #include <linux/sched.h> | ||
14 | #include <asm/param.h> | ||
15 | |||
16 | /* | ||
17 | * Use only for very small delays (< 1 msec). | ||
18 | * | ||
19 | * The active part of our cycle counter is only 32-bits wide, and | ||
20 | * we're treating the difference between two marks as signed. On | ||
21 | * a 1GHz box, that's about 2 seconds. | ||
22 | */ | ||
23 | |||
24 | void __delay(int loops) | ||
25 | { | ||
26 | long long dummy; | ||
27 | __asm__ __volatile__("gettr tr0, %1\n\t" | ||
28 | "pta $+4, tr0\n\t" | ||
29 | "addi %0, -1, %0\n\t" | ||
30 | "bne %0, r63, tr0\n\t" | ||
31 | "ptabs %1, tr0\n\t":"=r"(loops), | ||
32 | "=r"(dummy) | ||
33 | :"0"(loops)); | ||
34 | } | ||
35 | |||
36 | void __udelay(unsigned long long usecs, unsigned long lpj) | ||
37 | { | ||
38 | usecs *= (((unsigned long long) HZ << 32) / 1000000) * lpj; | ||
39 | __delay((long long) usecs >> 32); | ||
40 | } | ||
41 | |||
42 | void __ndelay(unsigned long long nsecs, unsigned long lpj) | ||
43 | { | ||
44 | nsecs *= (((unsigned long long) HZ << 32) / 1000000000) * lpj; | ||
45 | __delay((long long) nsecs >> 32); | ||
46 | } | ||
47 | |||
48 | void udelay(unsigned long usecs) | ||
49 | { | ||
50 | __udelay(usecs, cpu_data[raw_smp_processor_id()].loops_per_jiffy); | ||
51 | } | ||
52 | |||
53 | void ndelay(unsigned long nsecs) | ||
54 | { | ||
55 | __ndelay(nsecs, cpu_data[raw_smp_processor_id()].loops_per_jiffy); | ||
56 | } | ||
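__udelay() above does its microseconds-to-loops conversion in 32.32 fixed point: the constant (HZ << 32) / 1000000 is HZ/10^6 scaled by 2^32, so after multiplying by the requested microseconds and by loops_per_jiffy, shifting right by 32 gives approximately usecs * lpj * HZ / 10^6 delay-loop iterations. A quick user-space check of that arithmetic; the HZ and loops_per_jiffy numbers here are made up for illustration:

#include <stdio.h>

int main(void)
{
	unsigned long long usecs = 100;		/* requested delay */
	unsigned long lpj = 498073;		/* hypothetical loops_per_jiffy */
	unsigned long hz = 100;			/* hypothetical HZ */

	/* Same fixed-point computation as __udelay() above. */
	unsigned long long loops =
		(usecs * (((unsigned long long)hz << 32) / 1000000) * lpj) >> 32;

	/* Straightforward reference value. */
	unsigned long long ref = usecs * lpj * hz / 1000000;

	printf("fixed point: %llu loops, reference: %llu loops\n", loops, ref);
	return 0;
}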