Diffstat (limited to 'arch/sh/lib64')
-rw-r--r--  arch/sh/lib64/.gitignore         |   1
-rw-r--r--  arch/sh/lib64/Makefile           |  19
-rw-r--r--  arch/sh/lib64/c-checksum.c       | 217
-rw-r--r--  arch/sh/lib64/copy_user_memcpy.S | 217
-rw-r--r--  arch/sh/lib64/dbg.c              | 430
-rw-r--r--  arch/sh/lib64/io.c               | 128
-rw-r--r--  arch/sh/lib64/iomap.c            |  54
-rw-r--r--  arch/sh/lib64/memcpy.c           |  81
-rw-r--r--  arch/sh/lib64/page_clear.S       |  54
-rw-r--r--  arch/sh/lib64/page_copy.S        |  91
-rw-r--r--  arch/sh/lib64/panic.c            |  58
-rw-r--r--  arch/sh/lib64/udelay.c           |  59
12 files changed, 1409 insertions, 0 deletions
diff --git a/arch/sh/lib64/.gitignore b/arch/sh/lib64/.gitignore
new file mode 100644
index 000000000000..3508c2cb23c4
--- /dev/null
+++ b/arch/sh/lib64/.gitignore
@@ -0,0 +1 @@
syscalltab.h
diff --git a/arch/sh/lib64/Makefile b/arch/sh/lib64/Makefile
new file mode 100644
index 000000000000..6a4cc3f9c0b1
--- /dev/null
+++ b/arch/sh/lib64/Makefile
@@ -0,0 +1,19 @@
1#
2# This file is subject to the terms and conditions of the GNU General Public
3# License. See the file "COPYING" in the main directory of this archive
4# for more details.
5#
6# Copyright (C) 2000, 2001 Paolo Alberelli
7# Copyright (C) 2003 Paul Mundt
8#
9# Makefile for the SH-5 specific library files.
10#
11# Note! Dependencies are done automagically by 'make dep', which also
12# removes any old dependencies. DON'T put your own dependencies here
13# unless it's something special (ie not a .c file).
14#
15
16# Panic should really be compiled as PIC
17lib-y := udelay.o c-checksum.o dbg.o io.o panic.o memcpy.o copy_user_memcpy.o \
18 page_copy.o page_clear.o iomap.o
19
diff --git a/arch/sh/lib64/c-checksum.c b/arch/sh/lib64/c-checksum.c
new file mode 100644
index 000000000000..053137abd8a0
--- /dev/null
+++ b/arch/sh/lib64/c-checksum.c
@@ -0,0 +1,217 @@
1/*
2 * arch/sh64/lib/c-checksum.c
3 *
4 * This file contains network checksum routines that are better done
 5 * in an architecture-specific manner due to speed.
6 */
7
8#undef DEBUG
9
10#include <linux/string.h>
11#include <linux/errno.h>
12#include <linux/kernel.h>
13#include <linux/module.h>
14#include <asm/byteorder.h>
15#include <asm/uaccess.h>
16
17static inline unsigned short from64to16(unsigned long long x)
18{
19 /* add up 32-bit words for 33 bits */
20 x = (x & 0xffffffff) + (x >> 32);
21 /* add up 16-bit and 17-bit words for 17+c bits */
22 x = (x & 0xffff) + (x >> 16);
23 /* add up 16-bit and 2-bit for 16+c bit */
24 x = (x & 0xffff) + (x >> 16);
25 /* add up carry.. */
26 x = (x & 0xffff) + (x >> 16);
27 return x;
28}
29
30static inline unsigned short foldto16(unsigned long x)
31{
32 /* add up 16-bit for 17 bits */
33 x = (x & 0xffff) + (x >> 16);
34 /* add up carry.. */
35 x = (x & 0xffff) + (x >> 16);
36 return x;
37}
38
39static inline unsigned short myfoldto16(unsigned long long x)
40{
 41	/* Fold down to 32-bits so we don't lose bits in the typedef-less
 42	   network stack. */
43 /* 64 to 33 */
44 x = (x & 0xffffffff) + (x >> 32);
45 /* 33 to 32 */
46 x = (x & 0xffffffff) + (x >> 32);
47
48 /* add up 16-bit for 17 bits */
49 x = (x & 0xffff) + (x >> 16);
50 /* add up carry.. */
51 x = (x & 0xffff) + (x >> 16);
52 return x;
53}
54
55#define odd(x) ((x)&1)
56#define U16(x) ntohs(x)
57
58static unsigned long do_csum(const unsigned char *buff, int len)
59{
60 int odd, count;
61 unsigned long result = 0;
62
63 pr_debug("do_csum buff %p, len %d (0x%x)\n", buff, len, len);
 64#ifdef DEBUG
 65	{
 66		int i;
 67		for (i = 0; i < len; i++) {
 68			if ((i % 26) == 0)
 69				printk("\n");
 70			printk("%02X ", buff[i]);
 71		}
 72	}
 73#endif
71
72 if (len <= 0)
73 goto out;
74
75 odd = 1 & (unsigned long) buff;
76 if (odd) {
77 result = *buff << 8;
78 len--;
79 buff++;
80 }
81 count = len >> 1; /* nr of 16-bit words.. */
82 if (count) {
83 if (2 & (unsigned long) buff) {
84 result += *(unsigned short *) buff;
85 count--;
86 len -= 2;
87 buff += 2;
88 }
89 count >>= 1; /* nr of 32-bit words.. */
90 if (count) {
91 unsigned long carry = 0;
92 do {
93 unsigned long w = *(unsigned long *) buff;
94 buff += 4;
95 count--;
96 result += carry;
97 result += w;
98 carry = (w > result);
99 } while (count);
100 result += carry;
101 result = (result & 0xffff) + (result >> 16);
102 }
103 if (len & 2) {
104 result += *(unsigned short *) buff;
105 buff += 2;
106 }
107 }
108 if (len & 1)
109 result += *buff;
110 result = foldto16(result);
111 if (odd)
112 result = ((result >> 8) & 0xff) | ((result & 0xff) << 8);
113
114 pr_debug("\nCHECKSUM is 0x%lx\n", result);
115
116 out:
117 return result;
118}
119
120/* computes the checksum of a memory block at buff, length len,
121 and adds in "sum" (32-bit) */
122__wsum csum_partial(const void *buff, int len, __wsum sum)
123{
124 unsigned long long result = do_csum(buff, len);
125
126 /* add in old sum, and carry.. */
127 result += (__force u32)sum;
128 /* 32+c bits -> 32 bits */
129 result = (result & 0xffffffff) + (result >> 32);
130
131 pr_debug("csum_partial, buff %p len %d sum 0x%x result=0x%016Lx\n",
132 buff, len, sum, result);
133
134 return (__force __wsum)result;
135}
136
137/* Copy while checksumming, otherwise like csum_partial. */
138__wsum
139csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum)
140{
141 sum = csum_partial(src, len, sum);
142 memcpy(dst, src, len);
143
144 return sum;
145}
146
147/* Copy from userspace and compute checksum. If we catch an exception
148 then zero the rest of the buffer. */
149__wsum
150csum_partial_copy_from_user(const void __user *src, void *dst, int len,
151 __wsum sum, int *err_ptr)
152{
153 int missing;
154
155 pr_debug
156 ("csum_partial_copy_from_user src %p, dest %p, len %d, sum %08x, err_ptr %p\n",
157 src, dst, len, sum, err_ptr);
158 missing = copy_from_user(dst, src, len);
159 pr_debug(" access_ok %d\n", __access_ok((unsigned long) src, len));
160 pr_debug(" missing %d\n", missing);
161 if (missing) {
162 memset(dst + len - missing, 0, missing);
163 *err_ptr = -EFAULT;
164 }
165
166 return csum_partial(dst, len, sum);
167}
168
169/* Copy to userspace and compute checksum. */
170__wsum
171csum_partial_copy_to_user(const unsigned char *src, unsigned char *dst, int len,
172 __wsum sum, int *err_ptr)
173{
174 sum = csum_partial(src, len, sum);
175
176 if (copy_to_user(dst, src, len))
177 *err_ptr = -EFAULT;
178
179 return sum;
180}
181
182/*
183 * This is a version of ip_compute_csum() optimized for IP headers,
184 * which always checksum on 4 octet boundaries.
185 */
186__sum16 ip_fast_csum(const void *iph, unsigned int ihl)
187{
188 pr_debug("ip_fast_csum %p,%d\n", iph, ihl);
189
190 return (__force __sum16)~do_csum(iph, ihl * 4);
191}
192
193__wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr,
194 unsigned short len,
195 unsigned short proto, __wsum sum)
196{
197 unsigned long long result;
198
199 pr_debug("ntohs(0x%x)=0x%x\n", 0xdead, ntohs(0xdead));
200 pr_debug("htons(0x%x)=0x%x\n", 0xdead, htons(0xdead));
201
202 result = (__force u64) saddr + (__force u64) daddr +
203 (__force u64) sum + ((len + proto) << 8);
204
 205	/* Fold down to 32-bits so we don't lose bits in the typedef-less
 206	   network stack. */
207 /* 64 to 33 */
208 result = (result & 0xffffffff) + (result >> 32);
209 /* 33 to 32 */
210 result = (result & 0xffffffff) + (result >> 32);
211
212 pr_debug("%s saddr %x daddr %x len %x proto %x sum %x result %08Lx\n",
213 __FUNCTION__, saddr, daddr, len, proto, sum, result);
214
215 return (__wsum)result;
216}
217EXPORT_SYMBOL(csum_tcpudp_nofold);
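
The helpers above fold a 64-bit ones-complement accumulator in halves: 64 to
33 bits, then down to 16 plus carry. A minimal stand-alone sketch of the same
scheme (illustrative only, not part of this patch; fold64to16 is a made-up
name), with one worked value:

	#include <stdio.h>

	/* same stepwise folding as from64to16() */
	static unsigned short fold64to16(unsigned long long x)
	{
		x = (x & 0xffffffffULL) + (x >> 32);	/* 64 -> 33 bits */
		x = (x & 0xffff) + (x >> 16);		/* 33 -> 17+c bits */
		x = (x & 0xffff) + (x >> 16);		/* 17 -> 16+c bits */
		x = (x & 0xffff) + (x >> 16);		/* absorb final carry */
		return (unsigned short) x;
	}

	int main(void)
	{
		/* 0x00000001ffff0003 -> 0xffff0004 -> 0x10003 -> 0x0004 */
		printf("%04x\n", fold64to16(0x00000001ffff0003ULL));
		return 0;
	}
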
diff --git a/arch/sh/lib64/copy_user_memcpy.S b/arch/sh/lib64/copy_user_memcpy.S
new file mode 100644
index 000000000000..2a62816d2ddd
--- /dev/null
+++ b/arch/sh/lib64/copy_user_memcpy.S
@@ -0,0 +1,217 @@
1!
2! Fast SH memcpy
3!
4! by Toshiyasu Morita (tm@netcom.com)
 5! hacked by J"orn Rennecke (joern.rennecke@superh.com) ("o for o-umlaut)
6! SH5 code Copyright 2002 SuperH Ltd.
7!
8! Entry: ARG0: destination pointer
9! ARG1: source pointer
10! ARG2: byte count
11!
12! Exit: RESULT: destination pointer
13! any other registers in the range r0-r7: trashed
14!
15! Notes: Usually one wants to do small reads and write a longword, but
 16! unfortunately it is difficult in some cases to concatenate bytes
17! into a longword on the SH, so this does a longword read and small
18! writes.
19!
20! This implementation makes two assumptions about how it is called:
21!
22! 1.: If the byte count is nonzero, the address of the last byte to be
23! copied is unsigned greater than the address of the first byte to
24! be copied. This could be easily swapped for a signed comparison,
25! but the algorithm used needs some comparison.
26!
 27! 2.: When there are two or three bytes in the last word of an 11-or-more
 28! bytes memory chunk to be copied, the rest of the word can be read
 29! without side effects.
 30! This could be easily changed by increasing the minimum size of
 31! a fast memcpy and the amount subtracted from r7 before L_2l_loop by 2;
 32! however, this would cost a few extra cycles on average.
 33! For SHmedia, the assumption is that any quadword can be read in its
 34! entirety if at least one byte is included in the copy.
35
36/* Imported into Linux kernel by Richard Curnow. This is used to implement the
37 __copy_user function in the general case, so it has to be a distinct
38 function from intra-kernel memcpy to allow for exception fix-ups in the
39 event that the user pointer is bad somewhere in the copy (e.g. due to
40 running off the end of the vma).
41
42 Note, this algorithm will be slightly wasteful in the case where the source
43 and destination pointers are equally aligned, because the stlo/sthi pairs
44 could then be merged back into single stores. If there are a lot of cache
45 misses, this is probably offset by the stall lengths on the preloads.
46
47*/
48
49/* NOTE : Prefetches removed and allocos guarded by synco to avoid TAKum03020
50 * erratum. The first two prefetches are nop-ed out to avoid upsetting the
51 * instruction counts used in the jump address calculation.
52 * */
53
54 .section .text..SHmedia32,"ax"
55 .little
56 .balign 32
57 .global copy_user_memcpy
58 .global copy_user_memcpy_end
59copy_user_memcpy:
60
61#define LDUAQ(P,O,D0,D1) ldlo.q P,O,D0; ldhi.q P,O+7,D1
62#define STUAQ(P,O,D0,D1) stlo.q P,O,D0; sthi.q P,O+7,D1
63#define LDUAL(P,O,D0,D1) ldlo.l P,O,D0; ldhi.l P,O+3,D1
64#define STUAL(P,O,D0,D1) stlo.l P,O,D0; sthi.l P,O+3,D1
65
66 nop ! ld.b r3,0,r63 ! TAKum03020
67 pta/l Large,tr0
68 movi 25,r0
69 bgeu/u r4,r0,tr0
70 nsb r4,r0
71 shlli r0,5,r0
72 movi (L1-L0+63*32 + 1) & 0xffff,r1
73 sub r1, r0, r0
74L0: ptrel r0,tr0
75 add r2,r4,r5
76 ptabs r18,tr1
77 add r3,r4,r6
78 blink tr0,r63
79
80/* Rearranged to make cut2 safe */
81 .balign 8
82L4_7: /* 4..7 byte memcpy cntd. */
83 stlo.l r2, 0, r0
84 or r6, r7, r6
85 sthi.l r5, -1, r6
86 stlo.l r5, -4, r6
87 blink tr1,r63
88
89 .balign 8
90L1: /* 0 byte memcpy */
91 nop
92 blink tr1,r63
93 nop
94 nop
95 nop
96 nop
97
98L2_3: /* 2 or 3 byte memcpy cntd. */
99 st.b r5,-1,r6
100 blink tr1,r63
101
102 /* 1 byte memcpy */
103 ld.b r3,0,r0
104 st.b r2,0,r0
105 blink tr1,r63
106
107L8_15: /* 8..15 byte memcpy cntd. */
108 stlo.q r2, 0, r0
109 or r6, r7, r6
110 sthi.q r5, -1, r6
111 stlo.q r5, -8, r6
112 blink tr1,r63
113
114 /* 2 or 3 byte memcpy */
115 ld.b r3,0,r0
116 nop ! ld.b r2,0,r63 ! TAKum03020
117 ld.b r3,1,r1
118 st.b r2,0,r0
119 pta/l L2_3,tr0
120 ld.b r6,-1,r6
121 st.b r2,1,r1
122 blink tr0, r63
123
124 /* 4 .. 7 byte memcpy */
125 LDUAL (r3, 0, r0, r1)
126 pta L4_7, tr0
127 ldlo.l r6, -4, r7
128 or r0, r1, r0
129 sthi.l r2, 3, r0
130 ldhi.l r6, -1, r6
131 blink tr0, r63
132
133 /* 8 .. 15 byte memcpy */
134 LDUAQ (r3, 0, r0, r1)
135 pta L8_15, tr0
136 ldlo.q r6, -8, r7
137 or r0, r1, r0
138 sthi.q r2, 7, r0
139 ldhi.q r6, -1, r6
140 blink tr0, r63
141
142 /* 16 .. 24 byte memcpy */
143 LDUAQ (r3, 0, r0, r1)
144 LDUAQ (r3, 8, r8, r9)
145 or r0, r1, r0
146 sthi.q r2, 7, r0
147 or r8, r9, r8
148 sthi.q r2, 15, r8
149 ldlo.q r6, -8, r7
150 ldhi.q r6, -1, r6
151 stlo.q r2, 8, r8
152 stlo.q r2, 0, r0
153 or r6, r7, r6
154 sthi.q r5, -1, r6
155 stlo.q r5, -8, r6
156 blink tr1,r63
157
158Large:
159 ! ld.b r2, 0, r63 ! TAKum03020
160 pta/l Loop_ua, tr1
161 ori r3, -8, r7
162 sub r2, r7, r22
163 sub r3, r2, r6
164 add r2, r4, r5
165 ldlo.q r3, 0, r0
166 addi r5, -16, r5
167 movi 64+8, r27 ! could subtract r7 from that.
168 stlo.q r2, 0, r0
169 sthi.q r2, 7, r0
170 ldx.q r22, r6, r0
171 bgtu/l r27, r4, tr1
172
173 addi r5, -48, r27
174 pta/l Loop_line, tr0
175 addi r6, 64, r36
176 addi r6, -24, r19
177 addi r6, -16, r20
178 addi r6, -8, r21
179
180Loop_line:
181 ! ldx.q r22, r36, r63 ! TAKum03020
182 alloco r22, 32
183 synco
184 addi r22, 32, r22
185 ldx.q r22, r19, r23
186 sthi.q r22, -25, r0
187 ldx.q r22, r20, r24
188 ldx.q r22, r21, r25
189 stlo.q r22, -32, r0
190 ldx.q r22, r6, r0
191 sthi.q r22, -17, r23
192 sthi.q r22, -9, r24
193 sthi.q r22, -1, r25
194 stlo.q r22, -24, r23
195 stlo.q r22, -16, r24
196 stlo.q r22, -8, r25
197 bgeu r27, r22, tr0
198
199Loop_ua:
200 addi r22, 8, r22
201 sthi.q r22, -1, r0
202 stlo.q r22, -8, r0
203 ldx.q r22, r6, r0
204 bgtu/l r5, r22, tr1
205
206 add r3, r4, r7
207 ldlo.q r7, -8, r1
208 sthi.q r22, 7, r0
209 ldhi.q r7, -1, r7
210 ptabs r18,tr1
211 stlo.q r22, 0, r0
212 or r1, r7, r1
213 sthi.q r5, 15, r1
214 stlo.q r5, 8, r1
215 blink tr1, r63
216copy_user_memcpy_end:
217 nop
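
The LDUAQ/LDUAL macros above pair ldlo/ldhi loads and merge the halves with
an or. In rough C terms the effect is the classic two-aligned-loads idiom for
an unaligned fetch. A little-endian sketch under that reading (ldual_model is
a hypothetical helper; it also mirrors the file's assumption that a straddled
quadword is readable in its entirety):

	#include <stdint.h>

	/* model of LDUAL: combine the two aligned words straddling p */
	static uint32_t ldual_model(const unsigned char *p)
	{
		uintptr_t a = (uintptr_t) p;
		unsigned int shift = (a & 3) * 8;
		const uint32_t *w = (const uint32_t *) (a & ~(uintptr_t) 3);

		if (shift == 0)
			return w[0];		/* already aligned */
		/* ldlo part: bytes from p up to the boundary, in the low
		   end; ldhi part: bytes past the boundary, in the high end */
		return (w[0] >> shift) | (w[1] << (32 - shift));
	}
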
diff --git a/arch/sh/lib64/dbg.c b/arch/sh/lib64/dbg.c
new file mode 100644
index 000000000000..97816e0baf19
--- /dev/null
+++ b/arch/sh/lib64/dbg.c
@@ -0,0 +1,430 @@
1/*--------------------------------------------------------------------------
2--
 3-- Identity : Linux50 Debug Functions
 4--
 5-- File : arch/sh64/lib/dbg.c
6--
7-- Copyright 2000, 2001 STMicroelectronics Limited.
8-- Copyright 2004 Richard Curnow (evt_debug etc)
9--
10--------------------------------------------------------------------------*/
11#include <linux/types.h>
12#include <linux/kernel.h>
13#include <linux/sched.h>
14#include <linux/mm.h>
15#include <linux/fs.h>
16#include <asm/mmu_context.h>
17
18typedef u64 regType_t;
19
20static regType_t getConfigReg(u64 id)
21{
22 register u64 reg __asm__("r2");
23 asm volatile ("getcfg %1, 0, %0":"=r" (reg):"r"(id));
24 return (reg);
25}
26
27/* ======================================================================= */
28
29static char *szTab[] = { "4k", "64k", "1M", "512M" };
30static char *protTab[] = { "----",
31 "---R",
32 "--X-",
33 "--XR",
34 "-W--",
35 "-W-R",
36 "-WX-",
37 "-WXR",
38 "U---",
39 "U--R",
40 "U-X-",
41 "U-XR",
42 "UW--",
43 "UW-R",
44 "UWX-",
45 "UWXR"
46};
47#define ITLB_BASE 0x00000000
48#define DTLB_BASE 0x00800000
49#define MAX_TLBs 64
50/* PTE High */
51#define GET_VALID(pte) ((pte) & 0x1)
52#define GET_SHARED(pte) ((pte) & 0x2)
53#define GET_ASID(pte) ((pte >> 2) & 0x0ff)
54#define GET_EPN(pte) ((pte) & 0xfffff000)
55
56/* PTE Low */
57#define GET_CBEHAVIOR(pte) ((pte) & 0x3)
58#define GET_PAGE_SIZE(pte) szTab[((pte >> 3) & 0x3)]
59#define GET_PROTECTION(pte) protTab[((pte >> 6) & 0xf)]
60#define GET_PPN(pte) ((pte) & 0xfffff000)
61
62#define PAGE_1K_MASK 0x00000000
63#define PAGE_4K_MASK 0x00000010
64#define PAGE_64K_MASK 0x00000080
65#define MMU_PAGESIZE_MASK (PAGE_64K_MASK | PAGE_4K_MASK)
66#define PAGE_1MB_MASK MMU_PAGESIZE_MASK
67#define PAGE_1K (1024)
68#define PAGE_4K (1024 * 4)
69#define PAGE_64K (1024 * 64)
70#define PAGE_1MB (1024 * 1024)
71
72#define HOW_TO_READ_TLB_CONTENT \
73 "[ ID] PPN EPN ASID Share CB P.Size PROT.\n"
74
75void print_single_tlb(unsigned long tlb, int single_print)
76{
77 regType_t pteH;
78 regType_t pteL;
79 unsigned int valid, shared, asid, epn, cb, ppn;
80 char *pSize;
81 char *pProt;
82
83 /*
 84	** When <single_print> is true, this implies:
85 ** 1) print the TLB in any case also if NOT VALID
86 ** 2) print out the header
87 */
88
89 pteH = getConfigReg(tlb);
90 valid = GET_VALID(pteH);
91 if (single_print)
92 printk(HOW_TO_READ_TLB_CONTENT);
93 else if (!valid)
94 return;
95
96 pteL = getConfigReg(tlb + 1);
97
98 shared = GET_SHARED(pteH);
99 asid = GET_ASID(pteH);
100 epn = GET_EPN(pteH);
101 cb = GET_CBEHAVIOR(pteL);
102 pSize = GET_PAGE_SIZE(pteL);
103 pProt = GET_PROTECTION(pteL);
104 ppn = GET_PPN(pteL);
105 printk("[%c%2ld] 0x%08x 0x%08x %03d %02x %02x %4s %s\n",
106 ((valid) ? ' ' : 'u'), ((tlb & 0x0ffff) / TLB_STEP),
107 ppn, epn, asid, shared, cb, pSize, pProt);
108}
109
110void print_dtlb(void)
111{
112 int count;
113 unsigned long tlb;
114
115 printk(" ================= SH-5 D-TLBs Status ===================\n");
116 printk(HOW_TO_READ_TLB_CONTENT);
117 tlb = DTLB_BASE;
118 for (count = 0; count < MAX_TLBs; count++, tlb += TLB_STEP)
119 print_single_tlb(tlb, 0);
120 printk
121 (" =============================================================\n");
122}
123
124void print_itlb(void)
125{
126 int count;
127 unsigned long tlb;
128
129 printk(" ================= SH-5 I-TLBs Status ===================\n");
130 printk(HOW_TO_READ_TLB_CONTENT);
131 tlb = ITLB_BASE;
132 for (count = 0; count < MAX_TLBs; count++, tlb += TLB_STEP)
133 print_single_tlb(tlb, 0);
134 printk
135 (" =============================================================\n");
136}
137
138/* ======================================================================= */
139
140#ifdef CONFIG_POOR_MANS_STRACE
141
142#include "syscalltab.h"
143
144struct ring_node {
145 int evt;
146 int ret_addr;
147 int event;
148 int tra;
149 int pid;
150 unsigned long sp;
151 unsigned long pc;
152};
153
154static struct ring_node event_ring[16];
155static int event_ptr = 0;
156
157struct stored_syscall_data {
158 int pid;
159 int syscall_number;
160};
161
162#define N_STORED_SYSCALLS 16
163
164static struct stored_syscall_data stored_syscalls[N_STORED_SYSCALLS];
165static int syscall_next=0;
166static int syscall_next_print=0;
167
168void evt_debug(int evt, int ret_addr, int event, int tra, struct pt_regs *regs)
169{
170 int syscallno = tra & 0xff;
171 unsigned long sp;
172 unsigned long stack_bottom;
173 int pid;
174 struct ring_node *rr;
175
176 pid = current->pid;
177 stack_bottom = (unsigned long) task_stack_page(current);
178 asm volatile("ori r15, 0, %0" : "=r" (sp));
179 rr = event_ring + event_ptr;
180 rr->evt = evt;
181 rr->ret_addr = ret_addr;
182 rr->event = event;
183 rr->tra = tra;
184 rr->pid = pid;
185 rr->sp = sp;
186 rr->pc = regs->pc;
187
188 if (sp < stack_bottom + 3092) {
 189		int i, j;
 190		printk("evt_debug : stack underflow report\n");
191 for (j=0, i = event_ptr; j<16; j++) {
192 rr = event_ring + i;
193 printk("evt=%08x event=%08x tra=%08x pid=%5d sp=%08lx pc=%08lx\n",
194 rr->evt, rr->event, rr->tra, rr->pid, rr->sp, rr->pc);
195 i--;
196 i &= 15;
197 }
198 panic("STACK UNDERFLOW\n");
199 }
200
201 event_ptr = (event_ptr + 1) & 15;
202
203 if ((event == 2) && (evt == 0x160)) {
204 if (syscallno < NUM_SYSCALL_INFO_ENTRIES) {
205 /* Store the syscall information to print later. We
206 * can't print this now - currently we're running with
207 * SR.BL=1, so we can't take a tlbmiss (which could occur
208 * in the console drivers under printk).
209 *
210 * Just overwrite old entries on ring overflow - this
211 * is only for last-hope debugging. */
212 stored_syscalls[syscall_next].pid = current->pid;
213 stored_syscalls[syscall_next].syscall_number = syscallno;
214 syscall_next++;
215 syscall_next &= (N_STORED_SYSCALLS - 1);
216 }
217 }
218}
219
220static void drain_syscalls(void) {
221 while (syscall_next_print != syscall_next) {
222 printk("Task %d: %s()\n",
223 stored_syscalls[syscall_next_print].pid,
224 syscall_info_table[stored_syscalls[syscall_next_print].syscall_number].name);
225 syscall_next_print++;
226 syscall_next_print &= (N_STORED_SYSCALLS - 1);
227 }
228}
229
230void evt_debug2(unsigned int ret)
231{
232 drain_syscalls();
233 printk("Task %d: syscall returns %08x\n", current->pid, ret);
234}
235
236void evt_debug_ret_from_irq(struct pt_regs *regs)
237{
238 int pid;
239 struct ring_node *rr;
240
241 pid = current->pid;
242 rr = event_ring + event_ptr;
243 rr->evt = 0xffff;
244 rr->ret_addr = 0;
245 rr->event = 0;
246 rr->tra = 0;
247 rr->pid = pid;
248 rr->pc = regs->pc;
249 event_ptr = (event_ptr + 1) & 15;
250}
251
252void evt_debug_ret_from_exc(struct pt_regs *regs)
253{
254 int pid;
255 struct ring_node *rr;
256
257 pid = current->pid;
258 rr = event_ring + event_ptr;
259 rr->evt = 0xfffe;
260 rr->ret_addr = 0;
261 rr->event = 0;
262 rr->tra = 0;
263 rr->pid = pid;
264 rr->pc = regs->pc;
265 event_ptr = (event_ptr + 1) & 15;
266}
267
268#endif /* CONFIG_POOR_MANS_STRACE */
269
270/* ======================================================================= */
271
272void show_excp_regs(char *from, int trapnr, int signr, struct pt_regs *regs)
273{
274
275 unsigned long long ah, al, bh, bl, ch, cl;
276
277 printk("\n");
278 printk("EXCEPTION - %s: task %d; Linux trap # %d; signal = %d\n",
279 ((from) ? from : "???"), current->pid, trapnr, signr);
280
281 asm volatile ("getcon " __EXPEVT ", %0":"=r"(ah));
282 asm volatile ("getcon " __EXPEVT ", %0":"=r"(al));
283 ah = (ah) >> 32;
284 al = (al) & 0xffffffff;
285 asm volatile ("getcon " __KCR1 ", %0":"=r"(bh));
286 asm volatile ("getcon " __KCR1 ", %0":"=r"(bl));
287 bh = (bh) >> 32;
288 bl = (bl) & 0xffffffff;
289 asm volatile ("getcon " __INTEVT ", %0":"=r"(ch));
290 asm volatile ("getcon " __INTEVT ", %0":"=r"(cl));
291 ch = (ch) >> 32;
292 cl = (cl) & 0xffffffff;
293 printk("EXPE: %08Lx%08Lx KCR1: %08Lx%08Lx INTE: %08Lx%08Lx\n",
294 ah, al, bh, bl, ch, cl);
295
296 asm volatile ("getcon " __PEXPEVT ", %0":"=r"(ah));
297 asm volatile ("getcon " __PEXPEVT ", %0":"=r"(al));
298 ah = (ah) >> 32;
299 al = (al) & 0xffffffff;
300 asm volatile ("getcon " __PSPC ", %0":"=r"(bh));
301 asm volatile ("getcon " __PSPC ", %0":"=r"(bl));
302 bh = (bh) >> 32;
303 bl = (bl) & 0xffffffff;
304 asm volatile ("getcon " __PSSR ", %0":"=r"(ch));
305 asm volatile ("getcon " __PSSR ", %0":"=r"(cl));
306 ch = (ch) >> 32;
307 cl = (cl) & 0xffffffff;
308 printk("PEXP: %08Lx%08Lx PSPC: %08Lx%08Lx PSSR: %08Lx%08Lx\n",
309 ah, al, bh, bl, ch, cl);
310
311 ah = (regs->pc) >> 32;
312 al = (regs->pc) & 0xffffffff;
313 bh = (regs->regs[18]) >> 32;
314 bl = (regs->regs[18]) & 0xffffffff;
315 ch = (regs->regs[15]) >> 32;
316 cl = (regs->regs[15]) & 0xffffffff;
317 printk("PC : %08Lx%08Lx LINK: %08Lx%08Lx SP : %08Lx%08Lx\n",
318 ah, al, bh, bl, ch, cl);
319
320 ah = (regs->sr) >> 32;
321 al = (regs->sr) & 0xffffffff;
322 asm volatile ("getcon " __TEA ", %0":"=r"(bh));
323 asm volatile ("getcon " __TEA ", %0":"=r"(bl));
324 bh = (bh) >> 32;
325 bl = (bl) & 0xffffffff;
326 asm volatile ("getcon " __KCR0 ", %0":"=r"(ch));
327 asm volatile ("getcon " __KCR0 ", %0":"=r"(cl));
328 ch = (ch) >> 32;
329 cl = (cl) & 0xffffffff;
330 printk("SR : %08Lx%08Lx TEA : %08Lx%08Lx KCR0: %08Lx%08Lx\n",
331 ah, al, bh, bl, ch, cl);
332
333 ah = (regs->regs[0]) >> 32;
334 al = (regs->regs[0]) & 0xffffffff;
335 bh = (regs->regs[1]) >> 32;
336 bl = (regs->regs[1]) & 0xffffffff;
337 ch = (regs->regs[2]) >> 32;
338 cl = (regs->regs[2]) & 0xffffffff;
339 printk("R0 : %08Lx%08Lx R1 : %08Lx%08Lx R2 : %08Lx%08Lx\n",
340 ah, al, bh, bl, ch, cl);
341
342 ah = (regs->regs[3]) >> 32;
343 al = (regs->regs[3]) & 0xffffffff;
344 bh = (regs->regs[4]) >> 32;
345 bl = (regs->regs[4]) & 0xffffffff;
346 ch = (regs->regs[5]) >> 32;
347 cl = (regs->regs[5]) & 0xffffffff;
348 printk("R3 : %08Lx%08Lx R4 : %08Lx%08Lx R5 : %08Lx%08Lx\n",
349 ah, al, bh, bl, ch, cl);
350
351 ah = (regs->regs[6]) >> 32;
352 al = (regs->regs[6]) & 0xffffffff;
353 bh = (regs->regs[7]) >> 32;
354 bl = (regs->regs[7]) & 0xffffffff;
355 ch = (regs->regs[8]) >> 32;
356 cl = (regs->regs[8]) & 0xffffffff;
357 printk("R6 : %08Lx%08Lx R7 : %08Lx%08Lx R8 : %08Lx%08Lx\n",
358 ah, al, bh, bl, ch, cl);
359
360 ah = (regs->regs[9]) >> 32;
361 al = (regs->regs[9]) & 0xffffffff;
362 bh = (regs->regs[10]) >> 32;
363 bl = (regs->regs[10]) & 0xffffffff;
364 ch = (regs->regs[11]) >> 32;
365 cl = (regs->regs[11]) & 0xffffffff;
366 printk("R9 : %08Lx%08Lx R10 : %08Lx%08Lx R11 : %08Lx%08Lx\n",
367 ah, al, bh, bl, ch, cl);
368 printk("....\n");
369
370 ah = (regs->tregs[0]) >> 32;
371 al = (regs->tregs[0]) & 0xffffffff;
372 bh = (regs->tregs[1]) >> 32;
373 bl = (regs->tregs[1]) & 0xffffffff;
374 ch = (regs->tregs[2]) >> 32;
375 cl = (regs->tregs[2]) & 0xffffffff;
376 printk("T0 : %08Lx%08Lx T1 : %08Lx%08Lx T2 : %08Lx%08Lx\n",
377 ah, al, bh, bl, ch, cl);
378 printk("....\n");
379
380 print_dtlb();
381 print_itlb();
382}
383
384/* ======================================================================= */
385
386/*
387** Depending on <base> scan the MMU, Data or Instruction side
388** looking for a valid mapping matching Eaddr & asid.
389** Return -1 if not found or the TLB id entry otherwise.
390** Note: it works only for 4k pages!
391*/
392static unsigned long
393lookup_mmu_side(unsigned long base, unsigned long Eaddr, unsigned long asid)
394{
395 regType_t pteH;
396 unsigned long epn;
397 int count;
398
399 epn = Eaddr & 0xfffff000;
400
401 for (count = 0; count < MAX_TLBs; count++, base += TLB_STEP) {
402 pteH = getConfigReg(base);
403 if (GET_VALID(pteH))
404 if ((unsigned long) GET_EPN(pteH) == epn)
405 if ((unsigned long) GET_ASID(pteH) == asid)
406 break;
407 }
408 return ((unsigned long) ((count < MAX_TLBs) ? base : -1));
409}
410
411unsigned long lookup_dtlb(unsigned long Eaddr)
412{
413 unsigned long asid = get_asid();
414 return (lookup_mmu_side((u64) DTLB_BASE, Eaddr, asid));
415}
416
417unsigned long lookup_itlb(unsigned long Eaddr)
418{
419 unsigned long asid = get_asid();
420 return (lookup_mmu_side((u64) ITLB_BASE, Eaddr, asid));
421}
422
423void print_page(struct page *page)
424{
425 printk(" page[%p] -> index 0x%lx, count 0x%x, flags 0x%lx\n",
426 page, page->index, page_count(page), page->flags);
427 printk(" address_space = %p, pages =%ld\n", page->mapping,
428 page->mapping->nrpages);
429
430}
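
As a quick reference for the GET_* macros above, a stand-alone sketch slicing
a made-up PTE-high value (the sample number is hypothetical; the field layout
is the one the macros encode):

	#include <stdio.h>

	#define GET_VALID(pte)	((pte) & 0x1)
	#define GET_SHARED(pte)	((pte) & 0x2)
	#define GET_ASID(pte)	((pte >> 2) & 0x0ff)
	#define GET_EPN(pte)	((pte) & 0xfffff000)

	int main(void)
	{
		unsigned long long pteH = 0x80402007ULL;  /* sample value */

		/* low bits 0b111 -> valid, shared, ASID 0x01; EPN 0x80402000 */
		printf("valid=%llu shared=%llu asid=0x%02llx epn=0x%08llx\n",
		       GET_VALID(pteH), GET_SHARED(pteH) >> 1,
		       GET_ASID(pteH), GET_EPN(pteH));
		return 0;
	}
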
diff --git a/arch/sh/lib64/io.c b/arch/sh/lib64/io.c
new file mode 100644
index 000000000000..a3f3a2b8e25b
--- /dev/null
+++ b/arch/sh/lib64/io.c
@@ -0,0 +1,128 @@
1/*
2 * Copyright (C) 2000 David J. Mckay (david.mckay@st.com)
3 *
4 * May be copied or modified under the terms of the GNU General Public
5 * License. See linux/COPYING for more information.
6 *
7 * This file contains the I/O routines for use on the overdrive board
8 *
9 */
10
11#include <linux/kernel.h>
12#include <linux/types.h>
13#include <linux/delay.h>
14#include <linux/module.h>
15#include <asm/system.h>
16#include <asm/processor.h>
17#include <asm/io.h>
18
19/* Now for the string version of these functions */
20void outsb(unsigned long port, const void *addr, unsigned long count)
21{
22 int i;
 23	const unsigned char *p = addr;
24
25 for (i = 0; i < count; i++, p++) {
26 outb(*p, port);
27 }
28}
29EXPORT_SYMBOL(outsb);
30
31void insb(unsigned long port, void *addr, unsigned long count)
32{
33 int i;
34 unsigned char *p = (unsigned char *) addr;
35
36 for (i = 0; i < count; i++, p++) {
37 *p = inb(port);
38 }
39}
40EXPORT_SYMBOL(insb);
41
42/* For the 16 and 32 bit string functions, we have to worry about alignment.
43 * The SH does not do unaligned accesses, so we have to read as bytes and
44 * then write as a word or dword.
45 * This can be optimised a lot more, especially in the case where the data
46 * is aligned
47 */
48
49void outsw(unsigned long port, const void *addr, unsigned long count)
50{
51 int i;
52 unsigned short tmp;
 53	const unsigned char *p = addr;
54
55 for (i = 0; i < count; i++, p += 2) {
56 tmp = (*p) | ((*(p + 1)) << 8);
57 outw(tmp, port);
58 }
59}
60EXPORT_SYMBOL(outsw);
61
62void insw(unsigned long port, void *addr, unsigned long count)
63{
64 int i;
65 unsigned short tmp;
66 unsigned char *p = (unsigned char *) addr;
67
68 for (i = 0; i < count; i++, p += 2) {
69 tmp = inw(port);
70 p[0] = tmp & 0xff;
71 p[1] = (tmp >> 8) & 0xff;
72 }
73}
74EXPORT_SYMBOL(insw);
75
76void outsl(unsigned long port, const void *addr, unsigned long count)
77{
78 int i;
79 unsigned tmp;
 80	const unsigned char *p = addr;
81
82 for (i = 0; i < count; i++, p += 4) {
83 tmp = (*p) | ((*(p + 1)) << 8) | ((*(p + 2)) << 16) |
84 ((*(p + 3)) << 24);
85 outl(tmp, port);
86 }
87}
88EXPORT_SYMBOL(outsl);
89
90void insl(unsigned long port, void *addr, unsigned long count)
91{
92 int i;
93 unsigned tmp;
94 unsigned char *p = (unsigned char *) addr;
95
96 for (i = 0; i < count; i++, p += 4) {
97 tmp = inl(port);
98 p[0] = tmp & 0xff;
99 p[1] = (tmp >> 8) & 0xff;
100 p[2] = (tmp >> 16) & 0xff;
 101		p[3] = (tmp >> 24) & 0xff;
 102	}
104}
105EXPORT_SYMBOL(insl);
106
107void memcpy_toio(void __iomem *to, const void *from, long count)
108{
 109	const unsigned char *p = from;
110
111 while (count) {
112 count--;
113 writeb(*p++, to++);
114 }
115}
116EXPORT_SYMBOL(memcpy_toio);
117
118void memcpy_fromio(void *to, void __iomem *from, long count)
119{
120 int i;
121 unsigned char *p = (unsigned char *) to;
122
123 for (i = 0; i < count; i++) {
124 p[i] = readb(from);
125 from++;
126 }
127}
128EXPORT_SYMBOL(memcpy_fromio);
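
Because the 16- and 32-bit string routines above assemble and split words a
byte at a time, callers need not align their buffers. A hedged usage sketch
(drain_fifo and the port number 0x300 are made up for illustration):

	/* hypothetical driver fragment: pull 64 16-bit words from a
	 * device FIFO into an odd-aligned buffer - safe because the
	 * insw() above stores byte by byte */
	static void drain_fifo(unsigned char *buf)
	{
		insw(0x300, buf + 1, 64);
	}
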
diff --git a/arch/sh/lib64/iomap.c b/arch/sh/lib64/iomap.c
new file mode 100644
index 000000000000..253d1e351d49
--- /dev/null
+++ b/arch/sh/lib64/iomap.c
@@ -0,0 +1,54 @@
1/*
2 * arch/sh64/lib/iomap.c
3 *
4 * Generic sh64 iomap interface
5 *
6 * Copyright (C) 2004 Paul Mundt
7 *
8 * This file is subject to the terms and conditions of the GNU General Public
9 * License. See the file "COPYING" in the main directory of this archive
10 * for more details.
11 */
12#include <linux/pci.h>
13#include <asm/io.h>
14
15void __iomem *__attribute__ ((weak))
16ioport_map(unsigned long port, unsigned int len)
17{
18 return (void __iomem *)port;
19}
20EXPORT_SYMBOL(ioport_map);
21
22void ioport_unmap(void __iomem *addr)
23{
24 /* Nothing .. */
25}
26EXPORT_SYMBOL(ioport_unmap);
27
28#ifdef CONFIG_PCI
29void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long max)
30{
31 unsigned long start = pci_resource_start(dev, bar);
32 unsigned long len = pci_resource_len(dev, bar);
33 unsigned long flags = pci_resource_flags(dev, bar);
34
35 if (!len)
36 return NULL;
37 if (max && len > max)
38 len = max;
39 if (flags & IORESOURCE_IO)
40 return ioport_map(start + pciio_virt, len);
41 if (flags & IORESOURCE_MEM)
42 return (void __iomem *)start;
43
44 /* What? */
45 return NULL;
46}
47EXPORT_SYMBOL(pci_iomap);
48
49void pci_iounmap(struct pci_dev *dev, void __iomem *addr)
50{
51 /* Nothing .. */
52}
53EXPORT_SYMBOL(pci_iounmap);
54#endif
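
A hedged usage sketch of the pci_iomap() above (probe-time fragment, the
function name is illustrative): passing max = 0 maps the whole BAR, while a
nonzero max caps the mapped length.

	static void __iomem *map_regs(struct pci_dev *pdev)
	{
		/* at most 4096 bytes of BAR 0; NULL if the BAR is empty
		 * or neither an IO nor a MEM resource */
		return pci_iomap(pdev, 0, 4096);
	}
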
diff --git a/arch/sh/lib64/memcpy.c b/arch/sh/lib64/memcpy.c
new file mode 100644
index 000000000000..fba436a92bfa
--- /dev/null
+++ b/arch/sh/lib64/memcpy.c
@@ -0,0 +1,81 @@
1/*
2 * Copyright (C) 2002 Mark Debbage (Mark.Debbage@superh.com)
3 *
4 * May be copied or modified under the terms of the GNU General Public
5 * License. See linux/COPYING for more information.
6 *
7 */
8
9#include <linux/types.h>
10#include <asm/string.h>
11
 12/* This is a simplistic optimization of memcpy to increase the
 13 * granularity of access beyond one byte using aligned
 14 * loads and stores. This is not an optimal implementation
 15 * for SH-5 (especially with regard to prefetching and the cache),
 16 * and a better version should be provided later ...
 17 */
17
18void *memcpy(void *dest, const void *src, size_t count)
19{
 20	char *d = dest;
 21	const char *s = src;
21
22 if (count >= 32) {
23 int i = 8 - (((unsigned long) d) & 0x7);
24
25 if (i != 8)
26 while (i-- && count--) {
27 *d++ = *s++;
28 }
29
30 if (((((unsigned long) d) & 0x7) == 0) &&
31 ((((unsigned long) s) & 0x7) == 0)) {
32 while (count >= 32) {
33 unsigned long long t1, t2, t3, t4;
34 t1 = *(unsigned long long *) (s);
35 t2 = *(unsigned long long *) (s + 8);
36 t3 = *(unsigned long long *) (s + 16);
37 t4 = *(unsigned long long *) (s + 24);
38 *(unsigned long long *) (d) = t1;
39 *(unsigned long long *) (d + 8) = t2;
40 *(unsigned long long *) (d + 16) = t3;
41 *(unsigned long long *) (d + 24) = t4;
42 d += 32;
43 s += 32;
44 count -= 32;
45 }
46 while (count >= 8) {
47 *(unsigned long long *) d =
48 *(unsigned long long *) s;
49 d += 8;
50 s += 8;
51 count -= 8;
52 }
53 }
54
55 if (((((unsigned long) d) & 0x3) == 0) &&
56 ((((unsigned long) s) & 0x3) == 0)) {
57 while (count >= 4) {
58 *(unsigned long *) d = *(unsigned long *) s;
59 d += 4;
60 s += 4;
61 count -= 4;
62 }
63 }
64
65 if (((((unsigned long) d) & 0x1) == 0) &&
66 ((((unsigned long) s) & 0x1) == 0)) {
67 while (count >= 2) {
68 *(unsigned short *) d = *(unsigned short *) s;
69 d += 2;
70 s += 2;
71 count -= 2;
72 }
73 }
74 }
75
76 while (count--) {
77 *d++ = *s++;
78 }
79
 80	return dest;	/* memcpy returns the original destination */
81}
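
Worked through with illustrative addresses: for d = 0x1003 the head loop
copies 8 - (0x1003 & 7) = 5 bytes, leaving d = 0x1008. If s started at
0x2003 it is now 0x2008, both pointers are 8-byte aligned, and the unrolled
32-byte loop runs; if s started at 0x2002 it is now 0x2007, so the copy
falls through to the 4-, 2- and 1-byte tail loops instead.
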
diff --git a/arch/sh/lib64/page_clear.S b/arch/sh/lib64/page_clear.S
new file mode 100644
index 000000000000..ac0111d669a3
--- /dev/null
+++ b/arch/sh/lib64/page_clear.S
@@ -0,0 +1,54 @@
1/*
2 Copyright 2003 Richard Curnow, SuperH (UK) Ltd.
3
4 This file is subject to the terms and conditions of the GNU General Public
5 License. See the file "COPYING" in the main directory of this archive
6 for more details.
7
8 Tight version of memset for the case of just clearing a page. It turns out
9 that having the alloco's spaced out slightly due to the increment/branch
10 pair causes them to contend less for access to the cache. Similarly,
11 keeping the stores apart from the allocos causes less contention. => Do two
12 separate loops. Do multiple stores per loop to amortise the
13 increment/branch cost a little.
14
15 Parameters:
16 r2 : source effective address (start of page)
17
18 Always clears 4096 bytes.
19
20 Note : alloco guarded by synco to avoid TAKum03020 erratum
21
22*/
23
24 .section .text..SHmedia32,"ax"
25 .little
26
27 .balign 8
28 .global sh64_page_clear
29sh64_page_clear:
30 pta/l 1f, tr1
31 pta/l 2f, tr2
32 ptabs/l r18, tr0
33
34 movi 4096, r7
35 add r2, r7, r7
36 add r2, r63, r6
371:
38 alloco r6, 0
39 synco ! TAKum03020
40 addi r6, 32, r6
41 bgt/l r7, r6, tr1
42
43 add r2, r63, r6
442:
45 st.q r6, 0, r63
46 st.q r6, 8, r63
47 st.q r6, 16, r63
48 st.q r6, 24, r63
49 addi r6, 32, r6
50 bgt/l r7, r6, tr2
51
52 blink tr0, r63
53
54
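
In rough C terms (a sketch only - alloco/synco have no C equivalent, and
alloco_synco is a hypothetical stand-in), the two loops above amount to:

	/* pass 1: pre-allocate each 32-byte cache line of the page */
	for (p = page; p < page + 4096; p += 32)
		alloco_synco(p);		/* alloco + synco */
	/* pass 2: four quadword stores of zero per line */
	for (p = page; p < page + 4096; p += 32)
		memset(p, 0, 32);		/* st.q r6, 0/8/16/24, r63 */
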
diff --git a/arch/sh/lib64/page_copy.S b/arch/sh/lib64/page_copy.S
new file mode 100644
index 000000000000..e159c3cd2582
--- /dev/null
+++ b/arch/sh/lib64/page_copy.S
@@ -0,0 +1,91 @@
1/*
2 Copyright 2003 Richard Curnow, SuperH (UK) Ltd.
3
4 This file is subject to the terms and conditions of the GNU General Public
5 License. See the file "COPYING" in the main directory of this archive
6 for more details.
7
 8  Tight version of memcpy for the case of just copying a page.
9 Prefetch strategy empirically optimised against RTL simulations
10 of SH5-101 cut2 eval chip with Cayman board DDR memory.
11
12 Parameters:
13 r2 : source effective address (start of page)
14 r3 : destination effective address (start of page)
15
16 Always copies 4096 bytes.
17
18 Points to review.
19 * Currently the prefetch is 4 lines ahead and the alloco is 2 lines ahead.
 20   It seems like the prefetch needs to be at least 4 lines ahead to get
21 the data into the cache in time, and the allocos contend with outstanding
22 prefetches for the same cache set, so it's better to have the numbers
23 different.
24 */
25
26 .section .text..SHmedia32,"ax"
27 .little
28
29 .balign 8
30 .global sh64_page_copy
31sh64_page_copy:
32
33 /* Copy 4096 bytes worth of data from r2 to r3.
34 Do prefetches 4 lines ahead.
35 Do alloco 2 lines ahead */
36
37 pta 1f, tr1
38 pta 2f, tr2
39 pta 3f, tr3
40 ptabs r18, tr0
41
42#if 0
43 /* TAKum03020 */
44 ld.q r2, 0x00, r63
45 ld.q r2, 0x20, r63
46 ld.q r2, 0x40, r63
47 ld.q r2, 0x60, r63
48#endif
49 alloco r3, 0x00
50 synco ! TAKum03020
51 alloco r3, 0x20
52 synco ! TAKum03020
53
54 movi 3968, r6
55 add r3, r6, r6
56 addi r6, 64, r7
57 addi r7, 64, r8
58 sub r2, r3, r60
59 addi r60, 8, r61
60 addi r61, 8, r62
61 addi r62, 8, r23
62 addi r60, 0x80, r22
63
64/* Minimal code size. The extra branches inside the loop don't cost much
65 because they overlap with the time spent waiting for prefetches to
66 complete. */
671:
68#if 0
69 /* TAKum03020 */
70 bge/u r3, r6, tr2 ! skip prefetch for last 4 lines
71 ldx.q r3, r22, r63 ! prefetch 4 lines hence
72#endif
732:
74 bge/u r3, r7, tr3 ! skip alloco for last 2 lines
75 alloco r3, 0x40 ! alloc destination line 2 lines ahead
76 synco ! TAKum03020
773:
78 ldx.q r3, r60, r36
79 ldx.q r3, r61, r37
80 ldx.q r3, r62, r38
81 ldx.q r3, r23, r39
82 st.q r3, 0, r36
83 st.q r3, 8, r37
84 st.q r3, 16, r38
85 st.q r3, 24, r39
86 addi r3, 32, r3
87 bgt/l r8, r3, tr1
88
89 blink tr0, r63 ! return
90
91
diff --git a/arch/sh/lib64/panic.c b/arch/sh/lib64/panic.c
new file mode 100644
index 000000000000..c9eb1cb50d97
--- /dev/null
+++ b/arch/sh/lib64/panic.c
@@ -0,0 +1,58 @@
1/*
2 * Copyright (C) 2003 Richard Curnow, SuperH UK Limited
3 *
4 * This file is subject to the terms and conditions of the GNU General Public
5 * License. See the file "COPYING" in the main directory of this archive
6 * for more details.
7 */
8
9#include <linux/kernel.h>
10#include <asm/io.h>
11#include <asm/registers.h>
12
13/* THIS IS A PHYSICAL ADDRESS */
14#define HDSP2534_ADDR (0x04002100)
15
16#ifdef CONFIG_SH_CAYMAN
17
18static void poor_mans_delay(void)
19{
20 int i;
21 for (i = 0; i < 2500000; i++) {
22 } /* poor man's delay */
23}
24
25static void show_value(unsigned long x)
26{
27 int i;
28 unsigned nibble;
29 for (i = 0; i < 8; i++) {
30 nibble = ((x >> (i * 4)) & 0xf);
31
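		/* 48 = '0', 55 = 'A' - 10: render the nibble as an
		 * ASCII hex digit */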
32 ctrl_outb(nibble + ((nibble > 9) ? 55 : 48),
33 HDSP2534_ADDR + 0xe0 + ((7 - i) << 2));
34 }
35}
36
37#endif
38
39void
40panic_handler(unsigned long panicPC, unsigned long panicSSR,
41 unsigned long panicEXPEVT)
42{
43#ifdef CONFIG_SH_CAYMAN
44 while (1) {
45 /* This piece of code displays the PC on the LED display */
46 show_value(panicPC);
47 poor_mans_delay();
48 show_value(panicSSR);
49 poor_mans_delay();
50 show_value(panicEXPEVT);
51 poor_mans_delay();
52 }
53#endif
54
55 /* Never return from the panic handler */
56 for (;;) ;
57
58}
diff --git a/arch/sh/lib64/udelay.c b/arch/sh/lib64/udelay.c
new file mode 100644
index 000000000000..327653914007
--- /dev/null
+++ b/arch/sh/lib64/udelay.c
@@ -0,0 +1,59 @@
1/*
2 * arch/sh64/lib/udelay.c
3 *
4 * Delay routines, using a pre-computed "loops_per_jiffy" value.
5 *
6 * Copyright (C) 2000, 2001 Paolo Alberelli
7 * Copyright (C) 2003, 2004 Paul Mundt
8 *
9 * This file is subject to the terms and conditions of the GNU General Public
10 * License. See the file "COPYING" in the main directory of this archive
11 * for more details.
12 */
13#include <linux/sched.h>
14#include <asm/param.h>
15
16extern unsigned long loops_per_jiffy;
17
18/*
19 * Use only for very small delays (< 1 msec).
20 *
21 * The active part of our cycle counter is only 32-bits wide, and
22 * we're treating the difference between two marks as signed. On
23 * a 1GHz box, that's about 2 seconds.
24 */
25
26void __delay(int loops)
27{
28 long long dummy;
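	/* save tr0 in dummy, spin on a local pta/bne pair, restore tr0 */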
29 __asm__ __volatile__("gettr tr0, %1\n\t"
30 "pta $+4, tr0\n\t"
31 "addi %0, -1, %0\n\t"
32 "bne %0, r63, tr0\n\t"
33 "ptabs %1, tr0\n\t":"=r"(loops),
34 "=r"(dummy)
35 :"0"(loops));
36}
37
38void __udelay(unsigned long long usecs, unsigned long lpj)
39{
40 usecs *= (((unsigned long long) HZ << 32) / 1000000) * lpj;
41 __delay((long long) usecs >> 32);
42}
43
44void __ndelay(unsigned long long nsecs, unsigned long lpj)
45{
46 nsecs *= (((unsigned long long) HZ << 32) / 1000000000) * lpj;
47 __delay((long long) nsecs >> 32);
48}
49
50void udelay(unsigned long usecs)
51{
52 __udelay(usecs, loops_per_jiffy);
53}
54
55void ndelay(unsigned long nsecs)
56{
57 __ndelay(nsecs, loops_per_jiffy);
58}
59
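
The scaling above is 32.32 fixed point: usecs * (((HZ << 32) / 10^6) * lpj)
>> 32 is just usecs * lpj * HZ / 10^6, the number of __delay() iterations,
where lpj * HZ is the calibrated loop count per second. (The two-second
figure quoted for __delay() is 2^31 cycles at 10^9 Hz, about 2.1 s.) A
worked sketch with made-up numbers:

	/* hypothetical box: HZ = 100, loops_per_jiffy = 500000,
	 * so lpj * HZ = 5 * 10^7 delay loops per second.
	 *
	 * udelay(10):
	 *   ((100ULL << 32) / 1000000) * 500000 is ~50 * 2^32
	 *   10 * (~50 * 2^32) >> 32 = ~500 loops = 10 us */
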