Diffstat (limited to 'arch/sh64/kernel')
-rw-r--r--   arch/sh64/kernel/Makefile          36
-rw-r--r--   arch/sh64/kernel/alphanum.c        45
-rw-r--r--   arch/sh64/kernel/asm-offsets.c     33
-rw-r--r--   arch/sh64/kernel/dma.c            297
-rw-r--r--   arch/sh64/kernel/early_printk.c   105
-rw-r--r--   arch/sh64/kernel/entry.S         2103
-rw-r--r--   arch/sh64/kernel/fpu.c            170
-rw-r--r--   arch/sh64/kernel/head.S           373
-rw-r--r--   arch/sh64/kernel/init_task.c       46
-rw-r--r--   arch/sh64/kernel/irq.c            116
-rw-r--r--   arch/sh64/kernel/irq_intc.c       272
-rw-r--r--   arch/sh64/kernel/led.c             41
-rw-r--r--   arch/sh64/kernel/module.c         161
-rw-r--r--   arch/sh64/kernel/pci-dma.c         50
-rw-r--r--   arch/sh64/kernel/pci_sh5.c        541
-rw-r--r--   arch/sh64/kernel/pci_sh5.h        107
-rw-r--r--   arch/sh64/kernel/pcibios.c        168
-rw-r--r--   arch/sh64/kernel/process.c        962
-rw-r--r--   arch/sh64/kernel/ptrace.c         376
-rw-r--r--   arch/sh64/kernel/semaphore.c      140
-rw-r--r--   arch/sh64/kernel/setup.c          385
-rw-r--r--   arch/sh64/kernel/sh_ksyms.c        89
-rw-r--r--   arch/sh64/kernel/signal.c         727
-rw-r--r--   arch/sh64/kernel/switchto.S       198
-rw-r--r--   arch/sh64/kernel/sys_sh64.c       300
-rw-r--r--   arch/sh64/kernel/syscalls.S       345
-rw-r--r--   arch/sh64/kernel/time.c           610
-rw-r--r--   arch/sh64/kernel/traps.c          961
-rw-r--r--   arch/sh64/kernel/unwind.c         326
-rw-r--r--   arch/sh64/kernel/vmlinux.lds.S    181
30 files changed, 10264 insertions, 0 deletions
diff --git a/arch/sh64/kernel/Makefile b/arch/sh64/kernel/Makefile
new file mode 100644
index 000000000000..5816657c079c
--- /dev/null
+++ b/arch/sh64/kernel/Makefile
@@ -0,0 +1,36 @@
1#
2# This file is subject to the terms and conditions of the GNU General Public
3# License. See the file "COPYING" in the main directory of this archive
4# for more details.
5#
6# Copyright (C) 2000, 2001 Paolo Alberelli
7# Copyright (C) 2003 Paul Mundt
8#
9# Makefile for the Linux sh64 kernel.
10#
11# Note! Dependencies are done automagically by 'make dep', which also
12# removes any old dependencies. DON'T put your own dependencies here
13# unless it's something special (ie not a .c file).
14#
15
16extra-y := head.o init_task.o vmlinux.lds
17
18obj-y := process.o signal.o entry.o traps.o irq.o irq_intc.o \
19 ptrace.o setup.o time.o sys_sh64.o semaphore.o sh_ksyms.o \
20 switchto.o syscalls.o
21
22obj-$(CONFIG_HEARTBEAT) += led.o
23obj-$(CONFIG_SH_ALPHANUMERIC) += alphanum.o
24obj-$(CONFIG_SH_DMA) += dma.o
25obj-$(CONFIG_SH_FPU) += fpu.o
26obj-$(CONFIG_EARLY_PRINTK) += early_printk.o
27obj-$(CONFIG_KALLSYMS) += unwind.o
28obj-$(CONFIG_PCI) += pci-dma.o pcibios.o
29obj-$(CONFIG_MODULES) += module.o
30
31ifeq ($(CONFIG_PCI),y)
32obj-$(CONFIG_CPU_SH5) += pci_sh5.o
33endif
34
35USE_STANDARD_AS_RULE := true
36
diff --git a/arch/sh64/kernel/alphanum.c b/arch/sh64/kernel/alphanum.c
new file mode 100644
index 000000000000..56d6f9f71524
--- /dev/null
+++ b/arch/sh64/kernel/alphanum.c
@@ -0,0 +1,45 @@
1/*
2 * arch/sh64/kernel/alphanum.c
3 *
4 * Copyright (C) 2002 Stuart Menefy <stuart.menefy@st.com>
5 *
6 * May be copied or modified under the terms of the GNU General Public
7 * License. See linux/COPYING for more information.
8 *
9 * Machine-independent functions for handling 8-digit alphanumeric display
10 * (e.g. Agilent HDSP-253x)
11 */
12#include <linux/config.h>
13#include <linux/stddef.h>
14#include <linux/sched.h>
15
16void mach_alphanum(int pos, unsigned char val);
17void mach_led(int pos, int val);
18
19void print_seg(char *file, int line)
20{
21 int i;
22 unsigned int nibble;
23
24 for (i = 0; i < 5; i++) {
25 mach_alphanum(i, file[i]);
26 }
27
28 for (i = 0; i < 3; i++) {
29 nibble = ((line >> (i * 4)) & 0xf);
30 mach_alphanum(7 - i, nibble + ((nibble > 9) ? 55 : 48)); /* 55 = 'A' - 10, 48 = '0' */
31 }
32}
33
34void print_seg_num(unsigned num)
35{
36 int i;
37 unsigned int nibble;
38
39 for (i = 0; i < 8; i++) {
40 nibble = ((num >> (i * 4)) & 0xf);
41
42 mach_alphanum(7 - i, nibble + ((nibble > 9) ? 55 : 48));
43 }
44}
45
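A typical use (assumed here for illustration, not shown in this patch) is to flash the current source position on the display while debugging; only the first five characters of the file name fit:

	print_seg(__FILE__, __LINE__);	/* e.g. "alpha" plus the line number in hex */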
diff --git a/arch/sh64/kernel/asm-offsets.c b/arch/sh64/kernel/asm-offsets.c
new file mode 100644
index 000000000000..ca76537c16c0
--- /dev/null
+++ b/arch/sh64/kernel/asm-offsets.c
@@ -0,0 +1,33 @@
1/*
2 * This program is used to generate definitions needed by
3 * assembly language modules.
4 *
5 * We use the technique used in the OSF Mach kernel code:
6 * generate asm statements containing #defines,
7 * compile this file to assembler, and then extract the
8 * #defines from the assembly-language output.
9 */
10
11#include <linux/stddef.h>
12#include <linux/types.h>
13#include <linux/mm.h>
14#include <asm/thread_info.h>
15
16#define DEFINE(sym, val) \
17 asm volatile("\n->" #sym " %0 " #val : : "i" (val))
18
19#define BLANK() asm volatile("\n->" : : )
20
21int main(void)
22{
23 /* offsets into the thread_info struct */
24 DEFINE(TI_TASK, offsetof(struct thread_info, task));
25 DEFINE(TI_EXEC_DOMAIN, offsetof(struct thread_info, exec_domain));
26 DEFINE(TI_FLAGS, offsetof(struct thread_info, flags));
27 DEFINE(TI_PRE_COUNT, offsetof(struct thread_info, preempt_count));
28 DEFINE(TI_CPU, offsetof(struct thread_info, cpu));
29 DEFINE(TI_ADDR_LIMIT, offsetof(struct thread_info, addr_limit));
30 DEFINE(TI_RESTART_BLOCK,offsetof(struct thread_info, restart_block));
31
32 return 0;
33}
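For illustration, here is roughly what the "->" trick yields (the offset values shown are assumptions, and the exact kbuild post-processing is not part of this patch). Compiling asm-offsets.c with -S embeds marker lines such as "->TI_FLAGS 16 offsetof(struct thread_info, flags)" in the assembler output; those markers are extracted and turned into a header along these lines:

	/* generated <asm/asm-offsets.h> (sketch; values are illustrative) */
	#define TI_TASK		0	/* offsetof(struct thread_info, task) */
	#define TI_FLAGS	16	/* offsetof(struct thread_info, flags) */

entry.S can then use TI_FLAGS and friends without the assembler ever having to parse C structures.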
diff --git a/arch/sh64/kernel/dma.c b/arch/sh64/kernel/dma.c
new file mode 100644
index 000000000000..09cd9f4670b5
--- /dev/null
+++ b/arch/sh64/kernel/dma.c
@@ -0,0 +1,297 @@
1/*
2 * arch/sh64/kernel/dma.c
3 *
4 * DMA routines for the SH-5 DMAC.
5 *
6 * Copyright (C) 2003 Paul Mundt
7 *
8 * This file is subject to the terms and conditions of the GNU General Public
9 * License. See the file "COPYING" in the main directory of this archive
10 * for more details.
11 */
12#include <linux/init.h>
13#include <linux/module.h>
14#include <linux/interrupt.h>
15#include <linux/types.h>
16#include <linux/irq.h>
17#include <linux/spinlock.h>
18#include <linux/mm.h>
#include <linux/bitops.h>	/* ffs(), used by the error handler below */
19#include <asm/hardware.h>
20#include <asm/dma.h>
21#include <asm/signal.h>
22#include <asm/errno.h>
23#include <asm/io.h>
24
25typedef struct {
26 unsigned long dev_addr;
27 unsigned long mem_addr;
28
29 unsigned int mode;
30 unsigned int count;
31} dma_info_t;
32
33static dma_info_t dma_info[MAX_DMA_CHANNELS];
34static DEFINE_SPINLOCK(dma_spin_lock);
35
36/* arch/sh64/kernel/irq_intc.c */
37extern void make_intc_irq(unsigned int irq);
38
39/* DMAC Interrupts */
40#define DMA_IRQ_DMTE0 18
41#define DMA_IRQ_DERR 22
42
43#define DMAC_COMMON_BASE (dmac_base + 0x08)
44#define DMAC_SAR_BASE (dmac_base + 0x10)
45#define DMAC_DAR_BASE (dmac_base + 0x18)
46#define DMAC_COUNT_BASE (dmac_base + 0x20)
47#define DMAC_CTRL_BASE (dmac_base + 0x28)
48#define DMAC_STATUS_BASE (dmac_base + 0x30)
49
50#define DMAC_SAR(n) (DMAC_SAR_BASE + ((n) * 0x28))
51#define DMAC_DAR(n) (DMAC_DAR_BASE + ((n) * 0x28))
52#define DMAC_COUNT(n) (DMAC_COUNT_BASE + ((n) * 0x28))
53#define DMAC_CTRL(n) (DMAC_CTRL_BASE + ((n) * 0x28))
54#define DMAC_STATUS(n) (DMAC_STATUS_BASE + ((n) * 0x28))
55
56/* DMAC.COMMON Bit Definitions */
57#define DMAC_COMMON_PR 0x00000001 /* Priority */
58 /* Bits 1-2 Reserved */
59#define DMAC_COMMON_ME 0x00000008 /* Master Enable */
60#define DMAC_COMMON_NMI 0x00000010 /* NMI Flag */
61 /* Bits 5-6 Reserved */
62#define DMAC_COMMON_ER 0x00000780 /* Error Response */
63#define DMAC_COMMON_AAE 0x00007800 /* Address Alignment Error */
64 /* Bits 15-63 Reserved */
65
66/* DMAC.SAR Bit Definitions */
67#define DMAC_SAR_ADDR 0xffffffff /* Source Address */
68
69/* DMAC.DAR Bit Definitions */
70#define DMAC_DAR_ADDR 0xffffffff /* Destination Address */
71
72/* DMAC.COUNT Bit Definitions */
73#define DMAC_COUNT_CNT 0xffffffff /* Transfer Count */
74
75/* DMAC.CTRL Bit Definitions */
76#define DMAC_CTRL_TS 0x00000007 /* Transfer Size */
77#define DMAC_CTRL_SI 0x00000018 /* Source Increment */
78#define DMAC_CTRL_DI 0x00000060 /* Destination Increment */
79#define DMAC_CTRL_RS 0x00000780 /* Resource Select */
80#define DMAC_CTRL_IE 0x00000800 /* Interrupt Enable */
81#define DMAC_CTRL_TE 0x00001000 /* Transfer Enable */
82 /* Bits 15-63 Reserved */
83
84/* DMAC.STATUS Bit Definitions */
85#define DMAC_STATUS_TE 0x00000001 /* Transfer End */
86#define DMAC_STATUS_AAE 0x00000002 /* Address Alignment Error */
87 /* Bits 2-63 Reserved */
88
89static unsigned long dmac_base;
90
91void set_dma_count(unsigned int chan, unsigned int count);
92void set_dma_addr(unsigned int chan, unsigned int addr);
93
94static irqreturn_t dma_mte(int irq, void *dev_id, struct pt_regs *regs)
95{
96 unsigned int chan = irq - DMA_IRQ_DMTE0;
97 dma_info_t *info = dma_info + chan;
98 u64 status;
99
100 if (info->mode & DMA_MODE_WRITE) {
101 sh64_out64(info->mem_addr & DMAC_SAR_ADDR, DMAC_SAR(chan));
102 } else {
103 sh64_out64(info->mem_addr & DMAC_DAR_ADDR, DMAC_DAR(chan));
104 }
105
106 set_dma_count(chan, info->count);
107
108 /* Clear the TE bit */
109 status = sh64_in64(DMAC_STATUS(chan));
110 status &= ~DMAC_STATUS_TE;
111 sh64_out64(status, DMAC_STATUS(chan));
112
113 return IRQ_HANDLED;
114}
115
116static struct irqaction irq_dmte = {
117 .handler = dma_mte,
118 .flags = SA_INTERRUPT,
119 .name = "DMA MTE",
120};
121
122static irqreturn_t dma_err(int irq, void *dev_id, struct pt_regs *regs)
123{
124	u64 tmp;
125	unsigned int chan;	/* must be wider than u8: the ER/AAE fields live at bits 7..14 */
126
127	printk(KERN_NOTICE "DMAC: Got a DMA Error!\n");
128
129	tmp = sh64_in64(DMAC_COMMON_BASE);
130
131	/* Check for the type of error */
132	if (tmp & DMAC_COMMON_AAE) {
133		/* It's an address alignment error.. (assuming one AAE flag
		 * bit per channel, starting at bit 11) */
		chan = ffs((unsigned int)(tmp & DMAC_COMMON_AAE) >> 11) - 1;
134		printk(KERN_NOTICE "DMAC: Alignment error on channel %d, ", chan);
135
136		printk(KERN_NOTICE "SAR: 0x%08llx, DAR: 0x%08llx, COUNT: %lld\n",
137			(sh64_in64(DMAC_SAR(chan)) & DMAC_SAR_ADDR),
138			(sh64_in64(DMAC_DAR(chan)) & DMAC_DAR_ADDR),
139			(sh64_in64(DMAC_COUNT(chan)) & DMAC_COUNT_CNT));
140
141	} else if (tmp & DMAC_COMMON_ER) {
142		/* Something else went wrong.. (assuming one ER flag bit
		 * per channel, starting at bit 7) */
		chan = ffs((unsigned int)(tmp & DMAC_COMMON_ER) >> 7) - 1;
143		printk(KERN_NOTICE "DMAC: Error on channel %d\n", chan);
144	}
145
146 /* Reset the ME bit to clear the interrupt */
147 tmp |= DMAC_COMMON_ME;
148 sh64_out64(tmp, DMAC_COMMON_BASE);
149
150 return IRQ_HANDLED;
151}
152
153static struct irqaction irq_derr = {
154 .handler = dma_err,
155 .flags = SA_INTERRUPT,
156 .name = "DMA Error",
157};
158
159static inline unsigned long calc_xmit_shift(unsigned int chan)
160{
161 return sh64_in64(DMAC_CTRL(chan)) & 0x03;
162}
163
164void setup_dma(unsigned int chan, dma_info_t *info)
165{
166 unsigned int irq = DMA_IRQ_DMTE0 + chan;
167 dma_info_t *dma = dma_info + chan;
168
169 make_intc_irq(irq);
170 setup_irq(irq, &irq_dmte);
171	*dma = *info;	/* record this channel's parameters */
172}
173
174void enable_dma(unsigned int chan)
175{
176 u64 ctrl;
177
178 ctrl = sh64_in64(DMAC_CTRL(chan));
179 ctrl |= DMAC_CTRL_TE;
180 sh64_out64(ctrl, DMAC_CTRL(chan));
181}
182
183void disable_dma(unsigned int chan)
184{
185 u64 ctrl;
186
187 ctrl = sh64_in64(DMAC_CTRL(chan));
188 ctrl &= ~DMAC_CTRL_TE;
189 sh64_out64(ctrl, DMAC_CTRL(chan));
190}
191
192void set_dma_mode(unsigned int chan, char mode)
193{
194 dma_info_t *info = dma_info + chan;
195
196 info->mode = mode;
197
198 set_dma_addr(chan, info->mem_addr);
199 set_dma_count(chan, info->count);
200}
201
202void set_dma_addr(unsigned int chan, unsigned int addr)
203{
204 dma_info_t *info = dma_info + chan;
205 unsigned long sar, dar;
206
207 info->mem_addr = addr;
208 sar = (info->mode & DMA_MODE_WRITE) ? info->mem_addr : info->dev_addr;
209 dar = (info->mode & DMA_MODE_WRITE) ? info->dev_addr : info->mem_addr;
210
211 sh64_out64(sar & DMAC_SAR_ADDR, DMAC_SAR(chan));
212	sh64_out64(dar & DMAC_DAR_ADDR, DMAC_DAR(chan));
213}
214
215void set_dma_count(unsigned int chan, unsigned int count)
216{
217 dma_info_t *info = dma_info + chan;
218 u64 tmp;
219
220 info->count = count;
221
222 tmp = (info->count >> calc_xmit_shift(chan)) & DMAC_COUNT_CNT;
223
224 sh64_out64(tmp, DMAC_COUNT(chan));
225}
226
227unsigned long claim_dma_lock(void)
228{
229 unsigned long flags;
230
231 spin_lock_irqsave(&dma_spin_lock, flags);
232
233 return flags;
234}
235
236void release_dma_lock(unsigned long flags)
237{
238 spin_unlock_irqrestore(&dma_spin_lock, flags);
239}
240
241int get_dma_residue(unsigned int chan)
242{
243	return sh64_in64(DMAC_COUNT(chan)) << calc_xmit_shift(chan);
244}
245
246int __init init_dma(void)
247{
248 struct vcr_info vcr;
249 u64 tmp;
250
251 /* Remap the DMAC */
252 dmac_base = onchip_remap(PHYS_DMAC_BLOCK, 1024, "DMAC");
253 if (!dmac_base) {
254 printk(KERN_ERR "Unable to remap DMAC\n");
255 return -ENOMEM;
256 }
257
258 /* Report DMAC.VCR Info */
259 vcr = sh64_get_vcr_info(dmac_base);
260	printk(KERN_INFO "DMAC: Module ID: 0x%04x, Module version: 0x%04x\n",
261 vcr.mod_id, vcr.mod_vers);
262
263 /* Set the ME bit */
264 tmp = sh64_in64(DMAC_COMMON_BASE);
265 tmp |= DMAC_COMMON_ME;
266 sh64_out64(tmp, DMAC_COMMON_BASE);
267
268 /* Enable the DMAC Error Interrupt */
269 make_intc_irq(DMA_IRQ_DERR);
270 setup_irq(DMA_IRQ_DERR, &irq_derr);
271
272 return 0;
273}
274
275static void __exit exit_dma(void)
276{
277 onchip_unmap(dmac_base);
278 free_irq(DMA_IRQ_DERR, 0);
279}
280
281module_init(init_dma);
282module_exit(exit_dma);
283
284MODULE_AUTHOR("Paul Mundt");
285MODULE_DESCRIPTION("DMA API for SH-5 DMAC");
286MODULE_LICENSE("GPL");
287
288EXPORT_SYMBOL(setup_dma);
289EXPORT_SYMBOL(claim_dma_lock);
290EXPORT_SYMBOL(release_dma_lock);
291EXPORT_SYMBOL(enable_dma);
292EXPORT_SYMBOL(disable_dma);
293EXPORT_SYMBOL(set_dma_mode);
294EXPORT_SYMBOL(set_dma_addr);
295EXPORT_SYMBOL(set_dma_count);
296EXPORT_SYMBOL(get_dma_residue);
297
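A hypothetical caller of this API might look like the following (the channel number, device address, and buffer size are made up for illustration, and dma_info_t is file-local above, so a real caller would need it exported via a header):

	static void example_start_write(void *buf)
	{
		unsigned long flags;
		dma_info_t info = {
			.dev_addr = 0xb8000000,		/* assumed device FIFO address */
			.mem_addr = (unsigned long)buf,
			.mode	  = DMA_MODE_WRITE,
			.count	  = 4096,
		};

		flags = claim_dma_lock();
		setup_dma(0, &info);			/* hook the MTE handler for channel 0 */
		set_dma_mode(0, DMA_MODE_WRITE);	/* programs SAR/DAR and COUNT */
		enable_dma(0);				/* sets CTRL.TE to start the transfer */
		release_dma_lock(flags);
	}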
diff --git a/arch/sh64/kernel/early_printk.c b/arch/sh64/kernel/early_printk.c
new file mode 100644
index 000000000000..8c8a76e180aa
--- /dev/null
+++ b/arch/sh64/kernel/early_printk.c
@@ -0,0 +1,105 @@
1/*
2 * arch/sh64/kernel/early_printk.c
3 *
4 * SH-5 Early SCIF console (cloned and hacked from sh implementation)
5 *
6 * Copyright (C) 2003, 2004 Paul Mundt <lethal@linux-sh.org>
7 * Copyright (C) 2002 M. R. Brown <mrbrown@0xd6.org>
8 *
9 * This file is subject to the terms and conditions of the GNU General Public
10 * License. See the file "COPYING" in the main directory of this archive
11 * for more details.
12 */
13#include <linux/console.h>
14#include <linux/tty.h>
15#include <linux/init.h>
16#include <asm/io.h>
17#include <asm/hardware.h>
18
19#define SCIF_BASE_ADDR 0x01030000
20#define SCIF_ADDR_SH5 (PHYS_PERIPHERAL_BLOCK + SCIF_BASE_ADDR)
21
22/*
23 * Fixed virtual address where SCIF is mapped (should already be done
24 * in arch/sh64/kernel/head.S!).
25 */
26#define SCIF_REG 0xfa030000
27
28enum {
29 SCIF_SCSMR2 = SCIF_REG + 0x00,
30 SCIF_SCBRR2 = SCIF_REG + 0x04,
31 SCIF_SCSCR2 = SCIF_REG + 0x08,
32 SCIF_SCFTDR2 = SCIF_REG + 0x0c,
33 SCIF_SCFSR2 = SCIF_REG + 0x10,
34 SCIF_SCFRDR2 = SCIF_REG + 0x14,
35 SCIF_SCFCR2 = SCIF_REG + 0x18,
36 SCIF_SCFDR2 = SCIF_REG + 0x1c,
37 SCIF_SCSPTR2 = SCIF_REG + 0x20,
38 SCIF_SCLSR2 = SCIF_REG + 0x24,
39};
40
41static void sh_console_putc(int c)
42{
43 while (!(ctrl_inw(SCIF_SCFSR2) & 0x20))
44 cpu_relax();
45
46 ctrl_outb(c, SCIF_SCFTDR2);
47 ctrl_outw((ctrl_inw(SCIF_SCFSR2) & 0x9f), SCIF_SCFSR2);
48
49 if (c == '\n')
50 sh_console_putc('\r');
51}
52
53static void sh_console_flush(void)
54{
55 ctrl_outw((ctrl_inw(SCIF_SCFSR2) & 0xbf), SCIF_SCFSR2);
56
57 while (!(ctrl_inw(SCIF_SCFSR2) & 0x40))
58 cpu_relax();
59
60 ctrl_outw((ctrl_inw(SCIF_SCFSR2) & 0xbf), SCIF_SCFSR2);
61}
62
63static void sh_console_write(struct console *con, const char *s, unsigned count)
64{
65 while (count-- > 0)
66 sh_console_putc(*s++);
67
68 sh_console_flush();
69}
70
71static int __init sh_console_setup(struct console *con, char *options)
72{
73 con->cflag = CREAD | HUPCL | CLOCAL | B19200 | CS8;
74
75 return 0;
76}
77
78static struct console sh_console = {
79 .name = "scifcon",
80 .write = sh_console_write,
81 .setup = sh_console_setup,
82 .flags = CON_PRINTBUFFER,
83 .index = -1,
84};
85
86void __init enable_early_printk(void)
87{
88 ctrl_outb(0x2a, SCIF_SCBRR2); /* 19200bps */
89
90 ctrl_outw(0x04, SCIF_SCFCR2); /* Reset TFRST */
91 ctrl_outw(0x10, SCIF_SCFCR2); /* TTRG0=1 */
92
93 ctrl_outw(0, SCIF_SCSPTR2);
94 ctrl_outw(0x60, SCIF_SCFSR2);
95 ctrl_outw(0, SCIF_SCLSR2);
96 ctrl_outw(0x30, SCIF_SCSCR2);
97
98 register_console(&sh_console);
99}
100
101void disable_early_printk(void)
102{
103 unregister_console(&sh_console);
104}
105
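The 0x2a written to SCBRR2 in enable_early_printk() encodes the bit rate. Assuming the usual SCIF divisor formula (it is not stated in this patch), SCBRR = Pck / (32 * baud) - 1, one can back out the peripheral clock the value was tuned for:

	/* sketch: 32 * 19200 * (0x2a + 1) is roughly 26.4 MHz peripheral clock */
	static unsigned int scif_pck_from_brr(unsigned int brr, unsigned int baud)
	{
		return 32 * baud * (brr + 1);
	}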
diff --git a/arch/sh64/kernel/entry.S b/arch/sh64/kernel/entry.S
new file mode 100644
index 000000000000..2e2cfe20b426
--- /dev/null
+++ b/arch/sh64/kernel/entry.S
@@ -0,0 +1,2103 @@
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * arch/sh64/kernel/entry.S
7 *
8 * Copyright (C) 2000, 2001 Paolo Alberelli
9 * Copyright (C) 2004, 2005 Paul Mundt
10 * Copyright (C) 2003, 2004 Richard Curnow
11 *
12 */
13
14#include <linux/config.h>
15#include <linux/errno.h>
16#include <linux/sys.h>
17
18#include <asm/processor.h>
19#include <asm/registers.h>
20#include <asm/unistd.h>
21#include <asm/thread_info.h>
22#include <asm/asm-offsets.h>
23
24/*
25 * SR fields.
26 */
27#define SR_ASID_MASK 0x00ff0000
28#define SR_FD_MASK 0x00008000
29#define SR_SS 0x08000000
30#define SR_BL 0x10000000
31#define SR_MD 0x40000000
32
33/*
34 * Event code.
35 */
36#define EVENT_INTERRUPT 0
37#define EVENT_FAULT_TLB 1
38#define EVENT_FAULT_NOT_TLB 2
39#define EVENT_DEBUG 3
40
41/* EXPEVT values */
42#define RESET_CAUSE 0x20
43#define DEBUGSS_CAUSE 0x980
44
45/*
46 * Frame layout. Quad index.
47 */
48#define FRAME_T(x) FRAME_TBASE+(x*8)
49#define FRAME_R(x) FRAME_RBASE+(x*8)
50#define FRAME_S(x) FRAME_SBASE+(x*8)
51#define FSPC 0
52#define FSSR 1
53#define FSYSCALL_ID 2
54
55/* Arrange the save frame to be a multiple of 32 bytes long */
56#define FRAME_SBASE 0
57#define FRAME_RBASE (FRAME_SBASE+(3*8)) /* SYSCALL_ID - SSR - SPC */
58#define FRAME_TBASE (FRAME_RBASE+(63*8)) /* r0 - r62 */
59#define FRAME_PBASE (FRAME_TBASE+(8*8)) /* tr0 -tr7 */
60#define FRAME_SIZE (FRAME_PBASE+(2*8)) /* pad0-pad1 */
61
62#define FP_FRAME_SIZE FP_FRAME_BASE+(33*8) /* dr0 - dr31 + fpscr */
63#define FP_FRAME_BASE 0
64
65#define SAVED_R2 0*8
66#define SAVED_R3 1*8
67#define SAVED_R4 2*8
68#define SAVED_R5 3*8
69#define SAVED_R18 4*8
70#define SAVED_R6 5*8
71#define SAVED_TR0 6*8
72
73/* These are the registers saved in the TLB path that aren't saved in the first
74 level of the normal one. */
75#define TLB_SAVED_R25 7*8
76#define TLB_SAVED_TR1 8*8
77#define TLB_SAVED_TR2 9*8
78#define TLB_SAVED_TR3 10*8
79#define TLB_SAVED_TR4 11*8
80/* Save R0/R1 : PT-migrating compiler currently dishonours -ffixed-r0 and -ffixed-r1 causing
81 breakage otherwise. */
82#define TLB_SAVED_R0 12*8
83#define TLB_SAVED_R1 13*8
84
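Viewed from C, the save-area layout implied by these offsets is as follows (a sketch for illustration only; no such struct exists in the source):

	struct sh64_reg_save {			/* offsets in units of 8 bytes */
		u64 r2, r3, r4, r5;		/* SAVED_R2..SAVED_R5    (0..3)   */
		u64 r18;			/* SAVED_R18             (4)      */
		u64 r6;				/* SAVED_R6              (5)      */
		u64 tr0;			/* SAVED_TR0             (6)      */
		/* TLB-miss path only: */
		u64 r25;			/* TLB_SAVED_R25         (7)      */
		u64 tr1, tr2, tr3, tr4;		/* TLB_SAVED_TR1..TR4    (8..11)  */
		u64 r0, r1;			/* TLB_SAVED_R0/R1       (12..13) */
	};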
85#define CLI() \
86 getcon SR, r6; \
87 ori r6, 0xf0, r6; \
88 putcon r6, SR;
89
90#define STI() \
91 getcon SR, r6; \
92 andi r6, ~0xf0, r6; \
93 putcon r6, SR;
94
95#ifdef CONFIG_PREEMPT
96# define preempt_stop() CLI()
97#else
98# define preempt_stop()
99# define resume_kernel restore_all
100#endif
101
102 .section .data, "aw"
103
104#define FAST_TLBMISS_STACK_CACHELINES 4
105#define FAST_TLBMISS_STACK_QUADWORDS (4*FAST_TLBMISS_STACK_CACHELINES)
106
107/* Register back-up area for all exceptions */
108 .balign 32
109 /* Allow for 16 quadwords to be pushed by fast tlbmiss handling
110 * register saves etc. */
111 .fill FAST_TLBMISS_STACK_QUADWORDS, 8, 0x0
112/* This is 32 byte aligned by construction */
113/* Register back-up area for all exceptions */
114reg_save_area:
115 .quad 0
116 .quad 0
117 .quad 0
118 .quad 0
119
120 .quad 0
121 .quad 0
122 .quad 0
123 .quad 0
124
125 .quad 0
126 .quad 0
127 .quad 0
128 .quad 0
129
130 .quad 0
131 .quad 0
132
133/* Save area for RESVEC exceptions. We cannot use reg_save_area because of
134 * reentrancy. Note this area may be accessed via physical address.
135 * Align so this fits a whole single cache line, for ease of purging.
136 */
137 .balign 32,0,32
138resvec_save_area:
139 .quad 0
140 .quad 0
141 .quad 0
142 .quad 0
143 .quad 0
144 .balign 32,0,32
145
146/* Jump table of 3rd level handlers */
147trap_jtable:
148 .long do_exception_error /* 0x000 */
149 .long do_exception_error /* 0x020 */
150 .long tlb_miss_load /* 0x040 */
151 .long tlb_miss_store /* 0x060 */
152 ! ARTIFICIAL pseudo-EXPEVT setting
153 .long do_debug_interrupt /* 0x080 */
154 .long tlb_miss_load /* 0x0A0 */
155 .long tlb_miss_store /* 0x0C0 */
156 .long do_address_error_load /* 0x0E0 */
157 .long do_address_error_store /* 0x100 */
158#ifdef CONFIG_SH_FPU
159 .long do_fpu_error /* 0x120 */
160#else
161 .long do_exception_error /* 0x120 */
162#endif
163 .long do_exception_error /* 0x140 */
164 .long system_call /* 0x160 */
165 .long do_reserved_inst /* 0x180 */
166 .long do_illegal_slot_inst /* 0x1A0 */
167 .long do_NMI /* 0x1C0 */
168 .long do_exception_error /* 0x1E0 */
169 .rept 15
170 .long do_IRQ /* 0x200 - 0x3C0 */
171 .endr
172 .long do_exception_error /* 0x3E0 */
173 .rept 32
174 .long do_IRQ /* 0x400 - 0x7E0 */
175 .endr
176 .long fpu_error_or_IRQA /* 0x800 */
177 .long fpu_error_or_IRQB /* 0x820 */
178 .long do_IRQ /* 0x840 */
179 .long do_IRQ /* 0x860 */
180 .rept 6
181 .long do_exception_error /* 0x880 - 0x920 */
182 .endr
183 .long do_software_break_point /* 0x940 */
184 .long do_exception_error /* 0x960 */
185 .long do_single_step /* 0x980 */
186
187 .rept 3
188 .long do_exception_error /* 0x9A0 - 0x9E0 */
189 .endr
190 .long do_IRQ /* 0xA00 */
191 .long do_IRQ /* 0xA20 */
192 .long itlb_miss_or_IRQ /* 0xA40 */
193 .long do_IRQ /* 0xA60 */
194 .long do_IRQ /* 0xA80 */
195 .long itlb_miss_or_IRQ /* 0xAA0 */
196 .long do_exception_error /* 0xAC0 */
197 .long do_address_error_exec /* 0xAE0 */
198 .rept 8
199 .long do_exception_error /* 0xB00 - 0xBE0 */
200 .endr
201 .rept 18
202 .long do_IRQ /* 0xC00 - 0xE20 */
203 .endr
204
205 .section .text64, "ax"
206
207/*
208 * --- Exception/Interrupt/Event Handling Section
209 */
210
211/*
212 * VBR and RESVEC blocks.
213 *
214 * First level handler for VBR-based exceptions.
215 *
216 * To avoid waste of space, align to the maximum text block size.
217 * This is assumed to be at most 128 bytes or 32 instructions.
218 * DO NOT EXCEED 32 instructions on the first level handlers !
219 *
220 * Also note that RESVEC is contained within the VBR block
221 * where the room left (1KB - TEXT_SIZE) allows placing
222 * the RESVEC block (at most 512B + TEXT_SIZE).
223 *
224 * So first (and only) level handler for RESVEC-based exceptions.
225 *
226 * Where the fault/interrupt is handled (not_a_tlb_miss, tlb_miss
227 * and interrupt) we are very tight on register space until
228 * saving onto the stack frame, which is done in handle_exception().
229 *
230 */
231
232#define TEXT_SIZE 128
233#define BLOCK_SIZE 1664 /* Dynamic check, 13*128 */
234
235 .balign TEXT_SIZE
236LVBR_block:
237 .space 256, 0 /* Power-on class handler, */
238 /* not required here */
239not_a_tlb_miss:
240 synco /* TAKum03020 (but probably a good idea anyway.) */
241 /* Save original stack pointer into KCR1 */
242 putcon SP, KCR1
243
244 /* Save other original registers into reg_save_area */
245 movi reg_save_area, SP
246 st.q SP, SAVED_R2, r2
247 st.q SP, SAVED_R3, r3
248 st.q SP, SAVED_R4, r4
249 st.q SP, SAVED_R5, r5
250 st.q SP, SAVED_R6, r6
251 st.q SP, SAVED_R18, r18
252 gettr tr0, r3
253 st.q SP, SAVED_TR0, r3
254
255 /* Set args for Non-debug, Not a TLB miss class handler */
256 getcon EXPEVT, r2
257 movi ret_from_exception, r3
258 ori r3, 1, r3
259 movi EVENT_FAULT_NOT_TLB, r4
260 or SP, ZERO, r5
261 getcon KCR1, SP
262 pta handle_exception, tr0
263 blink tr0, ZERO
264
265 .balign 256
266 ! VBR+0x200
267 nop
268 .balign 256
269 ! VBR+0x300
270 nop
271 .balign 256
272 /*
273 * Instead of the natural .balign 1024 place RESVEC here
274 * respecting the final 1KB alignment.
275 */
276 .balign TEXT_SIZE
277 /*
278 * Instead of '.space 1024-TEXT_SIZE' place the RESVEC
279 * block making sure the final alignment is correct.
280 */
281tlb_miss:
282 synco /* TAKum03020 (but probably a good idea anyway.) */
283 putcon SP, KCR1
284 movi reg_save_area, SP
285 /* SP is guaranteed 32-byte aligned. */
286 st.q SP, TLB_SAVED_R0 , r0
287 st.q SP, TLB_SAVED_R1 , r1
288 st.q SP, SAVED_R2 , r2
289 st.q SP, SAVED_R3 , r3
290 st.q SP, SAVED_R4 , r4
291 st.q SP, SAVED_R5 , r5
292 st.q SP, SAVED_R6 , r6
293 st.q SP, SAVED_R18, r18
294
295 /* Save R25 for safety; as/ld may want to use it to achieve the call to
296 * the code in mm/tlbmiss.c */
297 st.q SP, TLB_SAVED_R25, r25
298 gettr tr0, r2
299 gettr tr1, r3
300 gettr tr2, r4
301 gettr tr3, r5
302 gettr tr4, r18
303 st.q SP, SAVED_TR0 , r2
304 st.q SP, TLB_SAVED_TR1 , r3
305 st.q SP, TLB_SAVED_TR2 , r4
306 st.q SP, TLB_SAVED_TR3 , r5
307 st.q SP, TLB_SAVED_TR4 , r18
308
309 pt do_fast_page_fault, tr0
310 getcon SSR, r2
311 getcon EXPEVT, r3
312 getcon TEA, r4
313 shlri r2, 30, r2
314 andi r2, 1, r2 /* r2 = SSR.MD */
315 blink tr0, LINK
316
317 pt fixup_to_invoke_general_handler, tr1
318
319 /* If the fast path handler fixed the fault, just drop through quickly
320 to the restore code right away to return to the excepting context.
321 */
322 beqi/u r2, 0, tr1
323
324fast_tlb_miss_restore:
325 ld.q SP, SAVED_TR0, r2
326 ld.q SP, TLB_SAVED_TR1, r3
327 ld.q SP, TLB_SAVED_TR2, r4
328
329 ld.q SP, TLB_SAVED_TR3, r5
330 ld.q SP, TLB_SAVED_TR4, r18
331
332 ptabs r2, tr0
333 ptabs r3, tr1
334 ptabs r4, tr2
335 ptabs r5, tr3
336 ptabs r18, tr4
337
338 ld.q SP, TLB_SAVED_R0, r0
339 ld.q SP, TLB_SAVED_R1, r1
340 ld.q SP, SAVED_R2, r2
341 ld.q SP, SAVED_R3, r3
342 ld.q SP, SAVED_R4, r4
343 ld.q SP, SAVED_R5, r5
344 ld.q SP, SAVED_R6, r6
345 ld.q SP, SAVED_R18, r18
346 ld.q SP, TLB_SAVED_R25, r25
347
348 getcon KCR1, SP
349 rte
350 nop /* for safety, in case the code is run on sh5-101 cut1.x */
351
352fixup_to_invoke_general_handler:
353
354 /* OK, new method. Restore stuff that's not expected to get saved into
355 the 'first-level' reg save area, then just fall through to setting
356 up the registers and calling the second-level handler. */
357
358 /* 2nd level expects r2,3,4,5,6,18,tr0 to be saved. So we must restore
359 r25,tr1-4 and save r6 to get into the right state. */
360
361 ld.q SP, TLB_SAVED_TR1, r3
362 ld.q SP, TLB_SAVED_TR2, r4
363 ld.q SP, TLB_SAVED_TR3, r5
364 ld.q SP, TLB_SAVED_TR4, r18
365 ld.q SP, TLB_SAVED_R25, r25
366
367 ld.q SP, TLB_SAVED_R0, r0
368 ld.q SP, TLB_SAVED_R1, r1
369
370 ptabs/u r3, tr1
371 ptabs/u r4, tr2
372 ptabs/u r5, tr3
373 ptabs/u r18, tr4
374
375 /* Set args for Non-debug, TLB miss class handler */
376 getcon EXPEVT, r2
377 movi ret_from_exception, r3
378 ori r3, 1, r3
379 movi EVENT_FAULT_TLB, r4
380 or SP, ZERO, r5
381 getcon KCR1, SP
382 pta handle_exception, tr0
383 blink tr0, ZERO
384
385/* NB TAKE GREAT CARE HERE TO ENSURE THAT THE INTERRUPT CODE
386 DOES END UP AT VBR+0x600 */
387 nop
388 nop
389 nop
390 nop
391 nop
392 nop
393
394 .balign 256
395 /* VBR + 0x600 */
396
397interrupt:
398 synco /* TAKum03020 (but probably a good idea anyway.) */
399 /* Save original stack pointer into KCR1 */
400 putcon SP, KCR1
401
402 /* Save other original registers into reg_save_area */
403 movi reg_save_area, SP
404 st.q SP, SAVED_R2, r2
405 st.q SP, SAVED_R3, r3
406 st.q SP, SAVED_R4, r4
407 st.q SP, SAVED_R5, r5
408 st.q SP, SAVED_R6, r6
409 st.q SP, SAVED_R18, r18
410 gettr tr0, r3
411 st.q SP, SAVED_TR0, r3
412
413 /* Set args for interrupt class handler */
414 getcon INTEVT, r2
415 movi ret_from_irq, r3
416 ori r3, 1, r3
417 movi EVENT_INTERRUPT, r4
418 or SP, ZERO, r5
419 getcon KCR1, SP
420 pta handle_exception, tr0
421 blink tr0, ZERO
422 .balign TEXT_SIZE /* let's waste the bare minimum */
423
424LVBR_block_end: /* Marker. Used for total checking */
425
426 .balign 256
427LRESVEC_block:
428 /* Panic handler. Called with MMU off. Possible causes/actions:
429 * - Reset: Jump to program start.
430 * - Single Step: Turn off Single Step & return.
431 * - Others: Call panic handler, passing PC as arg.
432 * (this may need to be extended...)
433 */
434reset_or_panic:
435 synco /* TAKum03020 (but probably a good idea anyway.) */
436 putcon SP, DCR
437 /* First save r0-1 and tr0, as we need to use these */
438 movi resvec_save_area-CONFIG_CACHED_MEMORY_OFFSET, SP
439 st.q SP, 0, r0
440 st.q SP, 8, r1
441 gettr tr0, r0
442 st.q SP, 32, r0
443
444 /* Check cause */
445 getcon EXPEVT, r0
446 movi RESET_CAUSE, r1
447 sub r1, r0, r1 /* r1=0 if reset */
448 movi _stext-CONFIG_CACHED_MEMORY_OFFSET, r0
449 ori r0, 1, r0
450 ptabs r0, tr0
451 beqi r1, 0, tr0 /* Jump to start address if reset */
452
453 getcon EXPEVT, r0
454 movi DEBUGSS_CAUSE, r1
455 sub r1, r0, r1 /* r1=0 if single step */
456 pta single_step_panic, tr0
457 beqi r1, 0, tr0 /* jump if single step */
458
459 /* Now jump to where we save the registers. */
460 movi panic_stash_regs-CONFIG_CACHED_MEMORY_OFFSET, r1
461 ptabs r1, tr0
462 blink tr0, r63
463
464single_step_panic:
465 /* We are in a handler with Single Step set. We need to resume the
466	 * handler by turning on the MMU and turning off Single Step. */
467 getcon SSR, r0
468 movi SR_MMU, r1
469 or r0, r1, r0
470 movi ~SR_SS, r1
471 and r0, r1, r0
472 putcon r0, SSR
473 /* Restore EXPEVT, as the rte won't do this */
474 getcon PEXPEVT, r0
475 putcon r0, EXPEVT
476 /* Restore regs */
477 ld.q SP, 32, r0
478 ptabs r0, tr0
479 ld.q SP, 0, r0
480 ld.q SP, 8, r1
481 getcon DCR, SP
482 synco
483 rte
484
485
486 .balign 256
487debug_exception:
488 synco /* TAKum03020 (but probably a good idea anyway.) */
489 /*
490 * Single step/software_break_point first level handler.
491 * Called with MMU off, so the first thing we do is enable it
492 * by doing an rte with appropriate SSR.
493 */
494 putcon SP, DCR
495 /* Save SSR & SPC, together with R0 & R1, as we need to use 2 regs. */
496 movi resvec_save_area-CONFIG_CACHED_MEMORY_OFFSET, SP
497
498 /* With the MMU off, we are bypassing the cache, so purge any
499 * data that will be made stale by the following stores.
500 */
501 ocbp SP, 0
502 synco
503
504 st.q SP, 0, r0
505 st.q SP, 8, r1
506 getcon SPC, r0
507 st.q SP, 16, r0
508 getcon SSR, r0
509 st.q SP, 24, r0
510
511 /* Enable MMU, block exceptions, set priv mode, disable single step */
512 movi SR_MMU | SR_BL | SR_MD, r1
513 or r0, r1, r0
514 movi ~SR_SS, r1
515 and r0, r1, r0
516 putcon r0, SSR
517 /* Force control to debug_exception_2 when rte is executed */
518	movi debug_exception_2, r0
519 ori r0, 1, r0 /* force SHmedia, just in case */
520 putcon r0, SPC
521 getcon DCR, SP
522 synco
523 rte
524debug_exception_2:
525 /* Restore saved regs */
526 putcon SP, KCR1
527 movi resvec_save_area, SP
528 ld.q SP, 24, r0
529 putcon r0, SSR
530 ld.q SP, 16, r0
531 putcon r0, SPC
532 ld.q SP, 0, r0
533 ld.q SP, 8, r1
534
535 /* Save other original registers into reg_save_area */
536 movi reg_save_area, SP
537 st.q SP, SAVED_R2, r2
538 st.q SP, SAVED_R3, r3
539 st.q SP, SAVED_R4, r4
540 st.q SP, SAVED_R5, r5
541 st.q SP, SAVED_R6, r6
542 st.q SP, SAVED_R18, r18
543 gettr tr0, r3
544 st.q SP, SAVED_TR0, r3
545
546 /* Set args for debug class handler */
547 getcon EXPEVT, r2
548 movi ret_from_exception, r3
549 ori r3, 1, r3
550 movi EVENT_DEBUG, r4
551 or SP, ZERO, r5
552 getcon KCR1, SP
553 pta handle_exception, tr0
554 blink tr0, ZERO
555
556 .balign 256
557debug_interrupt:
558 /* !!! WE COME HERE IN REAL MODE !!! */
559	/* Hook up the debug interrupt to allow various debugging options to be
560 * hooked into its handler. */
561 /* Save original stack pointer into KCR1 */
562 synco
563 putcon SP, KCR1
564 movi resvec_save_area-CONFIG_CACHED_MEMORY_OFFSET, SP
565 ocbp SP, 0
566 ocbp SP, 32
567 synco
568
569	/* Save other original registers into resvec_save_area via real addresses */
570 st.q SP, SAVED_R2, r2
571 st.q SP, SAVED_R3, r3
572 st.q SP, SAVED_R4, r4
573 st.q SP, SAVED_R5, r5
574 st.q SP, SAVED_R6, r6
575 st.q SP, SAVED_R18, r18
576 gettr tr0, r3
577 st.q SP, SAVED_TR0, r3
578
579 /* move (spc,ssr)->(pspc,pssr). The rte will shift
580 them back again, so that they look like the originals
581 as far as the real handler code is concerned. */
582 getcon spc, r6
583 putcon r6, pspc
584 getcon ssr, r6
585 putcon r6, pssr
586
587 ! construct useful SR for handle_exception
588 movi 3, r6
589 shlli r6, 30, r6
590 getcon sr, r18
591 or r18, r6, r6
592 putcon r6, ssr
593
594 ! SSR is now the current SR with the MD and MMU bits set
595 ! i.e. the rte will switch back to priv mode and put
596 ! the mmu back on
597
598 ! construct spc
599 movi handle_exception, r18
600 ori r18, 1, r18 ! for safety (do we need this?)
601 putcon r18, spc
602
603 /* Set args for Non-debug, Not a TLB miss class handler */
604
605 ! EXPEVT==0x80 is unused, so 'steal' this value to put the
606 ! debug interrupt handler in the vectoring table
607 movi 0x80, r2
608 movi ret_from_exception, r3
609 ori r3, 1, r3
610 movi EVENT_FAULT_NOT_TLB, r4
611
612 or SP, ZERO, r5
613 movi CONFIG_CACHED_MEMORY_OFFSET, r6
614 add r6, r5, r5
615 getcon KCR1, SP
616
617 synco ! for safety
618 rte ! -> handle_exception, switch back to priv mode again
619
620LRESVEC_block_end: /* Marker. Unused. */
621
622 .balign TEXT_SIZE
623
624/*
625 * Second level handler for VBR-based exceptions. Pre-handler.
626 * In common to all stack-frame sensitive handlers.
627 *
628 * Inputs:
629 * (KCR0) Current [current task union]
630 * (KCR1) Original SP
631 * (r2) INTEVT/EXPEVT
632 * (r3) appropriate return address
633 * (r4) Event (0 = interrupt, 1 = TLB miss fault, 2 = Not TLB miss fault, 3=debug)
634 * (r5) Pointer to reg_save_area
635 * (SP) Original SP
636 *
637 * Available registers:
638 * (r6)
639 * (r18)
640 * (tr0)
641 *
642 */
643handle_exception:
644 /* Common 2nd level handler. */
645
646 /* First thing we need an appropriate stack pointer */
647 getcon SSR, r6
648 shlri r6, 30, r6
649 andi r6, 1, r6
650 pta stack_ok, tr0
651 bne r6, ZERO, tr0 /* Original stack pointer is fine */
652
653 /* Set stack pointer for user fault */
654 getcon KCR0, SP
655 movi THREAD_SIZE, r6 /* Point to the end */
656 add SP, r6, SP
657
658stack_ok:
659
660/* DEBUG : check for underflow/overflow of the kernel stack */
661 pta no_underflow, tr0
662 getcon KCR0, r6
663 movi 1024, r18
664 add r6, r18, r6
665 bge SP, r6, tr0 ! ? below 1k from bottom of stack : danger zone
666
667/* Just panic to cause a crash. */
668bad_sp:
669 ld.b r63, 0, r6
670 nop
671
672no_underflow:
673 pta bad_sp, tr0
674 getcon kcr0, r6
675 movi THREAD_SIZE, r18
676 add r18, r6, r6
677 bgt SP, r6, tr0 ! sp above the stack
678
679 /* Make some room for the BASIC frame. */
680 movi -(FRAME_SIZE), r6
681 add SP, r6, SP
682
683/* Could do this with no stalling if we had another spare register, but the
684 code below will be OK. */
685 ld.q r5, SAVED_R2, r6
686 ld.q r5, SAVED_R3, r18
687 st.q SP, FRAME_R(2), r6
688 ld.q r5, SAVED_R4, r6
689 st.q SP, FRAME_R(3), r18
690 ld.q r5, SAVED_R5, r18
691 st.q SP, FRAME_R(4), r6
692 ld.q r5, SAVED_R6, r6
693 st.q SP, FRAME_R(5), r18
694 ld.q r5, SAVED_R18, r18
695 st.q SP, FRAME_R(6), r6
696 ld.q r5, SAVED_TR0, r6
697 st.q SP, FRAME_R(18), r18
698 st.q SP, FRAME_T(0), r6
699
700 /* Keep old SP around */
701 getcon KCR1, r6
702
703 /* Save the rest of the general purpose registers */
704 st.q SP, FRAME_R(0), r0
705 st.q SP, FRAME_R(1), r1
706 st.q SP, FRAME_R(7), r7
707 st.q SP, FRAME_R(8), r8
708 st.q SP, FRAME_R(9), r9
709 st.q SP, FRAME_R(10), r10
710 st.q SP, FRAME_R(11), r11
711 st.q SP, FRAME_R(12), r12
712 st.q SP, FRAME_R(13), r13
713 st.q SP, FRAME_R(14), r14
714
715 /* SP is somewhere else */
716 st.q SP, FRAME_R(15), r6
717
718 st.q SP, FRAME_R(16), r16
719 st.q SP, FRAME_R(17), r17
720 /* r18 is saved earlier. */
721 st.q SP, FRAME_R(19), r19
722 st.q SP, FRAME_R(20), r20
723 st.q SP, FRAME_R(21), r21
724 st.q SP, FRAME_R(22), r22
725 st.q SP, FRAME_R(23), r23
726 st.q SP, FRAME_R(24), r24
727 st.q SP, FRAME_R(25), r25
728 st.q SP, FRAME_R(26), r26
729 st.q SP, FRAME_R(27), r27
730 st.q SP, FRAME_R(28), r28
731 st.q SP, FRAME_R(29), r29
732 st.q SP, FRAME_R(30), r30
733 st.q SP, FRAME_R(31), r31
734 st.q SP, FRAME_R(32), r32
735 st.q SP, FRAME_R(33), r33
736 st.q SP, FRAME_R(34), r34
737 st.q SP, FRAME_R(35), r35
738 st.q SP, FRAME_R(36), r36
739 st.q SP, FRAME_R(37), r37
740 st.q SP, FRAME_R(38), r38
741 st.q SP, FRAME_R(39), r39
742 st.q SP, FRAME_R(40), r40
743 st.q SP, FRAME_R(41), r41
744 st.q SP, FRAME_R(42), r42
745 st.q SP, FRAME_R(43), r43
746 st.q SP, FRAME_R(44), r44
747 st.q SP, FRAME_R(45), r45
748 st.q SP, FRAME_R(46), r46
749 st.q SP, FRAME_R(47), r47
750 st.q SP, FRAME_R(48), r48
751 st.q SP, FRAME_R(49), r49
752 st.q SP, FRAME_R(50), r50
753 st.q SP, FRAME_R(51), r51
754 st.q SP, FRAME_R(52), r52
755 st.q SP, FRAME_R(53), r53
756 st.q SP, FRAME_R(54), r54
757 st.q SP, FRAME_R(55), r55
758 st.q SP, FRAME_R(56), r56
759 st.q SP, FRAME_R(57), r57
760 st.q SP, FRAME_R(58), r58
761 st.q SP, FRAME_R(59), r59
762 st.q SP, FRAME_R(60), r60
763 st.q SP, FRAME_R(61), r61
764 st.q SP, FRAME_R(62), r62
765
766 /*
767 * Save the S* registers.
768 */
769 getcon SSR, r61
770 st.q SP, FRAME_S(FSSR), r61
771 getcon SPC, r62
772 st.q SP, FRAME_S(FSPC), r62
773 movi -1, r62 /* Reset syscall_nr */
774 st.q SP, FRAME_S(FSYSCALL_ID), r62
775
776 /* Save the rest of the target registers */
777 gettr tr1, r6
778 st.q SP, FRAME_T(1), r6
779 gettr tr2, r6
780 st.q SP, FRAME_T(2), r6
781 gettr tr3, r6
782 st.q SP, FRAME_T(3), r6
783 gettr tr4, r6
784 st.q SP, FRAME_T(4), r6
785 gettr tr5, r6
786 st.q SP, FRAME_T(5), r6
787 gettr tr6, r6
788 st.q SP, FRAME_T(6), r6
789 gettr tr7, r6
790 st.q SP, FRAME_T(7), r6
791
792 ! setup FP so that unwinder can wind back through nested kernel mode
793 ! exceptions
794 add SP, ZERO, r14
795
796#ifdef CONFIG_POOR_MANS_STRACE
797 /* We've pushed all the registers now, so only r2-r4 hold anything
798 * useful. Move them into callee save registers */
799 or r2, ZERO, r28
800 or r3, ZERO, r29
801 or r4, ZERO, r30
802
803 /* Preserve r2 as the event code */
804 movi evt_debug, r3
805 ori r3, 1, r3
806 ptabs r3, tr0
807
808 or SP, ZERO, r6
809 getcon TRA, r5
810 blink tr0, LINK
811
812 or r28, ZERO, r2
813 or r29, ZERO, r3
814 or r30, ZERO, r4
815#endif
816
817 /* For syscall and debug race condition, get TRA now */
818 getcon TRA, r5
819
820 /* We are in a safe position to turn SR.BL off, but set IMASK=0xf
821 * Also set FD, to catch FPU usage in the kernel.
822 *
823 * benedict.gaster@superh.com 29/07/2002
824 *
825 * On all SH5-101 revisions it is unsafe to raise the IMASK and at the
826 * same time change BL from 1->0, as any pending interrupt of a level
827	 * higher than the previous value of IMASK will leak through and be
828 * taken unexpectedly.
829 *
830 * To avoid this we raise the IMASK and then issue another PUTCON to
831 * enable interrupts.
832 */
833 getcon SR, r6
834 movi SR_IMASK | SR_FD, r7
835 or r6, r7, r6
836 putcon r6, SR
837 movi SR_UNBLOCK_EXC, r7
838 and r6, r7, r6
839 putcon r6, SR
840
841
842 /* Now call the appropriate 3rd level handler */
843 or r3, ZERO, LINK
844 movi trap_jtable, r3
845 shlri r2, 3, r2
846 ldx.l r2, r3, r3
847 shlri r2, 2, r2
848 ptabs r3, tr0
849 or SP, ZERO, r3
850 blink tr0, ZERO
851
852/*
853 * Second level handler for VBR-based exceptions. Post-handlers.
854 *
855 * Post-handlers for interrupts (ret_from_irq), exceptions
856 * (ret_from_exception) and common reentrance doors (restore_all
857 * to get back to the original context, ret_from_syscall loop to
858 * check kernel exiting).
859 *
860 * ret_with_reschedule and work_notifysig are inner labels of
861 * the ret_from_syscall loop.
862 *
863 * In common to all stack-frame sensitive handlers.
864 *
865 * Inputs:
866 * (SP) struct pt_regs *, original register's frame pointer (basic)
867 *
868 */
869 .global ret_from_irq
870ret_from_irq:
871#ifdef CONFIG_POOR_MANS_STRACE
872 pta evt_debug_ret_from_irq, tr0
873 ori SP, 0, r2
874 blink tr0, LINK
875#endif
876 ld.q SP, FRAME_S(FSSR), r6
877 shlri r6, 30, r6
878 andi r6, 1, r6
879 pta resume_kernel, tr0
880 bne r6, ZERO, tr0 /* no further checks */
881 STI()
882 pta ret_with_reschedule, tr0
883 blink tr0, ZERO /* Do not check softirqs */
884
885 .global ret_from_exception
886ret_from_exception:
887 preempt_stop()
888
889#ifdef CONFIG_POOR_MANS_STRACE
890 pta evt_debug_ret_from_exc, tr0
891 ori SP, 0, r2
892 blink tr0, LINK
893#endif
894
895 ld.q SP, FRAME_S(FSSR), r6
896 shlri r6, 30, r6
897 andi r6, 1, r6
898 pta resume_kernel, tr0
899 bne r6, ZERO, tr0 /* no further checks */
900
901 /* Check softirqs */
902
903#ifdef CONFIG_PREEMPT
904 pta ret_from_syscall, tr0
905 blink tr0, ZERO
906
907resume_kernel:
908 pta restore_all, tr0
909
910 getcon KCR0, r6
911 ld.l r6, TI_PRE_COUNT, r7
912 beq/u r7, ZERO, tr0
913
914need_resched:
915 ld.l r6, TI_FLAGS, r7
916 movi (1 << TIF_NEED_RESCHED), r8
917 and r8, r7, r8
918 bne r8, ZERO, tr0
919
920 getcon SR, r7
921 andi r7, 0xf0, r7
922 bne r7, ZERO, tr0
923
924 movi ((PREEMPT_ACTIVE >> 16) & 65535), r8
925 shori (PREEMPT_ACTIVE & 65535), r8
926 st.l r6, TI_PRE_COUNT, r8
927
928 STI()
929 movi schedule, r7
930 ori r7, 1, r7
931 ptabs r7, tr1
932 blink tr1, LINK
933
934 st.l r6, TI_PRE_COUNT, ZERO
935 CLI()
936
937 pta need_resched, tr1
938 blink tr1, ZERO
939#endif
940
941 .global ret_from_syscall
942ret_from_syscall:
943
944ret_with_reschedule:
945 getcon KCR0, r6 ! r6 contains current_thread_info
946 ld.l r6, TI_FLAGS, r7 ! r7 contains current_thread_info->flags
947
948 ! FIXME:!!!
949 ! no handling of TIF_SYSCALL_TRACE yet!!
950
951 movi (1 << TIF_NEED_RESCHED), r8
952 and r8, r7, r8
953 pta work_resched, tr0
954 bne r8, ZERO, tr0
955
956 pta restore_all, tr1
957
958 movi (1 << TIF_SIGPENDING), r8
959 and r8, r7, r8
960 pta work_notifysig, tr0
961 bne r8, ZERO, tr0
962
963 blink tr1, ZERO
964
965work_resched:
966 pta ret_from_syscall, tr0
967 gettr tr0, LINK
968 movi schedule, r6
969 ptabs r6, tr0
970 blink tr0, ZERO /* Call schedule(), return on top */
971
972work_notifysig:
973 gettr tr1, LINK
974
975 movi do_signal, r6
976 ptabs r6, tr0
977 or SP, ZERO, r2
978 or ZERO, ZERO, r3
979 blink tr0, LINK /* Call do_signal(regs, 0), return here */
980
981restore_all:
982 /* Do prefetches */
983
984 ld.q SP, FRAME_T(0), r6
985 ld.q SP, FRAME_T(1), r7
986 ld.q SP, FRAME_T(2), r8
987 ld.q SP, FRAME_T(3), r9
988 ptabs r6, tr0
989 ptabs r7, tr1
990 ptabs r8, tr2
991 ptabs r9, tr3
992 ld.q SP, FRAME_T(4), r6
993 ld.q SP, FRAME_T(5), r7
994 ld.q SP, FRAME_T(6), r8
995 ld.q SP, FRAME_T(7), r9
996 ptabs r6, tr4
997 ptabs r7, tr5
998 ptabs r8, tr6
999 ptabs r9, tr7
1000
1001 ld.q SP, FRAME_R(0), r0
1002 ld.q SP, FRAME_R(1), r1
1003 ld.q SP, FRAME_R(2), r2
1004 ld.q SP, FRAME_R(3), r3
1005 ld.q SP, FRAME_R(4), r4
1006 ld.q SP, FRAME_R(5), r5
1007 ld.q SP, FRAME_R(6), r6
1008 ld.q SP, FRAME_R(7), r7
1009 ld.q SP, FRAME_R(8), r8
1010 ld.q SP, FRAME_R(9), r9
1011 ld.q SP, FRAME_R(10), r10
1012 ld.q SP, FRAME_R(11), r11
1013 ld.q SP, FRAME_R(12), r12
1014 ld.q SP, FRAME_R(13), r13
1015 ld.q SP, FRAME_R(14), r14
1016
1017 ld.q SP, FRAME_R(16), r16
1018 ld.q SP, FRAME_R(17), r17
1019 ld.q SP, FRAME_R(18), r18
1020 ld.q SP, FRAME_R(19), r19
1021 ld.q SP, FRAME_R(20), r20
1022 ld.q SP, FRAME_R(21), r21
1023 ld.q SP, FRAME_R(22), r22
1024 ld.q SP, FRAME_R(23), r23
1025 ld.q SP, FRAME_R(24), r24
1026 ld.q SP, FRAME_R(25), r25
1027 ld.q SP, FRAME_R(26), r26
1028 ld.q SP, FRAME_R(27), r27
1029 ld.q SP, FRAME_R(28), r28
1030 ld.q SP, FRAME_R(29), r29
1031 ld.q SP, FRAME_R(30), r30
1032 ld.q SP, FRAME_R(31), r31
1033 ld.q SP, FRAME_R(32), r32
1034 ld.q SP, FRAME_R(33), r33
1035 ld.q SP, FRAME_R(34), r34
1036 ld.q SP, FRAME_R(35), r35
1037 ld.q SP, FRAME_R(36), r36
1038 ld.q SP, FRAME_R(37), r37
1039 ld.q SP, FRAME_R(38), r38
1040 ld.q SP, FRAME_R(39), r39
1041 ld.q SP, FRAME_R(40), r40
1042 ld.q SP, FRAME_R(41), r41
1043 ld.q SP, FRAME_R(42), r42
1044 ld.q SP, FRAME_R(43), r43
1045 ld.q SP, FRAME_R(44), r44
1046 ld.q SP, FRAME_R(45), r45
1047 ld.q SP, FRAME_R(46), r46
1048 ld.q SP, FRAME_R(47), r47
1049 ld.q SP, FRAME_R(48), r48
1050 ld.q SP, FRAME_R(49), r49
1051 ld.q SP, FRAME_R(50), r50
1052 ld.q SP, FRAME_R(51), r51
1053 ld.q SP, FRAME_R(52), r52
1054 ld.q SP, FRAME_R(53), r53
1055 ld.q SP, FRAME_R(54), r54
1056 ld.q SP, FRAME_R(55), r55
1057 ld.q SP, FRAME_R(56), r56
1058 ld.q SP, FRAME_R(57), r57
1059 ld.q SP, FRAME_R(58), r58
1060
1061 getcon SR, r59
1062 movi SR_BLOCK_EXC, r60
1063 or r59, r60, r59
1064 putcon r59, SR /* SR.BL = 1, keep nesting out */
1065 ld.q SP, FRAME_S(FSSR), r61
1066 ld.q SP, FRAME_S(FSPC), r62
1067 movi SR_ASID_MASK, r60
1068 and r59, r60, r59
1069 andc r61, r60, r61 /* Clear out older ASID */
1070 or r59, r61, r61 /* Retain current ASID */
1071 putcon r61, SSR
1072 putcon r62, SPC
1073
1074 /* Ignore FSYSCALL_ID */
1075
1076 ld.q SP, FRAME_R(59), r59
1077 ld.q SP, FRAME_R(60), r60
1078 ld.q SP, FRAME_R(61), r61
1079 ld.q SP, FRAME_R(62), r62
1080
1081 /* Last touch */
1082 ld.q SP, FRAME_R(15), SP
1083 rte
1084 nop
1085
1086/*
1087 * Third level handlers for VBR-based exceptions. Adapting args to
1088 * and/or deflecting to fourth level handlers.
1089 *
1090 * Fourth level handlers interface.
1091 * Most are C-coded handlers directly pointed by the trap_jtable.
1092 * (Third = Fourth level)
1093 * Inputs:
1094 * (r2) fault/interrupt code, entry number (e.g. NMI = 14,
1095 * IRL0-3 (0000) = 16, RTLBMISS = 2, SYSCALL = 11, etc ...)
1096 * (r3) struct pt_regs *, original register's frame pointer
1097 * (r4) Event (0 = interrupt, 1 = TLB miss fault, 2 = Not TLB miss fault)
1098 * (r5) TRA control register (for syscall/debug benefit only)
1099 * (LINK) return address
1100 * (SP) = r3
1101 *
1102 * Kernel TLB fault handlers will get a slightly different interface.
1103 * (r2) struct pt_regs *, original register's frame pointer
1104 * (r3) writeaccess, whether it's a store fault as opposed to load fault
1105 * (r4) execaccess, whether it's a ITLB fault as opposed to DTLB fault
1106 * (r5) Effective Address of fault
1107 * (LINK) return address
1108 * (SP) = r2
1109 *
1110 * fpu_error_or_IRQ? is a helper to deflect to the right cause.
1111 *
1112 */
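In C terms, the fourth-level handlers invoked through this interface have shapes along these lines (prototypes are a sketch inferred from the register conventions above, not declarations taken from this patch):

	asmlinkage void do_IRQ(unsigned long vec, struct pt_regs *regs);
	asmlinkage void do_page_fault(struct pt_regs *regs, int writeaccess,
				      int execaccess, unsigned long address);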
1113tlb_miss_load:
1114 or SP, ZERO, r2
1115 or ZERO, ZERO, r3 /* Read */
1116 or ZERO, ZERO, r4 /* Data */
1117 getcon TEA, r5
1118 pta call_do_page_fault, tr0
1119 beq ZERO, ZERO, tr0
1120
1121tlb_miss_store:
1122 or SP, ZERO, r2
1123 movi 1, r3 /* Write */
1124 or ZERO, ZERO, r4 /* Data */
1125 getcon TEA, r5
1126 pta call_do_page_fault, tr0
1127 beq ZERO, ZERO, tr0
1128
1129itlb_miss_or_IRQ:
1130 pta its_IRQ, tr0
1131 beqi/u r4, EVENT_INTERRUPT, tr0
1132 or SP, ZERO, r2
1133 or ZERO, ZERO, r3 /* Read */
1134 movi 1, r4 /* Text */
1135 getcon TEA, r5
1136 /* Fall through */
1137
1138call_do_page_fault:
1139 movi do_page_fault, r6
1140 ptabs r6, tr0
1141 blink tr0, ZERO
1142
1143fpu_error_or_IRQA:
1144 pta its_IRQ, tr0
1145 beqi/l r4, EVENT_INTERRUPT, tr0
1146#ifdef CONFIG_SH_FPU
1147 movi do_fpu_state_restore, r6
1148#else
1149 movi do_exception_error, r6
1150#endif
1151 ptabs r6, tr0
1152 blink tr0, ZERO
1153
1154fpu_error_or_IRQB:
1155 pta its_IRQ, tr0
1156 beqi/l r4, EVENT_INTERRUPT, tr0
1157#ifdef CONFIG_SH_FPU
1158 movi do_fpu_state_restore, r6
1159#else
1160 movi do_exception_error, r6
1161#endif
1162 ptabs r6, tr0
1163 blink tr0, ZERO
1164
1165its_IRQ:
1166 movi do_IRQ, r6
1167 ptabs r6, tr0
1168 blink tr0, ZERO
1169
1170/*
1171 * system_call/unknown_trap third level handler:
1172 *
1173 * Inputs:
1174 * (r2) fault/interrupt code, entry number (TRAP = 11)
1175 * (r3) struct pt_regs *, original register's frame pointer
1176 * (r4) Not used. Event (0=interrupt, 1=TLB miss fault, 2=Not TLB miss fault)
1177 * (r5) TRA Control Reg (0x00xyzzzz: x=1 SYSCALL, y = #args, z=nr)
1178 * (SP) = r3
1179 * (LINK) return address: ret_from_exception
1180 * (*r3) Syscall parms: SC#, arg0, arg1, ..., arg5 in order (Saved r2/r7)
1181 *
1182 * Outputs:
1183 * (*r3) Syscall reply (Saved r2)
1184 * (LINK) In case of syscall only it can be scrapped.
1185 * Common second level post handler will be ret_from_syscall.
1186 * Common (non-trace) exit point to that is syscall_ret (saving
1187 * result to r2). Common bad exit point is syscall_bad (returning
1188 * ENOSYS then saved to r2).
1189 *
1190 */
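The TRA decoding that system_call performs below can be written in C as (a sketch matching the 0x00xyzzzz description above):

	/* x nibble (bits 20..23) == 1 marks a syscall; the low 9 bits carry the number */
	static inline int tra_is_syscall(unsigned long tra)
	{
		return (tra >> 20) == 1;
	}

	static inline unsigned int tra_syscall_nr(unsigned long tra)
	{
		return tra & 0x1ff;
	}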
1191
1192unknown_trap:
1193 /* Unknown Trap or User Trace */
1194 movi do_unknown_trapa, r6
1195 ptabs r6, tr0
1196 ld.q r3, FRAME_R(9), r2 /* r2 = #arg << 16 | syscall # */
1197 andi r2, 0x1ff, r2 /* r2 = syscall # */
1198 blink tr0, LINK
1199
1200 pta syscall_ret, tr0
1201 blink tr0, ZERO
1202
1203 /* New syscall implementation*/
1204system_call:
1205 pta unknown_trap, tr0
1206 or r5, ZERO, r4 /* TRA (=r5) -> r4 */
1207 shlri r4, 20, r4
1208 bnei r4, 1, tr0 /* unknown_trap if not 0x1yzzzz */
1209
1210 /* It's a system call */
1211 st.q r3, FRAME_S(FSYSCALL_ID), r5 /* ID (0x1yzzzz) -> stack */
1212 andi r5, 0x1ff, r5 /* syscall # -> r5 */
1213
1214 STI()
1215
1216 pta syscall_allowed, tr0
1217 movi NR_syscalls - 1, r4 /* Last valid */
1218 bgeu/l r4, r5, tr0
1219
1220syscall_bad:
1221 /* Return ENOSYS ! */
1222 movi -(ENOSYS), r2 /* Fall-through */
1223
1224 .global syscall_ret
1225syscall_ret:
1226 st.q SP, FRAME_R(9), r2 /* Expecting SP back to BASIC frame */
1227
1228#ifdef CONFIG_POOR_MANS_STRACE
1229 /* nothing useful in registers at this point */
1230
1231 movi evt_debug2, r5
1232 ori r5, 1, r5
1233 ptabs r5, tr0
1234 ld.q SP, FRAME_R(9), r2
1235 or SP, ZERO, r3
1236 blink tr0, LINK
1237#endif
1238
1239 ld.q SP, FRAME_S(FSPC), r2
1240 addi r2, 4, r2 /* Move PC, being pre-execution event */
1241 st.q SP, FRAME_S(FSPC), r2
1242 pta ret_from_syscall, tr0
1243 blink tr0, ZERO
1244
1245
1246/* A different return path for ret_from_fork, because we now need
1247 * to call schedule_tail with the later kernels. Since prev is
1248 * loaded into r2 by switch_to(), we can call it straight away.
1249 */
1250
1251.global ret_from_fork
1252ret_from_fork:
1253
1254 movi schedule_tail,r5
1255 ori r5, 1, r5
1256 ptabs r5, tr0
1257 blink tr0, LINK
1258
1259#ifdef CONFIG_POOR_MANS_STRACE
1260 /* nothing useful in registers at this point */
1261
1262 movi evt_debug2, r5
1263 ori r5, 1, r5
1264 ptabs r5, tr0
1265 ld.q SP, FRAME_R(9), r2
1266 or SP, ZERO, r3
1267 blink tr0, LINK
1268#endif
1269
1270 ld.q SP, FRAME_S(FSPC), r2
1271 addi r2, 4, r2 /* Move PC, being pre-execution event */
1272 st.q SP, FRAME_S(FSPC), r2
1273 pta ret_from_syscall, tr0
1274 blink tr0, ZERO
1275
1276
1277
1278syscall_allowed:
1279 /* Use LINK to deflect the exit point, default is syscall_ret */
1280 pta syscall_ret, tr0
1281 gettr tr0, LINK
1282 pta syscall_notrace, tr0
1283
1284 getcon KCR0, r2
1285 ld.l r2, TI_FLAGS, r4
1286 movi (1 << TIF_SYSCALL_TRACE), r6
1287 and r6, r4, r6
1288 beq/l r6, ZERO, tr0
1289
1290 /* Trace it by calling syscall_trace before and after */
1291 movi syscall_trace, r4
1292 ptabs r4, tr0
1293 blink tr0, LINK
1294 /* Reload syscall number as r5 is trashed by syscall_trace */
1295 ld.q SP, FRAME_S(FSYSCALL_ID), r5
1296 andi r5, 0x1ff, r5
1297
1298 pta syscall_ret_trace, tr0
1299 gettr tr0, LINK
1300
1301syscall_notrace:
1302 /* Now point to the appropriate 4th level syscall handler */
1303 movi sys_call_table, r4
1304 shlli r5, 2, r5
1305 ldx.l r4, r5, r5
1306 ptabs r5, tr0
1307
1308 /* Prepare original args */
1309 ld.q SP, FRAME_R(2), r2
1310 ld.q SP, FRAME_R(3), r3
1311 ld.q SP, FRAME_R(4), r4
1312 ld.q SP, FRAME_R(5), r5
1313 ld.q SP, FRAME_R(6), r6
1314 ld.q SP, FRAME_R(7), r7
1315
1316 /* And now the trick for those syscalls requiring regs * ! */
1317 or SP, ZERO, r8
1318
1319 /* Call it */
1320 blink tr0, ZERO /* LINK is already properly set */
1321
1322syscall_ret_trace:
1323 /* We get back here only if under trace */
1324 st.q SP, FRAME_R(9), r2 /* Save return value */
1325
1326 movi syscall_trace, LINK
1327 ptabs LINK, tr0
1328 blink tr0, LINK
1329
1330 /* This needs to be done after any syscall tracing */
1331 ld.q SP, FRAME_S(FSPC), r2
1332 addi r2, 4, r2 /* Move PC, being pre-execution event */
1333 st.q SP, FRAME_S(FSPC), r2
1334
1335 pta ret_from_syscall, tr0
1336 blink tr0, ZERO /* Resume normal return sequence */
1337
1338/*
1339 * --- Switch to running under a particular ASID and return the previous ASID value
1340 * --- The caller is assumed to have done a cli before calling this.
1341 *
1342 * Input r2 : new ASID
1343 * Output r2 : old ASID
1344 */
1345
1346 .global switch_and_save_asid
1347switch_and_save_asid:
1348 getcon sr, r0
1349 movi 255, r4
1350 shlli r4, 16, r4 /* r4 = mask to select ASID */
1351 and r0, r4, r3 /* r3 = shifted old ASID */
1352 andi r2, 255, r2 /* mask down new ASID */
1353 shlli r2, 16, r2 /* align new ASID against SR.ASID */
1354 andc r0, r4, r0 /* efface old ASID from SR */
1355 or r0, r2, r0 /* insert the new ASID */
1356 putcon r0, ssr
1357 movi 1f, r0
1358 putcon r0, spc
1359 rte
1360 nop
13611:
1362 ptabs LINK, tr0
1363 shlri r3, 16, r2 /* r2 = old ASID */
1364 blink tr0, r63
1365
1366 .global route_to_panic_handler
1367route_to_panic_handler:
1368 /* Switch to real mode, goto panic_handler, don't return. Useful for
1369 last-chance debugging, e.g. if no output wants to go to the console.
1370 */
1371
1372 movi panic_handler - CONFIG_CACHED_MEMORY_OFFSET, r1
1373 ptabs r1, tr0
1374 pta 1f, tr1
1375 gettr tr1, r0
1376 putcon r0, spc
1377 getcon sr, r0
1378 movi 1, r1
1379 shlli r1, 31, r1
1380 andc r0, r1, r0
1381 putcon r0, ssr
1382 rte
1383 nop
13841: /* Now in real mode */
1385 blink tr0, r63
1386 nop
1387
1388 .global peek_real_address_q
1389peek_real_address_q:
1390 /* Two args:
1391 r2 : real mode address to peek
1392 r2(out) : result quadword
1393
1394 This is provided as a cheapskate way of manipulating device
1395 registers for debugging (to avoid the need to onchip_remap the debug
1396 module, and to avoid the need to onchip_remap the watchpoint
1397 controller in a way that identity maps sufficient bits to avoid the
1398 SH5-101 cut2 silicon defect).
1399
1400 This code is not performance critical
1401 */
1402
1403 add.l r2, r63, r2 /* sign extend address */
1404 getcon sr, r0 /* r0 = saved original SR */
1405 movi 1, r1
1406 shlli r1, 28, r1
1407 or r0, r1, r1 /* r0 with block bit set */
1408 putcon r1, sr /* now in critical section */
1409 movi 1, r36
1410 shlli r36, 31, r36
1411 andc r1, r36, r1 /* turn sr.mmu off in real mode section */
1412
1413 putcon r1, ssr
1414 movi .peek0 - CONFIG_CACHED_MEMORY_OFFSET, r36 /* real mode target address */
1415 movi 1f, r37 /* virtual mode return addr */
1416 putcon r36, spc
1417
1418 synco
1419 rte
1420 nop
1421
1422.peek0: /* come here in real mode, don't touch caches!!
1423 still in critical section (sr.bl==1) */
1424 putcon r0, ssr
1425 putcon r37, spc
1426 /* Here's the actual peek. If the address is bad, all bets are now off
1427 * what will happen (handlers invoked in real-mode = bad news) */
1428 ld.q r2, 0, r2
1429 synco
1430 rte /* Back to virtual mode */
1431 nop
1432
14331:
1434 ptabs LINK, tr0
1435 blink tr0, r63
1436
1437 .global poke_real_address_q
1438poke_real_address_q:
1439 /* Two args:
1440 r2 : real mode address to poke
1441 r3 : quadword value to write.
1442
1443 This is provided as a cheapskate way of manipulating device
1444 registers for debugging (to avoid the need to onchip_remap the debug
1445 module, and to avoid the need to onchip_remap the watchpoint
1446 controller in a way that identity maps sufficient bits to avoid the
1447 SH5-101 cut2 silicon defect).
1448
1449 This code is not performance critical
1450 */
1451
1452 add.l r2, r63, r2 /* sign extend address */
1453 getcon sr, r0 /* r0 = saved original SR */
1454 movi 1, r1
1455 shlli r1, 28, r1
1456 or r0, r1, r1 /* r0 with block bit set */
1457 putcon r1, sr /* now in critical section */
1458 movi 1, r36
1459 shlli r36, 31, r36
1460 andc r1, r36, r1 /* turn sr.mmu off in real mode section */
1461
1462 putcon r1, ssr
1463 movi .poke0-CONFIG_CACHED_MEMORY_OFFSET, r36 /* real mode target address */
1464 movi 1f, r37 /* virtual mode return addr */
1465 putcon r36, spc
1466
1467 synco
1468 rte
1469 nop
1470
1471.poke0: /* come here in real mode, don't touch caches!!
1472 still in critical section (sr.bl==1) */
1473 putcon r0, ssr
1474 putcon r37, spc
1475 /* Here's the actual poke. If the address is bad, all bets are now off
1476 * what will happen (handlers invoked in real-mode = bad news) */
1477 st.q r2, 0, r3
1478 synco
1479 rte /* Back to virtual mode */
1480 nop
1481
14821:
1483 ptabs LINK, tr0
1484 blink tr0, r63
1485
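Hypothetical C-side debug usage of these two helpers (the prototypes are assumed; this patch does not declare them in a header):

	extern unsigned long long peek_real_address_q(unsigned long addr);
	extern void poke_real_address_q(unsigned long addr, unsigned long long val);

	static void dump_phys_quad(unsigned long phys)
	{
		printk("0x%08lx: 0x%016llx\n", phys, peek_real_address_q(phys));
	}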
1486/*
1487 * --- User Access Handling Section
1488 */
1489
1490/*
1491 * User Access support. It all moved to non inlined Assembler
1492 * functions in here.
1493 *
1494 * __kernel_size_t __copy_user(void *__to, const void *__from,
1495 * __kernel_size_t __n)
1496 *
1497 * Inputs:
1498 * (r2) target address
1499 * (r3) source address
1500 * (r4) size in bytes
1501 *
1502 * Outputs:
1503 * (*r2) target data
1504 * (r2) non-copied bytes
1505 *
1506 * If a fault occurs on the user pointer, bail out early and return the
1507 * number of bytes not copied in r2.
1508 * Strategy: for large blocks, call a real memcpy function which can
1509 * move >1 byte at a time using unaligned ld/st instructions, and can
1510 * manipulate the cache using prefetch + alloco to improve the speed
1511 * further. If a fault occurs in that function, just revert to the
1512 * byte-by-byte approach used for small blocks; this is rare so the
1513 * performance hit for that case does not matter.
1514 *
1515 * For small blocks it's not worth the overhead of setting up and calling
1516 * the memcpy routine; do the copy a byte at a time.
1517 *
1518 */
1519 .global __copy_user
1520__copy_user:
1521 pta __copy_user_byte_by_byte, tr1
1522 movi 16, r0 ! this value is a best guess, should tune it by benchmarking
1523 bge/u r0, r4, tr1
1524 pta copy_user_memcpy, tr0
1525 addi SP, -32, SP
1526 /* Save arguments in case we have to fix-up unhandled page fault */
1527 st.q SP, 0, r2
1528 st.q SP, 8, r3
1529 st.q SP, 16, r4
1530 st.q SP, 24, r35 ! r35 is callee-save
1531 /* Save LINK in a register to reduce RTS time later (otherwise
1532 ld SP,*,LINK;ptabs LINK;trn;blink trn,r63 becomes a critical path) */
1533 ori LINK, 0, r35
1534 blink tr0, LINK
1535
1536 /* Copy completed normally if we get back here */
1537 ptabs r35, tr0
1538 ld.q SP, 24, r35
1539 /* don't restore r2-r4, pointless */
1540 /* set result=r2 to zero as the copy must have succeeded. */
1541 or r63, r63, r2
1542 addi SP, 32, SP
1543 blink tr0, r63 ! RTS
1544
1545 .global __copy_user_fixup
1546__copy_user_fixup:
1547 /* Restore stack frame */
1548 ori r35, 0, LINK
1549 ld.q SP, 24, r35
1550 ld.q SP, 16, r4
1551 ld.q SP, 8, r3
1552 ld.q SP, 0, r2
1553 addi SP, 32, SP
1554 /* Fall through to original code, in the 'same' state we entered with */
1555
1556/* The slow byte-by-byte method is used if the fast copy traps due to a bad
1557 user address. In that rare case, the speed drop can be tolerated. */
1558__copy_user_byte_by_byte:
1559 pta ___copy_user_exit, tr1
1560 pta ___copy_user1, tr0
1561 beq/u r4, r63, tr1 /* early exit for zero length copy */
1562 sub r2, r3, r0
1563 addi r0, -1, r0
1564
1565___copy_user1:
1566 ld.b r3, 0, r5 /* Fault address 1 */
1567
1568 /* Could rewrite this to use just 1 add, but the second comes 'free'
1569 due to load latency */
1570 addi r3, 1, r3
1571 addi r4, -1, r4 /* No real fixup required */
1572___copy_user2:
1573 stx.b r3, r0, r5 /* Fault address 2 */
1574 bne r4, ZERO, tr0
1575
1576___copy_user_exit:
1577 or r4, ZERO, r2
1578 ptabs LINK, tr0
1579 blink tr0, ZERO
1580
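The calling contract implemented above is the usual one for __copy_user: the return value is the number of bytes that could not be copied, zero on complete success. A userspace analogue of the byte-by-byte path follows; the fault_at parameter simulates the bad user pointer and is an artifact of the sketch (in the real routine the __ex_table fixups provide the early exit):

	#include <stddef.h>

	/* returns bytes NOT copied, matching __copy_user's contract */
	static size_t copy_user_sketch(unsigned char *to,
	                               const unsigned char *from,
	                               size_t n, const unsigned char *fault_at)
	{
	        while (n) {
	                if (from == fault_at)   /* the ld.b above would fault here */
	                        break;          /* ___copy_user_exit returns n */
	                *to++ = *from++;
	                n--;
	        }
	        return n;
	}

For blocks of 16 bytes or more, the code above instead calls copy_user_memcpy (wide unaligned loads/stores plus prefetch/alloco); if that path faults, __copy_user_fixup restores the saved arguments and drops into this byte loop.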
1581/*
1582 * __kernel_size_t __clear_user(void *addr, __kernel_size_t size)
1583 *
1584 * Inputs:
1585 * (r2) target address
1586 * (r3) size in bytes
1587 *
1588 * Outputs:
1589 * (*r2) zero-ed target data
1590 * (r2) non-zero-ed bytes
1591 */
1592 .global __clear_user
1593__clear_user:
1594 pta ___clear_user_exit, tr1
1595 pta ___clear_user1, tr0
1596 beq/u r3, r63, tr1
1597
1598___clear_user1:
1599 st.b r2, 0, ZERO /* Fault address */
1600 addi r2, 1, r2
1601 addi r3, -1, r3 /* No real fixup required */
1602 bne r3, ZERO, tr0
1603
1604___clear_user_exit:
1605 or r3, ZERO, r2
1606 ptabs LINK, tr0
1607 blink tr0, ZERO
1608
1609
1610/*
1611 * int __strncpy_from_user(unsigned long __dest, unsigned long __src,
1612 * int __count)
1613 *
1614 * Inputs:
1615 * (r2) target address
1616 * (r3) source address
1617 * (r4) maximum size in bytes
1618 *
1619 * Outputs:
1620 * (*r2) copied data
1621 * (r2) -EFAULT (in case of faulting)
1622 * copied data (otherwise)
1623 */
1624 .global __strncpy_from_user
1625__strncpy_from_user:
1626 pta ___strncpy_from_user1, tr0
1627 pta ___strncpy_from_user_done, tr1
1628 or r4, ZERO, r5 /* r5 = original count */
1629 beq/u r4, r63, tr1 /* early exit if r4==0 */
1630 movi -(EFAULT), r6 /* r6 = reply, no real fixup */
1631 or ZERO, ZERO, r7 /* r7 = data, clear top byte of data */
1632
1633___strncpy_from_user1:
1634 ld.b r3, 0, r7 /* Fault address: only in reading */
1635 st.b r2, 0, r7
1636 addi r2, 1, r2
1637 addi r3, 1, r3
1638 beq/u ZERO, r7, tr1
1639 addi r4, -1, r4 /* return real number of copied bytes */
1640 bne/l ZERO, r4, tr0
1641
1642___strncpy_from_user_done:
1643 sub r5, r4, r6 /* If done, return copied */
1644
1645___strncpy_from_user_exit:
1646 or r6, ZERO, r2
1647 ptabs LINK, tr0
1648 blink tr0, ZERO
1649
1650/*
1651 * extern long __strnlen_user(const char *__s, long __n)
1652 *
1653 * Inputs:
1654 * (r2) source address
1655 * (r3) source size in bytes
1656 *
1657 * Outputs:
1658 * (r2) -EFAULT (in case of faulting)
1659 * string length (otherwise)
1660 */
1661 .global __strnlen_user
1662__strnlen_user:
1663 pta ___strnlen_user_set_reply, tr0
1664 pta ___strnlen_user1, tr1
1665 or ZERO, ZERO, r5 /* r5 = counter */
1666 movi -(EFAULT), r6 /* r6 = reply, no real fixup */
1667 or ZERO, ZERO, r7 /* r7 = data, clear top byte of data */
1668 beq r3, ZERO, tr0
1669
1670___strnlen_user1:
1671 ldx.b r2, r5, r7 /* Fault address: only in reading */
1672 addi r3, -1, r3 /* No real fixup */
1673 addi r5, 1, r5
1674 beq r3, ZERO, tr0
1675 bne r7, ZERO, tr1
1676! The line below used to be active. It led to a junk byte lying between each pair
1677! of entries in the argv & envp structures in memory. Whilst the program saw the right data
1678! via the argv and envp arguments to main, the 'flat' representation visible through
1679! /proc/$pid/cmdline was corrupt, causing trouble with ps, for example.
1680! addi r5, 1, r5 /* Include '\0' */
1681
1682___strnlen_user_set_reply:
1683 or r5, ZERO, r6 /* If done, return counter */
1684
1685___strnlen_user_exit:
1686 or r6, ZERO, r2
1687 ptabs LINK, tr0
1688 blink tr0, ZERO
1689
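In C terms the routine returns the string length counting the terminating NUL (the counter is bumped once per byte read, including the zero byte), capped by the size limit, or -EFAULT on a bad pointer. A sketch of the counting, with the caveat that in the real code the read of s[] may fault:

	/* userspace analogue of __strnlen_user's counting */
	static long strnlen_user_sketch(const char *s, long n)
	{
	        long len = 0;

	        while (n--) {
	                char c = s[len++];      /* may fault in kernel context */
	                if (c == '\0')
	                        break;          /* len already counts the NUL */
	        }
	        return len;
	}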
1690/*
1691 * extern long __get_user_asm_?(void *val, long addr)
1692 *
1693 * Inputs:
1694 * (r2) dest address
1695 * (r3) source address (in User Space)
1696 *
1697 * Outputs:
1698 * (r2) -EFAULT (faulting)
1699 * 0 (not faulting)
1700 */
1701 .global __get_user_asm_b
1702__get_user_asm_b:
1703 or r2, ZERO, r4
1704 movi -(EFAULT), r2 /* r2 = reply, no real fixup */
1705
1706___get_user_asm_b1:
1707 ld.b r3, 0, r5 /* r5 = data */
1708 st.b r4, 0, r5
1709 or ZERO, ZERO, r2
1710
1711___get_user_asm_b_exit:
1712 ptabs LINK, tr0
1713 blink tr0, ZERO
1714
1715
1716 .global __get_user_asm_w
1717__get_user_asm_w:
1718 or r2, ZERO, r4
1719 movi -(EFAULT), r2 /* r2 = reply, no real fixup */
1720
1721___get_user_asm_w1:
1722 ld.w r3, 0, r5 /* r5 = data */
1723 st.w r4, 0, r5
1724 or ZERO, ZERO, r2
1725
1726___get_user_asm_w_exit:
1727 ptabs LINK, tr0
1728 blink tr0, ZERO
1729
1730
1731 .global __get_user_asm_l
1732__get_user_asm_l:
1733 or r2, ZERO, r4
1734 movi -(EFAULT), r2 /* r2 = reply, no real fixup */
1735
1736___get_user_asm_l1:
1737 ld.l r3, 0, r5 /* r5 = data */
1738 st.l r4, 0, r5
1739 or ZERO, ZERO, r2
1740
1741___get_user_asm_l_exit:
1742 ptabs LINK, tr0
1743 blink tr0, ZERO
1744
1745
1746 .global __get_user_asm_q
1747__get_user_asm_q:
1748 or r2, ZERO, r4
1749 movi -(EFAULT), r2 /* r2 = reply, no real fixup */
1750
1751___get_user_asm_q1:
1752 ld.q r3, 0, r5 /* r5 = data */
1753 st.q r4, 0, r5
1754 or ZERO, ZERO, r2
1755
1756___get_user_asm_q_exit:
1757 ptabs LINK, tr0
1758 blink tr0, ZERO
1759
1760/*
1761 * extern long __put_user_asm_?(void *pval, long addr)
1762 *
1763 * Inputs:
1764 * (r2) kernel pointer to value
1765 * (r3) dest address (in User Space)
1766 *
1767 * Outputs:
1768 * (r2) -EFAULT (faulting)
1769 * 0 (not faulting)
1770 */
1771 .global __put_user_asm_b
1772__put_user_asm_b:
1773 ld.b r2, 0, r4 /* r4 = data */
1774 movi -(EFAULT), r2 /* r2 = reply, no real fixup */
1775
1776___put_user_asm_b1:
1777 st.b r3, 0, r4
1778 or ZERO, ZERO, r2
1779
1780___put_user_asm_b_exit:
1781 ptabs LINK, tr0
1782 blink tr0, ZERO
1783
1784
1785 .global __put_user_asm_w
1786__put_user_asm_w:
1787 ld.w r2, 0, r4 /* r4 = data */
1788 movi -(EFAULT), r2 /* r2 = reply, no real fixup */
1789
1790___put_user_asm_w1:
1791 st.w r3, 0, r4
1792 or ZERO, ZERO, r2
1793
1794___put_user_asm_w_exit:
1795 ptabs LINK, tr0
1796 blink tr0, ZERO
1797
1798
1799 .global __put_user_asm_l
1800__put_user_asm_l:
1801 ld.l r2, 0, r4 /* r4 = data */
1802 movi -(EFAULT), r2 /* r2 = reply, no real fixup */
1803
1804___put_user_asm_l1:
1805 st.l r3, 0, r4
1806 or ZERO, ZERO, r2
1807
1808___put_user_asm_l_exit:
1809 ptabs LINK, tr0
1810 blink tr0, ZERO
1811
1812
1813 .global __put_user_asm_q
1814__put_user_asm_q:
1815 ld.q r2, 0, r4 /* r4 = data */
1816 movi -(EFAULT), r2 /* r2 = reply, no real fixup */
1817
1818___put_user_asm_q1:
1819 st.q r3, 0, r4
1820 or ZERO, ZERO, r2
1821
1822___put_user_asm_q_exit:
1823 ptabs LINK, tr0
1824 blink tr0, ZERO
1825
1826panic_stash_regs:
1827	/* The idea is: when we get an unhandled panic, we dump the registers
1828	   to a known memory location, then just sit in a tight loop.
1829 This allows the human to look at the memory region through the GDB
1830 session (assuming the debug module's SHwy initiator isn't locked up
1831 or anything), to hopefully analyze the cause of the panic. */
1832
1833 /* On entry, former r15 (SP) is in DCR
1834 former r0 is at resvec_saved_area + 0
1835 former r1 is at resvec_saved_area + 8
1836 former tr0 is at resvec_saved_area + 32
1837 DCR is the only register whose value is lost altogether.
1838 */
1839
1840 movi 0xffffffff80000000, r0 ! phy of dump area
1841 ld.q SP, 0x000, r1 ! former r0
1842 st.q r0, 0x000, r1
1843 ld.q SP, 0x008, r1 ! former r1
1844 st.q r0, 0x008, r1
1845 st.q r0, 0x010, r2
1846 st.q r0, 0x018, r3
1847 st.q r0, 0x020, r4
1848 st.q r0, 0x028, r5
1849 st.q r0, 0x030, r6
1850 st.q r0, 0x038, r7
1851 st.q r0, 0x040, r8
1852 st.q r0, 0x048, r9
1853 st.q r0, 0x050, r10
1854 st.q r0, 0x058, r11
1855 st.q r0, 0x060, r12
1856 st.q r0, 0x068, r13
1857 st.q r0, 0x070, r14
1858 getcon dcr, r14
1859 st.q r0, 0x078, r14
1860 st.q r0, 0x080, r16
1861 st.q r0, 0x088, r17
1862 st.q r0, 0x090, r18
1863 st.q r0, 0x098, r19
1864 st.q r0, 0x0a0, r20
1865 st.q r0, 0x0a8, r21
1866 st.q r0, 0x0b0, r22
1867 st.q r0, 0x0b8, r23
1868 st.q r0, 0x0c0, r24
1869 st.q r0, 0x0c8, r25
1870 st.q r0, 0x0d0, r26
1871 st.q r0, 0x0d8, r27
1872 st.q r0, 0x0e0, r28
1873 st.q r0, 0x0e8, r29
1874 st.q r0, 0x0f0, r30
1875 st.q r0, 0x0f8, r31
1876 st.q r0, 0x100, r32
1877 st.q r0, 0x108, r33
1878 st.q r0, 0x110, r34
1879 st.q r0, 0x118, r35
1880 st.q r0, 0x120, r36
1881 st.q r0, 0x128, r37
1882 st.q r0, 0x130, r38
1883 st.q r0, 0x138, r39
1884 st.q r0, 0x140, r40
1885 st.q r0, 0x148, r41
1886 st.q r0, 0x150, r42
1887 st.q r0, 0x158, r43
1888 st.q r0, 0x160, r44
1889 st.q r0, 0x168, r45
1890 st.q r0, 0x170, r46
1891 st.q r0, 0x178, r47
1892 st.q r0, 0x180, r48
1893 st.q r0, 0x188, r49
1894 st.q r0, 0x190, r50
1895 st.q r0, 0x198, r51
1896 st.q r0, 0x1a0, r52
1897 st.q r0, 0x1a8, r53
1898 st.q r0, 0x1b0, r54
1899 st.q r0, 0x1b8, r55
1900 st.q r0, 0x1c0, r56
1901 st.q r0, 0x1c8, r57
1902 st.q r0, 0x1d0, r58
1903 st.q r0, 0x1d8, r59
1904 st.q r0, 0x1e0, r60
1905 st.q r0, 0x1e8, r61
1906 st.q r0, 0x1f0, r62
1907 st.q r0, 0x1f8, r63 ! bogus, but for consistency's sake...
1908
1909 ld.q SP, 0x020, r1 ! former tr0
1910 st.q r0, 0x200, r1
1911 gettr tr1, r1
1912 st.q r0, 0x208, r1
1913 gettr tr2, r1
1914 st.q r0, 0x210, r1
1915 gettr tr3, r1
1916 st.q r0, 0x218, r1
1917 gettr tr4, r1
1918 st.q r0, 0x220, r1
1919 gettr tr5, r1
1920 st.q r0, 0x228, r1
1921 gettr tr6, r1
1922 st.q r0, 0x230, r1
1923 gettr tr7, r1
1924 st.q r0, 0x238, r1
1925
1926 getcon sr, r1
1927 getcon ssr, r2
1928 getcon pssr, r3
1929 getcon spc, r4
1930 getcon pspc, r5
1931 getcon intevt, r6
1932 getcon expevt, r7
1933 getcon pexpevt, r8
1934 getcon tra, r9
1935 getcon tea, r10
1936 getcon kcr0, r11
1937 getcon kcr1, r12
1938 getcon vbr, r13
1939 getcon resvec, r14
1940
1941 st.q r0, 0x240, r1
1942 st.q r0, 0x248, r2
1943 st.q r0, 0x250, r3
1944 st.q r0, 0x258, r4
1945 st.q r0, 0x260, r5
1946 st.q r0, 0x268, r6
1947 st.q r0, 0x270, r7
1948 st.q r0, 0x278, r8
1949 st.q r0, 0x280, r9
1950 st.q r0, 0x288, r10
1951 st.q r0, 0x290, r11
1952 st.q r0, 0x298, r12
1953 st.q r0, 0x2a0, r13
1954 st.q r0, 0x2a8, r14
1955
1956 getcon SPC,r2
1957 getcon SSR,r3
1958 getcon EXPEVT,r4
1959 /* Prepare to jump to C - physical address */
1960 movi panic_handler-CONFIG_CACHED_MEMORY_OFFSET, r1
1961 ori r1, 1, r1
1962 ptabs r1, tr0
1963 getcon DCR, SP
1964 blink tr0, ZERO
1965 nop
1966 nop
1967 nop
1968 nop
1969
1970
1971
1972
1973/*
1974 * --- Signal Handling Section
1975 */
1976
1977/*
1978 * extern long long _sa_default_rt_restorer
1979 * extern long long _sa_default_restorer
1980 *
1981 * or, better,
1982 *
1983 * extern void _sa_default_rt_restorer(void)
1984 * extern void _sa_default_restorer(void)
1985 *
1986 * Code prototypes to do a sys_rt_sigreturn() or sys_sigreturn()
1987 * from user space. Copied into user space by signal management.
1988 * Both must be quad aligned and 2 quad long (4 instructions).
1989 *
1990 */
1991 .balign 8
1992 .global sa_default_rt_restorer
1993sa_default_rt_restorer:
1994 movi 0x10, r9
1995 shori __NR_rt_sigreturn, r9
1996 trapa r9
1997 nop
1998
1999 .balign 8
2000 .global sa_default_restorer
2001sa_default_restorer:
2002 movi 0x10, r9
2003 shori __NR_sigreturn, r9
2004 trapa r9
2005 nop
2006
2007/*
2008 * --- __ex_table Section
2009 */
2010
2011/*
2012 * User Access Exception Table.
2013 */
2014 .section __ex_table, "a"
2015
2016 .global asm_uaccess_start /* Just a marker */
2017asm_uaccess_start:
2018
2019 .long ___copy_user1, ___copy_user_exit
2020 .long ___copy_user2, ___copy_user_exit
2021 .long ___clear_user1, ___clear_user_exit
2022 .long ___strncpy_from_user1, ___strncpy_from_user_exit
2023 .long ___strnlen_user1, ___strnlen_user_exit
2024 .long ___get_user_asm_b1, ___get_user_asm_b_exit
2025 .long ___get_user_asm_w1, ___get_user_asm_w_exit
2026 .long ___get_user_asm_l1, ___get_user_asm_l_exit
2027 .long ___get_user_asm_q1, ___get_user_asm_q_exit
2028 .long ___put_user_asm_b1, ___put_user_asm_b_exit
2029 .long ___put_user_asm_w1, ___put_user_asm_w_exit
2030 .long ___put_user_asm_l1, ___put_user_asm_l_exit
2031 .long ___put_user_asm_q1, ___put_user_asm_q_exit
2032
2033 .global asm_uaccess_end /* Just a marker */
2034asm_uaccess_end:
2035
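Each table entry pairs the address of a potentially faulting load/store with the label to resume at when that access faults. Conceptually (a sketch only; the real entry layout is defined by the sh64 uaccess/extable headers, not here), the page-fault handler does something like:

	struct extable_entry_sketch {
	        unsigned long insn;     /* address of the faulting ld/st */
	        unsigned long fixup;    /* address to resume at instead */
	};

	/* on a kernel-mode fault: scan asm_uaccess_start..asm_uaccess_end
	 * (and the C-side tables); on a match, rewrite the saved PC to
	 * e->fixup so the exit stub above returns the error/residue */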
2036
2037
2038
2039/*
2040 * --- .text.init Section
2041 */
2042
2043 .section .text.init, "ax"
2044
2045/*
2046 * void trap_init (void)
2047 *
2048 */
2049 .global trap_init
2050trap_init:
2051 addi SP, -24, SP /* Room to save r28/r29/r30 */
2052 st.q SP, 0, r28
2053 st.q SP, 8, r29
2054 st.q SP, 16, r30
2055
2056 /* Set VBR and RESVEC */
2057 movi LVBR_block, r19
2058 andi r19, -4, r19 /* reset MMUOFF + reserved */
2059 /* For RESVEC exceptions we force the MMU off, which means we need the
2060 physical address. */
2061 movi LRESVEC_block-CONFIG_CACHED_MEMORY_OFFSET, r20
2062 andi r20, -4, r20 /* reset reserved */
2063 ori r20, 1, r20 /* set MMUOFF */
2064 putcon r19, VBR
2065 putcon r20, RESVEC
2066
2067 /* Sanity check */
2068 movi LVBR_block_end, r21
2069 andi r21, -4, r21
2070 movi BLOCK_SIZE, r29 /* r29 = expected size */
2071 or r19, ZERO, r30
2072 add r19, r29, r19
2073
2074 /*
2075 * Ugly, but better loop forever now than crash afterwards.
2076 * We should print a message, but if we touch LVBR or
2077 * LRESVEC blocks we should not be surprised if we get stuck
2078 * in trap_init().
2079 */
2080 pta trap_init_loop, tr1
2081 gettr tr1, r28 /* r28 = trap_init_loop */
2082 sub r21, r30, r30 /* r30 = actual size */
2083
2084 /*
2085 * VBR/RESVEC handlers overlap by being bigger than
2086 * allowed. Very bad. Just loop forever.
2087 * (r28) panic/loop address
2088 * (r29) expected size
2089 * (r30) actual size
2090 */
2091trap_init_loop:
2092 bne r19, r21, tr1
2093
2094 /* Now that exception vectors are set up reset SR.BL */
2095 getcon SR, r22
2096 movi SR_UNBLOCK_EXC, r23
2097 and r22, r23, r22
2098 putcon r22, SR
2099
2100 addi SP, 24, SP
2101 ptabs LINK, tr0
2102 blink tr0, ZERO
2103
diff --git a/arch/sh64/kernel/fpu.c b/arch/sh64/kernel/fpu.c
new file mode 100644
index 000000000000..8ad4ed6a6c9b
--- /dev/null
+++ b/arch/sh64/kernel/fpu.c
@@ -0,0 +1,170 @@
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * arch/sh64/kernel/fpu.c
7 *
8 * Copyright (C) 2001 Manuela Cirronis, Paolo Alberelli
9 * Copyright (C) 2002 STMicroelectronics Limited
10 * Author : Stuart Menefy
11 *
12 * Started from SH4 version:
13 * Copyright (C) 1999, 2000 Kaz Kojima & Niibe Yutaka
14 *
15 */
16
17#include <linux/sched.h>
18#include <linux/signal.h>
19#include <asm/processor.h>
20#include <asm/user.h>
21#include <asm/io.h>
22
23/*
24 * Initially load the FPU with signalling NaNs. This bit pattern
25 * has the property that, whether considered as single or as
26 * double precision, it still represents a signalling NaN.
27 */
28#define sNAN64 0xFFFFFFFFFFFFFFFFULL
29#define sNAN32 0xFFFFFFFFUL
30
31static union sh_fpu_union init_fpuregs = {
32 .hard = {
33 .fp_regs = { [0 ... 63] = sNAN32 },
34 .fpscr = FPSCR_INIT
35 }
36};
37
38inline void fpsave(struct sh_fpu_hard_struct *fpregs)
39{
40 asm volatile("fst.p %0, (0*8), fp0\n\t"
41 "fst.p %0, (1*8), fp2\n\t"
42 "fst.p %0, (2*8), fp4\n\t"
43 "fst.p %0, (3*8), fp6\n\t"
44 "fst.p %0, (4*8), fp8\n\t"
45 "fst.p %0, (5*8), fp10\n\t"
46 "fst.p %0, (6*8), fp12\n\t"
47 "fst.p %0, (7*8), fp14\n\t"
48 "fst.p %0, (8*8), fp16\n\t"
49 "fst.p %0, (9*8), fp18\n\t"
50 "fst.p %0, (10*8), fp20\n\t"
51 "fst.p %0, (11*8), fp22\n\t"
52 "fst.p %0, (12*8), fp24\n\t"
53 "fst.p %0, (13*8), fp26\n\t"
54 "fst.p %0, (14*8), fp28\n\t"
55 "fst.p %0, (15*8), fp30\n\t"
56 "fst.p %0, (16*8), fp32\n\t"
57 "fst.p %0, (17*8), fp34\n\t"
58 "fst.p %0, (18*8), fp36\n\t"
59 "fst.p %0, (19*8), fp38\n\t"
60 "fst.p %0, (20*8), fp40\n\t"
61 "fst.p %0, (21*8), fp42\n\t"
62 "fst.p %0, (22*8), fp44\n\t"
63 "fst.p %0, (23*8), fp46\n\t"
64 "fst.p %0, (24*8), fp48\n\t"
65 "fst.p %0, (25*8), fp50\n\t"
66 "fst.p %0, (26*8), fp52\n\t"
67 "fst.p %0, (27*8), fp54\n\t"
68 "fst.p %0, (28*8), fp56\n\t"
69 "fst.p %0, (29*8), fp58\n\t"
70 "fst.p %0, (30*8), fp60\n\t"
71 "fst.p %0, (31*8), fp62\n\t"
72
73 "fgetscr fr63\n\t"
74 "fst.s %0, (32*8), fr63\n\t"
75 : /* no output */
76 : "r" (fpregs)
77 : "memory");
78}
79
80
81static inline void
82fpload(struct sh_fpu_hard_struct *fpregs)
83{
84 asm volatile("fld.p %0, (0*8), fp0\n\t"
85 "fld.p %0, (1*8), fp2\n\t"
86 "fld.p %0, (2*8), fp4\n\t"
87 "fld.p %0, (3*8), fp6\n\t"
88 "fld.p %0, (4*8), fp8\n\t"
89 "fld.p %0, (5*8), fp10\n\t"
90 "fld.p %0, (6*8), fp12\n\t"
91 "fld.p %0, (7*8), fp14\n\t"
92 "fld.p %0, (8*8), fp16\n\t"
93 "fld.p %0, (9*8), fp18\n\t"
94 "fld.p %0, (10*8), fp20\n\t"
95 "fld.p %0, (11*8), fp22\n\t"
96 "fld.p %0, (12*8), fp24\n\t"
97 "fld.p %0, (13*8), fp26\n\t"
98 "fld.p %0, (14*8), fp28\n\t"
99 "fld.p %0, (15*8), fp30\n\t"
100 "fld.p %0, (16*8), fp32\n\t"
101 "fld.p %0, (17*8), fp34\n\t"
102 "fld.p %0, (18*8), fp36\n\t"
103 "fld.p %0, (19*8), fp38\n\t"
104 "fld.p %0, (20*8), fp40\n\t"
105 "fld.p %0, (21*8), fp42\n\t"
106 "fld.p %0, (22*8), fp44\n\t"
107 "fld.p %0, (23*8), fp46\n\t"
108 "fld.p %0, (24*8), fp48\n\t"
109 "fld.p %0, (25*8), fp50\n\t"
110 "fld.p %0, (26*8), fp52\n\t"
111 "fld.p %0, (27*8), fp54\n\t"
112 "fld.p %0, (28*8), fp56\n\t"
113 "fld.p %0, (29*8), fp58\n\t"
114 "fld.p %0, (30*8), fp60\n\t"
115
116 "fld.s %0, (32*8), fr63\n\t"
117 "fputscr fr63\n\t"
118
119 "fld.p %0, (31*8), fp62\n\t"
120 : /* no output */
121 : "r" (fpregs) );
122}
123
124void fpinit(struct sh_fpu_hard_struct *fpregs)
125{
126 *fpregs = init_fpuregs.hard;
127}
128
129asmlinkage void
130do_fpu_error(unsigned long ex, struct pt_regs *regs)
131{
132 struct task_struct *tsk = current;
133
134 regs->pc += 4;
135
136 tsk->thread.trap_no = 11;
137 tsk->thread.error_code = 0;
138 force_sig(SIGFPE, tsk);
139}
140
141
142asmlinkage void
143do_fpu_state_restore(unsigned long ex, struct pt_regs *regs)
144{
145 void die(const char *str, struct pt_regs *regs, long err);
146
147 if (! user_mode(regs))
148 die("FPU used in kernel", regs, ex);
149
150 regs->sr &= ~SR_FD;
151
152 if (last_task_used_math == current)
153 return;
154
155 grab_fpu();
156 if (last_task_used_math != NULL) {
157 /* Other processes fpu state, save away */
158 fpsave(&last_task_used_math->thread.fpu.hard);
159 }
160 last_task_used_math = current;
161 if (used_math()) {
162 fpload(&current->thread.fpu.hard);
163 } else {
164 /* First time FPU user. */
165 fpload(&init_fpuregs.hard);
166 set_used_math();
167 }
168 release_fpu();
169}
170
diff --git a/arch/sh64/kernel/head.S b/arch/sh64/kernel/head.S
new file mode 100644
index 000000000000..cc0b628a9ea7
--- /dev/null
+++ b/arch/sh64/kernel/head.S
@@ -0,0 +1,373 @@
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * arch/sh64/kernel/head.S
7 *
8 * Copyright (C) 2000, 2001 Paolo Alberelli
9 * Copyright (C) 2003, 2004 Paul Mundt
10 *
11 *
12 * benedict.gaster@superh.com: 2nd May 2002
13 * Moved definition of empty_zero_page to its own section allowing
14 * it to be placed at an absolute address known at load time.
15 *
16 * lethal@linux-sh.org: 9th May 2003
17 * Kill off GLOBAL_NAME() usage.
18 *
19 * lethal@linux-sh.org: 8th May 2004
20 * Add early SCIF console DTLB mapping.
21 */
22
23#include <linux/config.h>
24
25#include <asm/page.h>
26#include <asm/mmu_context.h>
27#include <asm/cache.h>
28#include <asm/tlb.h>
29#include <asm/processor.h>
30#include <asm/registers.h>
31#include <asm/thread_info.h>
32
33/*
34 * MMU defines: TLB boundaries.
35 */
36
37#define MMUIR_FIRST ITLB_FIXED
38#define MMUIR_END ITLB_LAST_VAR_UNRESTRICTED+TLB_STEP
39#define MMUIR_STEP TLB_STEP
40
41#define MMUDR_FIRST DTLB_FIXED
42#define MMUDR_END DTLB_LAST_VAR_UNRESTRICTED+TLB_STEP
43#define MMUDR_STEP TLB_STEP
44
45/* Safety check : CONFIG_CACHED_MEMORY_OFFSET has to be a multiple of 512Mb */
46#if (CONFIG_CACHED_MEMORY_OFFSET & ((1UL<<29)-1))
47#error "CONFIG_CACHED_MEMORY_OFFSET must be a multiple of 512Mb"
48#endif
49
50/*
51 * MMU defines: Fixed TLBs.
52 */
53/* Deal safely with the case where the base of RAM is not 512Mb aligned */
54
55#define ALIGN_512M_MASK (0xffffffffe0000000)
56#define ALIGNED_EFFECTIVE ((CONFIG_CACHED_MEMORY_OFFSET + CONFIG_MEMORY_START) & ALIGN_512M_MASK)
57#define ALIGNED_PHYSICAL (CONFIG_MEMORY_START & ALIGN_512M_MASK)
58
59#define MMUIR_TEXT_H (0x0000000000000003 | ALIGNED_EFFECTIVE)
60 /* Enabled, Shared, ASID 0, Eff. Add. 0xA0000000 */
61
62#define MMUIR_TEXT_L (0x000000000000009a | ALIGNED_PHYSICAL)
63 /* 512 Mb, Cacheable, Write-back, execute, Not User, Ph. Add. */
64
65#define MMUDR_CACHED_H 0x0000000000000003 | ALIGNED_EFFECTIVE
66 /* Enabled, Shared, ASID 0, Eff. Add. 0xA0000000 */
67#define MMUDR_CACHED_L 0x000000000000015a | ALIGNED_PHYSICAL
68 /* 512 Mb, Cacheable, Write-back, read/write, Not User, Ph. Add. */
69
70#ifdef CONFIG_ICACHE_DISABLED
71#define ICCR0_INIT_VAL ICCR0_OFF /* ICACHE off */
72#else
73#define ICCR0_INIT_VAL ICCR0_ON | ICCR0_ICI /* ICE + ICI */
74#endif
75#define ICCR1_INIT_VAL ICCR1_NOLOCK /* No locking */
76
77#if defined (CONFIG_DCACHE_DISABLED)
78#define OCCR0_INIT_VAL OCCR0_OFF /* D-cache: off */
79#elif defined (CONFIG_DCACHE_WRITE_THROUGH)
80#define OCCR0_INIT_VAL OCCR0_ON | OCCR0_OCI | OCCR0_WT /* D-cache: on, */
81 /* WT, invalidate */
82#elif defined (CONFIG_DCACHE_WRITE_BACK)
83#define OCCR0_INIT_VAL OCCR0_ON | OCCR0_OCI | OCCR0_WB /* D-cache: on, */
84 /* WB, invalidate */
85#else
86#error preprocessor flag CONFIG_DCACHE_... not recognized!
87#endif
88
89#define OCCR1_INIT_VAL OCCR1_NOLOCK /* No locking */
90
91 .section .empty_zero_page, "aw"
92 .global empty_zero_page
93
94empty_zero_page:
95 .long 1 /* MOUNT_ROOT_RDONLY */
96 .long 0 /* RAMDISK_FLAGS */
97 .long 0x0200 /* ORIG_ROOT_DEV */
98 .long 1 /* LOADER_TYPE */
99 .long 0x00800000 /* INITRD_START */
100 .long 0x00800000 /* INITRD_SIZE */
101 .long 0
102
103 .text
104 .balign 4096,0,4096
105
106 .section .data, "aw"
107 .balign PAGE_SIZE
108
109 .section .data, "aw"
110 .balign PAGE_SIZE
111
112 .global swapper_pg_dir
113swapper_pg_dir:
114 .space PAGE_SIZE, 0
115
116 .global empty_bad_page
117empty_bad_page:
118 .space PAGE_SIZE, 0
119
120 .global empty_bad_pte_table
121empty_bad_pte_table:
122 .space PAGE_SIZE, 0
123
124 .global fpu_in_use
125fpu_in_use: .quad 0
126
127
128 .section .text, "ax"
129 .balign L1_CACHE_BYTES
130/*
131 * Condition at the entry of __stext:
132 * . Reset state:
133 * . SR.FD = 1 (FPU disabled)
134 * . SR.BL = 1 (Exceptions disabled)
135 * . SR.MD = 1 (Privileged Mode)
136 * . SR.MMU = 0 (MMU Disabled)
137 * . SR.CD = 0 (CTC User Visible)
138 * . SR.IMASK = Undefined (Interrupt Mask)
139 *
140 * Operations supposed to be performed by __stext:
141 * . prevent speculative fetch onto device memory while MMU is off
142 * . reflect the SH5 ABI as much as possible (r15, r26, r27, r18)
143 * . first, save CPU state and set it to something harmless
144 * . any CPU detection and/or endianness settings (?)
145 * . initialize EMI/LMI (but not TMU/RTC/INTC/SCIF): TBD
146 * . set initial TLB entries for cached and uncached regions
147 * (no fine granularity paging)
148 * . set initial cache state
149 * . enable MMU and caches
150 * . set CPU to a consistent state
151 * . registers (including stack pointer and current/KCR0)
152 * . NOT expecting to set Exception handling nor VBR/RESVEC/DCR
153 * at this stage. This is all to later Linux initialization steps.
154 * . initialize FPU
155 * . clear BSS
156 * . jump into start_kernel()
157 * . be prepared for a hopeless start_kernel() return.
158 *
159 */
160 .global _stext
161_stext:
162 /*
163 * Prevent speculative fetch on device memory due to
164 * uninitialized target registers.
165 */
166 ptabs/u ZERO, tr0
167 ptabs/u ZERO, tr1
168 ptabs/u ZERO, tr2
169 ptabs/u ZERO, tr3
170 ptabs/u ZERO, tr4
171 ptabs/u ZERO, tr5
172 ptabs/u ZERO, tr6
173 ptabs/u ZERO, tr7
174 synci
175
176 /*
177 * Read/Set CPU state. After this block:
178 * r29 = Initial SR
179 */
180 getcon SR, r29
181 movi SR_HARMLESS, r20
182 putcon r20, SR
183
184 /*
185 * Initialize EMI/LMI. To Be Done.
186 */
187
188 /*
189 * CPU detection and/or endianness settings (?). To Be Done.
190 * Pure PIC code here, please ! Just save state into r30.
191 * After this block:
192 * r30 = CPU type/Platform Endianness
193 */
194
195 /*
196 * Set initial TLB entries for cached and uncached regions.
197 * Note: PTA/BLINK is PIC code, PTABS/BLINK isn't !
198 */
199 /* Clear ITLBs */
200 pta clear_ITLB, tr1
201 movi MMUIR_FIRST, r21
202 movi MMUIR_END, r22
203clear_ITLB:
204 putcfg r21, 0, ZERO /* Clear MMUIR[n].PTEH.V */
205 addi r21, MMUIR_STEP, r21
206 bne r21, r22, tr1
207
208 /* Clear DTLBs */
209 pta clear_DTLB, tr1
210 movi MMUDR_FIRST, r21
211 movi MMUDR_END, r22
212clear_DTLB:
213 putcfg r21, 0, ZERO /* Clear MMUDR[n].PTEH.V */
214 addi r21, MMUDR_STEP, r21
215 bne r21, r22, tr1
216
217 /* Map one big (512Mb) page for ITLB */
218 movi MMUIR_FIRST, r21
219 movi MMUIR_TEXT_L, r22 /* PTEL first */
220 add.l r22, r63, r22 /* Sign extend */
221 putcfg r21, 1, r22 /* Set MMUIR[0].PTEL */
222 movi MMUIR_TEXT_H, r22 /* PTEH last */
223 add.l r22, r63, r22 /* Sign extend */
224 putcfg r21, 0, r22 /* Set MMUIR[0].PTEH */
225
226 /* Map one big CACHED (512Mb) page for DTLB */
227 movi MMUDR_FIRST, r21
228 movi MMUDR_CACHED_L, r22 /* PTEL first */
229 add.l r22, r63, r22 /* Sign extend */
230 putcfg r21, 1, r22 /* Set MMUDR[0].PTEL */
231 movi MMUDR_CACHED_H, r22 /* PTEH last */
232 add.l r22, r63, r22 /* Sign extend */
233 putcfg r21, 0, r22 /* Set MMUDR[0].PTEH */
234
235#ifdef CONFIG_EARLY_PRINTK
236 /*
237 * Setup a DTLB translation for SCIF phys.
238 */
239 addi r21, MMUDR_STEP, r21
240 movi 0x0a03, r22 /* SCIF phys */
241 shori 0x0148, r22
242 putcfg r21, 1, r22 /* PTEL first */
243 movi 0xfa03, r22 /* 0xfa030000, fixed SCIF virt */
244 shori 0x0003, r22
245 putcfg r21, 0, r22 /* PTEH last */
246#endif
247
248 /*
249 * Set cache behaviours.
250 */
251 /* ICache */
252 movi ICCR_BASE, r21
253 movi ICCR0_INIT_VAL, r22
254 movi ICCR1_INIT_VAL, r23
255 putcfg r21, ICCR_REG0, r22
256 putcfg r21, ICCR_REG1, r23
257
258 /* OCache */
259 movi OCCR_BASE, r21
260 movi OCCR0_INIT_VAL, r22
261 movi OCCR1_INIT_VAL, r23
262 putcfg r21, OCCR_REG0, r22
263 putcfg r21, OCCR_REG1, r23
264
265
266 /*
267 * Enable Caches and MMU. Do the first non-PIC jump.
268 * Now head.S global variables, constants and externs
269 * can be used.
270 */
271 getcon SR, r21
272 movi SR_ENABLE_MMU, r22
273 or r21, r22, r21
274 putcon r21, SSR
275 movi hyperspace, r22
276 ori r22, 1, r22 /* Make it SHmedia, not required but..*/
277 putcon r22, SPC
278 synco
279 rte /* And now go into the hyperspace ... */
280hyperspace: /* ... that's the next instruction ! */
281
282 /*
283 * Set CPU to a consistent state.
284 * r31 = FPU support flag
285 * tr0/tr7 in use. Others give a chance to loop somewhere safe
286 */
287 movi start_kernel, r32
288 ori r32, 1, r32
289
290 ptabs r32, tr0 /* r32 = _start_kernel address */
291 pta/u hopeless, tr1
292 pta/u hopeless, tr2
293 pta/u hopeless, tr3
294 pta/u hopeless, tr4
295 pta/u hopeless, tr5
296 pta/u hopeless, tr6
297 pta/u hopeless, tr7
298 gettr tr1, r28 /* r28 = hopeless address */
299
300 /* Set initial stack pointer */
301 movi init_thread_union, SP
302 putcon SP, KCR0 /* Set current to init_task */
303 movi THREAD_SIZE, r22 /* Point to the end */
304 add SP, r22, SP
305
306 /*
307 * Initialize FPU.
308 * Keep FPU flag in r31. After this block:
309 * r31 = FPU flag
310 */
311 movi fpu_in_use, r31 /* Temporary */
312
313#ifdef CONFIG_SH_FPU
314 getcon SR, r21
315 movi SR_ENABLE_FPU, r22
316 and r21, r22, r22
317 putcon r22, SR /* Try to enable */
318 getcon SR, r22
319 xor r21, r22, r21
320 shlri r21, 15, r21 /* Supposedly 0/1 */
321 st.q r31, 0 , r21 /* Set fpu_in_use */
322#else
323 movi 0, r21
324 st.q r31, 0 , r21 /* Set fpu_in_use */
325#endif
326 or r21, ZERO, r31 /* Set FPU flag at last */
327
328#ifndef CONFIG_SH_NO_BSS_INIT
329/* Don't clear BSS if running on slow platforms such as an RTL simulation,
330 remote memory via SHdebug link, etc. For these the memory can be guaranteed
331 to be all zero on boot anyway. */
332 /*
333 * Clear bss
334 */
335 pta clear_quad, tr1
336 movi __bss_start, r22
337 movi _end, r23
338clear_quad:
339 st.q r22, 0, ZERO
340 addi r22, 8, r22
341 bne r22, r23, tr1 /* Both quad aligned, see vmlinux.lds.S */
342#endif
343 pta/u hopeless, tr1
344
345 /* Say bye to head.S but be prepared to wrongly get back ... */
346 blink tr0, LINK
347
348 /* If we ever get back here through LINK/tr1-tr7 */
349 pta/u hopeless, tr7
350
351hopeless:
352 /*
353 * Something's badly wrong here. Loop endlessly,
354 * there's nothing more we can do about it.
355 *
356 * Note on hopeless: it can be jumped into invariably
357 * before or after jumping into hyperspace. The only
358 * requirement is that it be reached PIC (PTA) before and
359 * either way (PTA/PTABS) after. Given the virtual-to-
360 * physical mapping, a simulator/emulator can easily
361 * tell where we came from just by looking at the
362 * hopeless (PC) address.
363 *
364 * For debugging purposes:
365 * (r28) hopeless/loop address
366 * (r29) Original SR
367 * (r30) CPU type/Platform endianness
368 * (r31) FPU Support
369 * (r32) _start_kernel address
370 */
371 blink tr7, ZERO
372
373
diff --git a/arch/sh64/kernel/init_task.c b/arch/sh64/kernel/init_task.c
new file mode 100644
index 000000000000..de2d07db1f88
--- /dev/null
+++ b/arch/sh64/kernel/init_task.c
@@ -0,0 +1,46 @@
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * arch/sh64/kernel/init_task.c
7 *
8 * Copyright (C) 2000, 2001 Paolo Alberelli
9 * Copyright (C) 2003 Paul Mundt
10 *
11 */
12#include <linux/rwsem.h>
13#include <linux/mm.h>
14#include <linux/sched.h>
15#include <linux/init_task.h>
16#include <linux/mqueue.h>
17
18#include <asm/uaccess.h>
19#include <asm/pgtable.h>
20
21static struct fs_struct init_fs = INIT_FS;
22static struct files_struct init_files = INIT_FILES;
23static struct signal_struct init_signals = INIT_SIGNALS(init_signals);
24static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
25struct mm_struct init_mm = INIT_MM(init_mm);
26
27struct pt_regs fake_swapper_regs;
28
29/*
30 * Initial thread structure.
31 *
32 * We need to make sure that this is THREAD_SIZE-byte aligned due
33 * to the way process stacks are handled. This is done by having a
34 * special "init_task" linker map entry..
35 */
36union thread_union init_thread_union
37 __attribute__((__section__(".data.init_task"))) =
38 { INIT_THREAD_INFO(init_task) };
39
40/*
41 * Initial task structure.
42 *
43 * All other task structs will be allocated on slabs in fork.c
44 */
45struct task_struct init_task = INIT_TASK(init_task);
46
diff --git a/arch/sh64/kernel/irq.c b/arch/sh64/kernel/irq.c
new file mode 100644
index 000000000000..9fc2b71dbd84
--- /dev/null
+++ b/arch/sh64/kernel/irq.c
@@ -0,0 +1,116 @@
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * arch/sh64/kernel/irq.c
7 *
8 * Copyright (C) 2000, 2001 Paolo Alberelli
9 * Copyright (C) 2003 Paul Mundt
10 *
11 */
12
13/*
14 * IRQs are in fact implemented a bit like signal handlers for the kernel.
15 * Naturally it's not a 1:1 relation, but there are similarities.
16 */
17
18#include <linux/config.h>
19#include <linux/errno.h>
20#include <linux/kernel_stat.h>
21#include <linux/signal.h>
22#include <linux/rwsem.h>
23#include <linux/sched.h>
24#include <linux/ioport.h>
25#include <linux/interrupt.h>
26#include <linux/timex.h>
27#include <linux/slab.h>
28#include <linux/random.h>
29#include <linux/smp.h>
30#include <linux/smp_lock.h>
31#include <linux/init.h>
32#include <linux/seq_file.h>
33#include <linux/bitops.h>
34#include <asm/system.h>
35#include <asm/io.h>
36#include <asm/smp.h>
37#include <asm/pgalloc.h>
38#include <asm/delay.h>
39#include <asm/irq.h>
40#include <linux/irq.h>
41
42void ack_bad_irq(unsigned int irq)
43{
44 printk("unexpected IRQ trap at irq %02x\n", irq);
45}
46
47#if defined(CONFIG_PROC_FS)
48int show_interrupts(struct seq_file *p, void *v)
49{
50 int i = *(loff_t *) v, j;
51 struct irqaction * action;
52 unsigned long flags;
53
54 if (i == 0) {
55 seq_puts(p, " ");
56 for (j=0; j<NR_CPUS; j++)
57 if (cpu_online(j))
58 seq_printf(p, "CPU%d ",j);
59 seq_putc(p, '\n');
60 }
61
62 if (i < NR_IRQS) {
63 spin_lock_irqsave(&irq_desc[i].lock, flags);
64 action = irq_desc[i].action;
65 if (!action)
66 goto unlock;
67 seq_printf(p, "%3d: ",i);
68 seq_printf(p, "%10u ", kstat_irqs(i));
69 seq_printf(p, " %14s", irq_desc[i].handler->typename);
70 seq_printf(p, " %s", action->name);
71
72 for (action=action->next; action; action = action->next)
73 seq_printf(p, ", %s", action->name);
74 seq_putc(p, '\n');
75unlock:
76 spin_unlock_irqrestore(&irq_desc[i].lock, flags);
77 }
78 return 0;
79}
80#endif
81
82/*
83 * do_NMI handles all Non-Maskable Interrupts.
84 */
85asmlinkage void do_NMI(unsigned long vector_num, struct pt_regs * regs)
86{
87 if (regs->sr & 0x40000000)
88 printk("unexpected NMI trap in system mode\n");
89 else
90 printk("unexpected NMI trap in user mode\n");
91
92 /* No statistics */
93}
94
95/*
96 * do_IRQ handles all normal device IRQ's.
97 */
98asmlinkage int do_IRQ(unsigned long vector_num, struct pt_regs * regs)
99{
100 int irq;
101
102 irq_enter();
103
104 irq = irq_demux(vector_num);
105
106 if (irq >= 0) {
107 __do_IRQ(irq, regs);
108 } else {
109 printk("unexpected IRQ trap at vector %03lx\n", vector_num);
110 }
111
112 irq_exit();
113
114 return 1;
115}
116
diff --git a/arch/sh64/kernel/irq_intc.c b/arch/sh64/kernel/irq_intc.c
new file mode 100644
index 000000000000..43f88f3a78b0
--- /dev/null
+++ b/arch/sh64/kernel/irq_intc.c
@@ -0,0 +1,272 @@
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * arch/sh64/kernel/irq_intc.c
7 *
8 * Copyright (C) 2000, 2001 Paolo Alberelli
9 * Copyright (C) 2003 Paul Mundt
10 *
11 * Interrupt Controller support for SH5 INTC.
12 * Per-interrupt selective. IRLM=0 (fixed priority) is not
13 * supported, as it is useless without a cascaded interrupt
14 * controller.
15 *
16 */
17
18#include <linux/config.h>
19#include <linux/init.h>
20#include <linux/irq.h>
21#include <linux/kernel.h>
22#include <linux/stddef.h>
23#include <linux/bitops.h>	/* this also includes <asm/registers.h> */
24					/* which is required to remap the register */
25					/* names used in __asm__ blocks... */
26
27#include <asm/hardware.h>
28#include <asm/platform.h>
29#include <asm/page.h>
30#include <asm/io.h>
31#include <asm/irq.h>
32
33/*
34 * Maybe the generic Peripheral block could move to a more
35 * generic include file. INTC Block will be defined here
36 * and only here to make INTC self-contained in a single
37 * file.
38 */
39#define INTC_BLOCK_OFFSET 0x01000000
40
41/* Base */
42#define INTC_BASE PHYS_PERIPHERAL_BLOCK + \
43 INTC_BLOCK_OFFSET
44
45/* Address */
46#define INTC_ICR_SET (intc_virt + 0x0)
47#define INTC_ICR_CLEAR (intc_virt + 0x8)
48#define INTC_INTPRI_0 (intc_virt + 0x10)
49#define INTC_INTSRC_0 (intc_virt + 0x50)
50#define INTC_INTSRC_1 (intc_virt + 0x58)
51#define INTC_INTREQ_0 (intc_virt + 0x60)
52#define INTC_INTREQ_1 (intc_virt + 0x68)
53#define INTC_INTENB_0 (intc_virt + 0x70)
54#define INTC_INTENB_1 (intc_virt + 0x78)
55#define INTC_INTDSB_0 (intc_virt + 0x80)
56#define INTC_INTDSB_1 (intc_virt + 0x88)
57
58#define INTC_ICR_IRLM 0x1
59#define INTC_INTPRI_PREGS 8 /* 8 Priority Registers */
60#define INTC_INTPRI_PPREG 8 /* 8 Priorities per Register */
61
62
63/*
64 * Mapper between the vector ordinal and the IRQ number
65 * passed to kernel/device drivers.
66 */
67int intc_evt_to_irq[(0xE20/0x20)+1] = {
68 -1, -1, -1, -1, -1, -1, -1, -1, /* 0x000 - 0x0E0 */
69 -1, -1, -1, -1, -1, -1, -1, -1, /* 0x100 - 0x1E0 */
70 0, 0, 0, 0, 0, 1, 0, 0, /* 0x200 - 0x2E0 */
71 2, 0, 0, 3, 0, 0, 0, -1, /* 0x300 - 0x3E0 */
72 32, 33, 34, 35, 36, 37, 38, -1, /* 0x400 - 0x4E0 */
73 -1, -1, -1, 63, -1, -1, -1, -1, /* 0x500 - 0x5E0 */
74 -1, -1, 18, 19, 20, 21, 22, -1, /* 0x600 - 0x6E0 */
75 39, 40, 41, 42, -1, -1, -1, -1, /* 0x700 - 0x7E0 */
76 4, 5, 6, 7, -1, -1, -1, -1, /* 0x800 - 0x8E0 */
77 -1, -1, -1, -1, -1, -1, -1, -1, /* 0x900 - 0x9E0 */
78 12, 13, 14, 15, 16, 17, -1, -1, /* 0xA00 - 0xAE0 */
79 -1, -1, -1, -1, -1, -1, -1, -1, /* 0xB00 - 0xBE0 */
80 -1, -1, -1, -1, -1, -1, -1, -1, /* 0xC00 - 0xCE0 */
81 -1, -1, -1, -1, -1, -1, -1, -1, /* 0xD00 - 0xDE0 */
82 -1, -1 /* 0xE00 - 0xE20 */
83};
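As a worked example, an INTEVT code of 0x400 indexes slot 0x400 / 0x20 = 32 and yields IRQ 32; the opposite table below gives IRQ_to_vectorN[32] = 0x20, and 0x20 * 0x20 = 0x400 again. A plausible demux using this table (a sketch only; the actual irq_demux() is supplied by platform code and may differ):

	/* map a hardware vector to a Linux IRQ number, -1 if unmapped */
	static int intc_demux_sketch(unsigned long intevt)
	{
	        unsigned long slot = intevt / 0x20;

	        if (slot >= sizeof(intc_evt_to_irq) / sizeof(intc_evt_to_irq[0]))
	                return -1;
	        return intc_evt_to_irq[slot];
	}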
84
85/*
86 * Opposite mapper.
87 */
88static int IRQ_to_vectorN[NR_INTC_IRQS] = {
89 0x12, 0x15, 0x18, 0x1B, 0x40, 0x41, 0x42, 0x43, /* 0- 7 */
90 -1, -1, -1, -1, 0x50, 0x51, 0x52, 0x53, /* 8-15 */
91 0x54, 0x55, 0x32, 0x33, 0x34, 0x35, 0x36, -1, /* 16-23 */
92 -1, -1, -1, -1, -1, -1, -1, -1, /* 24-31 */
93 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x38, /* 32-39 */
94 0x39, 0x3A, 0x3B, -1, -1, -1, -1, -1, /* 40-47 */
95 -1, -1, -1, -1, -1, -1, -1, -1, /* 48-55 */
96 -1, -1, -1, -1, -1, -1, -1, 0x2B, /* 56-63 */
97
98};
99
100static unsigned long intc_virt;
101
102static unsigned int startup_intc_irq(unsigned int irq);
103static void shutdown_intc_irq(unsigned int irq);
104static void enable_intc_irq(unsigned int irq);
105static void disable_intc_irq(unsigned int irq);
106static void mask_and_ack_intc(unsigned int);
107static void end_intc_irq(unsigned int irq);
108
109static struct hw_interrupt_type intc_irq_type = {
110 "INTC",
111 startup_intc_irq,
112 shutdown_intc_irq,
113 enable_intc_irq,
114 disable_intc_irq,
115 mask_and_ack_intc,
116 end_intc_irq
117};
118
119static int irlm; /* IRL mode */
120
121static unsigned int startup_intc_irq(unsigned int irq)
122{
123 enable_intc_irq(irq);
124 return 0; /* never anything pending */
125}
126
127static void shutdown_intc_irq(unsigned int irq)
128{
129 disable_intc_irq(irq);
130}
131
132static void enable_intc_irq(unsigned int irq)
133{
134 unsigned long reg;
135 unsigned long bitmask;
136
137 if ((irq <= IRQ_IRL3) && (irlm == NO_PRIORITY))
138 printk("Trying to use straight IRL0-3 with an encoding platform.\n");
139
140 if (irq < 32) {
141 reg = INTC_INTENB_0;
142 bitmask = 1 << irq;
143 } else {
144 reg = INTC_INTENB_1;
145 bitmask = 1 << (irq - 32);
146 }
147
148 ctrl_outl(bitmask, reg);
149}
150
151static void disable_intc_irq(unsigned int irq)
152{
153 unsigned long reg;
154 unsigned long bitmask;
155
156 if (irq < 32) {
157 reg = INTC_INTDSB_0;
158 bitmask = 1 << irq;
159 } else {
160 reg = INTC_INTDSB_1;
161 bitmask = 1 << (irq - 32);
162 }
163
164 ctrl_outl(bitmask, reg);
165}
166
167static void mask_and_ack_intc(unsigned int irq)
168{
169 disable_intc_irq(irq);
170}
171
172static void end_intc_irq(unsigned int irq)
173{
174 enable_intc_irq(irq);
175}
176
177/* For future use, if we ever support IRLM=0 */
178void make_intc_irq(unsigned int irq)
179{
180 disable_irq_nosync(irq);
181 irq_desc[irq].handler = &intc_irq_type;
182 disable_intc_irq(irq);
183}
184
185#if defined(CONFIG_PROC_FS) && defined(CONFIG_SYSCTL)
186int intc_irq_describe(char* p, int irq)
187{
188 if (irq < NR_INTC_IRQS)
189 return sprintf(p, "(0x%3x)", IRQ_to_vectorN[irq]*0x20);
190 else
191 return 0;
192}
193#endif
194
195void __init init_IRQ(void)
196{
197 unsigned long long __dummy0, __dummy1=~0x00000000100000f0;
198 unsigned long reg;
199 unsigned long data;
200 int i;
201
202 intc_virt = onchip_remap(INTC_BASE, 1024, "INTC");
203 if (!intc_virt) {
204 panic("Unable to remap INTC\n");
205 }
206
207
208 /* Set default: per-line enable/disable, priority driven ack/eoi */
209 for (i = 0; i < NR_INTC_IRQS; i++) {
210 if (platform_int_priority[i] != NO_PRIORITY) {
211 irq_desc[i].handler = &intc_irq_type;
212 }
213 }
214
215
216 /* Disable all interrupts and set all priorities to 0 to avoid trouble */
217 ctrl_outl(-1, INTC_INTDSB_0);
218 ctrl_outl(-1, INTC_INTDSB_1);
219
220 for (reg = INTC_INTPRI_0, i = 0; i < INTC_INTPRI_PREGS; i++, reg += 8)
221 ctrl_outl( NO_PRIORITY, reg);
222
223
224 /* Set IRLM */
225 /* If all the priorities are set to 'no priority', then
226 * assume we are using encoded mode.
227 */
228 irlm = platform_int_priority[IRQ_IRL0] + platform_int_priority[IRQ_IRL1] + \
229 platform_int_priority[IRQ_IRL2] + platform_int_priority[IRQ_IRL3];
230
231 if (irlm == NO_PRIORITY) {
232 /* IRLM = 0 */
233 reg = INTC_ICR_CLEAR;
234 i = IRQ_INTA;
235 printk("Trying to use encoded IRL0-3. IRLs unsupported.\n");
236 } else {
237 /* IRLM = 1 */
238 reg = INTC_ICR_SET;
239 i = IRQ_IRL0;
240 }
241 ctrl_outl(INTC_ICR_IRLM, reg);
242
243 /* Set interrupt priorities according to platform description */
244 for (data = 0, reg = INTC_INTPRI_0; i < NR_INTC_IRQS; i++) {
245 data |= platform_int_priority[i] << ((i % INTC_INTPRI_PPREG) * 4);
246 if ((i % INTC_INTPRI_PPREG) == (INTC_INTPRI_PPREG - 1)) {
247			/* Every 8th priority (i % 8 == 7), write out the Priority Register */
248 ctrl_outl(data, reg);
249 data = 0;
250 reg += 8;
251 }
252 }
253
254#ifdef CONFIG_SH_CAYMAN
255 {
256 extern void init_cayman_irq(void);
257
258 init_cayman_irq();
259 }
260#endif
261
262 /*
263 * And now let interrupts come in.
264 * sti() is not enough, we need to
265 * lower priority, too.
266 */
267 __asm__ __volatile__("getcon " __SR ", %0\n\t"
268 "and %0, %1, %0\n\t"
269 "putcon %0, " __SR "\n\t"
270 : "=&r" (__dummy0)
271 : "r" (__dummy1));
272}
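The priority loop near the end of init_IRQ() packs eight 4-bit priorities into each 32-bit INTPRI register, lowest-numbered IRQ in the least significant nibble, flushing the accumulated word after every eighth entry. An equivalent standalone sketch of the packing (the priority values in the comment are hypothetical):

	/* pack priorities p[0..7] into one INTPRI word, as the loop does */
	static unsigned long pack_intpri_sketch(const int p[8])
	{
	        unsigned long data = 0;
	        int i;

	        for (i = 0; i < 8; i++)
	                data |= (unsigned long)(p[i] & 0xf) << (i * 4);
	        return data;    /* e.g. p = {1, 2, 0, ...} => 0x00000021 */
	}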
diff --git a/arch/sh64/kernel/led.c b/arch/sh64/kernel/led.c
new file mode 100644
index 000000000000..cf993c4a9fdc
--- /dev/null
+++ b/arch/sh64/kernel/led.c
@@ -0,0 +1,41 @@
1/*
2 * arch/sh64/kernel/led.c
3 *
4 * Copyright (C) 2002 Stuart Menefy <stuart.menefy@st.com>
5 *
6 * May be copied or modified under the terms of the GNU General Public
7 * License. See linux/COPYING for more information.
8 *
9 * Flash the LEDs
10 */
11#include <linux/config.h>
12#include <linux/stddef.h>
13#include <linux/sched.h>
14
15void mach_led(int pos, int val);
16
17/* acts like an actual heart beat -- ie thump-thump-pause... */
18void heartbeat(void)
19{
20 static unsigned int cnt = 0, period = 0, dist = 0;
21
22 if (cnt == 0 || cnt == dist) {
23 mach_led(-1, 1);
24 } else if (cnt == 7 || cnt == dist + 7) {
25 mach_led(-1, 0);
26 }
27
28 if (++cnt > period) {
29 cnt = 0;
30
31 /*
32 * The hyperbolic function below modifies the heartbeat period
33 * length as a function of the current (5 min) load. It goes
34 * through the points f(0)=126, f(1)=86, f(5)=51, f(inf)->30.
35 */
36 period = ((672 << FSHIFT) / (5 * avenrun[0] +
37 (7 << FSHIFT))) + 30;
38 dist = period / 4;
39 }
40}
41
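Plugging numbers in confirms the quoted points: avenrun[] holds the load average scaled by 1 << FSHIFT, so at load 0 the period is 672/7 + 30 = 126 ticks, at load 1 it is 672/12 + 30 = 86, at load 5 it is 672/32 + 30 = 51, and the first term vanishes as the load grows, leaving 30. A standalone userspace check (FSHIFT is 11 in the generic kernel headers):

	#include <stdio.h>
	#define FSHIFT 11

	int main(void)
	{
	        unsigned long loads[] = { 0, 1, 5, 1000 };
	        unsigned int period;
	        int i;

	        for (i = 0; i < 4; i++) {
	                unsigned long avenrun0 = loads[i] << FSHIFT;
	                period = ((672UL << FSHIFT) /
	                          (5 * avenrun0 + (7UL << FSHIFT))) + 30;
	                printf("load %lu -> period %u\n", loads[i], period);
	        }
	        return 0;       /* prints 126, 86, 51, 30 */
	}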
diff --git a/arch/sh64/kernel/module.c b/arch/sh64/kernel/module.c
new file mode 100644
index 000000000000..2598f6b88b44
--- /dev/null
+++ b/arch/sh64/kernel/module.c
@@ -0,0 +1,161 @@
1/* Kernel module help for sh64.
2
3 This program is free software; you can redistribute it and/or modify
4 it under the terms of the GNU General Public License as published by
5 the Free Software Foundation; either version 2 of the License, or
6 (at your option) any later version.
7
8 This program is distributed in the hope that it will be useful,
9 but WITHOUT ANY WARRANTY; without even the implied warranty of
10 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 GNU General Public License for more details.
12
13 You should have received a copy of the GNU General Public License
14 along with this program; if not, write to the Free Software
15 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
16
17 Copyright 2004 SuperH (UK) Ltd
18 Author: Richard Curnow
19
20 Based on the sh version, and on code from the sh64-specific parts of
21 modutils, originally written by Richard Curnow and Ben Gaster.
22
23*/
24#include <linux/moduleloader.h>
25#include <linux/elf.h>
26#include <linux/vmalloc.h>
27#include <linux/fs.h>
28#include <linux/string.h>
29#include <linux/kernel.h>
30
31#if 0
32#define DEBUGP printk
33#else
34#define DEBUGP(fmt...)
35#endif
36
37void *module_alloc(unsigned long size)
38{
39 if (size == 0)
40 return NULL;
41 return vmalloc(size);
42}
43
44
45/* Free memory returned from module_alloc */
46void module_free(struct module *mod, void *module_region)
47{
48 vfree(module_region);
49 /* FIXME: If module_region == mod->init_region, trim exception
50 table entries. */
51}
52
53/* We don't need anything special. */
54int module_frob_arch_sections(Elf_Ehdr *hdr,
55 Elf_Shdr *sechdrs,
56 char *secstrings,
57 struct module *mod)
58{
59 return 0;
60}
61
62int apply_relocate_add(Elf32_Shdr *sechdrs,
63 const char *strtab,
64 unsigned int symindex,
65 unsigned int relsec,
66 struct module *me)
67{
68 unsigned int i;
69 Elf32_Rela *rel = (void *)sechdrs[relsec].sh_addr;
70 Elf32_Sym *sym;
71 Elf32_Addr relocation;
72 uint32_t *location;
73 int align;
74 int is_shmedia;
75
76 DEBUGP("Applying relocate section %u to %u\n", relsec,
77 sechdrs[relsec].sh_info);
78 for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
79 /* This is where to make the change */
80 location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
81 + rel[i].r_offset;
82 /* This is the symbol it is referring to. Note that all
83 undefined symbols have been resolved. */
84 sym = (Elf32_Sym *)sechdrs[symindex].sh_addr
85 + ELF32_R_SYM(rel[i].r_info);
86 relocation = sym->st_value + rel[i].r_addend;
87 align = (int)location & 3;
88
89 /* For text addresses, bit2 of the st_other field indicates
90 * whether the symbol is SHmedia (1) or SHcompact (0). If
91 * SHmedia, the LSB of the symbol needs to be asserted
92 * for the CPU to be in SHmedia mode when it starts executing
93 * the branch target. */
94 is_shmedia = (sym->st_other & 4) ? 1 : 0;
95 if (is_shmedia) {
96 relocation |= 1;
97 }
98
99 switch (ELF32_R_TYPE(rel[i].r_info)) {
100 case R_SH_DIR32:
101 DEBUGP("R_SH_DIR32 @%08lx = %08lx\n", (unsigned long) location, (unsigned long) relocation);
102 *location += relocation;
103 break;
104 case R_SH_REL32:
105 DEBUGP("R_SH_REL32 @%08lx = %08lx\n", (unsigned long) location, (unsigned long) relocation);
106 relocation -= (Elf32_Addr) location;
107 *location += relocation;
108 break;
109 case R_SH_IMM_LOW16:
110 DEBUGP("R_SH_IMM_LOW16 @%08lx = %08lx\n", (unsigned long) location, (unsigned long) relocation);
111 *location = (*location & ~0x3fffc00) |
112 ((relocation & 0xffff) << 10);
113 break;
114 case R_SH_IMM_MEDLOW16:
115 DEBUGP("R_SH_IMM_MEDLOW16 @%08lx = %08lx\n", (unsigned long) location, (unsigned long) relocation);
116 *location = (*location & ~0x3fffc00) |
117 (((relocation >> 16) & 0xffff) << 10);
118 break;
119 case R_SH_IMM_LOW16_PCREL:
120 DEBUGP("R_SH_IMM_LOW16_PCREL @%08lx = %08lx\n", (unsigned long) location, (unsigned long) relocation);
121 relocation -= (Elf32_Addr) location;
122 *location = (*location & ~0x3fffc00) |
123 ((relocation & 0xffff) << 10);
124 break;
125 case R_SH_IMM_MEDLOW16_PCREL:
126 DEBUGP("R_SH_IMM_MEDLOW16_PCREL @%08lx = %08lx\n", (unsigned long) location, (unsigned long) relocation);
127 relocation -= (Elf32_Addr) location;
128 *location = (*location & ~0x3fffc00) |
129 (((relocation >> 16) & 0xffff) << 10);
130 break;
131 default:
132 printk(KERN_ERR "module %s: Unknown relocation: %u\n",
133 me->name, ELF32_R_TYPE(rel[i].r_info));
134 return -ENOEXEC;
135 }
136 }
137 return 0;
138}
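The LOW16/MEDLOW16 cases all patch a 16-bit immediate field occupying bits 10..25 of the instruction word, which is what the ~0x3fffc00 mask and the << 10 shift encode. A standalone sketch of the field update (the instruction word 0xcc000000 is an arbitrary illustration, not a claimed opcode):

	/* patch the low 16 bits of `relocation` into a MOVI/SHORI-style
	 * immediate field at bits 10..25 */
	static unsigned int patch_low16_sketch(unsigned int insn,
	                                       unsigned int relocation)
	{
	        return (insn & ~0x3fffc00u) | ((relocation & 0xffffu) << 10);
	}
	/* e.g. patch_low16_sketch(0xcc000000, 0x1234) == 0xcc48d000 */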
139
140int apply_relocate(Elf32_Shdr *sechdrs,
141 const char *strtab,
142 unsigned int symindex,
143 unsigned int relsec,
144 struct module *me)
145{
146 printk(KERN_ERR "module %s: REL RELOCATION unsupported\n",
147 me->name);
148 return -ENOEXEC;
149}
150
151int module_finalize(const Elf_Ehdr *hdr,
152 const Elf_Shdr *sechdrs,
153 struct module *me)
154{
155 return 0;
156}
157
158void module_arch_cleanup(struct module *mod)
159{
160}
161
diff --git a/arch/sh64/kernel/pci-dma.c b/arch/sh64/kernel/pci-dma.c
new file mode 100644
index 000000000000..a36c3d71a3fe
--- /dev/null
+++ b/arch/sh64/kernel/pci-dma.c
@@ -0,0 +1,50 @@
1/*
2 * Copyright (C) 2001 David J. Mckay (david.mckay@st.com)
3 * Copyright (C) 2003 Paul Mundt (lethal@linux-sh.org)
4 *
5 * May be copied or modified under the terms of the GNU General Public
6 * License. See linux/COPYING for more information.
7 *
8 * Dynamic DMA mapping support.
9 */
10#include <linux/types.h>
11#include <linux/mm.h>
12#include <linux/string.h>
13#include <linux/pci.h>
14#include <asm/io.h>
15
16void *consistent_alloc(struct pci_dev *hwdev, size_t size,
17 dma_addr_t *dma_handle)
18{
19 void *ret;
20 int gfp = GFP_ATOMIC;
21 void *vp;
22
23 if (hwdev == NULL || hwdev->dma_mask != 0xffffffff)
24 gfp |= GFP_DMA;
25
26 ret = (void *)__get_free_pages(gfp, get_order(size));
27
28 /* now call our friend ioremap_nocache to give us an uncached area */
29 vp = ioremap_nocache(virt_to_phys(ret), size);
30
31 if (vp != NULL) {
32 memset(vp, 0, size);
33 *dma_handle = virt_to_bus(ret);
34 dma_cache_wback_inv((unsigned long)ret, size);
35 }
36
37 return vp;
38}
39
40void consistent_free(struct pci_dev *hwdev, size_t size,
41 void *vaddr, dma_addr_t dma_handle)
42{
43 void *alloc;
44
45 alloc = bus_to_virt((unsigned long)dma_handle);
46 free_pages((unsigned long)alloc, get_order(size));
47
48 iounmap(vaddr);
49}
50
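A hedged usage sketch for a driver allocating a small descriptor ring, assuming the usual kernel headers; my_dev, the 4 KB size, and the function names are illustrative. Note that consistent_alloc() above returns NULL if the ioremap fails, so callers must check the result:

	/* illustrative caller of the two routines above */
	static void *ring;
	static dma_addr_t ring_dma;

	static int setup_ring_sketch(struct pci_dev *my_dev)
	{
	        ring = consistent_alloc(my_dev, 4096, &ring_dma);
	        if (ring == NULL)
	                return -ENOMEM;
	        /* hand ring_dma to the device; use `ring` from the CPU */
	        return 0;
	}

	static void teardown_ring_sketch(struct pci_dev *my_dev)
	{
	        consistent_free(my_dev, 4096, ring, ring_dma);
	}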
diff --git a/arch/sh64/kernel/pci_sh5.c b/arch/sh64/kernel/pci_sh5.c
new file mode 100644
index 000000000000..6197879e8578
--- /dev/null
+++ b/arch/sh64/kernel/pci_sh5.c
@@ -0,0 +1,541 @@
1/*
2 * Copyright (C) 2001 David J. Mckay (david.mckay@st.com)
3 * Copyright (C) 2003, 2004 Paul Mundt
4 * Copyright (C) 2004 Richard Curnow
5 *
6 * May be copied or modified under the terms of the GNU General Public
7 * License. See linux/COPYING for more information.
8 *
9 * Support functions for the SH5 PCI hardware.
10 */
11
12#include <linux/config.h>
13#include <linux/kernel.h>
14#include <linux/rwsem.h>
15#include <linux/smp.h>
16#include <linux/smp_lock.h>
17#include <linux/interrupt.h>
18#include <linux/init.h>
19#include <linux/errno.h>
20#include <linux/pci.h>
21#include <linux/delay.h>
22#include <linux/types.h>
23#include <asm/pci.h>
24#include <linux/irq.h>
25
26#include <asm/io.h>
27#include <asm/hardware.h>
28#include "pci_sh5.h"
29
30static unsigned long pcicr_virt;
31unsigned long pciio_virt;
32
33static void __init pci_fixup_ide_bases(struct pci_dev *d)
34{
35 int i;
36
37 /*
38 * PCI IDE controllers use non-standard I/O port decoding, respect it.
39 */
40 if ((d->class >> 8) != PCI_CLASS_STORAGE_IDE)
41 return;
42 printk("PCI: IDE base address fixup for %s\n", pci_name(d));
43 for(i=0; i<4; i++) {
44 struct resource *r = &d->resource[i];
45 if ((r->start & ~0x80) == 0x374) {
46 r->start |= 2;
47 r->end = r->start;
48 }
49 }
50}
51DECLARE_PCI_FIXUP_HEADER(PCI_ANY_ID, PCI_ANY_ID, pci_fixup_ide_bases);
52
53char * __init pcibios_setup(char *str)
54{
55 return str;
56}
57
58/* Rounds a number UP to the nearest power of two. Used for
59 * sizing the PCI window.
60 */
61static u32 __init r2p2(u32 num)
62{
63 int i = 31;
64 u32 tmp = num;
65
66 if (num == 0)
67 return 0;
68
69 do {
70 if (tmp & (1 << 31))
71 break;
72 i--;
73 tmp <<= 1;
74 } while (i >= 0);
75
76 tmp = 1 << i;
77 /* If the original number isn't a power of 2, round it up */
78 if (tmp != num)
79 tmp <<= 1;
80
81 return tmp;
82}
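A couple of worked values make the window sizing concrete (using the LSR0 formula from sh5pci_init() above):

	/* r2p2(0x06000000) == 0x08000000   96 MB rounds up to a 128 MB window
	 * r2p2(0x08000000) == 0x08000000   exact powers of two are unchanged
	 * so for a 96 MB machine, LSR0 becomes
	 * (0x08000000 - 0x100000) | 0x1 == 0x07f00001 */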
83
84extern unsigned long long memory_start, memory_end;
85
86int __init sh5pci_init(unsigned memStart, unsigned memSize)
87{
88 u32 lsr0;
89 u32 uval;
90
91 pcicr_virt = onchip_remap(SH5PCI_ICR_BASE, 1024, "PCICR");
92 if (!pcicr_virt) {
93 panic("Unable to remap PCICR\n");
94 }
95
96 pciio_virt = onchip_remap(SH5PCI_IO_BASE, 0x10000, "PCIIO");
97 if (!pciio_virt) {
98 panic("Unable to remap PCIIO\n");
99 }
100
101	pr_debug("Register base address is 0x%08lx\n", pcicr_virt);
102
103 /* Clear snoop registers */
104 SH5PCI_WRITE(CSCR0, 0);
105 SH5PCI_WRITE(CSCR1, 0);
106
107 pr_debug("Wrote to reg\n");
108
109 /* Switch off interrupts */
110 SH5PCI_WRITE(INTM, 0);
111 SH5PCI_WRITE(AINTM, 0);
112 SH5PCI_WRITE(PINTM, 0);
113
114 /* Set bus active, take it out of reset */
115 uval = SH5PCI_READ(CR);
116
117 /* Set command Register */
118 SH5PCI_WRITE(CR, uval | CR_LOCK_MASK | CR_CFINT| CR_FTO | CR_PFE | CR_PFCS | CR_BMAM);
119
120 uval=SH5PCI_READ(CR);
121 pr_debug("CR is actually 0x%08x\n",uval);
122
123 /* Allow it to be a master */
124 /* NB - WE DISABLE I/O ACCESS to stop overlap */
125 /* set WAIT bit to enable stepping, an attempt to improve stability */
126 SH5PCI_WRITE_SHORT(CSR_CMD,
127 PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER | PCI_COMMAND_WAIT);
128
129 /*
130 ** Set translation mapping memory in order to convert the address
131 ** used for the main bus, to the PCI internal address.
132 */
133 SH5PCI_WRITE(MBR,0x40000000);
134
135 /* Always set the max size 512M */
136 SH5PCI_WRITE(MBMR, PCISH5_MEM_SIZCONV(512*1024*1024));
137
138 /*
139 ** I/O addresses are mapped at internal PCI specific address
140 ** as is described into the configuration bridge table.
141 ** These are changed to 0, to allow cards that have legacy
142 ** io such as vga to function correctly. We set the SH5 IOBAR to
143 ** 256K, which is a bit big as we can only have 64K of address space
144 */
145
146 SH5PCI_WRITE(IOBR,0x0);
147
148 pr_debug("PCI:Writing 0x%08x to IOBR\n",0);
149
150 /* Set up a 256K window. Totally pointless waste of address space */
151 SH5PCI_WRITE(IOBMR,0);
152 pr_debug("PCI:Writing 0x%08x to IOBMR\n",0);
153
154 /* The SH5 has a HUGE 256K I/O region, which breaks the PCI spec. Ideally,
155 * we would want to map the I/O region somewhere, but it is so big this is not
156 * that easy!
157 */
158 SH5PCI_WRITE(CSR_IBAR0,~0);
159 /* Set memory size value */
160 memSize = memory_end - memory_start;
161
162 /* Now we set up the mbars so the PCI bus can see the memory of the machine */
163 if (memSize < (1024 * 1024)) {
164 printk(KERN_ERR "PCISH5: Ridiculous memory size of 0x%x?\n", memSize);
165 return -EINVAL;
166 }
167
168 /* Set LSR 0 */
169 lsr0 = (memSize > (512 * 1024 * 1024)) ? 0x1ff00001 : ((r2p2(memSize) - 0x100000) | 0x1);
170 SH5PCI_WRITE(LSR0, lsr0);
171
172 pr_debug("PCI:Writing 0x%08x to LSR0\n",lsr0);
173
174 /* Set MBAR 0 */
175 SH5PCI_WRITE(CSR_MBAR0, memory_start);
176 SH5PCI_WRITE(LAR0, memory_start);
177
178 SH5PCI_WRITE(CSR_MBAR1,0);
179 SH5PCI_WRITE(LAR1,0);
180 SH5PCI_WRITE(LSR1,0);
181
182 pr_debug("PCI:Writing 0x%08llx to CSR_MBAR0\n",memory_start);
183 pr_debug("PCI:Writing 0x%08llx to LAR0\n",memory_start);
184
185 /* Enable the PCI interrupts on the device */
186 SH5PCI_WRITE(INTM, ~0);
187 SH5PCI_WRITE(AINTM, ~0);
188 SH5PCI_WRITE(PINTM, ~0);
189
190 pr_debug("Switching on all error interrupts\n");
191
192 return 0;
193}
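
Worked example of the LSR0 computation above: on a hypothetical 64MB machine, memSize = 0x04000000 is already a power of two, so lsr0 = (0x04000000 - 0x100000) | 0x1 = 0x03f00001 (size mask plus enable bit); anything over 512MB saturates to 0x1ff00001.
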
194
195static int sh5pci_read(struct pci_bus *bus, unsigned int devfn, int where,
196 int size, u32 *val)
197{
198 SH5PCI_WRITE(PAR, CONFIG_CMD(bus, devfn, where));
199
200 switch (size) {
201 case 1:
202 *val = (u8)SH5PCI_READ_BYTE(PDR + (where & 3));
203 break;
204 case 2:
205 *val = (u16)SH5PCI_READ_SHORT(PDR + (where & 2));
206 break;
207 case 4:
208 *val = SH5PCI_READ(PDR);
209 break;
210 }
211
212 return PCIBIOS_SUCCESSFUL;
213}
214
215static int sh5pci_write(struct pci_bus *bus, unsigned int devfn, int where,
216 int size, u32 val)
217{
218 SH5PCI_WRITE(PAR, CONFIG_CMD(bus, devfn, where));
219
220 switch (size) {
221 case 1:
222 SH5PCI_WRITE_BYTE(PDR + (where & 3), (u8)val);
223 break;
224 case 2:
225 SH5PCI_WRITE_SHORT(PDR + (where & 2), (u16)val);
226 break;
227 case 4:
228 SH5PCI_WRITE(PDR, val);
229 break;
230 }
231
232 return PCIBIOS_SUCCESSFUL;
233}
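
For reference, the PAR value written by sh5pci_read()/sh5pci_write() is built by CONFIG_CMD()/SET_CONFIG_BITS() from pci_sh5.h (further down): the enable bit in bit 31, the bus number in bits [23:16], devfn in bits [15:8], and the dword-aligned register offset in bits [7:2]. A stand-alone sketch, illustration only:

	#include <stdio.h>

	/* Mirrors SET_CONFIG_BITS() from pci_sh5.h below. */
	static unsigned int set_config_bits(unsigned int bus, unsigned int devfn,
	                                    unsigned int where)
	{
		return ((bus << 16) | (devfn << 8) | (where & ~3u)) | 0x80000000u;
	}

	int main(void)
	{
		/* Bus 0, device 3, function 0 (devfn = 3 << 3), BAR0 at offset 0x10. */
		printf("PAR = 0x%08x\n", set_config_bits(0, 3 << 3, 0x10));
		/* Prints: PAR = 0x80001810 */
		return 0;
	}
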
234
235static struct pci_ops pci_config_ops = {
236 .read = sh5pci_read,
237 .write = sh5pci_write,
238};
239
240/* Everything hangs off this */
241static struct pci_bus *pci_root_bus;
242
243
244static u8 __init no_swizzle(struct pci_dev *dev, u8 * pin)
245{
246 pr_debug("swizzle for dev %d on bus %d slot %d pin is %d\n",
247 dev->devfn,dev->bus->number, PCI_SLOT(dev->devfn),*pin);
248 return PCI_SLOT(dev->devfn);
249}
250
251static inline u8 bridge_swizzle(u8 pin, u8 slot)
252{
253 return (((pin-1) + slot) % 4) + 1;
254}
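
Worked example of the standard bridge swizzle above: a device in slot 3 raising INTB (pin 2) appears upstream of the bridge as ((2 - 1) + 3) % 4 + 1 = 1, i.e. INTA.
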
255
256u8 __init common_swizzle(struct pci_dev *dev, u8 *pinp)
257{
258 if (dev->bus->number != 0) {
259 u8 pin = *pinp;
260 do {
261 pin = bridge_swizzle(pin, PCI_SLOT(dev->devfn));
262 /* Move up the chain of bridges. */
263 dev = dev->bus->self;
264 } while (dev->bus->self);
265 *pinp = pin;
266
267 /* The slot is the slot of the last bridge. */
268 }
269
270 return PCI_SLOT(dev->devfn);
271}
272
273/* This needs to be shunted out of here into the board specific bit */
274
275static int __init map_cayman_irq(struct pci_dev *dev, u8 slot, u8 pin)
276{
277 int result = -1;
278
279 /* The complication here is that the PCI IRQ lines from the Cayman's 2
280 5V slots get into the CPU via a different path from the IRQ lines
281 from the 3 3.3V slots. Thus, we have to detect whether the card's
282 interrupts go via the 5V or 3.3V path, i.e. the 'bridge swizzling'
283 at the point where we cross from 5V to 3.3V is not the normal case.
284
285 The added complication is that we don't know that the 5V slots are
286 always bus 2, because a card containing a PCI-PCI bridge may be
287 plugged into a 3.3V slot, and this changes the bus numbering.
288
289 Also, the Cayman has an intermediate PCI bus that goes to a custom
290 expansion board header (and to the secondary bridge). This bus has
291 never been used in practice.
292
293 The primary onboard PCI-PCI bridge is device 3 on bus 0.
294 The secondary onboard PCI-PCI bridge is device 0 on the secondary bus of the primary bridge.
295 */
296
297 struct slot_pin {
298 int slot;
299 int pin;
300 } path[4];
301 int i=0;
302
303 while (dev->bus->number > 0) {
304
305 slot = path[i].slot = PCI_SLOT(dev->devfn);
306 pin = path[i].pin = bridge_swizzle(pin, slot);
307 dev = dev->bus->self;
308 i++;
309 if (i > 3) panic("PCI path to root bus too long!\n");
310 }
311
312 slot = PCI_SLOT(dev->devfn);
313 /* This is the slot on bus 0 through which the device is eventually
314 reachable. */
315
316 /* Now work back up. */
317 if ((slot < 3) || (i == 0)) {
318 /* Bus 0 (incl. PCI-PCI bridge itself) : perform the final
319 swizzle now. */
320 result = IRQ_INTA + bridge_swizzle(pin, slot) - 1;
321 } else {
322 i--;
323 slot = path[i].slot;
324 pin = path[i].pin;
325 if (slot > 0) {
326 panic("PCI expansion bus device found - not handled!\n");
327 } else {
328 if (i > 0) {
329 /* 5V slots */
330 i--;
331 slot = path[i].slot;
332 pin = path[i].pin;
333 /* 'pin' was swizzled earlier wrt slot, don't do it again. */
334 result = IRQ_P2INTA + (pin - 1);
335 } else {
336 /* IRQ for secondary PCI-PCI bridge : unused */
337 result = -1;
338 }
339 }
340 }
341
342 return result;
343}
344
345irqreturn_t pcish5_err_irq(int irq, void *dev_id, struct pt_regs *regs)
346{
347 unsigned pci_int, pci_air, pci_cir, pci_aint;
348
349 pci_int = SH5PCI_READ(INT);
350 pci_cir = SH5PCI_READ(CIR);
351 pci_air = SH5PCI_READ(AIR);
352
353 if (pci_int) {
354 printk("PCI INTERRUPT (at %08llx)!\n", regs->pc);
355 printk("PCI INT -> 0x%x\n", pci_int & 0xffff);
356 printk("PCI AIR -> 0x%x\n", pci_air);
357 printk("PCI CIR -> 0x%x\n", pci_cir);
358 SH5PCI_WRITE(INT, ~0);
359 }
360
361 pci_aint = SH5PCI_READ(AINT);
362 if (pci_aint) {
363 printk("PCI ARB INTERRUPT!\n");
364 printk("PCI AINT -> 0x%x\n", pci_aint);
365 printk("PCI AIR -> 0x%x\n", pci_air);
366 printk("PCI CIR -> 0x%x\n", pci_cir);
367 SH5PCI_WRITE(AINT, ~0);
368 }
369
370 return IRQ_HANDLED;
371}
372
373irqreturn_t pcish5_serr_irq(int irq, void *dev_id, struct pt_regs *regs)
374{
375 printk("SERR IRQ\n");
376
377 return IRQ_NONE;
378}
379
380#define ROUND_UP(x, a) (((x) + (a) - 1) & ~((a) - 1))
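
ROUND_UP() assumes a power-of-two alignment. Worked example: ROUND_UP(0x1234, 4096) = (0x1234 + 0xfff) & ~0xfff = 0x2000; an already-aligned value is returned unchanged.
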
381
382static void __init
383pcibios_size_bridge(struct pci_bus *bus, struct resource *ior,
384 struct resource *memr)
385{
386 struct resource io_res, mem_res;
387 struct pci_dev *dev;
388 struct pci_dev *bridge = bus->self;
389 struct list_head *ln;
390
391 if (!bridge)
392 return; /* host bridge, nothing to do */
393
394 /* set reasonable default locations for pcibios_align_resource */
395 io_res.start = PCIBIOS_MIN_IO;
396 mem_res.start = PCIBIOS_MIN_MEM;
397
398 io_res.end = io_res.start;
399 mem_res.end = mem_res.start;
400
401 /* Collect information about how our direct children are laid out. */
402 for (ln=bus->devices.next; ln != &bus->devices; ln=ln->next) {
403 int i;
404 dev = pci_dev_b(ln);
405
406 /* Skip bridges for now */
407 if (dev->class >> 8 == PCI_CLASS_BRIDGE_PCI)
408 continue;
409
410 for (i = 0; i < PCI_NUM_RESOURCES; i++) {
411 struct resource res;
412 unsigned long size;
413
414 memcpy(&res, &dev->resource[i], sizeof(res));
415 size = res.end - res.start + 1;
416
417 if (res.flags & IORESOURCE_IO) {
418 res.start = io_res.end;
419 pcibios_align_resource(dev, &res, size, 0);
420 io_res.end = res.start + size;
421 } else if (res.flags & IORESOURCE_MEM) {
422 res.start = mem_res.end;
423 pcibios_align_resource(dev, &res, size, 0);
424 mem_res.end = res.start + size;
425 }
426 }
427 }
428
429 /* And for all of the subordinate busses. */
430 for (ln=bus->children.next; ln != &bus->children; ln=ln->next)
431 pcibios_size_bridge(pci_bus_b(ln), &io_res, &mem_res);
432
433 /* turn the ending locations into sizes (subtract start) */
434 io_res.end -= io_res.start;
435 mem_res.end -= mem_res.start;
436
437 /* Align the sizes up by bridge rules */
438 io_res.end = ROUND_UP(io_res.end, 4*1024) - 1;
439 mem_res.end = ROUND_UP(mem_res.end, 1*1024*1024) - 1;
440
441 /* Adjust the bridge's allocation requirements */
442 bridge->resource[0].end = bridge->resource[0].start + io_res.end;
443 bridge->resource[1].end = bridge->resource[1].start + mem_res.end;
444
445 bridge->resource[PCI_BRIDGE_RESOURCES].end =
446 bridge->resource[PCI_BRIDGE_RESOURCES].start + io_res.end;
447 bridge->resource[PCI_BRIDGE_RESOURCES+1].end =
448 bridge->resource[PCI_BRIDGE_RESOURCES+1].start + mem_res.end;
449
450 /* adjust parent's resource requirements */
451 if (ior) {
452 ior->end = ROUND_UP(ior->end, 4*1024);
453 ior->end += io_res.end;
454 }
455
456 if (memr) {
457 memr->end = ROUND_UP(memr->end, 1*1024*1024);
458 memr->end += mem_res.end;
459 }
460}
461
462#undef ROUND_UP
463
464static void __init pcibios_size_bridges(void)
465{
466 struct resource io_res, mem_res;
467
468 memset(&io_res, 0, sizeof(io_res));
469 memset(&mem_res, 0, sizeof(mem_res));
470
471 pcibios_size_bridge(pci_root_bus, &io_res, &mem_res);
472}
473
474static int __init pcibios_init(void)
475{
476 if (request_irq(IRQ_ERR, pcish5_err_irq,
477 SA_INTERRUPT, "PCI Error",NULL) < 0) {
478 printk(KERN_ERR "PCISH5: Cannot hook PCI_PERR interrupt\n");
479 return -EINVAL;
480 }
481
482 if (request_irq(IRQ_SERR, pcish5_serr_irq,
483 SA_INTERRUPT, "PCI SERR interrupt", NULL) < 0) {
484 printk(KERN_ERR "PCISH5: Cannot hook PCI_SERR interrupt\n");
485 return -EINVAL;
486 }
487
488 /* The PCI subsystem needs to know where memory is and how much
489 * of it there is. I've simply made these globals. A better mechanism
490 * is probably needed.
491 */
492 sh5pci_init(__pa(memory_start),
493 __pa(memory_end) - __pa(memory_start));
494
495 pci_root_bus = pci_scan_bus(0, &pci_config_ops, NULL);
496 pcibios_size_bridges();
497 pci_assign_unassigned_resources();
498 pci_fixup_irqs(no_swizzle, map_cayman_irq);
499
500 return 0;
501}
502
503subsys_initcall(pcibios_init);
504
505void __init pcibios_fixup_bus(struct pci_bus *bus)
506{
507 struct pci_dev *dev = bus->self;
508 int i;
509
510#if 1
511 if(dev) {
512 for(i=0; i<3; i++) {
513 bus->resource[i] =
514 &dev->resource[PCI_BRIDGE_RESOURCES+i];
515 bus->resource[i]->name = bus->name;
516 }
517 bus->resource[0]->flags |= IORESOURCE_IO;
518 bus->resource[1]->flags |= IORESOURCE_MEM;
519
520 /* For now, propagate host limits to the bus;
521 * we'll adjust them later. */
522
523#if 1
524 bus->resource[0]->end = 64*1024 - 1 ;
525 bus->resource[1]->end = PCIBIOS_MIN_MEM+(256*1024*1024)-1;
526 bus->resource[0]->start = PCIBIOS_MIN_IO;
527 bus->resource[1]->start = PCIBIOS_MIN_MEM;
528#else
529 bus->resource[0]->end = 0;
530 bus->resource[1]->end = 0;
531 bus->resource[0]->start = 0;
532 bus->resource[1]->start = 0;
533#endif
534 /* Turn off downstream PF memory address range by default */
535 bus->resource[2]->start = 1024*1024;
536 bus->resource[2]->end = bus->resource[2]->start - 1;
537 }
538#endif
539
540}
541
diff --git a/arch/sh64/kernel/pci_sh5.h b/arch/sh64/kernel/pci_sh5.h
new file mode 100644
index 000000000000..8f21f5d2aa3e
--- /dev/null
+++ b/arch/sh64/kernel/pci_sh5.h
@@ -0,0 +1,107 @@
1/*
2 * Copyright (C) 2001 David J. Mckay (david.mckay@st.com)
3 *
4 * May be copied or modified under the terms of the GNU General Public
5 * License. See linux/COPYING for more information.
6 *
7 * Definitions for the SH5 PCI hardware.
8 */
9
10/* Product ID */
11#define PCISH5_PID 0x350d
12
13/* vendor ID */
14#define PCISH5_VID 0x1054
15
16/* Configuration types */
17#define ST_TYPE0 0x00 /* Configuration cycle type 0 */
18#define ST_TYPE1 0x01 /* Configuration cycle type 1 */
19
20/* VCR data */
21#define PCISH5_VCR_STATUS 0x00
22#define PCISH5_VCR_VERSION 0x08
23
24/*
25** ICR register offsets and bits
26*/
27#define PCISH5_ICR_CR 0x100 /* PCI control register values */
28#define CR_PBAM (1<<12)
29#define CR_PFCS (1<<11)
30#define CR_FTO (1<<10)
31#define CR_PFE (1<<9)
32#define CR_TBS (1<<8)
33#define CR_SPUE (1<<7)
34#define CR_BMAM (1<<6)
35#define CR_HOST (1<<5)
36#define CR_CLKEN (1<<4)
37#define CR_SOCS (1<<3)
38#define CR_IOCS (1<<2)
39#define CR_RSTCTL (1<<1)
40#define CR_CFINT (1<<0)
41#define CR_LOCK_MASK 0xa5000000
42
43#define PCISH5_ICR_INT 0x114 /* Interrupt register values */
44#define INT_MADIM (1<<2)
45
46#define PCISH5_ICR_LSR0 0x104 /* Local space register values */
47#define PCISH5_ICR_LSR1 0x108 /* Local space register values */
48#define PCISH5_ICR_LAR0 0x10c /* Local address register values */
49#define PCISH5_ICR_LAR1 0x110 /* Local address register values */
50#define PCISH5_ICR_INTM 0x118 /* Interrupt mask register values */
51#define PCISH5_ICR_AIR 0x11c /* Interrupt error address information register values */
52#define PCISH5_ICR_CIR 0x120 /* Interrupt error command information register values */
53#define PCISH5_ICR_AINT 0x130 /* Interrupt error arbiter interrupt register values */
54#define PCISH5_ICR_AINTM 0x134 /* Interrupt error arbiter interrupt mask register values */
55#define PCISH5_ICR_BMIR 0x138 /* Interrupt error info register of bus master values */
56#define PCISH5_ICR_PAR 0x1c0 /* Pio address register values */
57#define PCISH5_ICR_MBR 0x1c4 /* Memory space bank register values */
58#define PCISH5_ICR_IOBR 0x1c8 /* I/O space bank register values */
59#define PCISH5_ICR_PINT 0x1cc /* power management interrupt register values */
60#define PCISH5_ICR_PINTM 0x1d0 /* power management interrupt mask register values */
61#define PCISH5_ICR_MBMR 0x1d8 /* memory space bank mask register values */
62#define PCISH5_ICR_IOBMR 0x1dc /* I/O space bank mask register values */
63#define PCISH5_ICR_CSCR0 0x210 /* PCI cache snoop control register 0 */
64#define PCISH5_ICR_CSCR1 0x214 /* PCI cache snoop control register 1 */
65#define PCISH5_ICR_PDR 0x220 /* Pio data register values */
66
67/* These are configs space registers */
68#define PCISH5_ICR_CSR_VID 0x000 /* Vendor id */
69#define PCISH5_ICR_CSR_DID 0x002 /* Device id */
70#define PCISH5_ICR_CSR_CMD 0x004 /* Command register */
71#define PCISH5_ICR_CSR_STATUS 0x006 /* Status */
72#define PCISH5_ICR_CSR_IBAR0 0x010 /* I/O base address register */
73#define PCISH5_ICR_CSR_MBAR0 0x014 /* First Memory base address register */
74#define PCISH5_ICR_CSR_MBAR1 0x018 /* Second Memory base address register */
75
76
77
78/* Base address of registers */
79#define SH5PCI_ICR_BASE (PHYS_PCI_BLOCK + 0x00040000)
80#define SH5PCI_IO_BASE (PHYS_PCI_BLOCK + 0x00800000)
81/* #define SH5PCI_VCR_BASE (P2SEG_PCICB_BLOCK + P2SEG) */
82
83/* Register selection macro */
84#define PCISH5_ICR_REG(x) ( pcicr_virt + (PCISH5_ICR_##x))
85/* #define PCISH5_VCR_REG(x) ( SH5PCI_VCR_BASE (PCISH5_VCR_##x)) */
86
87/* Write I/O functions */
88#define SH5PCI_WRITE(reg,val) ctrl_outl((u32)(val),PCISH5_ICR_REG(reg))
89#define SH5PCI_WRITE_SHORT(reg,val) ctrl_outw((u16)(val),PCISH5_ICR_REG(reg))
90#define SH5PCI_WRITE_BYTE(reg,val) ctrl_outb((u8)(val),PCISH5_ICR_REG(reg))
91
92/* Read I/O functions */
93#define SH5PCI_READ(reg) ctrl_inl(PCISH5_ICR_REG(reg))
94#define SH5PCI_READ_SHORT(reg) ctrl_inw(PCISH5_ICR_REG(reg))
95#define SH5PCI_READ_BYTE(reg) ctrl_inb(PCISH5_ICR_REG(reg))
96
97/* Set PCI config bits */
98#define SET_CONFIG_BITS(bus,devfn,where) ((((bus) << 16) | ((devfn) << 8) | ((where) & ~3)) | 0x80000000)
99
100/* Set PCI command register */
101#define CONFIG_CMD(bus, devfn, where) SET_CONFIG_BITS(bus->number,devfn,where)
102
103/* Size converters */
104#define PCISH5_MEM_SIZCONV(x) (((x / 0x40000) - 1) << 18)
105#define PCISH5_IO_SIZCONV(x) (((x / 0x40000) - 1) << 18)
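
Worked example: the MBMR write in pci_sh5.c uses PCISH5_MEM_SIZCONV(512*1024*1024) = ((0x20000000 / 0x40000) - 1) << 18 = 2047 << 18 = 0x1ffc0000, i.e. the bank size in 256KB units minus one, shifted into the mask field.
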
106
107
diff --git a/arch/sh64/kernel/pcibios.c b/arch/sh64/kernel/pcibios.c
new file mode 100644
index 000000000000..50c61dcb9fae
--- /dev/null
+++ b/arch/sh64/kernel/pcibios.c
@@ -0,0 +1,168 @@
1/*
2 * $Id: pcibios.c,v 1.1 2001/08/24 12:38:19 dwmw2 Exp $
3 *
4 * arch/sh64/kernel/pcibios.c
5 *
6 * Copyright (C) 2002 STMicroelectronics Limited
7 * Author : David J. McKay
8 *
9 * Copyright (C) 2004 Richard Curnow, SuperH UK Limited
10 *
11 * This file is subject to the terms and conditions of the GNU General Public
12 * License. See the file "COPYING" in the main directory of this archive
13 * for more details.
14 * This is GPL'd.
15 *
16 * Provided here are generic versions of:
17 * pcibios_update_resource()
18 * pcibios_align_resource()
19 * pcibios_enable_device()
20 * pcibios_set_master()
21 * pcibios_update_irq()
22 *
23 * These functions are collected here to reduce duplication of common
24 * code amongst the many platform-specific PCI support code files.
25 *
26 * Platform-specific files are expected to provide:
27 * pcibios_fixup_bus()
28 * pcibios_init()
29 * pcibios_setup()
30 * pcibios_fixup_pbus_ranges()
31 */
32
33#include <linux/kernel.h>
34#include <linux/pci.h>
35#include <linux/init.h>
36
37void
38pcibios_update_resource(struct pci_dev *dev, struct resource *root,
39 struct resource *res, int resource)
40{
41 u32 new, check;
42 int reg;
43
44 new = res->start | (res->flags & PCI_REGION_FLAG_MASK);
45 if (resource < 6) {
46 reg = PCI_BASE_ADDRESS_0 + 4*resource;
47 } else if (resource == PCI_ROM_RESOURCE) {
48 res->flags |= IORESOURCE_ROM_ENABLE;
49 new |= PCI_ROM_ADDRESS_ENABLE;
50 reg = dev->rom_base_reg;
51 } else {
52 /* Somebody might have asked allocation of a non-standard resource */
53 return;
54 }
55
56 pci_write_config_dword(dev, reg, new);
57 pci_read_config_dword(dev, reg, &check);
58 if ((new ^ check) & ((new & PCI_BASE_ADDRESS_SPACE_IO) ? PCI_BASE_ADDRESS_IO_MASK : PCI_BASE_ADDRESS_MEM_MASK)) {
59 printk(KERN_ERR "PCI: Error while updating region "
60 "%s/%d (%08x != %08x)\n", pci_name(dev), resource,
61 new, check);
62 }
63}
64
65/*
66 * We need to avoid collisions with `mirrored' VGA ports
67 * and other strange ISA hardware, so we always want the
68 * addresses to be allocated in the 0x000-0x0ff region
69 * modulo 0x400.
70 */
71void pcibios_align_resource(void *data, struct resource *res,
72 unsigned long size, unsigned long align)
73{
74 if (res->flags & IORESOURCE_IO) {
75 unsigned long start = res->start;
76
77 if (start & 0x300) {
78 start = (start + 0x3ff) & ~0x3ff;
79 res->start = start;
80 }
81 }
82}
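
Worked example: an I/O resource handed in at 0x1510 has 0x1510 & 0x300 = 0x100, so it is moved up to (0x1510 + 0x3ff) & ~0x3ff = 0x1800, which again lies in the 0x000-0x0ff region modulo 0x400.
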
83
84static void pcibios_enable_bridge(struct pci_dev *dev)
85{
86 struct pci_bus *bus = dev->subordinate;
87 u16 cmd, old_cmd;
88
89 pci_read_config_word(dev, PCI_COMMAND, &cmd);
90 old_cmd = cmd;
91
92 if (bus->resource[0]->flags & IORESOURCE_IO) {
93 cmd |= PCI_COMMAND_IO;
94 }
95 if ((bus->resource[1]->flags & IORESOURCE_MEM) ||
96 (bus->resource[2]->flags & IORESOURCE_PREFETCH)) {
97 cmd |= PCI_COMMAND_MEMORY;
98 }
99
100 if (cmd != old_cmd) {
101 pci_write_config_word(dev, PCI_COMMAND, cmd);
102 }
103
104 printk("PCI bridge %s, command register -> %04x\n",
105 pci_name(dev), cmd);
106
107}
108
109
110
111int pcibios_enable_device(struct pci_dev *dev, int mask)
112{
113 u16 cmd, old_cmd;
114 int idx;
115 struct resource *r;
116
117 if ((dev->class >> 8) == PCI_CLASS_BRIDGE_PCI) {
118 pcibios_enable_bridge(dev);
119 }
120
121 pci_read_config_word(dev, PCI_COMMAND, &cmd);
122 old_cmd = cmd;
123 for(idx=0; idx<6; idx++) {
124 if (!(mask & (1 << idx)))
125 continue;
126 r = &dev->resource[idx];
127 if (!r->start && r->end) {
128 printk(KERN_ERR "PCI: Device %s not available because of resource collisions\n", pci_name(dev));
129 return -EINVAL;
130 }
131 if (r->flags & IORESOURCE_IO)
132 cmd |= PCI_COMMAND_IO;
133 if (r->flags & IORESOURCE_MEM)
134 cmd |= PCI_COMMAND_MEMORY;
135 }
136 if (dev->resource[PCI_ROM_RESOURCE].start)
137 cmd |= PCI_COMMAND_MEMORY;
138 if (cmd != old_cmd) {
139 printk(KERN_INFO "PCI: Enabling device %s (%04x -> %04x)\n", pci_name(dev), old_cmd, cmd);
140 pci_write_config_word(dev, PCI_COMMAND, cmd);
141 }
142 return 0;
143}
144
145/*
146 * If we set up a device for bus mastering, we need to check and set
147 * the latency timer as it may not be properly set.
148 */
149unsigned int pcibios_max_latency = 255;
150
151void pcibios_set_master(struct pci_dev *dev)
152{
153 u8 lat;
154 pci_read_config_byte(dev, PCI_LATENCY_TIMER, &lat);
155 if (lat < 16)
156 lat = (64 <= pcibios_max_latency) ? 64 : pcibios_max_latency;
157 else if (lat > pcibios_max_latency)
158 lat = pcibios_max_latency;
159 else
160 return;
161 printk(KERN_INFO "PCI: Setting latency timer of device %s to %d\n", pci_name(dev), lat);
162 pci_write_config_byte(dev, PCI_LATENCY_TIMER, lat);
163}
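
Worked example: a device whose latency timer reads 8 is raised to 64 (or to pcibios_max_latency, if that has been lowered below 64); one reading 255 when pcibios_max_latency is 128 is clamped down to 128; a value already in range causes an early return with no config write.
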
164
165void __init pcibios_update_irq(struct pci_dev *dev, int irq)
166{
167 pci_write_config_byte(dev, PCI_INTERRUPT_LINE, irq);
168}
diff --git a/arch/sh64/kernel/process.c b/arch/sh64/kernel/process.c
new file mode 100644
index 000000000000..efde41c0cd66
--- /dev/null
+++ b/arch/sh64/kernel/process.c
@@ -0,0 +1,962 @@
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * arch/sh64/kernel/process.c
7 *
8 * Copyright (C) 2000, 2001 Paolo Alberelli
9 * Copyright (C) 2003 Paul Mundt
10 * Copyright (C) 2003, 2004 Richard Curnow
11 *
12 * Started from SH3/4 version:
13 * Copyright (C) 1999, 2000 Niibe Yutaka & Kaz Kojima
14 *
15 * In turn started from i386 version:
16 * Copyright (C) 1995 Linus Torvalds
17 *
18 */
19
20/*
21 * This file handles the architecture-dependent parts of process handling..
22 */
23
24/* Temporary flags/tests. All to be removed/undefined. BEGIN */
25#define IDLE_TRACE
26#define VM_SHOW_TABLES
27#define VM_TEST_FAULT
28#define VM_TEST_RTLBMISS
29#define VM_TEST_WTLBMISS
30
31#undef VM_SHOW_TABLES
32#undef IDLE_TRACE
33/* Temporary flags/tests. All to be removed/undefined. END */
34
35#define __KERNEL_SYSCALLS__
36#include <stdarg.h>
37
38#include <linux/config.h>
39#include <linux/kernel.h>
40#include <linux/rwsem.h>
41#include <linux/mm.h>
42#include <linux/smp.h>
43#include <linux/smp_lock.h>
44#include <linux/ptrace.h>
45#include <linux/slab.h>
46#include <linux/vmalloc.h>
47#include <linux/user.h>
48#include <linux/a.out.h>
49#include <linux/interrupt.h>
50#include <linux/unistd.h>
51#include <linux/delay.h>
52#include <linux/reboot.h>
53#include <linux/init.h>
54
55#include <asm/uaccess.h>
56#include <asm/pgtable.h>
57#include <asm/system.h>
58#include <asm/io.h>
59#include <asm/processor.h> /* includes also <asm/registers.h> */
60#include <asm/mmu_context.h>
61#include <asm/elf.h>
62#include <asm/page.h>
63
64#include <linux/irq.h>
65
66struct task_struct *last_task_used_math = NULL;
67
68#ifdef IDLE_TRACE
69#ifdef VM_SHOW_TABLES
70/* For testing */
71static void print_PTE(long base)
72{
73 int i, skip=0;
74 long long x, y, *p = (long long *) base;
75
76 for (i=0; i< 512; i++, p++){
77 if (*p == 0) {
78 if (!skip) {
79 skip++;
80 printk("(0s) ");
81 }
82 } else {
83 skip=0;
84 x = (*p) >> 32;
85 y = (*p) & 0xffffffff;
86 printk("%08Lx%08Lx ", x, y);
87 if (!((i+1)&0x3)) printk("\n");
88 }
89 }
90}
91
92/* For testing */
93static void print_DIR(long base)
94{
95 int i, skip=0;
96 long *p = (long *) base;
97
98 for (i=0; i< 512; i++, p++){
99 if (*p == 0) {
100 if (!skip) {
101 skip++;
102 printk("(0s) ");
103 }
104 } else {
105 skip=0;
106 printk("%08lx ", *p);
107 if (!((i+1)&0x7)) printk("\n");
108 }
109 }
110}
111
112/* For testing */
113static void print_vmalloc_first_tables(void)
114{
115
116#define PRESENT 0x800 /* Bit 11 */
117
118 /*
119 * Do it really dirty by looking at raw addresses,
120 * raw offsets, no types. If we used pgtable/pgalloc
121 * macros/definitions we could hide potential bugs.
122 *
123 * Note that pointers are 32-bit for CDC.
124 */
125 long pgdt, pmdt, ptet;
126
127 pgdt = (long) &swapper_pg_dir;
128 printk("-->PGD (0x%08lx):\n", pgdt);
129 print_DIR(pgdt);
130 printk("\n");
131
132 /* VMALLOC pool is mapped at 0xc0000000, second (pointer) entry in PGD */
133 pgdt += 4;
134 pmdt = (long) (* (long *) pgdt);
135 if (!(pmdt & PRESENT)) {
136 printk("No PMD\n");
137 return;
138 } else pmdt &= 0xfffff000;
139
140 printk("-->PMD (0x%08lx):\n", pmdt);
141 print_DIR(pmdt);
142 printk("\n");
143
144 /* Get the pmdt displacement for 0xc0000000 */
145 pmdt += 2048;
146
147 /* just look at first two address ranges ... */
148 /* ... 0xc0000000 ... */
149 ptet = (long) (* (long *) pmdt);
150 if (!(ptet & PRESENT)) {
151 printk("No PTE0\n");
152 return;
153 } else ptet &= 0xfffff000;
154
155 printk("-->PTE0 (0x%08lx):\n", ptet);
156 print_PTE(ptet);
157 printk("\n");
158
159 /* ... 0xc0001000 ... */
160 ptet += 4;
161 if (!(ptet & PRESENT)) {
162 printk("No PTE1\n");
163 return;
164 } else ptet &= 0xfffff000;
165 printk("-->PTE1 (0x%08lx):\n", ptet);
166 print_PTE(ptet);
167 printk("\n");
168}
169#else
170#define print_vmalloc_first_tables()
171#endif /* VM_SHOW_TABLES */
172
173static void test_VM(void)
174{
175 void *a, *b, *c;
176
177#ifdef VM_SHOW_TABLES
178 printk("Initial PGD/PMD/PTE\n");
179#endif
180 print_vmalloc_first_tables();
181
182 printk("Allocating 2 bytes\n");
183 a = vmalloc(2);
184 print_vmalloc_first_tables();
185
186 printk("Allocating 4100 bytes\n");
187 b = vmalloc(4100);
188 print_vmalloc_first_tables();
189
190 printk("Allocating 20234 bytes\n");
191 c = vmalloc(20234);
192 print_vmalloc_first_tables();
193
194#ifdef VM_TEST_FAULT
195 /* Here you may want to fault ! */
196
197#ifdef VM_TEST_RTLBMISS
198 printk("Ready to fault upon read.\n");
199 if (* (char *) a) {
200 printk("RTLBMISSed on area a !\n");
201 }
202 printk("RTLBMISSed on area a !\n");
203#endif
204
205#ifdef VM_TEST_WTLBMISS
206 printk("Ready to fault upon write.\n");
207 *((char *) b) = 'L';
208 printk("WTLBMISSed on area b !\n");
209#endif
210
211#endif /* VM_TEST_FAULT */
212
213 printk("Deallocating the 4100 byte chunk\n");
214 vfree(b);
215 print_vmalloc_first_tables();
216
217 printk("Deallocating the 2 byte chunk\n");
218 vfree(a);
219 print_vmalloc_first_tables();
220
221 printk("Deallocating the last chunk\n");
222 vfree(c);
223 print_vmalloc_first_tables();
224}
225
226extern unsigned long volatile jiffies;
227int once = 0;
228unsigned long old_jiffies;
229int pid = -1, pgid = -1;
230
231void idle_trace(void)
232{
233
234 _syscall0(int, getpid)
235 _syscall1(int, getpgid, int, pid)
236
237 if (!once) {
238 /* VM allocation/deallocation simple test */
239 test_VM();
240 pid = getpid();
241
242 printk("Got all through to Idle !!\n");
243 printk("I'm now going to loop forever ...\n");
244 printk("Any ! below is a timer tick.\n");
245 printk("Any . below is a getpgid system call from pid = %d.\n", pid);
246
247
248 old_jiffies = jiffies;
249 once++;
250 }
251
252 if (old_jiffies != jiffies) {
253 old_jiffies = jiffies - old_jiffies;
254 switch (old_jiffies) {
255 case 1:
256 printk("!");
257 break;
258 case 2:
259 printk("!!");
260 break;
261 case 3:
262 printk("!!!");
263 break;
264 case 4:
265 printk("!!!!");
266 break;
267 default:
268 printk("(%d!)", (int) old_jiffies);
269 }
270 old_jiffies = jiffies;
271 }
272 pgid = getpgid(pid);
273 printk(".");
274}
275#else
276#define idle_trace() do { } while (0)
277#endif /* IDLE_TRACE */
278
279static int hlt_counter = 1;
280
281#define HARD_IDLE_TIMEOUT (HZ / 3)
282
283void disable_hlt(void)
284{
285 hlt_counter++;
286}
287
288void enable_hlt(void)
289{
290 hlt_counter--;
291}
292
293static int __init nohlt_setup(char *__unused)
294{
295 hlt_counter = 1;
296 return 1;
297}
298
299static int __init hlt_setup(char *__unused)
300{
301 hlt_counter = 0;
302 return 1;
303}
304
305__setup("nohlt", nohlt_setup);
306__setup("hlt", hlt_setup);
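
Since hlt_counter defaults to 1, the low-power "sleep" instruction is disabled out of the box: the idle loop only uses it when the kernel is booted with "hlt" on the command line, while booting with "nohlt" explicitly requests the default busy-wait behaviour.
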
307
308static inline void hlt(void)
309{
310 if (hlt_counter)
311 return;
312
313 __asm__ __volatile__ ("sleep" : : : "memory");
314}
315
316/*
317 * The idle loop on a uniprocessor SH..
318 */
319void default_idle(void)
320{
321 /* endless idle loop with no priority at all */
322 while (1) {
323 if (hlt_counter) {
324 while (1)
325 if (need_resched())
326 break;
327 } else {
328 local_irq_disable();
329 while (!need_resched()) {
330 local_irq_enable();
331 idle_trace();
332 hlt();
333 local_irq_disable();
334 }
335 local_irq_enable();
336 }
337 schedule();
338 }
339}
340
341void cpu_idle(void)
342{
343 default_idle();
344}
345
346void machine_restart(char * __unused)
347{
348 extern void phys_stext(void);
349
350 phys_stext();
351}
352
353void machine_halt(void)
354{
355 for (;;);
356}
357
358void machine_power_off(void)
359{
360 extern void enter_deep_standby(void);
361
362 enter_deep_standby();
363}
364
365void show_regs(struct pt_regs * regs)
366{
367 unsigned long long ah, al, bh, bl, ch, cl;
368
369 printk("\n");
370
371 ah = (regs->pc) >> 32;
372 al = (regs->pc) & 0xffffffff;
373 bh = (regs->regs[18]) >> 32;
374 bl = (regs->regs[18]) & 0xffffffff;
375 ch = (regs->regs[15]) >> 32;
376 cl = (regs->regs[15]) & 0xffffffff;
377 printk("PC : %08Lx%08Lx LINK: %08Lx%08Lx SP : %08Lx%08Lx\n",
378 ah, al, bh, bl, ch, cl);
379
380 ah = (regs->sr) >> 32;
381 al = (regs->sr) & 0xffffffff;
382 asm volatile ("getcon " __TEA ", %0" : "=r" (bh));
383 asm volatile ("getcon " __TEA ", %0" : "=r" (bl));
384 bh = (bh) >> 32;
385 bl = (bl) & 0xffffffff;
386 asm volatile ("getcon " __KCR0 ", %0" : "=r" (ch));
387 asm volatile ("getcon " __KCR0 ", %0" : "=r" (cl));
388 ch = (ch) >> 32;
389 cl = (cl) & 0xffffffff;
390 printk("SR : %08Lx%08Lx TEA : %08Lx%08Lx KCR0: %08Lx%08Lx\n",
391 ah, al, bh, bl, ch, cl);
392
393 ah = (regs->regs[0]) >> 32;
394 al = (regs->regs[0]) & 0xffffffff;
395 bh = (regs->regs[1]) >> 32;
396 bl = (regs->regs[1]) & 0xffffffff;
397 ch = (regs->regs[2]) >> 32;
398 cl = (regs->regs[2]) & 0xffffffff;
399 printk("R0 : %08Lx%08Lx R1 : %08Lx%08Lx R2 : %08Lx%08Lx\n",
400 ah, al, bh, bl, ch, cl);
401
402 ah = (regs->regs[3]) >> 32;
403 al = (regs->regs[3]) & 0xffffffff;
404 bh = (regs->regs[4]) >> 32;
405 bl = (regs->regs[4]) & 0xffffffff;
406 ch = (regs->regs[5]) >> 32;
407 cl = (regs->regs[5]) & 0xffffffff;
408 printk("R3 : %08Lx%08Lx R4 : %08Lx%08Lx R5 : %08Lx%08Lx\n",
409 ah, al, bh, bl, ch, cl);
410
411 ah = (regs->regs[6]) >> 32;
412 al = (regs->regs[6]) & 0xffffffff;
413 bh = (regs->regs[7]) >> 32;
414 bl = (regs->regs[7]) & 0xffffffff;
415 ch = (regs->regs[8]) >> 32;
416 cl = (regs->regs[8]) & 0xffffffff;
417 printk("R6 : %08Lx%08Lx R7 : %08Lx%08Lx R8 : %08Lx%08Lx\n",
418 ah, al, bh, bl, ch, cl);
419
420 ah = (regs->regs[9]) >> 32;
421 al = (regs->regs[9]) & 0xffffffff;
422 bh = (regs->regs[10]) >> 32;
423 bl = (regs->regs[10]) & 0xffffffff;
424 ch = (regs->regs[11]) >> 32;
425 cl = (regs->regs[11]) & 0xffffffff;
426 printk("R9 : %08Lx%08Lx R10 : %08Lx%08Lx R11 : %08Lx%08Lx\n",
427 ah, al, bh, bl, ch, cl);
428
429 ah = (regs->regs[12]) >> 32;
430 al = (regs->regs[12]) & 0xffffffff;
431 bh = (regs->regs[13]) >> 32;
432 bl = (regs->regs[13]) & 0xffffffff;
433 ch = (regs->regs[14]) >> 32;
434 cl = (regs->regs[14]) & 0xffffffff;
435 printk("R12 : %08Lx%08Lx R13 : %08Lx%08Lx R14 : %08Lx%08Lx\n",
436 ah, al, bh, bl, ch, cl);
437
438 ah = (regs->regs[16]) >> 32;
439 al = (regs->regs[16]) & 0xffffffff;
440 bh = (regs->regs[17]) >> 32;
441 bl = (regs->regs[17]) & 0xffffffff;
442 ch = (regs->regs[19]) >> 32;
443 cl = (regs->regs[19]) & 0xffffffff;
444 printk("R16 : %08Lx%08Lx R17 : %08Lx%08Lx R19 : %08Lx%08Lx\n",
445 ah, al, bh, bl, ch, cl);
446
447 ah = (regs->regs[20]) >> 32;
448 al = (regs->regs[20]) & 0xffffffff;
449 bh = (regs->regs[21]) >> 32;
450 bl = (regs->regs[21]) & 0xffffffff;
451 ch = (regs->regs[22]) >> 32;
452 cl = (regs->regs[22]) & 0xffffffff;
453 printk("R20 : %08Lx%08Lx R21 : %08Lx%08Lx R22 : %08Lx%08Lx\n",
454 ah, al, bh, bl, ch, cl);
455
456 ah = (regs->regs[23]) >> 32;
457 al = (regs->regs[23]) & 0xffffffff;
458 bh = (regs->regs[24]) >> 32;
459 bl = (regs->regs[24]) & 0xffffffff;
460 ch = (regs->regs[25]) >> 32;
461 cl = (regs->regs[25]) & 0xffffffff;
462 printk("R23 : %08Lx%08Lx R24 : %08Lx%08Lx R25 : %08Lx%08Lx\n",
463 ah, al, bh, bl, ch, cl);
464
465 ah = (regs->regs[26]) >> 32;
466 al = (regs->regs[26]) & 0xffffffff;
467 bh = (regs->regs[27]) >> 32;
468 bl = (regs->regs[27]) & 0xffffffff;
469 ch = (regs->regs[28]) >> 32;
470 cl = (regs->regs[28]) & 0xffffffff;
471 printk("R26 : %08Lx%08Lx R27 : %08Lx%08Lx R28 : %08Lx%08Lx\n",
472 ah, al, bh, bl, ch, cl);
473
474 ah = (regs->regs[29]) >> 32;
475 al = (regs->regs[29]) & 0xffffffff;
476 bh = (regs->regs[30]) >> 32;
477 bl = (regs->regs[30]) & 0xffffffff;
478 ch = (regs->regs[31]) >> 32;
479 cl = (regs->regs[31]) & 0xffffffff;
480 printk("R29 : %08Lx%08Lx R30 : %08Lx%08Lx R31 : %08Lx%08Lx\n",
481 ah, al, bh, bl, ch, cl);
482
483 ah = (regs->regs[32]) >> 32;
484 al = (regs->regs[32]) & 0xffffffff;
485 bh = (regs->regs[33]) >> 32;
486 bl = (regs->regs[33]) & 0xffffffff;
487 ch = (regs->regs[34]) >> 32;
488 cl = (regs->regs[34]) & 0xffffffff;
489 printk("R32 : %08Lx%08Lx R33 : %08Lx%08Lx R34 : %08Lx%08Lx\n",
490 ah, al, bh, bl, ch, cl);
491
492 ah = (regs->regs[35]) >> 32;
493 al = (regs->regs[35]) & 0xffffffff;
494 bh = (regs->regs[36]) >> 32;
495 bl = (regs->regs[36]) & 0xffffffff;
496 ch = (regs->regs[37]) >> 32;
497 cl = (regs->regs[37]) & 0xffffffff;
498 printk("R35 : %08Lx%08Lx R36 : %08Lx%08Lx R37 : %08Lx%08Lx\n",
499 ah, al, bh, bl, ch, cl);
500
501 ah = (regs->regs[38]) >> 32;
502 al = (regs->regs[38]) & 0xffffffff;
503 bh = (regs->regs[39]) >> 32;
504 bl = (regs->regs[39]) & 0xffffffff;
505 ch = (regs->regs[40]) >> 32;
506 cl = (regs->regs[40]) & 0xffffffff;
507 printk("R38 : %08Lx%08Lx R39 : %08Lx%08Lx R40 : %08Lx%08Lx\n",
508 ah, al, bh, bl, ch, cl);
509
510 ah = (regs->regs[41]) >> 32;
511 al = (regs->regs[41]) & 0xffffffff;
512 bh = (regs->regs[42]) >> 32;
513 bl = (regs->regs[42]) & 0xffffffff;
514 ch = (regs->regs[43]) >> 32;
515 cl = (regs->regs[43]) & 0xffffffff;
516 printk("R41 : %08Lx%08Lx R42 : %08Lx%08Lx R43 : %08Lx%08Lx\n",
517 ah, al, bh, bl, ch, cl);
518
519 ah = (regs->regs[44]) >> 32;
520 al = (regs->regs[44]) & 0xffffffff;
521 bh = (regs->regs[45]) >> 32;
522 bl = (regs->regs[45]) & 0xffffffff;
523 ch = (regs->regs[46]) >> 32;
524 cl = (regs->regs[46]) & 0xffffffff;
525 printk("R44 : %08Lx%08Lx R45 : %08Lx%08Lx R46 : %08Lx%08Lx\n",
526 ah, al, bh, bl, ch, cl);
527
528 ah = (regs->regs[47]) >> 32;
529 al = (regs->regs[47]) & 0xffffffff;
530 bh = (regs->regs[48]) >> 32;
531 bl = (regs->regs[48]) & 0xffffffff;
532 ch = (regs->regs[49]) >> 32;
533 cl = (regs->regs[49]) & 0xffffffff;
534 printk("R47 : %08Lx%08Lx R48 : %08Lx%08Lx R49 : %08Lx%08Lx\n",
535 ah, al, bh, bl, ch, cl);
536
537 ah = (regs->regs[50]) >> 32;
538 al = (regs->regs[50]) & 0xffffffff;
539 bh = (regs->regs[51]) >> 32;
540 bl = (regs->regs[51]) & 0xffffffff;
541 ch = (regs->regs[52]) >> 32;
542 cl = (regs->regs[52]) & 0xffffffff;
543 printk("R50 : %08Lx%08Lx R51 : %08Lx%08Lx R52 : %08Lx%08Lx\n",
544 ah, al, bh, bl, ch, cl);
545
546 ah = (regs->regs[53]) >> 32;
547 al = (regs->regs[53]) & 0xffffffff;
548 bh = (regs->regs[54]) >> 32;
549 bl = (regs->regs[54]) & 0xffffffff;
550 ch = (regs->regs[55]) >> 32;
551 cl = (regs->regs[55]) & 0xffffffff;
552 printk("R53 : %08Lx%08Lx R54 : %08Lx%08Lx R55 : %08Lx%08Lx\n",
553 ah, al, bh, bl, ch, cl);
554
555 ah = (regs->regs[56]) >> 32;
556 al = (regs->regs[56]) & 0xffffffff;
557 bh = (regs->regs[57]) >> 32;
558 bl = (regs->regs[57]) & 0xffffffff;
559 ch = (regs->regs[58]) >> 32;
560 cl = (regs->regs[58]) & 0xffffffff;
561 printk("R56 : %08Lx%08Lx R57 : %08Lx%08Lx R58 : %08Lx%08Lx\n",
562 ah, al, bh, bl, ch, cl);
563
564 ah = (regs->regs[59]) >> 32;
565 al = (regs->regs[59]) & 0xffffffff;
566 bh = (regs->regs[60]) >> 32;
567 bl = (regs->regs[60]) & 0xffffffff;
568 ch = (regs->regs[61]) >> 32;
569 cl = (regs->regs[61]) & 0xffffffff;
570 printk("R59 : %08Lx%08Lx R60 : %08Lx%08Lx R61 : %08Lx%08Lx\n",
571 ah, al, bh, bl, ch, cl);
572
573 ah = (regs->regs[62]) >> 32;
574 al = (regs->regs[62]) & 0xffffffff;
575 bh = (regs->tregs[0]) >> 32;
576 bl = (regs->tregs[0]) & 0xffffffff;
577 ch = (regs->tregs[1]) >> 32;
578 cl = (regs->tregs[1]) & 0xffffffff;
579 printk("R62 : %08Lx%08Lx T0 : %08Lx%08Lx T1 : %08Lx%08Lx\n",
580 ah, al, bh, bl, ch, cl);
581
582 ah = (regs->tregs[2]) >> 32;
583 al = (regs->tregs[2]) & 0xffffffff;
584 bh = (regs->tregs[3]) >> 32;
585 bl = (regs->tregs[3]) & 0xffffffff;
586 ch = (regs->tregs[4]) >> 32;
587 cl = (regs->tregs[4]) & 0xffffffff;
588 printk("T2 : %08Lx%08Lx T3 : %08Lx%08Lx T4 : %08Lx%08Lx\n",
589 ah, al, bh, bl, ch, cl);
590
591 ah = (regs->tregs[5]) >> 32;
592 al = (regs->tregs[5]) & 0xffffffff;
593 bh = (regs->tregs[6]) >> 32;
594 bl = (regs->tregs[6]) & 0xffffffff;
595 ch = (regs->tregs[7]) >> 32;
596 cl = (regs->tregs[7]) & 0xffffffff;
597 printk("T5 : %08Lx%08Lx T6 : %08Lx%08Lx T7 : %08Lx%08Lx\n",
598 ah, al, bh, bl, ch, cl);
599
600 /*
601 * If we're in kernel mode, dump the stack too..
602 */
603 if (!user_mode(regs)) {
604 void show_stack(struct task_struct *tsk, unsigned long *sp);
605 unsigned long sp = regs->regs[15] & 0xffffffff;
606 struct task_struct *tsk = get_current();
607
608 tsk->thread.kregs = regs;
609
610 show_stack(tsk, (unsigned long *)sp);
611 }
612}
613
614struct task_struct * alloc_task_struct(void)
615{
616 /* Get task descriptor pages */
617 return (struct task_struct *)
618 __get_free_pages(GFP_KERNEL, get_order(THREAD_SIZE));
619}
620
621void free_task_struct(struct task_struct *p)
622{
623 free_pages((unsigned long) p, get_order(THREAD_SIZE));
624}
625
626/*
627 * Create a kernel thread
628 */
629
630/*
631 * This is the mechanism for creating a new kernel thread.
632 *
633 * NOTE! Only a kernel-only process (i.e. the swapper or direct descendants
634 * who haven't done an "execve()") should use this: it will work within
635 * a system call from a "real" process, but the process memory space will
636 * not be freed until both the parent and the child have exited.
637 */
638int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags)
639{
640 /* A bit less processor dependent than older sh ... */
641 unsigned int reply;
642
643static __inline__ _syscall2(int,clone,unsigned long,flags,unsigned long,newsp)
644static __inline__ _syscall1(int,exit,int,ret)
645
646 reply = clone(flags | CLONE_VM, 0);
647 if (!reply) {
648 /* Child */
649 reply = exit(fn(arg));
650 }
651
652 return reply;
653}
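
A hypothetical caller-side sketch of kernel_thread(); my_daemon and the extra clone flags are invented for illustration:

	/* Runs in kernel space; CLONE_VM is implied by kernel_thread() itself. */
	static int my_daemon(void *unused)
	{
		/* ... do the work; the return value becomes the thread's
		 * exit code via the exit() call above ... */
		return 0;
	}

	static void spawn_daemon(void)
	{
		int pid = kernel_thread(my_daemon, NULL, CLONE_FS | CLONE_FILES);
		if (pid < 0)
			printk(KERN_ERR "spawn_daemon: kernel_thread failed (%d)\n", pid);
	}
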
654
655/*
656 * Free current thread data structures etc..
657 */
658void exit_thread(void)
659{
660 /* See arch/sparc/kernel/process.c for the precedent for doing this -- RPC.
661
662 The SH-5 FPU save/restore approach relies on last_task_used_math
663 pointing to a live task_struct. When another task tries to use the
664 FPU for the 1st time, the FPUDIS trap handling (see
665 arch/sh64/kernel/fpu.c) will save the existing FPU state to the
666 FP regs field within last_task_used_math before re-loading the new
667 task's FPU state (or initialising it if the FPU has been used
668 before). So if last_task_used_math is stale, and its page has already been
669 re-allocated for another use, the consequences are rather grim. Unless we
670 null it here, there is no other path through which it would get safely
671 nulled. */
672
673#ifdef CONFIG_SH_FPU
674 if (last_task_used_math == current) {
675 last_task_used_math = NULL;
676 }
677#endif
678}
679
680void flush_thread(void)
681{
682
683 /* Called by fs/exec.c (flush_old_exec) to remove traces of a
684 * previously running executable. */
685#ifdef CONFIG_SH_FPU
686 if (last_task_used_math == current) {
687 last_task_used_math = NULL;
688 }
689 /* Force FPU state to be reinitialised after exec */
690 clear_used_math();
691#endif
692
693 /* if we are a kernel thread, about to change to user thread,
694 * update kreg
695 */
696 if (current->thread.kregs == &fake_swapper_regs) {
697 current->thread.kregs =
698 ((struct pt_regs *)(THREAD_SIZE + (unsigned long) current) - 1);
699 current->thread.uregs = current->thread.kregs;
700 }
701}
702
703void release_thread(struct task_struct *dead_task)
704{
705 /* do nothing */
706}
707
708/* Fill in the fpu structure for a core dump.. */
709int dump_fpu(struct pt_regs *regs, elf_fpregset_t *fpu)
710{
711#ifdef CONFIG_SH_FPU
712 int fpvalid;
713 struct task_struct *tsk = current;
714
715 fpvalid = !!tsk_used_math(tsk);
716 if (fpvalid) {
717 if (current == last_task_used_math) {
718 grab_fpu();
719 fpsave(&tsk->thread.fpu.hard);
720 release_fpu();
721 last_task_used_math = 0;
722 regs->sr |= SR_FD;
723 }
724
725 memcpy(fpu, &tsk->thread.fpu.hard, sizeof(*fpu));
726 }
727
728 return fpvalid;
729#else
730 return 0; /* Task didn't use the fpu at all. */
731#endif
732}
733
734asmlinkage void ret_from_fork(void);
735
736int copy_thread(int nr, unsigned long clone_flags, unsigned long usp,
737 unsigned long unused,
738 struct task_struct *p, struct pt_regs *regs)
739{
740 struct pt_regs *childregs;
741 unsigned long long se; /* Sign extension */
742
743#ifdef CONFIG_SH_FPU
744 if(last_task_used_math == current) {
745 grab_fpu();
746 fpsave(&current->thread.fpu.hard);
747 release_fpu();
748 last_task_used_math = NULL;
749 regs->sr |= SR_FD;
750 }
751#endif
752 /* Copy from sh version */
753 childregs = ((struct pt_regs *)(THREAD_SIZE + (unsigned long) p->thread_info )) - 1;
754
755 *childregs = *regs;
756
757 if (user_mode(regs)) {
758 childregs->regs[15] = usp;
759 p->thread.uregs = childregs;
760 } else {
761 childregs->regs[15] = (unsigned long)p->thread_info + THREAD_SIZE;
762 }
763
764 childregs->regs[9] = 0; /* Set return value for child */
765 childregs->sr |= SR_FD; /* Invalidate FPU flag */
766
767 p->thread.sp = (unsigned long) childregs;
768 p->thread.pc = (unsigned long) ret_from_fork;
769
770 /*
771 * Sign extend the edited stack.
772 * Note that thread.sp and thread.pc will stay
773 * 32-bit wide and context switch must take care
774 * of NEFF sign extension.
775 */
776
777 se = childregs->regs[15];
778 se = (se & NEFF_SIGN) ? (se | NEFF_MASK) : se;
779 childregs->regs[15] = se;
780
781 return 0;
782}
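
Worked example of the sign extension, assuming (as the names suggest; both macros are defined elsewhere) that NEFF_SIGN tests bit 31 and NEFF_MASK is all-ones above it: a child SP of 0x80001000 has the sign bit set and is widened to 0xffffffff80001000, while 0x7ffff000 is stored unchanged.
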
783
784/*
785 * fill in the user structure for a core dump..
786 */
787void dump_thread(struct pt_regs * regs, struct user * dump)
788{
789 dump->magic = CMAGIC;
790 dump->start_code = current->mm->start_code;
791 dump->start_data = current->mm->start_data;
792 dump->start_stack = regs->regs[15] & ~(PAGE_SIZE - 1);
793 dump->u_tsize = (current->mm->end_code - dump->start_code) >> PAGE_SHIFT;
794 dump->u_dsize = (current->mm->brk + (PAGE_SIZE-1) - dump->start_data) >> PAGE_SHIFT;
795 dump->u_ssize = (current->mm->start_stack - dump->start_stack +
796 PAGE_SIZE - 1) >> PAGE_SHIFT;
797 /* Debug registers will come here. */
798
799 dump->regs = *regs;
800
801 dump->u_fpvalid = dump_fpu(regs, &dump->fpu);
802}
803
804asmlinkage int sys_fork(unsigned long r2, unsigned long r3,
805 unsigned long r4, unsigned long r5,
806 unsigned long r6, unsigned long r7,
807 struct pt_regs *pregs)
808{
809 return do_fork(SIGCHLD, pregs->regs[15], pregs, 0, 0, 0);
810}
811
812asmlinkage int sys_clone(unsigned long clone_flags, unsigned long newsp,
813 unsigned long r4, unsigned long r5,
814 unsigned long r6, unsigned long r7,
815 struct pt_regs *pregs)
816{
817 if (!newsp)
818 newsp = pregs->regs[15];
819 return do_fork(clone_flags, newsp, pregs, 0, 0, 0);
820}
821
822/*
823 * This is trivial, and on the face of it looks like it
824 * could equally well be done in user mode.
825 *
826 * Not so, for quite unobvious reasons - register pressure.
827 * In user mode vfork() cannot have a stack frame, and if
828 * done by calling the "clone()" system call directly, you
829 * do not have enough call-clobbered registers to hold all
830 * the information you need.
831 */
832asmlinkage int sys_vfork(unsigned long r2, unsigned long r3,
833 unsigned long r4, unsigned long r5,
834 unsigned long r6, unsigned long r7,
835 struct pt_regs *pregs)
836{
837 return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, pregs->regs[15], pregs, 0, 0, 0);
838}
839
840/*
841 * sys_execve() executes a new program.
842 */
843asmlinkage int sys_execve(char *ufilename, char **uargv,
844 char **uenvp, unsigned long r5,
845 unsigned long r6, unsigned long r7,
846 struct pt_regs *pregs)
847{
848 int error;
849 char *filename;
850
851 lock_kernel();
852 filename = getname((char __user *)ufilename);
853 error = PTR_ERR(filename);
854 if (IS_ERR(filename))
855 goto out;
856
857 error = do_execve(filename,
858 (char __user * __user *)uargv,
859 (char __user * __user *)uenvp,
860 pregs);
861 if (error == 0) {
862 task_lock(current);
863 current->ptrace &= ~PT_DTRACE;
864 task_unlock(current);
865 }
866 putname(filename);
867out:
868 unlock_kernel();
869 return error;
870}
871
872/*
873 * These bracket the sleeping functions..
874 */
875extern void interruptible_sleep_on(wait_queue_head_t *q);
876
877#define mid_sched ((unsigned long) interruptible_sleep_on)
878
879static int in_sh64_switch_to(unsigned long pc)
880{
881 extern char __sh64_switch_to_end;
882 /* For a sleeping task, the PC is somewhere in the middle of the function,
883 so we don't have to worry about masking the LSB off */
884 return (pc >= (unsigned long) sh64_switch_to) &&
885 (pc < (unsigned long) &__sh64_switch_to_end);
886}
887
888unsigned long get_wchan(struct task_struct *p)
889{
890 unsigned long schedule_fp;
891 unsigned long sh64_switch_to_fp;
892 unsigned long schedule_caller_pc;
893 unsigned long pc;
894
895 if (!p || p == current || p->state == TASK_RUNNING)
896 return 0;
897
898 /*
899 * The same comment as on the Alpha applies here, too ...
900 */
901 pc = thread_saved_pc(p);
902
903#ifdef CONFIG_FRAME_POINTER
904 if (in_sh64_switch_to(pc)) {
905 sh64_switch_to_fp = (long) p->thread.sp;
906 /* r14 is saved at offset 4 in the sh64_switch_to frame */
907 schedule_fp = *(unsigned long *) (long)(sh64_switch_to_fp + 4);
908
909 /* and the caller of 'schedule' is (currently!) saved at offset 24
910 in the frame of schedule (from disasm) */
911 schedule_caller_pc = *(unsigned long *) (long)(schedule_fp + 24);
912 return schedule_caller_pc;
913 }
914#endif
915 return pc;
916}
917
918/* Provide a /proc/asids file that lists out the
919 ASIDs currently associated with the processes. (If the DM.PC register is
920 examined through the debug link, this shows ASID + PC. To make use of this,
921 the PID->ASID relationship needs to be known. This is primarily for
922 debugging.)
923 */
924
925#if defined(CONFIG_SH64_PROC_ASIDS)
926#include <linux/init.h>
927#include <linux/proc_fs.h>
928
929static int
930asids_proc_info(char *buf, char **start, off_t fpos, int length, int *eof, void *data)
931{
932 int len=0;
933 struct task_struct *p;
934 read_lock(&tasklist_lock);
935 for_each_process(p) {
936 int pid = p->pid;
937 struct mm_struct *mm;
938 if (!pid) continue;
939 mm = p->mm;
940 if (mm) {
941 unsigned long asid, context;
942 context = mm->context;
943 asid = (context & 0xff);
944 len += sprintf(buf+len, "%5d : %02lx\n", pid, asid);
945 } else {
946 len += sprintf(buf+len, "%5d : (none)\n", pid);
947 }
948 }
949 read_unlock(&tasklist_lock);
950 *eof = 1;
951 return len;
952}
953
954static int __init register_proc_asids(void)
955{
956 create_proc_read_entry("asids", 0, NULL, asids_proc_info, NULL);
957 return 0;
958}
959
960__initcall(register_proc_asids);
961#endif
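
Reading the file then yields one "pid : asid" line per process, matching the sprintf formats above, e.g. (values illustrative only):

	$ cat /proc/asids
	    1 : 07
	  312 : (none)
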
962
diff --git a/arch/sh64/kernel/ptrace.c b/arch/sh64/kernel/ptrace.c
new file mode 100644
index 000000000000..800288c1562b
--- /dev/null
+++ b/arch/sh64/kernel/ptrace.c
@@ -0,0 +1,376 @@
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * arch/sh64/kernel/ptrace.c
7 *
8 * Copyright (C) 2000, 2001 Paolo Alberelli
9 * Copyright (C) 2003 Paul Mundt
10 *
11 * Started from SH3/4 version:
12 * SuperH version: Copyright (C) 1999, 2000 Kaz Kojima & Niibe Yutaka
13 *
14 * Original x86 implementation:
15 * By Ross Biro 1/23/92
16 * edited by Linus Torvalds
17 *
18 */
19
20#include <linux/config.h>
21#include <linux/kernel.h>
22#include <linux/rwsem.h>
23#include <linux/sched.h>
24#include <linux/mm.h>
25#include <linux/smp.h>
26#include <linux/smp_lock.h>
27#include <linux/errno.h>
28#include <linux/ptrace.h>
29#include <linux/user.h>
30
31#include <asm/io.h>
32#include <asm/uaccess.h>
33#include <asm/pgtable.h>
34#include <asm/system.h>
35#include <asm/processor.h>
36#include <asm/mmu_context.h>
37
38/* This mask defines the bits of the SR which the user is not allowed to
39 change, which are everything except S, Q, M, PR, SZ, FR. */
40#define SR_MASK (0xffff8cfd)
41
42/*
43 * does not yet catch signals sent when the child dies.
44 * in exit.c or in signal.c.
45 */
46
47/*
48 * This routine will get a word from the user area in the process kernel stack.
49 */
50static inline int get_stack_long(struct task_struct *task, int offset)
51{
52 unsigned char *stack;
53
54 stack = (unsigned char *)(task->thread.uregs);
55 stack += offset;
56 return (*((int *)stack));
57}
58
59static inline unsigned long
60get_fpu_long(struct task_struct *task, unsigned long addr)
61{
62 unsigned long tmp;
63 struct pt_regs *regs;
64 regs = (struct pt_regs*)((unsigned char *)task + THREAD_SIZE) - 1;
65
66 if (!tsk_used_math(task)) {
67 if (addr == offsetof(struct user_fpu_struct, fpscr)) {
68 tmp = FPSCR_INIT;
69 } else {
70 tmp = 0xffffffffUL; /* matches initial value in fpu.c */
71 }
72 return tmp;
73 }
74
75 if (last_task_used_math == task) {
76 grab_fpu();
77 fpsave(&task->thread.fpu.hard);
78 release_fpu();
79 last_task_used_math = 0;
80 regs->sr |= SR_FD;
81 }
82
83 tmp = ((long *)&task->thread.fpu)[addr / sizeof(unsigned long)];
84 return tmp;
85}
86
87/*
88 * This routine will put a word into the user area in the process kernel stack.
89 */
90static inline int put_stack_long(struct task_struct *task, int offset,
91 unsigned long data)
92{
93 unsigned char *stack;
94
95 stack = (unsigned char *)(task->thread.uregs);
96 stack += offset;
97 *(unsigned long *) stack = data;
98 return 0;
99}
100
101static inline int
102put_fpu_long(struct task_struct *task, unsigned long addr, unsigned long data)
103{
104 struct pt_regs *regs;
105
106 regs = (struct pt_regs*)((unsigned char *)task + THREAD_SIZE) - 1;
107
108 if (!tsk_used_math(task)) {
109 fpinit(&task->thread.fpu.hard);
110 set_stopped_child_used_math(task);
111 } else if (last_task_used_math == task) {
112 grab_fpu();
113 fpsave(&task->thread.fpu.hard);
114 release_fpu();
115 last_task_used_math = 0;
116 regs->sr |= SR_FD;
117 }
118
119 ((long *)&task->thread.fpu)[addr / sizeof(unsigned long)] = data;
120 return 0;
121}
122
123asmlinkage int sys_ptrace(long request, long pid, long addr, long data)
124{
125 struct task_struct *child;
126 extern void poke_real_address_q(unsigned long long addr, unsigned long long data);
127#define WPC_DBRMODE 0x0d104008
128 static int first_call = 1;
129 int ret;
130
131 lock_kernel();
132
133 if (first_call) {
134 /* Set WPC.DBRMODE to 0. This makes all debug events get
135 * delivered through RESVEC, i.e. into the handlers in entry.S.
136 * (If the kernel was downloaded using a remote gdb, WPC.DBRMODE
137 * would normally be left set to 1, which makes debug events get
138 * delivered through DBRVEC, i.e. into the remote gdb's
139 * handlers. This prevents ptrace getting them, and confuses
140 * the remote gdb.) */
141 printk("DBRMODE set to 0 to permit native debugging\n");
142 poke_real_address_q(WPC_DBRMODE, 0);
143 first_call = 0;
144 }
145
146 ret = -EPERM;
147 if (request == PTRACE_TRACEME) {
148 /* are we already being traced? */
149 if (current->ptrace & PT_PTRACED)
150 goto out;
151 /* set the ptrace bit in the process flags. */
152 current->ptrace |= PT_PTRACED;
153 ret = 0;
154 goto out;
155 }
156 ret = -ESRCH;
157 read_lock(&tasklist_lock);
158 child = find_task_by_pid(pid);
159 if (child)
160 get_task_struct(child);
161 read_unlock(&tasklist_lock);
162 if (!child)
163 goto out;
164
165 ret = -EPERM;
166 if (pid == 1) /* you may not mess with init */
167 goto out_tsk;
168
169 if (request == PTRACE_ATTACH) {
170 ret = ptrace_attach(child);
171 goto out_tsk;
172 }
173
174 ret = ptrace_check_attach(child, request == PTRACE_KILL);
175 if (ret < 0)
176 goto out_tsk;
177
178 switch (request) {
179 /* when I and D space are separate, these will need to be fixed. */
180 case PTRACE_PEEKTEXT: /* read word at location addr. */
181 case PTRACE_PEEKDATA: {
182 unsigned long tmp;
183 int copied;
184
185 copied = access_process_vm(child, addr, &tmp, sizeof(tmp), 0);
186 ret = -EIO;
187 if (copied != sizeof(tmp))
188 break;
189 ret = put_user(tmp,(unsigned long *) data);
190 break;
191 }
192
193 /* read the word at location addr in the USER area. */
194 case PTRACE_PEEKUSR: {
195 unsigned long tmp;
196
197 ret = -EIO;
198 if ((addr & 3) || addr < 0)
199 break;
200
201 if (addr < sizeof(struct pt_regs))
202 tmp = get_stack_long(child, addr);
203 else if ((addr >= offsetof(struct user, fpu)) &&
204 (addr < offsetof(struct user, u_fpvalid))) {
205 tmp = get_fpu_long(child, addr - offsetof(struct user, fpu));
206 } else if (addr == offsetof(struct user, u_fpvalid)) {
207 tmp = !!tsk_used_math(child);
208 } else {
209 break;
210 }
211 ret = put_user(tmp, (unsigned long *)data);
212 break;
213 }
214
215 /* when I and D space are separate, this will have to be fixed. */
216 case PTRACE_POKETEXT: /* write the word at location addr. */
217 case PTRACE_POKEDATA:
218 ret = 0;
219 if (access_process_vm(child, addr, &data, sizeof(data), 1) == sizeof(data))
220 break;
221 ret = -EIO;
222 break;
223
224 case PTRACE_POKEUSR:
225 /* write the word at location addr in the USER area. We must
226 disallow any changes to certain SR bits or u_fpvalid, since
227 this could crash the kernel or result in a security
228 loophole. */
229 ret = -EIO;
230 if ((addr & 3) || addr < 0)
231 break;
232
233 if (addr < sizeof(struct pt_regs)) {
234 /* Ignore change of top 32 bits of SR */
235 if (addr == offsetof (struct pt_regs, sr)+4)
236 {
237 ret = 0;
238 break;
239 }
240 /* If lower 32 bits of SR, ignore non-user bits */
241 if (addr == offsetof (struct pt_regs, sr))
242 {
243 long cursr = get_stack_long(child, addr);
244 data &= ~(SR_MASK);
245 data |= (cursr & SR_MASK);
246 }
247 ret = put_stack_long(child, addr, data);
248 }
249 else if ((addr >= offsetof(struct user, fpu)) &&
250 (addr < offsetof(struct user, u_fpvalid))) {
251 ret = put_fpu_long(child, addr - offsetof(struct user, fpu), data);
252 }
253 break;
254
255 case PTRACE_SYSCALL: /* continue and stop at next (return from) syscall */
256 case PTRACE_CONT: { /* restart after signal. */
257 ret = -EIO;
258 if ((unsigned long) data > _NSIG)
259 break;
260 if (request == PTRACE_SYSCALL)
261 set_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
262 else
263 clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
264 child->exit_code = data;
265 wake_up_process(child);
266 ret = 0;
267 break;
268 }
269
270/*
271 * Make the child exit. The best we can do is send it a SIGKILL.
272 * Perhaps it should be put in the status that it wants to
273 * exit.
274 */
275 case PTRACE_KILL: {
276 ret = 0;
277 if (child->exit_state == EXIT_ZOMBIE) /* already dead */
278 break;
279 child->exit_code = SIGKILL;
280 wake_up_process(child);
281 break;
282 }
283
284 case PTRACE_SINGLESTEP: { /* set the trap flag. */
285 struct pt_regs *regs;
286
287 ret = -EIO;
288 if ((unsigned long) data > _NSIG)
289 break;
290 clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
291 if ((child->ptrace & PT_DTRACE) == 0) {
292 /* Spurious delayed TF traps may occur */
293 child->ptrace |= PT_DTRACE;
294 }
295
296 regs = child->thread.uregs;
297
298 regs->sr |= SR_SSTEP; /* auto-resetting upon exception */
299
300 child->exit_code = data;
301 /* give it a chance to run. */
302 wake_up_process(child);
303 ret = 0;
304 break;
305 }
306
307 case PTRACE_DETACH: /* detach a process that was attached. */
308 ret = ptrace_detach(child, data);
309 break;
310
311 default:
312 ret = ptrace_request(child, request, addr, data);
313 break;
314 }
315out_tsk:
316 put_task_struct(child);
317out:
318 unlock_kernel();
319 return ret;
320}
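
For flavour, a hypothetical user-space sketch that exercises this syscall via the PEEKUSR path (illustration only: offset 0 is just a placeholder pt_regs slot, and glibc spells the request PTRACE_PEEKUSER):

	#include <stdio.h>
	#include <sys/types.h>
	#include <sys/ptrace.h>
	#include <sys/wait.h>
	#include <unistd.h>

	int main(void)
	{
		pid_t pid = fork();

		if (pid == 0) {
			/* Child: ask to be traced, then stop on exec. */
			ptrace(PTRACE_TRACEME, 0, 0, 0);
			execl("/bin/true", "true", (char *)NULL);
			_exit(1);
		}

		waitpid(pid, NULL, 0);		/* child is stopped after exec */

		/* Read the first word of the USER area (a pt_regs slot). */
		long word = ptrace(PTRACE_PEEKUSER, pid, (void *)0, 0);
		printf("USER word 0 = %#lx\n", word);

		ptrace(PTRACE_CONT, pid, 0, 0);	/* let it finish */
		waitpid(pid, NULL, 0);
		return 0;
	}
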
321
322asmlinkage void syscall_trace(void)
323{
324 struct task_struct *tsk = current;
325
326 if (!test_thread_flag(TIF_SYSCALL_TRACE))
327 return;
328 if (!(tsk->ptrace & PT_PTRACED))
329 return;
330
331 ptrace_notify(SIGTRAP | ((current->ptrace & PT_TRACESYSGOOD)
332 ? 0x80 : 0));
333 /*
334 * this isn't the same as continuing with a signal, but it will do
335 * for normal use. strace only continues with a signal if the
336 * stopping signal is not SIGTRAP. -brl
337 */
338 if (tsk->exit_code) {
339 send_sig(tsk->exit_code, tsk, 1);
340 tsk->exit_code = 0;
341 }
342}
343
344/* Called with interrupts disabled */
345asmlinkage void do_single_step(unsigned long long vec, struct pt_regs *regs)
346{
347 /* This is called after a single step exception (DEBUGSS).
348 There is no need to change the PC, as it is a post-execution
349 exception, as entry.S does not do anything to the PC for DEBUGSS.
350 We need to clear the Single Step setting in SR to avoid
351 continually stepping. */
352 local_irq_enable();
353 regs->sr &= ~SR_SSTEP;
354 force_sig(SIGTRAP, current);
355}
356
357/* Called with interrupts disabled */
358asmlinkage void do_software_break_point(unsigned long long vec,
359 struct pt_regs *regs)
360{
361 /* We need to forward step the PC, to counteract the backstep done
362 in signal.c. */
363 local_irq_enable();
364 force_sig(SIGTRAP, current);
365 regs->pc += 4;
366}
367
368/*
369 * Called by kernel/ptrace.c when detaching..
370 *
371 * Make sure single step bits etc are not set.
372 */
373void ptrace_disable(struct task_struct *child)
374{
375 /* nothing to do.. */
376}
diff --git a/arch/sh64/kernel/semaphore.c b/arch/sh64/kernel/semaphore.c
new file mode 100644
index 000000000000..72c16533436e
--- /dev/null
+++ b/arch/sh64/kernel/semaphore.c
@@ -0,0 +1,140 @@
1/*
2 * Taken straight from the Alpha implementation;
3 * it may not work well here.
4 */
5/*
6 * Generic semaphore code. Buyer beware. Do your own
7 * specific changes in <asm/semaphore-helper.h>
8 */
9
10#include <linux/errno.h>
11#include <linux/rwsem.h>
12#include <linux/sched.h>
13#include <linux/wait.h>
14#include <linux/init.h>
15#include <asm/semaphore.h>
16#include <asm/semaphore-helper.h>
17
18spinlock_t semaphore_wake_lock;
19
20/*
21 * Semaphores are implemented using a two-way counter:
22 * The "count" variable is decremented for each process
23 * that tries to sleep, while the "waking" variable is
24 * incremented when the "up()" code goes to wake up waiting
25 * processes.
26 *
27 * Notably, the inline "up()" and "down()" functions can
28 * efficiently test if they need to do any extra work (up
29 * needs to do something only if count was negative before
30 * the increment operation).
31 *
32 * waking_non_zero() (from asm/semaphore.h) must execute
33 * atomically.
34 *
35 * When __up() is called, the count was negative before
36 * incrementing it, and we need to wake up somebody.
37 *
38 * This routine adds one to the count of processes that need to
39 * wake up and exit. ALL waiting processes actually wake up but
40 * only the one that gets to the "waking" field first will gate
41 * through and acquire the semaphore. The others will go back
42 * to sleep.
43 *
44 * Note that these functions are only called when there is
45 * contention on the lock, and as such all this is the
46 * "non-critical" part of the whole semaphore business. The
47 * critical part is the inline stuff in <asm/semaphore.h>
48 * where we want to avoid any extra jumps and calls.
49 */
50void __up(struct semaphore *sem)
51{
52 wake_one_more(sem);
53 wake_up(&sem->wait);
54}
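The two-way counter described in the comment block can be illustrated
outside the kernel. A minimal userspace sketch using C11 atomics — the
names (toy_sem, toy_up, toy_waking_non_zero) are hypothetical, and the
actual sleeping and waking is elided:

	#include <stdatomic.h>
	#include <stdbool.h>

	struct toy_sem {
		atomic_int count;	/* decremented per would-be sleeper */
		atomic_int waking;	/* wakeups granted by up() */
	};

	static void toy_up(struct toy_sem *s)
	{
		/* Extra work is needed only if the count was negative,
		 * i.e. somebody is (or is about to be) asleep. */
		if (atomic_fetch_add(&s->count, 1) < 0)
			atomic_fetch_add(&s->waking, 1);
	}

	static bool toy_waking_non_zero(struct toy_sem *s)
	{
		/* All woken sleepers race here; only the one that
		 * decrements "waking" gates through, the rest go back
		 * to sleep. */
		int w = atomic_load(&s->waking);
		while (w > 0) {
			if (atomic_compare_exchange_weak(&s->waking, &w, w - 1))
				return true;
		}
		return false;
	}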
55
56/*
57 * Perform the "down" function. Return zero for semaphore acquired,
58 * return negative for signalled out of the function.
59 *
60 * If called from __down, the return is ignored and the wait loop is
61 * not interruptible. This means that a task waiting on a semaphore
62 * using "down()" cannot be killed until someone does an "up()" on
63 * the semaphore.
64 *
65 * If called from __down_interruptible, the return value gets checked
66 * upon return. If the return value is negative then the task continues
67 * with the negative value in the return register (it can be tested by
68 * the caller).
69 *
70 * Either form may be used in conjunction with "up()".
71 *
72 */
73
74#define DOWN_VAR \
75 struct task_struct *tsk = current; \
76 wait_queue_t wait; \
77 init_waitqueue_entry(&wait, tsk);
78
79#define DOWN_HEAD(task_state) \
80 \
81 \
82 tsk->state = (task_state); \
83 add_wait_queue(&sem->wait, &wait); \
84 \
85 /* \
86 * Ok, we're set up. sem->count is known to be less than zero \
87 * so we must wait. \
88 * \
89 * We can let go the lock for purposes of waiting. \
90 * We re-acquire it after awaking so as to protect \
91 * all semaphore operations. \
92 * \
93 * If "up()" is called before we call waking_non_zero() then \
94 * we will catch it right away. If it is called later then \
95 * we will have to go through a wakeup cycle to catch it. \
96 * \
97 * Multiple waiters contend for the semaphore lock to see \
98 * who gets to gate through and who has to wait some more. \
99 */ \
100 for (;;) {
101
102#define DOWN_TAIL(task_state) \
103 tsk->state = (task_state); \
104 } \
105 tsk->state = TASK_RUNNING; \
106 remove_wait_queue(&sem->wait, &wait);
107
108void __sched __down(struct semaphore * sem)
109{
110 DOWN_VAR
111 DOWN_HEAD(TASK_UNINTERRUPTIBLE)
112 if (waking_non_zero(sem))
113 break;
114 schedule();
115 DOWN_TAIL(TASK_UNINTERRUPTIBLE)
116}
117
118int __sched __down_interruptible(struct semaphore * sem)
119{
120 int ret = 0;
121 DOWN_VAR
122 DOWN_HEAD(TASK_INTERRUPTIBLE)
123
124 ret = waking_non_zero_interruptible(sem, tsk);
125 if (ret)
126 {
127 if (ret == 1)
128 /* ret != 0 only if we get interrupted -arca */
129 ret = 0;
130 break;
131 }
132 schedule();
133 DOWN_TAIL(TASK_INTERRUPTIBLE)
134 return ret;
135}
136
137int __down_trylock(struct semaphore * sem)
138{
139 return waking_non_zero_trylock(sem);
140}
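For readability, here is __down() with the DOWN_VAR/DOWN_HEAD/DOWN_TAIL
macros expanded by hand — a sketch of what the compiler sees, not new
code:

	void __sched __down(struct semaphore *sem)
	{
		struct task_struct *tsk = current;		/* DOWN_VAR */
		wait_queue_t wait;
		init_waitqueue_entry(&wait, tsk);

		tsk->state = TASK_UNINTERRUPTIBLE;		/* DOWN_HEAD */
		add_wait_queue(&sem->wait, &wait);
		for (;;) {
			if (waking_non_zero(sem))		/* loop body */
				break;
			schedule();
			tsk->state = TASK_UNINTERRUPTIBLE;	/* DOWN_TAIL */
		}
		tsk->state = TASK_RUNNING;
		remove_wait_queue(&sem->wait, &wait);
	}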
diff --git a/arch/sh64/kernel/setup.c b/arch/sh64/kernel/setup.c
new file mode 100644
index 000000000000..c7a7b816a30f
--- /dev/null
+++ b/arch/sh64/kernel/setup.c
@@ -0,0 +1,385 @@
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * arch/sh64/kernel/setup.c
7 *
8 * sh64 Arch Support
9 *
10 * This file handles the architecture-dependent parts of initialization
11 *
12 * Copyright (C) 2000, 2001 Paolo Alberelli
13 * Copyright (C) 2003, 2004 Paul Mundt
14 *
15 * benedict.gaster@superh.com: 2nd May 2002
16 * Modified to use the empty_zero_page to pass command line arguments.
17 *
18 * benedict.gaster@superh.com: 3rd May 2002
19 * Added support for ramdisk, removing statically linked romfs at the same time.
20 *
21 * lethal@linux-sh.org: 15th May 2003
22 * Added generic procfs cpuinfo reporting. Make boards just export their name.
23 *
24 * lethal@linux-sh.org: 25th May 2003
25 * Added generic get_cpu_subtype() for subtype reporting from cpu_data->type.
26 *
27 */
28#include <linux/errno.h>
29#include <linux/rwsem.h>
30#include <linux/sched.h>
31#include <linux/kernel.h>
32#include <linux/mm.h>
33#include <linux/stddef.h>
34#include <linux/unistd.h>
35#include <linux/ptrace.h>
36#include <linux/slab.h>
37#include <linux/user.h>
38#include <linux/a.out.h>
39#include <linux/tty.h>
40#include <linux/ioport.h>
41#include <linux/delay.h>
42#include <linux/config.h>
43#include <linux/init.h>
44#include <linux/seq_file.h>
45#include <linux/blkdev.h>
46#include <linux/bootmem.h>
47#include <linux/console.h>
48#include <linux/root_dev.h>
49#include <linux/cpu.h>
50#include <linux/initrd.h>
51#include <asm/processor.h>
52#include <asm/page.h>
53#include <asm/pgtable.h>
54#include <asm/platform.h>
55#include <asm/uaccess.h>
56#include <asm/system.h>
57#include <asm/io.h>
58#include <asm/sections.h>
59#include <asm/setup.h>
60#include <asm/smp.h>
61
62#ifdef CONFIG_VT
63#include <linux/console.h>
64#endif
65
66struct screen_info screen_info;
67
68#ifdef CONFIG_BLK_DEV_RAM
69extern int rd_doload; /* 1 = load ramdisk, 0 = don't load */
70extern int rd_prompt; /* 1 = prompt for ramdisk, 0 = don't prompt */
71extern int rd_image_start; /* starting block # of image */
72#endif
73
74extern int root_mountflags;
75extern char *get_system_type(void);
76extern void platform_setup(void);
77extern void platform_monitor(void);
78extern void platform_reserve(void);
79extern int sh64_cache_init(void);
80extern int sh64_tlb_init(void);
81
82#define RAMDISK_IMAGE_START_MASK 0x07FF
83#define RAMDISK_PROMPT_FLAG 0x8000
84#define RAMDISK_LOAD_FLAG 0x4000
85
86static char command_line[COMMAND_LINE_SIZE] = { 0, };
87unsigned long long memory_start = CONFIG_MEMORY_START;
88unsigned long long memory_end = CONFIG_MEMORY_START + (CONFIG_MEMORY_SIZE_IN_MB * 1024 * 1024);
89
90struct sh_cpuinfo boot_cpu_data;
91
92static inline void parse_mem_cmdline (char ** cmdline_p)
93{
94 char c = ' ', *to = command_line, *from = COMMAND_LINE;
95 int len = 0;
96
97 /* Save unparsed command line copy for /proc/cmdline */
98 memcpy(saved_command_line, COMMAND_LINE, COMMAND_LINE_SIZE);
99 saved_command_line[COMMAND_LINE_SIZE-1] = '\0';
100
101 for (;;) {
102 /*
103 * "mem=XXX[kKmM]" defines a size of memory.
104 */
105 if (c == ' ' && !memcmp(from, "mem=", 4)) {
106 if (to != command_line)
107 to--;
108 {
109 unsigned long mem_size;
110
111 mem_size = memparse(from+4, &from);
112 memory_end = memory_start + mem_size;
113 }
114 }
115 c = *(from++);
116 if (!c)
117 break;
118 if (COMMAND_LINE_SIZE <= ++len)
119 break;
120 *(to++) = c;
121 }
122 *to = '\0';
123
124 *cmdline_p = command_line;
125}
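The "mem=" handling relies on memparse() to accept size suffixes. A
standalone sketch of the behaviour, with parse_size() standing in for the
kernel helper (a hypothetical, userspace-testable name):

	#include <stdlib.h>

	static unsigned long long parse_size(const char *s, char **end)
	{
		/* base 0: accepts decimal, 0x... hex, like memparse() */
		unsigned long long v = strtoull(s, end, 0);

		switch (**end) {
		case 'k': case 'K': v <<= 10; (*end)++; break;
		case 'm': case 'M': v <<= 20; (*end)++; break;
		}
		return v;
	}

	/* parse_size("64M", &end) == 64ULL << 20, so "mem=64M" makes
	 * memory_end = memory_start + 0x4000000 above. */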
126
127static void __init sh64_cpu_type_detect(void)
128{
129 extern unsigned long long peek_real_address_q(unsigned long long addr);
130 unsigned long long cir;
131 /* Do peeks in real mode to avoid having to set up a mapping for the
132 WPC registers. On SH5-101 cut2, such a mapping would be exposed to
133 an address translation erratum which would make it hard to set up
134 correctly. */
135 cir = peek_real_address_q(0x0d000008);
136
137 if ((cir & 0xffff) == 0x5103) {
138 boot_cpu_data.type = CPU_SH5_103;
139 } else if (((cir >> 32) & 0xffff) == 0x51e2) {
140 /* CPU.VCR aliased at CIR address on SH5-101 */
141 boot_cpu_data.type = CPU_SH5_101;
142 } else {
143 boot_cpu_data.type = CPU_SH_NONE;
144 }
145}
146
147void __init setup_arch(char **cmdline_p)
148{
149 unsigned long bootmap_size, i;
150 unsigned long first_pfn, start_pfn, last_pfn, pages;
151
152#ifdef CONFIG_EARLY_PRINTK
153 extern void enable_early_printk(void);
154
155 /*
156 * Setup Early SCIF console
157 */
158 enable_early_printk();
159#endif
160
161 /*
162 * Setup TLB mappings
163 */
164 sh64_tlb_init();
165
166 /*
167 * Caches are already initialized by the time we get here, so we just
168 * fill in cpu_data info for the caches.
169 */
170 sh64_cache_init();
171
172 platform_setup();
173 platform_monitor();
174
175 sh64_cpu_type_detect();
176
177 ROOT_DEV = old_decode_dev(ORIG_ROOT_DEV);
178
179#ifdef CONFIG_BLK_DEV_RAM
180 rd_image_start = RAMDISK_FLAGS & RAMDISK_IMAGE_START_MASK;
181 rd_prompt = ((RAMDISK_FLAGS & RAMDISK_PROMPT_FLAG) != 0);
182 rd_doload = ((RAMDISK_FLAGS & RAMDISK_LOAD_FLAG) != 0);
183#endif
184
185 if (!MOUNT_ROOT_RDONLY)
186 root_mountflags &= ~MS_RDONLY;
187 init_mm.start_code = (unsigned long) _text;
188 init_mm.end_code = (unsigned long) _etext;
189 init_mm.end_data = (unsigned long) _edata;
190 init_mm.brk = (unsigned long) _end;
191
192 code_resource.start = __pa(_text);
193 code_resource.end = __pa(_etext)-1;
194 data_resource.start = __pa(_etext);
195 data_resource.end = __pa(_edata)-1;
196
197 parse_mem_cmdline(cmdline_p);
198
199 /*
200 * Find the lowest and highest page frame numbers we have available
201 */
202 first_pfn = PFN_DOWN(memory_start);
203 last_pfn = PFN_DOWN(memory_end);
204 pages = last_pfn - first_pfn;
205
206 /*
207 * Partially used pages are not usable - thus
208 * we are rounding upwards:
209 */
210 start_pfn = PFN_UP(__pa(_end));
211
212 /*
213 * Find a proper area for the bootmem bitmap. After this
214 * bootstrap step all allocations (until the page allocator
215 * is intact) must be done via bootmem_alloc().
216 */
217 bootmap_size = init_bootmem_node(NODE_DATA(0), start_pfn,
218 first_pfn,
219 last_pfn);
220 /*
221 * Round it up.
222 */
223 bootmap_size = PFN_PHYS(PFN_UP(bootmap_size));
224
225 /*
226 * Register fully available RAM pages with the bootmem allocator.
227 */
228 free_bootmem_node(NODE_DATA(0), PFN_PHYS(first_pfn), PFN_PHYS(pages));
229
230 /*
231 * Reserve all kernel sections + bootmem bitmap + a guard page.
232 */
233 reserve_bootmem_node(NODE_DATA(0), PFN_PHYS(first_pfn),
234 (PFN_PHYS(start_pfn) + bootmap_size + PAGE_SIZE) - PFN_PHYS(first_pfn));
235
236 /*
237 * Reserve platform dependent sections
238 */
239 platform_reserve();
240
241#ifdef CONFIG_BLK_DEV_INITRD
242 if (LOADER_TYPE && INITRD_START) {
243 if (INITRD_START + INITRD_SIZE <= (PFN_PHYS(last_pfn))) {
244 reserve_bootmem_node(NODE_DATA(0), INITRD_START + __MEMORY_START, INITRD_SIZE);
245
246 initrd_start =
247 (long) INITRD_START ? INITRD_START + PAGE_OFFSET + __MEMORY_START : 0;
248
249 initrd_end = initrd_start + INITRD_SIZE;
250 } else {
251 printk("initrd extends beyond end of memory "
252 "(0x%08lx > 0x%08lx)\ndisabling initrd\n",
253 (long) INITRD_START + INITRD_SIZE,
254 PFN_PHYS(last_pfn));
255 initrd_start = 0;
256 }
257 }
258#endif
259
260 /*
261 * Claim all RAM, ROM, and I/O resources.
262 */
263
264 /* Kernel RAM */
265 request_resource(&iomem_resource, &code_resource);
266 request_resource(&iomem_resource, &data_resource);
267
268 /* Other KRAM space */
269 for (i = 0; i < STANDARD_KRAM_RESOURCES - 2; i++)
270 request_resource(&iomem_resource,
271 &platform_parms.kram_res_p[i]);
272
273 /* XRAM space */
274 for (i = 0; i < STANDARD_XRAM_RESOURCES; i++)
275 request_resource(&iomem_resource,
276 &platform_parms.xram_res_p[i]);
277
278 /* ROM space */
279 for (i = 0; i < STANDARD_ROM_RESOURCES; i++)
280 request_resource(&iomem_resource,
281 &platform_parms.rom_res_p[i]);
282
283 /* I/O space */
284 for (i = 0; i < STANDARD_IO_RESOURCES; i++)
285 request_resource(&ioport_resource,
286 &platform_parms.io_res_p[i]);
287
288
289#ifdef CONFIG_VT
290#if defined(CONFIG_VGA_CONSOLE)
291 conswitchp = &vga_con;
292#elif defined(CONFIG_DUMMY_CONSOLE)
293 conswitchp = &dummy_con;
294#endif
295#endif
296
297 printk("Hardware FPU: %s\n", fpu_in_use ? "enabled" : "disabled");
298
299 paging_init();
300}
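The bootmem arithmetic in setup_arch() leans on the PFN_* helpers. A
self-contained sketch of their standard definitions, assuming 4 KiB pages
(PAGE_SHIFT == 12):

	#define PAGE_SHIFT	12
	#define PAGE_SIZE	(1UL << PAGE_SHIFT)
	#define PFN_UP(x)	(((x) + PAGE_SIZE - 1) >> PAGE_SHIFT)
	#define PFN_DOWN(x)	((x) >> PAGE_SHIFT)
	#define PFN_PHYS(x)	((x) << PAGE_SHIFT)

	/* PFN_UP(__pa(_end)) rounds the first free page upward past the
	 * kernel image (e.g. PFN_UP(0x5001) == 6), while PFN_DOWN(memory_end)
	 * rounds the last usable frame downward (PFN_DOWN(0x5fff) == 5), so
	 * partially used pages at either boundary are never handed to the
	 * bootmem allocator. */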
301
302void __xchg_called_with_bad_pointer(void)
303{
304	printk(KERN_EMERG "xchg() called with bad pointer!\n");
305}
306
307static struct cpu cpu[1];
308
309static int __init topology_init(void)
310{
311 return register_cpu(cpu, 0, NULL);
312}
313
314subsys_initcall(topology_init);
315
316/*
317 * Get CPU information
318 */
319static const char *cpu_name[] = {
320 [CPU_SH5_101] = "SH5-101",
321 [CPU_SH5_103] = "SH5-103",
322 [CPU_SH_NONE] = "Unknown",
323};
324
325const char *get_cpu_subtype(void)
326{
327 return cpu_name[boot_cpu_data.type];
328}
329
330#ifdef CONFIG_PROC_FS
331static int show_cpuinfo(struct seq_file *m,void *v)
332{
333 unsigned int cpu = smp_processor_id();
334
335 if (!cpu)
336 seq_printf(m, "machine\t\t: %s\n", get_system_type());
337
338 seq_printf(m, "processor\t: %d\n", cpu);
339 seq_printf(m, "cpu family\t: SH-5\n");
340 seq_printf(m, "cpu type\t: %s\n", get_cpu_subtype());
341
342 seq_printf(m, "icache size\t: %dK-bytes\n",
343 (boot_cpu_data.icache.ways *
344 boot_cpu_data.icache.sets *
345 boot_cpu_data.icache.linesz) >> 10);
346 seq_printf(m, "dcache size\t: %dK-bytes\n",
347 (boot_cpu_data.dcache.ways *
348 boot_cpu_data.dcache.sets *
349 boot_cpu_data.dcache.linesz) >> 10);
350 seq_printf(m, "itlb entries\t: %d\n", boot_cpu_data.itlb.entries);
351 seq_printf(m, "dtlb entries\t: %d\n", boot_cpu_data.dtlb.entries);
352
353#define PRINT_CLOCK(name, value) \
354 seq_printf(m, name " clock\t: %d.%02dMHz\n", \
355 ((value) / 1000000), ((value) % 1000000)/10000)
356
357 PRINT_CLOCK("cpu", boot_cpu_data.cpu_clock);
358 PRINT_CLOCK("bus", boot_cpu_data.bus_clock);
359 PRINT_CLOCK("module", boot_cpu_data.module_clock);
360
361 seq_printf(m, "bogomips\t: %lu.%02lu\n\n",
362 (loops_per_jiffy*HZ+2500)/500000,
363 ((loops_per_jiffy*HZ+2500)/5000) % 100);
364
365 return 0;
366}
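As a worked example of the PRINT_CLOCK fixed-point formatting: for a
cpu_clock of 266666666 Hz, 266666666 / 1000000 == 266 and
(266666666 % 1000000) / 10000 == 66, so the line printed is
"cpu clock	: 266.66MHz".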
367
368static void *c_start(struct seq_file *m, loff_t *pos)
369{
370 return (void*)(*pos == 0);
371}
372static void *c_next(struct seq_file *m, void *v, loff_t *pos)
373{
374 return NULL;
375}
376static void c_stop(struct seq_file *m, void *v)
377{
378}
379struct seq_operations cpuinfo_op = {
380 .start = c_start,
381 .next = c_next,
382 .stop = c_stop,
383 .show = show_cpuinfo,
384};
385#endif /* CONFIG_PROC_FS */
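The c_start()/c_next()/c_stop() trio above implements a single-record
seq_file: c_start() yields a non-NULL token only while *pos == 0. A
minimal sketch (not the actual fs/seq_file.c source) of the loop the
seq_file core effectively runs over these ops:

	static void drive_cpuinfo(struct seq_file *m)
	{
		loff_t pos = 0;
		void *v = cpuinfo_op.start(m, &pos);

		while (v) {
			if (cpuinfo_op.show(m, v))
				break;
			pos++;
			v = cpuinfo_op.next(m, v, &pos);	/* NULL here */
		}
		cpuinfo_op.stop(m, v);
	}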
diff --git a/arch/sh64/kernel/sh_ksyms.c b/arch/sh64/kernel/sh_ksyms.c
new file mode 100644
index 000000000000..0b5497d70bd3
--- /dev/null
+++ b/arch/sh64/kernel/sh_ksyms.c
@@ -0,0 +1,89 @@
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * arch/sh64/kernel/sh_ksyms.c
7 *
8 * Copyright (C) 2000, 2001 Paolo Alberelli
9 *
10 */
11
12#include <linux/config.h>
13#include <linux/rwsem.h>
14#include <linux/module.h>
15#include <linux/smp.h>
16#include <linux/user.h>
17#include <linux/elfcore.h>
18#include <linux/sched.h>
19#include <linux/in6.h>
20#include <linux/interrupt.h>
21#include <linux/smp_lock.h>
22#include <linux/tty.h>
23
24#include <asm/semaphore.h>
25#include <asm/processor.h>
26#include <asm/uaccess.h>
27#include <asm/checksum.h>
28#include <asm/io.h>
29#include <asm/delay.h>
30#include <asm/irq.h>
31
32extern void dump_thread(struct pt_regs *, struct user *);
33extern int dump_fpu(struct pt_regs *, elf_fpregset_t *);
34
35#if 0
36/* Not yet - there's no declaration of drive_info anywhere. */
37#if defined(CONFIG_BLK_DEV_IDE) || defined(CONFIG_BLK_DEV_HD) || defined(CONFIG_BLK_DEV_IDE_MODULE) || defined(CONFIG_BLK_DEV_HD_MODULE)
38extern struct drive_info_struct drive_info;
39EXPORT_SYMBOL(drive_info);
40#endif
41#endif
42
43/* platform dependent support */
44EXPORT_SYMBOL(dump_thread);
45EXPORT_SYMBOL(dump_fpu);
46EXPORT_SYMBOL(iounmap);
47EXPORT_SYMBOL(enable_irq);
48EXPORT_SYMBOL(disable_irq);
49EXPORT_SYMBOL(kernel_thread);
50
51/* Networking helper routines. */
52EXPORT_SYMBOL(csum_partial_copy);
53
54EXPORT_SYMBOL(strpbrk);
55EXPORT_SYMBOL(strstr);
56
57#ifdef CONFIG_VT
58EXPORT_SYMBOL(screen_info);
59#endif
60
61EXPORT_SYMBOL(__down);
62EXPORT_SYMBOL(__down_trylock);
63EXPORT_SYMBOL(__up);
64EXPORT_SYMBOL(__put_user_asm_l);
65EXPORT_SYMBOL(__get_user_asm_l);
66EXPORT_SYMBOL(memcmp);
67EXPORT_SYMBOL(memcpy);
68EXPORT_SYMBOL(memset);
69EXPORT_SYMBOL(memscan);
70EXPORT_SYMBOL(strchr);
71EXPORT_SYMBOL(strlen);
72
73EXPORT_SYMBOL(flush_dcache_page);
74
75/* For ext3 */
76EXPORT_SYMBOL(sh64_page_clear);
77
78/* Ugh. These come in from libgcc.a at link time. */
79
80extern void __sdivsi3(void);
81extern void __muldi3(void);
82extern void __udivsi3(void);
83extern char __div_table;
84EXPORT_SYMBOL(__sdivsi3);
85EXPORT_SYMBOL(__muldi3);
86EXPORT_SYMBOL(__udivsi3);
87EXPORT_SYMBOL(__div_table);
88
89
diff --git a/arch/sh64/kernel/signal.c b/arch/sh64/kernel/signal.c
new file mode 100644
index 000000000000..45ad1026dde7
--- /dev/null
+++ b/arch/sh64/kernel/signal.c
@@ -0,0 +1,727 @@
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * arch/sh64/kernel/signal.c
7 *
8 * Copyright (C) 2000, 2001 Paolo Alberelli
9 * Copyright (C) 2003 Paul Mundt
10 * Copyright (C) 2004 Richard Curnow
11 *
12 * Started from sh version.
13 *
14 */
15#include <linux/rwsem.h>
16#include <linux/sched.h>
17#include <linux/mm.h>
18#include <linux/smp.h>
19#include <linux/smp_lock.h>
20#include <linux/kernel.h>
21#include <linux/signal.h>
22#include <linux/errno.h>
23#include <linux/wait.h>
24#include <linux/personality.h>
25#include <linux/suspend.h>
26#include <linux/ptrace.h>
27#include <linux/unistd.h>
28#include <linux/stddef.h>
29#include <linux/personality.h>
30#include <asm/ucontext.h>
31#include <asm/uaccess.h>
32#include <asm/pgtable.h>
33
34
35#define REG_RET 9
36#define REG_ARG1 2
37#define REG_ARG2 3
38#define REG_ARG3 4
39#define REG_SP 15
40#define REG_PR 18
41#define REF_REG_RET regs->regs[REG_RET]
42#define REF_REG_SP regs->regs[REG_SP]
43#define DEREF_REG_PR regs->regs[REG_PR]
44
45#define DEBUG_SIG 0
46
47#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
48
49asmlinkage int do_signal(struct pt_regs *regs, sigset_t *oldset);
50
51/*
52 * Atomically swap in the new signal mask, and wait for a signal.
53 */
54
55asmlinkage int
56sys_sigsuspend(old_sigset_t mask,
57 unsigned long r3, unsigned long r4, unsigned long r5,
58 unsigned long r6, unsigned long r7,
59 struct pt_regs * regs)
60{
61 sigset_t saveset;
62
63 mask &= _BLOCKABLE;
64 spin_lock_irq(&current->sighand->siglock);
65 saveset = current->blocked;
66 siginitset(&current->blocked, mask);
67 recalc_sigpending();
68 spin_unlock_irq(&current->sighand->siglock);
69
70 REF_REG_RET = -EINTR;
71 while (1) {
72 current->state = TASK_INTERRUPTIBLE;
73 schedule();
74 regs->pc += 4; /* because sys_sigreturn decrements the pc */
75 if (do_signal(regs, &saveset)) {
76 /* pc now points at signal handler. Need to decrement
77 it because entry.S will increment it. */
78 regs->pc -= 4;
79 return -EINTR;
80 }
81 }
82}
83
84asmlinkage int
85sys_rt_sigsuspend(sigset_t *unewset, size_t sigsetsize,
86 unsigned long r4, unsigned long r5, unsigned long r6,
87 unsigned long r7,
88 struct pt_regs * regs)
89{
90 sigset_t saveset, newset;
91
92 /* XXX: Don't preclude handling different sized sigset_t's. */
93 if (sigsetsize != sizeof(sigset_t))
94 return -EINVAL;
95
96 if (copy_from_user(&newset, unewset, sizeof(newset)))
97 return -EFAULT;
98 sigdelsetmask(&newset, ~_BLOCKABLE);
99 spin_lock_irq(&current->sighand->siglock);
100 saveset = current->blocked;
101 current->blocked = newset;
102 recalc_sigpending();
103 spin_unlock_irq(&current->sighand->siglock);
104
105 REF_REG_RET = -EINTR;
106 while (1) {
107 current->state = TASK_INTERRUPTIBLE;
108 schedule();
109 regs->pc += 4; /* because sys_sigreturn decrements the pc */
110 if (do_signal(regs, &saveset)) {
111 /* pc now points at signal handler. Need to decrement
112 it because entry.S will increment it. */
113 regs->pc -= 4;
114 return -EINTR;
115 }
116 }
117}
118
119asmlinkage int
120sys_sigaction(int sig, const struct old_sigaction __user *act,
121 struct old_sigaction __user *oact)
122{
123 struct k_sigaction new_ka, old_ka;
124 int ret;
125
126 if (act) {
127 old_sigset_t mask;
128 if (!access_ok(VERIFY_READ, act, sizeof(*act)) ||
129 __get_user(new_ka.sa.sa_handler, &act->sa_handler) ||
130 __get_user(new_ka.sa.sa_restorer, &act->sa_restorer))
131 return -EFAULT;
132 __get_user(new_ka.sa.sa_flags, &act->sa_flags);
133 __get_user(mask, &act->sa_mask);
134 siginitset(&new_ka.sa.sa_mask, mask);
135 }
136
137 ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
138
139 if (!ret && oact) {
140 if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) ||
141 __put_user(old_ka.sa.sa_handler, &oact->sa_handler) ||
142 __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer))
143 return -EFAULT;
144 __put_user(old_ka.sa.sa_flags, &oact->sa_flags);
145 __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask);
146 }
147
148 return ret;
149}
150
151asmlinkage int
152sys_sigaltstack(const stack_t __user *uss, stack_t __user *uoss,
153 unsigned long r4, unsigned long r5, unsigned long r6,
154 unsigned long r7,
155 struct pt_regs * regs)
156{
157 return do_sigaltstack(uss, uoss, REF_REG_SP);
158}
159
160
161/*
162 * Do a signal return; undo the signal stack.
163 */
164
165struct sigframe
166{
167 struct sigcontext sc;
168 unsigned long extramask[_NSIG_WORDS-1];
169 long long retcode[2];
170};
171
172struct rt_sigframe
173{
174 struct siginfo __user *pinfo;
175 void *puc;
176 struct siginfo info;
177 struct ucontext uc;
178 long long retcode[2];
179};
180
181#ifdef CONFIG_SH_FPU
182static inline int
183restore_sigcontext_fpu(struct pt_regs *regs, struct sigcontext __user *sc)
184{
185 int err = 0;
186 int fpvalid;
187
188 err |= __get_user (fpvalid, &sc->sc_fpvalid);
189 conditional_used_math(fpvalid);
190 if (! fpvalid)
191 return err;
192
193 if (current == last_task_used_math) {
194 last_task_used_math = NULL;
195 regs->sr |= SR_FD;
196 }
197
198 err |= __copy_from_user(&current->thread.fpu.hard, &sc->sc_fpregs[0],
199 (sizeof(long long) * 32) + (sizeof(int) * 1));
200
201 return err;
202}
203
204static inline int
205setup_sigcontext_fpu(struct pt_regs *regs, struct sigcontext __user *sc)
206{
207 int err = 0;
208 int fpvalid;
209
210 fpvalid = !!used_math();
211 err |= __put_user(fpvalid, &sc->sc_fpvalid);
212 if (! fpvalid)
213 return err;
214
215 if (current == last_task_used_math) {
216 grab_fpu();
217 fpsave(&current->thread.fpu.hard);
218 release_fpu();
219 last_task_used_math = NULL;
220 regs->sr |= SR_FD;
221 }
222
223 err |= __copy_to_user(&sc->sc_fpregs[0], &current->thread.fpu.hard,
224 (sizeof(long long) * 32) + (sizeof(int) * 1));
225 clear_used_math();
226
227 return err;
228}
229#else
230static inline int
231restore_sigcontext_fpu(struct pt_regs *regs, struct sigcontext __user *sc)
232{ return 0; }
233static inline int
234setup_sigcontext_fpu(struct pt_regs *regs, struct sigcontext __user *sc)
235{ return 0; }
236#endif
237
238static int
239restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc, long long *r2_p)
240{
241 unsigned int err = 0;
242 unsigned long long current_sr, new_sr;
243#define SR_MASK 0xffff8cfd
244
245#define COPY(x) err |= __get_user(regs->x, &sc->sc_##x)
246
247 COPY(regs[0]); COPY(regs[1]); COPY(regs[2]); COPY(regs[3]);
248 COPY(regs[4]); COPY(regs[5]); COPY(regs[6]); COPY(regs[7]);
249 COPY(regs[8]); COPY(regs[9]); COPY(regs[10]); COPY(regs[11]);
250 COPY(regs[12]); COPY(regs[13]); COPY(regs[14]); COPY(regs[15]);
251 COPY(regs[16]); COPY(regs[17]); COPY(regs[18]); COPY(regs[19]);
252 COPY(regs[20]); COPY(regs[21]); COPY(regs[22]); COPY(regs[23]);
253 COPY(regs[24]); COPY(regs[25]); COPY(regs[26]); COPY(regs[27]);
254 COPY(regs[28]); COPY(regs[29]); COPY(regs[30]); COPY(regs[31]);
255 COPY(regs[32]); COPY(regs[33]); COPY(regs[34]); COPY(regs[35]);
256 COPY(regs[36]); COPY(regs[37]); COPY(regs[38]); COPY(regs[39]);
257 COPY(regs[40]); COPY(regs[41]); COPY(regs[42]); COPY(regs[43]);
258 COPY(regs[44]); COPY(regs[45]); COPY(regs[46]); COPY(regs[47]);
259 COPY(regs[48]); COPY(regs[49]); COPY(regs[50]); COPY(regs[51]);
260 COPY(regs[52]); COPY(regs[53]); COPY(regs[54]); COPY(regs[55]);
261 COPY(regs[56]); COPY(regs[57]); COPY(regs[58]); COPY(regs[59]);
262 COPY(regs[60]); COPY(regs[61]); COPY(regs[62]);
263 COPY(tregs[0]); COPY(tregs[1]); COPY(tregs[2]); COPY(tregs[3]);
264 COPY(tregs[4]); COPY(tregs[5]); COPY(tregs[6]); COPY(tregs[7]);
265
266 /* Prevent the signal handler manipulating SR in a way that can
267	   crash the kernel, i.e. only allow S, Q, M, PR, SZ, FR to be
268 modified */
269 current_sr = regs->sr;
270 err |= __get_user(new_sr, &sc->sc_sr);
271 regs->sr &= SR_MASK;
272 regs->sr |= (new_sr & ~SR_MASK);
273
274 COPY(pc);
275
276#undef COPY
277
278 /* Must do this last in case it sets regs->sr.fd (i.e. after rest of sr
279 * has been restored above.) */
280 err |= restore_sigcontext_fpu(regs, sc);
281
282 regs->syscall_nr = -1; /* disable syscall checks */
283 err |= __get_user(*r2_p, &sc->sc_regs[REG_RET]);
284 return err;
285}
286
287asmlinkage int sys_sigreturn(unsigned long r2, unsigned long r3,
288 unsigned long r4, unsigned long r5,
289 unsigned long r6, unsigned long r7,
290 struct pt_regs * regs)
291{
292 struct sigframe __user *frame = (struct sigframe __user *) (long) REF_REG_SP;
293 sigset_t set;
294 long long ret;
295
296 if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
297 goto badframe;
298
299 if (__get_user(set.sig[0], &frame->sc.oldmask)
300 || (_NSIG_WORDS > 1
301 && __copy_from_user(&set.sig[1], &frame->extramask,
302 sizeof(frame->extramask))))
303 goto badframe;
304
305 sigdelsetmask(&set, ~_BLOCKABLE);
306
307 spin_lock_irq(&current->sighand->siglock);
308 current->blocked = set;
309 recalc_sigpending();
310 spin_unlock_irq(&current->sighand->siglock);
311
312 if (restore_sigcontext(regs, &frame->sc, &ret))
313 goto badframe;
314 regs->pc -= 4;
315
316 return (int) ret;
317
318badframe:
319 force_sig(SIGSEGV, current);
320 return 0;
321}
322
323asmlinkage int sys_rt_sigreturn(unsigned long r2, unsigned long r3,
324 unsigned long r4, unsigned long r5,
325 unsigned long r6, unsigned long r7,
326 struct pt_regs * regs)
327{
328 struct rt_sigframe __user *frame = (struct rt_sigframe __user *) (long) REF_REG_SP;
329 sigset_t set;
330 stack_t __user st;
331 long long ret;
332
333 if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
334 goto badframe;
335
336 if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set)))
337 goto badframe;
338
339 sigdelsetmask(&set, ~_BLOCKABLE);
340 spin_lock_irq(&current->sighand->siglock);
341 current->blocked = set;
342 recalc_sigpending();
343 spin_unlock_irq(&current->sighand->siglock);
344
345 if (restore_sigcontext(regs, &frame->uc.uc_mcontext, &ret))
346 goto badframe;
347 regs->pc -= 4;
348
349 if (__copy_from_user(&st, &frame->uc.uc_stack, sizeof(st)))
350 goto badframe;
351 /* It is more difficult to avoid calling this function than to
352 call it and ignore errors. */
353 do_sigaltstack(&st, NULL, REF_REG_SP);
354
355 return (int) ret;
356
357badframe:
358 force_sig(SIGSEGV, current);
359 return 0;
360}
361
362/*
363 * Set up a signal frame.
364 */
365
366static int
367setup_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs,
368 unsigned long mask)
369{
370 int err = 0;
371
372	/* Do this first: otherwise, if this sets sr->fd, that value isn't preserved. */
373 err |= setup_sigcontext_fpu(regs, sc);
374
375#define COPY(x) err |= __put_user(regs->x, &sc->sc_##x)
376
377 COPY(regs[0]); COPY(regs[1]); COPY(regs[2]); COPY(regs[3]);
378 COPY(regs[4]); COPY(regs[5]); COPY(regs[6]); COPY(regs[7]);
379 COPY(regs[8]); COPY(regs[9]); COPY(regs[10]); COPY(regs[11]);
380 COPY(regs[12]); COPY(regs[13]); COPY(regs[14]); COPY(regs[15]);
381 COPY(regs[16]); COPY(regs[17]); COPY(regs[18]); COPY(regs[19]);
382 COPY(regs[20]); COPY(regs[21]); COPY(regs[22]); COPY(regs[23]);
383 COPY(regs[24]); COPY(regs[25]); COPY(regs[26]); COPY(regs[27]);
384 COPY(regs[28]); COPY(regs[29]); COPY(regs[30]); COPY(regs[31]);
385 COPY(regs[32]); COPY(regs[33]); COPY(regs[34]); COPY(regs[35]);
386 COPY(regs[36]); COPY(regs[37]); COPY(regs[38]); COPY(regs[39]);
387 COPY(regs[40]); COPY(regs[41]); COPY(regs[42]); COPY(regs[43]);
388 COPY(regs[44]); COPY(regs[45]); COPY(regs[46]); COPY(regs[47]);
389 COPY(regs[48]); COPY(regs[49]); COPY(regs[50]); COPY(regs[51]);
390 COPY(regs[52]); COPY(regs[53]); COPY(regs[54]); COPY(regs[55]);
391 COPY(regs[56]); COPY(regs[57]); COPY(regs[58]); COPY(regs[59]);
392 COPY(regs[60]); COPY(regs[61]); COPY(regs[62]);
393 COPY(tregs[0]); COPY(tregs[1]); COPY(tregs[2]); COPY(tregs[3]);
394 COPY(tregs[4]); COPY(tregs[5]); COPY(tregs[6]); COPY(tregs[7]);
395 COPY(sr); COPY(pc);
396
397#undef COPY
398
399 err |= __put_user(mask, &sc->oldmask);
400
401 return err;
402}
403
404/*
405 * Determine which stack to use..
406 */
407static inline void __user *
408get_sigframe(struct k_sigaction *ka, unsigned long sp, size_t frame_size)
409{
410 if ((ka->sa.sa_flags & SA_ONSTACK) != 0 && ! on_sig_stack(sp))
411 sp = current->sas_ss_sp + current->sas_ss_size;
412
413 return (void __user *)((sp - frame_size) & -8ul);
414}
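A worked example of the rounding in get_sigframe(): with sp == 0x7fff1005
and frame_size == 0x1c8, (0x7fff1005 - 0x1c8) & -8ul == 0x7fff0e38 — the
frame is carved out below the stack pointer and aligned down to an 8-byte
boundary, after the SA_ONSTACK branch has first redirected sp to the top
of the alternate signal stack when one is armed.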
415
416void sa_default_restorer(void); /* See comments below */
417void sa_default_rt_restorer(void); /* See comments below */
418
419static void setup_frame(int sig, struct k_sigaction *ka,
420 sigset_t *set, struct pt_regs *regs)
421{
422 struct sigframe __user *frame;
423 int err = 0;
424 int signal;
425
426 frame = get_sigframe(ka, regs->regs[REG_SP], sizeof(*frame));
427
428 if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
429 goto give_sigsegv;
430
431 signal = current_thread_info()->exec_domain
432 && current_thread_info()->exec_domain->signal_invmap
433 && sig < 32
434 ? current_thread_info()->exec_domain->signal_invmap[sig]
435 : sig;
436
437 err |= setup_sigcontext(&frame->sc, regs, set->sig[0]);
438
439	/* Give up early, as i386 does, just in case */
440 if (err)
441 goto give_sigsegv;
442
443	if (_NSIG_WORDS > 1)
444		err |= __copy_to_user(frame->extramask, &set->sig[1],
445				      sizeof(frame->extramask));
446
447	/* Give up early, as i386 does, just in case */
448 if (err)
449 goto give_sigsegv;
450
451 /* Set up to return from userspace. If provided, use a stub
452 already in userspace. */
453 if (ka->sa.sa_flags & SA_RESTORER) {
454 DEREF_REG_PR = (unsigned long) ka->sa.sa_restorer | 0x1;
455
456 /*
457 * On SH5 all edited pointers are subject to NEFF
458 */
459 DEREF_REG_PR = (DEREF_REG_PR & NEFF_SIGN) ?
460 (DEREF_REG_PR | NEFF_MASK) : DEREF_REG_PR;
461 } else {
462 /*
463 * Different approach on SH5.
464 * . Endianness independent asm code gets placed in entry.S .
465 * This is limited to four ASM instructions corresponding
466 * to two long longs in size.
467 * . err checking is done on the else branch only
468 * . flush_icache_range() is called upon __put_user() only
469 * . all edited pointers are subject to NEFF
470	 * . being code, the linker turns the SHmedia bit on, so always
471	 *   dereference at index -1.
472 */
473 DEREF_REG_PR = (unsigned long) frame->retcode | 0x01;
474 DEREF_REG_PR = (DEREF_REG_PR & NEFF_SIGN) ?
475 (DEREF_REG_PR | NEFF_MASK) : DEREF_REG_PR;
476
477 if (__copy_to_user(frame->retcode,
478 (unsigned long long)sa_default_restorer & (~1), 16) != 0)
479 goto give_sigsegv;
480
481 /* Cohere the trampoline with the I-cache. */
482 flush_cache_sigtramp(DEREF_REG_PR-1, DEREF_REG_PR-1+16);
483 }
484
485 /*
486 * Set up registers for signal handler.
487 * All edited pointers are subject to NEFF.
488 */
489 regs->regs[REG_SP] = (unsigned long) frame;
490 regs->regs[REG_SP] = (regs->regs[REG_SP] & NEFF_SIGN) ?
491 (regs->regs[REG_SP] | NEFF_MASK) : regs->regs[REG_SP];
492 regs->regs[REG_ARG1] = signal; /* Arg for signal handler */
493
494 /* FIXME:
495 The glibc profiling support for SH-5 needs to be passed a sigcontext
496 so it can retrieve the PC. At some point during 2003 the glibc
497 support was changed to receive the sigcontext through the 2nd
498 argument, but there are still versions of libc.so in use that use
499 the 3rd argument. Until libc.so is stabilised, pass the sigcontext
500 through both 2nd and 3rd arguments.
501 */
502
503 regs->regs[REG_ARG2] = (unsigned long long)(unsigned long)(signed long)&frame->sc;
504 regs->regs[REG_ARG3] = (unsigned long long)(unsigned long)(signed long)&frame->sc;
505
506 regs->pc = (unsigned long) ka->sa.sa_handler;
507 regs->pc = (regs->pc & NEFF_SIGN) ? (regs->pc | NEFF_MASK) : regs->pc;
508
509 set_fs(USER_DS);
510
511#if DEBUG_SIG
512 /* Broken %016Lx */
513 printk("SIG deliver (#%d,%s:%d): sp=%p pc=%08Lx%08Lx link=%08Lx%08Lx\n",
514 signal,
515 current->comm, current->pid, frame,
516 regs->pc >> 32, regs->pc & 0xffffffff,
517 DEREF_REG_PR >> 32, DEREF_REG_PR & 0xffffffff);
518#endif
519
520 return;
521
522give_sigsegv:
523 force_sigsegv(sig, current);
524}
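The repeated "(x & NEFF_SIGN) ? (x | NEFF_MASK) : x" edits above
sign-extend pointers to canonical 64-bit effective addresses. A sketch of
the operation with illustrative constants, assuming 32 significant
effective-address bits (the kernel's own definitions live in the sh64
headers):

	#define NEFF		32
	#define NEFF_SIGN	(1ULL << (NEFF - 1))
	#define NEFF_MASK	(~((1ULL << NEFF) - 1))

	static unsigned long long neff_sign_extend(unsigned long long p)
	{
		/* Propagate the top effective bit upward so the full
		 * 64-bit register holds a canonical address. */
		return (p & NEFF_SIGN) ? (p | NEFF_MASK) : p;
	}

	/* e.g. neff_sign_extend(0x00000000c0001000ULL)
	 *        == 0xffffffffc0001000ULL */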
525
526static void setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
527 sigset_t *set, struct pt_regs *regs)
528{
529 struct rt_sigframe __user *frame;
530 int err = 0;
531 int signal;
532
533 frame = get_sigframe(ka, regs->regs[REG_SP], sizeof(*frame));
534
535 if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
536 goto give_sigsegv;
537
538 signal = current_thread_info()->exec_domain
539 && current_thread_info()->exec_domain->signal_invmap
540 && sig < 32
541 ? current_thread_info()->exec_domain->signal_invmap[sig]
542 : sig;
543
544 err |= __put_user(&frame->info, &frame->pinfo);
545 err |= __put_user(&frame->uc, &frame->puc);
546 err |= copy_siginfo_to_user(&frame->info, info);
547
548	/* Give up early, as i386 does, just in case */
549 if (err)
550 goto give_sigsegv;
551
552 /* Create the ucontext. */
553 err |= __put_user(0, &frame->uc.uc_flags);
554 err |= __put_user(0, &frame->uc.uc_link);
555 err |= __put_user((void *)current->sas_ss_sp,
556 &frame->uc.uc_stack.ss_sp);
557 err |= __put_user(sas_ss_flags(regs->regs[REG_SP]),
558 &frame->uc.uc_stack.ss_flags);
559 err |= __put_user(current->sas_ss_size, &frame->uc.uc_stack.ss_size);
560 err |= setup_sigcontext(&frame->uc.uc_mcontext,
561 regs, set->sig[0]);
562 err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
563
564	/* Give up early, as i386 does, just in case */
565 if (err)
566 goto give_sigsegv;
567
568 /* Set up to return from userspace. If provided, use a stub
569 already in userspace. */
570 if (ka->sa.sa_flags & SA_RESTORER) {
571 DEREF_REG_PR = (unsigned long) ka->sa.sa_restorer | 0x1;
572
573 /*
574 * On SH5 all edited pointers are subject to NEFF
575 */
576 DEREF_REG_PR = (DEREF_REG_PR & NEFF_SIGN) ?
577 (DEREF_REG_PR | NEFF_MASK) : DEREF_REG_PR;
578 } else {
579 /*
580 * Different approach on SH5.
581 * . Endianness independent asm code gets placed in entry.S .
582 * This is limited to four ASM instructions corresponding
583 * to two long longs in size.
584 * . err checking is done on the else branch only
585 * . flush_icache_range() is called upon __put_user() only
586 * . all edited pointers are subject to NEFF
587	 * . being code, the linker turns the SHmedia bit on, so always
588	 *   dereference at index -1.
589 */
590
591 DEREF_REG_PR = (unsigned long) frame->retcode | 0x01;
592 DEREF_REG_PR = (DEREF_REG_PR & NEFF_SIGN) ?
593 (DEREF_REG_PR | NEFF_MASK) : DEREF_REG_PR;
594
595 if (__copy_to_user(frame->retcode,
596 (unsigned long long)sa_default_rt_restorer & (~1), 16) != 0)
597 goto give_sigsegv;
598
599 flush_icache_range(DEREF_REG_PR-1, DEREF_REG_PR-1+15);
600 }
601
602 /*
603 * Set up registers for signal handler.
604 * All edited pointers are subject to NEFF.
605 */
606 regs->regs[REG_SP] = (unsigned long) frame;
607 regs->regs[REG_SP] = (regs->regs[REG_SP] & NEFF_SIGN) ?
608 (regs->regs[REG_SP] | NEFF_MASK) : regs->regs[REG_SP];
609 regs->regs[REG_ARG1] = signal; /* Arg for signal handler */
610 regs->regs[REG_ARG2] = (unsigned long long)(unsigned long)(signed long)&frame->info;
611 regs->regs[REG_ARG3] = (unsigned long long)(unsigned long)(signed long)&frame->uc.uc_mcontext;
612 regs->pc = (unsigned long) ka->sa.sa_handler;
613 regs->pc = (regs->pc & NEFF_SIGN) ? (regs->pc | NEFF_MASK) : regs->pc;
614
615 set_fs(USER_DS);
616
617#if DEBUG_SIG
618 /* Broken %016Lx */
619 printk("SIG deliver (#%d,%s:%d): sp=%p pc=%08Lx%08Lx link=%08Lx%08Lx\n",
620 signal,
621 current->comm, current->pid, frame,
622 regs->pc >> 32, regs->pc & 0xffffffff,
623 DEREF_REG_PR >> 32, DEREF_REG_PR & 0xffffffff);
624#endif
625
626 return;
627
628give_sigsegv:
629 force_sigsegv(sig, current);
630}
631
632/*
633 * OK, we're invoking a handler
634 */
635
636static void
637handle_signal(unsigned long sig, siginfo_t *info, struct k_sigaction *ka,
638 sigset_t *oldset, struct pt_regs * regs)
639{
640 /* Are we from a system call? */
641 if (regs->syscall_nr >= 0) {
642 /* If so, check system call restarting.. */
643 switch (regs->regs[REG_RET]) {
644 case -ERESTARTNOHAND:
645 regs->regs[REG_RET] = -EINTR;
646 break;
647
648 case -ERESTARTSYS:
649 if (!(ka->sa.sa_flags & SA_RESTART)) {
650 regs->regs[REG_RET] = -EINTR;
651 break;
652 }
653 /* fallthrough */
654 case -ERESTARTNOINTR:
655 /* Decode syscall # */
656 regs->regs[REG_RET] = regs->syscall_nr;
657 regs->pc -= 4;
658 }
659 }
660
661 /* Set up the stack frame */
662 if (ka->sa.sa_flags & SA_SIGINFO)
663 setup_rt_frame(sig, ka, info, oldset, regs);
664 else
665 setup_frame(sig, ka, oldset, regs);
666
667 if (!(ka->sa.sa_flags & SA_NODEFER)) {
668 spin_lock_irq(&current->sighand->siglock);
669 sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask);
670 sigaddset(&current->blocked,sig);
671 recalc_sigpending();
672 spin_unlock_irq(&current->sighand->siglock);
673 }
674}
675
676/*
677 * Note that 'init' is a special process: it doesn't get signals it doesn't
678 * want to handle. Thus you cannot kill init even with a SIGKILL even by
679 * mistake.
680 *
681 * Note that we go through the signals twice: once to check the signals that
682 * the kernel can handle, and then we build all the user-level signal handling
683 * stack-frames in one go after that.
684 */
685int do_signal(struct pt_regs *regs, sigset_t *oldset)
686{
687 siginfo_t info;
688 int signr;
689 struct k_sigaction ka;
690
691 /*
692 * We want the common case to go fast, which
693 * is why we may in certain cases get here from
694 * kernel mode. Just return without doing anything
695 * if so.
696 */
697 if (!user_mode(regs))
698 return 1;
699
700 if (try_to_freeze(0))
701 goto no_signal;
702
703 if (!oldset)
704 oldset = &current->blocked;
705
706 signr = get_signal_to_deliver(&info, &ka, regs, 0);
707
708 if (signr > 0) {
709 /* Whee! Actually deliver the signal. */
710 handle_signal(signr, &info, &ka, oldset, regs);
711 return 1;
712 }
713
714no_signal:
715 /* Did we come from a system call? */
716 if (regs->syscall_nr >= 0) {
717 /* Restart the system call - no handlers present */
718 if (regs->regs[REG_RET] == -ERESTARTNOHAND ||
719 regs->regs[REG_RET] == -ERESTARTSYS ||
720 regs->regs[REG_RET] == -ERESTARTNOINTR) {
721 /* Decode Syscall # */
722 regs->regs[REG_RET] = regs->syscall_nr;
723 regs->pc -= 4;
724 }
725 }
726 return 0;
727}
diff --git a/arch/sh64/kernel/switchto.S b/arch/sh64/kernel/switchto.S
new file mode 100644
index 000000000000..45b2d90eed7d
--- /dev/null
+++ b/arch/sh64/kernel/switchto.S
@@ -0,0 +1,198 @@
1/*
2 * arch/sh64/kernel/switchto.S
3 *
4 * sh64 context switch
5 *
6 * Copyright (C) 2004 Richard Curnow
7 *
8 * This file is subject to the terms and conditions of the GNU General Public
9 * License. See the file "COPYING" in the main directory of this archive
10 * for more details.
11*/
12
13 .section .text..SHmedia32,"ax"
14 .little
15
16 .balign 32
17
18 .type sh64_switch_to,@function
19 .global sh64_switch_to
20 .global __sh64_switch_to_end
21sh64_switch_to:
22
23/* Incoming args
24 r2 - prev
25 r3 - &prev->thread
26 r4 - next
27 r5 - &next->thread
28
29 Outgoing results
30 r2 - last (=prev) : this just stays in r2 throughout
31
32 Want to create a full (struct pt_regs) on the stack to allow backtracing
33 functions to work. However, we only need to populate the callee-save
34 register slots in this structure; since we're a function our ancestors must
35 have themselves preserved all caller saved state in the stack. This saves
36 some wasted effort since we won't need to look at the values.
37
38 In particular, all caller-save registers are immediately available for
39 scratch use.
40
41*/
42
43#define FRAME_SIZE (76*8 + 8)
44
45 movi FRAME_SIZE, r0
46 sub.l r15, r0, r15
47 ! Do normal-style register save to support backtrace
48
49 st.l r15, 0, r18 ! save link reg
50 st.l r15, 4, r14 ! save fp
51 add.l r15, r63, r14 ! setup frame pointer
52
53 ! hopefully this looks normal to the backtrace now.
54
55 addi.l r15, 8, r1 ! base of pt_regs
56 addi.l r1, 24, r0 ! base of pt_regs.regs
57 addi.l r0, (63*8), r8 ! base of pt_regs.trregs
58
59 /* Note : to be fixed?
60 struct pt_regs is really designed for holding the state on entry
61 to an exception, i.e. pc,sr,regs etc. However, for the context
62 switch state, some of this is not required. But the unwinder takes
63 struct pt_regs * as an arg so we have to build this structure
64 to allow unwinding switched tasks in show_state() */
65
66 st.q r0, ( 9*8), r9
67 st.q r0, (10*8), r10
68 st.q r0, (11*8), r11
69 st.q r0, (12*8), r12
70 st.q r0, (13*8), r13
71 st.q r0, (14*8), r14 ! for unwind, want to look as though we took a trap at
72 ! the point where the process is left in suspended animation, i.e. current
73 ! fp here, not the saved one.
74 st.q r0, (16*8), r16
75
76 st.q r0, (24*8), r24
77 st.q r0, (25*8), r25
78 st.q r0, (26*8), r26
79 st.q r0, (27*8), r27
80 st.q r0, (28*8), r28
81 st.q r0, (29*8), r29
82 st.q r0, (30*8), r30
83 st.q r0, (31*8), r31
84 st.q r0, (32*8), r32
85 st.q r0, (33*8), r33
86 st.q r0, (34*8), r34
87 st.q r0, (35*8), r35
88
89 st.q r0, (44*8), r44
90 st.q r0, (45*8), r45
91 st.q r0, (46*8), r46
92 st.q r0, (47*8), r47
93 st.q r0, (48*8), r48
94 st.q r0, (49*8), r49
95 st.q r0, (50*8), r50
96 st.q r0, (51*8), r51
97 st.q r0, (52*8), r52
98 st.q r0, (53*8), r53
99 st.q r0, (54*8), r54
100 st.q r0, (55*8), r55
101 st.q r0, (56*8), r56
102 st.q r0, (57*8), r57
103 st.q r0, (58*8), r58
104 st.q r0, (59*8), r59
105
106 ! do this early as pta->gettr has no pipeline forwarding (=> 5 cycle latency)
107	! Use a local label to avoid creating a symbol that will confuse the
108 ! backtrace
109 pta .Lsave_pc, tr0
110
111 gettr tr5, r45
112 gettr tr6, r46
113 gettr tr7, r47
114 st.q r8, (5*8), r45
115 st.q r8, (6*8), r46
116 st.q r8, (7*8), r47
117
118 ! Now switch context
119 gettr tr0, r9
120 st.l r3, 0, r15 ! prev->thread.sp
121 st.l r3, 8, r1 ! prev->thread.kregs
122 st.l r3, 4, r9 ! prev->thread.pc
123 st.q r1, 0, r9 ! save prev->thread.pc into pt_regs->pc
124
125 ! Load PC for next task (init value or save_pc later)
126 ld.l r5, 4, r18 ! next->thread.pc
127 ! Switch stacks
128 ld.l r5, 0, r15 ! next->thread.sp
129 ptabs r18, tr0
130
131 ! Update current
132 ld.l r4, 4, r9 ! next->thread_info (2nd element of next task_struct)
133 putcon r9, kcr0 ! current = next->thread_info
134
135 ! go to save_pc for a reschedule, or the initial thread.pc for a new process
136 blink tr0, r63
137
138 ! Restore (when we come back to a previously saved task)
139.Lsave_pc:
140 addi.l r15, 32, r0 ! r0 = next's regs
141 addi.l r0, (63*8), r8 ! r8 = next's tr_regs
142
143 ld.q r8, (5*8), r45
144 ld.q r8, (6*8), r46
145 ld.q r8, (7*8), r47
146 ptabs r45, tr5
147 ptabs r46, tr6
148 ptabs r47, tr7
149
150 ld.q r0, ( 9*8), r9
151 ld.q r0, (10*8), r10
152 ld.q r0, (11*8), r11
153 ld.q r0, (12*8), r12
154 ld.q r0, (13*8), r13
155 ld.q r0, (14*8), r14
156 ld.q r0, (16*8), r16
157
158 ld.q r0, (24*8), r24
159 ld.q r0, (25*8), r25
160 ld.q r0, (26*8), r26
161 ld.q r0, (27*8), r27
162 ld.q r0, (28*8), r28
163 ld.q r0, (29*8), r29
164 ld.q r0, (30*8), r30
165 ld.q r0, (31*8), r31
166 ld.q r0, (32*8), r32
167 ld.q r0, (33*8), r33
168 ld.q r0, (34*8), r34
169 ld.q r0, (35*8), r35
170
171 ld.q r0, (44*8), r44
172 ld.q r0, (45*8), r45
173 ld.q r0, (46*8), r46
174 ld.q r0, (47*8), r47
175 ld.q r0, (48*8), r48
176 ld.q r0, (49*8), r49
177 ld.q r0, (50*8), r50
178 ld.q r0, (51*8), r51
179 ld.q r0, (52*8), r52
180 ld.q r0, (53*8), r53
181 ld.q r0, (54*8), r54
182 ld.q r0, (55*8), r55
183 ld.q r0, (56*8), r56
184 ld.q r0, (57*8), r57
185 ld.q r0, (58*8), r58
186 ld.q r0, (59*8), r59
187
188 ! epilogue
189 ld.l r15, 0, r18
190 ld.l r15, 4, r14
191 ptabs r18, tr0
192 movi FRAME_SIZE, r0
193 add r15, r0, r15
194 blink tr0, r63
195__sh64_switch_to_end:
196.LFE1:
197 .size sh64_switch_to,.LFE1-sh64_switch_to
198
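At the C level, the switch above amounts to the following sketch
(illustrative only — the real code must be assembly because it swaps the
stack pointer and link register mid-function):

	/*
	 * sh64_switch_to(prev, &prev->thread, next, &next->thread):
	 *   1. build a partial pt_regs (callee-saved regs only) on the
	 *      current stack so the unwinder can backtrace "prev";
	 *   2. prev->thread.sp    = current SP;
	 *      prev->thread.pc    = &.Lsave_pc          (resume point);
	 *      prev->thread.kregs = &that pt_regs;
	 *   3. SP   = next->thread.sp;
	 *      KCR0 = next->thread_info                 ("current");
	 *   4. jump to next->thread.pc — .Lsave_pc for a task that was
	 *      switched out here, or the entry PC of a brand-new thread.
	 */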
diff --git a/arch/sh64/kernel/sys_sh64.c b/arch/sh64/kernel/sys_sh64.c
new file mode 100644
index 000000000000..4546845b9caf
--- /dev/null
+++ b/arch/sh64/kernel/sys_sh64.c
@@ -0,0 +1,300 @@
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * arch/sh64/kernel/sys_sh64.c
7 *
8 * Copyright (C) 2000, 2001 Paolo Alberelli
9 *
10 * This file contains various random system calls that
11 * have a non-standard calling sequence on the Linux/SH5
12 * platform.
13 *
14 * Mostly taken from i386 version.
15 *
16 */
17
18#include <linux/errno.h>
19#include <linux/rwsem.h>
20#include <linux/sched.h>
21#include <linux/mm.h>
22#include <linux/smp.h>
23#include <linux/smp_lock.h>
24#include <linux/sem.h>
25#include <linux/msg.h>
26#include <linux/shm.h>
27#include <linux/stat.h>
28#include <linux/mman.h>
29#include <linux/file.h>
30#include <linux/utsname.h>
31#include <linux/syscalls.h>
32#include <asm/uaccess.h>
33#include <asm/ipc.h>
34#include <asm/ptrace.h>
35
36#define REG_3 3
37
38/*
39 * sys_pipe() is the normal C calling standard for creating
40 * a pipe. It's not the way Unix traditionally does this, though.
41 */
42#ifdef NEW_PIPE_IMPLEMENTATION
43asmlinkage int sys_pipe(unsigned long * fildes,
44 unsigned long dummy_r3,
45 unsigned long dummy_r4,
46 unsigned long dummy_r5,
47 unsigned long dummy_r6,
48 unsigned long dummy_r7,
49 struct pt_regs * regs) /* r8 = pt_regs forced by entry.S */
50{
51 int fd[2];
52 int ret;
53
54 ret = do_pipe(fd);
55 if (ret == 0)
56 /*
57 ***********************************************************************
58 * To avoid the copy_to_user we prefer to break the ABIs convention, *
59 * packing the valid pair of file IDs into a single register (r3); *
60 * while r2 is the return code as defined by the sh5-ABIs. *
61 * BE CAREFUL: pipe stub, into glibc, must be aware of this solution *
62 ***********************************************************************
63
64#ifdef __LITTLE_ENDIAN__
65 regs->regs[REG_3] = (((unsigned long long) fd[1]) << 32) | ((unsigned long long) fd[0]);
66#else
67 regs->regs[REG_3] = (((unsigned long long) fd[0]) << 32) | ((unsigned long long) fd[1]);
68#endif
69
70 */
71	/* although not very clever, this is endianness independent */
72 regs->regs[REG_3] = (unsigned long long) *((unsigned long long *) fd);
73
74 return ret;
75}
76
77#else
78asmlinkage int sys_pipe(unsigned long * fildes)
79{
80 int fd[2];
81 int error;
82
83 error = do_pipe(fd);
84 if (!error) {
85 if (copy_to_user(fildes, fd, 2*sizeof(int)))
86 error = -EFAULT;
87 }
88 return error;
89}
90
91#endif
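The endianness argument for the *(unsigned long long *)fd packing in the
NEW_PIPE_IMPLEMENTATION variant above: userspace unpacks the pair through
the same aliasing, so byte order cancels out. A userspace sketch of both
ends:

	int fd[2] = { 3, 4 };
	unsigned long long packed = *(unsigned long long *)fd;	/* kernel side */

	int out[2];
	*(unsigned long long *)out = packed;	/* glibc stub side */
	/* out[0] == 3 && out[1] == 4 on either endianness */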
92
93/*
94 * To avoid cache aliasing, we map the shared page with the same colour.
95 */
96#define COLOUR_ALIGN(addr) (((addr)+SHMLBA-1)&~(SHMLBA-1))
97
98unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
99 unsigned long len, unsigned long pgoff, unsigned long flags)
100{
101 struct vm_area_struct *vma;
102
103 if (flags & MAP_FIXED) {
104 /* We do not accept a shared mapping if it would violate
105 * cache aliasing constraints.
106 */
107 if ((flags & MAP_SHARED) && (addr & (SHMLBA - 1)))
108 return -EINVAL;
109 return addr;
110 }
111
112 if (len > TASK_SIZE)
113 return -ENOMEM;
114 if (!addr)
115 addr = TASK_UNMAPPED_BASE;
116
117 if (flags & MAP_PRIVATE)
118 addr = PAGE_ALIGN(addr);
119 else
120 addr = COLOUR_ALIGN(addr);
121
122 for (vma = find_vma(current->mm, addr); ; vma = vma->vm_next) {
123 /* At this point: (!vma || addr < vma->vm_end). */
124 if (TASK_SIZE - len < addr)
125 return -ENOMEM;
126 if (!vma || addr + len <= vma->vm_start)
127 return addr;
128 addr = vma->vm_end;
129 if (!(flags & MAP_PRIVATE))
130 addr = COLOUR_ALIGN(addr);
131 }
132}
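A worked example of COLOUR_ALIGN, assuming an illustrative SHMLBA of
0x4000: COLOUR_ALIGN(0x40001234) == (0x40001234 + 0x3fff) & ~0x3fff ==
0x40004000. Shared mappings therefore always start at the same offset
within the cache way, which is what prevents D-cache aliasing between two
views of a shared page.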
133
134/* common code for old and new mmaps */
135static inline long do_mmap2(
136 unsigned long addr, unsigned long len,
137 unsigned long prot, unsigned long flags,
138 unsigned long fd, unsigned long pgoff)
139{
140 int error = -EBADF;
141 struct file * file = NULL;
142
143 flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
144 if (!(flags & MAP_ANONYMOUS)) {
145 file = fget(fd);
146 if (!file)
147 goto out;
148 }
149
150 down_write(&current->mm->mmap_sem);
151 error = do_mmap_pgoff(file, addr, len, prot, flags, pgoff);
152 up_write(&current->mm->mmap_sem);
153
154 if (file)
155 fput(file);
156out:
157 return error;
158}
159
160asmlinkage long sys_mmap2(unsigned long addr, unsigned long len,
161 unsigned long prot, unsigned long flags,
162 unsigned long fd, unsigned long pgoff)
163{
164 return do_mmap2(addr, len, prot, flags, fd, pgoff);
165}
166
167asmlinkage int old_mmap(unsigned long addr, unsigned long len,
168 unsigned long prot, unsigned long flags,
169 int fd, unsigned long off)
170{
171 if (off & ~PAGE_MASK)
172 return -EINVAL;
173 return do_mmap2(addr, len, prot, flags, fd, off>>PAGE_SHIFT);
174}
175
176/*
177 * sys_ipc() is the de-multiplexer for the SysV IPC calls..
178 *
179 * This is really horribly ugly.
180 */
181asmlinkage int sys_ipc(uint call, int first, int second,
182 int third, void __user *ptr, long fifth)
183{
184 int version, ret;
185
186 version = call >> 16; /* hack for backward compatibility */
187 call &= 0xffff;
188
189 if (call <= SEMCTL)
190 switch (call) {
191 case SEMOP:
192 return sys_semtimedop(first, (struct sembuf __user *)ptr,
193 second, NULL);
194 case SEMTIMEDOP:
195 return sys_semtimedop(first, (struct sembuf __user *)ptr,
196 second,
197 (const struct timespec __user *)fifth);
198 case SEMGET:
199 return sys_semget (first, second, third);
200 case SEMCTL: {
201 union semun fourth;
202 if (!ptr)
203 return -EINVAL;
204 if (get_user(fourth.__pad, (void * __user *) ptr))
205 return -EFAULT;
206 return sys_semctl (first, second, third, fourth);
207 }
208 default:
209 return -EINVAL;
210 }
211
212 if (call <= MSGCTL)
213 switch (call) {
214 case MSGSND:
215 return sys_msgsnd (first, (struct msgbuf __user *) ptr,
216 second, third);
217 case MSGRCV:
218 switch (version) {
219 case 0: {
220 struct ipc_kludge tmp;
221 if (!ptr)
222 return -EINVAL;
223
224 if (copy_from_user(&tmp,
225 (struct ipc_kludge __user *) ptr,
226 sizeof (tmp)))
227 return -EFAULT;
228 return sys_msgrcv (first, tmp.msgp, second,
229 tmp.msgtyp, third);
230 }
231 default:
232 return sys_msgrcv (first,
233 (struct msgbuf __user *) ptr,
234 second, fifth, third);
235 }
236 case MSGGET:
237 return sys_msgget ((key_t) first, second);
238 case MSGCTL:
239 return sys_msgctl (first, second,
240 (struct msqid_ds __user *) ptr);
241 default:
242 return -EINVAL;
243 }
244 if (call <= SHMCTL)
245 switch (call) {
246 case SHMAT:
247 switch (version) {
248 default: {
249 ulong raddr;
250 ret = do_shmat (first, (char __user *) ptr,
251 second, &raddr);
252 if (ret)
253 return ret;
254 return put_user (raddr, (ulong __user *) third);
255 }
256 case 1: /* iBCS2 emulator entry point */
257 if (!segment_eq(get_fs(), get_ds()))
258 return -EINVAL;
259 return do_shmat (first, (char __user *) ptr,
260 second, (ulong *) third);
261 }
262 case SHMDT:
263 return sys_shmdt ((char __user *)ptr);
264 case SHMGET:
265 return sys_shmget (first, second, third);
266 case SHMCTL:
267 return sys_shmctl (first, second,
268 (struct shmid_ds __user *) ptr);
269 default:
270 return -EINVAL;
271 }
272
273 return -EINVAL;
274}
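From the caller's side, the multiplexing contract is simply that the low
16 bits of "call" select the operation and the high 16 bits carry the
backward-compatibility "version" consumed by the MSGRCV and SHMAT cases.
A sketch of a hypothetical userspace stub:

	/* encode */
	unsigned int call = (version << 16) | SEMOP;
	/* sys_ipc() decodes with: version = call >> 16; call &= 0xffff; */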
275
276asmlinkage int sys_uname(struct old_utsname * name)
277{
278 int err;
279 if (!name)
280 return -EFAULT;
281 down_read(&uts_sem);
282 err=copy_to_user(name, &system_utsname, sizeof (*name));
283 up_read(&uts_sem);
284 return err?-EFAULT:0;
285}
286
287/* Copied from the mips version */
288asmlinkage long sys_shmatcall(int shmid, char __user *shmaddr,
289 int shmflg)
290{
291 unsigned long raddr;
292 int err;
293
294 err = do_shmat(shmid, shmaddr, shmflg, &raddr);
295 if (err)
296 return err;
297
298 err = raddr;
299 return err;
300}
diff --git a/arch/sh64/kernel/syscalls.S b/arch/sh64/kernel/syscalls.S
new file mode 100644
index 000000000000..8ed417df3dc6
--- /dev/null
+++ b/arch/sh64/kernel/syscalls.S
@@ -0,0 +1,345 @@
1/*
2 * arch/sh64/kernel/syscalls.S
3 *
4 * Copyright (C) 2000, 2001 Paolo Alberelli
5 * Copyright (C) 2004 Paul Mundt
6 * Copyright (C) 2003, 2004 Richard Curnow
7 *
8 * This file is subject to the terms and conditions of the GNU General Public
9 * License. See the file "COPYING" in the main directory of this archive
10 * for more details.
11 */
12
13#include <linux/sys.h>
14
15 .section .data, "aw"
16 .balign 32
17
18/*
19 * System calls jump table
20 */
21 .globl sys_call_table
22sys_call_table:
23 .long sys_ni_syscall /* 0 - old "setup()" system call */
24 .long sys_exit
25 .long sys_fork
26 .long sys_read
27 .long sys_write
28 .long sys_open /* 5 */
29 .long sys_close
30 .long sys_waitpid
31 .long sys_creat
32 .long sys_link
33 .long sys_unlink /* 10 */
34 .long sys_execve
35 .long sys_chdir
36 .long sys_time
37 .long sys_mknod
38 .long sys_chmod /* 15 */
39 .long sys_lchown16
40 .long sys_ni_syscall /* old break syscall holder */
41 .long sys_stat
42 .long sys_lseek
43 .long sys_getpid /* 20 */
44 .long sys_mount
45 .long sys_oldumount
46 .long sys_setuid16
47 .long sys_getuid16
48 .long sys_stime /* 25 */
49 .long sys_ptrace
50 .long sys_alarm
51 .long sys_fstat
52 .long sys_pause
53 .long sys_utime /* 30 */
54 .long sys_ni_syscall /* old stty syscall holder */
55 .long sys_ni_syscall /* old gtty syscall holder */
56 .long sys_access
57 .long sys_nice
58 .long sys_ni_syscall /* 35 */ /* old ftime syscall holder */
59 .long sys_sync
60 .long sys_kill
61 .long sys_rename
62 .long sys_mkdir
63 .long sys_rmdir /* 40 */
64 .long sys_dup
65 .long sys_pipe
66 .long sys_times
67 .long sys_ni_syscall /* old prof syscall holder */
68 .long sys_brk /* 45 */
69 .long sys_setgid16
70 .long sys_getgid16
71 .long sys_signal
72 .long sys_geteuid16
73 .long sys_getegid16 /* 50 */
74 .long sys_acct
75	.long sys_umount	/* recycled never used phys() */
76 .long sys_ni_syscall /* old lock syscall holder */
77 .long sys_ioctl
78 .long sys_fcntl /* 55 */
79 .long sys_ni_syscall /* old mpx syscall holder */
80 .long sys_setpgid
81 .long sys_ni_syscall /* old ulimit syscall holder */
82 .long sys_ni_syscall /* sys_olduname */
83 .long sys_umask /* 60 */
84 .long sys_chroot
85 .long sys_ustat
86 .long sys_dup2
87 .long sys_getppid
88 .long sys_getpgrp /* 65 */
89 .long sys_setsid
90 .long sys_sigaction
91 .long sys_sgetmask
92 .long sys_ssetmask
93 .long sys_setreuid16 /* 70 */
94 .long sys_setregid16
95 .long sys_sigsuspend
96 .long sys_sigpending
97 .long sys_sethostname
98 .long sys_setrlimit /* 75 */
99 .long sys_old_getrlimit
100 .long sys_getrusage
101 .long sys_gettimeofday
102 .long sys_settimeofday
103 .long sys_getgroups16 /* 80 */
104 .long sys_setgroups16
105 .long sys_ni_syscall /* sys_oldselect */
106 .long sys_symlink
107 .long sys_lstat
108 .long sys_readlink /* 85 */
109 .long sys_uselib
110 .long sys_swapon
111 .long sys_reboot
112 .long old_readdir
113 .long old_mmap /* 90 */
114 .long sys_munmap
115 .long sys_truncate
116 .long sys_ftruncate
117 .long sys_fchmod
118 .long sys_fchown16 /* 95 */
119 .long sys_getpriority
120 .long sys_setpriority
121 .long sys_ni_syscall /* old profil syscall holder */
122 .long sys_statfs
123 .long sys_fstatfs /* 100 */
124 .long sys_ni_syscall /* ioperm */
125 .long sys_socketcall /* Obsolete implementation of socket syscall */
126 .long sys_syslog
127 .long sys_setitimer
128 .long sys_getitimer /* 105 */
129 .long sys_newstat
130 .long sys_newlstat
131 .long sys_newfstat
132 .long sys_uname
133 .long sys_ni_syscall /* 110 */ /* iopl */
134 .long sys_vhangup
135 .long sys_ni_syscall /* idle */
136 .long sys_ni_syscall /* vm86old */
137 .long sys_wait4
138 .long sys_swapoff /* 115 */
139 .long sys_sysinfo
140 .long sys_ipc /* Obsolete ipc syscall implementation */
141 .long sys_fsync
142 .long sys_sigreturn
143 .long sys_clone /* 120 */
144 .long sys_setdomainname
145 .long sys_newuname
146 .long sys_ni_syscall /* sys_modify_ldt */
147 .long sys_adjtimex
148 .long sys_mprotect /* 125 */
149 .long sys_sigprocmask
150 .long sys_ni_syscall /* old "create_module" */
151 .long sys_init_module
152 .long sys_delete_module
153 .long sys_ni_syscall /* 130: old "get_kernel_syms" */
154 .long sys_quotactl
155 .long sys_getpgid
156 .long sys_fchdir
157 .long sys_bdflush
158 .long sys_sysfs /* 135 */
159 .long sys_personality
160 .long sys_ni_syscall /* for afs_syscall */
161 .long sys_setfsuid16
162 .long sys_setfsgid16
163 .long sys_llseek /* 140 */
164 .long sys_getdents
165 .long sys_select
166 .long sys_flock
167 .long sys_msync
168 .long sys_readv /* 145 */
169 .long sys_writev
170 .long sys_getsid
171 .long sys_fdatasync
172 .long sys_sysctl
173 .long sys_mlock /* 150 */
174 .long sys_munlock
175 .long sys_mlockall
176 .long sys_munlockall
177 .long sys_sched_setparam
178 .long sys_sched_getparam /* 155 */
179 .long sys_sched_setscheduler
180 .long sys_sched_getscheduler
181 .long sys_sched_yield
182 .long sys_sched_get_priority_max
183 .long sys_sched_get_priority_min /* 160 */
184 .long sys_sched_rr_get_interval
185 .long sys_nanosleep
186 .long sys_mremap
187 .long sys_setresuid16
188 .long sys_getresuid16 /* 165 */
189 .long sys_ni_syscall /* vm86 */
190 .long sys_ni_syscall /* old "query_module" */
191 .long sys_poll
192 .long sys_nfsservctl
193 .long sys_setresgid16 /* 170 */
194 .long sys_getresgid16
195 .long sys_prctl
196 .long sys_rt_sigreturn
197 .long sys_rt_sigaction
198 .long sys_rt_sigprocmask /* 175 */
199 .long sys_rt_sigpending
200 .long sys_rt_sigtimedwait
201 .long sys_rt_sigqueueinfo
202 .long sys_rt_sigsuspend
203 .long sys_pread64 /* 180 */
204 .long sys_pwrite64
205 .long sys_chown16
206 .long sys_getcwd
207 .long sys_capget
208 .long sys_capset /* 185 */
209 .long sys_sigaltstack
210 .long sys_sendfile
211 .long sys_ni_syscall /* streams1 */
212 .long sys_ni_syscall /* streams2 */
213 .long sys_vfork /* 190 */
214 .long sys_getrlimit
215 .long sys_mmap2
216 .long sys_truncate64
217 .long sys_ftruncate64
218 .long sys_stat64 /* 195 */
219 .long sys_lstat64
220 .long sys_fstat64
221 .long sys_lchown
222 .long sys_getuid
223 .long sys_getgid /* 200 */
224 .long sys_geteuid
225 .long sys_getegid
226 .long sys_setreuid
227 .long sys_setregid
228 .long sys_getgroups /* 205 */
229 .long sys_setgroups
230 .long sys_fchown
231 .long sys_setresuid
232 .long sys_getresuid
233 .long sys_setresgid /* 210 */
234 .long sys_getresgid
235 .long sys_chown
236 .long sys_setuid
237 .long sys_setgid
238 .long sys_setfsuid /* 215 */
239 .long sys_setfsgid
240 .long sys_pivot_root
241 .long sys_mincore
242 .long sys_madvise
243 /* Broken-out socket family (maintain backwards compatibility in syscall
244 numbering with 2.4) */
245 .long sys_socket /* 220 */
246 .long sys_bind
247 .long sys_connect
248 .long sys_listen
249 .long sys_accept
250 .long sys_getsockname /* 225 */
251 .long sys_getpeername
252 .long sys_socketpair
253 .long sys_send
254 .long sys_sendto
255	.long sys_recv		/* 230 */
256 .long sys_recvfrom
257 .long sys_shutdown
258 .long sys_setsockopt
259 .long sys_getsockopt
260 .long sys_sendmsg /* 235 */
261 .long sys_recvmsg
262 /* Broken-out IPC family (maintain backwards compatibility in syscall
263 numbering with 2.4) */
264 .long sys_semop
265 .long sys_semget
266 .long sys_semctl
267 .long sys_msgsnd /* 240 */
268 .long sys_msgrcv
269 .long sys_msgget
270 .long sys_msgctl
271 .long sys_shmatcall
272 .long sys_shmdt /* 245 */
273 .long sys_shmget
274 .long sys_shmctl
275 /* Rest of syscalls listed in 2.4 i386 unistd.h */
276 .long sys_getdents64
277 .long sys_fcntl64
278 .long sys_ni_syscall /* 250 reserved for TUX */
279 .long sys_ni_syscall /* Reserved for Security */
280 .long sys_gettid
281 .long sys_readahead
282 .long sys_setxattr
283 .long sys_lsetxattr /* 255 */
284 .long sys_fsetxattr
285 .long sys_getxattr
286 .long sys_lgetxattr
287 .long sys_fgetxattr
288 .long sys_listxattr /* 260 */
289 .long sys_llistxattr
290 .long sys_flistxattr
291 .long sys_removexattr
292 .long sys_lremovexattr
293 .long sys_fremovexattr /* 265 */
294 .long sys_tkill
295 .long sys_sendfile64
296 .long sys_futex
297 .long sys_sched_setaffinity
298 .long sys_sched_getaffinity /* 270 */
299 .long sys_ni_syscall
300 .long sys_ni_syscall
301 .long sys_io_setup
302 .long sys_io_destroy
303 .long sys_io_getevents /* 275 */
304 .long sys_io_submit
305 .long sys_io_cancel
306 .long sys_fadvise64
307 .long sys_ni_syscall
308 .long sys_exit_group /* 280 */
309 /* Rest of new 2.6 syscalls */
310 .long sys_lookup_dcookie
311 .long sys_epoll_create
312 .long sys_epoll_ctl
313 .long sys_epoll_wait
314 .long sys_remap_file_pages /* 285 */
315 .long sys_set_tid_address
316 .long sys_timer_create
317 .long sys_timer_settime
318 .long sys_timer_gettime
319 .long sys_timer_getoverrun /* 290 */
320 .long sys_timer_delete
321 .long sys_clock_settime
322 .long sys_clock_gettime
323 .long sys_clock_getres
324 .long sys_clock_nanosleep /* 295 */
325 .long sys_statfs64
326 .long sys_fstatfs64
327 .long sys_tgkill
328 .long sys_utimes
329 .long sys_fadvise64_64 /* 300 */
330 .long sys_ni_syscall /* Reserved for vserver */
331 .long sys_ni_syscall /* Reserved for mbind */
332 .long sys_ni_syscall /* get_mempolicy */
333 .long sys_ni_syscall /* set_mempolicy */
334 .long sys_mq_open /* 305 */
335 .long sys_mq_unlink
336 .long sys_mq_timedsend
337 .long sys_mq_timedreceive
338 .long sys_mq_notify
339 .long sys_mq_getsetattr /* 310 */
340 .long sys_ni_syscall /* Reserved for kexec */
341 .long sys_waitid
342 .long sys_add_key
343 .long sys_request_key
344 .long sys_keyctl /* 315 */
345
diff --git a/arch/sh64/kernel/time.c b/arch/sh64/kernel/time.c
new file mode 100644
index 000000000000..6c84da3efc73
--- /dev/null
+++ b/arch/sh64/kernel/time.c
@@ -0,0 +1,610 @@
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * arch/sh64/kernel/time.c
7 *
8 * Copyright (C) 2000, 2001 Paolo Alberelli
9 * Copyright (C) 2003, 2004 Paul Mundt
10 * Copyright (C) 2003 Richard Curnow
11 *
12 * Original TMU/RTC code taken from sh version.
13 * Copyright (C) 1999 Tetsuya Okada & Niibe Yutaka
14 * Some code taken from i386 version.
15 * Copyright (C) 1991, 1992, 1995 Linus Torvalds
16 */
17
18#include <linux/config.h>
19#include <linux/errno.h>
20#include <linux/rwsem.h>
21#include <linux/sched.h>
22#include <linux/kernel.h>
23#include <linux/param.h>
24#include <linux/string.h>
25#include <linux/mm.h>
26#include <linux/interrupt.h>
27#include <linux/time.h>
28#include <linux/delay.h>
29#include <linux/init.h>
30#include <linux/profile.h>
31#include <linux/smp.h>
32
33#include <asm/registers.h> /* required by inline __asm__ stmt. */
34
35#include <asm/processor.h>
36#include <asm/uaccess.h>
37#include <asm/io.h>
38#include <asm/irq.h>
39#include <asm/delay.h>
40
41#include <linux/timex.h>
42#include <linux/irq.h>
43#include <asm/hardware.h>
44
45#define TMU_TOCR_INIT 0x00
46#define TMU0_TCR_INIT 0x0020
47#define TMU_TSTR_INIT 1
48#define TMU_TSTR_OFF 0
49
50/* RCR1 Bits */
51#define RCR1_CF 0x80 /* Carry Flag */
52#define RCR1_CIE 0x10 /* Carry Interrupt Enable */
53#define RCR1_AIE 0x08 /* Alarm Interrupt Enable */
54#define RCR1_AF 0x01 /* Alarm Flag */
55
56/* RCR2 Bits */
57#define RCR2_PEF 0x80 /* PEriodic interrupt Flag */
58#define RCR2_PESMASK 0x70 /* Periodic interrupt Set */
59#define RCR2_RTCEN 0x08 /* ENable RTC */
60#define RCR2_ADJ 0x04 /* ADJustment (30-second) */
61#define RCR2_RESET 0x02 /* Reset bit */
62#define RCR2_START 0x01 /* Start bit */
63
64/* Clock, Power and Reset Controller */
65#define CPRC_BLOCK_OFF 0x01010000
66#define CPRC_BASE PHYS_PERIPHERAL_BLOCK + CPRC_BLOCK_OFF
67
68#define FRQCR (cprc_base+0x0)
69#define WTCSR (cprc_base+0x0018)
70#define STBCR (cprc_base+0x0030)
71
72/* Time Management Unit */
73#define TMU_BLOCK_OFF 0x01020000
74#define TMU_BASE PHYS_PERIPHERAL_BLOCK + TMU_BLOCK_OFF
75#define TMU0_BASE tmu_base + 0x8 + (0xc * 0x0)
76#define TMU1_BASE tmu_base + 0x8 + (0xc * 0x1)
77#define TMU2_BASE tmu_base + 0x8 + (0xc * 0x2)
78
79#define TMU_TOCR tmu_base+0x0 /* Byte access */
80#define TMU_TSTR tmu_base+0x4 /* Byte access */
81
82#define TMU0_TCOR TMU0_BASE+0x0 /* Long access */
83#define TMU0_TCNT TMU0_BASE+0x4 /* Long access */
84#define TMU0_TCR TMU0_BASE+0x8 /* Word access */
85
86/* Real Time Clock */
87#define RTC_BLOCK_OFF 0x01040000
88#define RTC_BASE PHYS_PERIPHERAL_BLOCK + RTC_BLOCK_OFF
89
90#define R64CNT rtc_base+0x00
91#define RSECCNT rtc_base+0x04
92#define RMINCNT rtc_base+0x08
93#define RHRCNT rtc_base+0x0c
94#define RWKCNT rtc_base+0x10
95#define RDAYCNT rtc_base+0x14
96#define RMONCNT rtc_base+0x18
97#define RYRCNT rtc_base+0x1c /* 16bit */
98#define RSECAR rtc_base+0x20
99#define RMINAR rtc_base+0x24
100#define RHRAR rtc_base+0x28
101#define RWKAR rtc_base+0x2c
102#define RDAYAR rtc_base+0x30
103#define RMONAR rtc_base+0x34
104#define RCR1 rtc_base+0x38
105#define RCR2 rtc_base+0x3c
106
107#ifndef BCD_TO_BIN
108#define BCD_TO_BIN(val) ((val)=((val)&15) + ((val)>>4)*10)
109#endif
110
111#ifndef BIN_TO_BCD
112#define BIN_TO_BCD(val) ((val)=(((val)/10)<<4) + (val)%10)
113#endif
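/* Worked example: BCD_TO_BIN(0x27) yields 7 + 2*10 = 27, and
   BIN_TO_BCD(59) yields (5 << 4) + 9 = 0x59. */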
114
115#define TICK_SIZE (tick_nsec / 1000)
116
117extern unsigned long wall_jiffies;
118
119u64 jiffies_64 = INITIAL_JIFFIES;
120
121static unsigned long tmu_base, rtc_base;
122unsigned long cprc_base;
123
124/* Variables to allow interpolation of time of day to resolution better than a
125 * jiffy. */
126
127/* This is effectively protected by xtime_lock */
128static unsigned long ctc_last_interrupt;
129static unsigned long long usecs_per_jiffy = 1000000/HZ; /* Approximation */
130
131#define CTC_JIFFY_SCALE_SHIFT 40
132
133/* 2**CTC_JIFFY_SCALE_SHIFT / ctc_ticks_per_jiffy */
134static unsigned long long scaled_recip_ctc_ticks_per_jiffy;
135
136/* Estimate number of microseconds that have elapsed since the last timer tick,
137   by scaling the delta that has occurred in the CTC register.
138
139 WARNING WARNING WARNING : This algorithm relies on the CTC decrementing at
140 the CPU clock rate. If the CPU sleeps, the CTC stops counting. Bear this
141 in mind if enabling SLEEP_WORKS in process.c. In that case, this algorithm
142 probably needs to use TMU.TCNT0 instead. This will work even if the CPU is
143 sleeping, though will be coarser.
144
145 FIXME : What if usecs_per_tick is moving around too much, e.g. if an adjtime
146 is running or if the freq or tick arguments of adjtimex are modified after
147 we have calibrated the scaling factor? This will result in either a jump at
148 the end of a tick period, or a wrap backwards at the start of the next one,
149 if the application is reading the time of day often enough. I think we
150 ought to do better than this. For this reason, usecs_per_jiffy is left
151 separated out in the calculation below. This allows some future hook into
152 the adjtime-related stuff in kernel/timer.c to remove this hazard.
153
154*/
155
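/* A sketch of the arithmetic with hypothetical figures: assume a 400MHz
   CPU clock and HZ == 100, so ctc_ticks_per_jiffy == 4,000,000 and
   scaled_recip_ctc_ticks_per_jiffy == 2^40 / 4,000,000 ~= 274,877.
   A delta of 2,000,000 CTC ticks (half a jiffy) then gives

       (2,000,000 * 10,000 * 274,877) >> 40  ~=  5,000 usecs

   i.e. half of the 10,000 usecs in a jiffy, as expected. */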
156static unsigned long usecs_since_tick(void)
157{
158 unsigned long long current_ctc;
159 long ctc_ticks_since_interrupt;
160 unsigned long long ull_ctc_ticks_since_interrupt;
161 unsigned long result;
162
163 unsigned long long mul1_out;
164 unsigned long long mul1_out_high;
165 unsigned long long mul2_out_low, mul2_out_high;
166
167 /* Read CTC register */
168 asm ("getcon cr62, %0" : "=r" (current_ctc));
169 /* Note, the CTC counts down on each CPU clock, not up.
170 Note(2), use long type to get correct wraparound arithmetic when
171 the counter crosses zero. */
172 ctc_ticks_since_interrupt = (long) ctc_last_interrupt - (long) current_ctc;
173 ull_ctc_ticks_since_interrupt = (unsigned long long) ctc_ticks_since_interrupt;
174
175 /* Inline assembly to do 32x32x32->64 multiplier */
176 asm volatile ("mulu.l %1, %2, %0" :
177 "=r" (mul1_out) :
178 "r" (ull_ctc_ticks_since_interrupt), "r" (usecs_per_jiffy));
179
180 mul1_out_high = mul1_out >> 32;
181
182 asm volatile ("mulu.l %1, %2, %0" :
183 "=r" (mul2_out_low) :
184 "r" (mul1_out), "r" (scaled_recip_ctc_ticks_per_jiffy));
185
186#if 1
187 asm volatile ("mulu.l %1, %2, %0" :
188 "=r" (mul2_out_high) :
189 "r" (mul1_out_high), "r" (scaled_recip_ctc_ticks_per_jiffy));
190#endif
191
192 result = (unsigned long) (((mul2_out_high << 32) + mul2_out_low) >> CTC_JIFFY_SCALE_SHIFT);
193
194 return result;
195}
196
197void do_gettimeofday(struct timeval *tv)
198{
199 unsigned long flags;
200 unsigned long seq;
201 unsigned long usec, sec;
202
203 do {
204 seq = read_seqbegin_irqsave(&xtime_lock, flags);
205 usec = usecs_since_tick();
206 {
207 unsigned long lost = jiffies - wall_jiffies;
208
209 if (lost)
210 usec += lost * (1000000 / HZ);
211 }
212
213 sec = xtime.tv_sec;
214 usec += xtime.tv_nsec / 1000;
215 } while (read_seqretry_irqrestore(&xtime_lock, seq, flags));
216
217 while (usec >= 1000000) {
218 usec -= 1000000;
219 sec++;
220 }
221
222 tv->tv_sec = sec;
223 tv->tv_usec = usec;
224}
225
226int do_settimeofday(struct timespec *tv)
227{
228 time_t wtm_sec, sec = tv->tv_sec;
229 long wtm_nsec, nsec = tv->tv_nsec;
230
231 if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
232 return -EINVAL;
233
234 write_seqlock_irq(&xtime_lock);
235 /*
236 * This is revolting. We need to set "xtime" correctly. However, the
237 * value in this location is the value at the most recent update of
238 * wall time. Discover what correction gettimeofday() would have
239 * made, and then undo it!
240 */
241 nsec -= 1000 * (usecs_since_tick() +
242 (jiffies - wall_jiffies) * (1000000 / HZ));
243
244 wtm_sec = wall_to_monotonic.tv_sec + (xtime.tv_sec - sec);
245 wtm_nsec = wall_to_monotonic.tv_nsec + (xtime.tv_nsec - nsec);
246
247 set_normalized_timespec(&xtime, sec, nsec);
248 set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec);
249
250 time_adjust = 0; /* stop active adjtime() */
251 time_status |= STA_UNSYNC;
252 time_maxerror = NTP_PHASE_LIMIT;
253 time_esterror = NTP_PHASE_LIMIT;
254 write_sequnlock_irq(&xtime_lock);
255 clock_was_set();
256
257 return 0;
258}
259
260static int set_rtc_time(unsigned long nowtime)
261{
262 int retval = 0;
263 int real_seconds, real_minutes, cmos_minutes;
264
265 ctrl_outb(RCR2_RESET, RCR2); /* Reset pre-scaler & stop RTC */
266
267 cmos_minutes = ctrl_inb(RMINCNT);
268 BCD_TO_BIN(cmos_minutes);
269
270 /*
271 * since we're only adjusting minutes and seconds,
272 * don't interfere with hour overflow. This avoids
273 * messing with unknown time zones but requires your
274 * RTC not to be off by more than 15 minutes
275 */
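	/* Worked example: if the difference between real_minutes and
	   cmos_minutes is roughly half an hour (15..44 minutes), then
	   ((abs(diff) + 15) / 30) is odd, so the code below assumes a
	   half-hour time zone and shifts real_minutes by 30 before the
	   final comparison. */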
276 real_seconds = nowtime % 60;
277 real_minutes = nowtime / 60;
278 if (((abs(real_minutes - cmos_minutes) + 15)/30) & 1)
279 real_minutes += 30; /* correct for half hour time zone */
280 real_minutes %= 60;
281
282 if (abs(real_minutes - cmos_minutes) < 30) {
283 BIN_TO_BCD(real_seconds);
284 BIN_TO_BCD(real_minutes);
285 ctrl_outb(real_seconds, RSECCNT);
286 ctrl_outb(real_minutes, RMINCNT);
287 } else {
288 printk(KERN_WARNING
289 "set_rtc_time: can't update from %d to %d\n",
290 cmos_minutes, real_minutes);
291 retval = -1;
292 }
293
294 ctrl_outb(RCR2_RTCEN|RCR2_START, RCR2); /* Start RTC */
295
296 return retval;
297}
298
299/* last time the RTC clock got updated */
300static long last_rtc_update = 0;
301
302/*
303 * timer_interrupt() needs to keep up the real-time clock,
304 * as well as call the "do_timer()" routine every clocktick
305 */
306static inline void do_timer_interrupt(int irq, void *dev_id, struct pt_regs *regs)
307{
308 unsigned long long current_ctc;
309 asm ("getcon cr62, %0" : "=r" (current_ctc));
310 ctc_last_interrupt = (unsigned long) current_ctc;
311
312 do_timer(regs);
313#ifndef CONFIG_SMP
314 update_process_times(user_mode(regs));
315#endif
316 profile_tick(CPU_PROFILING, regs);
317
318#ifdef CONFIG_HEARTBEAT
319 {
320 extern void heartbeat(void);
321
322 heartbeat();
323 }
324#endif
325
326 /*
327 * If we have an externally synchronized Linux clock, then update
328 * RTC clock accordingly every ~11 minutes. Set_rtc_mmss() has to be
329 * called as close as possible to 500 ms before the new second starts.
330 */
331 if ((time_status & STA_UNSYNC) == 0 &&
332 xtime.tv_sec > last_rtc_update + 660 &&
333 (xtime.tv_nsec / 1000) >= 500000 - ((unsigned) TICK_SIZE) / 2 &&
334 (xtime.tv_nsec / 1000) <= 500000 + ((unsigned) TICK_SIZE) / 2) {
335 if (set_rtc_time(xtime.tv_sec) == 0)
336 last_rtc_update = xtime.tv_sec;
337 else
338 last_rtc_update = xtime.tv_sec - 600; /* do it again in 60 s */
339 }
340}
341
342/*
343 * This is the same as the above, except we _also_ save the current
344 * Time Stamp Counter value at the time of the timer interrupt, so that
345 * we later on can estimate the time of day more exactly.
346 */
347static irqreturn_t timer_interrupt(int irq, void *dev_id, struct pt_regs *regs)
348{
349 unsigned long timer_status;
350
351 /* Clear UNF bit */
352 timer_status = ctrl_inw(TMU0_TCR);
353 timer_status &= ~0x100;
354 ctrl_outw(timer_status, TMU0_TCR);
355
356 /*
357 * Here we are in the timer irq handler. We just have irqs locally
358 * disabled but we don't know if the timer_bh is running on the other
359	 * CPU. We need to avoid an SMP race with it. NOTE: we don't need
360	 * the irq version of write_lock because, as just said, we have irqs
361	 * locally disabled. -arca
362 */
363 write_lock(&xtime_lock);
364 do_timer_interrupt(irq, NULL, regs);
365 write_unlock(&xtime_lock);
366
367 return IRQ_HANDLED;
368}
369
370static unsigned long get_rtc_time(void)
371{
372 unsigned int sec, min, hr, wk, day, mon, yr, yr100;
373
374 again:
375 do {
376 ctrl_outb(0, RCR1); /* Clear CF-bit */
377 sec = ctrl_inb(RSECCNT);
378 min = ctrl_inb(RMINCNT);
379 hr = ctrl_inb(RHRCNT);
380 wk = ctrl_inb(RWKCNT);
381 day = ctrl_inb(RDAYCNT);
382 mon = ctrl_inb(RMONCNT);
383 yr = ctrl_inw(RYRCNT);
384 yr100 = (yr >> 8);
385 yr &= 0xff;
386 } while ((ctrl_inb(RCR1) & RCR1_CF) != 0);
387
388 BCD_TO_BIN(yr100);
389 BCD_TO_BIN(yr);
390 BCD_TO_BIN(mon);
391 BCD_TO_BIN(day);
392 BCD_TO_BIN(hr);
393 BCD_TO_BIN(min);
394 BCD_TO_BIN(sec);
395
396 if (yr > 99 || mon < 1 || mon > 12 || day > 31 || day < 1 ||
397 hr > 23 || min > 59 || sec > 59) {
398 printk(KERN_ERR
399 "SH RTC: invalid value, resetting to 1 Jan 2000\n");
400 ctrl_outb(RCR2_RESET, RCR2); /* Reset & Stop */
401 ctrl_outb(0, RSECCNT);
402 ctrl_outb(0, RMINCNT);
403 ctrl_outb(0, RHRCNT);
404 ctrl_outb(6, RWKCNT);
405 ctrl_outb(1, RDAYCNT);
406 ctrl_outb(1, RMONCNT);
407 ctrl_outw(0x2000, RYRCNT);
408 ctrl_outb(RCR2_RTCEN|RCR2_START, RCR2); /* Start */
409 goto again;
410 }
411
412 return mktime(yr100 * 100 + yr, mon, day, hr, min, sec);
413}
414
415static __init unsigned int get_cpu_hz(void)
416{
417 unsigned int count;
418 unsigned long __dummy;
419 unsigned long ctc_val_init, ctc_val;
420
421 /*
422	** Regardless of the toolchain, force the compiler to use the
423 ** arbitrary register r3 as a clock tick counter.
424 ** NOTE: r3 must be in accordance with rtc_interrupt()
425 */
426 register unsigned long long __rtc_irq_flag __asm__ ("r3");
427
428 local_irq_enable();
429 do {} while (ctrl_inb(R64CNT) != 0);
430 ctrl_outb(RCR1_CIE, RCR1); /* Enable carry interrupt */
431
432 /*
433 * r3 is arbitrary. CDC does not support "=z".
434 */
435 ctc_val_init = 0xffffffff;
436 ctc_val = ctc_val_init;
437
438 asm volatile("gettr tr0, %1\n\t"
439 "putcon %0, " __CTC "\n\t"
440 "and %2, r63, %2\n\t"
441 "pta $+4, tr0\n\t"
442 "beq/l %2, r63, tr0\n\t"
443 "ptabs %1, tr0\n\t"
444 "getcon " __CTC ", %0\n\t"
445 : "=r"(ctc_val), "=r" (__dummy), "=r" (__rtc_irq_flag)
446 : "0" (0));
447 local_irq_disable();
448 /*
449 * SH-3:
450 * CPU clock = 4 stages * loop
451 * tst rm,rm if id ex
452 * bt/s 1b if id ex
453 * add #1,rd if id ex
454 * (if) pipe line stole
455 * tst rm,rm if id ex
456 * ....
457 *
458 *
459 * SH-4:
460 * CPU clock = 6 stages * loop
461 * I don't know why.
462 * ....
463 *
464 * SH-5:
465 * Use CTC register to count. This approach returns the right value
466 * even if the I-cache is disabled (e.g. whilst debugging.)
467 *
468 */
469
470 count = ctc_val_init - ctc_val; /* CTC counts down */
471
472#if defined (CONFIG_SH_SIMULATOR)
473 /*
474	 * Let's pretend we are a 5MHz SH-5 to avoid too small a
475	 * timer interval, and to keep the delay calibration
476	 * within a reasonable time.
477 */
478 return 5000000;
479#else
480 /*
481	 * This really is the raw clock-cycle count, scaled by the
482	 * ratio between a complete R64CNT wrap-around (128) and the
483	 * CUI interrupt being raised (64).
484 */
485 return count*2;
486#endif
487}
488
489static irqreturn_t rtc_interrupt(int irq, void *dev_id, struct pt_regs *regs)
490{
491 ctrl_outb(0, RCR1); /* Disable Carry Interrupts */
492 regs->regs[3] = 1; /* Using r3 */
493
494 return IRQ_HANDLED;
495}
496
497static struct irqaction irq0 = { timer_interrupt, SA_INTERRUPT, CPU_MASK_NONE, "timer", NULL, NULL};
498static struct irqaction irq1 = { rtc_interrupt, SA_INTERRUPT, CPU_MASK_NONE, "rtc", NULL, NULL};
499
500void __init time_init(void)
501{
502 unsigned int cpu_clock, master_clock, bus_clock, module_clock;
503 unsigned long interval;
504 unsigned long frqcr, ifc, pfc;
505 static int ifc_table[] = { 2, 4, 6, 8, 10, 12, 16, 24 };
506#define bfc_table ifc_table /* Same */
507#define pfc_table ifc_table /* Same */
508
509 tmu_base = onchip_remap(TMU_BASE, 1024, "TMU");
510 if (!tmu_base) {
511 panic("Unable to remap TMU\n");
512 }
513
514 rtc_base = onchip_remap(RTC_BASE, 1024, "RTC");
515 if (!rtc_base) {
516 panic("Unable to remap RTC\n");
517 }
518
519 cprc_base = onchip_remap(CPRC_BASE, 1024, "CPRC");
520 if (!cprc_base) {
521 panic("Unable to remap CPRC\n");
522 }
523
524 xtime.tv_sec = get_rtc_time();
525 xtime.tv_nsec = 0;
526
527 setup_irq(TIMER_IRQ, &irq0);
528 setup_irq(RTC_IRQ, &irq1);
529
530 /* Check how fast it is.. */
531 cpu_clock = get_cpu_hz();
532
533 /* Note careful order of operations to maintain reasonable precision and avoid overflow. */
534 scaled_recip_ctc_ticks_per_jiffy = ((1ULL << CTC_JIFFY_SCALE_SHIFT) / (unsigned long long)(cpu_clock / HZ));
535
536 disable_irq(RTC_IRQ);
537
538 printk("CPU clock: %d.%02dMHz\n",
539 (cpu_clock / 1000000), (cpu_clock % 1000000)/10000);
540 {
541 unsigned short bfc;
542 frqcr = ctrl_inl(FRQCR);
543 ifc = ifc_table[(frqcr>> 6) & 0x0007];
544 bfc = bfc_table[(frqcr>> 3) & 0x0007];
545 pfc = pfc_table[(frqcr>> 12) & 0x0007];
546 master_clock = cpu_clock * ifc;
547 bus_clock = master_clock/bfc;
548 }
549
550 printk("Bus clock: %d.%02dMHz\n",
551 (bus_clock/1000000), (bus_clock % 1000000)/10000);
552 module_clock = master_clock/pfc;
553 printk("Module clock: %d.%02dMHz\n",
554 (module_clock/1000000), (module_clock % 1000000)/10000);
555 interval = (module_clock/(HZ*4));
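	/* The divide by 4 is assumed to match the TMU prescaler:
	   TMU0_TCR_INIT (0x0020) sets UNIE (underflow interrupt enable)
	   and leaves TPSC at 0, which selects the module clock / 4 as
	   the count input, so TMU0 underflows once per jiffy. */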
556
557 printk("Interval = %ld\n", interval);
558
559 current_cpu_data.cpu_clock = cpu_clock;
560 current_cpu_data.master_clock = master_clock;
561 current_cpu_data.bus_clock = bus_clock;
562 current_cpu_data.module_clock = module_clock;
563
564 /* Start TMU0 */
565 ctrl_outb(TMU_TSTR_OFF, TMU_TSTR);
566 ctrl_outb(TMU_TOCR_INIT, TMU_TOCR);
567 ctrl_outw(TMU0_TCR_INIT, TMU0_TCR);
568 ctrl_outl(interval, TMU0_TCOR);
569 ctrl_outl(interval, TMU0_TCNT);
570 ctrl_outb(TMU_TSTR_INIT, TMU_TSTR);
571}
572
573void enter_deep_standby(void)
574{
575 /* Disable watchdog timer */
576 ctrl_outl(0xa5000000, WTCSR);
577 /* Configure deep standby on sleep */
578 ctrl_outl(0x03, STBCR);
579
580#ifdef CONFIG_SH_ALPHANUMERIC
581 {
582 extern void mach_alphanum(int position, unsigned char value);
583 extern void mach_alphanum_brightness(int setting);
584 char halted[] = "Halted. ";
585 int i;
586 mach_alphanum_brightness(6); /* dimmest setting above off */
587 for (i=0; i<8; i++) {
588 mach_alphanum(i, halted[i]);
589 }
590 asm __volatile__ ("synco");
591 }
592#endif
593
594 asm __volatile__ ("sleep");
595 asm __volatile__ ("synci");
596 asm __volatile__ ("nop");
597 asm __volatile__ ("nop");
598 asm __volatile__ ("nop");
599 asm __volatile__ ("nop");
600 panic("Unexpected wakeup!\n");
601}
602
603/*
604 * Scheduler clock - returns current time in nanosec units.
605 */
606unsigned long long sched_clock(void)
607{
608 return (unsigned long long)jiffies * (1000000000 / HZ);
609}
610
diff --git a/arch/sh64/kernel/traps.c b/arch/sh64/kernel/traps.c
new file mode 100644
index 000000000000..224b7f5b9224
--- /dev/null
+++ b/arch/sh64/kernel/traps.c
@@ -0,0 +1,961 @@
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * arch/sh64/kernel/traps.c
7 *
8 * Copyright (C) 2000, 2001 Paolo Alberelli
9 * Copyright (C) 2003, 2004 Paul Mundt
10 * Copyright (C) 2003, 2004 Richard Curnow
11 *
12 */
13
14/*
15 * 'Traps.c' handles hardware traps and faults after we have saved some
16 * state in 'entry.S'.
17 */
18#include <linux/sched.h>
19#include <linux/kernel.h>
20#include <linux/string.h>
21#include <linux/errno.h>
22#include <linux/ptrace.h>
23#include <linux/timer.h>
24#include <linux/mm.h>
25#include <linux/smp.h>
26#include <linux/smp_lock.h>
27#include <linux/init.h>
28#include <linux/delay.h>
29#include <linux/spinlock.h>
30#include <linux/kallsyms.h>
31#include <linux/interrupt.h>
32#include <linux/sysctl.h>
33#include <linux/module.h>
34
35#include <asm/system.h>
36#include <asm/uaccess.h>
37#include <asm/io.h>
38#include <asm/atomic.h>
39#include <asm/processor.h>
40#include <asm/pgtable.h>
41
42#undef DEBUG_EXCEPTION
43#ifdef DEBUG_EXCEPTION
44/* implemented in ../lib/dbg.c */
45extern void show_excp_regs(char *fname, int trapnr, int signr,
46 struct pt_regs *regs);
47#else
48#define show_excp_regs(a, b, c, d)
49#endif
50
51static void do_unhandled_exception(int trapnr, int signr, char *str, char *fn_name,
52 unsigned long error_code, struct pt_regs *regs, struct task_struct *tsk);
53
54#define DO_ERROR(trapnr, signr, str, name, tsk) \
55asmlinkage void do_##name(unsigned long error_code, struct pt_regs *regs) \
56{ \
57 do_unhandled_exception(trapnr, signr, str, __stringify(name), error_code, regs, current); \
58}
59
60spinlock_t die_lock;
61
62void die(const char * str, struct pt_regs * regs, long err)
63{
64 console_verbose();
65 spin_lock_irq(&die_lock);
66 printk("%s: %lx\n", str, (err & 0xffffff));
67 show_regs(regs);
68 spin_unlock_irq(&die_lock);
69 do_exit(SIGSEGV);
70}
71
72static inline void die_if_kernel(const char * str, struct pt_regs * regs, long err)
73{
74 if (!user_mode(regs))
75 die(str, regs, err);
76}
77
78static void die_if_no_fixup(const char * str, struct pt_regs * regs, long err)
79{
80 if (!user_mode(regs)) {
81 const struct exception_table_entry *fixup;
82 fixup = search_exception_tables(regs->pc);
83 if (fixup) {
84 regs->pc = fixup->fixup;
85 return;
86 }
87 die(str, regs, err);
88 }
89}
90
91DO_ERROR(13, SIGILL, "illegal slot instruction", illegal_slot_inst, current)
92DO_ERROR(87, SIGSEGV, "address error (exec)", address_error_exec, current)
93
94
95/* Implement misaligned load/store handling for kernel (and optionally for user
96 mode too). Limitation : only SHmedia mode code is handled - there is no
97 handling at all for misaligned accesses occurring in SHcompact code yet. */
98
99static int misaligned_fixup(struct pt_regs *regs);
100
101asmlinkage void do_address_error_load(unsigned long error_code, struct pt_regs *regs)
102{
103 if (misaligned_fixup(regs) < 0) {
104 do_unhandled_exception(7, SIGSEGV, "address error(load)",
105 "do_address_error_load",
106 error_code, regs, current);
107 }
108 return;
109}
110
111asmlinkage void do_address_error_store(unsigned long error_code, struct pt_regs *regs)
112{
113 if (misaligned_fixup(regs) < 0) {
114 do_unhandled_exception(8, SIGSEGV, "address error(store)",
115 "do_address_error_store",
116 error_code, regs, current);
117 }
118 return;
119}
120
121#if defined(CONFIG_SH64_ID2815_WORKAROUND)
122
123#define OPCODE_INVALID 0
124#define OPCODE_USER_VALID 1
125#define OPCODE_PRIV_VALID 2
126
127/* getcon/putcon - requires checking which control register is referenced. */
128#define OPCODE_CTRL_REG 3
129
130/* Table of valid opcodes for SHmedia mode.
131 Form a 10-bit value by concatenating the major/minor opcodes i.e.
132 opcode[31:26,20:16]. The 6 MSBs of this value index into the following
133 array. The 4 LSBs select the bit-pair in the entry (bits 1:0 correspond to
134 LSBs==4'b0000 etc). */
135static unsigned long shmedia_opcode_table[64] = {
136 0x55554044,0x54445055,0x15141514,0x14541414,0x00000000,0x10001000,0x01110055,0x04050015,
137 0x00000444,0xc0000000,0x44545515,0x40405555,0x55550015,0x10005555,0x55555505,0x04050000,
138 0x00000555,0x00000404,0x00040445,0x15151414,0x00000000,0x00000000,0x00000000,0x00000000,
139 0x00000055,0x40404444,0x00000404,0xc0009495,0x00000000,0x00000000,0x00000000,0x00000000,
140 0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,
141 0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,
142 0x80005050,0x04005055,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,
143 0x81055554,0x00000404,0x55555555,0x55555555,0x00000000,0x00000000,0x00000000,0x00000000
144};
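/* Illustrative lookup, mirroring the decode in do_reserved_inst() below:

       major = (opcode >> 26) & 0x3f;
       minor = (opcode >> 16) & 0xf;
       state = (shmedia_opcode_table[major] >> (minor << 1)) & 0x3;

   where 'state' is one of the OPCODE_* values defined above. */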
145
146void do_reserved_inst(unsigned long error_code, struct pt_regs *regs)
147{
148 /* Workaround SH5-101 cut2 silicon defect #2815 :
149 in some situations, inter-mode branches from SHcompact -> SHmedia
150 which should take ITLBMISS or EXECPROT exceptions at the target
151 falsely take RESINST at the target instead. */
152
153 unsigned long opcode = 0x6ff4fff0; /* guaranteed reserved opcode */
154 unsigned long pc, aligned_pc;
155 int get_user_error;
156 int trapnr = 12;
157 int signr = SIGILL;
158 char *exception_name = "reserved_instruction";
159
160 pc = regs->pc;
161 if ((pc & 3) == 1) {
162 /* SHmedia : check for defect. This requires executable vmas
163 to be readable too. */
164 aligned_pc = pc & ~3;
165 if (!access_ok(VERIFY_READ, aligned_pc, sizeof(unsigned long))) {
166 get_user_error = -EFAULT;
167 } else {
168 get_user_error = __get_user(opcode, (unsigned long *)aligned_pc);
169 }
170 if (get_user_error >= 0) {
171 unsigned long index, shift;
172 unsigned long major, minor, combined;
173 unsigned long reserved_field;
174 reserved_field = opcode & 0xf; /* These bits are currently reserved as zero in all valid opcodes */
175 major = (opcode >> 26) & 0x3f;
176 minor = (opcode >> 16) & 0xf;
177 combined = (major << 4) | minor;
178 index = major;
179 shift = minor << 1;
180 if (reserved_field == 0) {
181 int opcode_state = (shmedia_opcode_table[index] >> shift) & 0x3;
182 switch (opcode_state) {
183 case OPCODE_INVALID:
184 /* Trap. */
185 break;
186 case OPCODE_USER_VALID:
187 /* Restart the instruction : the branch to the instruction will now be from an RTE
188 not from SHcompact so the silicon defect won't be triggered. */
189 return;
190 case OPCODE_PRIV_VALID:
191 if (!user_mode(regs)) {
192 /* Should only ever get here if a module has
193 SHcompact code inside it. If so, the same fix up is needed. */
194 return; /* same reason */
195 }
196 /* Otherwise, user mode trying to execute a privileged instruction -
197 fall through to trap. */
198 break;
199 case OPCODE_CTRL_REG:
200 /* If in privileged mode, return as above. */
201 if (!user_mode(regs)) return;
202 /* In user mode ... */
203 if (combined == 0x9f) { /* GETCON */
204 unsigned long regno = (opcode >> 20) & 0x3f;
205 if (regno >= 62) {
206 return;
207 }
208 /* Otherwise, reserved or privileged control register, => trap */
209 } else if (combined == 0x1bf) { /* PUTCON */
210 unsigned long regno = (opcode >> 4) & 0x3f;
211 if (regno >= 62) {
212 return;
213 }
214 /* Otherwise, reserved or privileged control register, => trap */
215 } else {
216 /* Trap */
217 }
218 break;
219 default:
220 /* Fall through to trap. */
221 break;
222 }
223 }
224 /* fall through to normal resinst processing */
225 } else {
226 /* Error trying to read opcode. This typically means a
227 real fault, not a RESINST any more. So change the
228 codes. */
229 trapnr = 87;
230 exception_name = "address error (exec)";
231 signr = SIGSEGV;
232 }
233 }
234
235 do_unhandled_exception(trapnr, signr, exception_name, "do_reserved_inst", error_code, regs, current);
236}
237
238#else /* CONFIG_SH64_ID2815_WORKAROUND */
239
240/* If the workaround isn't needed, this is just a straightforward reserved
241 instruction */
242DO_ERROR(12, SIGILL, "reserved instruction", reserved_inst, current)
243
244#endif /* CONFIG_SH64_ID2815_WORKAROUND */
245
246
247#include <asm/system.h>
248
249/* Called with interrupts disabled */
250asmlinkage void do_exception_error(unsigned long ex, struct pt_regs *regs)
251{
252 PLS();
253 show_excp_regs(__FUNCTION__, -1, -1, regs);
254 die_if_kernel("exception", regs, ex);
255}
256
257int do_unknown_trapa(unsigned long scId, struct pt_regs *regs)
258{
259 /* Syscall debug */
260 printk("System call ID error: [0x1#args:8 #syscall:16 0x%lx]\n", scId);
261
262 die_if_kernel("unknown trapa", regs, scId);
263
264 return -ENOSYS;
265}
266
267void show_stack(struct task_struct *tsk, unsigned long *sp)
268{
269#ifdef CONFIG_KALLSYMS
270 extern void sh64_unwind(struct pt_regs *regs);
271 struct pt_regs *regs;
272
273 regs = tsk ? tsk->thread.kregs : NULL;
274
275 sh64_unwind(regs);
276#else
277 printk(KERN_ERR "Can't backtrace on sh64 without CONFIG_KALLSYMS\n");
278#endif
279}
280
281void show_task(unsigned long *sp)
282{
283 show_stack(NULL, sp);
284}
285
286void dump_stack(void)
287{
288 show_task(NULL);
289}
290/* Needed by any user of WARN_ON in view of the defn in include/asm-sh/bug.h */
291EXPORT_SYMBOL(dump_stack);
292
293static void do_unhandled_exception(int trapnr, int signr, char *str, char *fn_name,
294 unsigned long error_code, struct pt_regs *regs, struct task_struct *tsk)
295{
296 show_excp_regs(fn_name, trapnr, signr, regs);
297 tsk->thread.error_code = error_code;
298 tsk->thread.trap_no = trapnr;
299
300 if (user_mode(regs))
301 force_sig(signr, tsk);
302
303 die_if_no_fixup(str, regs, error_code);
304}
305
306static int read_opcode(unsigned long long pc, unsigned long *result_opcode, int from_user_mode)
307{
308 int get_user_error;
309 unsigned long aligned_pc;
310 unsigned long opcode;
311
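	/* The low PC bits encode the ISA mode: (pc & 3) == 1 marks SHmedia
	   (4-byte instructions fetched from pc & ~3), an even pc marks
	   SHcompact, and any other encoding is a misaligned PC. */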
312 if ((pc & 3) == 1) {
313 /* SHmedia */
314 aligned_pc = pc & ~3;
315 if (from_user_mode) {
316 if (!access_ok(VERIFY_READ, aligned_pc, sizeof(unsigned long))) {
317 get_user_error = -EFAULT;
318 } else {
319 get_user_error = __get_user(opcode, (unsigned long *)aligned_pc);
320 *result_opcode = opcode;
321 }
322 return get_user_error;
323 } else {
324 /* If the fault was in the kernel, we can either read
325 * this directly, or if not, we fault.
326 */
327 *result_opcode = *(unsigned long *) aligned_pc;
328 return 0;
329 }
330 } else if ((pc & 1) == 0) {
331 /* SHcompact */
332 /* TODO : provide handling for this. We don't really support
333 user-mode SHcompact yet, and for a kernel fault, this would
334 have to come from a module built for SHcompact. */
335 return -EFAULT;
336 } else {
337 /* misaligned */
338 return -EFAULT;
339 }
340}
341
342static int address_is_sign_extended(__u64 a)
343{
344 __u64 b;
345#if (NEFF == 32)
346 b = (__u64)(__s64)(__s32)(a & 0xffffffffUL);
347 return (b == a) ? 1 : 0;
348#else
349#error "Sign extend check only works for NEFF==32"
350#endif
351}
352
353static int generate_and_check_address(struct pt_regs *regs,
354 __u32 opcode,
355 int displacement_not_indexed,
356 int width_shift,
357 __u64 *address)
358{
359 /* return -1 for fault, 0 for OK */
360
361 __u64 base_address, addr;
362 int basereg;
363
364 basereg = (opcode >> 20) & 0x3f;
365 base_address = regs->regs[basereg];
366 if (displacement_not_indexed) {
367 __s64 displacement;
368 displacement = (opcode >> 10) & 0x3ff;
369 displacement = ((displacement << 54) >> 54); /* sign extend */
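		/* e.g. a raw 10-bit field of 0x3ff sign-extends to -1, 0x200 to -512 */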
370 addr = (__u64)((__s64)base_address + (displacement << width_shift));
371 } else {
372 __u64 offset;
373 int offsetreg;
374 offsetreg = (opcode >> 10) & 0x3f;
375 offset = regs->regs[offsetreg];
376 addr = base_address + offset;
377 }
378
379 /* Check sign extended */
380 if (!address_is_sign_extended(addr)) {
381 return -1;
382 }
383
384#if defined(CONFIG_SH64_USER_MISALIGNED_FIXUP)
385 /* Check accessible. For misaligned access in the kernel, assume the
386 address is always accessible (and if not, just fault when the
387 load/store gets done.) */
388 if (user_mode(regs)) {
389 if (addr >= TASK_SIZE) {
390 return -1;
391 }
392 /* Do access_ok check later - it depends on whether it's a load or a store. */
393 }
394#endif
395
396 *address = addr;
397 return 0;
398}
399
400/* Default value as for sh */
401#if defined(CONFIG_SH64_USER_MISALIGNED_FIXUP)
402static int user_mode_unaligned_fixup_count = 10;
403static int user_mode_unaligned_fixup_enable = 1;
404#endif
405
406static int kernel_mode_unaligned_fixup_count = 32;
407
408static void misaligned_kernel_word_load(__u64 address, int do_sign_extend, __u64 *result)
409{
410 unsigned short x;
411 unsigned char *p, *q;
412 p = (unsigned char *) (int) address;
413 q = (unsigned char *) &x;
414 q[0] = p[0];
415 q[1] = p[1];
416
417 if (do_sign_extend) {
418 *result = (__u64)(__s64) *(short *) &x;
419 } else {
420 *result = (__u64) x;
421 }
422}
423
424static void misaligned_kernel_word_store(__u64 address, __u64 value)
425{
426 unsigned short x;
427 unsigned char *p, *q;
428 p = (unsigned char *) (int) address;
429 q = (unsigned char *) &x;
430
431 x = (__u16) value;
432 p[0] = q[0];
433 p[1] = q[1];
434}
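/* Both helpers above copy the halfword a byte at a time deliberately:
   byte accesses can never be misaligned, so the emulation itself cannot
   re-trigger the alignment trap it is handling. */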
435
436static int misaligned_load(struct pt_regs *regs,
437 __u32 opcode,
438 int displacement_not_indexed,
439 int width_shift,
440 int do_sign_extend)
441{
442 /* Return -1 for a fault, 0 for OK */
443 int error;
444 int destreg;
445 __u64 address;
446
447 error = generate_and_check_address(regs, opcode,
448 displacement_not_indexed, width_shift, &address);
449 if (error < 0) {
450 return error;
451 }
452
453 destreg = (opcode >> 4) & 0x3f;
454#if defined(CONFIG_SH64_USER_MISALIGNED_FIXUP)
455 if (user_mode(regs)) {
456 __u64 buffer;
457
458 if (!access_ok(VERIFY_READ, (unsigned long) address, 1UL<<width_shift)) {
459 return -1;
460 }
461
462 if (__copy_user(&buffer, (const void *)(int)address, (1 << width_shift)) > 0) {
463 return -1; /* fault */
464 }
465 switch (width_shift) {
466 case 1:
467 if (do_sign_extend) {
468 regs->regs[destreg] = (__u64)(__s64) *(__s16 *) &buffer;
469 } else {
470 regs->regs[destreg] = (__u64) *(__u16 *) &buffer;
471 }
472 break;
473 case 2:
474 regs->regs[destreg] = (__u64)(__s64) *(__s32 *) &buffer;
475 break;
476 case 3:
477 regs->regs[destreg] = buffer;
478 break;
479 default:
480 printk("Unexpected width_shift %d in misaligned_load, PC=%08lx\n",
481 width_shift, (unsigned long) regs->pc);
482 break;
483 }
484 } else
485#endif
486 {
487 /* kernel mode - we can take short cuts since if we fault, it's a genuine bug */
488 __u64 lo, hi;
489
490 switch (width_shift) {
491 case 1:
492 misaligned_kernel_word_load(address, do_sign_extend, &regs->regs[destreg]);
493 break;
494 case 2:
495 asm ("ldlo.l %1, 0, %0" : "=r" (lo) : "r" (address));
496 asm ("ldhi.l %1, 3, %0" : "=r" (hi) : "r" (address));
497 regs->regs[destreg] = lo | hi;
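			/* ldlo/ldhi are SH-5's unaligned-access primitives
			   (in the spirit of MIPS lwl/lwr): each fetches the
			   bytes of the misaligned word on its side of the
			   alignment boundary, so OR-ing them reassembles
			   the full value. */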
498 break;
499 case 3:
500 asm ("ldlo.q %1, 0, %0" : "=r" (lo) : "r" (address));
501 asm ("ldhi.q %1, 7, %0" : "=r" (hi) : "r" (address));
502 regs->regs[destreg] = lo | hi;
503 break;
504
505 default:
506 printk("Unexpected width_shift %d in misaligned_load, PC=%08lx\n",
507 width_shift, (unsigned long) regs->pc);
508 break;
509 }
510 }
511
512 return 0;
513
514}
515
516static int misaligned_store(struct pt_regs *regs,
517 __u32 opcode,
518 int displacement_not_indexed,
519 int width_shift)
520{
521 /* Return -1 for a fault, 0 for OK */
522 int error;
523 int srcreg;
524 __u64 address;
525
526 error = generate_and_check_address(regs, opcode,
527 displacement_not_indexed, width_shift, &address);
528 if (error < 0) {
529 return error;
530 }
531
532 srcreg = (opcode >> 4) & 0x3f;
533#if defined(CONFIG_SH64_USER_MISALIGNED_FIXUP)
534 if (user_mode(regs)) {
535 __u64 buffer;
536
537 if (!access_ok(VERIFY_WRITE, (unsigned long) address, 1UL<<width_shift)) {
538 return -1;
539 }
540
541 switch (width_shift) {
542 case 1:
543 *(__u16 *) &buffer = (__u16) regs->regs[srcreg];
544 break;
545 case 2:
546 *(__u32 *) &buffer = (__u32) regs->regs[srcreg];
547 break;
548 case 3:
549 buffer = regs->regs[srcreg];
550 break;
551 default:
552 printk("Unexpected width_shift %d in misaligned_store, PC=%08lx\n",
553 width_shift, (unsigned long) regs->pc);
554 break;
555 }
556
557 if (__copy_user((void *)(int)address, &buffer, (1 << width_shift)) > 0) {
558 return -1; /* fault */
559 }
560 } else
561#endif
562 {
563 /* kernel mode - we can take short cuts since if we fault, it's a genuine bug */
564 __u64 val = regs->regs[srcreg];
565
566 switch (width_shift) {
567 case 1:
568 misaligned_kernel_word_store(address, val);
569 break;
570 case 2:
571 asm ("stlo.l %1, 0, %0" : : "r" (val), "r" (address));
572 asm ("sthi.l %1, 3, %0" : : "r" (val), "r" (address));
573 break;
574 case 3:
575 asm ("stlo.q %1, 0, %0" : : "r" (val), "r" (address));
576 asm ("sthi.q %1, 7, %0" : : "r" (val), "r" (address));
577 break;
578
579 default:
580 printk("Unexpected width_shift %d in misaligned_store, PC=%08lx\n",
581 width_shift, (unsigned long) regs->pc);
582 break;
583 }
584 }
585
586 return 0;
587
588}
589
590#if defined(CONFIG_SH64_USER_MISALIGNED_FIXUP)
591/* Never need to fix up misaligned FPU accesses within the kernel since that's a real
592 error. */
593static int misaligned_fpu_load(struct pt_regs *regs,
594 __u32 opcode,
595 int displacement_not_indexed,
596 int width_shift,
597 int do_paired_load)
598{
599 /* Return -1 for a fault, 0 for OK */
600 int error;
601 int destreg;
602 __u64 address;
603
604 error = generate_and_check_address(regs, opcode,
605 displacement_not_indexed, width_shift, &address);
606 if (error < 0) {
607 return error;
608 }
609
610 destreg = (opcode >> 4) & 0x3f;
611 if (user_mode(regs)) {
612 __u64 buffer;
613 __u32 buflo, bufhi;
614
615 if (!access_ok(VERIFY_READ, (unsigned long) address, 1UL<<width_shift)) {
616 return -1;
617 }
618
619 if (__copy_user(&buffer, (const void *)(int)address, (1 << width_shift)) > 0) {
620 return -1; /* fault */
621 }
622 /* 'current' may be the current owner of the FPU state, so
623 context switch the registers into memory so they can be
624 indexed by register number. */
625 if (last_task_used_math == current) {
626 grab_fpu();
627 fpsave(&current->thread.fpu.hard);
628 release_fpu();
629 last_task_used_math = NULL;
630 regs->sr |= SR_FD;
631 }
632
633 buflo = *(__u32*) &buffer;
634 bufhi = *(1 + (__u32*) &buffer);
635
636 switch (width_shift) {
637 case 2:
638 current->thread.fpu.hard.fp_regs[destreg] = buflo;
639 break;
640 case 3:
641 if (do_paired_load) {
642 current->thread.fpu.hard.fp_regs[destreg] = buflo;
643 current->thread.fpu.hard.fp_regs[destreg+1] = bufhi;
644 } else {
645#if defined(CONFIG_LITTLE_ENDIAN)
646 current->thread.fpu.hard.fp_regs[destreg] = bufhi;
647 current->thread.fpu.hard.fp_regs[destreg+1] = buflo;
648#else
649 current->thread.fpu.hard.fp_regs[destreg] = buflo;
650 current->thread.fpu.hard.fp_regs[destreg+1] = bufhi;
651#endif
652 }
653 break;
654 default:
655 printk("Unexpected width_shift %d in misaligned_fpu_load, PC=%08lx\n",
656 width_shift, (unsigned long) regs->pc);
657 break;
658 }
659 return 0;
660 } else {
661 die ("Misaligned FPU load inside kernel", regs, 0);
662 return -1;
663 }
664
665
666}
667
668static int misaligned_fpu_store(struct pt_regs *regs,
669 __u32 opcode,
670 int displacement_not_indexed,
671 int width_shift,
672 int do_paired_load)
673{
674 /* Return -1 for a fault, 0 for OK */
675 int error;
676 int srcreg;
677 __u64 address;
678
679 error = generate_and_check_address(regs, opcode,
680 displacement_not_indexed, width_shift, &address);
681 if (error < 0) {
682 return error;
683 }
684
685 srcreg = (opcode >> 4) & 0x3f;
686 if (user_mode(regs)) {
687 __u64 buffer;
688 /* Initialise these to NaNs. */
689 __u32 buflo=0xffffffffUL, bufhi=0xffffffffUL;
690
691 if (!access_ok(VERIFY_WRITE, (unsigned long) address, 1UL<<width_shift)) {
692 return -1;
693 }
694
695 /* 'current' may be the current owner of the FPU state, so
696 context switch the registers into memory so they can be
697 indexed by register number. */
698 if (last_task_used_math == current) {
699 grab_fpu();
700 fpsave(&current->thread.fpu.hard);
701 release_fpu();
702 last_task_used_math = NULL;
703 regs->sr |= SR_FD;
704 }
705
706 switch (width_shift) {
707 case 2:
708 buflo = current->thread.fpu.hard.fp_regs[srcreg];
709 break;
710 case 3:
711 if (do_paired_load) {
712 buflo = current->thread.fpu.hard.fp_regs[srcreg];
713 bufhi = current->thread.fpu.hard.fp_regs[srcreg+1];
714 } else {
715#if defined(CONFIG_LITTLE_ENDIAN)
716 bufhi = current->thread.fpu.hard.fp_regs[srcreg];
717 buflo = current->thread.fpu.hard.fp_regs[srcreg+1];
718#else
719 buflo = current->thread.fpu.hard.fp_regs[srcreg];
720 bufhi = current->thread.fpu.hard.fp_regs[srcreg+1];
721#endif
722 }
723 break;
724 default:
725 printk("Unexpected width_shift %d in misaligned_fpu_store, PC=%08lx\n",
726 width_shift, (unsigned long) regs->pc);
727 break;
728 }
729
730 *(__u32*) &buffer = buflo;
731 *(1 + (__u32*) &buffer) = bufhi;
732 if (__copy_user((void *)(int)address, &buffer, (1 << width_shift)) > 0) {
733 return -1; /* fault */
734 }
735 return 0;
736 } else {
737		die ("Misaligned FPU store inside kernel", regs, 0);
738 return -1;
739 }
740}
741#endif
742
743static int misaligned_fixup(struct pt_regs *regs)
744{
745 unsigned long opcode;
746 int error;
747 int major, minor;
748
749#if !defined(CONFIG_SH64_USER_MISALIGNED_FIXUP)
750 /* Never fixup user mode misaligned accesses without this option enabled. */
751 return -1;
752#else
753 if (!user_mode_unaligned_fixup_enable) return -1;
754#endif
755
756 error = read_opcode(regs->pc, &opcode, user_mode(regs));
757 if (error < 0) {
758 return error;
759 }
760 major = (opcode >> 26) & 0x3f;
761 minor = (opcode >> 16) & 0xf;
762
763#if defined(CONFIG_SH64_USER_MISALIGNED_FIXUP)
764 if (user_mode(regs) && (user_mode_unaligned_fixup_count > 0)) {
765 --user_mode_unaligned_fixup_count;
766 /* Only do 'count' worth of these reports, to remove a potential DoS against syslog */
767 printk("Fixing up unaligned userspace access in \"%s\" pid=%d pc=0x%08x ins=0x%08lx\n",
768 current->comm, current->pid, (__u32)regs->pc, opcode);
769 } else
770#endif
771 if (!user_mode(regs) && (kernel_mode_unaligned_fixup_count > 0)) {
772 --kernel_mode_unaligned_fixup_count;
773 if (in_interrupt()) {
774 printk("Fixing up unaligned kernelspace access in interrupt pc=0x%08x ins=0x%08lx\n",
775 (__u32)regs->pc, opcode);
776 } else {
777 printk("Fixing up unaligned kernelspace access in \"%s\" pid=%d pc=0x%08x ins=0x%08lx\n",
778 current->comm, current->pid, (__u32)regs->pc, opcode);
779 }
780 }
781
782
783 switch (major) {
784 case (0x84>>2): /* LD.W */
785 error = misaligned_load(regs, opcode, 1, 1, 1);
786 break;
787 case (0xb0>>2): /* LD.UW */
788 error = misaligned_load(regs, opcode, 1, 1, 0);
789 break;
790 case (0x88>>2): /* LD.L */
791 error = misaligned_load(regs, opcode, 1, 2, 1);
792 break;
793 case (0x8c>>2): /* LD.Q */
794 error = misaligned_load(regs, opcode, 1, 3, 0);
795 break;
796
797 case (0xa4>>2): /* ST.W */
798 error = misaligned_store(regs, opcode, 1, 1);
799 break;
800 case (0xa8>>2): /* ST.L */
801 error = misaligned_store(regs, opcode, 1, 2);
802 break;
803 case (0xac>>2): /* ST.Q */
804 error = misaligned_store(regs, opcode, 1, 3);
805 break;
806
807 case (0x40>>2): /* indexed loads */
808 switch (minor) {
809 case 0x1: /* LDX.W */
810 error = misaligned_load(regs, opcode, 0, 1, 1);
811 break;
812 case 0x5: /* LDX.UW */
813 error = misaligned_load(regs, opcode, 0, 1, 0);
814 break;
815 case 0x2: /* LDX.L */
816 error = misaligned_load(regs, opcode, 0, 2, 1);
817 break;
818 case 0x3: /* LDX.Q */
819 error = misaligned_load(regs, opcode, 0, 3, 0);
820 break;
821 default:
822 error = -1;
823 break;
824 }
825 break;
826
827 case (0x60>>2): /* indexed stores */
828 switch (minor) {
829 case 0x1: /* STX.W */
830 error = misaligned_store(regs, opcode, 0, 1);
831 break;
832 case 0x2: /* STX.L */
833 error = misaligned_store(regs, opcode, 0, 2);
834 break;
835 case 0x3: /* STX.Q */
836 error = misaligned_store(regs, opcode, 0, 3);
837 break;
838 default:
839 error = -1;
840 break;
841 }
842 break;
843
844#if defined(CONFIG_SH64_USER_MISALIGNED_FIXUP)
845 case (0x94>>2): /* FLD.S */
846 error = misaligned_fpu_load(regs, opcode, 1, 2, 0);
847 break;
848 case (0x98>>2): /* FLD.P */
849 error = misaligned_fpu_load(regs, opcode, 1, 3, 1);
850 break;
851 case (0x9c>>2): /* FLD.D */
852 error = misaligned_fpu_load(regs, opcode, 1, 3, 0);
853 break;
854 case (0x1c>>2): /* floating indexed loads */
855 switch (minor) {
856 case 0x8: /* FLDX.S */
857 error = misaligned_fpu_load(regs, opcode, 0, 2, 0);
858 break;
859 case 0xd: /* FLDX.P */
860 error = misaligned_fpu_load(regs, opcode, 0, 3, 1);
861 break;
862 case 0x9: /* FLDX.D */
863 error = misaligned_fpu_load(regs, opcode, 0, 3, 0);
864 break;
865 default:
866 error = -1;
867 break;
868 }
869 break;
870 case (0xb4>>2): /* FLD.S */
871 error = misaligned_fpu_store(regs, opcode, 1, 2, 0);
872 break;
873 case (0xb8>>2): /* FLD.P */
874 error = misaligned_fpu_store(regs, opcode, 1, 3, 1);
875 break;
876 case (0xbc>>2): /* FLD.D */
877 error = misaligned_fpu_store(regs, opcode, 1, 3, 0);
878 break;
879 case (0x3c>>2): /* floating indexed stores */
880 switch (minor) {
881 case 0x8: /* FSTX.S */
882 error = misaligned_fpu_store(regs, opcode, 0, 2, 0);
883 break;
884 case 0xd: /* FSTX.P */
885 error = misaligned_fpu_store(regs, opcode, 0, 3, 1);
886 break;
887 case 0x9: /* FSTX.D */
888 error = misaligned_fpu_store(regs, opcode, 0, 3, 0);
889 break;
890 default:
891 error = -1;
892 break;
893 }
894 break;
895#endif
896
897 default:
898 /* Fault */
899 error = -1;
900 break;
901 }
902
903 if (error < 0) {
904 return error;
905 } else {
906 regs->pc += 4; /* Skip the instruction that's just been emulated */
907 return 0;
908 }
909
910}
911
912static ctl_table unaligned_table[] = {
913 {1, "kernel_reports", &kernel_mode_unaligned_fixup_count,
914 sizeof(int), 0644, NULL, &proc_dointvec},
915#if defined(CONFIG_SH64_USER_MISALIGNED_FIXUP)
916 {2, "user_reports", &user_mode_unaligned_fixup_count,
917 sizeof(int), 0644, NULL, &proc_dointvec},
918 {3, "user_enable", &user_mode_unaligned_fixup_enable,
919 sizeof(int), 0644, NULL, &proc_dointvec},
920#endif
921 {0}
922};
923
924static ctl_table unaligned_root[] = {
925 {1, "unaligned_fixup", NULL, 0, 0555, unaligned_table},
926 {0}
927};
928
929static ctl_table sh64_root[] = {
930 {1, "sh64", NULL, 0, 0555, unaligned_root},
931 {0}
932};
933static struct ctl_table_header *sysctl_header;
934static int __init init_sysctl(void)
935{
936 sysctl_header = register_sysctl_table(sh64_root, 0);
937 return 0;
938}
939
940__initcall(init_sysctl);
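/* Assuming the standard proc mapping of the tables above, the knobs
   should appear as /proc/sys/sh64/unaligned_fixup/{kernel_reports,
   user_reports,user_enable}; e.g.

       echo 0 > /proc/sys/sh64/unaligned_fixup/user_enable

   would disable user-mode fixups at run time. */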
941
942
943asmlinkage void do_debug_interrupt(unsigned long code, struct pt_regs *regs)
944{
945 u64 peek_real_address_q(u64 addr);
946 u64 poke_real_address_q(u64 addr, u64 val);
947 unsigned long long DM_EXP_CAUSE_PHY = 0x0c100010;
948 unsigned long long exp_cause;
949 /* It's not worth ioremapping the debug module registers for the amount
950 of access we make to them - just go direct to their physical
951 addresses. */
952 exp_cause = peek_real_address_q(DM_EXP_CAUSE_PHY);
953 if (exp_cause & ~4) {
954 printk("DM.EXP_CAUSE had unexpected bits set (=%08lx)\n",
955 (unsigned long)(exp_cause & 0xffffffff));
956 }
957 show_state();
958 /* Clear all DEBUGINT causes */
959 poke_real_address_q(DM_EXP_CAUSE_PHY, 0x0);
960}
961
diff --git a/arch/sh64/kernel/unwind.c b/arch/sh64/kernel/unwind.c
new file mode 100644
index 000000000000..f934f97f9f9c
--- /dev/null
+++ b/arch/sh64/kernel/unwind.c
@@ -0,0 +1,326 @@
1/*
2 * arch/sh64/kernel/unwind.c
3 *
4 * Copyright (C) 2004 Paul Mundt
5 * Copyright (C) 2004 Richard Curnow
6 *
7 * This file is subject to the terms and conditions of the GNU General Public
8 * License. See the file "COPYING" in the main directory of this archive
9 * for more details.
10 */
11#include <linux/kallsyms.h>
12#include <linux/kernel.h>
13#include <linux/types.h>
14#include <linux/errno.h>
15#include <asm/page.h>
16#include <asm/ptrace.h>
17#include <asm/processor.h>
18#include <asm/io.h>
19
20static u8 regcache[63];
21
22/*
23 * Finding the previous stack frame isn't as straightforward here as it is
24 * on some other platforms. In the sh64 case, we don't have "linked" stack
25 * frames, so we need to do a bit of work to determine the previous frame,
26 * and in turn, the previous r14/r18 pair.
27 *
28 * There are generally a few cases which determine where we can find out
29 * the r14/r18 values. In the general case, this can be determined by poking
30 * around the prologue of the symbol PC is in (note that we absolutely must
31 * have frame pointer support as well as the kernel symbol table mapped,
32 * otherwise we can't even get this far).
33 *
34 * In other cases, such as the interrupt/exception path, we can poke around
35 * the sp/fp.
36 *
37 * Notably, this entire approach is somewhat error prone, and in the event
38 * that the previous frame cannot be determined, that's all we can do.
39 * Either way, this still leaves us with a more correct backtrace than what
40 * we would be able to come up with by walking the stack (which is garbage
41 * for anything beyond the first frame).
42 * -- PFM.
43 */
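/*
 * As a purely hypothetical illustration, the prologue scan below would
 * recognise a function entry shaped like:
 *
 *	addi.l	r15, -64, r15		! allocate the frame
 *	st.l	r15, 4, r18		! save the link register
 *	st.l	r15, 0, r14		! save the old frame pointer
 *	add.l	r15, r63, r14		! establish the new frame pointer
 *
 * recording the frame size from the addi.l, the r14/r18 save slots from
 * the st.l displacements, and treating the add.l as the prologue's end.
 */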
44static int lookup_prev_stack_frame(unsigned long fp, unsigned long pc,
45 unsigned long *pprev_fp, unsigned long *pprev_pc,
46 struct pt_regs *regs)
47{
48 const char *sym;
49 char *modname, namebuf[128];
50 unsigned long offset, size;
51 unsigned long prologue = 0;
52 unsigned long fp_displacement = 0;
53 unsigned long fp_prev = 0;
54 unsigned long offset_r14 = 0, offset_r18 = 0;
55 int i, found_prologue_end = 0;
56
57 sym = kallsyms_lookup(pc, &size, &offset, &modname, namebuf);
58 if (!sym)
59 return -EINVAL;
60
61 prologue = pc - offset;
62 if (!prologue)
63 return -EINVAL;
64
65 /* Validate fp, to avoid risk of dereferencing a bad pointer later.
66	   Assume 128MB since that's the amount of RAM on a Cayman. Modify
67 when there is an SH-5 board with more. */
68 if ((fp < (unsigned long) phys_to_virt(__MEMORY_START)) ||
69 (fp >= (unsigned long)(phys_to_virt(__MEMORY_START)) + 128*1024*1024) ||
70 ((fp & 7) != 0)) {
71 return -EINVAL;
72 }
73
74 /*
75	 * Depth to walk; the bound is completely arbitrary.
76 */
77 for (i = 0; i < 100; i++, prologue += sizeof(unsigned long)) {
78 unsigned long op;
79 u8 major, minor;
80 u8 src, dest, disp;
81
82 op = *(unsigned long *)prologue;
83
84 major = (op >> 26) & 0x3f;
85 src = (op >> 20) & 0x3f;
86 minor = (op >> 16) & 0xf;
87 disp = (op >> 10) & 0x3f;
88 dest = (op >> 4) & 0x3f;
89
90 /*
91 * Stack frame creation happens in a number of ways.. in the
92 * general case when the stack frame is less than 511 bytes,
93 * it's generally created by an addi or addi.l:
94 *
95 * addi/addi.l r15, -FRAME_SIZE, r15
96 *
97 * in the event that the frame size is bigger than this, it's
98 * typically created using a movi/sub pair as follows:
99 *
100 * movi FRAME_SIZE, rX
101 * sub r15, rX, r15
102 */
103
104 switch (major) {
105 case (0x00 >> 2):
106 switch (minor) {
107 case 0x8: /* add.l */
108 case 0x9: /* add */
109 /* Look for r15, r63, r14 */
110 if (src == 15 && disp == 63 && dest == 14)
111 found_prologue_end = 1;
112
113 break;
114 case 0xa: /* sub.l */
115 case 0xb: /* sub */
116 if (src != 15 || dest != 15)
117 continue;
118
119 fp_displacement -= regcache[disp];
120 fp_prev = fp - fp_displacement;
121 break;
122 }
123 break;
124 case (0xa8 >> 2): /* st.l */
125 if (src != 15)
126 continue;
127
128 switch (dest) {
129 case 14:
130 if (offset_r14 || fp_displacement == 0)
131 continue;
132
133 offset_r14 = (u64)(((((s64)op >> 10) & 0x3ff) << 54) >> 54);
134 offset_r14 *= sizeof(unsigned long);
135 offset_r14 += fp_displacement;
136 break;
137 case 18:
138 if (offset_r18 || fp_displacement == 0)
139 continue;
140
141 offset_r18 = (u64)(((((s64)op >> 10) & 0x3ff) << 54) >> 54);
142 offset_r18 *= sizeof(unsigned long);
143 offset_r18 += fp_displacement;
144 break;
145 }
146
147 break;
148 case (0xcc >> 2): /* movi */
149 if (dest >= 63) {
150 printk(KERN_NOTICE "%s: Invalid dest reg %d "
151 "specified in movi handler. Failed "
152 "opcode was 0x%lx: ", __FUNCTION__,
153 dest, op);
154
155 continue;
156 }
157
158			/* Sign extend the 16-bit movi immediate */
159			regcache[dest] =
160				((((s64)(u64)op >> 10) & 0xffff) << 48) >> 48;
161 break;
162 case (0xd0 >> 2): /* addi */
163 case (0xd4 >> 2): /* addi.l */
164 /* Look for r15, -FRAME_SIZE, r15 */
165 if (src != 15 || dest != 15)
166 continue;
167
168 /* Sign extended frame size.. */
169 fp_displacement +=
170 (u64)(((((s64)op >> 10) & 0x3ff) << 54) >> 54);
171 fp_prev = fp - fp_displacement;
172 break;
173 }
174
175 if (found_prologue_end && offset_r14 && (offset_r18 || *pprev_pc) && fp_prev)
176 break;
177 }
178
179 if (offset_r14 == 0 || fp_prev == 0) {
180 if (!offset_r14)
181 pr_debug("Unable to find r14 offset\n");
182 if (!fp_prev)
183 pr_debug("Unable to find previous fp\n");
184
185 return -EINVAL;
186 }
187
188	/* For the innermost leaf function, there might not be an offset_r18 */
189 if (!*pprev_pc && (offset_r18 == 0))
190 return -EINVAL;
191
192 *pprev_fp = *(unsigned long *)(fp_prev + offset_r14);
193
194 if (offset_r18)
195 *pprev_pc = *(unsigned long *)(fp_prev + offset_r18);
196
197 *pprev_pc &= ~1;
198
199 return 0;
200}
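
/*
 * Illustration only -- a hypothetical helper, not used above, showing the
 * sign-extension idiom lookup_prev_stack_frame() applies to the signed
 * 10-bit immediate of the addi/st.l opcodes: bits 10..19 carry the
 * immediate, and shifting the masked field up by 54 (= 64 - 10) before
 * arithmetic-shifting it back down propagates the sign bit.
 */
static inline s64 sh64_sext_imm10(unsigned long op)
{
	/* e.g. an opcode with imm10 = 0x3c0 decodes to -64 */
	return ((((s64)op >> 10) & 0x3ff) << 54) >> 54;
}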
201
202/* Don't put this on the stack since we'll want to call sh64_unwind
203 * when we're close to underflowing the stack anyway. */
204static struct pt_regs here_regs;
205
206extern const char syscall_ret;
207extern const char ret_from_syscall;
208extern const char ret_from_exception;
209extern const char ret_from_irq;
210
211static void sh64_unwind_inner(struct pt_regs *regs);
212
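/* At the syscall/exception/IRQ return labels, the frame pointer points at
 * the struct pt_regs that was saved on kernel entry, so we can recurse
 * into the interrupted context's register state. */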
213static void unwind_nested(unsigned long pc, unsigned long fp)
214{
215 if ((fp >= __MEMORY_START) &&
216 ((fp & 7) == 0)) {
217 sh64_unwind_inner((struct pt_regs *) fp);
218 }
219}
220
221static void sh64_unwind_inner(struct pt_regs *regs)
222{
223 unsigned long pc, fp;
224 int ofs = 0;
225 int first_pass;
226
227 pc = regs->pc & ~1;
228 fp = regs->regs[14];
229
230 first_pass = 1;
231 for (;;) {
232 int cond;
233 unsigned long next_fp, next_pc;
234
235 if (pc == ((unsigned long) &syscall_ret & ~1)) {
236 printk("SYSCALL\n");
237			unwind_nested(pc, fp);
238 return;
239 }
240
241 if (pc == ((unsigned long) &ret_from_syscall & ~1)) {
242 printk("SYSCALL (PREEMPTED)\n");
243			unwind_nested(pc, fp);
244 return;
245 }
246
247		/* In this case the PC is discovered by lookup_prev_stack_frame,
248		   but it has had 4 subtracted from it to look like the 'caller'. */
249 if (pc == ((unsigned long) &ret_from_exception & ~1)) {
250 printk("EXCEPTION\n");
251			unwind_nested(pc, fp);
252 return;
253 }
254
255 if (pc == ((unsigned long) &ret_from_irq & ~1)) {
256 printk("IRQ\n");
257			unwind_nested(pc, fp);
258 return;
259 }
260
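		/* FIXME: this pc/fp validity check is computed but never tested. */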
261 cond = ((pc >= __MEMORY_START) && (fp >= __MEMORY_START) &&
262 ((pc & 3) == 0) && ((fp & 7) == 0));
263
264 pc -= ofs;
265
266 printk("[<%08lx>] ", pc);
267 print_symbol("%s\n", pc);
268
269 if (first_pass) {
270 /* If the innermost frame is a leaf function, it's
271 * possible that r18 is never saved out to the stack.
272 */
273 next_pc = regs->regs[18];
274 } else {
275 next_pc = 0;
276 }
277
278 if (lookup_prev_stack_frame(fp, pc, &next_fp, &next_pc, regs) == 0) {
279 ofs = sizeof(unsigned long);
280 pc = next_pc & ~1;
281 fp = next_fp;
282 } else {
283 printk("Unable to lookup previous stack frame\n");
284 break;
285 }
286 first_pass = 0;
287 }
288
289 printk("\n");
290
291}
292
293void sh64_unwind(struct pt_regs *regs)
294{
295 if (!regs) {
296 /*
297 * Fetch current regs if we have no other saved state to back
298 * trace from.
299 */
300 regs = &here_regs;
301
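		/* ori rN, 0, rM is the canonical SHmedia register-to-register move. */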
302 __asm__ __volatile__ ("ori r14, 0, %0" : "=r" (regs->regs[14]));
303 __asm__ __volatile__ ("ori r15, 0, %0" : "=r" (regs->regs[15]));
304 __asm__ __volatile__ ("ori r18, 0, %0" : "=r" (regs->regs[18]));
305
306 __asm__ __volatile__ ("gettr tr0, %0" : "=r" (regs->tregs[0]));
307 __asm__ __volatile__ ("gettr tr1, %0" : "=r" (regs->tregs[1]));
308 __asm__ __volatile__ ("gettr tr2, %0" : "=r" (regs->tregs[2]));
309 __asm__ __volatile__ ("gettr tr3, %0" : "=r" (regs->tregs[3]));
310 __asm__ __volatile__ ("gettr tr4, %0" : "=r" (regs->tregs[4]));
311 __asm__ __volatile__ ("gettr tr5, %0" : "=r" (regs->tregs[5]));
312 __asm__ __volatile__ ("gettr tr6, %0" : "=r" (regs->tregs[6]));
313 __asm__ __volatile__ ("gettr tr7, %0" : "=r" (regs->tregs[7]));
314
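		/* The pta/blink pair below captures the current PC: blink
		 * branches to the local label and stores the link address
		 * into regs->pc. */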
315 __asm__ __volatile__ (
316 "pta 0f, tr0\n\t"
317 "blink tr0, %0\n\t"
318 "0: nop"
319 : "=r" (regs->pc)
320 );
321 }
322
323 printk("\nCall Trace:\n");
324 sh64_unwind_inner(regs);
325}
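
A hypothetical usage sketch (the caller name is assumed, not part of this
file): code with saved state, such as the trap handlers, passes its
struct pt_regs; passing NULL makes sh64_unwind() snapshot the current
context as above.

	void example_show_trace(struct pt_regs *regs)
	{
		/* regs == NULL => capture current r14/r15/r18, tr0-tr7, pc */
		sh64_unwind(regs);
	}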
326
diff --git a/arch/sh64/kernel/vmlinux.lds.S b/arch/sh64/kernel/vmlinux.lds.S
new file mode 100644
index 000000000000..7d9f7a6339a0
--- /dev/null
+++ b/arch/sh64/kernel/vmlinux.lds.S
@@ -0,0 +1,181 @@
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * arch/sh64/kernel/vmlinux.lds.S
7 *
8 * ld script to make ST50 Linux kernel
9 *
10 * Copyright (C) 2000, 2001 Paolo Alberelli
11 *
12 * benedict.gaster@superh.com: 2nd May 2002
13 * Add definition of empty_zero_page to be the first page of kernel image.
14 *
15 * benedict.gaster@superh.com: 3rd May 2002
16 * Added support for ramdisk, removing statically linked romfs at the same time.
17 *
18 * lethal@linux-sh.org: 9th May 2003
19 * Kill off GLOBAL_NAME() usage and other CDC-isms.
20 *
21 * lethal@linux-sh.org: 19th May 2003
22 * Remove support for ancient toolchains.
23 */
24
25#include <linux/config.h>
26#include <asm/page.h>
27#include <asm/cache.h>
28#include <asm/processor.h>
29#include <asm/thread_info.h>
30
31#define LOAD_OFFSET CONFIG_CACHED_MEMORY_OFFSET
32#include <asm-generic/vmlinux.lds.h>
33
34#ifdef NOTDEF
35#ifdef CONFIG_LITTLE_ENDIAN
36OUTPUT_FORMAT("elf32-sh64l-linux", "elf32-sh64l-linux", "elf32-sh64l-linux")
37#else
38OUTPUT_FORMAT("elf32-sh64", "elf32-sh64", "elf32-sh64")
39#endif
40#endif
41
42OUTPUT_ARCH(sh:sh5)
43
44#define C_PHYS(x) AT (ADDR(x) - LOAD_OFFSET)
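/* C_PHYS(x) gives section x a load address (LMA) equal to its physical
   address, by subtracting the cached-mapping offset from its link-time
   virtual address. */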
45
46ENTRY(__start)
47SECTIONS
48{
49 . = CONFIG_CACHED_MEMORY_OFFSET + CONFIG_MEMORY_START + PAGE_SIZE;
50 _text = .; /* Text and read-only data */
51 text = .; /* Text and read-only data */
52
53 .empty_zero_page : C_PHYS(.empty_zero_page) {
54 *(.empty_zero_page)
55 } = 0
56
57 .text : C_PHYS(.text) {
58 *(.text)
59 *(.text64)
60 *(.text..SHmedia32)
61 SCHED_TEXT
62 LOCK_TEXT
63 *(.fixup)
64 *(.gnu.warning)
65#ifdef CONFIG_LITTLE_ENDIAN
66 } = 0x6ff0fff0
67#else
68 } = 0xf0fff06f
69#endif
70
71 /* We likely want __ex_table to be Cache Line aligned */
72 . = ALIGN(L1_CACHE_BYTES); /* Exception table */
73 __start___ex_table = .;
74 __ex_table : C_PHYS(__ex_table) { *(__ex_table) }
75 __stop___ex_table = .;
76
77 RODATA
78
79 _etext = .; /* End of text section */
80
81 .data : C_PHYS(.data) { /* Data */
82 *(.data)
83 CONSTRUCTORS
84 }
85
86 . = ALIGN(PAGE_SIZE);
87 .data.page_aligned : C_PHYS(.data.page_aligned) { *(.data.page_aligned) }
88
89 . = ALIGN(L1_CACHE_BYTES);
90 __per_cpu_start = .;
91 .data.percpu : C_PHYS(.data.percpu) { *(.data.percpu) }
92 __per_cpu_end = . ;
93 .data.cacheline_aligned : C_PHYS(.data.cacheline_aligned) { *(.data.cacheline_aligned) }
94
95 _edata = .; /* End of data section */
96
97 . = ALIGN(THREAD_SIZE); /* init_task: structure size aligned */
98 .data.init_task : C_PHYS(.data.init_task) { *(.data.init_task) }
99
100 . = ALIGN(PAGE_SIZE); /* Init code and data */
101 __init_begin = .;
102 _sinittext = .;
103 .init.text : C_PHYS(.init.text) { *(.init.text) }
104 _einittext = .;
105 .init.data : C_PHYS(.init.data) { *(.init.data) }
106 . = ALIGN(L1_CACHE_BYTES); /* Better if Cache Line aligned */
107 __setup_start = .;
108 .init.setup : C_PHYS(.init.setup) { *(.init.setup) }
109 __setup_end = .;
110 __initcall_start = .;
111 .initcall.init : C_PHYS(.initcall.init) {
112 *(.initcall1.init)
113 *(.initcall2.init)
114 *(.initcall3.init)
115 *(.initcall4.init)
116 *(.initcall5.init)
117 *(.initcall6.init)
118 *(.initcall7.init)
119 }
120 __initcall_end = .;
121 __con_initcall_start = .;
122 .con_initcall.init : C_PHYS(.con_initcall.init) { *(.con_initcall.init) }
123 __con_initcall_end = .;
124 SECURITY_INIT
125 __initramfs_start = .;
126 .init.ramfs : C_PHYS(.init.ramfs) { *(.init.ramfs) }
127 __initramfs_end = .;
128 . = ALIGN(PAGE_SIZE);
129 __init_end = .;
130
131 /* Align to the biggest single data representation, head and tail */
132 . = ALIGN(8);
133 __bss_start = .; /* BSS */
134 .bss : C_PHYS(.bss) {
135 *(.bss)
136 }
137 . = ALIGN(8);
138 _end = . ;
139
140 /* Sections to be discarded */
141 /DISCARD/ : {
142 *(.exit.text)
143 *(.exit.data)
144 *(.exitcall.exit)
145 }
146
147 /* Stabs debugging sections. */
148 .stab 0 : C_PHYS(.stab) { *(.stab) }
149 .stabstr 0 : C_PHYS(.stabstr) { *(.stabstr) }
150 .stab.excl 0 : C_PHYS(.stab.excl) { *(.stab.excl) }
151 .stab.exclstr 0 : C_PHYS(.stab.exclstr) { *(.stab.exclstr) }
152 .stab.index 0 : C_PHYS(.stab.index) { *(.stab.index) }
153 .stab.indexstr 0 : C_PHYS(.stab.indexstr) { *(.stab.indexstr) }
154 .comment 0 : C_PHYS(.comment) { *(.comment) }
155 /* DWARF debug sections.
156 Symbols in the DWARF debugging section are relative to the beginning
157 of the section so we begin .debug at 0. */
158 /* DWARF 1 */
159 .debug 0 : C_PHYS(.debug) { *(.debug) }
160 .line 0 : C_PHYS(.line) { *(.line) }
161 /* GNU DWARF 1 extensions */
162 .debug_srcinfo 0 : C_PHYS(.debug_srcinfo) { *(.debug_srcinfo) }
163 .debug_sfnames 0 : C_PHYS(.debug_sfnames) { *(.debug_sfnames) }
164 /* DWARF 1.1 and DWARF 2 */
165 .debug_aranges 0 : C_PHYS(.debug_aranges) { *(.debug_aranges) }
166 .debug_pubnames 0 : C_PHYS(.debug_pubnames) { *(.debug_pubnames) }
167 /* DWARF 2 */
168 .debug_info 0 : C_PHYS(.debug_info) { *(.debug_info) }
169 .debug_abbrev 0 : C_PHYS(.debug_abbrev) { *(.debug_abbrev) }
170 .debug_line 0 : C_PHYS(.debug_line) { *(.debug_line) }
171 .debug_frame 0 : C_PHYS(.debug_frame) { *(.debug_frame) }
172 .debug_str 0 : C_PHYS(.debug_str) { *(.debug_str) }
173 .debug_loc 0 : C_PHYS(.debug_loc) { *(.debug_loc) }
174 .debug_macinfo 0 : C_PHYS(.debug_macinfo) { *(.debug_macinfo) }
175 /* SGI/MIPS DWARF 2 extensions */
176 .debug_weaknames 0 : C_PHYS(.debug_weaknames) { *(.debug_weaknames) }
177 .debug_funcnames 0 : C_PHYS(.debug_funcnames) { *(.debug_funcnames) }
178 .debug_typenames 0 : C_PHYS(.debug_typenames) { *(.debug_typenames) }
179 .debug_varnames 0 : C_PHYS(.debug_varnames) { *(.debug_varnames) }
180 /* These must appear regardless of '.', the location counter. */
181}
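
As a usage note, the paired symbols defined above (for example
__initcall_start and __initcall_end) exist so that generic kernel code can
walk the arrays the linker assembles between them. A minimal sketch of the
initcall consumer, modeled on do_initcalls() in init/main.c of this era:

	typedef int (*initcall_t)(void);
	extern initcall_t __initcall_start[], __initcall_end[];

	/* Call every initcall packed between the two markers, in
	 * .initcall1.init .. .initcall7.init priority order. */
	static void run_initcalls(void)
	{
		initcall_t *call;

		for (call = __initcall_start; call < __initcall_end; call++)
			(*call)();
	}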