Diffstat (limited to 'arch/sh64/kernel')
-rw-r--r--  arch/sh64/kernel/Makefile          36
-rw-r--r--  arch/sh64/kernel/alphanum.c        43
-rw-r--r--  arch/sh64/kernel/asm-offsets.c     33
-rw-r--r--  arch/sh64/kernel/dma.c            297
-rw-r--r--  arch/sh64/kernel/early_printk.c    99
-rw-r--r--  arch/sh64/kernel/entry.S         2102
-rw-r--r--  arch/sh64/kernel/fpu.c            170
-rw-r--r--  arch/sh64/kernel/head.S           372
-rw-r--r--  arch/sh64/kernel/init_task.c       46
-rw-r--r--  arch/sh64/kernel/irq.c            115
-rw-r--r--  arch/sh64/kernel/irq_intc.c       272
-rw-r--r--  arch/sh64/kernel/led.c             40
-rw-r--r--  arch/sh64/kernel/module.c         161
-rw-r--r--  arch/sh64/kernel/pci_sh5.c        536
-rw-r--r--  arch/sh64/kernel/pci_sh5.h        107
-rw-r--r--  arch/sh64/kernel/pcibios.c        168
-rw-r--r--  arch/sh64/kernel/process.c        691
-rw-r--r--  arch/sh64/kernel/ptrace.c         332
-rw-r--r--  arch/sh64/kernel/semaphore.c      140
-rw-r--r--  arch/sh64/kernel/setup.c          379
-rw-r--r--  arch/sh64/kernel/sh_ksyms.c        62
-rw-r--r--  arch/sh64/kernel/signal.c         750
-rw-r--r--  arch/sh64/kernel/switchto.S       198
-rw-r--r--  arch/sh64/kernel/sys_sh64.c       304
-rw-r--r--  arch/sh64/kernel/syscalls.S       381
-rw-r--r--  arch/sh64/kernel/time.c           593
-rw-r--r--  arch/sh64/kernel/traps.c          982
-rw-r--r--  arch/sh64/kernel/unwind.c         326
-rw-r--r--  arch/sh64/kernel/vmlinux.lds.S    140
29 files changed, 0 insertions, 9875 deletions
diff --git a/arch/sh64/kernel/Makefile b/arch/sh64/kernel/Makefile
deleted file mode 100644
index e3467bda6167..000000000000
--- a/arch/sh64/kernel/Makefile
+++ /dev/null
@@ -1,36 +0,0 @@
1#
2# This file is subject to the terms and conditions of the GNU General Public
3# License. See the file "COPYING" in the main directory of this archive
4# for more details.
5#
6# Copyright (C) 2000, 2001 Paolo Alberelli
7# Copyright (C) 2003 Paul Mundt
8#
9# Makefile for the Linux sh64 kernel.
10#
11# Note! Dependencies are done automagically by 'make dep', which also
12# removes any old dependencies. DON'T put your own dependencies here
13# unless it's something special (ie not a .c file).
14#
15
16extra-y := head.o init_task.o vmlinux.lds
17
18obj-y := process.o signal.o entry.o traps.o irq.o irq_intc.o \
19 ptrace.o setup.o time.o sys_sh64.o semaphore.o sh_ksyms.o \
20 switchto.o syscalls.o
21
22obj-$(CONFIG_HEARTBEAT) += led.o
23obj-$(CONFIG_SH_ALPHANUMERIC) += alphanum.o
24obj-$(CONFIG_SH_DMA) += dma.o
25obj-$(CONFIG_SH_FPU) += fpu.o
26obj-$(CONFIG_EARLY_PRINTK) += early_printk.o
27obj-$(CONFIG_KALLSYMS) += unwind.o
28obj-$(CONFIG_PCI) += pcibios.o
29obj-$(CONFIG_MODULES) += module.o
30
31ifeq ($(CONFIG_PCI),y)
32obj-$(CONFIG_CPU_SH5) += pci_sh5.o
33endif
34
35USE_STANDARD_AS_RULE := true
36
diff --git a/arch/sh64/kernel/alphanum.c b/arch/sh64/kernel/alphanum.c
deleted file mode 100644
index d1619d95fbaa..000000000000
--- a/arch/sh64/kernel/alphanum.c
+++ /dev/null
@@ -1,43 +0,0 @@
1/*
2 * arch/sh64/kernel/alphanum.c
3 *
4 * Copyright (C) 2002 Stuart Menefy <stuart.menefy@st.com>
5 *
6 * May be copied or modified under the terms of the GNU General Public
7 * License. See linux/COPYING for more information.
8 *
9 * Machine-independent functions for handling 8-digit alphanumeric display
10 * (e.g. Agilent HDSP-253x)
11 */
12#include <linux/stddef.h>
13#include <linux/sched.h>
14
15void mach_alphanum(int pos, unsigned char val);
16
17void print_seg(char *file, int line)
18{
19 int i;
20 unsigned int nibble;
21
22 for (i = 0; i < 5; i++) {
23 mach_alphanum(i, file[i]);
24 }
25
26 for (i = 0; i < 3; i++) {
27 nibble = ((line >> (i * 4)) & 0xf);
28 mach_alphanum(7 - i, nibble + ((nibble > 9) ? 55 : 48));
29 }
30}
31
32void print_seg_num(unsigned num)
33{
34 int i;
35 unsigned int nibble;
36
37 for (i = 0; i < 8; i++) {
38 nibble = ((num >> (i * 4)) & 0xf);
39
40 mach_alphanum(7 - i, nibble + ((nibble > 9) ? 55 : 48));
41 }
42}
43
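The nibble + ((nibble > 9) ? 55 : 48) expression used twice above is the classic hex-digit conversion: 48 is ASCII '0' and 55 is 'A' - 10. A minimal standalone sketch of the same conversion (mach_alphanum() and the display positions are the board-specific parts and are not reproduced):

	#include <stdio.h>

	/* One 4-bit value to its ASCII hex digit: 0-9 -> '0'-'9', 10-15 -> 'A'-'F'. */
	static char hex_digit(unsigned int nibble)
	{
		return nibble + ((nibble > 9) ? 55 : 48);	/* 55 == 'A' - 10, 48 == '0' */
	}

	int main(void)
	{
		unsigned int num = 0x1a2b3c4d, i;

		/* Walk the eight nibbles from least significant, as print_seg_num()
		 * does; that routine writes nibble i to display position 7 - i, so
		 * the most significant digit lands leftmost. */
		for (i = 0; i < 8; i++)
			putchar(hex_digit((num >> (i * 4)) & 0xf));
		putchar('\n');
		return 0;
	}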
diff --git a/arch/sh64/kernel/asm-offsets.c b/arch/sh64/kernel/asm-offsets.c
deleted file mode 100644
index ca76537c16c0..000000000000
--- a/arch/sh64/kernel/asm-offsets.c
+++ /dev/null
@@ -1,33 +0,0 @@
1/*
2 * This program is used to generate definitions needed by
3 * assembly language modules.
4 *
5 * We use the technique used in the OSF Mach kernel code:
6 * generate asm statements containing #defines,
7 * compile this file to assembler, and then extract the
8 * #defines from the assembly-language output.
9 */
10
11#include <linux/stddef.h>
12#include <linux/types.h>
13#include <linux/mm.h>
14#include <asm/thread_info.h>
15
16#define DEFINE(sym, val) \
17 asm volatile("\n->" #sym " %0 " #val : : "i" (val))
18
19#define BLANK() asm volatile("\n->" : : )
20
21int main(void)
22{
23 /* offsets into the thread_info struct */
24 DEFINE(TI_TASK, offsetof(struct thread_info, task));
25 DEFINE(TI_EXEC_DOMAIN, offsetof(struct thread_info, exec_domain));
26 DEFINE(TI_FLAGS, offsetof(struct thread_info, flags));
27 DEFINE(TI_PRE_COUNT, offsetof(struct thread_info, preempt_count));
28 DEFINE(TI_CPU, offsetof(struct thread_info, cpu));
29 DEFINE(TI_ADDR_LIMIT, offsetof(struct thread_info, addr_limit));
30 DEFINE(TI_RESTART_BLOCK,offsetof(struct thread_info, restart_block));
31
32 return 0;
33}
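For context on how this trick pays off: compiling the file above with -S turns each DEFINE() into a marker line in the assembler output, which the build machinery scrapes into a header of plain constants that entry.S pulls in via <asm/asm-offsets.h>. A hedged sketch of the round trip, with an illustrative (not real) offset value:

	/*
	 * DEFINE(TI_FLAGS, offsetof(struct thread_info, flags)) compiles to an
	 * assembler line resembling
	 *
	 *	->TI_FLAGS 16 offsetof(struct thread_info, flags)
	 *
	 * and the generated header then carries, roughly:
	 */
	#define TI_FLAGS 16	/* offsetof(struct thread_info, flags); value illustrative */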
diff --git a/arch/sh64/kernel/dma.c b/arch/sh64/kernel/dma.c
deleted file mode 100644
index 32c6f0549bf1..000000000000
--- a/arch/sh64/kernel/dma.c
+++ /dev/null
@@ -1,297 +0,0 @@
1/*
2 * arch/sh64/kernel/dma.c
3 *
4 * DMA routines for the SH-5 DMAC.
5 *
6 * Copyright (C) 2003 Paul Mundt
7 *
8 * This file is subject to the terms and conditions of the GNU General Public
9 * License. See the file "COPYING" in the main directory of this archive
10 * for more details.
11 */
12#include <linux/init.h>
13#include <linux/module.h>
14#include <linux/interrupt.h>
15#include <linux/types.h>
16#include <linux/irq.h>
17#include <linux/spinlock.h>
18#include <linux/mm.h>
19#include <asm/hardware.h>
20#include <asm/dma.h>
21#include <asm/signal.h>
22#include <asm/errno.h>
23#include <asm/io.h>
24
25typedef struct {
26 unsigned long dev_addr;
27 unsigned long mem_addr;
28
29 unsigned int mode;
30 unsigned int count;
31} dma_info_t;
32
33static dma_info_t dma_info[MAX_DMA_CHANNELS];
34static DEFINE_SPINLOCK(dma_spin_lock);
35
36/* arch/sh64/kernel/irq_intc.c */
37extern void make_intc_irq(unsigned int irq);
38
39/* DMAC Interrupts */
40#define DMA_IRQ_DMTE0 18
41#define DMA_IRQ_DERR 22
42
43#define DMAC_COMMON_BASE (dmac_base + 0x08)
44#define DMAC_SAR_BASE (dmac_base + 0x10)
45#define DMAC_DAR_BASE (dmac_base + 0x18)
46#define DMAC_COUNT_BASE (dmac_base + 0x20)
47#define DMAC_CTRL_BASE (dmac_base + 0x28)
48#define DMAC_STATUS_BASE (dmac_base + 0x30)
49
50#define DMAC_SAR(n) (DMAC_SAR_BASE + ((n) * 0x28))
51#define DMAC_DAR(n) (DMAC_DAR_BASE + ((n) * 0x28))
52#define DMAC_COUNT(n) (DMAC_COUNT_BASE + ((n) * 0x28))
53#define DMAC_CTRL(n) (DMAC_CTRL_BASE + ((n) * 0x28))
54#define DMAC_STATUS(n) (DMAC_STATUS_BASE + ((n) * 0x28))
55
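/*
 * Each channel thus occupies a 0x28-byte stride of register space: channel
 * n's SAR/DAR/COUNT/CTRL/STATUS live at the channel-0 offsets plus n * 0x28,
 * e.g. DMAC_CTRL(1) == dmac_base + 0x28 + 0x28 == dmac_base + 0x50.
 */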
56/* DMAC.COMMON Bit Definitions */
57#define DMAC_COMMON_PR 0x00000001 /* Priority */
58 /* Bits 1-2 Reserved */
59#define DMAC_COMMON_ME 0x00000008 /* Master Enable */
60#define DMAC_COMMON_NMI 0x00000010 /* NMI Flag */
61 /* Bits 5-6 Reserved */
62#define DMAC_COMMON_ER 0x00000780 /* Error Response */
63#define DMAC_COMMON_AAE 0x00007800 /* Address Alignment Error */
64 /* Bits 15-63 Reserved */
65
66/* DMAC.SAR Bit Definitions */
67#define DMAC_SAR_ADDR 0xffffffff /* Source Address */
68
69/* DMAC.DAR Bit Definitions */
70#define DMAC_DAR_ADDR 0xffffffff /* Destination Address */
71
72/* DMAC.COUNT Bit Definitions */
73#define DMAC_COUNT_CNT 0xffffffff /* Transfer Count */
74
75/* DMAC.CTRL Bit Definitions */
76#define DMAC_CTRL_TS 0x00000007 /* Transfer Size */
77#define DMAC_CTRL_SI 0x00000018 /* Source Increment */
78#define DMAC_CTRL_DI 0x00000060 /* Destination Increment */
79#define DMAC_CTRL_RS 0x00000780 /* Resource Select */
80#define DMAC_CTRL_IE 0x00000800 /* Interrupt Enable */
81#define DMAC_CTRL_TE 0x00001000 /* Transfer Enable */
82 /* Bits 15-63 Reserved */
83
84/* DMAC.STATUS Bit Definitions */
85#define DMAC_STATUS_TE 0x00000001 /* Transfer End */
86#define DMAC_STATUS_AAE 0x00000002 /* Address Alignment Error */
87 /* Bits 2-63 Reserved */
88
89static unsigned long dmac_base;
90
91void set_dma_count(unsigned int chan, unsigned int count);
92void set_dma_addr(unsigned int chan, unsigned int addr);
93
94static irqreturn_t dma_mte(int irq, void *dev_id, struct pt_regs *regs)
95{
96 unsigned int chan = irq - DMA_IRQ_DMTE0;
97 dma_info_t *info = dma_info + chan;
98 u64 status;
99
100 if (info->mode & DMA_MODE_WRITE) {
101 sh64_out64(info->mem_addr & DMAC_SAR_ADDR, DMAC_SAR(chan));
102 } else {
103 sh64_out64(info->mem_addr & DMAC_DAR_ADDR, DMAC_DAR(chan));
104 }
105
106 set_dma_count(chan, info->count);
107
108 /* Clear the TE bit */
109 status = sh64_in64(DMAC_STATUS(chan));
110 status &= ~DMAC_STATUS_TE;
111 sh64_out64(status, DMAC_STATUS(chan));
112
113 return IRQ_HANDLED;
114}
115
116static struct irqaction irq_dmte = {
117 .handler = dma_mte,
118 .flags = IRQF_DISABLED,
119 .name = "DMA MTE",
120};
121
122static irqreturn_t dma_err(int irq, void *dev_id, struct pt_regs *regs)
123{
124 u64 tmp;
125 u8 chan;
126
127 printk(KERN_NOTICE "DMAC: Got a DMA Error!\n");
128
129 tmp = sh64_in64(DMAC_COMMON_BASE);
130
131 /* Check for the type of error */
132 if ((chan = tmp & DMAC_COMMON_AAE)) {
133 /* It's an address alignment error.. */
134 printk(KERN_NOTICE "DMAC: Alignment error on channel %d, ", chan);
135
136 printk(KERN_NOTICE "SAR: 0x%08llx, DAR: 0x%08llx, COUNT: %lld\n",
137 (sh64_in64(DMAC_SAR(chan)) & DMAC_SAR_ADDR),
138 (sh64_in64(DMAC_DAR(chan)) & DMAC_DAR_ADDR),
139 (sh64_in64(DMAC_COUNT(chan)) & DMAC_COUNT_CNT));
140
141 } else if ((chan = tmp & DMAC_COMMON_ER)) {
142 /* Something else went wrong.. */
143 printk(KERN_NOTICE "DMAC: Error on channel %d\n", chan);
144 }
145
146 /* Reset the ME bit to clear the interrupt */
147 tmp |= DMAC_COMMON_ME;
148 sh64_out64(tmp, DMAC_COMMON_BASE);
149
150 return IRQ_HANDLED;
151}
152
153static struct irqaction irq_derr = {
154 .handler = dma_err,
155 .flags = IRQF_DISABLED,
156 .name = "DMA Error",
157};
158
159static inline unsigned long calc_xmit_shift(unsigned int chan)
160{
161 return sh64_in64(DMAC_CTRL(chan)) & 0x03;
162}
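/*
 * The low CTRL bits read here are (part of) the DMAC_CTRL_TS transfer-size
 * field, presumably the log2 of the transfer unit: set_dma_count() below
 * shifts the byte count right by this amount before programming COUNT, and
 * get_dma_residue() shifts back left.  (Note the mask is 0x03 even though
 * DMAC_CTRL_TS above is 0x07.)
 */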
163
164void setup_dma(unsigned int chan, dma_info_t *info)
165{
166 unsigned int irq = DMA_IRQ_DMTE0 + chan;
167 dma_info_t *dma = dma_info + chan;
168
169 make_intc_irq(irq);
170 setup_irq(irq, &irq_dmte);
171	*dma = *info;	/* latch the caller's channel parameters */
172}
173
174void enable_dma(unsigned int chan)
175{
176 u64 ctrl;
177
178 ctrl = sh64_in64(DMAC_CTRL(chan));
179 ctrl |= DMAC_CTRL_TE;
180 sh64_out64(ctrl, DMAC_CTRL(chan));
181}
182
183void disable_dma(unsigned int chan)
184{
185 u64 ctrl;
186
187 ctrl = sh64_in64(DMAC_CTRL(chan));
188 ctrl &= ~DMAC_CTRL_TE;
189 sh64_out64(ctrl, DMAC_CTRL(chan));
190}
191
192void set_dma_mode(unsigned int chan, char mode)
193{
194 dma_info_t *info = dma_info + chan;
195
196 info->mode = mode;
197
198 set_dma_addr(chan, info->mem_addr);
199 set_dma_count(chan, info->count);
200}
201
202void set_dma_addr(unsigned int chan, unsigned int addr)
203{
204 dma_info_t *info = dma_info + chan;
205 unsigned long sar, dar;
206
207 info->mem_addr = addr;
208 sar = (info->mode & DMA_MODE_WRITE) ? info->mem_addr : info->dev_addr;
209 dar = (info->mode & DMA_MODE_WRITE) ? info->dev_addr : info->mem_addr;
210
211 sh64_out64(sar & DMAC_SAR_ADDR, DMAC_SAR(chan));
212	sh64_out64(dar & DMAC_DAR_ADDR, DMAC_DAR(chan));
213}
214
215void set_dma_count(unsigned int chan, unsigned int count)
216{
217 dma_info_t *info = dma_info + chan;
218 u64 tmp;
219
220 info->count = count;
221
222 tmp = (info->count >> calc_xmit_shift(chan)) & DMAC_COUNT_CNT;
223
224 sh64_out64(tmp, DMAC_COUNT(chan));
225}
226
227unsigned long claim_dma_lock(void)
228{
229 unsigned long flags;
230
231 spin_lock_irqsave(&dma_spin_lock, flags);
232
233 return flags;
234}
235
236void release_dma_lock(unsigned long flags)
237{
238 spin_unlock_irqrestore(&dma_spin_lock, flags);
239}
240
241int get_dma_residue(unsigned int chan)
242{
243	return sh64_in64(DMAC_COUNT(chan)) << calc_xmit_shift(chan);
244}
245
246int __init init_dma(void)
247{
248 struct vcr_info vcr;
249 u64 tmp;
250
251 /* Remap the DMAC */
252 dmac_base = onchip_remap(PHYS_DMAC_BLOCK, 1024, "DMAC");
253 if (!dmac_base) {
254 printk(KERN_ERR "Unable to remap DMAC\n");
255 return -ENOMEM;
256 }
257
258 /* Report DMAC.VCR Info */
259 vcr = sh64_get_vcr_info(dmac_base);
260 printk("DMAC: Module ID: 0x%04x, Module version: 0x%04x\n",
261 vcr.mod_id, vcr.mod_vers);
262
263 /* Set the ME bit */
264 tmp = sh64_in64(DMAC_COMMON_BASE);
265 tmp |= DMAC_COMMON_ME;
266 sh64_out64(tmp, DMAC_COMMON_BASE);
267
268 /* Enable the DMAC Error Interrupt */
269 make_intc_irq(DMA_IRQ_DERR);
270 setup_irq(DMA_IRQ_DERR, &irq_derr);
271
272 return 0;
273}
274
275static void __exit exit_dma(void)
276{
277 onchip_unmap(dmac_base);
278 free_irq(DMA_IRQ_DERR, 0);
279}
280
281module_init(init_dma);
282module_exit(exit_dma);
283
284MODULE_AUTHOR("Paul Mundt");
285MODULE_DESCRIPTION("DMA API for SH-5 DMAC");
286MODULE_LICENSE("GPL");
287
288EXPORT_SYMBOL(setup_dma);
289EXPORT_SYMBOL(claim_dma_lock);
290EXPORT_SYMBOL(release_dma_lock);
291EXPORT_SYMBOL(enable_dma);
292EXPORT_SYMBOL(disable_dma);
293EXPORT_SYMBOL(set_dma_mode);
294EXPORT_SYMBOL(set_dma_addr);
295EXPORT_SYMBOL(set_dma_count);
296EXPORT_SYMBOL(get_dma_residue);
297
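Taken together, the exports above form a small ISA-style DMA API for the SH-5 DMAC. A hedged sketch of how a client driver might drive it; MY_CHAN and MY_DEV_ADDR are illustrative placeholders, not real board constants:

	/* Sketch only: assumes a valid channel and device FIFO address. */
	static int start_transfer(unsigned long buf_phys, unsigned int len)
	{
		dma_info_t info = {
			.dev_addr = MY_DEV_ADDR,	/* placeholder */
			.mem_addr = buf_phys,
			.mode	  = DMA_MODE_WRITE,	/* memory -> device */
			.count	  = len,
		};
		unsigned long flags;

		setup_dma(MY_CHAN, &info);	/* hooks up the DMTE interrupt */

		flags = claim_dma_lock();
		set_dma_mode(MY_CHAN, DMA_MODE_WRITE);	/* reprograms SAR/DAR/COUNT */
		set_dma_addr(MY_CHAN, buf_phys);
		set_dma_count(MY_CHAN, len);
		enable_dma(MY_CHAN);		/* sets CTRL.TE; transfer starts */
		release_dma_lock(flags);

		return 0;
	}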
diff --git a/arch/sh64/kernel/early_printk.c b/arch/sh64/kernel/early_printk.c
deleted file mode 100644
index 4f9131123672..000000000000
--- a/arch/sh64/kernel/early_printk.c
+++ /dev/null
@@ -1,99 +0,0 @@
1/*
2 * arch/sh64/kernel/early_printk.c
3 *
4 * SH-5 Early SCIF console (cloned and hacked from sh implementation)
5 *
6 * Copyright (C) 2003, 2004 Paul Mundt <lethal@linux-sh.org>
7 * Copyright (C) 2002 M. R. Brown <mrbrown@0xd6.org>
8 *
9 * This file is subject to the terms and conditions of the GNU General Public
10 * License. See the file "COPYING" in the main directory of this archive
11 * for more details.
12 */
13#include <linux/console.h>
14#include <linux/tty.h>
15#include <linux/init.h>
16#include <asm/io.h>
17#include <asm/hardware.h>
18
19#define SCIF_BASE_ADDR 0x01030000
20#define SCIF_ADDR_SH5	(PHYS_PERIPHERAL_BLOCK + SCIF_BASE_ADDR)
21
22/*
23 * Fixed virtual address where SCIF is mapped (should already be done
24 * in arch/sh64/kernel/head.S!).
25 */
26#define SCIF_REG 0xfa030000
27
28enum {
29 SCIF_SCSMR2 = SCIF_REG + 0x00,
30 SCIF_SCBRR2 = SCIF_REG + 0x04,
31 SCIF_SCSCR2 = SCIF_REG + 0x08,
32 SCIF_SCFTDR2 = SCIF_REG + 0x0c,
33 SCIF_SCFSR2 = SCIF_REG + 0x10,
34 SCIF_SCFRDR2 = SCIF_REG + 0x14,
35 SCIF_SCFCR2 = SCIF_REG + 0x18,
36 SCIF_SCFDR2 = SCIF_REG + 0x1c,
37 SCIF_SCSPTR2 = SCIF_REG + 0x20,
38 SCIF_SCLSR2 = SCIF_REG + 0x24,
39};
40
41static void sh_console_putc(int c)
42{
43 while (!(ctrl_inw(SCIF_SCFSR2) & 0x20))
44 cpu_relax();
45
46 ctrl_outb(c, SCIF_SCFTDR2);
47 ctrl_outw((ctrl_inw(SCIF_SCFSR2) & 0x9f), SCIF_SCFSR2);
48
49 if (c == '\n')
50 sh_console_putc('\r');
51}
52
53static void sh_console_flush(void)
54{
55 ctrl_outw((ctrl_inw(SCIF_SCFSR2) & 0xbf), SCIF_SCFSR2);
56
57 while (!(ctrl_inw(SCIF_SCFSR2) & 0x40))
58 cpu_relax();
59
60 ctrl_outw((ctrl_inw(SCIF_SCFSR2) & 0xbf), SCIF_SCFSR2);
61}
62
63static void sh_console_write(struct console *con, const char *s, unsigned count)
64{
65 while (count-- > 0)
66 sh_console_putc(*s++);
67
68 sh_console_flush();
69}
70
71static int __init sh_console_setup(struct console *con, char *options)
72{
73 con->cflag = CREAD | HUPCL | CLOCAL | B19200 | CS8;
74
75 return 0;
76}
77
78static struct console sh_console = {
79 .name = "scifcon",
80 .write = sh_console_write,
81 .setup = sh_console_setup,
82 .flags = CON_PRINTBUFFER | CON_BOOT,
83 .index = -1,
84};
85
86void __init enable_early_printk(void)
87{
88 ctrl_outb(0x2a, SCIF_SCBRR2); /* 19200bps */
89
90 ctrl_outw(0x04, SCIF_SCFCR2); /* Reset TFRST */
91 ctrl_outw(0x10, SCIF_SCFCR2); /* TTRG0=1 */
92
93 ctrl_outw(0, SCIF_SCSPTR2);
94 ctrl_outw(0x60, SCIF_SCFSR2);
95 ctrl_outw(0, SCIF_SCLSR2);
96 ctrl_outw(0x30, SCIF_SCSCR2);
97
98 register_console(&sh_console);
99}
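The 0x2a written to SCBRR2 is the bit-rate divisor. Assuming the usual SCIF relation with the clock-select bits at their reset value, BRR = Pclk / (32 * baud) - 1, 0x2a (42) gives 19200 baud for a peripheral clock of roughly 26.6 MHz; the exact clock is board-specific, so treat that figure as an assumption:

	/* Sketch only: standard SCIF divisor formula, CKS = 0 assumed. */
	static unsigned char scif_brr(unsigned long pclk_hz, unsigned long baud)
	{
		return pclk_hz / (32 * baud) - 1;
	}
	/* scif_brr(26650000, 19200) == 42 == 0x2a, matching enable_early_printk(). */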
diff --git a/arch/sh64/kernel/entry.S b/arch/sh64/kernel/entry.S
deleted file mode 100644
index 7013fcb6665c..000000000000
--- a/arch/sh64/kernel/entry.S
+++ /dev/null
@@ -1,2102 +0,0 @@
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * arch/sh64/kernel/entry.S
7 *
8 * Copyright (C) 2000, 2001 Paolo Alberelli
9 * Copyright (C) 2004, 2005 Paul Mundt
10 * Copyright (C) 2003, 2004 Richard Curnow
11 *
12 */
13
14#include <linux/errno.h>
15#include <linux/sys.h>
16
17#include <asm/processor.h>
18#include <asm/registers.h>
19#include <asm/unistd.h>
20#include <asm/thread_info.h>
21#include <asm/asm-offsets.h>
22
23/*
24 * SR fields.
25 */
26#define SR_ASID_MASK 0x00ff0000
27#define SR_FD_MASK 0x00008000
28#define SR_SS 0x08000000
29#define SR_BL 0x10000000
30#define SR_MD 0x40000000
31
32/*
33 * Event code.
34 */
35#define EVENT_INTERRUPT 0
36#define EVENT_FAULT_TLB 1
37#define EVENT_FAULT_NOT_TLB 2
38#define EVENT_DEBUG 3
39
40/* EXPEVT values */
41#define RESET_CAUSE 0x20
42#define DEBUGSS_CAUSE 0x980
43
44/*
45 * Frame layout. Quad index.
46 */
47#define FRAME_T(x) FRAME_TBASE+(x*8)
48#define FRAME_R(x) FRAME_RBASE+(x*8)
49#define FRAME_S(x) FRAME_SBASE+(x*8)
50#define FSPC 0
51#define FSSR 1
52#define FSYSCALL_ID 2
53
54/* Arrange the save frame to be a multiple of 32 bytes long */
55#define FRAME_SBASE 0
56#define FRAME_RBASE (FRAME_SBASE+(3*8)) /* SYSCALL_ID - SSR - SPC */
57#define FRAME_TBASE (FRAME_RBASE+(63*8)) /* r0 - r62 */
58#define FRAME_PBASE (FRAME_TBASE+(8*8)) /* tr0 -tr7 */
59#define FRAME_SIZE (FRAME_PBASE+(2*8)) /* pad0-pad1 */
60
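/* Checking the arithmetic: FRAME_RBASE = 24, FRAME_TBASE = 24 + 63*8 = 528,
 * FRAME_PBASE = 528 + 8*8 = 592, FRAME_SIZE = 592 + 2*8 = 608 = 19 * 32,
 * satisfying the multiple-of-32 requirement above. */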
61#define FP_FRAME_SIZE	(FP_FRAME_BASE+(33*8))	/* dr0 - dr31 + fpscr */
62#define FP_FRAME_BASE 0
63
64#define SAVED_R2 0*8
65#define SAVED_R3 1*8
66#define SAVED_R4 2*8
67#define SAVED_R5 3*8
68#define SAVED_R18 4*8
69#define SAVED_R6 5*8
70#define SAVED_TR0 6*8
71
72/* These are the registers saved in the TLB path that aren't saved in the first
73 level of the normal one. */
74#define TLB_SAVED_R25 7*8
75#define TLB_SAVED_TR1 8*8
76#define TLB_SAVED_TR2 9*8
77#define TLB_SAVED_TR3 10*8
78#define TLB_SAVED_TR4 11*8
79/* Save R0/R1 : the PT-migrating compiler currently dishonours -ffixed-r0 and -ffixed-r1, causing
80 breakage otherwise. */
81#define TLB_SAVED_R0 12*8
82#define TLB_SAVED_R1 13*8
83
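/* CLI/STI below operate on SR.IMASK (bits 4-7): writing 0xf there masks
 * all interrupt levels, clearing the field re-enables them. */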
84#define CLI() \
85 getcon SR, r6; \
86 ori r6, 0xf0, r6; \
87 putcon r6, SR;
88
89#define STI() \
90 getcon SR, r6; \
91 andi r6, ~0xf0, r6; \
92 putcon r6, SR;
93
94#ifdef CONFIG_PREEMPT
95# define preempt_stop() CLI()
96#else
97# define preempt_stop()
98# define resume_kernel restore_all
99#endif
100
101 .section .data, "aw"
102
103#define FAST_TLBMISS_STACK_CACHELINES 4
104#define FAST_TLBMISS_STACK_QUADWORDS (4*FAST_TLBMISS_STACK_CACHELINES)
105
106/* Register back-up area for all exceptions */
107 .balign 32
108 /* Allow for 16 quadwords to be pushed by fast tlbmiss handling
109 * register saves etc. */
110 .fill FAST_TLBMISS_STACK_QUADWORDS, 8, 0x0
111/* This is 32 byte aligned by construction */
112/* Register back-up area for all exceptions */
113reg_save_area:
114 .quad 0
115 .quad 0
116 .quad 0
117 .quad 0
118
119 .quad 0
120 .quad 0
121 .quad 0
122 .quad 0
123
124 .quad 0
125 .quad 0
126 .quad 0
127 .quad 0
128
129 .quad 0
130 .quad 0
131
132/* Save area for RESVEC exceptions. We cannot use reg_save_area because of
133 * reentrancy. Note this area may be accessed via physical address.
134 * Align so this fits a whole single cache line, for ease of purging.
135 */
136 .balign 32,0,32
137resvec_save_area:
138 .quad 0
139 .quad 0
140 .quad 0
141 .quad 0
142 .quad 0
143 .balign 32,0,32
144
145/* Jump table of 3rd level handlers */
146trap_jtable:
147 .long do_exception_error /* 0x000 */
148 .long do_exception_error /* 0x020 */
149 .long tlb_miss_load /* 0x040 */
150 .long tlb_miss_store /* 0x060 */
151 ! ARTIFICIAL pseudo-EXPEVT setting
152 .long do_debug_interrupt /* 0x080 */
153 .long tlb_miss_load /* 0x0A0 */
154 .long tlb_miss_store /* 0x0C0 */
155 .long do_address_error_load /* 0x0E0 */
156 .long do_address_error_store /* 0x100 */
157#ifdef CONFIG_SH_FPU
158 .long do_fpu_error /* 0x120 */
159#else
160 .long do_exception_error /* 0x120 */
161#endif
162 .long do_exception_error /* 0x140 */
163 .long system_call /* 0x160 */
164 .long do_reserved_inst /* 0x180 */
165 .long do_illegal_slot_inst /* 0x1A0 */
166 .long do_NMI /* 0x1C0 */
167 .long do_exception_error /* 0x1E0 */
168 .rept 15
169 .long do_IRQ /* 0x200 - 0x3C0 */
170 .endr
171 .long do_exception_error /* 0x3E0 */
172 .rept 32
173 .long do_IRQ /* 0x400 - 0x7E0 */
174 .endr
175 .long fpu_error_or_IRQA /* 0x800 */
176 .long fpu_error_or_IRQB /* 0x820 */
177 .long do_IRQ /* 0x840 */
178 .long do_IRQ /* 0x860 */
179 .rept 6
180 .long do_exception_error /* 0x880 - 0x920 */
181 .endr
182 .long do_software_break_point /* 0x940 */
183 .long do_exception_error /* 0x960 */
184 .long do_single_step /* 0x980 */
185
186 .rept 3
187 .long do_exception_error /* 0x9A0 - 0x9E0 */
188 .endr
189 .long do_IRQ /* 0xA00 */
190 .long do_IRQ /* 0xA20 */
191 .long itlb_miss_or_IRQ /* 0xA40 */
192 .long do_IRQ /* 0xA60 */
193 .long do_IRQ /* 0xA80 */
194 .long itlb_miss_or_IRQ /* 0xAA0 */
195 .long do_exception_error /* 0xAC0 */
196 .long do_address_error_exec /* 0xAE0 */
197 .rept 8
198 .long do_exception_error /* 0xB00 - 0xBE0 */
199 .endr
200 .rept 18
201 .long do_IRQ /* 0xC00 - 0xE20 */
202 .endr
203
204 .section .text64, "ax"
205
206/*
207 * --- Exception/Interrupt/Event Handling Section
208 */
209
210/*
211 * VBR and RESVEC blocks.
212 *
213 * First level handler for VBR-based exceptions.
214 *
215 * To avoid waste of space, align to the maximum text block size.
216 * This is assumed to be at most 128 bytes or 32 instructions.
217 * DO NOT EXCEED 32 instructions on the first level handlers !
218 *
219 * Also note that RESVEC is contained within the VBR block
220 * where the room left (1KB - TEXT_SIZE) allows placing
221 * the RESVEC block (at most 512B + TEXT_SIZE).
222 *
223 * So first (and only) level handler for RESVEC-based exceptions.
224 *
225 * Where the fault/interrupt is handled (not_a_tlb_miss, tlb_miss
226 * and interrupt) we are very tight on register space until
227 * saving onto the stack frame, which is done in handle_exception().
228 *
229 */
230
231#define TEXT_SIZE 128
232#define BLOCK_SIZE 1664 /* Dynamic check, 13*128 */
233
234 .balign TEXT_SIZE
235LVBR_block:
236 .space 256, 0 /* Power-on class handler, */
237 /* not required here */
238not_a_tlb_miss:
239 synco /* TAKum03020 (but probably a good idea anyway.) */
240 /* Save original stack pointer into KCR1 */
241 putcon SP, KCR1
242
243 /* Save other original registers into reg_save_area */
244 movi reg_save_area, SP
245 st.q SP, SAVED_R2, r2
246 st.q SP, SAVED_R3, r3
247 st.q SP, SAVED_R4, r4
248 st.q SP, SAVED_R5, r5
249 st.q SP, SAVED_R6, r6
250 st.q SP, SAVED_R18, r18
251 gettr tr0, r3
252 st.q SP, SAVED_TR0, r3
253
254 /* Set args for Non-debug, Not a TLB miss class handler */
255 getcon EXPEVT, r2
256 movi ret_from_exception, r3
257 ori r3, 1, r3
258 movi EVENT_FAULT_NOT_TLB, r4
259 or SP, ZERO, r5
260 getcon KCR1, SP
261 pta handle_exception, tr0
262 blink tr0, ZERO
263
264 .balign 256
265 ! VBR+0x200
266 nop
267 .balign 256
268 ! VBR+0x300
269 nop
270 .balign 256
271 /*
272 * Instead of the natural .balign 1024 place RESVEC here
273 * respecting the final 1KB alignment.
274 */
275 .balign TEXT_SIZE
276 /*
277 * Instead of '.space 1024-TEXT_SIZE' place the RESVEC
278 * block making sure the final alignment is correct.
279 */
280tlb_miss:
281 synco /* TAKum03020 (but probably a good idea anyway.) */
282 putcon SP, KCR1
283 movi reg_save_area, SP
284 /* SP is guaranteed 32-byte aligned. */
285 st.q SP, TLB_SAVED_R0 , r0
286 st.q SP, TLB_SAVED_R1 , r1
287 st.q SP, SAVED_R2 , r2
288 st.q SP, SAVED_R3 , r3
289 st.q SP, SAVED_R4 , r4
290 st.q SP, SAVED_R5 , r5
291 st.q SP, SAVED_R6 , r6
292 st.q SP, SAVED_R18, r18
293
294 /* Save R25 for safety; as/ld may want to use it to achieve the call to
295 * the code in mm/tlbmiss.c */
296 st.q SP, TLB_SAVED_R25, r25
297 gettr tr0, r2
298 gettr tr1, r3
299 gettr tr2, r4
300 gettr tr3, r5
301 gettr tr4, r18
302 st.q SP, SAVED_TR0 , r2
303 st.q SP, TLB_SAVED_TR1 , r3
304 st.q SP, TLB_SAVED_TR2 , r4
305 st.q SP, TLB_SAVED_TR3 , r5
306 st.q SP, TLB_SAVED_TR4 , r18
307
308 pt do_fast_page_fault, tr0
309 getcon SSR, r2
310 getcon EXPEVT, r3
311 getcon TEA, r4
312 shlri r2, 30, r2
313 andi r2, 1, r2 /* r2 = SSR.MD */
314 blink tr0, LINK
315
316 pt fixup_to_invoke_general_handler, tr1
317
318 /* If the fast path handler fixed the fault, just drop through quickly
319 to the restore code right away to return to the excepting context.
320 */
321 beqi/u r2, 0, tr1
322
323fast_tlb_miss_restore:
324 ld.q SP, SAVED_TR0, r2
325 ld.q SP, TLB_SAVED_TR1, r3
326 ld.q SP, TLB_SAVED_TR2, r4
327
328 ld.q SP, TLB_SAVED_TR3, r5
329 ld.q SP, TLB_SAVED_TR4, r18
330
331 ptabs r2, tr0
332 ptabs r3, tr1
333 ptabs r4, tr2
334 ptabs r5, tr3
335 ptabs r18, tr4
336
337 ld.q SP, TLB_SAVED_R0, r0
338 ld.q SP, TLB_SAVED_R1, r1
339 ld.q SP, SAVED_R2, r2
340 ld.q SP, SAVED_R3, r3
341 ld.q SP, SAVED_R4, r4
342 ld.q SP, SAVED_R5, r5
343 ld.q SP, SAVED_R6, r6
344 ld.q SP, SAVED_R18, r18
345 ld.q SP, TLB_SAVED_R25, r25
346
347 getcon KCR1, SP
348 rte
349 nop /* for safety, in case the code is run on sh5-101 cut1.x */
350
351fixup_to_invoke_general_handler:
352
353 /* OK, new method. Restore stuff that's not expected to get saved into
354 the 'first-level' reg save area, then just fall through to setting
355 up the registers and calling the second-level handler. */
356
357 /* 2nd level expects r2,3,4,5,6,18,tr0 to be saved. So we must restore
358 r25,tr1-4 and save r6 to get into the right state. */
359
360 ld.q SP, TLB_SAVED_TR1, r3
361 ld.q SP, TLB_SAVED_TR2, r4
362 ld.q SP, TLB_SAVED_TR3, r5
363 ld.q SP, TLB_SAVED_TR4, r18
364 ld.q SP, TLB_SAVED_R25, r25
365
366 ld.q SP, TLB_SAVED_R0, r0
367 ld.q SP, TLB_SAVED_R1, r1
368
369 ptabs/u r3, tr1
370 ptabs/u r4, tr2
371 ptabs/u r5, tr3
372 ptabs/u r18, tr4
373
374 /* Set args for Non-debug, TLB miss class handler */
375 getcon EXPEVT, r2
376 movi ret_from_exception, r3
377 ori r3, 1, r3
378 movi EVENT_FAULT_TLB, r4
379 or SP, ZERO, r5
380 getcon KCR1, SP
381 pta handle_exception, tr0
382 blink tr0, ZERO
383
384/* NB TAKE GREAT CARE HERE TO ENSURE THAT THE INTERRUPT CODE
385 DOES END UP AT VBR+0x600 */
386 nop
387 nop
388 nop
389 nop
390 nop
391 nop
392
393 .balign 256
394 /* VBR + 0x600 */
395
396interrupt:
397 synco /* TAKum03020 (but probably a good idea anyway.) */
398 /* Save original stack pointer into KCR1 */
399 putcon SP, KCR1
400
401 /* Save other original registers into reg_save_area */
402 movi reg_save_area, SP
403 st.q SP, SAVED_R2, r2
404 st.q SP, SAVED_R3, r3
405 st.q SP, SAVED_R4, r4
406 st.q SP, SAVED_R5, r5
407 st.q SP, SAVED_R6, r6
408 st.q SP, SAVED_R18, r18
409 gettr tr0, r3
410 st.q SP, SAVED_TR0, r3
411
412 /* Set args for interrupt class handler */
413 getcon INTEVT, r2
414 movi ret_from_irq, r3
415 ori r3, 1, r3
416 movi EVENT_INTERRUPT, r4
417 or SP, ZERO, r5
418 getcon KCR1, SP
419 pta handle_exception, tr0
420 blink tr0, ZERO
421 .balign TEXT_SIZE /* let's waste the bare minimum */
422
423LVBR_block_end: /* Marker. Used for total checking */
424
425 .balign 256
426LRESVEC_block:
427 /* Panic handler. Called with MMU off. Possible causes/actions:
428 * - Reset: Jump to program start.
429 * - Single Step: Turn off Single Step & return.
430 * - Others: Call panic handler, passing PC as arg.
431 * (this may need to be extended...)
432 */
433reset_or_panic:
434 synco /* TAKum03020 (but probably a good idea anyway.) */
435 putcon SP, DCR
436 /* First save r0-1 and tr0, as we need to use these */
437 movi resvec_save_area-CONFIG_CACHED_MEMORY_OFFSET, SP
438 st.q SP, 0, r0
439 st.q SP, 8, r1
440 gettr tr0, r0
441 st.q SP, 32, r0
442
443 /* Check cause */
444 getcon EXPEVT, r0
445 movi RESET_CAUSE, r1
446 sub r1, r0, r1 /* r1=0 if reset */
447 movi _stext-CONFIG_CACHED_MEMORY_OFFSET, r0
448 ori r0, 1, r0
449 ptabs r0, tr0
450 beqi r1, 0, tr0 /* Jump to start address if reset */
451
452 getcon EXPEVT, r0
453 movi DEBUGSS_CAUSE, r1
454 sub r1, r0, r1 /* r1=0 if single step */
455 pta single_step_panic, tr0
456 beqi r1, 0, tr0 /* jump if single step */
457
458 /* Now jump to where we save the registers. */
459 movi panic_stash_regs-CONFIG_CACHED_MEMORY_OFFSET, r1
460 ptabs r1, tr0
461 blink tr0, r63
462
463single_step_panic:
464 /* We are in a handler with Single Step set. We need to resume the
465 * handler, by turning on MMU & turning off Single Step. */
466 getcon SSR, r0
467 movi SR_MMU, r1
468 or r0, r1, r0
469 movi ~SR_SS, r1
470 and r0, r1, r0
471 putcon r0, SSR
472 /* Restore EXPEVT, as the rte won't do this */
473 getcon PEXPEVT, r0
474 putcon r0, EXPEVT
475 /* Restore regs */
476 ld.q SP, 32, r0
477 ptabs r0, tr0
478 ld.q SP, 0, r0
479 ld.q SP, 8, r1
480 getcon DCR, SP
481 synco
482 rte
483
484
485 .balign 256
486debug_exception:
487 synco /* TAKum03020 (but probably a good idea anyway.) */
488 /*
489 * Single step/software_break_point first level handler.
490 * Called with MMU off, so the first thing we do is enable it
491 * by doing an rte with appropriate SSR.
492 */
493 putcon SP, DCR
494 /* Save SSR & SPC, together with R0 & R1, as we need to use 2 regs. */
495 movi resvec_save_area-CONFIG_CACHED_MEMORY_OFFSET, SP
496
497 /* With the MMU off, we are bypassing the cache, so purge any
498 * data that will be made stale by the following stores.
499 */
500 ocbp SP, 0
501 synco
502
503 st.q SP, 0, r0
504 st.q SP, 8, r1
505 getcon SPC, r0
506 st.q SP, 16, r0
507 getcon SSR, r0
508 st.q SP, 24, r0
509
510 /* Enable MMU, block exceptions, set priv mode, disable single step */
511 movi SR_MMU | SR_BL | SR_MD, r1
512 or r0, r1, r0
513 movi ~SR_SS, r1
514 and r0, r1, r0
515 putcon r0, SSR
516 /* Force control to debug_exception_2 when rte is executed */
517	movi	debug_exception_2, r0
518 ori r0, 1, r0 /* force SHmedia, just in case */
519 putcon r0, SPC
520 getcon DCR, SP
521 synco
522 rte
523debug_exception_2:
524 /* Restore saved regs */
525 putcon SP, KCR1
526 movi resvec_save_area, SP
527 ld.q SP, 24, r0
528 putcon r0, SSR
529 ld.q SP, 16, r0
530 putcon r0, SPC
531 ld.q SP, 0, r0
532 ld.q SP, 8, r1
533
534 /* Save other original registers into reg_save_area */
535 movi reg_save_area, SP
536 st.q SP, SAVED_R2, r2
537 st.q SP, SAVED_R3, r3
538 st.q SP, SAVED_R4, r4
539 st.q SP, SAVED_R5, r5
540 st.q SP, SAVED_R6, r6
541 st.q SP, SAVED_R18, r18
542 gettr tr0, r3
543 st.q SP, SAVED_TR0, r3
544
545 /* Set args for debug class handler */
546 getcon EXPEVT, r2
547 movi ret_from_exception, r3
548 ori r3, 1, r3
549 movi EVENT_DEBUG, r4
550 or SP, ZERO, r5
551 getcon KCR1, SP
552 pta handle_exception, tr0
553 blink tr0, ZERO
554
555 .balign 256
556debug_interrupt:
557 /* !!! WE COME HERE IN REAL MODE !!! */
558 /* Hook-up debug interrupt to allow various debugging options to be
559 * hooked into its handler. */
560 /* Save original stack pointer into KCR1 */
561 synco
562 putcon SP, KCR1
563 movi resvec_save_area-CONFIG_CACHED_MEMORY_OFFSET, SP
564 ocbp SP, 0
565 ocbp SP, 32
566 synco
567
568	/* Save other original registers into reg_save_area through real addresses */
569 st.q SP, SAVED_R2, r2
570 st.q SP, SAVED_R3, r3
571 st.q SP, SAVED_R4, r4
572 st.q SP, SAVED_R5, r5
573 st.q SP, SAVED_R6, r6
574 st.q SP, SAVED_R18, r18
575 gettr tr0, r3
576 st.q SP, SAVED_TR0, r3
577
578 /* move (spc,ssr)->(pspc,pssr). The rte will shift
579 them back again, so that they look like the originals
580 as far as the real handler code is concerned. */
581 getcon spc, r6
582 putcon r6, pspc
583 getcon ssr, r6
584 putcon r6, pssr
585
586 ! construct useful SR for handle_exception
587 movi 3, r6
588 shlli r6, 30, r6
589 getcon sr, r18
590 or r18, r6, r6
591 putcon r6, ssr
592
593 ! SSR is now the current SR with the MD and MMU bits set
594 ! i.e. the rte will switch back to priv mode and put
595 ! the mmu back on
596
597 ! construct spc
598 movi handle_exception, r18
599 ori r18, 1, r18 ! for safety (do we need this?)
600 putcon r18, spc
601
602 /* Set args for Non-debug, Not a TLB miss class handler */
603
604 ! EXPEVT==0x80 is unused, so 'steal' this value to put the
605 ! debug interrupt handler in the vectoring table
606 movi 0x80, r2
607 movi ret_from_exception, r3
608 ori r3, 1, r3
609 movi EVENT_FAULT_NOT_TLB, r4
610
611 or SP, ZERO, r5
612 movi CONFIG_CACHED_MEMORY_OFFSET, r6
613 add r6, r5, r5
614 getcon KCR1, SP
615
616 synco ! for safety
617 rte ! -> handle_exception, switch back to priv mode again
618
619LRESVEC_block_end: /* Marker. Unused. */
620
621 .balign TEXT_SIZE
622
623/*
624 * Second level handler for VBR-based exceptions. Pre-handler.
625 * Common to all stack-frame-sensitive handlers.
626 *
627 * Inputs:
628 * (KCR0) Current [current task union]
629 * (KCR1) Original SP
630 * (r2) INTEVT/EXPEVT
631 * (r3) appropriate return address
632 * (r4) Event (0 = interrupt, 1 = TLB miss fault, 2 = Not TLB miss fault, 3=debug)
633 * (r5) Pointer to reg_save_area
634 * (SP) Original SP
635 *
636 * Available registers:
637 * (r6)
638 * (r18)
639 * (tr0)
640 *
641 */
642handle_exception:
643 /* Common 2nd level handler. */
644
645 /* First thing we need an appropriate stack pointer */
646 getcon SSR, r6
647 shlri r6, 30, r6
648 andi r6, 1, r6
649 pta stack_ok, tr0
650 bne r6, ZERO, tr0 /* Original stack pointer is fine */
651
652 /* Set stack pointer for user fault */
653 getcon KCR0, SP
654 movi THREAD_SIZE, r6 /* Point to the end */
655 add SP, r6, SP
656
657stack_ok:
658
659/* DEBUG : check for underflow/overflow of the kernel stack */
660 pta no_underflow, tr0
661 getcon KCR0, r6
662 movi 1024, r18
663 add r6, r18, r6
664 bge SP, r6, tr0 ! ? below 1k from bottom of stack : danger zone
665
666/* Just panic to cause a crash. */
667bad_sp:
668 ld.b r63, 0, r6
669 nop
670
671no_underflow:
672 pta bad_sp, tr0
673 getcon kcr0, r6
674 movi THREAD_SIZE, r18
675 add r18, r6, r6
676 bgt SP, r6, tr0 ! sp above the stack
677
678 /* Make some room for the BASIC frame. */
679 movi -(FRAME_SIZE), r6
680 add SP, r6, SP
681
682/* Could do this with no stalling if we had another spare register, but the
683 code below will be OK. */
684 ld.q r5, SAVED_R2, r6
685 ld.q r5, SAVED_R3, r18
686 st.q SP, FRAME_R(2), r6
687 ld.q r5, SAVED_R4, r6
688 st.q SP, FRAME_R(3), r18
689 ld.q r5, SAVED_R5, r18
690 st.q SP, FRAME_R(4), r6
691 ld.q r5, SAVED_R6, r6
692 st.q SP, FRAME_R(5), r18
693 ld.q r5, SAVED_R18, r18
694 st.q SP, FRAME_R(6), r6
695 ld.q r5, SAVED_TR0, r6
696 st.q SP, FRAME_R(18), r18
697 st.q SP, FRAME_T(0), r6
698
699 /* Keep old SP around */
700 getcon KCR1, r6
701
702 /* Save the rest of the general purpose registers */
703 st.q SP, FRAME_R(0), r0
704 st.q SP, FRAME_R(1), r1
705 st.q SP, FRAME_R(7), r7
706 st.q SP, FRAME_R(8), r8
707 st.q SP, FRAME_R(9), r9
708 st.q SP, FRAME_R(10), r10
709 st.q SP, FRAME_R(11), r11
710 st.q SP, FRAME_R(12), r12
711 st.q SP, FRAME_R(13), r13
712 st.q SP, FRAME_R(14), r14
713
714 /* SP is somewhere else */
715 st.q SP, FRAME_R(15), r6
716
717 st.q SP, FRAME_R(16), r16
718 st.q SP, FRAME_R(17), r17
719 /* r18 is saved earlier. */
720 st.q SP, FRAME_R(19), r19
721 st.q SP, FRAME_R(20), r20
722 st.q SP, FRAME_R(21), r21
723 st.q SP, FRAME_R(22), r22
724 st.q SP, FRAME_R(23), r23
725 st.q SP, FRAME_R(24), r24
726 st.q SP, FRAME_R(25), r25
727 st.q SP, FRAME_R(26), r26
728 st.q SP, FRAME_R(27), r27
729 st.q SP, FRAME_R(28), r28
730 st.q SP, FRAME_R(29), r29
731 st.q SP, FRAME_R(30), r30
732 st.q SP, FRAME_R(31), r31
733 st.q SP, FRAME_R(32), r32
734 st.q SP, FRAME_R(33), r33
735 st.q SP, FRAME_R(34), r34
736 st.q SP, FRAME_R(35), r35
737 st.q SP, FRAME_R(36), r36
738 st.q SP, FRAME_R(37), r37
739 st.q SP, FRAME_R(38), r38
740 st.q SP, FRAME_R(39), r39
741 st.q SP, FRAME_R(40), r40
742 st.q SP, FRAME_R(41), r41
743 st.q SP, FRAME_R(42), r42
744 st.q SP, FRAME_R(43), r43
745 st.q SP, FRAME_R(44), r44
746 st.q SP, FRAME_R(45), r45
747 st.q SP, FRAME_R(46), r46
748 st.q SP, FRAME_R(47), r47
749 st.q SP, FRAME_R(48), r48
750 st.q SP, FRAME_R(49), r49
751 st.q SP, FRAME_R(50), r50
752 st.q SP, FRAME_R(51), r51
753 st.q SP, FRAME_R(52), r52
754 st.q SP, FRAME_R(53), r53
755 st.q SP, FRAME_R(54), r54
756 st.q SP, FRAME_R(55), r55
757 st.q SP, FRAME_R(56), r56
758 st.q SP, FRAME_R(57), r57
759 st.q SP, FRAME_R(58), r58
760 st.q SP, FRAME_R(59), r59
761 st.q SP, FRAME_R(60), r60
762 st.q SP, FRAME_R(61), r61
763 st.q SP, FRAME_R(62), r62
764
765 /*
766 * Save the S* registers.
767 */
768 getcon SSR, r61
769 st.q SP, FRAME_S(FSSR), r61
770 getcon SPC, r62
771 st.q SP, FRAME_S(FSPC), r62
772 movi -1, r62 /* Reset syscall_nr */
773 st.q SP, FRAME_S(FSYSCALL_ID), r62
774
775 /* Save the rest of the target registers */
776 gettr tr1, r6
777 st.q SP, FRAME_T(1), r6
778 gettr tr2, r6
779 st.q SP, FRAME_T(2), r6
780 gettr tr3, r6
781 st.q SP, FRAME_T(3), r6
782 gettr tr4, r6
783 st.q SP, FRAME_T(4), r6
784 gettr tr5, r6
785 st.q SP, FRAME_T(5), r6
786 gettr tr6, r6
787 st.q SP, FRAME_T(6), r6
788 gettr tr7, r6
789 st.q SP, FRAME_T(7), r6
790
791 ! setup FP so that unwinder can wind back through nested kernel mode
792 ! exceptions
793 add SP, ZERO, r14
794
795#ifdef CONFIG_POOR_MANS_STRACE
796 /* We've pushed all the registers now, so only r2-r4 hold anything
797 * useful. Move them into callee save registers */
798 or r2, ZERO, r28
799 or r3, ZERO, r29
800 or r4, ZERO, r30
801
802 /* Preserve r2 as the event code */
803 movi evt_debug, r3
804 ori r3, 1, r3
805 ptabs r3, tr0
806
807 or SP, ZERO, r6
808 getcon TRA, r5
809 blink tr0, LINK
810
811 or r28, ZERO, r2
812 or r29, ZERO, r3
813 or r30, ZERO, r4
814#endif
815
816 /* For syscall and debug race condition, get TRA now */
817 getcon TRA, r5
818
819 /* We are in a safe position to turn SR.BL off, but set IMASK=0xf
820 * Also set FD, to catch FPU usage in the kernel.
821 *
822 * benedict.gaster@superh.com 29/07/2002
823 *
824 * On all SH5-101 revisions it is unsafe to raise the IMASK and at the
825 * same time change BL from 1->0, as any pending interrupt of a level
826 * higher than the previous value of IMASK will leak through and be
827 * taken unexpectedly.
828 *
829 * To avoid this we raise the IMASK and then issue another PUTCON to
830 * enable interrupts.
831 */
832 getcon SR, r6
833 movi SR_IMASK | SR_FD, r7
834 or r6, r7, r6
835 putcon r6, SR
836 movi SR_UNBLOCK_EXC, r7
837 and r6, r7, r6
838 putcon r6, SR
839
840
841 /* Now call the appropriate 3rd level handler */
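	/* EXPEVT/INTEVT codes step in units of 0x20 and trap_jtable entries
	 * are 4-byte .longs, so code >> 3 is the byte offset into the table
	 * (0x20 / 8 == 4); the further >> 2 leaves code >> 5, the entry
	 * number handed to the third-level handler in r2. */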
842 or r3, ZERO, LINK
843 movi trap_jtable, r3
844 shlri r2, 3, r2
845 ldx.l r2, r3, r3
846 shlri r2, 2, r2
847 ptabs r3, tr0
848 or SP, ZERO, r3
849 blink tr0, ZERO
850
851/*
852 * Second level handler for VBR-based exceptions. Post-handlers.
853 *
854 * Post-handlers for interrupts (ret_from_irq), exceptions
855 * (ret_from_exception) and common reentrance doors (restore_all
856 * to get back to the original context, ret_from_syscall loop to
857 * check kernel exiting).
858 *
859 * ret_with_reschedule and work_notifysig are inner labels of
860 * the ret_from_syscall loop.
861 *
862 * Common to all stack-frame-sensitive handlers.
863 *
864 * Inputs:
865 * (SP) struct pt_regs *, original register's frame pointer (basic)
866 *
867 */
868 .global ret_from_irq
869ret_from_irq:
870#ifdef CONFIG_POOR_MANS_STRACE
871 pta evt_debug_ret_from_irq, tr0
872 ori SP, 0, r2
873 blink tr0, LINK
874#endif
875 ld.q SP, FRAME_S(FSSR), r6
876 shlri r6, 30, r6
877 andi r6, 1, r6
878 pta resume_kernel, tr0
879 bne r6, ZERO, tr0 /* no further checks */
880 STI()
881 pta ret_with_reschedule, tr0
882 blink tr0, ZERO /* Do not check softirqs */
883
884 .global ret_from_exception
885ret_from_exception:
886 preempt_stop()
887
888#ifdef CONFIG_POOR_MANS_STRACE
889 pta evt_debug_ret_from_exc, tr0
890 ori SP, 0, r2
891 blink tr0, LINK
892#endif
893
894 ld.q SP, FRAME_S(FSSR), r6
895 shlri r6, 30, r6
896 andi r6, 1, r6
897 pta resume_kernel, tr0
898 bne r6, ZERO, tr0 /* no further checks */
899
900 /* Check softirqs */
901
902#ifdef CONFIG_PREEMPT
903 pta ret_from_syscall, tr0
904 blink tr0, ZERO
905
906resume_kernel:
907 pta restore_all, tr0
908
909 getcon KCR0, r6
910 ld.l r6, TI_PRE_COUNT, r7
911 beq/u r7, ZERO, tr0
912
913need_resched:
914 ld.l r6, TI_FLAGS, r7
915 movi (1 << TIF_NEED_RESCHED), r8
916 and r8, r7, r8
917 bne r8, ZERO, tr0
918
919 getcon SR, r7
920 andi r7, 0xf0, r7
921 bne r7, ZERO, tr0
922
923 movi ((PREEMPT_ACTIVE >> 16) & 65535), r8
924 shori (PREEMPT_ACTIVE & 65535), r8
925 st.l r6, TI_PRE_COUNT, r8
926
927 STI()
928 movi schedule, r7
929 ori r7, 1, r7
930 ptabs r7, tr1
931 blink tr1, LINK
932
933 st.l r6, TI_PRE_COUNT, ZERO
934 CLI()
935
936 pta need_resched, tr1
937 blink tr1, ZERO
938#endif
939
940 .global ret_from_syscall
941ret_from_syscall:
942
943ret_with_reschedule:
944 getcon KCR0, r6 ! r6 contains current_thread_info
945 ld.l r6, TI_FLAGS, r7 ! r7 contains current_thread_info->flags
946
947 ! FIXME:!!!
948 ! no handling of TIF_SYSCALL_TRACE yet!!
949
950 movi _TIF_NEED_RESCHED, r8
951 and r8, r7, r8
952 pta work_resched, tr0
953 bne r8, ZERO, tr0
954
955 pta restore_all, tr1
956
957 movi (_TIF_SIGPENDING | _TIF_RESTORE_SIGMASK), r8
958 and r8, r7, r8
959 pta work_notifysig, tr0
960 bne r8, ZERO, tr0
961
962 blink tr1, ZERO
963
964work_resched:
965 pta ret_from_syscall, tr0
966 gettr tr0, LINK
967 movi schedule, r6
968 ptabs r6, tr0
969 blink tr0, ZERO /* Call schedule(), return on top */
970
971work_notifysig:
972 gettr tr1, LINK
973
974 movi do_signal, r6
975 ptabs r6, tr0
976 or SP, ZERO, r2
977 or ZERO, ZERO, r3
978 blink tr0, LINK /* Call do_signal(regs, 0), return here */
979
980restore_all:
981 /* Do prefetches */
982
983 ld.q SP, FRAME_T(0), r6
984 ld.q SP, FRAME_T(1), r7
985 ld.q SP, FRAME_T(2), r8
986 ld.q SP, FRAME_T(3), r9
987 ptabs r6, tr0
988 ptabs r7, tr1
989 ptabs r8, tr2
990 ptabs r9, tr3
991 ld.q SP, FRAME_T(4), r6
992 ld.q SP, FRAME_T(5), r7
993 ld.q SP, FRAME_T(6), r8
994 ld.q SP, FRAME_T(7), r9
995 ptabs r6, tr4
996 ptabs r7, tr5
997 ptabs r8, tr6
998 ptabs r9, tr7
999
1000 ld.q SP, FRAME_R(0), r0
1001 ld.q SP, FRAME_R(1), r1
1002 ld.q SP, FRAME_R(2), r2
1003 ld.q SP, FRAME_R(3), r3
1004 ld.q SP, FRAME_R(4), r4
1005 ld.q SP, FRAME_R(5), r5
1006 ld.q SP, FRAME_R(6), r6
1007 ld.q SP, FRAME_R(7), r7
1008 ld.q SP, FRAME_R(8), r8
1009 ld.q SP, FRAME_R(9), r9
1010 ld.q SP, FRAME_R(10), r10
1011 ld.q SP, FRAME_R(11), r11
1012 ld.q SP, FRAME_R(12), r12
1013 ld.q SP, FRAME_R(13), r13
1014 ld.q SP, FRAME_R(14), r14
1015
1016 ld.q SP, FRAME_R(16), r16
1017 ld.q SP, FRAME_R(17), r17
1018 ld.q SP, FRAME_R(18), r18
1019 ld.q SP, FRAME_R(19), r19
1020 ld.q SP, FRAME_R(20), r20
1021 ld.q SP, FRAME_R(21), r21
1022 ld.q SP, FRAME_R(22), r22
1023 ld.q SP, FRAME_R(23), r23
1024 ld.q SP, FRAME_R(24), r24
1025 ld.q SP, FRAME_R(25), r25
1026 ld.q SP, FRAME_R(26), r26
1027 ld.q SP, FRAME_R(27), r27
1028 ld.q SP, FRAME_R(28), r28
1029 ld.q SP, FRAME_R(29), r29
1030 ld.q SP, FRAME_R(30), r30
1031 ld.q SP, FRAME_R(31), r31
1032 ld.q SP, FRAME_R(32), r32
1033 ld.q SP, FRAME_R(33), r33
1034 ld.q SP, FRAME_R(34), r34
1035 ld.q SP, FRAME_R(35), r35
1036 ld.q SP, FRAME_R(36), r36
1037 ld.q SP, FRAME_R(37), r37
1038 ld.q SP, FRAME_R(38), r38
1039 ld.q SP, FRAME_R(39), r39
1040 ld.q SP, FRAME_R(40), r40
1041 ld.q SP, FRAME_R(41), r41
1042 ld.q SP, FRAME_R(42), r42
1043 ld.q SP, FRAME_R(43), r43
1044 ld.q SP, FRAME_R(44), r44
1045 ld.q SP, FRAME_R(45), r45
1046 ld.q SP, FRAME_R(46), r46
1047 ld.q SP, FRAME_R(47), r47
1048 ld.q SP, FRAME_R(48), r48
1049 ld.q SP, FRAME_R(49), r49
1050 ld.q SP, FRAME_R(50), r50
1051 ld.q SP, FRAME_R(51), r51
1052 ld.q SP, FRAME_R(52), r52
1053 ld.q SP, FRAME_R(53), r53
1054 ld.q SP, FRAME_R(54), r54
1055 ld.q SP, FRAME_R(55), r55
1056 ld.q SP, FRAME_R(56), r56
1057 ld.q SP, FRAME_R(57), r57
1058 ld.q SP, FRAME_R(58), r58
1059
1060 getcon SR, r59
1061 movi SR_BLOCK_EXC, r60
1062 or r59, r60, r59
1063 putcon r59, SR /* SR.BL = 1, keep nesting out */
1064 ld.q SP, FRAME_S(FSSR), r61
1065 ld.q SP, FRAME_S(FSPC), r62
1066 movi SR_ASID_MASK, r60
1067 and r59, r60, r59
1068 andc r61, r60, r61 /* Clear out older ASID */
1069 or r59, r61, r61 /* Retain current ASID */
1070 putcon r61, SSR
1071 putcon r62, SPC
1072
1073 /* Ignore FSYSCALL_ID */
1074
1075 ld.q SP, FRAME_R(59), r59
1076 ld.q SP, FRAME_R(60), r60
1077 ld.q SP, FRAME_R(61), r61
1078 ld.q SP, FRAME_R(62), r62
1079
1080 /* Last touch */
1081 ld.q SP, FRAME_R(15), SP
1082 rte
1083 nop
1084
1085/*
1086 * Third level handlers for VBR-based exceptions. Adapting args to
1087 * and/or deflecting to fourth level handlers.
1088 *
1089 * Fourth level handlers interface.
1090 * Most are C-coded handlers directly pointed by the trap_jtable.
1091 * (Third = Fourth level)
1092 * Inputs:
1093 * (r2) fault/interrupt code, entry number (e.g. NMI = 14,
1094 * IRL0-3 (0000) = 16, RTLBMISS = 2, SYSCALL = 11, etc ...)
1095 * (r3) struct pt_regs *, original register's frame pointer
1096 * (r4) Event (0 = interrupt, 1 = TLB miss fault, 2 = Not TLB miss fault)
1097 * (r5) TRA control register (for syscall/debug benefit only)
1098 * (LINK) return address
1099 * (SP) = r3
1100 *
1101 * Kernel TLB fault handlers will get a slightly different interface.
1102 * (r2) struct pt_regs *, original register's frame pointer
1103 * (r3) writeaccess, whether it's a store fault as opposed to load fault
1104 * (r4) execaccess, whether it's a ITLB fault as opposed to DTLB fault
1105 * (r5) Effective Address of fault
1106 * (LINK) return address
1107 * (SP) = r2
1108 *
1109 * fpu_error_or_IRQ? is a helper to deflect to the right cause.
1110 *
1111 */
1112tlb_miss_load:
1113 or SP, ZERO, r2
1114 or ZERO, ZERO, r3 /* Read */
1115 or ZERO, ZERO, r4 /* Data */
1116 getcon TEA, r5
1117 pta call_do_page_fault, tr0
1118 beq ZERO, ZERO, tr0
1119
1120tlb_miss_store:
1121 or SP, ZERO, r2
1122 movi 1, r3 /* Write */
1123 or ZERO, ZERO, r4 /* Data */
1124 getcon TEA, r5
1125 pta call_do_page_fault, tr0
1126 beq ZERO, ZERO, tr0
1127
1128itlb_miss_or_IRQ:
1129 pta its_IRQ, tr0
1130 beqi/u r4, EVENT_INTERRUPT, tr0
1131 or SP, ZERO, r2
1132 or ZERO, ZERO, r3 /* Read */
1133 movi 1, r4 /* Text */
1134 getcon TEA, r5
1135 /* Fall through */
1136
1137call_do_page_fault:
1138 movi do_page_fault, r6
1139 ptabs r6, tr0
1140 blink tr0, ZERO
1141
1142fpu_error_or_IRQA:
1143 pta its_IRQ, tr0
1144 beqi/l r4, EVENT_INTERRUPT, tr0
1145#ifdef CONFIG_SH_FPU
1146 movi do_fpu_state_restore, r6
1147#else
1148 movi do_exception_error, r6
1149#endif
1150 ptabs r6, tr0
1151 blink tr0, ZERO
1152
1153fpu_error_or_IRQB:
1154 pta its_IRQ, tr0
1155 beqi/l r4, EVENT_INTERRUPT, tr0
1156#ifdef CONFIG_SH_FPU
1157 movi do_fpu_state_restore, r6
1158#else
1159 movi do_exception_error, r6
1160#endif
1161 ptabs r6, tr0
1162 blink tr0, ZERO
1163
1164its_IRQ:
1165 movi do_IRQ, r6
1166 ptabs r6, tr0
1167 blink tr0, ZERO
1168
1169/*
1170 * system_call/unknown_trap third level handler:
1171 *
1172 * Inputs:
1173 * (r2) fault/interrupt code, entry number (TRAP = 11)
1174 * (r3) struct pt_regs *, original register's frame pointer
1175 * (r4) Not used. Event (0=interrupt, 1=TLB miss fault, 2=Not TLB miss fault)
1176 * (r5) TRA Control Reg (0x00xyzzzz: x=1 SYSCALL, y = #args, z=nr)
1177 * (SP) = r3
1178 * (LINK) return address: ret_from_exception
1179 * (*r3) Syscall parms: SC#, arg0, arg1, ..., arg5 in order (Saved r2/r7)
1180 *
1181 * Outputs:
1182 * (*r3) Syscall reply (Saved r2)
1183 * (LINK) In the syscall case only, it can be scrapped.
1184 * Common second level post handler will be ret_from_syscall.
1185 * Common (non-trace) exit point to that is syscall_ret (saving
1186 * result to r2). Common bad exit point is syscall_bad (returning
1187 * ENOSYS then saved to r2).
1188 *
1189 */
1190
1191unknown_trap:
1192 /* Unknown Trap or User Trace */
1193 movi do_unknown_trapa, r6
1194 ptabs r6, tr0
1195 ld.q r3, FRAME_R(9), r2 /* r2 = #arg << 16 | syscall # */
1196 andi r2, 0x1ff, r2 /* r2 = syscall # */
1197 blink tr0, LINK
1198
1199 pta syscall_ret, tr0
1200 blink tr0, ZERO
1201
1202	/* New syscall implementation */
1203system_call:
1204 pta unknown_trap, tr0
1205 or r5, ZERO, r4 /* TRA (=r5) -> r4 */
1206 shlri r4, 20, r4
1207 bnei r4, 1, tr0 /* unknown_trap if not 0x1yzzzz */
1208
1209 /* It's a system call */
1210 st.q r3, FRAME_S(FSYSCALL_ID), r5 /* ID (0x1yzzzz) -> stack */
1211 andi r5, 0x1ff, r5 /* syscall # -> r5 */
1212
1213 STI()
1214
1215 pta syscall_allowed, tr0
1216 movi NR_syscalls - 1, r4 /* Last valid */
1217 bgeu/l r4, r5, tr0
1218
1219syscall_bad:
1220 /* Return ENOSYS ! */
1221 movi -(ENOSYS), r2 /* Fall-through */
1222
1223 .global syscall_ret
1224syscall_ret:
1225 st.q SP, FRAME_R(9), r2 /* Expecting SP back to BASIC frame */
1226
1227#ifdef CONFIG_POOR_MANS_STRACE
1228 /* nothing useful in registers at this point */
1229
1230 movi evt_debug2, r5
1231 ori r5, 1, r5
1232 ptabs r5, tr0
1233 ld.q SP, FRAME_R(9), r2
1234 or SP, ZERO, r3
1235 blink tr0, LINK
1236#endif
1237
1238 ld.q SP, FRAME_S(FSPC), r2
1239 addi r2, 4, r2 /* Move PC, being pre-execution event */
1240 st.q SP, FRAME_S(FSPC), r2
1241 pta ret_from_syscall, tr0
1242 blink tr0, ZERO
1243
1244
1245/* A different return path for ret_from_fork, because with later kernels we
1246 * now need to call schedule_tail.  Since prev is already loaded into r2 by
1247 * switch_to(), we can call it straight away.
1248 */
1249
1250.global ret_from_fork
1251ret_from_fork:
1252
1253 movi schedule_tail,r5
1254 ori r5, 1, r5
1255 ptabs r5, tr0
1256 blink tr0, LINK
1257
1258#ifdef CONFIG_POOR_MANS_STRACE
1259 /* nothing useful in registers at this point */
1260
1261 movi evt_debug2, r5
1262 ori r5, 1, r5
1263 ptabs r5, tr0
1264 ld.q SP, FRAME_R(9), r2
1265 or SP, ZERO, r3
1266 blink tr0, LINK
1267#endif
1268
1269 ld.q SP, FRAME_S(FSPC), r2
1270 addi r2, 4, r2 /* Move PC, being pre-execution event */
1271 st.q SP, FRAME_S(FSPC), r2
1272 pta ret_from_syscall, tr0
1273 blink tr0, ZERO
1274
1275
1276
1277syscall_allowed:
1278 /* Use LINK to deflect the exit point, default is syscall_ret */
1279 pta syscall_ret, tr0
1280 gettr tr0, LINK
1281 pta syscall_notrace, tr0
1282
1283 getcon KCR0, r2
1284 ld.l r2, TI_FLAGS, r4
1285 movi (1 << TIF_SYSCALL_TRACE), r6
1286 and r6, r4, r6
1287 beq/l r6, ZERO, tr0
1288
1289 /* Trace it by calling syscall_trace before and after */
1290 movi syscall_trace, r4
1291 ptabs r4, tr0
1292 blink tr0, LINK
1293 /* Reload syscall number as r5 is trashed by syscall_trace */
1294 ld.q SP, FRAME_S(FSYSCALL_ID), r5
1295 andi r5, 0x1ff, r5
1296
1297 pta syscall_ret_trace, tr0
1298 gettr tr0, LINK
1299
1300syscall_notrace:
1301 /* Now point to the appropriate 4th level syscall handler */
1302 movi sys_call_table, r4
1303 shlli r5, 2, r5
1304 ldx.l r4, r5, r5
1305 ptabs r5, tr0
1306
1307 /* Prepare original args */
1308 ld.q SP, FRAME_R(2), r2
1309 ld.q SP, FRAME_R(3), r3
1310 ld.q SP, FRAME_R(4), r4
1311 ld.q SP, FRAME_R(5), r5
1312 ld.q SP, FRAME_R(6), r6
1313 ld.q SP, FRAME_R(7), r7
1314
1315 /* And now the trick for those syscalls requiring regs * ! */
1316 or SP, ZERO, r8
1317
1318 /* Call it */
1319 blink tr0, ZERO /* LINK is already properly set */
1320
1321syscall_ret_trace:
1322 /* We get back here only if under trace */
1323 st.q SP, FRAME_R(9), r2 /* Save return value */
1324
1325 movi syscall_trace, LINK
1326 ptabs LINK, tr0
1327 blink tr0, LINK
1328
1329 /* This needs to be done after any syscall tracing */
1330 ld.q SP, FRAME_S(FSPC), r2
1331 addi r2, 4, r2 /* Move PC, being pre-execution event */
1332 st.q SP, FRAME_S(FSPC), r2
1333
1334 pta ret_from_syscall, tr0
1335 blink tr0, ZERO /* Resume normal return sequence */
1336
1337/*
1338 * --- Switch to running under a particular ASID and return the previous ASID value
1339 * --- The caller is assumed to have done a cli before calling this.
1340 *
1341 * Input r2 : new ASID
1342 * Output r2 : old ASID
1343 */
1344
1345 .global switch_and_save_asid
1346switch_and_save_asid:
1347 getcon sr, r0
1348 movi 255, r4
1349 shlli r4, 16, r4 /* r4 = mask to select ASID */
1350 and r0, r4, r3 /* r3 = shifted old ASID */
1351 andi r2, 255, r2 /* mask down new ASID */
1352 shlli r2, 16, r2 /* align new ASID against SR.ASID */
1353 andc r0, r4, r0 /* efface old ASID from SR */
1354 or r0, r2, r0 /* insert the new ASID */
1355 putcon r0, ssr
1356 movi 1f, r0
1357 putcon r0, spc
1358 rte
1359 nop
13601:
1361 ptabs LINK, tr0
1362 shlri r3, 16, r2 /* r2 = old ASID */
1363 blink tr0, r63
1364
1365 .global route_to_panic_handler
1366route_to_panic_handler:
1367 /* Switch to real mode, goto panic_handler, don't return. Useful for
1368 last-chance debugging, e.g. if no output wants to go to the console.
1369 */
1370
1371 movi panic_handler - CONFIG_CACHED_MEMORY_OFFSET, r1
1372 ptabs r1, tr0
1373 pta 1f, tr1
1374 gettr tr1, r0
1375 putcon r0, spc
1376 getcon sr, r0
1377 movi 1, r1
1378 shlli r1, 31, r1
1379 andc r0, r1, r0
1380 putcon r0, ssr
1381 rte
1382 nop
13831: /* Now in real mode */
1384 blink tr0, r63
1385 nop
1386
1387 .global peek_real_address_q
1388peek_real_address_q:
1389 /* Two args:
1390 r2 : real mode address to peek
1391 r2(out) : result quadword
1392
1393 This is provided as a cheapskate way of manipulating device
1394 registers for debugging (to avoid the need to onchip_remap the debug
1395 module, and to avoid the need to onchip_remap the watchpoint
1396 controller in a way that identity maps sufficient bits to avoid the
1397 SH5-101 cut2 silicon defect).
1398
1399 This code is not performance critical
1400 */
1401
1402 add.l r2, r63, r2 /* sign extend address */
1403 getcon sr, r0 /* r0 = saved original SR */
1404 movi 1, r1
1405 shlli r1, 28, r1
1406 or r0, r1, r1 /* r0 with block bit set */
1407 putcon r1, sr /* now in critical section */
1408 movi 1, r36
1409 shlli r36, 31, r36
1410 andc r1, r36, r1 /* turn sr.mmu off in real mode section */
1411
1412 putcon r1, ssr
1413 movi .peek0 - CONFIG_CACHED_MEMORY_OFFSET, r36 /* real mode target address */
1414 movi 1f, r37 /* virtual mode return addr */
1415 putcon r36, spc
1416
1417 synco
1418 rte
1419 nop
1420
1421.peek0: /* come here in real mode, don't touch caches!!
1422 still in critical section (sr.bl==1) */
1423 putcon r0, ssr
1424 putcon r37, spc
1425 /* Here's the actual peek. If the address is bad, all bets are now off
1426 * what will happen (handlers invoked in real-mode = bad news) */
1427 ld.q r2, 0, r2
1428 synco
1429 rte /* Back to virtual mode */
1430 nop
1431
14321:
1433 ptabs LINK, tr0
1434 blink tr0, r63
1435
1436 .global poke_real_address_q
1437poke_real_address_q:
1438 /* Two args:
1439 r2 : real mode address to poke
1440 r3 : quadword value to write.
1441
1442 This is provided as a cheapskate way of manipulating device
1443 registers for debugging (to avoid the need to onchip_remap the debug
1444 module, and to avoid the need to onchip_remap the watchpoint
1445 controller in a way that identity maps sufficient bits to avoid the
1446 SH5-101 cut2 silicon defect).
1447
1448 This code is not performance critical
1449 */
1450
1451 add.l r2, r63, r2 /* sign extend address */
1452 getcon sr, r0 /* r0 = saved original SR */
1453 movi 1, r1
1454 shlli r1, 28, r1
1455 or r0, r1, r1 /* r0 with block bit set */
1456 putcon r1, sr /* now in critical section */
1457 movi 1, r36
1458 shlli r36, 31, r36
1459 andc r1, r36, r1 /* turn sr.mmu off in real mode section */
1460
1461 putcon r1, ssr
1462 movi .poke0-CONFIG_CACHED_MEMORY_OFFSET, r36 /* real mode target address */
1463 movi 1f, r37 /* virtual mode return addr */
1464 putcon r36, spc
1465
1466 synco
1467 rte
1468 nop
1469
1470.poke0: /* come here in real mode, don't touch caches!!
1471 still in critical section (sr.bl==1) */
1472 putcon r0, ssr
1473 putcon r37, spc
1474 /* Here's the actual poke. If the address is bad, all bets are now off
1475 * what will happen (handlers invoked in real-mode = bad news) */
1476 st.q r2, 0, r3
1477 synco
1478 rte /* Back to virtual mode */
1479 nop
1480
14811:
1482 ptabs LINK, tr0
1483 blink tr0, r63
1484
1485/*
1486 * --- User Access Handling Section
1487 */
1488
1489/*
1490 * User access support.  It has all moved into non-inlined assembler
1491 * functions here.
1492 *
1493 * __kernel_size_t __copy_user(void *__to, const void *__from,
1494 * __kernel_size_t __n)
1495 *
1496 * Inputs:
1497 * (r2) target address
1498 * (r3) source address
1499 * (r4) size in bytes
1500 *
1501 * Outputs:
1502 * (*r2) target data
1503 * (r2) non-copied bytes
1504 *
1505 * If a fault occurs on the user pointer, bail out early and return the
1506 * number of bytes not copied in r2.
1507 * Strategy : for large blocks, call a real memcpy function which can
1508 * move >1 byte at a time using unaligned ld/st instructions, and can
1509 * manipulate the cache using prefetch + alloco to improve the speed
1510 * further. If a fault occurs in that function, just revert to the
1511 * byte-by-byte approach used for small blocks; this is rare so the
1512 * performance hit for that case does not matter.
1513 *
1514 * For small blocks it's not worth the overhead of setting up and calling
1515 * the memcpy routine; do the copy a byte at a time.
1516 *
1517 */
1518 .global __copy_user
1519__copy_user:
1520 pta __copy_user_byte_by_byte, tr1
1521 movi 16, r0 ! this value is a best guess, should tune it by benchmarking
1522 bge/u r0, r4, tr1
1523 pta copy_user_memcpy, tr0
1524 addi SP, -32, SP
1525 /* Save arguments in case we have to fix-up unhandled page fault */
1526 st.q SP, 0, r2
1527 st.q SP, 8, r3
1528 st.q SP, 16, r4
1529 st.q SP, 24, r35 ! r35 is callee-save
1530 /* Save LINK in a register to reduce RTS time later (otherwise
1531 ld SP,*,LINK;ptabs LINK;trn;blink trn,r63 becomes a critical path) */
1532 ori LINK, 0, r35
1533 blink tr0, LINK
1534
1535 /* Copy completed normally if we get back here */
1536 ptabs r35, tr0
1537 ld.q SP, 24, r35
1538 /* don't restore r2-r4, pointless */
1539 /* set result=r2 to zero as the copy must have succeeded. */
1540 or r63, r63, r2
1541 addi SP, 32, SP
1542 blink tr0, r63 ! RTS
1543
1544 .global __copy_user_fixup
1545__copy_user_fixup:
1546 /* Restore stack frame */
1547 ori r35, 0, LINK
1548 ld.q SP, 24, r35
1549 ld.q SP, 16, r4
1550 ld.q SP, 8, r3
1551 ld.q SP, 0, r2
1552 addi SP, 32, SP
1553 /* Fall through to original code, in the 'same' state we entered with */
1554
1555/* The slow byte-by-byte method is used if the fast copy traps due to a bad
1556 user address. In that rare case, the speed drop can be tolerated. */
1557__copy_user_byte_by_byte:
1558 pta ___copy_user_exit, tr1
1559 pta ___copy_user1, tr0
1560 beq/u r4, r63, tr1 /* early exit for zero length copy */
1561 sub r2, r3, r0
1562 addi r0, -1, r0
1563
1564___copy_user1:
1565 ld.b r3, 0, r5 /* Fault address 1 */
1566
1567 /* Could rewrite this to use just 1 add, but the second comes 'free'
1568 due to load latency */
1569 addi r3, 1, r3
1570 addi r4, -1, r4 /* No real fixup required */
1571___copy_user2:
1572 stx.b r3, r0, r5 /* Fault address 2 */
1573 bne r4, ZERO, tr0
1574
1575___copy_user_exit:
1576 or r4, ZERO, r2
1577 ptabs LINK, tr0
1578 blink tr0, ZERO
1579
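The dispatch strategy described above reads, in rough C, as the sketch below. This is ours, for exposition only: copy_user_memcpy is assumed to take memcpy-like arguments with its faults redirected through __copy_user_fixup, and the threshold mirrors the 'best guess' constant in the assembly.

/* Illustrative only; the real routine is the assembly above. */
extern unsigned long copy_user_memcpy(void *to, const void *from,
				      unsigned long n);

static unsigned long copy_user_sketch(void *to, const void *from,
				      unsigned long n)
{
	unsigned char *d = to;
	const unsigned char *s = from;

	if (n > 16)			/* big enough to amortize the call */
		return copy_user_memcpy(to, from, n);

	while (n) {			/* small copy: plain byte loop */
		*d++ = *s++;		/* covered by an __ex_table entry */
		n--;
	}
	return 0;			/* no bytes left uncopied */
}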
1580/*
1581 * __kernel_size_t __clear_user(void *addr, __kernel_size_t size)
1582 *
1583 * Inputs:
1584 * (r2) target address
1585 * (r3) size in bytes
1586 *
1587 * Outputs:
1588 * (*r2) zero-ed target data
1589 * (r2) non-zero-ed bytes
1590 */
1591 .global __clear_user
1592__clear_user:
1593 pta ___clear_user_exit, tr1
1594 pta ___clear_user1, tr0
1595 beq/u r3, r63, tr1
1596
1597___clear_user1:
1598 st.b r2, 0, ZERO /* Fault address */
1599 addi r2, 1, r2
1600 addi r3, -1, r3 /* No real fixup required */
1601 bne r3, ZERO, tr0
1602
1603___clear_user_exit:
1604 or r3, ZERO, r2
1605 ptabs LINK, tr0
1606 blink tr0, ZERO
1607
1608
1609/*
1610 * int __strncpy_from_user(unsigned long __dest, unsigned long __src,
1611 * int __count)
1612 *
1613 * Inputs:
1614 * (r2) target address
1615 * (r3) source address
1616 * (r4) maximum size in bytes
1617 *
1618 * Outputs:
1619 * (*r2) copied data
1620 * (r2) -EFAULT (in case of faulting)
1621 * copied data (otherwise)
1622 */
1623 .global __strncpy_from_user
1624__strncpy_from_user:
1625 pta ___strncpy_from_user1, tr0
1626 pta ___strncpy_from_user_done, tr1
1627 or r4, ZERO, r5 /* r5 = original count */
1628 beq/u r4, r63, tr1 /* early exit if r4==0 */
1629 movi -(EFAULT), r6 /* r6 = reply, no real fixup */
1630 or ZERO, ZERO, r7 /* r7 = data, clear top byte of data */
1631
1632___strncpy_from_user1:
1633 ld.b r3, 0, r7 /* Fault address: only in reading */
1634 st.b r2, 0, r7
1635 addi r2, 1, r2
1636 addi r3, 1, r3
1637 beq/u ZERO, r7, tr1
1638 addi r4, -1, r4 /* return real number of copied bytes */
1639 bne/l ZERO, r4, tr0
1640
1641___strncpy_from_user_done:
1642 sub r5, r4, r6 /* If done, return copied */
1643
1644___strncpy_from_user_exit:
1645 or r6, ZERO, r2
1646 ptabs LINK, tr0
1647 blink tr0, ZERO
1648
1649/*
1650 * extern long __strnlen_user(const char *__s, long __n)
1651 *
1652 * Inputs:
1653 * (r2) source address
1654 * (r3) source size in bytes
1655 *
1656 * Outputs:
1657 * (r2) -EFAULT (in case of faulting)
1658 * string length (otherwise)
1659 */
1660 .global __strnlen_user
1661__strnlen_user:
1662 pta ___strnlen_user_set_reply, tr0
1663 pta ___strnlen_user1, tr1
1664 or ZERO, ZERO, r5 /* r5 = counter */
1665 movi -(EFAULT), r6 /* r6 = reply, no real fixup */
1666 or ZERO, ZERO, r7 /* r7 = data, clear top byte of data */
1667 beq r3, ZERO, tr0
1668
1669___strnlen_user1:
1670 ldx.b r2, r5, r7 /* Fault address: only in reading */
1671 addi r3, -1, r3 /* No real fixup */
1672 addi r5, 1, r5
1673 beq r3, ZERO, tr0
1674 bne r7, ZERO, tr1
1675! The line below used to be active. This led to a junk byte lying between each pair
1676! of entries in the argv & envp structures in memory. Whilst the program saw the right data
1677! via the argv and envp arguments to main, it meant the 'flat' representation visible through
1678! /proc/$pid/cmdline was corrupt, causing trouble with ps, for example.
1679! addi r5, 1, r5 /* Include '\0' */
1680
1681___strnlen_user_set_reply:
1682 or r5, ZERO, r6 /* If done, return counter */
1683
1684___strnlen_user_exit:
1685 or r6, ZERO, r2
1686 ptabs LINK, tr0
1687 blink tr0, ZERO
1688
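For clarity, the successful path of the loop above behaves like this C sketch (ours, not the original): the terminating NUL is counted exactly once, which is precisely what the commented-out extra increment used to break.

/* Sketch of __strnlen_user's behaviour; the real code additionally
 * returns -EFAULT if the load of s[count] faults. */
static long strnlen_user_sketch(const char *s, long n)
{
	long count = 0;

	while (n-- > 0) {
		char c = s[count++];	/* the faulting load above */
		if (c == '\0')
			break;		/* NUL included in the count once */
	}
	return count;
}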
1689/*
1690 * extern long __get_user_asm_?(void *val, long addr)
1691 *
1692 * Inputs:
1693 * (r2) dest address
1694 * (r3) source address (in User Space)
1695 *
1696 * Outputs:
1697 * (r2) -EFAULT (faulting)
1698 * 0 (not faulting)
1699 */
1700 .global __get_user_asm_b
1701__get_user_asm_b:
1702 or r2, ZERO, r4
1703 movi -(EFAULT), r2 /* r2 = reply, no real fixup */
1704
1705___get_user_asm_b1:
1706 ld.b r3, 0, r5 /* r5 = data */
1707 st.b r4, 0, r5
1708 or ZERO, ZERO, r2
1709
1710___get_user_asm_b_exit:
1711 ptabs LINK, tr0
1712 blink tr0, ZERO
1713
1714
1715 .global __get_user_asm_w
1716__get_user_asm_w:
1717 or r2, ZERO, r4
1718 movi -(EFAULT), r2 /* r2 = reply, no real fixup */
1719
1720___get_user_asm_w1:
1721 ld.w r3, 0, r5 /* r5 = data */
1722 st.w r4, 0, r5
1723 or ZERO, ZERO, r2
1724
1725___get_user_asm_w_exit:
1726 ptabs LINK, tr0
1727 blink tr0, ZERO
1728
1729
1730 .global __get_user_asm_l
1731__get_user_asm_l:
1732 or r2, ZERO, r4
1733 movi -(EFAULT), r2 /* r2 = reply, no real fixup */
1734
1735___get_user_asm_l1:
1736 ld.l r3, 0, r5 /* r5 = data */
1737 st.l r4, 0, r5
1738 or ZERO, ZERO, r2
1739
1740___get_user_asm_l_exit:
1741 ptabs LINK, tr0
1742 blink tr0, ZERO
1743
1744
1745 .global __get_user_asm_q
1746__get_user_asm_q:
1747 or r2, ZERO, r4
1748 movi -(EFAULT), r2 /* r2 = reply, no real fixup */
1749
1750___get_user_asm_q1:
1751 ld.q r3, 0, r5 /* r5 = data */
1752 st.q r4, 0, r5
1753 or ZERO, ZERO, r2
1754
1755___get_user_asm_q_exit:
1756 ptabs LINK, tr0
1757 blink tr0, ZERO
1758
1759/*
1760 * extern long __put_user_asm_?(void *pval, long addr)
1761 *
1762 * Inputs:
1763 * (r2) kernel pointer to value
1764 * (r3) dest address (in User Space)
1765 *
1766 * Outputs:
1767 * (r2) -EFAULT (faulting)
1768 * 0 (not faulting)
1769 */
1770 .global __put_user_asm_b
1771__put_user_asm_b:
1772 ld.b r2, 0, r4 /* r4 = data */
1773 movi -(EFAULT), r2 /* r2 = reply, no real fixup */
1774
1775___put_user_asm_b1:
1776 st.b r3, 0, r4
1777 or ZERO, ZERO, r2
1778
1779___put_user_asm_b_exit:
1780 ptabs LINK, tr0
1781 blink tr0, ZERO
1782
1783
1784 .global __put_user_asm_w
1785__put_user_asm_w:
1786 ld.w r2, 0, r4 /* r4 = data */
1787 movi -(EFAULT), r2 /* r2 = reply, no real fixup */
1788
1789___put_user_asm_w1:
1790 st.w r3, 0, r4
1791 or ZERO, ZERO, r2
1792
1793___put_user_asm_w_exit:
1794 ptabs LINK, tr0
1795 blink tr0, ZERO
1796
1797
1798 .global __put_user_asm_l
1799__put_user_asm_l:
1800 ld.l r2, 0, r4 /* r4 = data */
1801 movi -(EFAULT), r2 /* r2 = reply, no real fixup */
1802
1803___put_user_asm_l1:
1804 st.l r3, 0, r4
1805 or ZERO, ZERO, r2
1806
1807___put_user_asm_l_exit:
1808 ptabs LINK, tr0
1809 blink tr0, ZERO
1810
1811
1812 .global __put_user_asm_q
1813__put_user_asm_q:
1814 ld.q r2, 0, r4 /* r4 = data */
1815 movi -(EFAULT), r2 /* r2 = reply, no real fixup */
1816
1817___put_user_asm_q1:
1818 st.q r3, 0, r4
1819 or ZERO, ZERO, r2
1820
1821___put_user_asm_q_exit:
1822 ptabs LINK, tr0
1823 blink tr0, ZERO
1824
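Together, the get/put helpers form the size-dispatched backend that the C-level uaccess wrappers sit on. A hedged illustration of calling the 32-bit pair (the wrapper is ours; the extern signatures follow the comment blocks above):

extern long __get_user_asm_l(void *val, long addr);
extern long __put_user_asm_l(void *pval, long addr);

/* Read one 32-bit word from user space and write it straight back. */
static long bounce_user_u32(long uaddr)
{
	unsigned int tmp;
	long err;

	err = __get_user_asm_l(&tmp, uaddr);	/* 0 or -EFAULT */
	if (err)
		return err;
	return __put_user_asm_l(&tmp, uaddr);	/* 0 or -EFAULT */
}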
1825panic_stash_regs:
1826	/* The idea is: when we get an unhandled panic, we dump the registers
1827	   to a known memory location, then just sit in a tight loop.
1828 This allows the human to look at the memory region through the GDB
1829 session (assuming the debug module's SHwy initiator isn't locked up
1830 or anything), to hopefully analyze the cause of the panic. */
1831
1832 /* On entry, former r15 (SP) is in DCR
1833 former r0 is at resvec_saved_area + 0
1834 former r1 is at resvec_saved_area + 8
1835 former tr0 is at resvec_saved_area + 32
1836 DCR is the only register whose value is lost altogether.
1837 */
1838
1839 movi 0xffffffff80000000, r0 ! phy of dump area
1840 ld.q SP, 0x000, r1 ! former r0
1841 st.q r0, 0x000, r1
1842 ld.q SP, 0x008, r1 ! former r1
1843 st.q r0, 0x008, r1
1844 st.q r0, 0x010, r2
1845 st.q r0, 0x018, r3
1846 st.q r0, 0x020, r4
1847 st.q r0, 0x028, r5
1848 st.q r0, 0x030, r6
1849 st.q r0, 0x038, r7
1850 st.q r0, 0x040, r8
1851 st.q r0, 0x048, r9
1852 st.q r0, 0x050, r10
1853 st.q r0, 0x058, r11
1854 st.q r0, 0x060, r12
1855 st.q r0, 0x068, r13
1856 st.q r0, 0x070, r14
1857 getcon dcr, r14
1858 st.q r0, 0x078, r14
1859 st.q r0, 0x080, r16
1860 st.q r0, 0x088, r17
1861 st.q r0, 0x090, r18
1862 st.q r0, 0x098, r19
1863 st.q r0, 0x0a0, r20
1864 st.q r0, 0x0a8, r21
1865 st.q r0, 0x0b0, r22
1866 st.q r0, 0x0b8, r23
1867 st.q r0, 0x0c0, r24
1868 st.q r0, 0x0c8, r25
1869 st.q r0, 0x0d0, r26
1870 st.q r0, 0x0d8, r27
1871 st.q r0, 0x0e0, r28
1872 st.q r0, 0x0e8, r29
1873 st.q r0, 0x0f0, r30
1874 st.q r0, 0x0f8, r31
1875 st.q r0, 0x100, r32
1876 st.q r0, 0x108, r33
1877 st.q r0, 0x110, r34
1878 st.q r0, 0x118, r35
1879 st.q r0, 0x120, r36
1880 st.q r0, 0x128, r37
1881 st.q r0, 0x130, r38
1882 st.q r0, 0x138, r39
1883 st.q r0, 0x140, r40
1884 st.q r0, 0x148, r41
1885 st.q r0, 0x150, r42
1886 st.q r0, 0x158, r43
1887 st.q r0, 0x160, r44
1888 st.q r0, 0x168, r45
1889 st.q r0, 0x170, r46
1890 st.q r0, 0x178, r47
1891 st.q r0, 0x180, r48
1892 st.q r0, 0x188, r49
1893 st.q r0, 0x190, r50
1894 st.q r0, 0x198, r51
1895 st.q r0, 0x1a0, r52
1896 st.q r0, 0x1a8, r53
1897 st.q r0, 0x1b0, r54
1898 st.q r0, 0x1b8, r55
1899 st.q r0, 0x1c0, r56
1900 st.q r0, 0x1c8, r57
1901 st.q r0, 0x1d0, r58
1902 st.q r0, 0x1d8, r59
1903 st.q r0, 0x1e0, r60
1904 st.q r0, 0x1e8, r61
1905 st.q r0, 0x1f0, r62
1906 st.q r0, 0x1f8, r63 ! bogus, but for consistency's sake...
1907
1908 ld.q SP, 0x020, r1 ! former tr0
1909 st.q r0, 0x200, r1
1910 gettr tr1, r1
1911 st.q r0, 0x208, r1
1912 gettr tr2, r1
1913 st.q r0, 0x210, r1
1914 gettr tr3, r1
1915 st.q r0, 0x218, r1
1916 gettr tr4, r1
1917 st.q r0, 0x220, r1
1918 gettr tr5, r1
1919 st.q r0, 0x228, r1
1920 gettr tr6, r1
1921 st.q r0, 0x230, r1
1922 gettr tr7, r1
1923 st.q r0, 0x238, r1
1924
1925 getcon sr, r1
1926 getcon ssr, r2
1927 getcon pssr, r3
1928 getcon spc, r4
1929 getcon pspc, r5
1930 getcon intevt, r6
1931 getcon expevt, r7
1932 getcon pexpevt, r8
1933 getcon tra, r9
1934 getcon tea, r10
1935 getcon kcr0, r11
1936 getcon kcr1, r12
1937 getcon vbr, r13
1938 getcon resvec, r14
1939
1940 st.q r0, 0x240, r1
1941 st.q r0, 0x248, r2
1942 st.q r0, 0x250, r3
1943 st.q r0, 0x258, r4
1944 st.q r0, 0x260, r5
1945 st.q r0, 0x268, r6
1946 st.q r0, 0x270, r7
1947 st.q r0, 0x278, r8
1948 st.q r0, 0x280, r9
1949 st.q r0, 0x288, r10
1950 st.q r0, 0x290, r11
1951 st.q r0, 0x298, r12
1952 st.q r0, 0x2a0, r13
1953 st.q r0, 0x2a8, r14
1954
1955 getcon SPC,r2
1956 getcon SSR,r3
1957 getcon EXPEVT,r4
1958 /* Prepare to jump to C - physical address */
1959 movi panic_handler-CONFIG_CACHED_MEMORY_OFFSET, r1
1960 ori r1, 1, r1
1961 ptabs r1, tr0
1962 getcon DCR, SP
1963 blink tr0, ZERO
1964 nop
1965 nop
1966 nop
1967 nop
1968
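When inspecting the dump from a GDB session as suggested above, the layout written at physical 0xffffffff80000000 can be read through a C struct like the one below. The struct and field names are ours; the offsets come straight from the stores above.

/* Offsets match the st.q sequence above (0x000 .. 0x2a8). */
struct panic_dump_sketch {
	unsigned long long gpr[64];	/* 0x000: r0..r63; the r15 slot at
					   0x078 actually holds DCR, i.e.
					   the former stack pointer */
	unsigned long long tr[8];	/* 0x200: tr0..tr7 */
	unsigned long long sr, ssr, pssr, spc, pspc;		/* 0x240 */
	unsigned long long intevt, expevt, pexpevt, tra, tea;
	unsigned long long kcr0, kcr1, vbr, resvec;		/* ..0x2a8 */
};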
1969
1970
1971
1972/*
1973 * --- Signal Handling Section
1974 */
1975
1976/*
1977 * extern long long _sa_default_rt_restorer
1978 * extern long long _sa_default_restorer
1979 *
1980 * or, better,
1981 *
1982 * extern void _sa_default_rt_restorer(void)
1983 * extern void _sa_default_restorer(void)
1984 *
1985 * Code prototypes to do a sys_rt_sigreturn() or sys_sigreturn()
1986 * from user space. Copied into user space by signal management.
1987 * Both must be quad aligned and 2 quad long (4 instructions).
1988 *
1989 */
1990 .balign 8
1991 .global sa_default_rt_restorer
1992sa_default_rt_restorer:
1993 movi 0x10, r9
1994 shori __NR_rt_sigreturn, r9
1995 trapa r9
1996 nop
1997
1998 .balign 8
1999 .global sa_default_restorer
2000sa_default_restorer:
2001 movi 0x10, r9
2002 shori __NR_sigreturn, r9
2003 trapa r9
2004 nop
2005
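Signal delivery is expected to copy one of these 16-byte stubs (2 quads, 4 instructions) onto the user stack as the return trampoline. A hedged sketch of that copy; 'retcode' is a hypothetical name for wherever the signal frame keeps the trampoline, the real setup living in signal.c:

extern char sa_default_rt_restorer[];	/* the 16-byte SHmedia stub above */

static int install_rt_restorer_sketch(void __user *retcode)
{
	/* copy_to_user as declared in <asm/uaccess.h> */
	return copy_to_user(retcode, sa_default_rt_restorer, 16) ? -EFAULT : 0;
}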
2006/*
2007 * --- __ex_table Section
2008 */
2009
2010/*
2011 * User Access Exception Table.
2012 */
2013 .section __ex_table, "a"
2014
2015 .global asm_uaccess_start /* Just a marker */
2016asm_uaccess_start:
2017
2018 .long ___copy_user1, ___copy_user_exit
2019 .long ___copy_user2, ___copy_user_exit
2020 .long ___clear_user1, ___clear_user_exit
2021 .long ___strncpy_from_user1, ___strncpy_from_user_exit
2022 .long ___strnlen_user1, ___strnlen_user_exit
2023 .long ___get_user_asm_b1, ___get_user_asm_b_exit
2024 .long ___get_user_asm_w1, ___get_user_asm_w_exit
2025 .long ___get_user_asm_l1, ___get_user_asm_l_exit
2026 .long ___get_user_asm_q1, ___get_user_asm_q_exit
2027 .long ___put_user_asm_b1, ___put_user_asm_b_exit
2028 .long ___put_user_asm_w1, ___put_user_asm_w_exit
2029 .long ___put_user_asm_l1, ___put_user_asm_l_exit
2030 .long ___put_user_asm_q1, ___put_user_asm_q_exit
2031
2032 .global asm_uaccess_end /* Just a marker */
2033asm_uaccess_end:
2034
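A sketch (ours) of how the fault path can consult this table: each entry is a pair of 32-bit addresses, the faulting instruction and the exit label that leaves the -EFAULT reply in place.

/* Entries are .long pairs, i.e. 32-bit truncated addresses. */
struct uaccess_fixup_sketch {
	unsigned int insn;	/* address of the faulting access */
	unsigned int fixup;	/* exit label to resume at */
};

extern struct uaccess_fixup_sketch asm_uaccess_start[], asm_uaccess_end[];

static unsigned int search_uaccess_table(unsigned int pc)
{
	struct uaccess_fixup_sketch *e;

	for (e = asm_uaccess_start; e < asm_uaccess_end; e++)
		if (e->insn == pc)
			return e->fixup;
	return 0;		/* fault was not in a uaccess helper */
}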
2035
2036
2037
2038/*
2039 * --- .text.init Section
2040 */
2041
2042 .section .text.init, "ax"
2043
2044/*
2045 * void trap_init (void)
2046 *
2047 */
2048 .global trap_init
2049trap_init:
2050 addi SP, -24, SP /* Room to save r28/r29/r30 */
2051 st.q SP, 0, r28
2052 st.q SP, 8, r29
2053 st.q SP, 16, r30
2054
2055 /* Set VBR and RESVEC */
2056 movi LVBR_block, r19
2057 andi r19, -4, r19 /* reset MMUOFF + reserved */
2058 /* For RESVEC exceptions we force the MMU off, which means we need the
2059 physical address. */
2060 movi LRESVEC_block-CONFIG_CACHED_MEMORY_OFFSET, r20
2061 andi r20, -4, r20 /* reset reserved */
2062 ori r20, 1, r20 /* set MMUOFF */
2063 putcon r19, VBR
2064 putcon r20, RESVEC
2065
2066 /* Sanity check */
2067 movi LVBR_block_end, r21
2068 andi r21, -4, r21
2069 movi BLOCK_SIZE, r29 /* r29 = expected size */
2070 or r19, ZERO, r30
2071 add r19, r29, r19
2072
2073 /*
2074 * Ugly, but better loop forever now than crash afterwards.
2075 * We should print a message, but if we touch LVBR or
2076 * LRESVEC blocks we should not be surprised if we get stuck
2077 * in trap_init().
2078 */
2079 pta trap_init_loop, tr1
2080 gettr tr1, r28 /* r28 = trap_init_loop */
2081 sub r21, r30, r30 /* r30 = actual size */
2082
2083 /*
2084 * VBR/RESVEC handlers overlap by being bigger than
2085 * allowed. Very bad. Just loop forever.
2086 * (r28) panic/loop address
2087 * (r29) expected size
2088 * (r30) actual size
2089 */
2090trap_init_loop:
2091 bne r19, r21, tr1
2092
2093 /* Now that exception vectors are set up reset SR.BL */
2094 getcon SR, r22
2095 movi SR_UNBLOCK_EXC, r23
2096 and r22, r23, r22
2097 putcon r22, SR
2098
2099 addi SP, 24, SP
2100 ptabs LINK, tr0
2101 blink tr0, ZERO
2102
diff --git a/arch/sh64/kernel/fpu.c b/arch/sh64/kernel/fpu.c
deleted file mode 100644
index 8ad4ed6a6c9b..000000000000
--- a/arch/sh64/kernel/fpu.c
+++ /dev/null
@@ -1,170 +0,0 @@
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * arch/sh64/kernel/fpu.c
7 *
8 * Copyright (C) 2001 Manuela Cirronis, Paolo Alberelli
9 * Copyright (C) 2002 STMicroelectronics Limited
10 * Author : Stuart Menefy
11 *
12 * Started from SH4 version:
13 * Copyright (C) 1999, 2000 Kaz Kojima & Niibe Yutaka
14 *
15 */
16
17#include <linux/sched.h>
18#include <linux/signal.h>
19#include <asm/processor.h>
20#include <asm/user.h>
21#include <asm/io.h>
22
23/*
24 * Initially load the FPU with signalling NaNs. This bit pattern
25 * has the property that no matter whether considered as single or as
26 * double precision, it still represents a signalling NaN.
27 */
28#define sNAN64 0xFFFFFFFFFFFFFFFFULL
29#define sNAN32 0xFFFFFFFFUL
30
31static union sh_fpu_union init_fpuregs = {
32 .hard = {
33 .fp_regs = { [0 ... 63] = sNAN32 },
34 .fpscr = FPSCR_INIT
35 }
36};
37
38inline void fpsave(struct sh_fpu_hard_struct *fpregs)
39{
40 asm volatile("fst.p %0, (0*8), fp0\n\t"
41 "fst.p %0, (1*8), fp2\n\t"
42 "fst.p %0, (2*8), fp4\n\t"
43 "fst.p %0, (3*8), fp6\n\t"
44 "fst.p %0, (4*8), fp8\n\t"
45 "fst.p %0, (5*8), fp10\n\t"
46 "fst.p %0, (6*8), fp12\n\t"
47 "fst.p %0, (7*8), fp14\n\t"
48 "fst.p %0, (8*8), fp16\n\t"
49 "fst.p %0, (9*8), fp18\n\t"
50 "fst.p %0, (10*8), fp20\n\t"
51 "fst.p %0, (11*8), fp22\n\t"
52 "fst.p %0, (12*8), fp24\n\t"
53 "fst.p %0, (13*8), fp26\n\t"
54 "fst.p %0, (14*8), fp28\n\t"
55 "fst.p %0, (15*8), fp30\n\t"
56 "fst.p %0, (16*8), fp32\n\t"
57 "fst.p %0, (17*8), fp34\n\t"
58 "fst.p %0, (18*8), fp36\n\t"
59 "fst.p %0, (19*8), fp38\n\t"
60 "fst.p %0, (20*8), fp40\n\t"
61 "fst.p %0, (21*8), fp42\n\t"
62 "fst.p %0, (22*8), fp44\n\t"
63 "fst.p %0, (23*8), fp46\n\t"
64 "fst.p %0, (24*8), fp48\n\t"
65 "fst.p %0, (25*8), fp50\n\t"
66 "fst.p %0, (26*8), fp52\n\t"
67 "fst.p %0, (27*8), fp54\n\t"
68 "fst.p %0, (28*8), fp56\n\t"
69 "fst.p %0, (29*8), fp58\n\t"
70 "fst.p %0, (30*8), fp60\n\t"
71 "fst.p %0, (31*8), fp62\n\t"
72
73 "fgetscr fr63\n\t"
74 "fst.s %0, (32*8), fr63\n\t"
75 : /* no output */
76 : "r" (fpregs)
77 : "memory");
78}
79
80
81static inline void
82fpload(struct sh_fpu_hard_struct *fpregs)
83{
84 asm volatile("fld.p %0, (0*8), fp0\n\t"
85 "fld.p %0, (1*8), fp2\n\t"
86 "fld.p %0, (2*8), fp4\n\t"
87 "fld.p %0, (3*8), fp6\n\t"
88 "fld.p %0, (4*8), fp8\n\t"
89 "fld.p %0, (5*8), fp10\n\t"
90 "fld.p %0, (6*8), fp12\n\t"
91 "fld.p %0, (7*8), fp14\n\t"
92 "fld.p %0, (8*8), fp16\n\t"
93 "fld.p %0, (9*8), fp18\n\t"
94 "fld.p %0, (10*8), fp20\n\t"
95 "fld.p %0, (11*8), fp22\n\t"
96 "fld.p %0, (12*8), fp24\n\t"
97 "fld.p %0, (13*8), fp26\n\t"
98 "fld.p %0, (14*8), fp28\n\t"
99 "fld.p %0, (15*8), fp30\n\t"
100 "fld.p %0, (16*8), fp32\n\t"
101 "fld.p %0, (17*8), fp34\n\t"
102 "fld.p %0, (18*8), fp36\n\t"
103 "fld.p %0, (19*8), fp38\n\t"
104 "fld.p %0, (20*8), fp40\n\t"
105 "fld.p %0, (21*8), fp42\n\t"
106 "fld.p %0, (22*8), fp44\n\t"
107 "fld.p %0, (23*8), fp46\n\t"
108 "fld.p %0, (24*8), fp48\n\t"
109 "fld.p %0, (25*8), fp50\n\t"
110 "fld.p %0, (26*8), fp52\n\t"
111 "fld.p %0, (27*8), fp54\n\t"
112 "fld.p %0, (28*8), fp56\n\t"
113 "fld.p %0, (29*8), fp58\n\t"
114 "fld.p %0, (30*8), fp60\n\t"
115
116 "fld.s %0, (32*8), fr63\n\t"
117 "fputscr fr63\n\t"
118
119 "fld.p %0, (31*8), fp62\n\t"
120 : /* no output */
121 : "r" (fpregs) );
122}
123
124void fpinit(struct sh_fpu_hard_struct *fpregs)
125{
126 *fpregs = init_fpuregs.hard;
127}
128
129asmlinkage void
130do_fpu_error(unsigned long ex, struct pt_regs *regs)
131{
132 struct task_struct *tsk = current;
133
134 regs->pc += 4;
135
136 tsk->thread.trap_no = 11;
137 tsk->thread.error_code = 0;
138 force_sig(SIGFPE, tsk);
139}
140
141
142asmlinkage void
143do_fpu_state_restore(unsigned long ex, struct pt_regs *regs)
144{
145 void die(const char *str, struct pt_regs *regs, long err);
146
147 if (! user_mode(regs))
148 die("FPU used in kernel", regs, ex);
149
150 regs->sr &= ~SR_FD;
151
152 if (last_task_used_math == current)
153 return;
154
155 grab_fpu();
156 if (last_task_used_math != NULL) {
157 /* Other processes fpu state, save away */
158 fpsave(&last_task_used_math->thread.fpu.hard);
159 }
160 last_task_used_math = current;
161 if (used_math()) {
162 fpload(&current->thread.fpu.hard);
163 } else {
164 /* First time FPU user. */
165 fpload(&init_fpuregs.hard);
166 set_used_math();
167 }
168 release_fpu();
169}
170
diff --git a/arch/sh64/kernel/head.S b/arch/sh64/kernel/head.S
deleted file mode 100644
index 186406d3ad9c..000000000000
--- a/arch/sh64/kernel/head.S
+++ /dev/null
@@ -1,372 +0,0 @@
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * arch/sh64/kernel/head.S
7 *
8 * Copyright (C) 2000, 2001 Paolo Alberelli
9 * Copyright (C) 2003, 2004 Paul Mundt
10 *
11 *
12 * benedict.gaster@superh.com: 2nd May 2002
13 * Moved definition of empty_zero_page to its own section allowing
14 * it to be placed at an absolute address known at load time.
15 *
16 * lethal@linux-sh.org: 9th May 2003
17 * Kill off GLOBAL_NAME() usage.
18 *
19 * lethal@linux-sh.org: 8th May 2004
20 * Add early SCIF console DTLB mapping.
21 */
22
23
24#include <asm/page.h>
25#include <asm/mmu_context.h>
26#include <asm/cache.h>
27#include <asm/tlb.h>
28#include <asm/processor.h>
29#include <asm/registers.h>
30#include <asm/thread_info.h>
31
32/*
33 * MMU defines: TLB boundaries.
34 */
35
36#define MMUIR_FIRST ITLB_FIXED
37#define MMUIR_END ITLB_LAST_VAR_UNRESTRICTED+TLB_STEP
38#define MMUIR_STEP TLB_STEP
39
40#define MMUDR_FIRST DTLB_FIXED
41#define MMUDR_END DTLB_LAST_VAR_UNRESTRICTED+TLB_STEP
42#define MMUDR_STEP TLB_STEP
43
44/* Safety check : CONFIG_CACHED_MEMORY_OFFSET has to be a multiple of 512Mb */
45#if (CONFIG_CACHED_MEMORY_OFFSET & ((1UL<<29)-1))
46#error "CONFIG_CACHED_MEMORY_OFFSET must be a multiple of 512Mb"
47#endif
48
49/*
50 * MMU defines: Fixed TLBs.
51 */
52/* Deal safely with the case where the base of RAM is not 512Mb aligned */
53
54#define ALIGN_512M_MASK (0xffffffffe0000000)
55#define ALIGNED_EFFECTIVE ((CONFIG_CACHED_MEMORY_OFFSET + CONFIG_MEMORY_START) & ALIGN_512M_MASK)
56#define ALIGNED_PHYSICAL (CONFIG_MEMORY_START & ALIGN_512M_MASK)
57
58#define MMUIR_TEXT_H (0x0000000000000003 | ALIGNED_EFFECTIVE)
59 /* Enabled, Shared, ASID 0, Eff. Add. 0xA0000000 */
60
61#define MMUIR_TEXT_L (0x000000000000009a | ALIGNED_PHYSICAL)
62 /* 512 Mb, Cacheable, Write-back, execute, Not User, Ph. Add. */
63
64#define MMUDR_CACHED_H	(0x0000000000000003 | ALIGNED_EFFECTIVE)
65	/* Enabled, Shared, ASID 0, Eff. Add. 0xA0000000 */
66#define MMUDR_CACHED_L	(0x000000000000015a | ALIGNED_PHYSICAL)
67 /* 512 Mb, Cacheable, Write-back, read/write, Not User, Ph. Add. */
68
69#ifdef CONFIG_ICACHE_DISABLED
70#define ICCR0_INIT_VAL ICCR0_OFF /* ICACHE off */
71#else
72#define ICCR0_INIT_VAL ICCR0_ON | ICCR0_ICI /* ICE + ICI */
73#endif
74#define ICCR1_INIT_VAL ICCR1_NOLOCK /* No locking */
75
76#if defined (CONFIG_DCACHE_DISABLED)
77#define OCCR0_INIT_VAL OCCR0_OFF /* D-cache: off */
78#elif defined (CONFIG_DCACHE_WRITE_THROUGH)
79#define OCCR0_INIT_VAL OCCR0_ON | OCCR0_OCI | OCCR0_WT /* D-cache: on, */
80 /* WT, invalidate */
81#elif defined (CONFIG_DCACHE_WRITE_BACK)
82#define OCCR0_INIT_VAL OCCR0_ON | OCCR0_OCI | OCCR0_WB /* D-cache: on, */
83 /* WB, invalidate */
84#else
85#error preprocessor flag CONFIG_DCACHE_... not recognized!
86#endif
87
88#define OCCR1_INIT_VAL OCCR1_NOLOCK /* No locking */
89
90 .section .empty_zero_page, "aw"
91 .global empty_zero_page
92
93empty_zero_page:
94 .long 1 /* MOUNT_ROOT_RDONLY */
95 .long 0 /* RAMDISK_FLAGS */
96 .long 0x0200 /* ORIG_ROOT_DEV */
97 .long 1 /* LOADER_TYPE */
98 .long 0x00800000 /* INITRD_START */
99 .long 0x00800000 /* INITRD_SIZE */
100 .long 0
101
102 .text
103 .balign 4096,0,4096
104
105 .section .data, "aw"
106 .balign PAGE_SIZE
107
108 .section .data, "aw"
109 .balign PAGE_SIZE
110
111 .global swapper_pg_dir
112swapper_pg_dir:
113 .space PAGE_SIZE, 0
114
115 .global empty_bad_page
116empty_bad_page:
117 .space PAGE_SIZE, 0
118
119 .global empty_bad_pte_table
120empty_bad_pte_table:
121 .space PAGE_SIZE, 0
122
123 .global fpu_in_use
124fpu_in_use: .quad 0
125
126
127 .section .text.head, "ax"
128 .balign L1_CACHE_BYTES
129/*
130 * Condition at the entry of __stext:
131 * . Reset state:
132 * . SR.FD = 1 (FPU disabled)
133 * . SR.BL = 1 (Exceptions disabled)
134 * . SR.MD = 1 (Privileged Mode)
135 * . SR.MMU = 0 (MMU Disabled)
136 * . SR.CD = 0 (CTC User Visible)
137 * . SR.IMASK = Undefined (Interrupt Mask)
138 *
139 * Operations supposed to be performed by __stext:
140 * . prevent speculative fetch onto device memory while MMU is off
141 * . reflect as much as possible SH5 ABI (r15, r26, r27, r18)
142 * . first, save CPU state and set it to something harmless
143 * . any CPU detection and/or endianness settings (?)
144 * . initialize EMI/LMI (but not TMU/RTC/INTC/SCIF): TBD
145 * . set initial TLB entries for cached and uncached regions
146 * (no fine granularity paging)
147 * . set initial cache state
148 * . enable MMU and caches
149 * . set CPU to a consistent state
150 * . registers (including stack pointer and current/KCR0)
151 * . NOT expecting to set Exception handling nor VBR/RESVEC/DCR
152 * at this stage. This is all left to later Linux initialization steps.
153 * . initialize FPU
154 * . clear BSS
155 * . jump into start_kernel()
156 * . be prepared for the hopeless case that start_kernel() returns.
157 *
158 */
159 .global _stext
160_stext:
161 /*
162 * Prevent speculative fetch on device memory due to
163 * uninitialized target registers.
164 */
165 ptabs/u ZERO, tr0
166 ptabs/u ZERO, tr1
167 ptabs/u ZERO, tr2
168 ptabs/u ZERO, tr3
169 ptabs/u ZERO, tr4
170 ptabs/u ZERO, tr5
171 ptabs/u ZERO, tr6
172 ptabs/u ZERO, tr7
173 synci
174
175 /*
176 * Read/Set CPU state. After this block:
177 * r29 = Initial SR
178 */
179 getcon SR, r29
180 movi SR_HARMLESS, r20
181 putcon r20, SR
182
183 /*
184 * Initialize EMI/LMI. To Be Done.
185 */
186
187 /*
188 * CPU detection and/or endianness settings (?). To Be Done.
189 * Pure PIC code here, please ! Just save state into r30.
190 * After this block:
191 * r30 = CPU type/Platform Endianness
192 */
193
194 /*
195 * Set initial TLB entries for cached and uncached regions.
196 * Note: PTA/BLINK is PIC code, PTABS/BLINK isn't !
197 */
198 /* Clear ITLBs */
199 pta clear_ITLB, tr1
200 movi MMUIR_FIRST, r21
201 movi MMUIR_END, r22
202clear_ITLB:
203 putcfg r21, 0, ZERO /* Clear MMUIR[n].PTEH.V */
204 addi r21, MMUIR_STEP, r21
205 bne r21, r22, tr1
206
207 /* Clear DTLBs */
208 pta clear_DTLB, tr1
209 movi MMUDR_FIRST, r21
210 movi MMUDR_END, r22
211clear_DTLB:
212 putcfg r21, 0, ZERO /* Clear MMUDR[n].PTEH.V */
213 addi r21, MMUDR_STEP, r21
214 bne r21, r22, tr1
215
216 /* Map one big (512Mb) page for ITLB */
217 movi MMUIR_FIRST, r21
218 movi MMUIR_TEXT_L, r22 /* PTEL first */
219 add.l r22, r63, r22 /* Sign extend */
220 putcfg r21, 1, r22 /* Set MMUIR[0].PTEL */
221 movi MMUIR_TEXT_H, r22 /* PTEH last */
222 add.l r22, r63, r22 /* Sign extend */
223 putcfg r21, 0, r22 /* Set MMUIR[0].PTEH */
224
225 /* Map one big CACHED (512Mb) page for DTLB */
226 movi MMUDR_FIRST, r21
227 movi MMUDR_CACHED_L, r22 /* PTEL first */
228 add.l r22, r63, r22 /* Sign extend */
229 putcfg r21, 1, r22 /* Set MMUDR[0].PTEL */
230 movi MMUDR_CACHED_H, r22 /* PTEH last */
231 add.l r22, r63, r22 /* Sign extend */
232 putcfg r21, 0, r22 /* Set MMUDR[0].PTEH */
233
234#ifdef CONFIG_EARLY_PRINTK
235 /*
236 * Setup a DTLB translation for SCIF phys.
237 */
238 addi r21, MMUDR_STEP, r21
239 movi 0x0a03, r22 /* SCIF phys */
240 shori 0x0148, r22
241 putcfg r21, 1, r22 /* PTEL first */
242 movi 0xfa03, r22 /* 0xfa030000, fixed SCIF virt */
243 shori 0x0003, r22
244 putcfg r21, 0, r22 /* PTEH last */
245#endif
246
247 /*
248 * Set cache behaviours.
249 */
250 /* ICache */
251 movi ICCR_BASE, r21
252 movi ICCR0_INIT_VAL, r22
253 movi ICCR1_INIT_VAL, r23
254 putcfg r21, ICCR_REG0, r22
255 putcfg r21, ICCR_REG1, r23
256
257 /* OCache */
258 movi OCCR_BASE, r21
259 movi OCCR0_INIT_VAL, r22
260 movi OCCR1_INIT_VAL, r23
261 putcfg r21, OCCR_REG0, r22
262 putcfg r21, OCCR_REG1, r23
263
264
265 /*
266 * Enable Caches and MMU. Do the first non-PIC jump.
267 * Now head.S global variables, constants and externs
268 * can be used.
269 */
270 getcon SR, r21
271 movi SR_ENABLE_MMU, r22
272 or r21, r22, r21
273 putcon r21, SSR
274 movi hyperspace, r22
275	ori	r22, 1, r22	/* Make it SHmedia, not required but... */
276 putcon r22, SPC
277 synco
278 rte /* And now go into the hyperspace ... */
279hyperspace: /* ... that's the next instruction ! */
280
281 /*
282 * Set CPU to a consistent state.
283 * r31 = FPU support flag
284 * tr0/tr7 in use. Others give a chance to loop somewhere safe
285 */
286 movi start_kernel, r32
287 ori r32, 1, r32
288
289 ptabs r32, tr0 /* r32 = _start_kernel address */
290 pta/u hopeless, tr1
291 pta/u hopeless, tr2
292 pta/u hopeless, tr3
293 pta/u hopeless, tr4
294 pta/u hopeless, tr5
295 pta/u hopeless, tr6
296 pta/u hopeless, tr7
297 gettr tr1, r28 /* r28 = hopeless address */
298
299 /* Set initial stack pointer */
300 movi init_thread_union, SP
301 putcon SP, KCR0 /* Set current to init_task */
302 movi THREAD_SIZE, r22 /* Point to the end */
303 add SP, r22, SP
304
305 /*
306 * Initialize FPU.
307 * Keep FPU flag in r31. After this block:
308 * r31 = FPU flag
309 */
310 movi fpu_in_use, r31 /* Temporary */
311
312#ifdef CONFIG_SH_FPU
313 getcon SR, r21
314 movi SR_ENABLE_FPU, r22
315 and r21, r22, r22
316 putcon r22, SR /* Try to enable */
317 getcon SR, r22
318 xor r21, r22, r21
319 shlri r21, 15, r21 /* Supposedly 0/1 */
320 st.q r31, 0 , r21 /* Set fpu_in_use */
321#else
322 movi 0, r21
323 st.q r31, 0 , r21 /* Set fpu_in_use */
324#endif
325 or r21, ZERO, r31 /* Set FPU flag at last */
326
327#ifndef CONFIG_SH_NO_BSS_INIT
328/* Don't clear BSS if running on slow platforms such as an RTL simulation,
329 remote memory via SHdebug link, etc. For these the memory can be guaranteed
330 to be all zero on boot anyway. */
331 /*
332 * Clear bss
333 */
334 pta clear_quad, tr1
335 movi __bss_start, r22
336 movi _end, r23
337clear_quad:
338 st.q r22, 0, ZERO
339 addi r22, 8, r22
340 bne r22, r23, tr1 /* Both quad aligned, see vmlinux.lds.S */
341#endif
342 pta/u hopeless, tr1
343
344 /* Say bye to head.S but be prepared to wrongly get back ... */
345 blink tr0, LINK
346
347 /* If we ever get back here through LINK/tr1-tr7 */
348 pta/u hopeless, tr7
349
350hopeless:
351 /*
352 * Something's badly wrong here. Loop endlessly,
353 * there's nothing more we can do about it.
354 *
355 * Note on hopeless: it can be jumped into both before and
356 * after jumping into hyperspace. The only requirement is
357 * that it be reached PIC (via PTA) before, and either way
358 * (PTA/PTABS) after. Given the virtual-to-physical
359 * mapping, a simulator/emulator can easily tell where we
360 * came from just by looking at the hopeless (PC)
361 * address.
362 *
363 * For debugging purposes:
364 * (r28) hopeless/loop address
365 * (r29) Original SR
366 * (r30) CPU type/Platform endianness
367 * (r31) FPU Support
368 * (r32) _start_kernel address
369 */
370 blink tr7, ZERO
371
372
diff --git a/arch/sh64/kernel/init_task.c b/arch/sh64/kernel/init_task.c
deleted file mode 100644
index deee8bfd3270..000000000000
--- a/arch/sh64/kernel/init_task.c
+++ /dev/null
@@ -1,46 +0,0 @@
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * arch/sh64/kernel/init_task.c
7 *
8 * Copyright (C) 2000, 2001 Paolo Alberelli
9 * Copyright (C) 2003 Paul Mundt
10 *
11 */
12#include <linux/rwsem.h>
13#include <linux/mm.h>
14#include <linux/sched.h>
15#include <linux/init_task.h>
16#include <linux/mqueue.h>
17#include <linux/fs.h>
18#include <asm/uaccess.h>
19#include <asm/pgtable.h>
20
21static struct fs_struct init_fs = INIT_FS;
22static struct files_struct init_files = INIT_FILES;
23static struct signal_struct init_signals = INIT_SIGNALS(init_signals);
24static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
25struct mm_struct init_mm = INIT_MM(init_mm);
26
27struct pt_regs fake_swapper_regs;
28
29/*
30 * Initial thread structure.
31 *
32 * We need to make sure that this is THREAD_SIZE-byte aligned due
33 * to the way process stacks are handled. This is done by having a
34 * special "init_task" linker map entry..
35 */
36union thread_union init_thread_union
37 __attribute__((__section__(".data.init_task"))) =
38 { INIT_THREAD_INFO(init_task) };
39
40/*
41 * Initial task structure.
42 *
43 * All other task structs will be allocated on slabs in fork.c
44 */
45struct task_struct init_task = INIT_TASK(init_task);
46
diff --git a/arch/sh64/kernel/irq.c b/arch/sh64/kernel/irq.c
deleted file mode 100644
index 9412b7166700..000000000000
--- a/arch/sh64/kernel/irq.c
+++ /dev/null
@@ -1,115 +0,0 @@
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * arch/sh64/kernel/irq.c
7 *
8 * Copyright (C) 2000, 2001 Paolo Alberelli
9 * Copyright (C) 2003 Paul Mundt
10 *
11 */
12
13/*
14 * IRQs are in fact implemented a bit like signal handlers for the kernel.
15 * Naturally it's not a 1:1 relation, but there are similarities.
16 */
17
18#include <linux/errno.h>
19#include <linux/kernel_stat.h>
20#include <linux/signal.h>
21#include <linux/rwsem.h>
22#include <linux/sched.h>
23#include <linux/ioport.h>
24#include <linux/interrupt.h>
25#include <linux/timex.h>
26#include <linux/slab.h>
27#include <linux/random.h>
28#include <linux/smp.h>
29#include <linux/init.h>
30#include <linux/seq_file.h>
31#include <linux/bitops.h>
32#include <asm/system.h>
33#include <asm/io.h>
34#include <asm/smp.h>
35#include <asm/pgalloc.h>
36#include <asm/delay.h>
37#include <asm/irq.h>
38#include <linux/irq.h>
39
40void ack_bad_irq(unsigned int irq)
41{
42 printk("unexpected IRQ trap at irq %02x\n", irq);
43}
44
45#if defined(CONFIG_PROC_FS)
46int show_interrupts(struct seq_file *p, void *v)
47{
48 int i = *(loff_t *) v, j;
49 struct irqaction * action;
50 unsigned long flags;
51
52 if (i == 0) {
53 seq_puts(p, " ");
54 for_each_online_cpu(j)
55 seq_printf(p, "CPU%d ",j);
56 seq_putc(p, '\n');
57 }
58
59 if (i < NR_IRQS) {
60 spin_lock_irqsave(&irq_desc[i].lock, flags);
61 action = irq_desc[i].action;
62 if (!action)
63 goto unlock;
64 seq_printf(p, "%3d: ",i);
65 seq_printf(p, "%10u ", kstat_irqs(i));
66 seq_printf(p, " %14s", irq_desc[i].chip->typename);
67 seq_printf(p, " %s", action->name);
68
69 for (action=action->next; action; action = action->next)
70 seq_printf(p, ", %s", action->name);
71 seq_putc(p, '\n');
72unlock:
73 spin_unlock_irqrestore(&irq_desc[i].lock, flags);
74 }
75 return 0;
76}
77#endif
78
79/*
80 * do_NMI handles all Non-Maskable Interrupts.
81 */
82asmlinkage void do_NMI(unsigned long vector_num, struct pt_regs * regs)
83{
84 if (regs->sr & 0x40000000)
85 printk("unexpected NMI trap in system mode\n");
86 else
87 printk("unexpected NMI trap in user mode\n");
88
89 /* No statistics */
90}
91
92/*
93 * do_IRQ handles all normal device IRQ's.
94 */
95asmlinkage int do_IRQ(unsigned long vector_num, struct pt_regs * regs)
96{
97 struct pt_regs *old_regs = set_irq_regs(regs);
98 int irq;
99
100 irq_enter();
101
102 irq = irq_demux(vector_num);
103
104 if (irq >= 0) {
105 __do_IRQ(irq);
106 } else {
107 printk("unexpected IRQ trap at vector %03lx\n", vector_num);
108 }
109
110 irq_exit();
111
112 set_irq_regs(old_regs);
113 return 1;
114}
115
diff --git a/arch/sh64/kernel/irq_intc.c b/arch/sh64/kernel/irq_intc.c
deleted file mode 100644
index 3b63a93198f2..000000000000
--- a/arch/sh64/kernel/irq_intc.c
+++ /dev/null
@@ -1,272 +0,0 @@
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * arch/sh64/kernel/irq_intc.c
7 *
8 * Copyright (C) 2000, 2001 Paolo Alberelli
9 * Copyright (C) 2003 Paul Mundt
10 *
11 * Interrupt Controller support for SH5 INTC.
12 * Per-interrupt selective. IRLM=0 (Fixed priority) is not
13 * supported, being useless without a cascaded interrupt
14 * controller.
15 *
16 */
17
18#include <linux/init.h>
19#include <linux/interrupt.h>
20#include <linux/irq.h>
21#include <linux/kernel.h>
22#include <linux/stddef.h>
23#include <linux/bitops.h>	/* this also includes <asm/registers.h>, */
24				/* which is required to remap register */
25				/* names used in __asm__ blocks... */
26
27#include <asm/hardware.h>
28#include <asm/platform.h>
29#include <asm/page.h>
30#include <asm/io.h>
31#include <asm/irq.h>
32
33/*
34 * Maybe the generic Peripheral block could move to a more
35 * generic include file. INTC Block will be defined here
36 * and only here to make INTC self-contained in a single
37 * file.
38 */
39#define INTC_BLOCK_OFFSET 0x01000000
40
41/* Base */
42#define INTC_BASE PHYS_PERIPHERAL_BLOCK + \
43 INTC_BLOCK_OFFSET
44
45/* Address */
46#define INTC_ICR_SET (intc_virt + 0x0)
47#define INTC_ICR_CLEAR (intc_virt + 0x8)
48#define INTC_INTPRI_0 (intc_virt + 0x10)
49#define INTC_INTSRC_0 (intc_virt + 0x50)
50#define INTC_INTSRC_1 (intc_virt + 0x58)
51#define INTC_INTREQ_0 (intc_virt + 0x60)
52#define INTC_INTREQ_1 (intc_virt + 0x68)
53#define INTC_INTENB_0 (intc_virt + 0x70)
54#define INTC_INTENB_1 (intc_virt + 0x78)
55#define INTC_INTDSB_0 (intc_virt + 0x80)
56#define INTC_INTDSB_1 (intc_virt + 0x88)
57
58#define INTC_ICR_IRLM 0x1
59#define INTC_INTPRI_PREGS 8 /* 8 Priority Registers */
60#define INTC_INTPRI_PPREG 8 /* 8 Priorities per Register */
61
62
63/*
64 * Mapper between the vector ordinal and the IRQ number
65 * passed to kernel/device drivers.
66 */
67int intc_evt_to_irq[(0xE20/0x20)+1] = {
68 -1, -1, -1, -1, -1, -1, -1, -1, /* 0x000 - 0x0E0 */
69 -1, -1, -1, -1, -1, -1, -1, -1, /* 0x100 - 0x1E0 */
70 0, 0, 0, 0, 0, 1, 0, 0, /* 0x200 - 0x2E0 */
71 2, 0, 0, 3, 0, 0, 0, -1, /* 0x300 - 0x3E0 */
72 32, 33, 34, 35, 36, 37, 38, -1, /* 0x400 - 0x4E0 */
73 -1, -1, -1, 63, -1, -1, -1, -1, /* 0x500 - 0x5E0 */
74 -1, -1, 18, 19, 20, 21, 22, -1, /* 0x600 - 0x6E0 */
75 39, 40, 41, 42, -1, -1, -1, -1, /* 0x700 - 0x7E0 */
76 4, 5, 6, 7, -1, -1, -1, -1, /* 0x800 - 0x8E0 */
77 -1, -1, -1, -1, -1, -1, -1, -1, /* 0x900 - 0x9E0 */
78 12, 13, 14, 15, 16, 17, -1, -1, /* 0xA00 - 0xAE0 */
79 -1, -1, -1, -1, -1, -1, -1, -1, /* 0xB00 - 0xBE0 */
80 -1, -1, -1, -1, -1, -1, -1, -1, /* 0xC00 - 0xCE0 */
81 -1, -1, -1, -1, -1, -1, -1, -1, /* 0xD00 - 0xDE0 */
82 -1, -1 /* 0xE00 - 0xE20 */
83};
84
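Since INTEVT codes step by 0x20, the table indexes as code/0x20. A demux step would look roughly like this (our sketch; the real lookup sits behind irq_demux):

/* Entries of -1 mean the vector has no IRQ assigned. */
static inline int intc_evt_demux_sketch(unsigned long intevt)
{
	return intc_evt_to_irq[intevt / 0x20];
}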
85/*
86 * Opposite mapper.
87 */
88static int IRQ_to_vectorN[NR_INTC_IRQS] = {
89 0x12, 0x15, 0x18, 0x1B, 0x40, 0x41, 0x42, 0x43, /* 0- 7 */
90 -1, -1, -1, -1, 0x50, 0x51, 0x52, 0x53, /* 8-15 */
91 0x54, 0x55, 0x32, 0x33, 0x34, 0x35, 0x36, -1, /* 16-23 */
92 -1, -1, -1, -1, -1, -1, -1, -1, /* 24-31 */
93 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x38, /* 32-39 */
94 0x39, 0x3A, 0x3B, -1, -1, -1, -1, -1, /* 40-47 */
95 -1, -1, -1, -1, -1, -1, -1, -1, /* 48-55 */
96 -1, -1, -1, -1, -1, -1, -1, 0x2B, /* 56-63 */
97
98};
99
100static unsigned long intc_virt;
101
102static unsigned int startup_intc_irq(unsigned int irq);
103static void shutdown_intc_irq(unsigned int irq);
104static void enable_intc_irq(unsigned int irq);
105static void disable_intc_irq(unsigned int irq);
106static void mask_and_ack_intc(unsigned int);
107static void end_intc_irq(unsigned int irq);
108
109static struct hw_interrupt_type intc_irq_type = {
110 .typename = "INTC",
111 .startup = startup_intc_irq,
112 .shutdown = shutdown_intc_irq,
113 .enable = enable_intc_irq,
114 .disable = disable_intc_irq,
115 .ack = mask_and_ack_intc,
116 .end = end_intc_irq
117};
118
119static int irlm; /* IRL mode */
120
121static unsigned int startup_intc_irq(unsigned int irq)
122{
123 enable_intc_irq(irq);
124 return 0; /* never anything pending */
125}
126
127static void shutdown_intc_irq(unsigned int irq)
128{
129 disable_intc_irq(irq);
130}
131
132static void enable_intc_irq(unsigned int irq)
133{
134 unsigned long reg;
135 unsigned long bitmask;
136
137 if ((irq <= IRQ_IRL3) && (irlm == NO_PRIORITY))
138 printk("Trying to use straight IRL0-3 with an encoding platform.\n");
139
140 if (irq < 32) {
141 reg = INTC_INTENB_0;
142 bitmask = 1 << irq;
143 } else {
144 reg = INTC_INTENB_1;
145 bitmask = 1 << (irq - 32);
146 }
147
148 ctrl_outl(bitmask, reg);
149}
150
151static void disable_intc_irq(unsigned int irq)
152{
153 unsigned long reg;
154 unsigned long bitmask;
155
156 if (irq < 32) {
157 reg = INTC_INTDSB_0;
158 bitmask = 1 << irq;
159 } else {
160 reg = INTC_INTDSB_1;
161 bitmask = 1 << (irq - 32);
162 }
163
164 ctrl_outl(bitmask, reg);
165}
166
167static void mask_and_ack_intc(unsigned int irq)
168{
169 disable_intc_irq(irq);
170}
171
172static void end_intc_irq(unsigned int irq)
173{
174 enable_intc_irq(irq);
175}
176
177/* For future use, if we ever support IRLM=0 */
178void make_intc_irq(unsigned int irq)
179{
180 disable_irq_nosync(irq);
181 irq_desc[irq].chip = &intc_irq_type;
182 disable_intc_irq(irq);
183}
184
185#if defined(CONFIG_PROC_FS) && defined(CONFIG_SYSCTL)
186int intc_irq_describe(char* p, int irq)
187{
188 if (irq < NR_INTC_IRQS)
189 return sprintf(p, "(0x%3x)", IRQ_to_vectorN[irq]*0x20);
190 else
191 return 0;
192}
193#endif
194
195void __init init_IRQ(void)
196{
197 unsigned long long __dummy0, __dummy1=~0x00000000100000f0;
198 unsigned long reg;
199 unsigned long data;
200 int i;
201
202 intc_virt = onchip_remap(INTC_BASE, 1024, "INTC");
203 if (!intc_virt) {
204 panic("Unable to remap INTC\n");
205 }
206
207
208 /* Set default: per-line enable/disable, priority driven ack/eoi */
209 for (i = 0; i < NR_INTC_IRQS; i++) {
210 if (platform_int_priority[i] != NO_PRIORITY) {
211 irq_desc[i].chip = &intc_irq_type;
212 }
213 }
214
215
216 /* Disable all interrupts and set all priorities to 0 to avoid trouble */
217 ctrl_outl(-1, INTC_INTDSB_0);
218 ctrl_outl(-1, INTC_INTDSB_1);
219
220 for (reg = INTC_INTPRI_0, i = 0; i < INTC_INTPRI_PREGS; i++, reg += 8)
221 ctrl_outl( NO_PRIORITY, reg);
222
223
224 /* Set IRLM */
225 /* If all the priorities are set to 'no priority', then
226 * assume we are using encoded mode.
227 */
228 irlm = platform_int_priority[IRQ_IRL0] + platform_int_priority[IRQ_IRL1] + \
229 platform_int_priority[IRQ_IRL2] + platform_int_priority[IRQ_IRL3];
230
231 if (irlm == NO_PRIORITY) {
232 /* IRLM = 0 */
233 reg = INTC_ICR_CLEAR;
234 i = IRQ_INTA;
235 printk("Trying to use encoded IRL0-3. IRLs unsupported.\n");
236 } else {
237 /* IRLM = 1 */
238 reg = INTC_ICR_SET;
239 i = IRQ_IRL0;
240 }
241 ctrl_outl(INTC_ICR_IRLM, reg);
242
243 /* Set interrupt priorities according to platform description */
244 for (data = 0, reg = INTC_INTPRI_0; i < NR_INTC_IRQS; i++) {
245 data |= platform_int_priority[i] << ((i % INTC_INTPRI_PPREG) * 4);
246 if ((i % INTC_INTPRI_PPREG) == (INTC_INTPRI_PPREG - 1)) {
247			/* On the last of each group of 8, write the Priority Register */
248 ctrl_outl(data, reg);
249 data = 0;
250 reg += 8;
251 }
252 }
253
254#ifdef CONFIG_SH_CAYMAN
255 {
256 extern void init_cayman_irq(void);
257
258 init_cayman_irq();
259 }
260#endif
261
262 /*
263 * And now let interrupts come in.
264 * sti() is not enough, we need to
265 * lower priority, too.
266 */
267 __asm__ __volatile__("getcon " __SR ", %0\n\t"
268 "and %0, %1, %0\n\t"
269 "putcon %0, " __SR "\n\t"
270 : "=&r" (__dummy0)
271 : "r" (__dummy1));
272}
diff --git a/arch/sh64/kernel/led.c b/arch/sh64/kernel/led.c
deleted file mode 100644
index e35d3f667fb4..000000000000
--- a/arch/sh64/kernel/led.c
+++ /dev/null
@@ -1,40 +0,0 @@
1/*
2 * arch/sh64/kernel/led.c
3 *
4 * Copyright (C) 2002 Stuart Menefy <stuart.menefy@st.com>
5 *
6 * May be copied or modified under the terms of the GNU General Public
7 * License. See linux/COPYING for more information.
8 *
9 * Flash the LEDs
10 */
11#include <linux/stddef.h>
12#include <linux/sched.h>
13
14void mach_led(int pos, int val);
15
16/* acts like an actual heartbeat -- i.e. thump-thump-pause... */
17void heartbeat(void)
18{
19 static unsigned int cnt = 0, period = 0, dist = 0;
20
21 if (cnt == 0 || cnt == dist) {
22 mach_led(-1, 1);
23 } else if (cnt == 7 || cnt == dist + 7) {
24 mach_led(-1, 0);
25 }
26
27 if (++cnt > period) {
28 cnt = 0;
29
30 /*
31 * The hyperbolic function below modifies the heartbeat period
32 * length depending on the current (5 min) load average. It goes
33 * through the points f(0)=126, f(1)=86, f(5)=51, f(inf)->30.
34 */
35 period = ((672 << FSHIFT) / (5 * avenrun[0] +
36 (7 << FSHIFT))) + 30;
37 dist = period / 4;
38 }
39}
40
diff --git a/arch/sh64/kernel/module.c b/arch/sh64/kernel/module.c
deleted file mode 100644
index 2598f6b88b44..000000000000
--- a/arch/sh64/kernel/module.c
+++ /dev/null
@@ -1,161 +0,0 @@
1/* Kernel module help for sh64.
2
3 This program is free software; you can redistribute it and/or modify
4 it under the terms of the GNU General Public License as published by
5 the Free Software Foundation; either version 2 of the License, or
6 (at your option) any later version.
7
8 This program is distributed in the hope that it will be useful,
9 but WITHOUT ANY WARRANTY; without even the implied warranty of
10 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 GNU General Public License for more details.
12
13 You should have received a copy of the GNU General Public License
14 along with this program; if not, write to the Free Software
15 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
16
17 Copyright 2004 SuperH (UK) Ltd
18 Author: Richard Curnow
19
20 Based on the sh version, and on code from the sh64-specific parts of
21 modutils, originally written by Richard Curnow and Ben Gaster.
22
23*/
24#include <linux/moduleloader.h>
25#include <linux/elf.h>
26#include <linux/vmalloc.h>
27#include <linux/fs.h>
28#include <linux/string.h>
29#include <linux/kernel.h>
30
31#if 0
32#define DEBUGP printk
33#else
34#define DEBUGP(fmt...)
35#endif
36
37void *module_alloc(unsigned long size)
38{
39 if (size == 0)
40 return NULL;
41 return vmalloc(size);
42}
43
44
45/* Free memory returned from module_alloc */
46void module_free(struct module *mod, void *module_region)
47{
48 vfree(module_region);
49 /* FIXME: If module_region == mod->init_region, trim exception
50 table entries. */
51}
52
53/* We don't need anything special. */
54int module_frob_arch_sections(Elf_Ehdr *hdr,
55 Elf_Shdr *sechdrs,
56 char *secstrings,
57 struct module *mod)
58{
59 return 0;
60}
61
62int apply_relocate_add(Elf32_Shdr *sechdrs,
63 const char *strtab,
64 unsigned int symindex,
65 unsigned int relsec,
66 struct module *me)
67{
68 unsigned int i;
69 Elf32_Rela *rel = (void *)sechdrs[relsec].sh_addr;
70 Elf32_Sym *sym;
71 Elf32_Addr relocation;
72 uint32_t *location;
73 int align;
74 int is_shmedia;
75
76 DEBUGP("Applying relocate section %u to %u\n", relsec,
77 sechdrs[relsec].sh_info);
78 for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
79 /* This is where to make the change */
80 location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
81 + rel[i].r_offset;
82 /* This is the symbol it is referring to. Note that all
83 undefined symbols have been resolved. */
84 sym = (Elf32_Sym *)sechdrs[symindex].sh_addr
85 + ELF32_R_SYM(rel[i].r_info);
86 relocation = sym->st_value + rel[i].r_addend;
87 align = (int)location & 3;
88
89 /* For text addresses, bit2 of the st_other field indicates
90 * whether the symbol is SHmedia (1) or SHcompact (0). If
91 * SHmedia, the LSB of the symbol needs to be asserted
92 * for the CPU to be in SHmedia mode when it starts executing
93 * the branch target. */
94 is_shmedia = (sym->st_other & 4) ? 1 : 0;
95 if (is_shmedia) {
96 relocation |= 1;
97 }
98
99 switch (ELF32_R_TYPE(rel[i].r_info)) {
100 case R_SH_DIR32:
101 DEBUGP("R_SH_DIR32 @%08lx = %08lx\n", (unsigned long) location, (unsigned long) relocation);
102 *location += relocation;
103 break;
104 case R_SH_REL32:
105 DEBUGP("R_SH_REL32 @%08lx = %08lx\n", (unsigned long) location, (unsigned long) relocation);
106 relocation -= (Elf32_Addr) location;
107 *location += relocation;
108 break;
109 case R_SH_IMM_LOW16:
110 DEBUGP("R_SH_IMM_LOW16 @%08lx = %08lx\n", (unsigned long) location, (unsigned long) relocation);
111 *location = (*location & ~0x3fffc00) |
112 ((relocation & 0xffff) << 10);
113 break;
114 case R_SH_IMM_MEDLOW16:
115 DEBUGP("R_SH_IMM_MEDLOW16 @%08lx = %08lx\n", (unsigned long) location, (unsigned long) relocation);
116 *location = (*location & ~0x3fffc00) |
117 (((relocation >> 16) & 0xffff) << 10);
118 break;
119 case R_SH_IMM_LOW16_PCREL:
120 DEBUGP("R_SH_IMM_LOW16_PCREL @%08lx = %08lx\n", (unsigned long) location, (unsigned long) relocation);
121 relocation -= (Elf32_Addr) location;
122 *location = (*location & ~0x3fffc00) |
123 ((relocation & 0xffff) << 10);
124 break;
125 case R_SH_IMM_MEDLOW16_PCREL:
126 DEBUGP("R_SH_IMM_MEDLOW16_PCREL @%08lx = %08lx\n", (unsigned long) location, (unsigned long) relocation);
127 relocation -= (Elf32_Addr) location;
128 *location = (*location & ~0x3fffc00) |
129 (((relocation >> 16) & 0xffff) << 10);
130 break;
131 default:
132 printk(KERN_ERR "module %s: Unknown relocation: %u\n",
133 me->name, ELF32_R_TYPE(rel[i].r_info));
134 return -ENOEXEC;
135 }
136 }
137 return 0;
138}
139
140int apply_relocate(Elf32_Shdr *sechdrs,
141 const char *strtab,
142 unsigned int symindex,
143 unsigned int relsec,
144 struct module *me)
145{
146 printk(KERN_ERR "module %s: REL RELOCATION unsupported\n",
147 me->name);
148 return -ENOEXEC;
149}
150
151int module_finalize(const Elf_Ehdr *hdr,
152 const Elf_Shdr *sechdrs,
153 struct module *me)
154{
155 return 0;
156}
157
158void module_arch_cleanup(struct module *mod)
159{
160}
161
diff --git a/arch/sh64/kernel/pci_sh5.c b/arch/sh64/kernel/pci_sh5.c
deleted file mode 100644
index b4d9534d2b0e..000000000000
--- a/arch/sh64/kernel/pci_sh5.c
+++ /dev/null
@@ -1,536 +0,0 @@
1/*
2 * Copyright (C) 2001 David J. Mckay (david.mckay@st.com)
3 * Copyright (C) 2003, 2004 Paul Mundt
4 * Copyright (C) 2004 Richard Curnow
5 *
6 * May be copied or modified under the terms of the GNU General Public
7 * License. See linux/COPYING for more information.
8 *
9 * Support functions for the SH5 PCI hardware.
10 */
11
12#include <linux/kernel.h>
13#include <linux/rwsem.h>
14#include <linux/smp.h>
15#include <linux/interrupt.h>
16#include <linux/init.h>
17#include <linux/errno.h>
18#include <linux/pci.h>
19#include <linux/delay.h>
20#include <linux/types.h>
21#include <asm/pci.h>
22#include <linux/irq.h>
23
24#include <asm/io.h>
25#include <asm/hardware.h>
26#include "pci_sh5.h"
27
28static unsigned long pcicr_virt;
29unsigned long pciio_virt;
30
31static void __init pci_fixup_ide_bases(struct pci_dev *d)
32{
33 int i;
34
35 /*
36 * PCI IDE controllers use non-standard I/O port decoding, respect it.
37 */
38 if ((d->class >> 8) != PCI_CLASS_STORAGE_IDE)
39 return;
40 printk("PCI: IDE base address fixup for %s\n", pci_name(d));
41 for(i=0; i<4; i++) {
42 struct resource *r = &d->resource[i];
43 if ((r->start & ~0x80) == 0x374) {
44 r->start |= 2;
45 r->end = r->start;
46 }
47 }
48}
49DECLARE_PCI_FIXUP_HEADER(PCI_ANY_ID, PCI_ANY_ID, pci_fixup_ide_bases);
50
51char * __devinit pcibios_setup(char *str)
52{
53 return str;
54}
55
56/* Rounds a number UP to the nearest power of two. Used for
57 * sizing the PCI window.
58 */
59static u32 __init r2p2(u32 num)
60{
61 int i = 31;
62 u32 tmp = num;
63
64 if (num == 0)
65 return 0;
66
67 do {
68 if (tmp & (1 << 31))
69 break;
70 i--;
71 tmp <<= 1;
72 } while (i >= 0);
73
74 tmp = 1 << i;
75 /* If the original number isn't a power of 2, round it up */
76 if (tmp != num)
77 tmp <<= 1;
78
79 return tmp;
80}
81
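A quick worked example (ours) of what r2p2 produces:

/* r2p2(0x00300000) == 0x00400000: 3 MB rounds up to 4 MB.
 * r2p2(0x00200000) == 0x00200000: exact powers of two are unchanged.
 * The PCI window below is thus sized to the next power of two at or
 * above the machine's memory size. */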
82extern unsigned long long memory_start, memory_end;
83
84int __init sh5pci_init(unsigned memStart, unsigned memSize)
85{
86 u32 lsr0;
87 u32 uval;
88
89 pcicr_virt = onchip_remap(SH5PCI_ICR_BASE, 1024, "PCICR");
90 if (!pcicr_virt) {
91 panic("Unable to remap PCICR\n");
92 }
93
94 pciio_virt = onchip_remap(SH5PCI_IO_BASE, 0x10000, "PCIIO");
95 if (!pciio_virt) {
96 panic("Unable to remap PCIIO\n");
97 }
98
99	pr_debug("Register base address is 0x%08lx\n", pcicr_virt);
100
101 /* Clear snoop registers */
102 SH5PCI_WRITE(CSCR0, 0);
103 SH5PCI_WRITE(CSCR1, 0);
104
105 pr_debug("Wrote to reg\n");
106
107 /* Switch off interrupts */
108 SH5PCI_WRITE(INTM, 0);
109 SH5PCI_WRITE(AINTM, 0);
110 SH5PCI_WRITE(PINTM, 0);
111
112 /* Set bus active, take it out of reset */
113 uval = SH5PCI_READ(CR);
114
115 /* Set command Register */
116 SH5PCI_WRITE(CR, uval | CR_LOCK_MASK | CR_CFINT| CR_FTO | CR_PFE | CR_PFCS | CR_BMAM);
117
118 uval=SH5PCI_READ(CR);
119 pr_debug("CR is actually 0x%08x\n",uval);
120
121 /* Allow it to be a master */
122 /* NB - WE DISABLE I/O ACCESS to stop overlap */
123 /* set WAIT bit to enable stepping, an attempt to improve stability */
124 SH5PCI_WRITE_SHORT(CSR_CMD,
125 PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER | PCI_COMMAND_WAIT);
126
127 /*
128 ** Set translation mapping memory in order to convert the address
129 ** used for the main bus, to the PCI internal address.
130 */
131 SH5PCI_WRITE(MBR,0x40000000);
132
133 /* Always set the max size 512M */
134 SH5PCI_WRITE(MBMR, PCISH5_MEM_SIZCONV(512*1024*1024));
135
136 /*
137	** I/O addresses are mapped at an internal PCI-specific address,
138	** as described in the configuration bridge table.
139 ** These are changed to 0, to allow cards that have legacy
140 ** io such as vga to function correctly. We set the SH5 IOBAR to
141 ** 256K, which is a bit big as we can only have 64K of address space
142 */
143
144 SH5PCI_WRITE(IOBR,0x0);
145
146 pr_debug("PCI:Writing 0x%08x to IOBR\n",0);
147
148 /* Set up a 256K window. Totally pointless waste of address space */
149 SH5PCI_WRITE(IOBMR,0);
150 pr_debug("PCI:Writing 0x%08x to IOBMR\n",0);
151
152 /* The SH5 has a HUGE 256K I/O region, which breaks the PCI spec. Ideally,
153 * we would want to map the I/O region somewhere, but it is so big this is not
154 * that easy!
155 */
156 SH5PCI_WRITE(CSR_IBAR0,~0);
157 /* Set memory size value */
158 memSize = memory_end - memory_start;
159
160 /* Now we set up the mbars so the PCI bus can see the memory of the machine */
161 if (memSize < (1024 * 1024)) {
162 printk(KERN_ERR "PCISH5: Ridiculous memory size of 0x%x?\n", memSize);
163 return -EINVAL;
164 }
165
166 /* Set LSR 0 */
167 lsr0 = (memSize > (512 * 1024 * 1024)) ? 0x1ff00001 : ((r2p2(memSize) - 0x100000) | 0x1);
168 SH5PCI_WRITE(LSR0, lsr0);
169
170 pr_debug("PCI:Writing 0x%08x to LSR0\n",lsr0);
171
172 /* Set MBAR 0 */
173 SH5PCI_WRITE(CSR_MBAR0, memory_start);
174 SH5PCI_WRITE(LAR0, memory_start);
175
176 SH5PCI_WRITE(CSR_MBAR1,0);
177 SH5PCI_WRITE(LAR1,0);
178 SH5PCI_WRITE(LSR1,0);
179
180 pr_debug("PCI:Writing 0x%08llx to CSR_MBAR0\n",memory_start);
181 pr_debug("PCI:Writing 0x%08llx to LAR0\n",memory_start);
182
183 /* Enable the PCI interrupts on the device */
184 SH5PCI_WRITE(INTM, ~0);
185 SH5PCI_WRITE(AINTM, ~0);
186 SH5PCI_WRITE(PINTM, ~0);
187
188 pr_debug("Switching on all error interrupts\n");
189
190 return(0);
191}
192
193static int sh5pci_read(struct pci_bus *bus, unsigned int devfn, int where,
194 int size, u32 *val)
195{
196 SH5PCI_WRITE(PAR, CONFIG_CMD(bus, devfn, where));
197
198 switch (size) {
199 case 1:
200 *val = (u8)SH5PCI_READ_BYTE(PDR + (where & 3));
201 break;
202 case 2:
203 *val = (u16)SH5PCI_READ_SHORT(PDR + (where & 2));
204 break;
205 case 4:
206 *val = SH5PCI_READ(PDR);
207 break;
208 }
209
210 return PCIBIOS_SUCCESSFUL;
211}
212
213static int sh5pci_write(struct pci_bus *bus, unsigned int devfn, int where,
214 int size, u32 val)
215{
216 SH5PCI_WRITE(PAR, CONFIG_CMD(bus, devfn, where));
217
218 switch (size) {
219 case 1:
220 SH5PCI_WRITE_BYTE(PDR + (where & 3), (u8)val);
221 break;
222 case 2:
223 SH5PCI_WRITE_SHORT(PDR + (where & 2), (u16)val);
224 break;
225 case 4:
226 SH5PCI_WRITE(PDR, val);
227 break;
228 }
229
230 return PCIBIOS_SUCCESSFUL;
231}
232
233static struct pci_ops pci_config_ops = {
234 .read = sh5pci_read,
235 .write = sh5pci_write,
236};
237
238/* Everything hangs off this */
239static struct pci_bus *pci_root_bus;
240
241
242static u8 __init no_swizzle(struct pci_dev *dev, u8 * pin)
243{
244 pr_debug("swizzle for dev %d on bus %d slot %d pin is %d\n",
245 dev->devfn,dev->bus->number, PCI_SLOT(dev->devfn),*pin);
246 return PCI_SLOT(dev->devfn);
247}
248
249static inline u8 bridge_swizzle(u8 pin, u8 slot)
250{
251 return (((pin-1) + slot) % 4) + 1;
252}
253
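A worked example (ours) of the swizzle arithmetic:

/* A device in slot 2 raising INTB (pin 2) behind one bridge presents
 * upstream as bridge_swizzle(2, 2) = ((2-1)+2)%4+1 = 4, i.e. INTD. */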
254u8 __init common_swizzle(struct pci_dev *dev, u8 *pinp)
255{
256 if (dev->bus->number != 0) {
257 u8 pin = *pinp;
258 do {
259 pin = bridge_swizzle(pin, PCI_SLOT(dev->devfn));
260 /* Move up the chain of bridges. */
261 dev = dev->bus->self;
262 } while (dev->bus->self);
263 *pinp = pin;
264
265 /* The slot is the slot of the last bridge. */
266 }
267
268 return PCI_SLOT(dev->devfn);
269}
270
271/* This needs to be shunted out of here into the board specific bit */
272
273static int __init map_cayman_irq(struct pci_dev *dev, u8 slot, u8 pin)
274{
275 int result = -1;
276
277 /* The complication here is that the PCI IRQ lines from the Cayman's 2
278 5V slots get into the CPU via a different path from the IRQ lines
279 from the 3 3.3V slots. Thus, we have to detect whether the card's
280 interrupts go via the 5V or 3.3V path, i.e. the 'bridge swizzling'
281 at the point where we cross from 5V to 3.3V is not the normal case.
282
283 The added complication is that we don't know that the 5V slots are
284 always bus 2, because a card containing a PCI-PCI bridge may be
285 plugged into a 3.3V slot, and this changes the bus numbering.
286
287 Also, the Cayman has an intermediate PCI bus that goes to a custom
288 expansion board header (and to the secondary bridge). This bus has
289 never been used in practice.
290
291 The primary onboard PCI-PCI bridge is device 3 on bus 0
292 The secondary onboard PCI-PCI bridge is device 0 on the secondary bus of the primary bridge.
293 */
294
295 struct slot_pin {
296 int slot;
297 int pin;
298 } path[4];
299 int i=0;
300
301 while (dev->bus->number > 0) {
302
303 slot = path[i].slot = PCI_SLOT(dev->devfn);
304 pin = path[i].pin = bridge_swizzle(pin, slot);
305 dev = dev->bus->self;
306 i++;
307 if (i > 3) panic("PCI path to root bus too long!\n");
308 }
309
310 slot = PCI_SLOT(dev->devfn);
311 /* This is the slot on bus 0 through which the device is eventually
312 reachable. */
313
314 /* Now work back up. */
315 if ((slot < 3) || (i == 0)) {
316 /* Bus 0 (incl. PCI-PCI bridge itself) : perform the final
317 swizzle now. */
318 result = IRQ_INTA + bridge_swizzle(pin, slot) - 1;
319 } else {
320 i--;
321 slot = path[i].slot;
322 pin = path[i].pin;
323 if (slot > 0) {
324 panic("PCI expansion bus device found - not handled!\n");
325 } else {
326 if (i > 0) {
327 /* 5V slots */
328 i--;
329 slot = path[i].slot;
330 pin = path[i].pin;
331 /* 'pin' was swizzled earlier wrt slot, don't do it again. */
332 result = IRQ_P2INTA + (pin - 1);
333 } else {
334 /* IRQ for secondary PCI-PCI bridge : unused */
335 result = -1;
336 }
337 }
338 }
339
340 return result;
341}
342
343static irqreturn_t pcish5_err_irq(int irq, void *dev_id)
344{
345 struct pt_regs *regs = get_irq_regs();
346 unsigned pci_int, pci_air, pci_cir, pci_aint;
347
348 pci_int = SH5PCI_READ(INT);
349 pci_cir = SH5PCI_READ(CIR);
350 pci_air = SH5PCI_READ(AIR);
351
352 if (pci_int) {
353 printk("PCI INTERRUPT (at %08llx)!\n", regs->pc);
354 printk("PCI INT -> 0x%x\n", pci_int & 0xffff);
355 printk("PCI AIR -> 0x%x\n", pci_air);
356 printk("PCI CIR -> 0x%x\n", pci_cir);
357 SH5PCI_WRITE(INT, ~0);
358 }
359
360 pci_aint = SH5PCI_READ(AINT);
361 if (pci_aint) {
362 printk("PCI ARB INTERRUPT!\n");
363 printk("PCI AINT -> 0x%x\n", pci_aint);
364 printk("PCI AIR -> 0x%x\n", pci_air);
365 printk("PCI CIR -> 0x%x\n", pci_cir);
366 SH5PCI_WRITE(AINT, ~0);
367 }
368
369 return IRQ_HANDLED;
370}
371
372static irqreturn_t pcish5_serr_irq(int irq, void *dev_id)
373{
374 printk("SERR IRQ\n");
375
376 return IRQ_NONE;
377}
378
379static void __init
380pcibios_size_bridge(struct pci_bus *bus, struct resource *ior,
381 struct resource *memr)
382{
383 struct resource io_res, mem_res;
384 struct pci_dev *dev;
385 struct pci_dev *bridge = bus->self;
386 struct list_head *ln;
387
388 if (!bridge)
389 return; /* host bridge, nothing to do */
390
391 /* set reasonable default locations for pcibios_align_resource */
392 io_res.start = PCIBIOS_MIN_IO;
393 mem_res.start = PCIBIOS_MIN_MEM;
394
395 io_res.end = io_res.start;
396 mem_res.end = mem_res.start;
397
398 /* Collect information about how our direct children are laid out. */
399 for (ln=bus->devices.next; ln != &bus->devices; ln=ln->next) {
400 int i;
401 dev = pci_dev_b(ln);
402
403 /* Skip bridges for now */
404 if (dev->class >> 8 == PCI_CLASS_BRIDGE_PCI)
405 continue;
406
407 for (i = 0; i < PCI_NUM_RESOURCES; i++) {
408 struct resource res;
409 unsigned long size;
410
411 memcpy(&res, &dev->resource[i], sizeof(res));
412 size = res.end - res.start + 1;
413
414 if (res.flags & IORESOURCE_IO) {
415 res.start = io_res.end;
416 pcibios_align_resource(dev, &res, size, 0);
417 io_res.end = res.start + size;
418 } else if (res.flags & IORESOURCE_MEM) {
419 res.start = mem_res.end;
420 pcibios_align_resource(dev, &res, size, 0);
421 mem_res.end = res.start + size;
422 }
423 }
424 }
425
426 /* And for all of the subordinate busses. */
427 for (ln=bus->children.next; ln != &bus->children; ln=ln->next)
428 pcibios_size_bridge(pci_bus_b(ln), &io_res, &mem_res);
429
430 /* turn the ending locations into sizes (subtract start) */
431 io_res.end -= io_res.start;
432 mem_res.end -= mem_res.start;
433
434 /* Align the sizes up by bridge rules */
435 io_res.end = ALIGN(io_res.end, 4*1024) - 1;
436 mem_res.end = ALIGN(mem_res.end, 1*1024*1024) - 1;
437
438 /* Adjust the bridge's allocation requirements */
439 bridge->resource[0].end = bridge->resource[0].start + io_res.end;
440 bridge->resource[1].end = bridge->resource[1].start + mem_res.end;
441
442 bridge->resource[PCI_BRIDGE_RESOURCES].end =
443 bridge->resource[PCI_BRIDGE_RESOURCES].start + io_res.end;
444 bridge->resource[PCI_BRIDGE_RESOURCES+1].end =
445 bridge->resource[PCI_BRIDGE_RESOURCES+1].start + mem_res.end;
446
447 /* adjust parent's resource requirements */
448 if (ior) {
449 ior->end = ALIGN(ior->end, 4*1024);
450 ior->end += io_res.end;
451 }
452
453 if (memr) {
454 memr->end = ALIGN(memr->end, 1*1024*1024);
455 memr->end += mem_res.end;
456 }
457}
458
459static void __init pcibios_size_bridges(void)
460{
461 struct resource io_res, mem_res;
462
463 memset(&io_res, 0, sizeof(io_res));
464 memset(&mem_res, 0, sizeof(mem_res));
465
466 pcibios_size_bridge(pci_root_bus, &io_res, &mem_res);
467}
468
469static int __init pcibios_init(void)
470{
471 if (request_irq(IRQ_ERR, pcish5_err_irq,
472 IRQF_DISABLED, "PCI Error",NULL) < 0) {
473 printk(KERN_ERR "PCISH5: Cannot hook PCI_PERR interrupt\n");
474 return -EINVAL;
475 }
476
477 if (request_irq(IRQ_SERR, pcish5_serr_irq,
478 IRQF_DISABLED, "PCI SERR interrupt", NULL) < 0) {
479 printk(KERN_ERR "PCISH5: Cannot hook PCI_SERR interrupt\n");
480 return -EINVAL;
481 }
482
483 /* The pci subsystem needs to know where memory is and how much
484 * of it there is. I've simply made these globals. A better mechanism
485 * is probably needed.
486 */
487 sh5pci_init(__pa(memory_start),
488 __pa(memory_end) - __pa(memory_start));
489
490 pci_root_bus = pci_scan_bus(0, &pci_config_ops, NULL);
491 pcibios_size_bridges();
492 pci_assign_unassigned_resources();
493 pci_fixup_irqs(no_swizzle, map_cayman_irq);
494
495 return 0;
496}
497
498subsys_initcall(pcibios_init);
499
500void __devinit pcibios_fixup_bus(struct pci_bus *bus)
501{
502 struct pci_dev *dev = bus->self;
503 int i;
504
505#if 1
506 if(dev) {
507 for(i=0; i<3; i++) {
508 bus->resource[i] =
509 &dev->resource[PCI_BRIDGE_RESOURCES+i];
510 bus->resource[i]->name = bus->name;
511 }
512 bus->resource[0]->flags |= IORESOURCE_IO;
513 bus->resource[1]->flags |= IORESOURCE_MEM;
514
515 /* For now, propagate host limits to the bus;
516 * we'll adjust them later. */
517
518#if 1
519 bus->resource[0]->end = 64*1024 - 1 ;
520 bus->resource[1]->end = PCIBIOS_MIN_MEM+(256*1024*1024)-1;
521 bus->resource[0]->start = PCIBIOS_MIN_IO;
522 bus->resource[1]->start = PCIBIOS_MIN_MEM;
523#else
524 bus->resource[0]->end = 0;
525 bus->resource[1]->end = 0;
526 bus->resource[0]->start =0;
527 bus->resource[1]->start = 0;
528#endif
529 /* Turn off downstream PF memory address range by default */
530 bus->resource[2]->start = 1024*1024;
531 bus->resource[2]->end = bus->resource[2]->start - 1;
532 }
533#endif
534
535}
536
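A note on the swizzle arithmetic used by bridge_swizzle() and map_cayman_irq() above: each PCI-PCI bridge rotates a device's INTx pin by the child device's slot number, so the pin seen at the root bus is obtained by applying the rotation once per bridge crossed. A minimal standalone sketch with hypothetical example values, not part of the deleted file:

#include <stdio.h>

/* Standard PCI INTx swizzle: pins are numbered 1..4 (INTA..INTD)
 * and each bridge rotates the pin by the child device's slot. */
static unsigned swizzle(unsigned pin, unsigned slot)
{
	return (((pin - 1) + slot) % 4) + 1;
}

int main(void)
{
	/* Example: a device asserting INTA (pin 1) behind bridges in
	 * slots 2 and then 3 on the way to the root bus. */
	unsigned pin = 1;
	pin = swizzle(pin, 2);	/* INTA -> INTC */
	pin = swizzle(pin, 3);	/* INTC -> INTB */
	printf("pin at root bus: INT%c\n", 'A' + pin - 1);
	return 0;
}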
diff --git a/arch/sh64/kernel/pci_sh5.h b/arch/sh64/kernel/pci_sh5.h
deleted file mode 100644
index c71159dd04b9..000000000000
--- a/arch/sh64/kernel/pci_sh5.h
+++ /dev/null
@@ -1,107 +0,0 @@
1/*
2 * Copyright (C) 2001 David J. Mckay (david.mckay@st.com)
3 *
4 * May be copied or modified under the terms of the GNU General Public
5 * License. See linux/COPYING for more information.
6 *
7 * Definitions for the SH5 PCI hardware.
8 */
9
10/* Product ID */
11#define PCISH5_PID 0x350d
12
13/* vendor ID */
14#define PCISH5_VID 0x1054
15
16/* Configuration types */
17#define ST_TYPE0 0x00 /* Configuration cycle type 0 */
18#define ST_TYPE1 0x01 /* Configuration cycle type 1 */
19
20/* VCR data */
21#define PCISH5_VCR_STATUS 0x00
22#define PCISH5_VCR_VERSION 0x08
23
24/*
25** ICR register offsets and bits
26*/
27#define PCISH5_ICR_CR 0x100 /* PCI control register values */
28#define CR_PBAM (1<<12)
29#define CR_PFCS (1<<11)
30#define CR_FTO (1<<10)
31#define CR_PFE (1<<9)
32#define CR_TBS (1<<8)
33#define CR_SPUE (1<<7)
34#define CR_BMAM (1<<6)
35#define CR_HOST (1<<5)
36#define CR_CLKEN (1<<4)
37#define CR_SOCS (1<<3)
38#define CR_IOCS (1<<2)
39#define CR_RSTCTL (1<<1)
40#define CR_CFINT (1<<0)
41#define CR_LOCK_MASK 0xa5000000
42
43#define PCISH5_ICR_INT 0x114 /* Interrupt register values */
44#define INT_MADIM (1<<2)
45
46#define PCISH5_ICR_LSR0 0x104 /* Local space register values */
47#define PCISH5_ICR_LSR1 0x108 /* Local space register values */
48#define PCISH5_ICR_LAR0 0x10c /* Local address register values */
49#define PCISH5_ICR_LAR1 0x110 /* Local address register values */
50#define PCISH5_ICR_INTM 0x118 /* Interrupt mask register values */
51#define PCISH5_ICR_AIR 0x11c /* Interrupt error address information register values */
52#define PCISH5_ICR_CIR 0x120 /* Interrupt error command information register values */
53#define PCISH5_ICR_AINT 0x130 /* Interrupt error arbiter interrupt register values */
54#define PCISH5_ICR_AINTM 0x134 /* Interrupt error arbiter interrupt mask register values */
55#define PCISH5_ICR_BMIR 0x138 /* Interrupt error info register of bus master values */
56#define PCISH5_ICR_PAR 0x1c0 /* PIO address register values */
57#define PCISH5_ICR_MBR 0x1c4 /* Memory space bank register values */
58#define PCISH5_ICR_IOBR 0x1c8 /* I/O space bank register values */
59#define PCISH5_ICR_PINT 0x1cc /* power management interrupt register values */
60#define PCISH5_ICR_PINTM 0x1d0 /* power management interrupt mask register values */
61#define PCISH5_ICR_MBMR 0x1d8 /* memory space bank mask register values */
62#define PCISH5_ICR_IOBMR 0x1dc /* I/O space bank mask register values */
63#define PCISH5_ICR_CSCR0 0x210 /* PCI cache snoop control register 0 */
64#define PCISH5_ICR_CSCR1 0x214 /* PCI cache snoop control register 1 */
65#define PCISH5_ICR_PDR 0x220 /* PIO data register values */
66
67/* These are config space registers */
68#define PCISH5_ICR_CSR_VID 0x000 /* Vendor id */
69#define PCISH5_ICR_CSR_DID 0x002 /* Device id */
70#define PCISH5_ICR_CSR_CMD 0x004 /* Command register */
71#define PCISH5_ICR_CSR_STATUS 0x006 /* Status */
72#define PCISH5_ICR_CSR_IBAR0 0x010 /* I/O base address register */
73#define PCISH5_ICR_CSR_MBAR0 0x014 /* First Memory base address register */
74#define PCISH5_ICR_CSR_MBAR1 0x018 /* Second Memory base address register */
75
76
77
78/* Base address of registers */
79#define SH5PCI_ICR_BASE (PHYS_PCI_BLOCK + 0x00040000)
80#define SH5PCI_IO_BASE (PHYS_PCI_BLOCK + 0x00800000)
81/* #define SH5PCI_VCR_BASE (P2SEG_PCICB_BLOCK + P2SEG) */
82
83/* Register selection macro */
84#define PCISH5_ICR_REG(x) ( pcicr_virt + (PCISH5_ICR_##x))
85/* #define PCISH5_VCR_REG(x) ( SH5PCI_VCR_BASE (PCISH5_VCR_##x)) */
86
87/* Write I/O functions */
88#define SH5PCI_WRITE(reg,val) ctrl_outl((u32)(val),PCISH5_ICR_REG(reg))
89#define SH5PCI_WRITE_SHORT(reg,val) ctrl_outw((u16)(val),PCISH5_ICR_REG(reg))
90#define SH5PCI_WRITE_BYTE(reg,val) ctrl_outb((u8)(val),PCISH5_ICR_REG(reg))
91
92/* Read I/O functions */
93#define SH5PCI_READ(reg) ctrl_inl(PCISH5_ICR_REG(reg))
94#define SH5PCI_READ_SHORT(reg) ctrl_inw(PCISH5_ICR_REG(reg))
95#define SH5PCI_READ_BYTE(reg) ctrl_inb(PCISH5_ICR_REG(reg))
96
97/* Set PCI config bits */
98#define SET_CONFIG_BITS(bus,devfn,where) ((((bus) << 16) | ((devfn) << 8) | ((where) & ~3)) | 0x80000000)
99
100/* Set PCI command register */
101#define CONFIG_CMD(bus, devfn, where) SET_CONFIG_BITS(bus->number,devfn,where)
102
103/* Size converters */
104#define PCISH5_MEM_SIZCONV(x) (((x / 0x40000) - 1) << 18)
105#define PCISH5_IO_SIZCONV(x) (((x / 0x40000) - 1) << 18)
106
107
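The SET_CONFIG_BITS()/CONFIG_CMD() macros above implement the conventional PCI configuration-cycle address encoding: bus number in bits 23:16, devfn in bits 15:8, the dword-aligned register offset in bits 7:2, and bit 31 as the enable flag. A standalone sketch of the packing, illustrative only:

#include <stdio.h>

/* Mirrors SET_CONFIG_BITS() above: pack bus/devfn/register offset
 * into the 32-bit PIO address (PAR) value, with the enable bit set. */
static unsigned int config_cmd(unsigned bus, unsigned devfn, unsigned where)
{
	return ((bus << 16) | (devfn << 8) | (where & ~3u)) | 0x80000000u;
}

int main(void)
{
	/* bus 1, device 3 function 0 (devfn 0x18), register 0x10 (BAR0) */
	printf("PAR = 0x%08x\n", config_cmd(1, 0x18, 0x10));
	/* prints: PAR = 0x80011810 */
	return 0;
}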
diff --git a/arch/sh64/kernel/pcibios.c b/arch/sh64/kernel/pcibios.c
deleted file mode 100644
index 945920bc24db..000000000000
--- a/arch/sh64/kernel/pcibios.c
+++ /dev/null
@@ -1,168 +0,0 @@
1/*
2 * $Id: pcibios.c,v 1.1 2001/08/24 12:38:19 dwmw2 Exp $
3 *
4 * arch/sh64/kernel/pcibios.c
5 *
6 * Copyright (C) 2002 STMicroelectronics Limited
7 * Author : David J. McKay
8 *
9 * Copyright (C) 2004 Richard Curnow, SuperH UK Limited
10 *
11 * This file is subject to the terms and conditions of the GNU General Public
12 * License. See the file "COPYING" in the main directory of this archive
13 * for more details.
14 * This is GPL'd.
15 *
16 * Provided here are generic versions of:
17 * pcibios_update_resource()
18 * pcibios_align_resource()
19 * pcibios_enable_device()
20 * pcibios_set_master()
21 * pcibios_update_irq()
22 *
23 * These functions are collected here to reduce duplication of common
24 * code amongst the many platform-specific PCI support code files.
25 *
26 * Platform-specific files are expected to provide:
27 * pcibios_fixup_bus()
28 * pcibios_init()
29 * pcibios_setup()
30 * pcibios_fixup_pbus_ranges()
31 */
32
33#include <linux/kernel.h>
34#include <linux/pci.h>
35#include <linux/init.h>
36
37void
38pcibios_update_resource(struct pci_dev *dev, struct resource *root,
39 struct resource *res, int resource)
40{
41 u32 new, check;
42 int reg;
43
44 new = res->start | (res->flags & PCI_REGION_FLAG_MASK);
45 if (resource < 6) {
46 reg = PCI_BASE_ADDRESS_0 + 4*resource;
47 } else if (resource == PCI_ROM_RESOURCE) {
48 res->flags |= IORESOURCE_ROM_ENABLE;
49 new |= PCI_ROM_ADDRESS_ENABLE;
50 reg = dev->rom_base_reg;
51 } else {
52 /* Somebody might have asked allocation of a non-standard resource */
53 return;
54 }
55
56 pci_write_config_dword(dev, reg, new);
57 pci_read_config_dword(dev, reg, &check);
58 if ((new ^ check) & ((new & PCI_BASE_ADDRESS_SPACE_IO) ? PCI_BASE_ADDRESS_IO_MASK : PCI_BASE_ADDRESS_MEM_MASK)) {
59 printk(KERN_ERR "PCI: Error while updating region "
60 "%s/%d (%08x != %08x)\n", pci_name(dev), resource,
61 new, check);
62 }
63}
64
65/*
66 * We need to avoid collisions with `mirrored' VGA ports
67 * and other strange ISA hardware, so we always want the
68 * addresses to be allocated in the 0x000-0x0ff region
69 * modulo 0x400.
70 */
71void pcibios_align_resource(void *data, struct resource *res,
72 resource_size_t size, resource_size_t align)
73{
74 if (res->flags & IORESOURCE_IO) {
75 resource_size_t start = res->start;
76
77 if (start & 0x300) {
78 start = (start + 0x3ff) & ~0x3ff;
79 res->start = start;
80 }
81 }
82}
83
84static void pcibios_enable_bridge(struct pci_dev *dev)
85{
86 struct pci_bus *bus = dev->subordinate;
87 u16 cmd, old_cmd;
88
89 pci_read_config_word(dev, PCI_COMMAND, &cmd);
90 old_cmd = cmd;
91
92 if (bus->resource[0]->flags & IORESOURCE_IO) {
93 cmd |= PCI_COMMAND_IO;
94 }
95 if ((bus->resource[1]->flags & IORESOURCE_MEM) ||
96 (bus->resource[2]->flags & IORESOURCE_PREFETCH)) {
97 cmd |= PCI_COMMAND_MEMORY;
98 }
99
100 if (cmd != old_cmd) {
101 pci_write_config_word(dev, PCI_COMMAND, cmd);
102 }
103
104 printk("PCI bridge %s, command register -> %04x\n",
105 pci_name(dev), cmd);
106
107}
108
109
110
111int pcibios_enable_device(struct pci_dev *dev, int mask)
112{
113 u16 cmd, old_cmd;
114 int idx;
115 struct resource *r;
116
117 if ((dev->class >> 8) == PCI_CLASS_BRIDGE_PCI) {
118 pcibios_enable_bridge(dev);
119 }
120
121 pci_read_config_word(dev, PCI_COMMAND, &cmd);
122 old_cmd = cmd;
123 for(idx=0; idx<6; idx++) {
124 if (!(mask & (1 << idx)))
125 continue;
126 r = &dev->resource[idx];
127 if (!r->start && r->end) {
128 printk(KERN_ERR "PCI: Device %s not available because of resource collisions\n", pci_name(dev));
129 return -EINVAL;
130 }
131 if (r->flags & IORESOURCE_IO)
132 cmd |= PCI_COMMAND_IO;
133 if (r->flags & IORESOURCE_MEM)
134 cmd |= PCI_COMMAND_MEMORY;
135 }
136 if (dev->resource[PCI_ROM_RESOURCE].start)
137 cmd |= PCI_COMMAND_MEMORY;
138 if (cmd != old_cmd) {
139 printk(KERN_INFO "PCI: Enabling device %s (%04x -> %04x)\n", pci_name(dev), old_cmd, cmd);
140 pci_write_config_word(dev, PCI_COMMAND, cmd);
141 }
142 return 0;
143}
144
145/*
146 * If we set up a device for bus mastering, we need to check and set
147 * the latency timer as it may not be properly set.
148 */
149unsigned int pcibios_max_latency = 255;
150
151void pcibios_set_master(struct pci_dev *dev)
152{
153 u8 lat;
154 pci_read_config_byte(dev, PCI_LATENCY_TIMER, &lat);
155 if (lat < 16)
156 lat = (64 <= pcibios_max_latency) ? 64 : pcibios_max_latency;
157 else if (lat > pcibios_max_latency)
158 lat = pcibios_max_latency;
159 else
160 return;
161 printk(KERN_INFO "PCI: Setting latency timer of device %s to %d\n", pci_name(dev), lat);
162 pci_write_config_byte(dev, PCI_LATENCY_TIMER, lat);
163}
164
165void __init pcibios_update_irq(struct pci_dev *dev, int irq)
166{
167 pci_write_config_byte(dev, PCI_INTERRUPT_LINE, irq);
168}
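The I/O alignment rule in pcibios_align_resource() above keeps allocations inside the 0x000-0x0ff window modulo 0x400 by bumping any start address whose low bits fall in 0x100-0x3ff up to the next 0x400 boundary. A standalone sketch with sample values, illustrative only:

#include <stdio.h>

/* Mirrors the I/O branch of pcibios_align_resource() above: dodge
 * mirrored VGA/ISA decoding by avoiding 0x100-0x3ff modulo 0x400. */
static unsigned long align_io(unsigned long start)
{
	if (start & 0x300)
		start = (start + 0x3ff) & ~0x3ffUL;
	return start;
}

int main(void)
{
	printf("0x%lx -> 0x%lx\n", 0x1120UL, align_io(0x1120UL)); /* 0x1400 */
	printf("0x%lx -> 0x%lx\n", 0x1080UL, align_io(0x1080UL)); /* unchanged */
	return 0;
}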
diff --git a/arch/sh64/kernel/process.c b/arch/sh64/kernel/process.c
deleted file mode 100644
index 0761af4d2a42..000000000000
--- a/arch/sh64/kernel/process.c
+++ /dev/null
@@ -1,691 +0,0 @@
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * arch/sh64/kernel/process.c
7 *
8 * Copyright (C) 2000, 2001 Paolo Alberelli
9 * Copyright (C) 2003 Paul Mundt
10 * Copyright (C) 2003, 2004 Richard Curnow
11 *
12 * Started from SH3/4 version:
13 * Copyright (C) 1999, 2000 Niibe Yutaka & Kaz Kojima
14 *
15 * In turn started from i386 version:
16 * Copyright (C) 1995 Linus Torvalds
17 *
18 */
19
20/*
21 * This file handles the architecture-dependent parts of process handling..
22 */
23#include <linux/mm.h>
24#include <linux/fs.h>
25#include <linux/ptrace.h>
26#include <linux/reboot.h>
27#include <linux/init.h>
28#include <linux/module.h>
29#include <linux/proc_fs.h>
30#include <asm/uaccess.h>
31#include <asm/pgtable.h>
32
33struct task_struct *last_task_used_math = NULL;
34
35static int hlt_counter = 1;
36
37#define HARD_IDLE_TIMEOUT (HZ / 3)
38
39void disable_hlt(void)
40{
41 hlt_counter++;
42}
43
44void enable_hlt(void)
45{
46 hlt_counter--;
47}
48
49static int __init nohlt_setup(char *__unused)
50{
51 hlt_counter = 1;
52 return 1;
53}
54
55static int __init hlt_setup(char *__unused)
56{
57 hlt_counter = 0;
58 return 1;
59}
60
61__setup("nohlt", nohlt_setup);
62__setup("hlt", hlt_setup);
63
64static inline void hlt(void)
65{
66 __asm__ __volatile__ ("sleep" : : : "memory");
67}
68
69/*
70 * The idle loop on a uniprocessor SH..
71 */
72void cpu_idle(void)
73{
74 /* endless idle loop with no priority at all */
75 while (1) {
76 if (hlt_counter) {
77 while (!need_resched())
78 cpu_relax();
79 } else {
80 local_irq_disable();
81 while (!need_resched()) {
82 local_irq_enable();
83 hlt();
84 local_irq_disable();
85 }
86 local_irq_enable();
87 }
88 preempt_enable_no_resched();
89 schedule();
90 preempt_disable();
91 }
92
93}
94
95void machine_restart(char * __unused)
96{
97 extern void phys_stext(void);
98
99 phys_stext();
100}
101
102void machine_halt(void)
103{
104 for (;;);
105}
106
107void machine_power_off(void)
108{
109 extern void enter_deep_standby(void);
110
111 enter_deep_standby();
112}
113
114void (*pm_power_off)(void) = machine_power_off;
115EXPORT_SYMBOL(pm_power_off);
116
117void show_regs(struct pt_regs * regs)
118{
119 unsigned long long ah, al, bh, bl, ch, cl;
120
121 printk("\n");
122
123 ah = (regs->pc) >> 32;
124 al = (regs->pc) & 0xffffffff;
125 bh = (regs->regs[18]) >> 32;
126 bl = (regs->regs[18]) & 0xffffffff;
127 ch = (regs->regs[15]) >> 32;
128 cl = (regs->regs[15]) & 0xffffffff;
129 printk("PC : %08Lx%08Lx LINK: %08Lx%08Lx SP : %08Lx%08Lx\n",
130 ah, al, bh, bl, ch, cl);
131
132 ah = (regs->sr) >> 32;
133 al = (regs->sr) & 0xffffffff;
134 asm volatile ("getcon " __TEA ", %0" : "=r" (bh));
135 asm volatile ("getcon " __TEA ", %0" : "=r" (bl));
136 bh = (bh) >> 32;
137 bl = (bl) & 0xffffffff;
138 asm volatile ("getcon " __KCR0 ", %0" : "=r" (ch));
139 asm volatile ("getcon " __KCR0 ", %0" : "=r" (cl));
140 ch = (ch) >> 32;
141 cl = (cl) & 0xffffffff;
142 printk("SR : %08Lx%08Lx TEA : %08Lx%08Lx KCR0: %08Lx%08Lx\n",
143 ah, al, bh, bl, ch, cl);
144
145 ah = (regs->regs[0]) >> 32;
146 al = (regs->regs[0]) & 0xffffffff;
147 bh = (regs->regs[1]) >> 32;
148 bl = (regs->regs[1]) & 0xffffffff;
149 ch = (regs->regs[2]) >> 32;
150 cl = (regs->regs[2]) & 0xffffffff;
151 printk("R0 : %08Lx%08Lx R1 : %08Lx%08Lx R2 : %08Lx%08Lx\n",
152 ah, al, bh, bl, ch, cl);
153
154 ah = (regs->regs[3]) >> 32;
155 al = (regs->regs[3]) & 0xffffffff;
156 bh = (regs->regs[4]) >> 32;
157 bl = (regs->regs[4]) & 0xffffffff;
158 ch = (regs->regs[5]) >> 32;
159 cl = (regs->regs[5]) & 0xffffffff;
160 printk("R3 : %08Lx%08Lx R4 : %08Lx%08Lx R5 : %08Lx%08Lx\n",
161 ah, al, bh, bl, ch, cl);
162
163 ah = (regs->regs[6]) >> 32;
164 al = (regs->regs[6]) & 0xffffffff;
165 bh = (regs->regs[7]) >> 32;
166 bl = (regs->regs[7]) & 0xffffffff;
167 ch = (regs->regs[8]) >> 32;
168 cl = (regs->regs[8]) & 0xffffffff;
169 printk("R6 : %08Lx%08Lx R7 : %08Lx%08Lx R8 : %08Lx%08Lx\n",
170 ah, al, bh, bl, ch, cl);
171
172 ah = (regs->regs[9]) >> 32;
173 al = (regs->regs[9]) & 0xffffffff;
174 bh = (regs->regs[10]) >> 32;
175 bl = (regs->regs[10]) & 0xffffffff;
176 ch = (regs->regs[11]) >> 32;
177 cl = (regs->regs[11]) & 0xffffffff;
178 printk("R9 : %08Lx%08Lx R10 : %08Lx%08Lx R11 : %08Lx%08Lx\n",
179 ah, al, bh, bl, ch, cl);
180
181 ah = (regs->regs[12]) >> 32;
182 al = (regs->regs[12]) & 0xffffffff;
183 bh = (regs->regs[13]) >> 32;
184 bl = (regs->regs[13]) & 0xffffffff;
185 ch = (regs->regs[14]) >> 32;
186 cl = (regs->regs[14]) & 0xffffffff;
187 printk("R12 : %08Lx%08Lx R13 : %08Lx%08Lx R14 : %08Lx%08Lx\n",
188 ah, al, bh, bl, ch, cl);
189
190 ah = (regs->regs[16]) >> 32;
191 al = (regs->regs[16]) & 0xffffffff;
192 bh = (regs->regs[17]) >> 32;
193 bl = (regs->regs[17]) & 0xffffffff;
194 ch = (regs->regs[19]) >> 32;
195 cl = (regs->regs[19]) & 0xffffffff;
196 printk("R16 : %08Lx%08Lx R17 : %08Lx%08Lx R19 : %08Lx%08Lx\n",
197 ah, al, bh, bl, ch, cl);
198
199 ah = (regs->regs[20]) >> 32;
200 al = (regs->regs[20]) & 0xffffffff;
201 bh = (regs->regs[21]) >> 32;
202 bl = (regs->regs[21]) & 0xffffffff;
203 ch = (regs->regs[22]) >> 32;
204 cl = (regs->regs[22]) & 0xffffffff;
205 printk("R20 : %08Lx%08Lx R21 : %08Lx%08Lx R22 : %08Lx%08Lx\n",
206 ah, al, bh, bl, ch, cl);
207
208 ah = (regs->regs[23]) >> 32;
209 al = (regs->regs[23]) & 0xffffffff;
210 bh = (regs->regs[24]) >> 32;
211 bl = (regs->regs[24]) & 0xffffffff;
212 ch = (regs->regs[25]) >> 32;
213 cl = (regs->regs[25]) & 0xffffffff;
214 printk("R23 : %08Lx%08Lx R24 : %08Lx%08Lx R25 : %08Lx%08Lx\n",
215 ah, al, bh, bl, ch, cl);
216
217 ah = (regs->regs[26]) >> 32;
218 al = (regs->regs[26]) & 0xffffffff;
219 bh = (regs->regs[27]) >> 32;
220 bl = (regs->regs[27]) & 0xffffffff;
221 ch = (regs->regs[28]) >> 32;
222 cl = (regs->regs[28]) & 0xffffffff;
223 printk("R26 : %08Lx%08Lx R27 : %08Lx%08Lx R28 : %08Lx%08Lx\n",
224 ah, al, bh, bl, ch, cl);
225
226 ah = (regs->regs[29]) >> 32;
227 al = (regs->regs[29]) & 0xffffffff;
228 bh = (regs->regs[30]) >> 32;
229 bl = (regs->regs[30]) & 0xffffffff;
230 ch = (regs->regs[31]) >> 32;
231 cl = (regs->regs[31]) & 0xffffffff;
232 printk("R29 : %08Lx%08Lx R30 : %08Lx%08Lx R31 : %08Lx%08Lx\n",
233 ah, al, bh, bl, ch, cl);
234
235 ah = (regs->regs[32]) >> 32;
236 al = (regs->regs[32]) & 0xffffffff;
237 bh = (regs->regs[33]) >> 32;
238 bl = (regs->regs[33]) & 0xffffffff;
239 ch = (regs->regs[34]) >> 32;
240 cl = (regs->regs[34]) & 0xffffffff;
241 printk("R32 : %08Lx%08Lx R33 : %08Lx%08Lx R34 : %08Lx%08Lx\n",
242 ah, al, bh, bl, ch, cl);
243
244 ah = (regs->regs[35]) >> 32;
245 al = (regs->regs[35]) & 0xffffffff;
246 bh = (regs->regs[36]) >> 32;
247 bl = (regs->regs[36]) & 0xffffffff;
248 ch = (regs->regs[37]) >> 32;
249 cl = (regs->regs[37]) & 0xffffffff;
250 printk("R35 : %08Lx%08Lx R36 : %08Lx%08Lx R37 : %08Lx%08Lx\n",
251 ah, al, bh, bl, ch, cl);
252
253 ah = (regs->regs[38]) >> 32;
254 al = (regs->regs[38]) & 0xffffffff;
255 bh = (regs->regs[39]) >> 32;
256 bl = (regs->regs[39]) & 0xffffffff;
257 ch = (regs->regs[40]) >> 32;
258 cl = (regs->regs[40]) & 0xffffffff;
259 printk("R38 : %08Lx%08Lx R39 : %08Lx%08Lx R40 : %08Lx%08Lx\n",
260 ah, al, bh, bl, ch, cl);
261
262 ah = (regs->regs[41]) >> 32;
263 al = (regs->regs[41]) & 0xffffffff;
264 bh = (regs->regs[42]) >> 32;
265 bl = (regs->regs[42]) & 0xffffffff;
266 ch = (regs->regs[43]) >> 32;
267 cl = (regs->regs[43]) & 0xffffffff;
268 printk("R41 : %08Lx%08Lx R42 : %08Lx%08Lx R43 : %08Lx%08Lx\n",
269 ah, al, bh, bl, ch, cl);
270
271 ah = (regs->regs[44]) >> 32;
272 al = (regs->regs[44]) & 0xffffffff;
273 bh = (regs->regs[45]) >> 32;
274 bl = (regs->regs[45]) & 0xffffffff;
275 ch = (regs->regs[46]) >> 32;
276 cl = (regs->regs[46]) & 0xffffffff;
277 printk("R44 : %08Lx%08Lx R45 : %08Lx%08Lx R46 : %08Lx%08Lx\n",
278 ah, al, bh, bl, ch, cl);
279
280 ah = (regs->regs[47]) >> 32;
281 al = (regs->regs[47]) & 0xffffffff;
282 bh = (regs->regs[48]) >> 32;
283 bl = (regs->regs[48]) & 0xffffffff;
284 ch = (regs->regs[49]) >> 32;
285 cl = (regs->regs[49]) & 0xffffffff;
286 printk("R47 : %08Lx%08Lx R48 : %08Lx%08Lx R49 : %08Lx%08Lx\n",
287 ah, al, bh, bl, ch, cl);
288
289 ah = (regs->regs[50]) >> 32;
290 al = (regs->regs[50]) & 0xffffffff;
291 bh = (regs->regs[51]) >> 32;
292 bl = (regs->regs[51]) & 0xffffffff;
293 ch = (regs->regs[52]) >> 32;
294 cl = (regs->regs[52]) & 0xffffffff;
295 printk("R50 : %08Lx%08Lx R51 : %08Lx%08Lx R52 : %08Lx%08Lx\n",
296 ah, al, bh, bl, ch, cl);
297
298 ah = (regs->regs[53]) >> 32;
299 al = (regs->regs[53]) & 0xffffffff;
300 bh = (regs->regs[54]) >> 32;
301 bl = (regs->regs[54]) & 0xffffffff;
302 ch = (regs->regs[55]) >> 32;
303 cl = (regs->regs[55]) & 0xffffffff;
304 printk("R53 : %08Lx%08Lx R54 : %08Lx%08Lx R55 : %08Lx%08Lx\n",
305 ah, al, bh, bl, ch, cl);
306
307 ah = (regs->regs[56]) >> 32;
308 al = (regs->regs[56]) & 0xffffffff;
309 bh = (regs->regs[57]) >> 32;
310 bl = (regs->regs[57]) & 0xffffffff;
311 ch = (regs->regs[58]) >> 32;
312 cl = (regs->regs[58]) & 0xffffffff;
313 printk("R56 : %08Lx%08Lx R57 : %08Lx%08Lx R58 : %08Lx%08Lx\n",
314 ah, al, bh, bl, ch, cl);
315
316 ah = (regs->regs[59]) >> 32;
317 al = (regs->regs[59]) & 0xffffffff;
318 bh = (regs->regs[60]) >> 32;
319 bl = (regs->regs[60]) & 0xffffffff;
320 ch = (regs->regs[61]) >> 32;
321 cl = (regs->regs[61]) & 0xffffffff;
322 printk("R59 : %08Lx%08Lx R60 : %08Lx%08Lx R61 : %08Lx%08Lx\n",
323 ah, al, bh, bl, ch, cl);
324
325 ah = (regs->regs[62]) >> 32;
326 al = (regs->regs[62]) & 0xffffffff;
327 bh = (regs->tregs[0]) >> 32;
328 bl = (regs->tregs[0]) & 0xffffffff;
329 ch = (regs->tregs[1]) >> 32;
330 cl = (regs->tregs[1]) & 0xffffffff;
331 printk("R62 : %08Lx%08Lx T0 : %08Lx%08Lx T1 : %08Lx%08Lx\n",
332 ah, al, bh, bl, ch, cl);
333
334 ah = (regs->tregs[2]) >> 32;
335 al = (regs->tregs[2]) & 0xffffffff;
336 bh = (regs->tregs[3]) >> 32;
337 bl = (regs->tregs[3]) & 0xffffffff;
338 ch = (regs->tregs[4]) >> 32;
339 cl = (regs->tregs[4]) & 0xffffffff;
340 printk("T2 : %08Lx%08Lx T3 : %08Lx%08Lx T4 : %08Lx%08Lx\n",
341 ah, al, bh, bl, ch, cl);
342
343 ah = (regs->tregs[5]) >> 32;
344 al = (regs->tregs[5]) & 0xffffffff;
345 bh = (regs->tregs[6]) >> 32;
346 bl = (regs->tregs[6]) & 0xffffffff;
347 ch = (regs->tregs[7]) >> 32;
348 cl = (regs->tregs[7]) & 0xffffffff;
349 printk("T5 : %08Lx%08Lx T6 : %08Lx%08Lx T7 : %08Lx%08Lx\n",
350 ah, al, bh, bl, ch, cl);
351
352 /*
353 * If we're in kernel mode, dump the stack too..
354 */
355 if (!user_mode(regs)) {
356 void show_stack(struct task_struct *tsk, unsigned long *sp);
357 unsigned long sp = regs->regs[15] & 0xffffffff;
358 struct task_struct *tsk = get_current();
359
360 tsk->thread.kregs = regs;
361
362 show_stack(tsk, (unsigned long *)sp);
363 }
364}
365
366struct task_struct * alloc_task_struct(void)
367{
368 /* Get task descriptor pages */
369 return (struct task_struct *)
370 __get_free_pages(GFP_KERNEL, get_order(THREAD_SIZE));
371}
372
373void free_task_struct(struct task_struct *p)
374{
375 free_pages((unsigned long) p, get_order(THREAD_SIZE));
376}
377
378/*
379 * Create a kernel thread
380 */
381ATTRIB_NORET void kernel_thread_helper(void *arg, int (*fn)(void *))
382{
383 do_exit(fn(arg));
384}
385
386/*
387 * This is the mechanism for creating a new kernel thread.
388 *
389 * NOTE! Only a kernel-only process (i.e. the swapper or direct descendants
390 * who haven't done an "execve()") should use this: it will work within
391 * a system call from a "real" process, but the process memory space will
392 * not be freed until both the parent and the child have exited.
393 */
394int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags)
395{
396 struct pt_regs regs;
397
398 memset(&regs, 0, sizeof(regs));
399 regs.regs[2] = (unsigned long)arg;
400 regs.regs[3] = (unsigned long)fn;
401
402 regs.pc = (unsigned long)kernel_thread_helper;
403 regs.sr = (1 << 30);
404
405 return do_fork(flags | CLONE_VM | CLONE_UNTRACED, 0,
406 &regs, 0, NULL, NULL);
407}
408
409/*
410 * Free current thread data structures etc..
411 */
412void exit_thread(void)
413{
414 /* See arch/sparc/kernel/process.c for the precedent for doing this -- RPC.
415
416 The SH-5 FPU save/restore approach relies on last_task_used_math
417 pointing to a live task_struct. When another task tries to use the
418 FPU for the 1st time, the FPUDIS trap handling (see
419 arch/sh64/kernel/fpu.c) will save the existing FPU state to the
420 FP regs field within last_task_used_math before re-loading the new
421 task's FPU state (or initialising it if the FPU has been used
422 before). So if last_task_used_math is stale, and its page has already been
423 re-allocated for another use, the consequences are rather grim. Unless we
424 null it here, there is no other path through which it would get safely
425 nulled. */
426
427#ifdef CONFIG_SH_FPU
428 if (last_task_used_math == current) {
429 last_task_used_math = NULL;
430 }
431#endif
432}
433
434void flush_thread(void)
435{
436
437 /* Called by fs/exec.c (flush_old_exec) to remove traces of a
438 * previously running executable. */
439#ifdef CONFIG_SH_FPU
440 if (last_task_used_math == current) {
441 last_task_used_math = NULL;
442 }
443 /* Force FPU state to be reinitialised after exec */
444 clear_used_math();
445#endif
446
447 /* if we are a kernel thread, about to change to user thread,
448 * update kreg
449 */
450 if(current->thread.kregs==&fake_swapper_regs) {
451 current->thread.kregs =
452 ((struct pt_regs *)(THREAD_SIZE + (unsigned long) current) - 1);
453 current->thread.uregs = current->thread.kregs;
454 }
455}
456
457void release_thread(struct task_struct *dead_task)
458{
459 /* do nothing */
460}
461
462/* Fill in the fpu structure for a core dump.. */
463int dump_fpu(struct pt_regs *regs, elf_fpregset_t *fpu)
464{
465#ifdef CONFIG_SH_FPU
466 int fpvalid;
467 struct task_struct *tsk = current;
468
469 fpvalid = !!tsk_used_math(tsk);
470 if (fpvalid) {
471 if (current == last_task_used_math) {
472 grab_fpu();
473 fpsave(&tsk->thread.fpu.hard);
474 release_fpu();
475 last_task_used_math = 0;
476 regs->sr |= SR_FD;
477 }
478
479 memcpy(fpu, &tsk->thread.fpu.hard, sizeof(*fpu));
480 }
481
482 return fpvalid;
483#else
484 return 0; /* Task didn't use the fpu at all. */
485#endif
486}
487
488asmlinkage void ret_from_fork(void);
489
490int copy_thread(int nr, unsigned long clone_flags, unsigned long usp,
491 unsigned long unused,
492 struct task_struct *p, struct pt_regs *regs)
493{
494 struct pt_regs *childregs;
495 unsigned long long se; /* Sign extension */
496
497#ifdef CONFIG_SH_FPU
498 if(last_task_used_math == current) {
499 grab_fpu();
500 fpsave(&current->thread.fpu.hard);
501 release_fpu();
502 last_task_used_math = NULL;
503 regs->sr |= SR_FD;
504 }
505#endif
506 /* Copy from sh version */
507 childregs = (struct pt_regs *)(THREAD_SIZE + task_stack_page(p)) - 1;
508
509 *childregs = *regs;
510
511 if (user_mode(regs)) {
512 childregs->regs[15] = usp;
513 p->thread.uregs = childregs;
514 } else {
515 childregs->regs[15] = (unsigned long)task_stack_page(p) + THREAD_SIZE;
516 }
517
518 childregs->regs[9] = 0; /* Set return value for child */
519 childregs->sr |= SR_FD; /* Invalidate FPU flag */
520
521 p->thread.sp = (unsigned long) childregs;
522 p->thread.pc = (unsigned long) ret_from_fork;
523
524 /*
525 * Sign extend the edited stack.
526 * Note that thread.sp and thread.pc will stay
527 * 32-bit wide and context switch must take care
528 * of NEFF sign extension.
529 */
530
531 se = childregs->regs[15];
532 se = (se & NEFF_SIGN) ? (se | NEFF_MASK) : se;
533 childregs->regs[15] = se;
534
535 return 0;
536}
537
538asmlinkage int sys_fork(unsigned long r2, unsigned long r3,
539 unsigned long r4, unsigned long r5,
540 unsigned long r6, unsigned long r7,
541 struct pt_regs *pregs)
542{
543 return do_fork(SIGCHLD, pregs->regs[15], pregs, 0, 0, 0);
544}
545
546asmlinkage int sys_clone(unsigned long clone_flags, unsigned long newsp,
547 unsigned long r4, unsigned long r5,
548 unsigned long r6, unsigned long r7,
549 struct pt_regs *pregs)
550{
551 if (!newsp)
552 newsp = pregs->regs[15];
553 return do_fork(clone_flags, newsp, pregs, 0, 0, 0);
554}
555
556/*
557 * This is trivial, and on the face of it looks like it
558 * could equally well be done in user mode.
559 *
560 * Not so, for quite unobvious reasons - register pressure.
561 * In user mode vfork() cannot have a stack frame, and if
562 * done by calling the "clone()" system call directly, you
563 * do not have enough call-clobbered registers to hold all
564 * the information you need.
565 */
566asmlinkage int sys_vfork(unsigned long r2, unsigned long r3,
567 unsigned long r4, unsigned long r5,
568 unsigned long r6, unsigned long r7,
569 struct pt_regs *pregs)
570{
571 return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, pregs->regs[15], pregs, 0, 0, 0);
572}
573
574/*
575 * sys_execve() executes a new program.
576 */
577asmlinkage int sys_execve(char *ufilename, char **uargv,
578 char **uenvp, unsigned long r5,
579 unsigned long r6, unsigned long r7,
580 struct pt_regs *pregs)
581{
582 int error;
583 char *filename;
584
585 lock_kernel();
586 filename = getname((char __user *)ufilename);
587 error = PTR_ERR(filename);
588 if (IS_ERR(filename))
589 goto out;
590
591 error = do_execve(filename,
592 (char __user * __user *)uargv,
593 (char __user * __user *)uenvp,
594 pregs);
595 if (error == 0) {
596 task_lock(current);
597 current->ptrace &= ~PT_DTRACE;
598 task_unlock(current);
599 }
600 putname(filename);
601out:
602 unlock_kernel();
603 return error;
604}
605
606/*
607 * These bracket the sleeping functions..
608 */
609extern void interruptible_sleep_on(wait_queue_head_t *q);
610
611#define mid_sched ((unsigned long) interruptible_sleep_on)
612
613static int in_sh64_switch_to(unsigned long pc)
614{
615 extern char __sh64_switch_to_end;
616 /* For a sleeping task, the PC is somewhere in the middle of the function,
617 so we don't have to worry about masking the LSB off */
618 return (pc >= (unsigned long) sh64_switch_to) &&
619 (pc < (unsigned long) &__sh64_switch_to_end);
620}
621
622unsigned long get_wchan(struct task_struct *p)
623{
624 unsigned long schedule_fp;
625 unsigned long sh64_switch_to_fp;
626 unsigned long schedule_caller_pc;
627 unsigned long pc;
628
629 if (!p || p == current || p->state == TASK_RUNNING)
630 return 0;
631
632 /*
633 * The same comment as on the Alpha applies here, too ...
634 */
635 pc = thread_saved_pc(p);
636
637#ifdef CONFIG_FRAME_POINTER
638 if (in_sh64_switch_to(pc)) {
639 sh64_switch_to_fp = (long) p->thread.sp;
640 /* r14 is saved at offset 4 in the sh64_switch_to frame */
641 schedule_fp = *(unsigned long *) (long)(sh64_switch_to_fp + 4);
642
643 /* and the caller of 'schedule' is (currently!) saved at offset 24
644 in the frame of schedule (from disasm) */
645 schedule_caller_pc = *(unsigned long *) (long)(schedule_fp + 24);
646 return schedule_caller_pc;
647 }
648#endif
649 return pc;
650}
651
652/* Provide a /proc/asids file that lists out the
653 ASIDs currently associated with the processes. (If the DM.PC register is
654 examined through the debug link, this shows ASID + PC. To make use of this,
655 the PID->ASID relationship needs to be known. This is primarily for
656 debugging.)
657 */
658
659#if defined(CONFIG_SH64_PROC_ASIDS)
660static int
661asids_proc_info(char *buf, char **start, off_t fpos, int length, int *eof, void *data)
662{
663 int len=0;
664 struct task_struct *p;
665 read_lock(&tasklist_lock);
666 for_each_process(p) {
667 int pid = p->pid;
668 struct mm_struct *mm;
669 if (!pid) continue;
670 mm = p->mm;
671 if (mm) {
672 unsigned long asid, context;
673 context = mm->context;
674 asid = (context & 0xff);
675 len += sprintf(buf+len, "%5d : %02lx\n", pid, asid);
676 } else {
677 len += sprintf(buf+len, "%5d : (none)\n", pid);
678 }
679 }
680 read_unlock(&tasklist_lock);
681 *eof = 1;
682 return len;
683}
684
685static int __init register_proc_asids(void)
686{
687 create_proc_read_entry("asids", 0, NULL, asids_proc_info, NULL);
688 return 0;
689}
690__initcall(register_proc_asids);
691#endif
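On the NEFF sign extension performed in copy_thread() above: the SH-5 treats effective addresses as NEFF-bit values held in 64-bit registers, so a stack pointer with its top effective bit set must have the bits above NEFF filled with ones. A standalone sketch assuming NEFF = 32; the constants here are illustrative, the real ones come from the sh64 headers:

#include <stdio.h>

#define NEFF		32
#define NEFF_SIGN	(1ULL << (NEFF - 1))	/* top effective bit */
#define NEFF_MASK	(~0ULL << NEFF)		/* bits above NEFF */

static unsigned long long neff_sign_extend(unsigned long long addr)
{
	return (addr & NEFF_SIGN) ? (addr | NEFF_MASK) : addr;
}

int main(void)
{
	/* bit 31 set, so bits 63:32 are filled with ones */
	printf("%016llx\n", neff_sign_extend(0xb0000000ULL));
	/* prints: ffffffffb0000000 */
	return 0;
}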
diff --git a/arch/sh64/kernel/ptrace.c b/arch/sh64/kernel/ptrace.c
deleted file mode 100644
index 8a2d339cf760..000000000000
--- a/arch/sh64/kernel/ptrace.c
+++ /dev/null
@@ -1,332 +0,0 @@
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * arch/sh64/kernel/ptrace.c
7 *
8 * Copyright (C) 2000, 2001 Paolo Alberelli
9 * Copyright (C) 2003 Paul Mundt
10 *
11 * Started from SH3/4 version:
12 * SuperH version: Copyright (C) 1999, 2000 Kaz Kojima & Niibe Yutaka
13 *
14 * Original x86 implementation:
15 * By Ross Biro 1/23/92
16 * edited by Linus Torvalds
17 *
18 */
19
20#include <linux/kernel.h>
21#include <linux/rwsem.h>
22#include <linux/sched.h>
23#include <linux/mm.h>
24#include <linux/smp.h>
25#include <linux/smp_lock.h>
26#include <linux/errno.h>
27#include <linux/ptrace.h>
28#include <linux/user.h>
29#include <linux/signal.h>
30#include <linux/syscalls.h>
31
32#include <asm/io.h>
33#include <asm/uaccess.h>
34#include <asm/pgtable.h>
35#include <asm/system.h>
36#include <asm/processor.h>
37#include <asm/mmu_context.h>
38
39/* This mask defines the bits of the SR which the user is not allowed to
40 change, which are everything except S, Q, M, PR, SZ, FR. */
41#define SR_MASK (0xffff8cfd)
42
43/*
44 * This does not yet catch signals sent when the child dies;
45 * that is handled in exit.c or in signal.c.
46 */
47
48/*
49 * This routine will get a word from the user area in the process kernel stack.
50 */
51static inline int get_stack_long(struct task_struct *task, int offset)
52{
53 unsigned char *stack;
54
55 stack = (unsigned char *)(task->thread.uregs);
56 stack += offset;
57 return (*((int *)stack));
58}
59
60static inline unsigned long
61get_fpu_long(struct task_struct *task, unsigned long addr)
62{
63 unsigned long tmp;
64 struct pt_regs *regs;
65 regs = (struct pt_regs*)((unsigned char *)task + THREAD_SIZE) - 1;
66
67 if (!tsk_used_math(task)) {
68 if (addr == offsetof(struct user_fpu_struct, fpscr)) {
69 tmp = FPSCR_INIT;
70 } else {
71 tmp = 0xffffffffUL; /* matches initial value in fpu.c */
72 }
73 return tmp;
74 }
75
76 if (last_task_used_math == task) {
77 grab_fpu();
78 fpsave(&task->thread.fpu.hard);
79 release_fpu();
80 last_task_used_math = 0;
81 regs->sr |= SR_FD;
82 }
83
84 tmp = ((long *)&task->thread.fpu)[addr / sizeof(unsigned long)];
85 return tmp;
86}
87
88/*
89 * This routine will put a word into the user area in the process kernel stack.
90 */
91static inline int put_stack_long(struct task_struct *task, int offset,
92 unsigned long data)
93{
94 unsigned char *stack;
95
96 stack = (unsigned char *)(task->thread.uregs);
97 stack += offset;
98 *(unsigned long *) stack = data;
99 return 0;
100}
101
102static inline int
103put_fpu_long(struct task_struct *task, unsigned long addr, unsigned long data)
104{
105 struct pt_regs *regs;
106
107 regs = (struct pt_regs*)((unsigned char *)task + THREAD_SIZE) - 1;
108
109 if (!tsk_used_math(task)) {
110 fpinit(&task->thread.fpu.hard);
111 set_stopped_child_used_math(task);
112 } else if (last_task_used_math == task) {
113 grab_fpu();
114 fpsave(&task->thread.fpu.hard);
115 release_fpu();
116 last_task_used_math = 0;
117 regs->sr |= SR_FD;
118 }
119
120 ((long *)&task->thread.fpu)[addr / sizeof(unsigned long)] = data;
121 return 0;
122}
123
124
125long arch_ptrace(struct task_struct *child, long request, long addr, long data)
126{
127 int ret;
128
129 switch (request) {
130 /* when I and D space are separate, these will need to be fixed. */
131 case PTRACE_PEEKTEXT: /* read word at location addr. */
132 case PTRACE_PEEKDATA:
133 ret = generic_ptrace_peekdata(child, addr, data);
134 break;
135
136 /* read the word at location addr in the USER area. */
137 case PTRACE_PEEKUSR: {
138 unsigned long tmp;
139
140 ret = -EIO;
141 if ((addr & 3) || addr < 0)
142 break;
143
144 if (addr < sizeof(struct pt_regs))
145 tmp = get_stack_long(child, addr);
146 else if ((addr >= offsetof(struct user, fpu)) &&
147 (addr < offsetof(struct user, u_fpvalid))) {
148 tmp = get_fpu_long(child, addr - offsetof(struct user, fpu));
149 } else if (addr == offsetof(struct user, u_fpvalid)) {
150 tmp = !!tsk_used_math(child);
151 } else {
152 break;
153 }
154 ret = put_user(tmp, (unsigned long *)data);
155 break;
156 }
157
158 /* when I and D space are separate, this will have to be fixed. */
159 case PTRACE_POKETEXT: /* write the word at location addr. */
160 case PTRACE_POKEDATA:
161 ret = generic_ptrace_pokedata(child, addr, data);
162 break;
163
164 case PTRACE_POKEUSR:
165 /* write the word at location addr in the USER area. We must
166 disallow any changes to certain SR bits or u_fpvalid, since
167 this could crash the kernel or result in a security
168 loophole. */
169 ret = -EIO;
170 if ((addr & 3) || addr < 0)
171 break;
172
173 if (addr < sizeof(struct pt_regs)) {
174 /* Ignore change of top 32 bits of SR */
175 if (addr == offsetof (struct pt_regs, sr)+4)
176 {
177 ret = 0;
178 break;
179 }
180 /* If lower 32 bits of SR, ignore non-user bits */
181 if (addr == offsetof (struct pt_regs, sr))
182 {
183 long cursr = get_stack_long(child, addr);
184 data &= ~(SR_MASK);
185 data |= (cursr & SR_MASK);
186 }
187 ret = put_stack_long(child, addr, data);
188 }
189 else if ((addr >= offsetof(struct user, fpu)) &&
190 (addr < offsetof(struct user, u_fpvalid))) {
191 ret = put_fpu_long(child, addr - offsetof(struct user, fpu), data);
192 }
193 break;
194
195 case PTRACE_SYSCALL: /* continue and stop at next (return from) syscall */
196 case PTRACE_CONT: { /* restart after signal. */
197 ret = -EIO;
198 if (!valid_signal(data))
199 break;
200 if (request == PTRACE_SYSCALL)
201 set_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
202 else
203 clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
204 child->exit_code = data;
205 wake_up_process(child);
206 ret = 0;
207 break;
208 }
209
210/*
211 * Make the child exit. The best we can do is send it a SIGKILL;
212 * perhaps it should instead be recorded in the status that it
213 * wants to exit.
214 */
215 case PTRACE_KILL: {
216 ret = 0;
217 if (child->exit_state == EXIT_ZOMBIE) /* already dead */
218 break;
219 child->exit_code = SIGKILL;
220 wake_up_process(child);
221 break;
222 }
223
224 case PTRACE_SINGLESTEP: { /* set the trap flag. */
225 struct pt_regs *regs;
226
227 ret = -EIO;
228 if (!valid_signal(data))
229 break;
230 clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
231 if ((child->ptrace & PT_DTRACE) == 0) {
232 /* Spurious delayed TF traps may occur */
233 child->ptrace |= PT_DTRACE;
234 }
235
236 regs = child->thread.uregs;
237
238 regs->sr |= SR_SSTEP; /* auto-resetting upon exception */
239
240 child->exit_code = data;
241 /* give it a chance to run. */
242 wake_up_process(child);
243 ret = 0;
244 break;
245 }
246
247 default:
248 ret = ptrace_request(child, request, addr, data);
249 break;
250 }
251 return ret;
252}
253
254asmlinkage int sh64_ptrace(long request, long pid, long addr, long data)
255{
256 extern void poke_real_address_q(unsigned long long addr, unsigned long long data);
257#define WPC_DBRMODE 0x0d104008
258 static int first_call = 1;
259
260 lock_kernel();
261 if (first_call) {
262 /* Set WPC.DBRMODE to 0. This makes all debug events get
263 * delivered through RESVEC, i.e. into the handlers in entry.S.
264 * (If the kernel was downloaded using a remote gdb, WPC.DBRMODE
265 * would normally be left set to 1, which makes debug events get
266 * delivered through DBRVEC, i.e. into the remote gdb's
267 * handlers. This prevents ptrace getting them, and confuses
268 * the remote gdb.) */
269 printk("DBRMODE set to 0 to permit native debugging\n");
270 poke_real_address_q(WPC_DBRMODE, 0);
271 first_call = 0;
272 }
273 unlock_kernel();
274
275 return sys_ptrace(request, pid, addr, data);
276}
277
278asmlinkage void syscall_trace(void)
279{
280 struct task_struct *tsk = current;
281
282 if (!test_thread_flag(TIF_SYSCALL_TRACE))
283 return;
284 if (!(tsk->ptrace & PT_PTRACED))
285 return;
286
287 ptrace_notify(SIGTRAP | ((current->ptrace & PT_TRACESYSGOOD)
288 ? 0x80 : 0));
289 /*
290 * this isn't the same as continuing with a signal, but it will do
291 * for normal use. strace only continues with a signal if the
292 * stopping signal is not SIGTRAP. -brl
293 */
294 if (tsk->exit_code) {
295 send_sig(tsk->exit_code, tsk, 1);
296 tsk->exit_code = 0;
297 }
298}
299
300/* Called with interrupts disabled */
301asmlinkage void do_single_step(unsigned long long vec, struct pt_regs *regs)
302{
303 /* This is called after a single step exception (DEBUGSS).
304 There is no need to change the PC, as it is a post-execution
305 exception, as entry.S does not do anything to the PC for DEBUGSS.
306 We need to clear the Single Step setting in SR to avoid
307 continually stepping. */
308 local_irq_enable();
309 regs->sr &= ~SR_SSTEP;
310 force_sig(SIGTRAP, current);
311}
312
313/* Called with interrupts disabled */
314asmlinkage void do_software_break_point(unsigned long long vec,
315 struct pt_regs *regs)
316{
317 /* We need to forward step the PC, to counteract the backstep done
318 in signal.c. */
319 local_irq_enable();
320 force_sig(SIGTRAP, current);
321 regs->pc += 4;
322}
323
324/*
325 * Called by kernel/ptrace.c when detaching..
326 *
327 * Make sure single step bits etc are not set.
328 */
329void ptrace_disable(struct task_struct *child)
330{
331 /* nothing to do.. */
332}
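On the SR filtering in the PTRACE_POKEUSR handler above: bits inside SR_MASK are protected and always taken from the child's current SR, while only the remaining bits (S, Q, M, PR, SZ, FR) can be changed by the tracer. A standalone sketch of the merge, illustrative only:

#include <stdio.h>

#define SR_MASK 0xffff8cfdUL	/* bits the tracer may NOT change */

static unsigned long merge_sr(unsigned long cur_sr, unsigned long new_sr)
{
	/* user-writable bits from the poke, protected bits from the child */
	return (new_sr & ~SR_MASK) | (cur_sr & SR_MASK);
}

int main(void)
{
	unsigned long cur = 0x40000000UL;	/* hypothetical current SR */
	unsigned long poke = 0xffffffffUL;	/* tracer writes all-ones */
	printf("merged SR = 0x%08lx\n", merge_sr(cur, poke));
	/* prints: merged SR = 0x40007302 */
	return 0;
}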
diff --git a/arch/sh64/kernel/semaphore.c b/arch/sh64/kernel/semaphore.c
deleted file mode 100644
index 72c16533436e..000000000000
--- a/arch/sh64/kernel/semaphore.c
+++ /dev/null
@@ -1,140 +0,0 @@
1/*
2 * Just taken from the Alpha implementation.
3 * This may not work well.
4 */
5/*
6 * Generic semaphore code. Buyer beware. Do your own
7 * specific changes in <asm/semaphore-helper.h>
8 */
9
10#include <linux/errno.h>
11#include <linux/rwsem.h>
12#include <linux/sched.h>
13#include <linux/wait.h>
14#include <linux/init.h>
15#include <asm/semaphore.h>
16#include <asm/semaphore-helper.h>
17
18spinlock_t semaphore_wake_lock;
19
20/*
21 * Semaphores are implemented using a two-way counter:
22 * The "count" variable is decremented for each process
23 * that tries to sleep, while the "waking" variable is
24 * incremented when the "up()" code goes to wake up waiting
25 * processes.
26 *
27 * Notably, the inline "up()" and "down()" functions can
28 * efficiently test if they need to do any extra work (up
29 * needs to do something only if count was negative before
30 * the increment operation).
31 *
32 * waking_non_zero() (from asm/semaphore.h) must execute
33 * atomically.
34 *
35 * When __up() is called, the count was negative before
36 * incrementing it, and we need to wake up somebody.
37 *
38 * This routine adds one to the count of processes that need to
39 * wake up and exit. ALL waiting processes actually wake up but
40 * only the one that gets to the "waking" field first will gate
41 * through and acquire the semaphore. The others will go back
42 * to sleep.
43 *
44 * Note that these functions are only called when there is
45 * contention on the lock, and as such all this is the
46 * "non-critical" part of the whole semaphore business. The
47 * critical part is the inline stuff in <asm/semaphore.h>
48 * where we want to avoid any extra jumps and calls.
49 */
50void __up(struct semaphore *sem)
51{
52 wake_one_more(sem);
53 wake_up(&sem->wait);
54}
55
56/*
57 * Perform the "down" function. Return zero for semaphore acquired,
58 * return negative for signalled out of the function.
59 *
60 * If called from __down, the return is ignored and the wait loop is
61 * not interruptible. This means that a task waiting on a semaphore
62 * using "down()" cannot be killed until someone does an "up()" on
63 * the semaphore.
64 *
65 * If called from __down_interruptible, the return value gets checked
66 * upon return. If the return value is negative then the task continues
67 * with the negative value in the return register (it can be tested by
68 * the caller).
69 *
70 * Either form may be used in conjunction with "up()".
71 *
72 */
73
74#define DOWN_VAR \
75 struct task_struct *tsk = current; \
76 wait_queue_t wait; \
77 init_waitqueue_entry(&wait, tsk);
78
79#define DOWN_HEAD(task_state) \
80 \
81 \
82 tsk->state = (task_state); \
83 add_wait_queue(&sem->wait, &wait); \
84 \
85 /* \
86 * Ok, we're set up. sem->count is known to be less than zero \
87 * so we must wait. \
88 * \
89 * We can let go the lock for purposes of waiting. \
90 * We re-acquire it after awaking so as to protect \
91 * all semaphore operations. \
92 * \
93 * If "up()" is called before we call waking_non_zero() then \
94 * we will catch it right away. If it is called later then \
95 * we will have to go through a wakeup cycle to catch it. \
96 * \
97 * Multiple waiters contend for the semaphore lock to see \
98 * who gets to gate through and who has to wait some more. \
99 */ \
100 for (;;) {
101
102#define DOWN_TAIL(task_state) \
103 tsk->state = (task_state); \
104 } \
105 tsk->state = TASK_RUNNING; \
106 remove_wait_queue(&sem->wait, &wait);
107
108void __sched __down(struct semaphore * sem)
109{
110 DOWN_VAR
111 DOWN_HEAD(TASK_UNINTERRUPTIBLE)
112 if (waking_non_zero(sem))
113 break;
114 schedule();
115 DOWN_TAIL(TASK_UNINTERRUPTIBLE)
116}
117
118int __sched __down_interruptible(struct semaphore * sem)
119{
120 int ret = 0;
121 DOWN_VAR
122 DOWN_HEAD(TASK_INTERRUPTIBLE)
123
124 ret = waking_non_zero_interruptible(sem, tsk);
125 if (ret)
126 {
127 if (ret == 1)
128 /* ret != 0 only if we get interrupted -arca */
129 ret = 0;
130 break;
131 }
132 schedule();
133 DOWN_TAIL(TASK_INTERRUPTIBLE)
134 return ret;
135}
136
137int __down_trylock(struct semaphore * sem)
138{
139 return waking_non_zero_trylock(sem);
140}
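The two-way counter scheme described in the comment block above can be modelled compactly: "count" goes negative as sleepers queue up, and up() transfers one unit into "waking", which the sleepers race to consume. A simplified standalone model using C11 atomics, with no real sleeping or locking, illustrative only:

#include <stdatomic.h>
#include <stdio.h>

struct sem {
	atomic_int count;
	atomic_int waking;
};

static void up(struct sem *s)
{
	if (atomic_fetch_add(&s->count, 1) < 0)		/* someone was waiting */
		atomic_fetch_add(&s->waking, 1);	/* gate one sleeper through */
}

/* Analogue of waking_non_zero(): try to consume one wakeup. */
static int try_wake(struct sem *s)
{
	int w = atomic_load(&s->waking);
	while (w > 0)
		if (atomic_compare_exchange_weak(&s->waking, &w, w - 1))
			return 1;	/* this waiter gates through */
	return 0;			/* go back to sleep */
}

int main(void)
{
	struct sem s = { 0, 0 };		/* semaphore initially held */
	atomic_fetch_sub(&s.count, 1);		/* a down() queues a sleeper */
	up(&s);					/* holder releases */
	printf("woken: %d\n", try_wake(&s));	/* prints: woken: 1 */
	return 0;
}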
diff --git a/arch/sh64/kernel/setup.c b/arch/sh64/kernel/setup.c
deleted file mode 100644
index 2b7264c0c6f7..000000000000
--- a/arch/sh64/kernel/setup.c
+++ /dev/null
@@ -1,379 +0,0 @@
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * arch/sh64/kernel/setup.c
7 *
8 * sh64 Arch Support
9 *
10 * This file handles the architecture-dependent parts of initialization
11 *
12 * Copyright (C) 2000, 2001 Paolo Alberelli
13 * Copyright (C) 2003, 2004 Paul Mundt
14 *
15 * benedict.gaster@superh.com: 2nd May 2002
16 * Modified to use the empty_zero_page to pass command line arguments.
17 *
18 * benedict.gaster@superh.com: 3rd May 2002
19 * Added support for ramdisk, removing statically linked romfs at the same time.
20 *
21 * lethal@linux-sh.org: 15th May 2003
22 * Added generic procfs cpuinfo reporting. Make boards just export their name.
23 *
24 * lethal@linux-sh.org: 25th May 2003
25 * Added generic get_cpu_subtype() for subtype reporting from cpu_data->type.
26 *
27 */
28#include <linux/errno.h>
29#include <linux/rwsem.h>
30#include <linux/sched.h>
31#include <linux/kernel.h>
32#include <linux/mm.h>
33#include <linux/stddef.h>
34#include <linux/unistd.h>
35#include <linux/ptrace.h>
36#include <linux/slab.h>
37#include <linux/user.h>
38#include <linux/a.out.h>
39#include <linux/screen_info.h>
40#include <linux/ioport.h>
41#include <linux/delay.h>
42#include <linux/init.h>
43#include <linux/seq_file.h>
44#include <linux/blkdev.h>
45#include <linux/bootmem.h>
46#include <linux/console.h>
47#include <linux/root_dev.h>
48#include <linux/cpu.h>
49#include <linux/initrd.h>
50#include <linux/pfn.h>
51#include <asm/processor.h>
52#include <asm/page.h>
53#include <asm/pgtable.h>
54#include <asm/platform.h>
55#include <asm/uaccess.h>
56#include <asm/system.h>
57#include <asm/io.h>
58#include <asm/sections.h>
59#include <asm/setup.h>
60#include <asm/smp.h>
61
62struct screen_info screen_info;
63
64#ifdef CONFIG_BLK_DEV_RAM
65extern int rd_doload; /* 1 = load ramdisk, 0 = don't load */
66extern int rd_prompt; /* 1 = prompt for ramdisk, 0 = don't prompt */
67extern int rd_image_start; /* starting block # of image */
68#endif
69
70extern int root_mountflags;
71extern char *get_system_type(void);
72extern void platform_setup(void);
73extern void platform_monitor(void);
74extern void platform_reserve(void);
75extern int sh64_cache_init(void);
76extern int sh64_tlb_init(void);
77
78#define RAMDISK_IMAGE_START_MASK 0x07FF
79#define RAMDISK_PROMPT_FLAG 0x8000
80#define RAMDISK_LOAD_FLAG 0x4000
81
82static char __initdata command_line[COMMAND_LINE_SIZE] = { 0, };
83unsigned long long memory_start = CONFIG_MEMORY_START;
84unsigned long long memory_end = CONFIG_MEMORY_START + (CONFIG_MEMORY_SIZE_IN_MB * 1024 * 1024);
85
86struct sh_cpuinfo boot_cpu_data;
87
88static inline void parse_mem_cmdline (char ** cmdline_p)
89{
90 char c = ' ', *to = command_line, *from = COMMAND_LINE;
91 int len = 0;
92
93 /* Save unparsed command line copy for /proc/cmdline */
94 memcpy(boot_command_line, COMMAND_LINE, COMMAND_LINE_SIZE);
95 boot_command_line[COMMAND_LINE_SIZE-1] = '\0';
96
97 for (;;) {
98 /*
99 * "mem=XXX[kKmM]" defines a size of memory.
100 */
101 if (c == ' ' && !memcmp(from, "mem=", 4)) {
102 if (to != command_line)
103 to--;
104 {
105 unsigned long mem_size;
106
107 mem_size = memparse(from+4, &from);
108 memory_end = memory_start + mem_size;
109 }
110 }
111 c = *(from++);
112 if (!c)
113 break;
114 if (COMMAND_LINE_SIZE <= ++len)
115 break;
116 *(to++) = c;
117 }
118 *to = '\0';
119
120 *cmdline_p = command_line;
121}
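The "mem=XXX[kKmM]" handling in parse_mem_cmdline() above caps the usable memory by reparsing the boot command line. A standalone sketch of the size parsing, where memparse() here is a simplified stand-in for the kernel helper of the same name:

#include <stdio.h>
#include <stdlib.h>

/* Simplified memparse(): number with optional k/K or m/M suffix. */
static unsigned long memparse(const char *s, char **retp)
{
	unsigned long v = strtoul(s, retp, 0);
	switch (**retp) {
	case 'k': case 'K': v <<= 10; (*retp)++; break;
	case 'm': case 'M': v <<= 20; (*retp)++; break;
	}
	return v;
}

int main(void)
{
	char *end;
	unsigned long start = 0x80000000UL;	/* hypothetical memory_start */
	unsigned long size = memparse("mem=64M" + 4, &end);
	printf("memory_end = 0x%lx\n", start + size);
	/* prints: memory_end = 0x84000000 */
	return 0;
}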
122
123static void __init sh64_cpu_type_detect(void)
124{
125 extern unsigned long long peek_real_address_q(unsigned long long addr);
126 unsigned long long cir;
127 /* Do peeks in real mode to avoid having to set up a mapping for the
128 WPC registers. On SH5-101 cut2, such a mapping would be exposed to
129 an address translation erratum which would make it hard to set up
130 correctly. */
131 cir = peek_real_address_q(0x0d000008);
132
133 if ((cir & 0xffff) == 0x5103) {
134 boot_cpu_data.type = CPU_SH5_103;
135 } else if (((cir >> 32) & 0xffff) == 0x51e2) {
136 /* CPU.VCR aliased at CIR address on SH5-101 */
137 boot_cpu_data.type = CPU_SH5_101;
138 } else {
139 boot_cpu_data.type = CPU_SH_NONE;
140 }
141}
142
143void __init setup_arch(char **cmdline_p)
144{
145 unsigned long bootmap_size, i;
146 unsigned long first_pfn, start_pfn, last_pfn, pages;
147
148#ifdef CONFIG_EARLY_PRINTK
149 extern void enable_early_printk(void);
150
151 /*
152 * Setup Early SCIF console
153 */
154 enable_early_printk();
155#endif
156
157 /*
158 * Setup TLB mappings
159 */
160 sh64_tlb_init();
161
162 /*
163 * Caches are already initialized by the time we get here, so we just
164 * fill in cpu_data info for the caches.
165 */
166 sh64_cache_init();
167
168 platform_setup();
169 platform_monitor();
170
171 sh64_cpu_type_detect();
172
173 ROOT_DEV = old_decode_dev(ORIG_ROOT_DEV);
174
175#ifdef CONFIG_BLK_DEV_RAM
176 rd_image_start = RAMDISK_FLAGS & RAMDISK_IMAGE_START_MASK;
177 rd_prompt = ((RAMDISK_FLAGS & RAMDISK_PROMPT_FLAG) != 0);
178 rd_doload = ((RAMDISK_FLAGS & RAMDISK_LOAD_FLAG) != 0);
179#endif
180
181 if (!MOUNT_ROOT_RDONLY)
182 root_mountflags &= ~MS_RDONLY;
183 init_mm.start_code = (unsigned long) _text;
184 init_mm.end_code = (unsigned long) _etext;
185 init_mm.end_data = (unsigned long) _edata;
186 init_mm.brk = (unsigned long) _end;
187
188 code_resource.start = __pa(_text);
189 code_resource.end = __pa(_etext)-1;
190 data_resource.start = __pa(_etext);
191 data_resource.end = __pa(_edata)-1;
192
193 parse_mem_cmdline(cmdline_p);
194
195 /*
196 * Find the lowest and highest page frame numbers we have available
197 */
198 first_pfn = PFN_DOWN(memory_start);
199 last_pfn = PFN_DOWN(memory_end);
200 pages = last_pfn - first_pfn;
201
202 /*
203 * Partially used pages are not usable - thus
204 * we are rounding upwards:
205 */
206 start_pfn = PFN_UP(__pa(_end));
207
208 /*
209 * Find a proper area for the bootmem bitmap. After this
210 * bootstrap step all allocations (until the page allocator
211	 * is fully up) must be done via bootmem_alloc().
212 */
213 bootmap_size = init_bootmem_node(NODE_DATA(0), start_pfn,
214 first_pfn,
215 last_pfn);
216 /*
217 * Round it up.
218 */
219 bootmap_size = PFN_PHYS(PFN_UP(bootmap_size));
220
221 /*
222 * Register fully available RAM pages with the bootmem allocator.
223 */
224 free_bootmem_node(NODE_DATA(0), PFN_PHYS(first_pfn), PFN_PHYS(pages));
225
226 /*
227 * Reserve all kernel sections + bootmem bitmap + a guard page.
228 */
229 reserve_bootmem_node(NODE_DATA(0), PFN_PHYS(first_pfn),
230 (PFN_PHYS(start_pfn) + bootmap_size + PAGE_SIZE) - PFN_PHYS(first_pfn));
231
232 /*
233 * Reserve platform dependent sections
234 */
235 platform_reserve();
236
237#ifdef CONFIG_BLK_DEV_INITRD
238 if (LOADER_TYPE && INITRD_START) {
239 if (INITRD_START + INITRD_SIZE <= (PFN_PHYS(last_pfn))) {
240 reserve_bootmem_node(NODE_DATA(0), INITRD_START + __MEMORY_START, INITRD_SIZE);
241
242 initrd_start = (long) INITRD_START + PAGE_OFFSET + __MEMORY_START;
243 initrd_end = initrd_start + INITRD_SIZE;
244 } else {
245 printk("initrd extends beyond end of memory "
246 "(0x%08lx > 0x%08lx)\ndisabling initrd\n",
247 (long) INITRD_START + INITRD_SIZE,
248 PFN_PHYS(last_pfn));
249 initrd_start = 0;
250 }
251 }
252#endif
253
254 /*
255 * Claim all RAM, ROM, and I/O resources.
256 */
257
258 /* Kernel RAM */
259 request_resource(&iomem_resource, &code_resource);
260 request_resource(&iomem_resource, &data_resource);
261
262 /* Other KRAM space */
263 for (i = 0; i < STANDARD_KRAM_RESOURCES - 2; i++)
264 request_resource(&iomem_resource,
265 &platform_parms.kram_res_p[i]);
266
267 /* XRAM space */
268 for (i = 0; i < STANDARD_XRAM_RESOURCES; i++)
269 request_resource(&iomem_resource,
270 &platform_parms.xram_res_p[i]);
271
272 /* ROM space */
273 for (i = 0; i < STANDARD_ROM_RESOURCES; i++)
274 request_resource(&iomem_resource,
275 &platform_parms.rom_res_p[i]);
276
277 /* I/O space */
278 for (i = 0; i < STANDARD_IO_RESOURCES; i++)
279 request_resource(&ioport_resource,
280 &platform_parms.io_res_p[i]);
281
282
283#ifdef CONFIG_VT
284#if defined(CONFIG_VGA_CONSOLE)
285 conswitchp = &vga_con;
286#elif defined(CONFIG_DUMMY_CONSOLE)
287 conswitchp = &dummy_con;
288#endif
289#endif
290
291 printk("Hardware FPU: %s\n", fpu_in_use ? "enabled" : "disabled");
292
293 paging_init();
294}
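A worked example of the PFN arithmetic in setup_arch() may help; the numbers are illustrative assumptions, not values from a real board. With 4 KiB pages, memory_start = 0x80000000 and memory_end = 0x84000000:

    /* PFN_DOWN(x) = x >> 12, PFN_UP(x) = (x + 0xfff) >> 12 (4 KiB pages) */
    first_pfn = PFN_DOWN(0x80000000);  /* = 0x80000                 */
    last_pfn  = PFN_DOWN(0x84000000);  /* = 0x84000                 */
    pages     = last_pfn - first_pfn;  /* = 0x4000 = 64 MiB / 4 KiB */

    /* If the kernel image ends at _end = 0x80400123, the first fully
     * free page is rounded up, not down: */
    start_pfn = PFN_UP(0x80400123);    /* = 0x80401 */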
295
296void __xchg_called_with_bad_pointer(void)
297{
298	printk(KERN_EMERG "xchg() called with bad pointer!\n");
299}
300
301static struct cpu cpu[1];
302
303static int __init topology_init(void)
304{
305 return register_cpu(cpu, 0);
306}
307
308subsys_initcall(topology_init);
309
310/*
311 * Get CPU information
312 */
313static const char *cpu_name[] = {
314 [CPU_SH5_101] = "SH5-101",
315 [CPU_SH5_103] = "SH5-103",
316 [CPU_SH_NONE] = "Unknown",
317};
318
319const char *get_cpu_subtype(void)
320{
321 return cpu_name[boot_cpu_data.type];
322}
323
324#ifdef CONFIG_PROC_FS
325static int show_cpuinfo(struct seq_file *m, void *v)
326{
327 unsigned int cpu = smp_processor_id();
328
329 if (!cpu)
330 seq_printf(m, "machine\t\t: %s\n", get_system_type());
331
332 seq_printf(m, "processor\t: %d\n", cpu);
333 seq_printf(m, "cpu family\t: SH-5\n");
334 seq_printf(m, "cpu type\t: %s\n", get_cpu_subtype());
335
336 seq_printf(m, "icache size\t: %dK-bytes\n",
337 (boot_cpu_data.icache.ways *
338 boot_cpu_data.icache.sets *
339 boot_cpu_data.icache.linesz) >> 10);
340 seq_printf(m, "dcache size\t: %dK-bytes\n",
341 (boot_cpu_data.dcache.ways *
342 boot_cpu_data.dcache.sets *
343 boot_cpu_data.dcache.linesz) >> 10);
344 seq_printf(m, "itlb entries\t: %d\n", boot_cpu_data.itlb.entries);
345 seq_printf(m, "dtlb entries\t: %d\n", boot_cpu_data.dtlb.entries);
346
347#define PRINT_CLOCK(name, value) \
348 seq_printf(m, name " clock\t: %d.%02dMHz\n", \
349 ((value) / 1000000), ((value) % 1000000)/10000)
350
351 PRINT_CLOCK("cpu", boot_cpu_data.cpu_clock);
352 PRINT_CLOCK("bus", boot_cpu_data.bus_clock);
353 PRINT_CLOCK("module", boot_cpu_data.module_clock);
354
355 seq_printf(m, "bogomips\t: %lu.%02lu\n\n",
356 (loops_per_jiffy*HZ+2500)/500000,
357 ((loops_per_jiffy*HZ+2500)/5000) % 100);
358
359 return 0;
360}
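The bogomips line is plain fixed-point formatting: loops_per_jiffy * HZ is delay-loop iterations per second, and one BogoMIPS conventionally equals 500,000 iterations per second. Worked through with assumed numbers:

    /* Assume loops_per_jiffy = 249856 and HZ = 100:
     *   loops/sec        = 249856 * 100            = 24985600
     *   rounded          = 24985600 + 2500         = 24988100
     *   integer part     = 24988100 / 500000       = 49
     *   fractional part  = (24988100 / 5000) % 100 = 97
     * so the line reads "bogomips : 49.97".
     */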
361
362static void *c_start(struct seq_file *m, loff_t *pos)
363{
364 return (void*)(*pos == 0);
365}
366static void *c_next(struct seq_file *m, void *v, loff_t *pos)
367{
368 return NULL;
369}
370static void c_stop(struct seq_file *m, void *v)
371{
372}
373struct seq_operations cpuinfo_op = {
374 .start = c_start,
375 .next = c_next,
376 .stop = c_stop,
377 .show = show_cpuinfo,
378};
379#endif /* CONFIG_PROC_FS */
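The (void *)(*pos == 0) in c_start() above is the usual one-shot seq_file idiom: it yields a single non-NULL token (the integer 1 cast to a pointer) on the first call and NULL on later calls, so show_cpuinfo() runs exactly once per read. The call sequence the seq_file core drives looks roughly like this (a sketch of the core's behaviour, not code from this file):

    v = c_start(m, &pos);    /* pos == 0  -> returns (void *)1  */
    show_cpuinfo(m, v);      /* emits the whole report          */
    v = c_next(m, v, &pos);  /* returns NULL -> iteration stops */
    c_stop(m, v);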
diff --git a/arch/sh64/kernel/sh_ksyms.c b/arch/sh64/kernel/sh_ksyms.c
deleted file mode 100644
index b1705acc8e64..000000000000
--- a/arch/sh64/kernel/sh_ksyms.c
+++ /dev/null
@@ -1,62 +0,0 @@
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * arch/sh64/kernel/sh_ksyms.c
7 *
8 * Copyright (C) 2000, 2001 Paolo Alberelli
9 *
10 */
11
12#include <linux/rwsem.h>
13#include <linux/module.h>
14#include <linux/smp.h>
15#include <linux/user.h>
16#include <linux/elfcore.h>
17#include <linux/sched.h>
18#include <linux/in6.h>
19#include <linux/interrupt.h>
20#include <linux/screen_info.h>
21
22#include <asm/semaphore.h>
23#include <asm/processor.h>
24#include <asm/uaccess.h>
25#include <asm/checksum.h>
26#include <asm/io.h>
27#include <asm/delay.h>
28#include <asm/irq.h>
29
30extern int dump_fpu(struct pt_regs *, elf_fpregset_t *);
31
32/* platform dependent support */
33EXPORT_SYMBOL(dump_fpu);
34EXPORT_SYMBOL(kernel_thread);
35
36/* Networking helper routines. */
37EXPORT_SYMBOL(csum_partial_copy_nocheck);
38
39#ifdef CONFIG_VT
40EXPORT_SYMBOL(screen_info);
41#endif
42
43EXPORT_SYMBOL(__down);
44EXPORT_SYMBOL(__down_trylock);
45EXPORT_SYMBOL(__up);
46EXPORT_SYMBOL(__put_user_asm_l);
47EXPORT_SYMBOL(__get_user_asm_l);
48EXPORT_SYMBOL(__copy_user);
49EXPORT_SYMBOL(memcpy);
50EXPORT_SYMBOL(udelay);
51EXPORT_SYMBOL(__udelay);
52EXPORT_SYMBOL(ndelay);
53EXPORT_SYMBOL(__ndelay);
54EXPORT_SYMBOL(flush_dcache_page);
55EXPORT_SYMBOL(sh64_page_clear);
56
57/* Ugh. These come in from libgcc.a at link time. */
58#define DECLARE_EXPORT(name) extern void name(void); EXPORT_SYMBOL(name)
59
60DECLARE_EXPORT(__sdivsi3);
61DECLARE_EXPORT(__muldi3);
62DECLARE_EXPORT(__udivsi3);
diff --git a/arch/sh64/kernel/signal.c b/arch/sh64/kernel/signal.c
deleted file mode 100644
index 79fc48cf54c6..000000000000
--- a/arch/sh64/kernel/signal.c
+++ /dev/null
@@ -1,750 +0,0 @@
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * arch/sh64/kernel/signal.c
7 *
8 * Copyright (C) 2000, 2001 Paolo Alberelli
9 * Copyright (C) 2003 Paul Mundt
10 * Copyright (C) 2004 Richard Curnow
11 *
12 * Started from sh version.
13 *
14 */
15#include <linux/rwsem.h>
16#include <linux/sched.h>
17#include <linux/mm.h>
18#include <linux/smp.h>
19#include <linux/kernel.h>
20#include <linux/signal.h>
21#include <linux/errno.h>
22#include <linux/wait.h>
23#include <linux/personality.h>
24#include <linux/freezer.h>
25#include <linux/ptrace.h>
26#include <linux/unistd.h>
27#include <linux/stddef.h>
28#include <asm/ucontext.h>
29#include <asm/uaccess.h>
30#include <asm/pgtable.h>
31
32
33#define REG_RET 9
34#define REG_ARG1 2
35#define REG_ARG2 3
36#define REG_ARG3 4
37#define REG_SP 15
38#define REG_PR 18
39#define REF_REG_RET regs->regs[REG_RET]
40#define REF_REG_SP regs->regs[REG_SP]
41#define DEREF_REG_PR regs->regs[REG_PR]
42
43#define DEBUG_SIG 0
44
45#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
46
47asmlinkage int do_signal(struct pt_regs *regs, sigset_t *oldset);
48
49/*
50 * Atomically swap in the new signal mask, and wait for a signal.
51 */
52
53asmlinkage int
54sys_sigsuspend(old_sigset_t mask,
55 unsigned long r3, unsigned long r4, unsigned long r5,
56 unsigned long r6, unsigned long r7,
57 struct pt_regs * regs)
58{
59 sigset_t saveset;
60
61 mask &= _BLOCKABLE;
62 spin_lock_irq(&current->sighand->siglock);
63 saveset = current->blocked;
64 siginitset(&current->blocked, mask);
65 recalc_sigpending();
66 spin_unlock_irq(&current->sighand->siglock);
67
68 REF_REG_RET = -EINTR;
69 while (1) {
70 current->state = TASK_INTERRUPTIBLE;
71 schedule();
72 regs->pc += 4; /* because sys_sigreturn decrements the pc */
73 if (do_signal(regs, &saveset)) {
74 /* pc now points at signal handler. Need to decrement
75 it because entry.S will increment it. */
76 regs->pc -= 4;
77 return -EINTR;
78 }
79 }
80}
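The pc adjustments in the loop above are easiest to read as a ledger; the following is inferred from the comments, not traced through entry.S:

    /* - the syscall exit path in entry.S advances regs->pc by one
     *   instruction (4 bytes);
     * - while no handler is pending, pc is pre-incremented here so the
     *   net movement per wakeup is the expected one instruction;
     * - once do_signal() has pointed pc at the handler, that pending
     *   increment would skip the handler's first instruction, hence
     *   the compensating pc -= 4 before returning -EINTR.
     */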
81
82asmlinkage int
83sys_rt_sigsuspend(sigset_t *unewset, size_t sigsetsize,
84 unsigned long r4, unsigned long r5, unsigned long r6,
85 unsigned long r7,
86 struct pt_regs * regs)
87{
88 sigset_t saveset, newset;
89
90 /* XXX: Don't preclude handling different sized sigset_t's. */
91 if (sigsetsize != sizeof(sigset_t))
92 return -EINVAL;
93
94 if (copy_from_user(&newset, unewset, sizeof(newset)))
95 return -EFAULT;
96 sigdelsetmask(&newset, ~_BLOCKABLE);
97 spin_lock_irq(&current->sighand->siglock);
98 saveset = current->blocked;
99 current->blocked = newset;
100 recalc_sigpending();
101 spin_unlock_irq(&current->sighand->siglock);
102
103 REF_REG_RET = -EINTR;
104 while (1) {
105 current->state = TASK_INTERRUPTIBLE;
106 schedule();
107 regs->pc += 4; /* because sys_sigreturn decrements the pc */
108 if (do_signal(regs, &saveset)) {
109 /* pc now points at signal handler. Need to decrement
110 it because entry.S will increment it. */
111 regs->pc -= 4;
112 return -EINTR;
113 }
114 }
115}
116
117asmlinkage int
118sys_sigaction(int sig, const struct old_sigaction __user *act,
119 struct old_sigaction __user *oact)
120{
121 struct k_sigaction new_ka, old_ka;
122 int ret;
123
124 if (act) {
125 old_sigset_t mask;
126 if (!access_ok(VERIFY_READ, act, sizeof(*act)) ||
127 __get_user(new_ka.sa.sa_handler, &act->sa_handler) ||
128 __get_user(new_ka.sa.sa_restorer, &act->sa_restorer))
129 return -EFAULT;
130 __get_user(new_ka.sa.sa_flags, &act->sa_flags);
131 __get_user(mask, &act->sa_mask);
132 siginitset(&new_ka.sa.sa_mask, mask);
133 }
134
135 ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
136
137 if (!ret && oact) {
138 if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) ||
139 __put_user(old_ka.sa.sa_handler, &oact->sa_handler) ||
140 __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer))
141 return -EFAULT;
142 __put_user(old_ka.sa.sa_flags, &oact->sa_flags);
143 __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask);
144 }
145
146 return ret;
147}
148
149asmlinkage int
150sys_sigaltstack(const stack_t __user *uss, stack_t __user *uoss,
151 unsigned long r4, unsigned long r5, unsigned long r6,
152 unsigned long r7,
153 struct pt_regs * regs)
154{
155 return do_sigaltstack(uss, uoss, REF_REG_SP);
156}
157
158
159/*
160 * Do a signal return; undo the signal stack.
161 */
162
163struct sigframe
164{
165 struct sigcontext sc;
166 unsigned long extramask[_NSIG_WORDS-1];
167 long long retcode[2];
168};
169
170struct rt_sigframe
171{
172 struct siginfo __user *pinfo;
173 void *puc;
174 struct siginfo info;
175 struct ucontext uc;
176 long long retcode[2];
177};
178
179#ifdef CONFIG_SH_FPU
180static inline int
181restore_sigcontext_fpu(struct pt_regs *regs, struct sigcontext __user *sc)
182{
183 int err = 0;
184 int fpvalid;
185
186 err |= __get_user (fpvalid, &sc->sc_fpvalid);
187 conditional_used_math(fpvalid);
188 if (! fpvalid)
189 return err;
190
191 if (current == last_task_used_math) {
192 last_task_used_math = NULL;
193 regs->sr |= SR_FD;
194 }
195
196 err |= __copy_from_user(&current->thread.fpu.hard, &sc->sc_fpregs[0],
197 (sizeof(long long) * 32) + (sizeof(int) * 1));
198
199 return err;
200}
201
202static inline int
203setup_sigcontext_fpu(struct pt_regs *regs, struct sigcontext __user *sc)
204{
205 int err = 0;
206 int fpvalid;
207
208 fpvalid = !!used_math();
209 err |= __put_user(fpvalid, &sc->sc_fpvalid);
210 if (! fpvalid)
211 return err;
212
213 if (current == last_task_used_math) {
214 grab_fpu();
215 fpsave(&current->thread.fpu.hard);
216 release_fpu();
217 last_task_used_math = NULL;
218 regs->sr |= SR_FD;
219 }
220
221 err |= __copy_to_user(&sc->sc_fpregs[0], &current->thread.fpu.hard,
222 (sizeof(long long) * 32) + (sizeof(int) * 1));
223 clear_used_math();
224
225 return err;
226}
227#else
228static inline int
229restore_sigcontext_fpu(struct pt_regs *regs, struct sigcontext __user *sc)
230{ return 0; }
231static inline int
232setup_sigcontext_fpu(struct pt_regs *regs, struct sigcontext __user *sc)
233{ return 0; }
234#endif
235
236static int
237restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc, long long *r2_p)
238{
239 unsigned int err = 0;
240 unsigned long long current_sr, new_sr;
241#define SR_MASK 0xffff8cfd
242
243#define COPY(x) err |= __get_user(regs->x, &sc->sc_##x)
244
245 COPY(regs[0]); COPY(regs[1]); COPY(regs[2]); COPY(regs[3]);
246 COPY(regs[4]); COPY(regs[5]); COPY(regs[6]); COPY(regs[7]);
247 COPY(regs[8]); COPY(regs[9]); COPY(regs[10]); COPY(regs[11]);
248 COPY(regs[12]); COPY(regs[13]); COPY(regs[14]); COPY(regs[15]);
249 COPY(regs[16]); COPY(regs[17]); COPY(regs[18]); COPY(regs[19]);
250 COPY(regs[20]); COPY(regs[21]); COPY(regs[22]); COPY(regs[23]);
251 COPY(regs[24]); COPY(regs[25]); COPY(regs[26]); COPY(regs[27]);
252 COPY(regs[28]); COPY(regs[29]); COPY(regs[30]); COPY(regs[31]);
253 COPY(regs[32]); COPY(regs[33]); COPY(regs[34]); COPY(regs[35]);
254 COPY(regs[36]); COPY(regs[37]); COPY(regs[38]); COPY(regs[39]);
255 COPY(regs[40]); COPY(regs[41]); COPY(regs[42]); COPY(regs[43]);
256 COPY(regs[44]); COPY(regs[45]); COPY(regs[46]); COPY(regs[47]);
257 COPY(regs[48]); COPY(regs[49]); COPY(regs[50]); COPY(regs[51]);
258 COPY(regs[52]); COPY(regs[53]); COPY(regs[54]); COPY(regs[55]);
259 COPY(regs[56]); COPY(regs[57]); COPY(regs[58]); COPY(regs[59]);
260 COPY(regs[60]); COPY(regs[61]); COPY(regs[62]);
261 COPY(tregs[0]); COPY(tregs[1]); COPY(tregs[2]); COPY(tregs[3]);
262 COPY(tregs[4]); COPY(tregs[5]); COPY(tregs[6]); COPY(tregs[7]);
263
264 /* Prevent the signal handler manipulating SR in a way that can
265 crash the kernel. i.e. only allow S, Q, M, PR, SZ, FR to be
266 modified */
267 current_sr = regs->sr;
268 err |= __get_user(new_sr, &sc->sc_sr);
269 regs->sr &= SR_MASK;
270 regs->sr |= (new_sr & ~SR_MASK);
271
272 COPY(pc);
273
274#undef COPY
275
276 /* Must do this last in case it sets regs->sr.fd (i.e. after rest of sr
277 * has been restored above.) */
278 err |= restore_sigcontext_fpu(regs, sc);
279
280 regs->syscall_nr = -1; /* disable syscall checks */
281 err |= __get_user(*r2_p, &sc->sc_regs[REG_RET]);
282 return err;
283}
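The SR update above is a masked merge, and the hex works out as follows; only bits cleared in SR_MASK can be influenced from the signal frame:

    /* SR_MASK  = 0xffff8cfd   (bits the kernel's value keeps)
     * ~SR_MASK = 0x00007302   (bits the handler may supply)
     *
     * regs->sr = (regs->sr & 0xffff8cfd) | (new_sr & 0x00007302);
     *
     * per the comment, the writable bits cover S, Q, M, PR, SZ and FR.
     */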
284
285asmlinkage int sys_sigreturn(unsigned long r2, unsigned long r3,
286 unsigned long r4, unsigned long r5,
287 unsigned long r6, unsigned long r7,
288 struct pt_regs * regs)
289{
290 struct sigframe __user *frame = (struct sigframe __user *) (long) REF_REG_SP;
291 sigset_t set;
292 long long ret;
293
294 if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
295 goto badframe;
296
297 if (__get_user(set.sig[0], &frame->sc.oldmask)
298 || (_NSIG_WORDS > 1
299 && __copy_from_user(&set.sig[1], &frame->extramask,
300 sizeof(frame->extramask))))
301 goto badframe;
302
303 sigdelsetmask(&set, ~_BLOCKABLE);
304
305 spin_lock_irq(&current->sighand->siglock);
306 current->blocked = set;
307 recalc_sigpending();
308 spin_unlock_irq(&current->sighand->siglock);
309
310 if (restore_sigcontext(regs, &frame->sc, &ret))
311 goto badframe;
312 regs->pc -= 4;
313
314 return (int) ret;
315
316badframe:
317 force_sig(SIGSEGV, current);
318 return 0;
319}
320
321asmlinkage int sys_rt_sigreturn(unsigned long r2, unsigned long r3,
322 unsigned long r4, unsigned long r5,
323 unsigned long r6, unsigned long r7,
324 struct pt_regs * regs)
325{
326 struct rt_sigframe __user *frame = (struct rt_sigframe __user *) (long) REF_REG_SP;
327 sigset_t set;
328	stack_t st;
329 long long ret;
330
331 if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
332 goto badframe;
333
334 if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set)))
335 goto badframe;
336
337 sigdelsetmask(&set, ~_BLOCKABLE);
338 spin_lock_irq(&current->sighand->siglock);
339 current->blocked = set;
340 recalc_sigpending();
341 spin_unlock_irq(&current->sighand->siglock);
342
343 if (restore_sigcontext(regs, &frame->uc.uc_mcontext, &ret))
344 goto badframe;
345 regs->pc -= 4;
346
347 if (__copy_from_user(&st, &frame->uc.uc_stack, sizeof(st)))
348 goto badframe;
349 /* It is more difficult to avoid calling this function than to
350 call it and ignore errors. */
351 do_sigaltstack(&st, NULL, REF_REG_SP);
352
353 return (int) ret;
354
355badframe:
356 force_sig(SIGSEGV, current);
357 return 0;
358}
359
360/*
361 * Set up a signal frame.
362 */
363
364static int
365setup_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs,
366 unsigned long mask)
367{
368 int err = 0;
369
370	/* Do this first, otherwise if this sets sr->fd, that value isn't preserved. */
371 err |= setup_sigcontext_fpu(regs, sc);
372
373#define COPY(x) err |= __put_user(regs->x, &sc->sc_##x)
374
375 COPY(regs[0]); COPY(regs[1]); COPY(regs[2]); COPY(regs[3]);
376 COPY(regs[4]); COPY(regs[5]); COPY(regs[6]); COPY(regs[7]);
377 COPY(regs[8]); COPY(regs[9]); COPY(regs[10]); COPY(regs[11]);
378 COPY(regs[12]); COPY(regs[13]); COPY(regs[14]); COPY(regs[15]);
379 COPY(regs[16]); COPY(regs[17]); COPY(regs[18]); COPY(regs[19]);
380 COPY(regs[20]); COPY(regs[21]); COPY(regs[22]); COPY(regs[23]);
381 COPY(regs[24]); COPY(regs[25]); COPY(regs[26]); COPY(regs[27]);
382 COPY(regs[28]); COPY(regs[29]); COPY(regs[30]); COPY(regs[31]);
383 COPY(regs[32]); COPY(regs[33]); COPY(regs[34]); COPY(regs[35]);
384 COPY(regs[36]); COPY(regs[37]); COPY(regs[38]); COPY(regs[39]);
385 COPY(regs[40]); COPY(regs[41]); COPY(regs[42]); COPY(regs[43]);
386 COPY(regs[44]); COPY(regs[45]); COPY(regs[46]); COPY(regs[47]);
387 COPY(regs[48]); COPY(regs[49]); COPY(regs[50]); COPY(regs[51]);
388 COPY(regs[52]); COPY(regs[53]); COPY(regs[54]); COPY(regs[55]);
389 COPY(regs[56]); COPY(regs[57]); COPY(regs[58]); COPY(regs[59]);
390 COPY(regs[60]); COPY(regs[61]); COPY(regs[62]);
391 COPY(tregs[0]); COPY(tregs[1]); COPY(tregs[2]); COPY(tregs[3]);
392 COPY(tregs[4]); COPY(tregs[5]); COPY(tregs[6]); COPY(tregs[7]);
393 COPY(sr); COPY(pc);
394
395#undef COPY
396
397 err |= __put_user(mask, &sc->oldmask);
398
399 return err;
400}
401
402/*
403 * Determine which stack to use..
404 */
405static inline void __user *
406get_sigframe(struct k_sigaction *ka, unsigned long sp, size_t frame_size)
407{
408 if ((ka->sa.sa_flags & SA_ONSTACK) != 0 && ! sas_ss_flags(sp))
409 sp = current->sas_ss_sp + current->sas_ss_size;
410
411 return (void __user *)((sp - frame_size) & -8ul);
412}
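The & -8ul simply rounds the frame base down to an 8-byte boundary. With made-up numbers:

    /* sp = 0x7ffff123, frame_size = 0x1d0:
     *   sp - frame_size    = 0x7fffef53
     *   0x7fffef53 & ~7ul  = 0x7fffef50   (8-byte aligned frame base)
     */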
413
414void sa_default_restorer(void); /* See comments below */
415void sa_default_rt_restorer(void); /* See comments below */
416
417static void setup_frame(int sig, struct k_sigaction *ka,
418 sigset_t *set, struct pt_regs *regs)
419{
420 struct sigframe __user *frame;
421 int err = 0;
422 int signal;
423
424 frame = get_sigframe(ka, regs->regs[REG_SP], sizeof(*frame));
425
426 if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
427 goto give_sigsegv;
428
429 signal = current_thread_info()->exec_domain
430 && current_thread_info()->exec_domain->signal_invmap
431 && sig < 32
432 ? current_thread_info()->exec_domain->signal_invmap[sig]
433 : sig;
434
435 err |= setup_sigcontext(&frame->sc, regs, set->sig[0]);
436
437	/* Bail out early on error, as the i386 version does */
438 if (err)
439 goto give_sigsegv;
440
441	if (_NSIG_WORDS > 1)
442		err |= __copy_to_user(frame->extramask, &set->sig[1],
443				      sizeof(frame->extramask));
444
445	/* Bail out early on error, as the i386 version does */
446 if (err)
447 goto give_sigsegv;
448
449 /* Set up to return from userspace. If provided, use a stub
450 already in userspace. */
451 if (ka->sa.sa_flags & SA_RESTORER) {
452 DEREF_REG_PR = (unsigned long) ka->sa.sa_restorer | 0x1;
453
454 /*
455 * On SH5 all edited pointers are subject to NEFF
456 */
457 DEREF_REG_PR = (DEREF_REG_PR & NEFF_SIGN) ?
458 (DEREF_REG_PR | NEFF_MASK) : DEREF_REG_PR;
459 } else {
460		/*
461		 * Different approach on SH5:
462		 * . endianness-independent asm code gets placed in entry.S,
463		 *   limited to four instructions (two long longs in size).
464		 * . error checking is done on this branch only.
465		 * . the icache flush is needed only here, where the
466		 *   trampoline has just been written.
467		 * . all edited pointers are subject to NEFF.
468		 * . being code, the linker sets the SHmedia bit, so
469		 *   always dereference with index -1.
470		 */
471 DEREF_REG_PR = (unsigned long) frame->retcode | 0x01;
472 DEREF_REG_PR = (DEREF_REG_PR & NEFF_SIGN) ?
473 (DEREF_REG_PR | NEFF_MASK) : DEREF_REG_PR;
474
475 if (__copy_to_user(frame->retcode,
476			   (void *)((unsigned long)sa_default_restorer & (~1)), 16) != 0)
477 goto give_sigsegv;
478
479 /* Cohere the trampoline with the I-cache. */
480 flush_cache_sigtramp(DEREF_REG_PR-1, DEREF_REG_PR-1+16);
481 }
482
483 /*
484 * Set up registers for signal handler.
485 * All edited pointers are subject to NEFF.
486 */
487 regs->regs[REG_SP] = (unsigned long) frame;
488 regs->regs[REG_SP] = (regs->regs[REG_SP] & NEFF_SIGN) ?
489 (regs->regs[REG_SP] | NEFF_MASK) : regs->regs[REG_SP];
490 regs->regs[REG_ARG1] = signal; /* Arg for signal handler */
491
492 /* FIXME:
493 The glibc profiling support for SH-5 needs to be passed a sigcontext
494 so it can retrieve the PC. At some point during 2003 the glibc
495 support was changed to receive the sigcontext through the 2nd
496 argument, but there are still versions of libc.so in use that use
497 the 3rd argument. Until libc.so is stabilised, pass the sigcontext
498 through both 2nd and 3rd arguments.
499 */
500
501 regs->regs[REG_ARG2] = (unsigned long long)(unsigned long)(signed long)&frame->sc;
502 regs->regs[REG_ARG3] = (unsigned long long)(unsigned long)(signed long)&frame->sc;
503
504 regs->pc = (unsigned long) ka->sa.sa_handler;
505 regs->pc = (regs->pc & NEFF_SIGN) ? (regs->pc | NEFF_MASK) : regs->pc;
506
507 set_fs(USER_DS);
508
509#if DEBUG_SIG
510 /* Broken %016Lx */
511 printk("SIG deliver (#%d,%s:%d): sp=%p pc=%08Lx%08Lx link=%08Lx%08Lx\n",
512 signal,
513 current->comm, current->pid, frame,
514 regs->pc >> 32, regs->pc & 0xffffffff,
515 DEREF_REG_PR >> 32, DEREF_REG_PR & 0xffffffff);
516#endif
517
518 return;
519
520give_sigsegv:
521 force_sigsegv(sig, current);
522}
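The (x & NEFF_SIGN) ? (x | NEFF_MASK) : x pattern that recurs through setup_frame() and setup_rt_frame() is a sign extension: SH-5 implements fewer effective-address bits than 64 (NEFF), so any pointer handed back to hardware must have bit NEFF-1 propagated upwards. A minimal sketch of the idiom as a helper; the function name is hypothetical, and NEFF_SIGN/NEFF_MASK are taken as given from the headers:

    /* Hypothetical helper: NEFF_SIGN marks bit (NEFF - 1) and NEFF_MASK
     * has all bits above it set, as the usage above implies. */
    static inline unsigned long long neff_sign_extend(unsigned long long va)
    {
            return (va & NEFF_SIGN) ? (va | NEFF_MASK) : va;
    }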
523
524static void setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
525 sigset_t *set, struct pt_regs *regs)
526{
527 struct rt_sigframe __user *frame;
528 int err = 0;
529 int signal;
530
531 frame = get_sigframe(ka, regs->regs[REG_SP], sizeof(*frame));
532
533 if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
534 goto give_sigsegv;
535
536 signal = current_thread_info()->exec_domain
537 && current_thread_info()->exec_domain->signal_invmap
538 && sig < 32
539 ? current_thread_info()->exec_domain->signal_invmap[sig]
540 : sig;
541
542 err |= __put_user(&frame->info, &frame->pinfo);
543 err |= __put_user(&frame->uc, &frame->puc);
544 err |= copy_siginfo_to_user(&frame->info, info);
545
546	/* Bail out early on error, as the i386 version does */
547 if (err)
548 goto give_sigsegv;
549
550 /* Create the ucontext. */
551 err |= __put_user(0, &frame->uc.uc_flags);
552 err |= __put_user(0, &frame->uc.uc_link);
553 err |= __put_user((void *)current->sas_ss_sp,
554 &frame->uc.uc_stack.ss_sp);
555 err |= __put_user(sas_ss_flags(regs->regs[REG_SP]),
556 &frame->uc.uc_stack.ss_flags);
557 err |= __put_user(current->sas_ss_size, &frame->uc.uc_stack.ss_size);
558 err |= setup_sigcontext(&frame->uc.uc_mcontext,
559 regs, set->sig[0]);
560 err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
561
562	/* Bail out early on error, as the i386 version does */
563 if (err)
564 goto give_sigsegv;
565
566 /* Set up to return from userspace. If provided, use a stub
567 already in userspace. */
568 if (ka->sa.sa_flags & SA_RESTORER) {
569 DEREF_REG_PR = (unsigned long) ka->sa.sa_restorer | 0x1;
570
571 /*
572 * On SH5 all edited pointers are subject to NEFF
573 */
574 DEREF_REG_PR = (DEREF_REG_PR & NEFF_SIGN) ?
575 (DEREF_REG_PR | NEFF_MASK) : DEREF_REG_PR;
576 } else {
577		/*
578		 * Different approach on SH5:
579		 * . endianness-independent asm code gets placed in entry.S,
580		 *   limited to four instructions (two long longs in size).
581		 * . error checking is done on this branch only.
582		 * . the icache flush is needed only here, where the
583		 *   trampoline has just been written.
584		 * . all edited pointers are subject to NEFF.
585		 * . being code, the linker sets the SHmedia bit, so
586		 *   always dereference with index -1.
587		 */
588
589 DEREF_REG_PR = (unsigned long) frame->retcode | 0x01;
590 DEREF_REG_PR = (DEREF_REG_PR & NEFF_SIGN) ?
591 (DEREF_REG_PR | NEFF_MASK) : DEREF_REG_PR;
592
593 if (__copy_to_user(frame->retcode,
594			   (void *)((unsigned long)sa_default_rt_restorer & (~1)), 16) != 0)
595 goto give_sigsegv;
596
597		flush_icache_range(DEREF_REG_PR-1, DEREF_REG_PR-1+16);
598 }
599
600 /*
601 * Set up registers for signal handler.
602 * All edited pointers are subject to NEFF.
603 */
604 regs->regs[REG_SP] = (unsigned long) frame;
605 regs->regs[REG_SP] = (regs->regs[REG_SP] & NEFF_SIGN) ?
606 (regs->regs[REG_SP] | NEFF_MASK) : regs->regs[REG_SP];
607 regs->regs[REG_ARG1] = signal; /* Arg for signal handler */
608 regs->regs[REG_ARG2] = (unsigned long long)(unsigned long)(signed long)&frame->info;
609 regs->regs[REG_ARG3] = (unsigned long long)(unsigned long)(signed long)&frame->uc.uc_mcontext;
610 regs->pc = (unsigned long) ka->sa.sa_handler;
611 regs->pc = (regs->pc & NEFF_SIGN) ? (regs->pc | NEFF_MASK) : regs->pc;
612
613 set_fs(USER_DS);
614
615#if DEBUG_SIG
616 /* Broken %016Lx */
617 printk("SIG deliver (#%d,%s:%d): sp=%p pc=%08Lx%08Lx link=%08Lx%08Lx\n",
618 signal,
619 current->comm, current->pid, frame,
620 regs->pc >> 32, regs->pc & 0xffffffff,
621 DEREF_REG_PR >> 32, DEREF_REG_PR & 0xffffffff);
622#endif
623
624 return;
625
626give_sigsegv:
627 force_sigsegv(sig, current);
628}
629
630/*
631 * OK, we're invoking a handler
632 */
633
634static void
635handle_signal(unsigned long sig, siginfo_t *info, struct k_sigaction *ka,
636 sigset_t *oldset, struct pt_regs * regs)
637{
638 /* Are we from a system call? */
639 if (regs->syscall_nr >= 0) {
640 /* If so, check system call restarting.. */
641 switch (regs->regs[REG_RET]) {
642 case -ERESTART_RESTARTBLOCK:
643 case -ERESTARTNOHAND:
644 regs->regs[REG_RET] = -EINTR;
645 break;
646
647 case -ERESTARTSYS:
648 if (!(ka->sa.sa_flags & SA_RESTART)) {
649 regs->regs[REG_RET] = -EINTR;
650 break;
651 }
652 /* fallthrough */
653 case -ERESTARTNOINTR:
654 /* Decode syscall # */
655 regs->regs[REG_RET] = regs->syscall_nr;
656 regs->pc -= 4;
657 }
658 }
659
660 /* Set up the stack frame */
661 if (ka->sa.sa_flags & SA_SIGINFO)
662 setup_rt_frame(sig, ka, info, oldset, regs);
663 else
664 setup_frame(sig, ka, oldset, regs);
665
666 spin_lock_irq(&current->sighand->siglock);
667 sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask);
668 if (!(ka->sa.sa_flags & SA_NODEFER))
669 sigaddset(&current->blocked,sig);
670 recalc_sigpending();
671 spin_unlock_irq(&current->sighand->siglock);
672}
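A concrete timeline makes the restart switch clearer; this is a sketch, not a trace of a real run:

    /* 1. read() blocks; a signal arrives and the syscall returns
     *    -ERESTARTSYS in regs->regs[REG_RET].
     * 2. The handler was installed without SA_RESTART, so REG_RET is
     *    rewritten to -EINTR before the handler frame is pushed:
     *    userspace sees read() fail with EINTR.
     * 3. Had SA_RESTART been set (or for -ERESTARTNOINTR), the syscall
     *    number would be put back in REG_RET and pc -= 4 would re-execute
     *    the trap instruction once the handler returns.
     */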
673
674/*
675 * Note that 'init' is a special process: it doesn't get signals it doesn't
676 * want to handle. Thus you cannot kill init, even with a SIGKILL, by
677 * mistake.
678 *
679 * Note that we go through the signals twice: once to check the signals that
680 * the kernel can handle, and then we build all the user-level signal handling
681 * stack-frames in one go after that.
682 */
683int do_signal(struct pt_regs *regs, sigset_t *oldset)
684{
685 siginfo_t info;
686 int signr;
687 struct k_sigaction ka;
688
689 /*
690 * We want the common case to go fast, which
691 * is why we may in certain cases get here from
692 * kernel mode. Just return without doing anything
693 * if so.
694 */
695 if (!user_mode(regs))
696 return 1;
697
698 if (try_to_freeze())
699 goto no_signal;
700
701 if (test_thread_flag(TIF_RESTORE_SIGMASK))
702 oldset = &current->saved_sigmask;
703 else if (!oldset)
704 oldset = &current->blocked;
705
706 signr = get_signal_to_deliver(&info, &ka, regs, 0);
707
708 if (signr > 0) {
709 /* Whee! Actually deliver the signal. */
710 handle_signal(signr, &info, &ka, oldset, regs);
711
712 /*
713 * If a signal was successfully delivered, the saved sigmask
714 * is in its frame, and we can clear the TIF_RESTORE_SIGMASK
715 * flag.
716 */
717 if (test_thread_flag(TIF_RESTORE_SIGMASK))
718 clear_thread_flag(TIF_RESTORE_SIGMASK);
719
720 return 1;
721 }
722
723no_signal:
724 /* Did we come from a system call? */
725 if (regs->syscall_nr >= 0) {
726 /* Restart the system call - no handlers present */
727 switch (regs->regs[REG_RET]) {
728 case -ERESTARTNOHAND:
729 case -ERESTARTSYS:
730 case -ERESTARTNOINTR:
731 /* Decode Syscall # */
732 regs->regs[REG_RET] = regs->syscall_nr;
733 regs->pc -= 4;
734 break;
735
736 case -ERESTART_RESTARTBLOCK:
737 regs->regs[REG_RET] = __NR_restart_syscall;
738 regs->pc -= 4;
739 break;
740 }
741 }
742
743 /* No signal to deliver -- put the saved sigmask back */
744 if (test_thread_flag(TIF_RESTORE_SIGMASK)) {
745 clear_thread_flag(TIF_RESTORE_SIGMASK);
746 sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL);
747 }
748
749 return 0;
750}
diff --git a/arch/sh64/kernel/switchto.S b/arch/sh64/kernel/switchto.S
deleted file mode 100644
index 45b2d90eed7d..000000000000
--- a/arch/sh64/kernel/switchto.S
+++ /dev/null
@@ -1,198 +0,0 @@
1/*
2 * arch/sh64/kernel/switchto.S
3 *
4 * sh64 context switch
5 *
6 * Copyright (C) 2004 Richard Curnow
7 *
8 * This file is subject to the terms and conditions of the GNU General Public
9 * License. See the file "COPYING" in the main directory of this archive
10 * for more details.
11*/
12
13 .section .text..SHmedia32,"ax"
14 .little
15
16 .balign 32
17
18 .type sh64_switch_to,@function
19 .global sh64_switch_to
20 .global __sh64_switch_to_end
21sh64_switch_to:
22
23/* Incoming args
24 r2 - prev
25 r3 - &prev->thread
26 r4 - next
27 r5 - &next->thread
28
29 Outgoing results
30 r2 - last (=prev) : this just stays in r2 throughout
31
32 Want to create a full (struct pt_regs) on the stack to allow backtracing
33 functions to work. However, we only need to populate the callee-save
34 register slots in this structure; since we're a function our ancestors must
35 have themselves preserved all caller saved state in the stack. This saves
36 some wasted effort since we won't need to look at the values.
37
38 In particular, all caller-save registers are immediately available for
39 scratch use.
40
41*/
42
43#define FRAME_SIZE (76*8 + 8)
44
45 movi FRAME_SIZE, r0
46 sub.l r15, r0, r15
47 ! Do normal-style register save to support backtrace
48
49 st.l r15, 0, r18 ! save link reg
50 st.l r15, 4, r14 ! save fp
51 add.l r15, r63, r14 ! setup frame pointer
52
53 ! hopefully this looks normal to the backtrace now.
54
55 addi.l r15, 8, r1 ! base of pt_regs
56 addi.l r1, 24, r0 ! base of pt_regs.regs
57 addi.l r0, (63*8), r8 ! base of pt_regs.trregs
58
59 /* Note : to be fixed?
60 struct pt_regs is really designed for holding the state on entry
61 to an exception, i.e. pc,sr,regs etc. However, for the context
62 switch state, some of this is not required. But the unwinder takes
63 struct pt_regs * as an arg so we have to build this structure
64 to allow unwinding switched tasks in show_state() */
65
66 st.q r0, ( 9*8), r9
67 st.q r0, (10*8), r10
68 st.q r0, (11*8), r11
69 st.q r0, (12*8), r12
70 st.q r0, (13*8), r13
71 st.q r0, (14*8), r14 ! for unwind, want to look as though we took a trap at
72 ! the point where the process is left in suspended animation, i.e. current
73 ! fp here, not the saved one.
74 st.q r0, (16*8), r16
75
76 st.q r0, (24*8), r24
77 st.q r0, (25*8), r25
78 st.q r0, (26*8), r26
79 st.q r0, (27*8), r27
80 st.q r0, (28*8), r28
81 st.q r0, (29*8), r29
82 st.q r0, (30*8), r30
83 st.q r0, (31*8), r31
84 st.q r0, (32*8), r32
85 st.q r0, (33*8), r33
86 st.q r0, (34*8), r34
87 st.q r0, (35*8), r35
88
89 st.q r0, (44*8), r44
90 st.q r0, (45*8), r45
91 st.q r0, (46*8), r46
92 st.q r0, (47*8), r47
93 st.q r0, (48*8), r48
94 st.q r0, (49*8), r49
95 st.q r0, (50*8), r50
96 st.q r0, (51*8), r51
97 st.q r0, (52*8), r52
98 st.q r0, (53*8), r53
99 st.q r0, (54*8), r54
100 st.q r0, (55*8), r55
101 st.q r0, (56*8), r56
102 st.q r0, (57*8), r57
103 st.q r0, (58*8), r58
104 st.q r0, (59*8), r59
105
106 ! do this early as pta->gettr has no pipeline forwarding (=> 5 cycle latency)
107	! Use a local label to avoid creating a symbol that will confuse the
108 ! backtrace
109 pta .Lsave_pc, tr0
110
111 gettr tr5, r45
112 gettr tr6, r46
113 gettr tr7, r47
114 st.q r8, (5*8), r45
115 st.q r8, (6*8), r46
116 st.q r8, (7*8), r47
117
118 ! Now switch context
119 gettr tr0, r9
120 st.l r3, 0, r15 ! prev->thread.sp
121 st.l r3, 8, r1 ! prev->thread.kregs
122 st.l r3, 4, r9 ! prev->thread.pc
123 st.q r1, 0, r9 ! save prev->thread.pc into pt_regs->pc
124
125 ! Load PC for next task (init value or save_pc later)
126 ld.l r5, 4, r18 ! next->thread.pc
127 ! Switch stacks
128 ld.l r5, 0, r15 ! next->thread.sp
129 ptabs r18, tr0
130
131 ! Update current
132 ld.l r4, 4, r9 ! next->thread_info (2nd element of next task_struct)
133 putcon r9, kcr0 ! current = next->thread_info
134
135 ! go to save_pc for a reschedule, or the initial thread.pc for a new process
136 blink tr0, r63
137
138 ! Restore (when we come back to a previously saved task)
139.Lsave_pc:
140 addi.l r15, 32, r0 ! r0 = next's regs
141 addi.l r0, (63*8), r8 ! r8 = next's tr_regs
142
143 ld.q r8, (5*8), r45
144 ld.q r8, (6*8), r46
145 ld.q r8, (7*8), r47
146 ptabs r45, tr5
147 ptabs r46, tr6
148 ptabs r47, tr7
149
150 ld.q r0, ( 9*8), r9
151 ld.q r0, (10*8), r10
152 ld.q r0, (11*8), r11
153 ld.q r0, (12*8), r12
154 ld.q r0, (13*8), r13
155 ld.q r0, (14*8), r14
156 ld.q r0, (16*8), r16
157
158 ld.q r0, (24*8), r24
159 ld.q r0, (25*8), r25
160 ld.q r0, (26*8), r26
161 ld.q r0, (27*8), r27
162 ld.q r0, (28*8), r28
163 ld.q r0, (29*8), r29
164 ld.q r0, (30*8), r30
165 ld.q r0, (31*8), r31
166 ld.q r0, (32*8), r32
167 ld.q r0, (33*8), r33
168 ld.q r0, (34*8), r34
169 ld.q r0, (35*8), r35
170
171 ld.q r0, (44*8), r44
172 ld.q r0, (45*8), r45
173 ld.q r0, (46*8), r46
174 ld.q r0, (47*8), r47
175 ld.q r0, (48*8), r48
176 ld.q r0, (49*8), r49
177 ld.q r0, (50*8), r50
178 ld.q r0, (51*8), r51
179 ld.q r0, (52*8), r52
180 ld.q r0, (53*8), r53
181 ld.q r0, (54*8), r54
182 ld.q r0, (55*8), r55
183 ld.q r0, (56*8), r56
184 ld.q r0, (57*8), r57
185 ld.q r0, (58*8), r58
186 ld.q r0, (59*8), r59
187
188 ! epilogue
189 ld.l r15, 0, r18
190 ld.l r15, 4, r14
191 ptabs r18, tr0
192 movi FRAME_SIZE, r0
193 add r15, r0, r15
194 blink tr0, r63
195__sh64_switch_to_end:
196.LFE1:
197 .size sh64_switch_to,.LFE1-sh64_switch_to
198
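For readers not fluent in SHmedia, the switch reduces to a few C-level steps. This pseudo-C only mirrors the stores and loads above (offsets 0, 4 and 8 into the thread struct correspond to sp, pc and kregs, as annotated in the assembly) and is illustrative, not a drop-in replacement:

    /* Pseudo-C mirror of sh64_switch_to(), illustrative only. */
    prev->thread.sp    = stack_pointer;     /* st.l r3, 0, r15 */
    prev->thread.pc    = &&resume;          /* st.l r3, 4, r9  */
    prev->thread.kregs = fake_pt_regs;      /* st.l r3, 8, r1  */

    stack_pointer = next->thread.sp;        /* ld.l r5, 0, r15 */
    set_current(next->thread_info);         /* putcon r9, kcr0 */
    goto *next->thread.pc;                  /* blink tr0, r63  */

    resume: /* .Lsave_pc: restore callee-saved state and return */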
diff --git a/arch/sh64/kernel/sys_sh64.c b/arch/sh64/kernel/sys_sh64.c
deleted file mode 100644
index de0a303ba26f..000000000000
--- a/arch/sh64/kernel/sys_sh64.c
+++ /dev/null
@@ -1,304 +0,0 @@
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * arch/sh64/kernel/sys_sh64.c
7 *
8 * Copyright (C) 2000, 2001 Paolo Alberelli
9 *
10 * This file contains various random system calls that
11 * have a non-standard calling sequence on the Linux/SH5
12 * platform.
13 *
14 * Mostly taken from i386 version.
15 *
16 */
17
18#include <linux/errno.h>
19#include <linux/rwsem.h>
20#include <linux/sched.h>
21#include <linux/mm.h>
22#include <linux/fs.h>
23#include <linux/smp.h>
24#include <linux/sem.h>
25#include <linux/msg.h>
26#include <linux/shm.h>
27#include <linux/stat.h>
28#include <linux/mman.h>
29#include <linux/file.h>
30#include <linux/utsname.h>
31#include <linux/syscalls.h>
32#include <linux/ipc.h>
33#include <asm/uaccess.h>
34#include <asm/ptrace.h>
35#include <asm/unistd.h>
36
37#define REG_3 3
38
39/*
40 * sys_pipe() is the normal C calling standard for creating
41 * a pipe. It's not the way Unix traditionally does this, though.
42 */
43#ifdef NEW_PIPE_IMPLEMENTATION
44asmlinkage int sys_pipe(unsigned long * fildes,
45 unsigned long dummy_r3,
46 unsigned long dummy_r4,
47 unsigned long dummy_r5,
48 unsigned long dummy_r6,
49 unsigned long dummy_r7,
50 struct pt_regs * regs) /* r8 = pt_regs forced by entry.S */
51{
52 int fd[2];
53 int ret;
54
55 ret = do_pipe(fd);
56 if (ret == 0)
57 /*
58 ***********************************************************************
59 * To avoid the copy_to_user we prefer to break the ABIs convention, *
60 * packing the valid pair of file IDs into a single register (r3); *
61 * while r2 is the return code as defined by the sh5-ABIs. *
62 * BE CAREFUL: pipe stub, into glibc, must be aware of this solution *
63 ***********************************************************************
64
65#ifdef __LITTLE_ENDIAN__
66 regs->regs[REG_3] = (((unsigned long long) fd[1]) << 32) | ((unsigned long long) fd[0]);
67#else
68 regs->regs[REG_3] = (((unsigned long long) fd[0]) << 32) | ((unsigned long long) fd[1]);
69#endif
70
71 */
72	/* although not very clever, this is endianness-independent */
73 regs->regs[REG_3] = (unsigned long long) *((unsigned long long *) fd);
74
75 return ret;
76}
77
78#else
79asmlinkage int sys_pipe(unsigned long * fildes)
80{
81 int fd[2];
82 int error;
83
84 error = do_pipe(fd);
85 if (!error) {
86 if (copy_to_user(fildes, fd, 2*sizeof(int)))
87 error = -EFAULT;
88 }
89 return error;
90}
91
92#endif
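If the NEW_PIPE_IMPLEMENTATION variant is used, both descriptors come back in one 64-bit register and the userspace stub must unpack them. A hypothetical little-endian glibc-side sketch (the real stub lives in libc, as the comment warns):

    /* r2 carries the return code, r3 the packed fd pair. Since the
     * kernel stores fd[0] at the low address, fd[0] is the low word
     * on a little-endian build. */
    int fd0 = (int)(r3 & 0xffffffffULL);   /* fd[0] */
    int fd1 = (int)(r3 >> 32);             /* fd[1] */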
93
94/*
95 * To avoid cache aliasing, we map the shared page with the same colour.
96 */
97#define COLOUR_ALIGN(addr) (((addr)+SHMLBA-1)&~(SHMLBA-1))
98
99unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
100 unsigned long len, unsigned long pgoff, unsigned long flags)
101{
102 struct vm_area_struct *vma;
103
104 if (flags & MAP_FIXED) {
105 /* We do not accept a shared mapping if it would violate
106 * cache aliasing constraints.
107 */
108 if ((flags & MAP_SHARED) && (addr & (SHMLBA - 1)))
109 return -EINVAL;
110 return addr;
111 }
112
113 if (len > TASK_SIZE)
114 return -ENOMEM;
115 if (!addr)
116 addr = TASK_UNMAPPED_BASE;
117
118 if (flags & MAP_PRIVATE)
119 addr = PAGE_ALIGN(addr);
120 else
121 addr = COLOUR_ALIGN(addr);
122
123 for (vma = find_vma(current->mm, addr); ; vma = vma->vm_next) {
124 /* At this point: (!vma || addr < vma->vm_end). */
125 if (TASK_SIZE - len < addr)
126 return -ENOMEM;
127 if (!vma || addr + len <= vma->vm_start)
128 return addr;
129 addr = vma->vm_end;
130 if (!(flags & MAP_PRIVATE))
131 addr = COLOUR_ALIGN(addr);
132 }
133}
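COLOUR_ALIGN() is ordinary round-up-to-a-multiple arithmetic: aligning shared mappings to SHMLBA ensures that all mappings of a page land on the same cache colour. Worked through with an assumed SHMLBA of 0x4000:

    /* COLOUR_ALIGN(addr) = (addr + 0x3fff) & ~0x3fff
     *
     *   COLOUR_ALIGN(0x00012345) = 0x00016344 & ~0x3fff = 0x00014000
     *   COLOUR_ALIGN(0x00014000) = 0x00014000           (already aligned)
     */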
134
135/* common code for old and new mmaps */
136static inline long do_mmap2(
137 unsigned long addr, unsigned long len,
138 unsigned long prot, unsigned long flags,
139 unsigned long fd, unsigned long pgoff)
140{
141 int error = -EBADF;
142 struct file * file = NULL;
143
144 flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
145 if (!(flags & MAP_ANONYMOUS)) {
146 file = fget(fd);
147 if (!file)
148 goto out;
149 }
150
151 down_write(&current->mm->mmap_sem);
152 error = do_mmap_pgoff(file, addr, len, prot, flags, pgoff);
153 up_write(&current->mm->mmap_sem);
154
155 if (file)
156 fput(file);
157out:
158 return error;
159}
160
161asmlinkage long sys_mmap2(unsigned long addr, unsigned long len,
162 unsigned long prot, unsigned long flags,
163 unsigned long fd, unsigned long pgoff)
164{
165 return do_mmap2(addr, len, prot, flags, fd, pgoff);
166}
167
168asmlinkage int old_mmap(unsigned long addr, unsigned long len,
169 unsigned long prot, unsigned long flags,
170 int fd, unsigned long off)
171{
172 if (off & ~PAGE_MASK)
173 return -EINVAL;
174 return do_mmap2(addr, len, prot, flags, fd, off>>PAGE_SHIFT);
175}
176
177/*
178 * sys_ipc() is the de-multiplexer for the SysV IPC calls..
179 *
180 * This is really horribly ugly.
181 */
182asmlinkage int sys_ipc(uint call, int first, int second,
183 int third, void __user *ptr, long fifth)
184{
185 int version, ret;
186
187 version = call >> 16; /* hack for backward compatibility */
188 call &= 0xffff;
189
190 if (call <= SEMCTL)
191 switch (call) {
192 case SEMOP:
193 return sys_semtimedop(first, (struct sembuf __user *)ptr,
194 second, NULL);
195 case SEMTIMEDOP:
196 return sys_semtimedop(first, (struct sembuf __user *)ptr,
197 second,
198 (const struct timespec __user *)fifth);
199 case SEMGET:
200 return sys_semget (first, second, third);
201 case SEMCTL: {
202 union semun fourth;
203 if (!ptr)
204 return -EINVAL;
205 if (get_user(fourth.__pad, (void * __user *) ptr))
206 return -EFAULT;
207 return sys_semctl (first, second, third, fourth);
208 }
209 default:
210 return -EINVAL;
211 }
212
213 if (call <= MSGCTL)
214 switch (call) {
215 case MSGSND:
216 return sys_msgsnd (first, (struct msgbuf __user *) ptr,
217 second, third);
218 case MSGRCV:
219 switch (version) {
220 case 0: {
221 struct ipc_kludge tmp;
222 if (!ptr)
223 return -EINVAL;
224
225 if (copy_from_user(&tmp,
226 (struct ipc_kludge __user *) ptr,
227 sizeof (tmp)))
228 return -EFAULT;
229 return sys_msgrcv (first, tmp.msgp, second,
230 tmp.msgtyp, third);
231 }
232 default:
233 return sys_msgrcv (first,
234 (struct msgbuf __user *) ptr,
235 second, fifth, third);
236 }
237 case MSGGET:
238 return sys_msgget ((key_t) first, second);
239 case MSGCTL:
240 return sys_msgctl (first, second,
241 (struct msqid_ds __user *) ptr);
242 default:
243 return -EINVAL;
244 }
245 if (call <= SHMCTL)
246 switch (call) {
247 case SHMAT:
248 switch (version) {
249 default: {
250 ulong raddr;
251 ret = do_shmat (first, (char __user *) ptr,
252 second, &raddr);
253 if (ret)
254 return ret;
255 return put_user (raddr, (ulong __user *) third);
256 }
257 case 1: /* iBCS2 emulator entry point */
258 if (!segment_eq(get_fs(), get_ds()))
259 return -EINVAL;
260 return do_shmat (first, (char __user *) ptr,
261 second, (ulong *) third);
262 }
263 case SHMDT:
264 return sys_shmdt ((char __user *)ptr);
265 case SHMGET:
266 return sys_shmget (first, second, third);
267 case SHMCTL:
268 return sys_shmctl (first, second,
269 (struct shmid_ds __user *) ptr);
270 default:
271 return -EINVAL;
272 }
273
274 return -EINVAL;
275}
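The call argument packs an interface version into its top 16 bits, which is why sys_ipc() begins by splitting it. For example (a sketch; the constants follow the standard ipc demultiplexer convention):

    /* call = (version << 16) | op
     *
     *   sys_ipc(MSGRCV, ...)              version 0: ptr points to a
     *                                     struct ipc_kludge
     *   sys_ipc((1 << 16) | MSGRCV, ...)  version 1: ptr is the msgbuf
     *                                     itself, fifth is the msgtyp
     */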
276
277asmlinkage int sys_uname(struct old_utsname * name)
278{
279 int err;
280 if (!name)
281 return -EFAULT;
282 down_read(&uts_sem);
283 err = copy_to_user(name, utsname(), sizeof (*name));
284 up_read(&uts_sem);
285 return err?-EFAULT:0;
286}
287
288/*
289 * Do a system call from kernel instead of calling sys_execve so we
290 * end up with proper pt_regs.
291 */
292int kernel_execve(const char *filename, char *const argv[], char *const envp[])
293{
294 register unsigned long __sc0 __asm__ ("r9") = ((0x13 << 16) | __NR_execve);
295 register unsigned long __sc2 __asm__ ("r2") = (unsigned long) filename;
296 register unsigned long __sc3 __asm__ ("r3") = (unsigned long) argv;
297 register unsigned long __sc4 __asm__ ("r4") = (unsigned long) envp;
298 __asm__ __volatile__ ("trapa %1 !\t\t\t execve(%2,%3,%4)"
299 : "=r" (__sc0)
300 : "r" (__sc0), "r" (__sc2), "r" (__sc3), "r" (__sc4) );
301 __asm__ __volatile__ ("!dummy %0 %1 %2 %3"
302 : : "r" (__sc0), "r" (__sc2), "r" (__sc3), "r" (__sc4) : "memory");
303 return __sc0;
304}
diff --git a/arch/sh64/kernel/syscalls.S b/arch/sh64/kernel/syscalls.S
deleted file mode 100644
index abb94c05d07a..000000000000
--- a/arch/sh64/kernel/syscalls.S
+++ /dev/null
@@ -1,381 +0,0 @@
1/*
2 * arch/sh64/kernel/syscalls.S
3 *
4 * Copyright (C) 2000, 2001 Paolo Alberelli
5 * Copyright (C) 2004 - 2007 Paul Mundt
6 * Copyright (C) 2003, 2004 Richard Curnow
7 *
8 * This file is subject to the terms and conditions of the GNU General Public
9 * License. See the file "COPYING" in the main directory of this archive
10 * for more details.
11 */
12
13#include <linux/sys.h>
14
15 .section .data, "aw"
16 .balign 32
17
18/*
19 * System calls jump table
20 */
21 .globl sys_call_table
22sys_call_table:
23 .long sys_restart_syscall /* 0 - old "setup()" system call */
24 .long sys_exit
25 .long sys_fork
26 .long sys_read
27 .long sys_write
28 .long sys_open /* 5 */
29 .long sys_close
30 .long sys_waitpid
31 .long sys_creat
32 .long sys_link
33 .long sys_unlink /* 10 */
34 .long sys_execve
35 .long sys_chdir
36 .long sys_time
37 .long sys_mknod
38 .long sys_chmod /* 15 */
39 .long sys_lchown16
40 .long sys_ni_syscall /* old break syscall holder */
41 .long sys_stat
42 .long sys_lseek
43 .long sys_getpid /* 20 */
44 .long sys_mount
45 .long sys_oldumount
46 .long sys_setuid16
47 .long sys_getuid16
48 .long sys_stime /* 25 */
49 .long sh64_ptrace
50 .long sys_alarm
51 .long sys_fstat
52 .long sys_pause
53 .long sys_utime /* 30 */
54 .long sys_ni_syscall /* old stty syscall holder */
55 .long sys_ni_syscall /* old gtty syscall holder */
56 .long sys_access
57 .long sys_nice
58 .long sys_ni_syscall /* 35 */ /* old ftime syscall holder */
59 .long sys_sync
60 .long sys_kill
61 .long sys_rename
62 .long sys_mkdir
63 .long sys_rmdir /* 40 */
64 .long sys_dup
65 .long sys_pipe
66 .long sys_times
67 .long sys_ni_syscall /* old prof syscall holder */
68 .long sys_brk /* 45 */
69 .long sys_setgid16
70 .long sys_getgid16
71 .long sys_signal
72 .long sys_geteuid16
73 .long sys_getegid16 /* 50 */
74 .long sys_acct
75	.long sys_umount	/* recycled never used phys() */
76 .long sys_ni_syscall /* old lock syscall holder */
77 .long sys_ioctl
78 .long sys_fcntl /* 55 */
79 .long sys_ni_syscall /* old mpx syscall holder */
80 .long sys_setpgid
81 .long sys_ni_syscall /* old ulimit syscall holder */
82 .long sys_ni_syscall /* sys_olduname */
83 .long sys_umask /* 60 */
84 .long sys_chroot
85 .long sys_ustat
86 .long sys_dup2
87 .long sys_getppid
88 .long sys_getpgrp /* 65 */
89 .long sys_setsid
90 .long sys_sigaction
91 .long sys_sgetmask
92 .long sys_ssetmask
93 .long sys_setreuid16 /* 70 */
94 .long sys_setregid16
95 .long sys_sigsuspend
96 .long sys_sigpending
97 .long sys_sethostname
98 .long sys_setrlimit /* 75 */
99 .long sys_old_getrlimit
100 .long sys_getrusage
101 .long sys_gettimeofday
102 .long sys_settimeofday
103 .long sys_getgroups16 /* 80 */
104 .long sys_setgroups16
105 .long sys_ni_syscall /* sys_oldselect */
106 .long sys_symlink
107 .long sys_lstat
108 .long sys_readlink /* 85 */
109 .long sys_uselib
110 .long sys_swapon
111 .long sys_reboot
112 .long old_readdir
113 .long old_mmap /* 90 */
114 .long sys_munmap
115 .long sys_truncate
116 .long sys_ftruncate
117 .long sys_fchmod
118 .long sys_fchown16 /* 95 */
119 .long sys_getpriority
120 .long sys_setpriority
121 .long sys_ni_syscall /* old profil syscall holder */
122 .long sys_statfs
123 .long sys_fstatfs /* 100 */
124 .long sys_ni_syscall /* ioperm */
125 .long sys_socketcall /* Obsolete implementation of socket syscall */
126 .long sys_syslog
127 .long sys_setitimer
128 .long sys_getitimer /* 105 */
129 .long sys_newstat
130 .long sys_newlstat
131 .long sys_newfstat
132 .long sys_uname
133 .long sys_ni_syscall /* 110 */ /* iopl */
134 .long sys_vhangup
135 .long sys_ni_syscall /* idle */
136 .long sys_ni_syscall /* vm86old */
137 .long sys_wait4
138 .long sys_swapoff /* 115 */
139 .long sys_sysinfo
140 .long sys_ipc /* Obsolete ipc syscall implementation */
141 .long sys_fsync
142 .long sys_sigreturn
143 .long sys_clone /* 120 */
144 .long sys_setdomainname
145 .long sys_newuname
146 .long sys_ni_syscall /* sys_modify_ldt */
147 .long sys_adjtimex
148 .long sys_mprotect /* 125 */
149 .long sys_sigprocmask
150 .long sys_ni_syscall /* old "create_module" */
151 .long sys_init_module
152 .long sys_delete_module
153 .long sys_ni_syscall /* 130: old "get_kernel_syms" */
154 .long sys_quotactl
155 .long sys_getpgid
156 .long sys_fchdir
157 .long sys_bdflush
158 .long sys_sysfs /* 135 */
159 .long sys_personality
160 .long sys_ni_syscall /* for afs_syscall */
161 .long sys_setfsuid16
162 .long sys_setfsgid16
163 .long sys_llseek /* 140 */
164 .long sys_getdents
165 .long sys_select
166 .long sys_flock
167 .long sys_msync
168 .long sys_readv /* 145 */
169 .long sys_writev
170 .long sys_getsid
171 .long sys_fdatasync
172 .long sys_sysctl
173 .long sys_mlock /* 150 */
174 .long sys_munlock
175 .long sys_mlockall
176 .long sys_munlockall
177 .long sys_sched_setparam
178 .long sys_sched_getparam /* 155 */
179 .long sys_sched_setscheduler
180 .long sys_sched_getscheduler
181 .long sys_sched_yield
182 .long sys_sched_get_priority_max
183 .long sys_sched_get_priority_min /* 160 */
184 .long sys_sched_rr_get_interval
185 .long sys_nanosleep
186 .long sys_mremap
187 .long sys_setresuid16
188 .long sys_getresuid16 /* 165 */
189 .long sys_ni_syscall /* vm86 */
190 .long sys_ni_syscall /* old "query_module" */
191 .long sys_poll
192 .long sys_nfsservctl
193 .long sys_setresgid16 /* 170 */
194 .long sys_getresgid16
195 .long sys_prctl
196 .long sys_rt_sigreturn
197 .long sys_rt_sigaction
198 .long sys_rt_sigprocmask /* 175 */
199 .long sys_rt_sigpending
200 .long sys_rt_sigtimedwait
201 .long sys_rt_sigqueueinfo
202 .long sys_rt_sigsuspend
203 .long sys_pread64 /* 180 */
204 .long sys_pwrite64
205 .long sys_chown16
206 .long sys_getcwd
207 .long sys_capget
208 .long sys_capset /* 185 */
209 .long sys_sigaltstack
210 .long sys_sendfile
211 .long sys_ni_syscall /* streams1 */
212 .long sys_ni_syscall /* streams2 */
213 .long sys_vfork /* 190 */
214 .long sys_getrlimit
215 .long sys_mmap2
216 .long sys_truncate64
217 .long sys_ftruncate64
218 .long sys_stat64 /* 195 */
219 .long sys_lstat64
220 .long sys_fstat64
221 .long sys_lchown
222 .long sys_getuid
223 .long sys_getgid /* 200 */
224 .long sys_geteuid
225 .long sys_getegid
226 .long sys_setreuid
227 .long sys_setregid
228 .long sys_getgroups /* 205 */
229 .long sys_setgroups
230 .long sys_fchown
231 .long sys_setresuid
232 .long sys_getresuid
233 .long sys_setresgid /* 210 */
234 .long sys_getresgid
235 .long sys_chown
236 .long sys_setuid
237 .long sys_setgid
238 .long sys_setfsuid /* 215 */
239 .long sys_setfsgid
240 .long sys_pivot_root
241 .long sys_mincore
242 .long sys_madvise
243 /* Broken-out socket family (maintain backwards compatibility in syscall
244 numbering with 2.4) */
245 .long sys_socket /* 220 */
246 .long sys_bind
247 .long sys_connect
248 .long sys_listen
249 .long sys_accept
250 .long sys_getsockname /* 225 */
251 .long sys_getpeername
252 .long sys_socketpair
253 .long sys_send
254 .long sys_sendto
255	.long sys_recv		/* 230 */
256 .long sys_recvfrom
257 .long sys_shutdown
258 .long sys_setsockopt
259 .long sys_getsockopt
260 .long sys_sendmsg /* 235 */
261 .long sys_recvmsg
262 /* Broken-out IPC family (maintain backwards compatibility in syscall
263 numbering with 2.4) */
264 .long sys_semop
265 .long sys_semget
266 .long sys_semctl
267 .long sys_msgsnd /* 240 */
268 .long sys_msgrcv
269 .long sys_msgget
270 .long sys_msgctl
271 .long sys_shmat
272 .long sys_shmdt /* 245 */
273 .long sys_shmget
274 .long sys_shmctl
275 /* Rest of syscalls listed in 2.4 i386 unistd.h */
276 .long sys_getdents64
277 .long sys_fcntl64
278 .long sys_ni_syscall /* 250 reserved for TUX */
279 .long sys_ni_syscall /* Reserved for Security */
280 .long sys_gettid
281 .long sys_readahead
282 .long sys_setxattr
283 .long sys_lsetxattr /* 255 */
284 .long sys_fsetxattr
285 .long sys_getxattr
286 .long sys_lgetxattr
287 .long sys_fgetxattr
288 .long sys_listxattr /* 260 */
289 .long sys_llistxattr
290 .long sys_flistxattr
291 .long sys_removexattr
292 .long sys_lremovexattr
293 .long sys_fremovexattr /* 265 */
294 .long sys_tkill
295 .long sys_sendfile64
296 .long sys_futex
297 .long sys_sched_setaffinity
298 .long sys_sched_getaffinity /* 270 */
299 .long sys_ni_syscall
300 .long sys_ni_syscall
301 .long sys_io_setup
302 .long sys_io_destroy
303 .long sys_io_getevents /* 275 */
304 .long sys_io_submit
305 .long sys_io_cancel
306 .long sys_fadvise64
307 .long sys_ni_syscall
308 .long sys_exit_group /* 280 */
309 /* Rest of new 2.6 syscalls */
310 .long sys_lookup_dcookie
311 .long sys_epoll_create
312 .long sys_epoll_ctl
313 .long sys_epoll_wait
314 .long sys_remap_file_pages /* 285 */
315 .long sys_set_tid_address
316 .long sys_timer_create
317 .long sys_timer_settime
318 .long sys_timer_gettime
319 .long sys_timer_getoverrun /* 290 */
320 .long sys_timer_delete
321 .long sys_clock_settime
322 .long sys_clock_gettime
323 .long sys_clock_getres
324 .long sys_clock_nanosleep /* 295 */
325 .long sys_statfs64
326 .long sys_fstatfs64
327 .long sys_tgkill
328 .long sys_utimes
329 .long sys_fadvise64_64 /* 300 */
330 .long sys_ni_syscall /* Reserved for vserver */
331 .long sys_ni_syscall /* Reserved for mbind */
332 .long sys_ni_syscall /* get_mempolicy */
333 .long sys_ni_syscall /* set_mempolicy */
334 .long sys_mq_open /* 305 */
335 .long sys_mq_unlink
336 .long sys_mq_timedsend
337 .long sys_mq_timedreceive
338 .long sys_mq_notify
339 .long sys_mq_getsetattr /* 310 */
340 .long sys_ni_syscall /* Reserved for kexec */
341 .long sys_waitid
342 .long sys_add_key
343 .long sys_request_key
344 .long sys_keyctl /* 315 */
345 .long sys_ioprio_set
346 .long sys_ioprio_get
347 .long sys_inotify_init
348 .long sys_inotify_add_watch
349 .long sys_inotify_rm_watch /* 320 */
350 .long sys_ni_syscall
351 .long sys_migrate_pages
352 .long sys_openat
353 .long sys_mkdirat
354 .long sys_mknodat /* 325 */
355 .long sys_fchownat
356 .long sys_futimesat
357 .long sys_fstatat64
358 .long sys_unlinkat
359 .long sys_renameat /* 330 */
360 .long sys_linkat
361 .long sys_symlinkat
362 .long sys_readlinkat
363 .long sys_fchmodat
364 .long sys_faccessat /* 335 */
365 .long sys_pselect6
366 .long sys_ppoll
367 .long sys_unshare
368 .long sys_set_robust_list
369 .long sys_get_robust_list /* 340 */
370 .long sys_splice
371 .long sys_sync_file_range
372 .long sys_tee
373 .long sys_vmsplice
374 .long sys_move_pages /* 345 */
375 .long sys_getcpu
376 .long sys_epoll_pwait
377 .long sys_utimensat
378 .long sys_signalfd
379 .long sys_timerfd /* 350 */
380 .long sys_eventfd
381 .long sys_fallocate
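How this table is indexed is not shown here, but kernel_execve() in sys_sh64.c (earlier in this patch) loads r9 with (0x13 << 16) | __NR_execve, which suggests the low 16 bits of r9 carry the syscall number and the upper bits a trap class. Presumably entry.S does something equivalent to the following; this is inferred, not taken from entry.S:

    /* Hypothetical C rendering of the dispatch; entry.S does this in
     * assembly. a1..a6 stand for the argument registers. */
    unsigned int nr = regs->regs[9] & 0xffff;
    long ret = (nr < NR_syscalls) ? sys_call_table[nr](a1, a2, a3, a4, a5, a6)
                                  : -ENOSYS;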
diff --git a/arch/sh64/kernel/time.c b/arch/sh64/kernel/time.c
deleted file mode 100644
index 06f3c179e345..000000000000
--- a/arch/sh64/kernel/time.c
+++ /dev/null
@@ -1,593 +0,0 @@
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * arch/sh64/kernel/time.c
7 *
8 * Copyright (C) 2000, 2001 Paolo Alberelli
9 * Copyright (C) 2003, 2004 Paul Mundt
10 * Copyright (C) 2003 Richard Curnow
11 *
12 * Original TMU/RTC code taken from sh version.
13 * Copyright (C) 1999 Tetsuya Okada & Niibe Yutaka
14 * Some code taken from i386 version.
15 * Copyright (C) 1991, 1992, 1995 Linus Torvalds
16 */
17
18#include <linux/errno.h>
19#include <linux/rwsem.h>
20#include <linux/sched.h>
21#include <linux/kernel.h>
22#include <linux/param.h>
23#include <linux/string.h>
24#include <linux/mm.h>
25#include <linux/interrupt.h>
26#include <linux/time.h>
27#include <linux/delay.h>
28#include <linux/init.h>
29#include <linux/profile.h>
30#include <linux/smp.h>
31#include <linux/module.h>
32#include <linux/bcd.h>
33
34#include <asm/registers.h> /* required by inline __asm__ stmt. */
35
36#include <asm/processor.h>
37#include <asm/uaccess.h>
38#include <asm/io.h>
39#include <asm/irq.h>
40#include <asm/delay.h>
41
42#include <linux/timex.h>
43#include <linux/irq.h>
44#include <asm/hardware.h>
45
46#define TMU_TOCR_INIT 0x00
47#define TMU0_TCR_INIT 0x0020
48#define TMU_TSTR_INIT 1
49#define TMU_TSTR_OFF 0
50
51/* RCR1 Bits */
52#define RCR1_CF 0x80 /* Carry Flag */
53#define RCR1_CIE 0x10 /* Carry Interrupt Enable */
54#define RCR1_AIE 0x08 /* Alarm Interrupt Enable */
55#define RCR1_AF 0x01 /* Alarm Flag */
56
57/* RCR2 Bits */
58#define RCR2_PEF 0x80 /* PEriodic interrupt Flag */
59#define RCR2_PESMASK 0x70 /* Periodic interrupt Set */
60#define RCR2_RTCEN 0x08 /* ENable RTC */
61#define RCR2_ADJ 0x04 /* ADJustment (30-second) */
62#define RCR2_RESET 0x02 /* Reset bit */
63#define RCR2_START 0x01 /* Start bit */
64
65/* Clock, Power and Reset Controller */
66#define CPRC_BLOCK_OFF 0x01010000
67#define CPRC_BASE	(PHYS_PERIPHERAL_BLOCK + CPRC_BLOCK_OFF)
68
69#define FRQCR (cprc_base+0x0)
70#define WTCSR (cprc_base+0x0018)
71#define STBCR (cprc_base+0x0030)
72
73/* Time Management Unit */
74#define TMU_BLOCK_OFF 0x01020000
75#define TMU_BASE	(PHYS_PERIPHERAL_BLOCK + TMU_BLOCK_OFF)
76#define TMU0_BASE	(tmu_base + 0x8 + (0xc * 0x0))
77#define TMU1_BASE	(tmu_base + 0x8 + (0xc * 0x1))
78#define TMU2_BASE	(tmu_base + 0x8 + (0xc * 0x2))
79
80#define TMU_TOCR	(tmu_base + 0x0)	/* Byte access */
81#define TMU_TSTR	(tmu_base + 0x4)	/* Byte access */
82
83#define TMU0_TCOR	(TMU0_BASE + 0x0)	/* Long access */
84#define TMU0_TCNT	(TMU0_BASE + 0x4)	/* Long access */
85#define TMU0_TCR	(TMU0_BASE + 0x8)	/* Word access */
86
87/* Real Time Clock */
88#define RTC_BLOCK_OFF 0x01040000
89#define RTC_BASE	(PHYS_PERIPHERAL_BLOCK + RTC_BLOCK_OFF)
90
91#define R64CNT		(rtc_base + 0x00)
92#define RSECCNT		(rtc_base + 0x04)
93#define RMINCNT		(rtc_base + 0x08)
94#define RHRCNT		(rtc_base + 0x0c)
95#define RWKCNT		(rtc_base + 0x10)
96#define RDAYCNT		(rtc_base + 0x14)
97#define RMONCNT		(rtc_base + 0x18)
98#define RYRCNT		(rtc_base + 0x1c)	/* 16bit */
99#define RSECAR		(rtc_base + 0x20)
100#define RMINAR		(rtc_base + 0x24)
101#define RHRAR		(rtc_base + 0x28)
102#define RWKAR		(rtc_base + 0x2c)
103#define RDAYAR		(rtc_base + 0x30)
104#define RMONAR		(rtc_base + 0x34)
105#define RCR1		(rtc_base + 0x38)
106#define RCR2		(rtc_base + 0x3c)
107
108#define TICK_SIZE (tick_nsec / 1000)
109
110static unsigned long tmu_base, rtc_base;
111unsigned long cprc_base;
112
113/* Variables to allow interpolation of time of day to resolution better than a
114 * jiffy. */
115
116/* This is effectively protected by xtime_lock */
117static unsigned long ctc_last_interrupt;
118static unsigned long long usecs_per_jiffy = 1000000/HZ; /* Approximation */
119
120#define CTC_JIFFY_SCALE_SHIFT 40
121
122/* 2**CTC_JIFFY_SCALE_SHIFT / ctc_ticks_per_jiffy */
123static unsigned long long scaled_recip_ctc_ticks_per_jiffy;
124
125/* Estimate number of microseconds that have elapsed since the last timer tick,
126 by scaling the delta that has occurred in the CTC register.
127
128 WARNING WARNING WARNING : This algorithm relies on the CTC decrementing at
129 the CPU clock rate. If the CPU sleeps, the CTC stops counting. Bear this
130 in mind if enabling SLEEP_WORKS in process.c. In that case, this algorithm
131 probably needs to use TMU.TCNT0 instead. This will work even if the CPU is
132 sleeping, though will be coarser.
133
134 FIXME : What if usecs_per_tick is moving around too much, e.g. if an adjtime
135 is running or if the freq or tick arguments of adjtimex are modified after
136 we have calibrated the scaling factor? This will result in either a jump at
137 the end of a tick period, or a wrap backwards at the start of the next one,
138 if the application is reading the time of day often enough. I think we
139 ought to do better than this. For this reason, usecs_per_jiffy is left
140 separated out in the calculation below. This allows some future hook into
141 the adjtime-related stuff in kernel/timer.c to remove this hazard.
142
143*/
144
145static unsigned long usecs_since_tick(void)
146{
147 unsigned long long current_ctc;
148 long ctc_ticks_since_interrupt;
149 unsigned long long ull_ctc_ticks_since_interrupt;
150 unsigned long result;
151
152 unsigned long long mul1_out;
153 unsigned long long mul1_out_high;
154 unsigned long long mul2_out_low, mul2_out_high;
155
156 /* Read CTC register */
157 asm ("getcon cr62, %0" : "=r" (current_ctc));
158 /* Note, the CTC counts down on each CPU clock, not up.
159 Note(2), use long type to get correct wraparound arithmetic when
160 the counter crosses zero. */
161 ctc_ticks_since_interrupt = (long) ctc_last_interrupt - (long) current_ctc;
162 ull_ctc_ticks_since_interrupt = (unsigned long long) ctc_ticks_since_interrupt;
163
164	/* Inline assembly to do 32x32->64 multiplies */
165 asm volatile ("mulu.l %1, %2, %0" :
166 "=r" (mul1_out) :
167 "r" (ull_ctc_ticks_since_interrupt), "r" (usecs_per_jiffy));
168
169 mul1_out_high = mul1_out >> 32;
170
171 asm volatile ("mulu.l %1, %2, %0" :
172 "=r" (mul2_out_low) :
173 "r" (mul1_out), "r" (scaled_recip_ctc_ticks_per_jiffy));
174
176	asm volatile ("mulu.l %1, %2, %0" :
177		      "=r" (mul2_out_high) :
178		      "r" (mul1_out_high), "r" (scaled_recip_ctc_ticks_per_jiffy));
180
181 result = (unsigned long) (((mul2_out_high << 32) + mul2_out_low) >> CTC_JIFFY_SCALE_SHIFT);
182
183 return result;
184}
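To see the fixed-point scheme above in isolation: the division 2^40 / ctc_ticks_per_jiffy is computed once at boot, after which each time-of-day read needs only multiplies and a shift. Below is a user-space sketch of the same arithmetic; the 400 MHz CPU clock and HZ=100 are invented figures, and the single 64-bit multiply stands in for the chain of mulu.l steps above (mulu.l multiplies 32-bit halves, hence the splitting in the kernel code).

#include <stdio.h>
#include <stdint.h>

#define CTC_JIFFY_SCALE_SHIFT 40

int main(void)
{
	/* Assumed figures: a 400 MHz CPU with HZ == 100. */
	uint64_t ctc_ticks_per_jiffy = 400000000ULL / 100;	/* 4,000,000 */
	uint64_t usecs_per_jiffy = 1000000 / 100;		/* 10,000 */

	/* Computed once at boot: 2^40 / ctc_ticks_per_jiffy. */
	uint64_t recip = (1ULL << CTC_JIFFY_SCALE_SHIFT) / ctc_ticks_per_jiffy;

	/* Pretend the CTC has decremented 1,000,000 ticks since the tick. */
	uint64_t delta = 1000000;

	/* Two multiplies and a shift replace a divide on every read. */
	uint64_t usec = (delta * usecs_per_jiffy * recip)
				>> CTC_JIFFY_SCALE_SHIFT;

	/* ~2500 usec (a quarter jiffy); integer rounding prints 2499. */
	printf("%llu usec since tick\n", (unsigned long long)usec);
	return 0;
}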
185
186void do_gettimeofday(struct timeval *tv)
187{
188 unsigned long flags;
189 unsigned long seq;
190 unsigned long usec, sec;
191
192 do {
193 seq = read_seqbegin_irqsave(&xtime_lock, flags);
194 usec = usecs_since_tick();
195 sec = xtime.tv_sec;
196 usec += xtime.tv_nsec / 1000;
197 } while (read_seqretry_irqrestore(&xtime_lock, seq, flags));
198
199 while (usec >= 1000000) {
200 usec -= 1000000;
201 sec++;
202 }
203
204 tv->tv_sec = sec;
205 tv->tv_usec = usec;
206}
207
208int do_settimeofday(struct timespec *tv)
209{
210 time_t wtm_sec, sec = tv->tv_sec;
211 long wtm_nsec, nsec = tv->tv_nsec;
212
213 if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
214 return -EINVAL;
215
216 write_seqlock_irq(&xtime_lock);
217 /*
218 * This is revolting. We need to set "xtime" correctly. However, the
219 * value in this location is the value at the most recent update of
220 * wall time. Discover what correction gettimeofday() would have
221 * made, and then undo it!
222 */
223 nsec -= 1000 * usecs_since_tick();
224
225 wtm_sec = wall_to_monotonic.tv_sec + (xtime.tv_sec - sec);
226 wtm_nsec = wall_to_monotonic.tv_nsec + (xtime.tv_nsec - nsec);
227
228 set_normalized_timespec(&xtime, sec, nsec);
229 set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec);
230
231 ntp_clear();
232 write_sequnlock_irq(&xtime_lock);
233 clock_was_set();
234
235 return 0;
236}
237EXPORT_SYMBOL(do_settimeofday);
238
239static int set_rtc_time(unsigned long nowtime)
240{
241 int retval = 0;
242 int real_seconds, real_minutes, cmos_minutes;
243
244 ctrl_outb(RCR2_RESET, RCR2); /* Reset pre-scaler & stop RTC */
245
246 cmos_minutes = ctrl_inb(RMINCNT);
247 BCD_TO_BIN(cmos_minutes);
248
249 /*
250 * since we're only adjusting minutes and seconds,
251 * don't interfere with hour overflow. This avoids
252 * messing with unknown time zones but requires your
253 * RTC not to be off by more than 15 minutes
254 */
255 real_seconds = nowtime % 60;
256 real_minutes = nowtime / 60;
257 if (((abs(real_minutes - cmos_minutes) + 15)/30) & 1)
258 real_minutes += 30; /* correct for half hour time zone */
259 real_minutes %= 60;
260
261 if (abs(real_minutes - cmos_minutes) < 30) {
262 BIN_TO_BCD(real_seconds);
263 BIN_TO_BCD(real_minutes);
264 ctrl_outb(real_seconds, RSECCNT);
265 ctrl_outb(real_minutes, RMINCNT);
266 } else {
267 printk(KERN_WARNING
268 "set_rtc_time: can't update from %d to %d\n",
269 cmos_minutes, real_minutes);
270 retval = -1;
271 }
272
273 ctrl_outb(RCR2_RTCEN|RCR2_START, RCR2); /* Start RTC */
274
275 return retval;
276}
277
278/* last time the RTC clock got updated */
279static long last_rtc_update = 0;
280
281/*
282 * timer_interrupt() needs to keep up the real-time clock,
283 * as well as call the "do_timer()" routine every clocktick
284 */
285static inline void do_timer_interrupt(void)
286{
287 unsigned long long current_ctc;
288 asm ("getcon cr62, %0" : "=r" (current_ctc));
289 ctc_last_interrupt = (unsigned long) current_ctc;
290
291 do_timer(1);
292#ifndef CONFIG_SMP
293 update_process_times(user_mode(get_irq_regs()));
294#endif
295 if (current->pid)
296 profile_tick(CPU_PROFILING);
297
298#ifdef CONFIG_HEARTBEAT
299 {
300 extern void heartbeat(void);
301
302 heartbeat();
303 }
304#endif
305
306 /*
307 * If we have an externally synchronized Linux clock, then update
308 * RTC clock accordingly every ~11 minutes. Set_rtc_mmss() has to be
309 * called as close as possible to 500 ms before the new second starts.
310 */
311 if (ntp_synced() &&
312 xtime.tv_sec > last_rtc_update + 660 &&
313 (xtime.tv_nsec / 1000) >= 500000 - ((unsigned) TICK_SIZE) / 2 &&
314 (xtime.tv_nsec / 1000) <= 500000 + ((unsigned) TICK_SIZE) / 2) {
315 if (set_rtc_time(xtime.tv_sec) == 0)
316 last_rtc_update = xtime.tv_sec;
317 else
318 last_rtc_update = xtime.tv_sec - 600; /* do it again in 60 s */
319 }
320}
321
322/*
323 * Handle the timer tick. We also save the current CTC value at the
324 * time of the timer interrupt, so that later on we can estimate the
325 * time of day more exactly.
326 */
327static irqreturn_t timer_interrupt(int irq, void *dev_id)
328{
329 unsigned long timer_status;
330
331 /* Clear UNF bit */
332 timer_status = ctrl_inw(TMU0_TCR);
333 timer_status &= ~0x100;
334 ctrl_outw(timer_status, TMU0_TCR);
335
336	/*
337	 * Here we are in the timer irq handler. We have irqs locally
338	 * disabled, but we don't know if the timer bottom half is running
339	 * on another CPU, so we need to avoid an SMP race with it. NOTE:
340	 * we don't need the irq-safe version of the write lock because, as
341	 * just said, we have irqs locally disabled. -arca
342	 */
343	write_seqlock(&xtime_lock);
344	do_timer_interrupt();
345	write_sequnlock(&xtime_lock);
346
347 return IRQ_HANDLED;
348}
349
350static unsigned long get_rtc_time(void)
351{
352 unsigned int sec, min, hr, wk, day, mon, yr, yr100;
353
354 again:
355 do {
356 ctrl_outb(0, RCR1); /* Clear CF-bit */
357 sec = ctrl_inb(RSECCNT);
358 min = ctrl_inb(RMINCNT);
359 hr = ctrl_inb(RHRCNT);
360 wk = ctrl_inb(RWKCNT);
361 day = ctrl_inb(RDAYCNT);
362 mon = ctrl_inb(RMONCNT);
363 yr = ctrl_inw(RYRCNT);
364 yr100 = (yr >> 8);
365 yr &= 0xff;
366 } while ((ctrl_inb(RCR1) & RCR1_CF) != 0);
367
368 BCD_TO_BIN(yr100);
369 BCD_TO_BIN(yr);
370 BCD_TO_BIN(mon);
371 BCD_TO_BIN(day);
372 BCD_TO_BIN(hr);
373 BCD_TO_BIN(min);
374 BCD_TO_BIN(sec);
375
376 if (yr > 99 || mon < 1 || mon > 12 || day > 31 || day < 1 ||
377 hr > 23 || min > 59 || sec > 59) {
378 printk(KERN_ERR
379 "SH RTC: invalid value, resetting to 1 Jan 2000\n");
380 ctrl_outb(RCR2_RESET, RCR2); /* Reset & Stop */
381 ctrl_outb(0, RSECCNT);
382 ctrl_outb(0, RMINCNT);
383 ctrl_outb(0, RHRCNT);
384 ctrl_outb(6, RWKCNT);
385 ctrl_outb(1, RDAYCNT);
386 ctrl_outb(1, RMONCNT);
387 ctrl_outw(0x2000, RYRCNT);
388 ctrl_outb(RCR2_RTCEN|RCR2_START, RCR2); /* Start */
389 goto again;
390 }
391
392 return mktime(yr100 * 100 + yr, mon, day, hr, min, sec);
393}
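The RTC count registers hold BCD values, hence the BCD_TO_BIN/BIN_TO_BCD calls above (they come from <linux/bcd.h>). A self-contained sketch of what those classic macros do, for illustration only:

#include <stdio.h>

/* Same shape as the classic <linux/bcd.h> helpers (illustrative copy). */
#define BCD_TO_BIN(val)	((val) = ((val) & 15) + ((val) >> 4) * 10)
#define BIN_TO_BCD(val)	((val) = (((val) / 10) << 4) + (val) % 10)

int main(void)
{
	unsigned int sec = 0x59;	/* BCD encoding of 59 */

	BCD_TO_BIN(sec);
	printf("%u\n", sec);		/* 59 */
	BIN_TO_BCD(sec);
	printf("0x%02x\n", sec);	/* 0x59 */
	return 0;
}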
394
395static __init unsigned int get_cpu_hz(void)
396{
397 unsigned int count;
398 unsigned long __dummy;
399 unsigned long ctc_val_init, ctc_val;
400
401 /*
402	** Regardless of the toolchain, force the compiler to use
403	** the arbitrary register r3 as a clock tick counter.
404	** NOTE: r3 must match the register used in sh64_rtc_interrupt()
405 */
406 register unsigned long long __rtc_irq_flag __asm__ ("r3");
407
408 local_irq_enable();
409 do {} while (ctrl_inb(R64CNT) != 0);
410 ctrl_outb(RCR1_CIE, RCR1); /* Enable carry interrupt */
411
412 /*
413 * r3 is arbitrary. CDC does not support "=z".
414 */
415 ctc_val_init = 0xffffffff;
416 ctc_val = ctc_val_init;
417
418 asm volatile("gettr tr0, %1\n\t"
419 "putcon %0, " __CTC "\n\t"
420 "and %2, r63, %2\n\t"
421 "pta $+4, tr0\n\t"
422 "beq/l %2, r63, tr0\n\t"
423 "ptabs %1, tr0\n\t"
424 "getcon " __CTC ", %0\n\t"
425 : "=r"(ctc_val), "=r" (__dummy), "=r" (__rtc_irq_flag)
426 : "0" (0));
427 local_irq_disable();
428 /*
429 * SH-3:
430 * CPU clock = 4 stages * loop
431 * tst rm,rm if id ex
432 * bt/s 1b if id ex
433 * add #1,rd if id ex
434 * (if) pipe line stole
435 * tst rm,rm if id ex
436 * ....
437 *
438 *
439 * SH-4:
440 * CPU clock = 6 stages * loop
441 * I don't know why.
442 * ....
443 *
444 * SH-5:
445 * Use CTC register to count. This approach returns the right value
446 * even if the I-cache is disabled (e.g. whilst debugging.)
447 *
448 */
449
450 count = ctc_val_init - ctc_val; /* CTC counts down */
451
452#if defined (CONFIG_SH_SIMULATOR)
453 /*
454	 * Let's pretend we are a 5MHz SH-5, both to avoid too small
455	 * a timer interval and to keep the delay calibration within
456	 * a reasonable time.
457 */
458 return 5000000;
459#else
460 /*
461	 * The carry interrupt is raised at 64 of the 128 steps of a
462	 * complete R64CNT wrap-around, so the count measured above spans
463	 * exactly half a second; double it to get clock cycles per second.
464 */
465 return count*2;
466#endif
467}
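A quick worked instance of that count*2 scaling, with an invented CTC delta (the 400 MHz result is hypothetical, not a statement about real hardware):

#include <stdio.h>

int main(void)
{
	/* Assumed CTC delta measured between R64CNT == 0 and the carry
	 * interrupt, i.e. over half a second on a 400 MHz part. */
	unsigned long count = 200000000UL;
	unsigned long cpu_hz = count * 2;	/* cycles per second */

	printf("%lu.%02luMHz\n", cpu_hz / 1000000, (cpu_hz % 1000000) / 10000);
	return 0;
}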
468
469static irqreturn_t sh64_rtc_interrupt(int irq, void *dev_id)
470{
471 struct pt_regs *regs = get_irq_regs();
472
473 ctrl_outb(0, RCR1); /* Disable Carry Interrupts */
474 regs->regs[3] = 1; /* Using r3 */
475
476 return IRQ_HANDLED;
477}
478
479static struct irqaction irq0 = {
480 .handler = timer_interrupt,
481 .flags = IRQF_DISABLED,
482 .mask = CPU_MASK_NONE,
483 .name = "timer",
484};
485static struct irqaction irq1 = {
486 .handler = sh64_rtc_interrupt,
487 .flags = IRQF_DISABLED,
488 .mask = CPU_MASK_NONE,
489 .name = "rtc",
490};
491
492void __init time_init(void)
493{
494 unsigned int cpu_clock, master_clock, bus_clock, module_clock;
495 unsigned long interval;
496 unsigned long frqcr, ifc, pfc;
497 static int ifc_table[] = { 2, 4, 6, 8, 10, 12, 16, 24 };
498#define bfc_table ifc_table /* Same */
499#define pfc_table ifc_table /* Same */
500
501 tmu_base = onchip_remap(TMU_BASE, 1024, "TMU");
502 if (!tmu_base) {
503 panic("Unable to remap TMU\n");
504 }
505
506 rtc_base = onchip_remap(RTC_BASE, 1024, "RTC");
507 if (!rtc_base) {
508 panic("Unable to remap RTC\n");
509 }
510
511 cprc_base = onchip_remap(CPRC_BASE, 1024, "CPRC");
512 if (!cprc_base) {
513 panic("Unable to remap CPRC\n");
514 }
515
516 xtime.tv_sec = get_rtc_time();
517 xtime.tv_nsec = 0;
518
519 setup_irq(TIMER_IRQ, &irq0);
520 setup_irq(RTC_IRQ, &irq1);
521
522 /* Check how fast it is.. */
523 cpu_clock = get_cpu_hz();
524
525 /* Note careful order of operations to maintain reasonable precision and avoid overflow. */
526 scaled_recip_ctc_ticks_per_jiffy = ((1ULL << CTC_JIFFY_SCALE_SHIFT) / (unsigned long long)(cpu_clock / HZ));
527
528 disable_irq(RTC_IRQ);
529
530 printk("CPU clock: %d.%02dMHz\n",
531 (cpu_clock / 1000000), (cpu_clock % 1000000)/10000);
532 {
533 unsigned short bfc;
534 frqcr = ctrl_inl(FRQCR);
535 ifc = ifc_table[(frqcr>> 6) & 0x0007];
536 bfc = bfc_table[(frqcr>> 3) & 0x0007];
537 pfc = pfc_table[(frqcr>> 12) & 0x0007];
538 master_clock = cpu_clock * ifc;
539 bus_clock = master_clock/bfc;
540 }
541
542 printk("Bus clock: %d.%02dMHz\n",
543 (bus_clock/1000000), (bus_clock % 1000000)/10000);
544 module_clock = master_clock/pfc;
545 printk("Module clock: %d.%02dMHz\n",
546 (module_clock/1000000), (module_clock % 1000000)/10000);
547 interval = (module_clock/(HZ*4));
548
549 printk("Interval = %ld\n", interval);
550
551 current_cpu_data.cpu_clock = cpu_clock;
552 current_cpu_data.master_clock = master_clock;
553 current_cpu_data.bus_clock = bus_clock;
554 current_cpu_data.module_clock = module_clock;
555
556 /* Start TMU0 */
557 ctrl_outb(TMU_TSTR_OFF, TMU_TSTR);
558 ctrl_outb(TMU_TOCR_INIT, TMU_TOCR);
559 ctrl_outw(TMU0_TCR_INIT, TMU0_TCR);
560 ctrl_outl(interval, TMU0_TCOR);
561 ctrl_outl(interval, TMU0_TCNT);
562 ctrl_outb(TMU_TSTR_INIT, TMU_TSTR);
563}
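The FRQCR decode in time_init() is easier to follow standalone. This sketch reuses the same table and field positions; the FRQCR value and the measured CPU clock are invented. Note also that the TMU input runs at module_clock/4, which is why the kernel sets interval to module_clock/(HZ*4).

#include <stdio.h>

int main(void)
{
	static const int ifc_table[] = { 2, 4, 6, 8, 10, 12, 16, 24 };
	/* bfc_table/pfc_table alias the same array in time_init() above. */
	unsigned long frqcr = 0x0048UL;		/* hypothetical FRQCR value */
	unsigned long cpu_clock = 400000000UL;	/* as measured by get_cpu_hz() */

	unsigned long ifc = ifc_table[(frqcr >> 6) & 0x7];	/* 4 */
	unsigned long bfc = ifc_table[(frqcr >> 3) & 0x7];	/* 4 */
	unsigned long pfc = ifc_table[(frqcr >> 12) & 0x7];	/* 2 */

	unsigned long long master_clock = (unsigned long long)cpu_clock * ifc;
	unsigned long long bus_clock = master_clock / bfc;
	unsigned long long module_clock = master_clock / pfc;

	printf("master %llu, bus %llu, module %llu\n",
	       master_clock, bus_clock, module_clock);
	return 0;
}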
564
565void enter_deep_standby(void)
566{
567 /* Disable watchdog timer */
568 ctrl_outl(0xa5000000, WTCSR);
569 /* Configure deep standby on sleep */
570 ctrl_outl(0x03, STBCR);
571
572#ifdef CONFIG_SH_ALPHANUMERIC
573 {
574 extern void mach_alphanum(int position, unsigned char value);
575 extern void mach_alphanum_brightness(int setting);
576 char halted[] = "Halted. ";
577 int i;
578 mach_alphanum_brightness(6); /* dimmest setting above off */
579 for (i=0; i<8; i++) {
580 mach_alphanum(i, halted[i]);
581 }
582 asm __volatile__ ("synco");
583 }
584#endif
585
586 asm __volatile__ ("sleep");
587 asm __volatile__ ("synci");
588 asm __volatile__ ("nop");
589 asm __volatile__ ("nop");
590 asm __volatile__ ("nop");
591 asm __volatile__ ("nop");
592 panic("Unexpected wakeup!\n");
593}
diff --git a/arch/sh64/kernel/traps.c b/arch/sh64/kernel/traps.c
deleted file mode 100644
index f32df3831f45..000000000000
--- a/arch/sh64/kernel/traps.c
+++ /dev/null
@@ -1,982 +0,0 @@
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * arch/sh64/kernel/traps.c
7 *
8 * Copyright (C) 2000, 2001 Paolo Alberelli
9 * Copyright (C) 2003, 2004 Paul Mundt
10 * Copyright (C) 2003, 2004 Richard Curnow
11 *
12 */
13
14/*
15 * 'Traps.c' handles hardware traps and faults after we have saved some
16 * state in 'entry.S'.
17 */
18#include <linux/sched.h>
19#include <linux/kernel.h>
20#include <linux/string.h>
21#include <linux/errno.h>
22#include <linux/ptrace.h>
23#include <linux/timer.h>
24#include <linux/mm.h>
25#include <linux/smp.h>
26#include <linux/init.h>
27#include <linux/delay.h>
28#include <linux/spinlock.h>
29#include <linux/kallsyms.h>
30#include <linux/interrupt.h>
31#include <linux/sysctl.h>
32#include <linux/module.h>
33#include <asm/system.h>
34#include <asm/uaccess.h>
35#include <asm/io.h>
36#include <asm/atomic.h>
37#include <asm/processor.h>
38#include <asm/pgtable.h>
39
40#undef DEBUG_EXCEPTION
41#ifdef DEBUG_EXCEPTION
42/* implemented in ../lib/dbg.c */
43extern void show_excp_regs(char *fname, int trapnr, int signr,
44 struct pt_regs *regs);
45#else
46#define show_excp_regs(a, b, c, d)
47#endif
48
49static void do_unhandled_exception(int trapnr, int signr, char *str, char *fn_name,
50 unsigned long error_code, struct pt_regs *regs, struct task_struct *tsk);
51
52#define DO_ERROR(trapnr, signr, str, name, tsk) \
53asmlinkage void do_##name(unsigned long error_code, struct pt_regs *regs) \
54{ \
55 do_unhandled_exception(trapnr, signr, str, __stringify(name), error_code, regs, current); \
56}
57
58DEFINE_SPINLOCK(die_lock);
59
60void die(const char * str, struct pt_regs * regs, long err)
61{
62 console_verbose();
63 spin_lock_irq(&die_lock);
64 printk("%s: %lx\n", str, (err & 0xffffff));
65 show_regs(regs);
66 spin_unlock_irq(&die_lock);
67 do_exit(SIGSEGV);
68}
69
70static inline void die_if_kernel(const char * str, struct pt_regs * regs, long err)
71{
72 if (!user_mode(regs))
73 die(str, regs, err);
74}
75
76static void die_if_no_fixup(const char * str, struct pt_regs * regs, long err)
77{
78 if (!user_mode(regs)) {
79 const struct exception_table_entry *fixup;
80 fixup = search_exception_tables(regs->pc);
81 if (fixup) {
82 regs->pc = fixup->fixup;
83 return;
84 }
85 die(str, regs, err);
86 }
87}
88
89DO_ERROR(13, SIGILL, "illegal slot instruction", illegal_slot_inst, current)
90DO_ERROR(87, SIGSEGV, "address error (exec)", address_error_exec, current)
91
92
93/* Implement misaligned load/store handling for kernel (and optionally for user
94 mode too). Limitation : only SHmedia mode code is handled - there is no
95 handling at all for misaligned accesses occurring in SHcompact code yet. */
96
97static int misaligned_fixup(struct pt_regs *regs);
98
99asmlinkage void do_address_error_load(unsigned long error_code, struct pt_regs *regs)
100{
101 if (misaligned_fixup(regs) < 0) {
102 do_unhandled_exception(7, SIGSEGV, "address error(load)",
103 "do_address_error_load",
104 error_code, regs, current);
105 }
106 return;
107}
108
109asmlinkage void do_address_error_store(unsigned long error_code, struct pt_regs *regs)
110{
111 if (misaligned_fixup(regs) < 0) {
112 do_unhandled_exception(8, SIGSEGV, "address error(store)",
113 "do_address_error_store",
114 error_code, regs, current);
115 }
116 return;
117}
118
119#if defined(CONFIG_SH64_ID2815_WORKAROUND)
120
121#define OPCODE_INVALID 0
122#define OPCODE_USER_VALID 1
123#define OPCODE_PRIV_VALID 2
124
125/* getcon/putcon - requires checking which control register is referenced. */
126#define OPCODE_CTRL_REG 3
127
128/* Table of valid opcodes for SHmedia mode.
129 Form a 10-bit value by concatenating the major/minor opcodes i.e.
130   opcode[31:26,19:16]. The 6 MSBs of this value index into the following
131 array. The 4 LSBs select the bit-pair in the entry (bits 1:0 correspond to
132 LSBs==4'b0000 etc). */
133static unsigned long shmedia_opcode_table[64] = {
134 0x55554044,0x54445055,0x15141514,0x14541414,0x00000000,0x10001000,0x01110055,0x04050015,
135 0x00000444,0xc0000000,0x44545515,0x40405555,0x55550015,0x10005555,0x55555505,0x04050000,
136 0x00000555,0x00000404,0x00040445,0x15151414,0x00000000,0x00000000,0x00000000,0x00000000,
137 0x00000055,0x40404444,0x00000404,0xc0009495,0x00000000,0x00000000,0x00000000,0x00000000,
138 0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,
139 0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,
140 0x80005050,0x04005055,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,
141 0x81055554,0x00000404,0x55555555,0x55555555,0x00000000,0x00000000,0x00000000,0x00000000
142};
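How the table packing works is easiest to see in a standalone decode sketch. The stand-in table and sample opcode below are invented; the shift/mask arithmetic matches do_reserved_inst() below.

#include <stdio.h>

/* Each 32-bit entry packs 16 two-bit validity states: the major opcode
 * (bits 31:26) picks the entry, the minor opcode (bits 19:16) picks the
 * bit-pair.  0=invalid, 1=user-valid, 2=priv-valid, 3=control-register. */
static int opcode_state(const unsigned long *table, unsigned long opcode)
{
	unsigned long major = (opcode >> 26) & 0x3f;	/* table index  */
	unsigned long minor = (opcode >> 16) & 0xf;	/* bit-pair no. */
	return (table[major] >> (minor << 1)) & 0x3;
}

int main(void)
{
	/* Invented stand-in table: entry 0 marks minor 1 as user-valid. */
	unsigned long table[64] = { [0] = 0x1UL << 2 };
	unsigned long opcode = 0x00010000UL;	/* major 0, minor 1 */

	printf("state = %d\n", opcode_state(table, opcode));	/* 1 */
	return 0;
}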
143
144void do_reserved_inst(unsigned long error_code, struct pt_regs *regs)
145{
146 /* Workaround SH5-101 cut2 silicon defect #2815 :
147 in some situations, inter-mode branches from SHcompact -> SHmedia
148 which should take ITLBMISS or EXECPROT exceptions at the target
149 falsely take RESINST at the target instead. */
150
151 unsigned long opcode = 0x6ff4fff0; /* guaranteed reserved opcode */
152 unsigned long pc, aligned_pc;
153 int get_user_error;
154 int trapnr = 12;
155 int signr = SIGILL;
156 char *exception_name = "reserved_instruction";
157
158 pc = regs->pc;
159 if ((pc & 3) == 1) {
160 /* SHmedia : check for defect. This requires executable vmas
161 to be readable too. */
162 aligned_pc = pc & ~3;
163 if (!access_ok(VERIFY_READ, aligned_pc, sizeof(unsigned long))) {
164 get_user_error = -EFAULT;
165 } else {
166 get_user_error = __get_user(opcode, (unsigned long *)aligned_pc);
167 }
168 if (get_user_error >= 0) {
169 unsigned long index, shift;
170 unsigned long major, minor, combined;
171 unsigned long reserved_field;
172 reserved_field = opcode & 0xf; /* These bits are currently reserved as zero in all valid opcodes */
173 major = (opcode >> 26) & 0x3f;
174 minor = (opcode >> 16) & 0xf;
175 combined = (major << 4) | minor;
176 index = major;
177 shift = minor << 1;
178 if (reserved_field == 0) {
179 int opcode_state = (shmedia_opcode_table[index] >> shift) & 0x3;
180 switch (opcode_state) {
181 case OPCODE_INVALID:
182 /* Trap. */
183 break;
184 case OPCODE_USER_VALID:
185 /* Restart the instruction : the branch to the instruction will now be from an RTE
186 not from SHcompact so the silicon defect won't be triggered. */
187 return;
188 case OPCODE_PRIV_VALID:
189 if (!user_mode(regs)) {
190 /* Should only ever get here if a module has
191 SHcompact code inside it. If so, the same fix up is needed. */
192 return; /* same reason */
193 }
194 /* Otherwise, user mode trying to execute a privileged instruction -
195 fall through to trap. */
196 break;
197 case OPCODE_CTRL_REG:
198 /* If in privileged mode, return as above. */
199 if (!user_mode(regs)) return;
200 /* In user mode ... */
201 if (combined == 0x9f) { /* GETCON */
202 unsigned long regno = (opcode >> 20) & 0x3f;
203 if (regno >= 62) {
204 return;
205 }
206 /* Otherwise, reserved or privileged control register, => trap */
207 } else if (combined == 0x1bf) { /* PUTCON */
208 unsigned long regno = (opcode >> 4) & 0x3f;
209 if (regno >= 62) {
210 return;
211 }
212 /* Otherwise, reserved or privileged control register, => trap */
213 } else {
214 /* Trap */
215 }
216 break;
217 default:
218 /* Fall through to trap. */
219 break;
220 }
221 }
222 /* fall through to normal resinst processing */
223 } else {
224 /* Error trying to read opcode. This typically means a
225 real fault, not a RESINST any more. So change the
226 codes. */
227 trapnr = 87;
228 exception_name = "address error (exec)";
229 signr = SIGSEGV;
230 }
231 }
232
233 do_unhandled_exception(trapnr, signr, exception_name, "do_reserved_inst", error_code, regs, current);
234}
235
236#else /* CONFIG_SH64_ID2815_WORKAROUND */
237
238/* If the workaround isn't needed, this is just a straightforward reserved
239 instruction */
240DO_ERROR(12, SIGILL, "reserved instruction", reserved_inst, current)
241
242#endif /* CONFIG_SH64_ID2815_WORKAROUND */
243
244/* Called with interrupts disabled */
245asmlinkage void do_exception_error(unsigned long ex, struct pt_regs *regs)
246{
247 PLS();
248 show_excp_regs(__FUNCTION__, -1, -1, regs);
249 die_if_kernel("exception", regs, ex);
250}
251
252int do_unknown_trapa(unsigned long scId, struct pt_regs *regs)
253{
254 /* Syscall debug */
255 printk("System call ID error: [0x1#args:8 #syscall:16 0x%lx]\n", scId);
256
257 die_if_kernel("unknown trapa", regs, scId);
258
259 return -ENOSYS;
260}
261
262void show_stack(struct task_struct *tsk, unsigned long *sp)
263{
264#ifdef CONFIG_KALLSYMS
265 extern void sh64_unwind(struct pt_regs *regs);
266 struct pt_regs *regs;
267
268 regs = tsk ? tsk->thread.kregs : NULL;
269
270 sh64_unwind(regs);
271#else
272 printk(KERN_ERR "Can't backtrace on sh64 without CONFIG_KALLSYMS\n");
273#endif
274}
275
276void show_task(unsigned long *sp)
277{
278 show_stack(NULL, sp);
279}
280
281void dump_stack(void)
282{
283 show_task(NULL);
284}
285/* Needed by any user of WARN_ON in view of the defn in include/asm-sh/bug.h */
286EXPORT_SYMBOL(dump_stack);
287
288static void do_unhandled_exception(int trapnr, int signr, char *str, char *fn_name,
289 unsigned long error_code, struct pt_regs *regs, struct task_struct *tsk)
290{
291 show_excp_regs(fn_name, trapnr, signr, regs);
292 tsk->thread.error_code = error_code;
293 tsk->thread.trap_no = trapnr;
294
295 if (user_mode(regs))
296 force_sig(signr, tsk);
297
298 die_if_no_fixup(str, regs, error_code);
299}
300
301static int read_opcode(unsigned long long pc, unsigned long *result_opcode, int from_user_mode)
302{
303 int get_user_error;
304 unsigned long aligned_pc;
305 unsigned long opcode;
306
307 if ((pc & 3) == 1) {
308 /* SHmedia */
309 aligned_pc = pc & ~3;
310 if (from_user_mode) {
311 if (!access_ok(VERIFY_READ, aligned_pc, sizeof(unsigned long))) {
312 get_user_error = -EFAULT;
313 } else {
314 get_user_error = __get_user(opcode, (unsigned long *)aligned_pc);
315 *result_opcode = opcode;
316 }
317 return get_user_error;
318 } else {
319			/* If the fault was in the kernel, we can read the
320			 * opcode directly; if the address is bad, we just
321			 * fault again. */
322 *result_opcode = *(unsigned long *) aligned_pc;
323 return 0;
324 }
325 } else if ((pc & 1) == 0) {
326 /* SHcompact */
327 /* TODO : provide handling for this. We don't really support
328 user-mode SHcompact yet, and for a kernel fault, this would
329 have to come from a module built for SHcompact. */
330 return -EFAULT;
331 } else {
332 /* misaligned */
333 return -EFAULT;
334 }
335}
336
337static int address_is_sign_extended(__u64 a)
338{
339 __u64 b;
340#if (NEFF == 32)
341 b = (__u64)(__s64)(__s32)(a & 0xffffffffUL);
342 return (b == a) ? 1 : 0;
343#else
344#error "Sign extend check only works for NEFF==32"
345#endif
346}
347
348static int generate_and_check_address(struct pt_regs *regs,
349 __u32 opcode,
350 int displacement_not_indexed,
351 int width_shift,
352 __u64 *address)
353{
354 /* return -1 for fault, 0 for OK */
355
356 __u64 base_address, addr;
357 int basereg;
358
359 basereg = (opcode >> 20) & 0x3f;
360 base_address = regs->regs[basereg];
361 if (displacement_not_indexed) {
362 __s64 displacement;
363 displacement = (opcode >> 10) & 0x3ff;
364 displacement = ((displacement << 54) >> 54); /* sign extend */
365 addr = (__u64)((__s64)base_address + (displacement << width_shift));
366 } else {
367 __u64 offset;
368 int offsetreg;
369 offsetreg = (opcode >> 10) & 0x3f;
370 offset = regs->regs[offsetreg];
371 addr = base_address + offset;
372 }
373
374 /* Check sign extended */
375 if (!address_is_sign_extended(addr)) {
376 return -1;
377 }
378
379#if defined(CONFIG_SH64_USER_MISALIGNED_FIXUP)
380 /* Check accessible. For misaligned access in the kernel, assume the
381 address is always accessible (and if not, just fault when the
382 load/store gets done.) */
383 if (user_mode(regs)) {
384 if (addr >= TASK_SIZE) {
385 return -1;
386 }
387 /* Do access_ok check later - it depends on whether it's a load or a store. */
388 }
389#endif
390
391 *address = addr;
392 return 0;
393}
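The (displacement << 54) >> 54 idiom above sign-extends the 10-bit displacement field. A minimal demonstration with invented values (it relies, as the kernel does, on the compiler implementing arithmetic right shift for signed types):

#include <stdio.h>

/* Sign-extend a 10-bit field sitting in the low bits of a 64-bit value. */
static long long sext10(long long v)
{
	return (v << 54) >> 54;
}

int main(void)
{
	printf("%lld\n", sext10(0x3ff));	/* -1   */
	printf("%lld\n", sext10(0x200));	/* -512 */
	printf("%lld\n", sext10(0x1ff));	/*  511 */
	return 0;
}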
394
395/* Default value as for sh */
396#if defined(CONFIG_SH64_USER_MISALIGNED_FIXUP)
397static int user_mode_unaligned_fixup_count = 10;
398static int user_mode_unaligned_fixup_enable = 1;
399#endif
400
401static int kernel_mode_unaligned_fixup_count = 32;
402
403static void misaligned_kernel_word_load(__u64 address, int do_sign_extend, __u64 *result)
404{
405 unsigned short x;
406 unsigned char *p, *q;
407 p = (unsigned char *) (int) address;
408 q = (unsigned char *) &x;
409 q[0] = p[0];
410 q[1] = p[1];
411
412 if (do_sign_extend) {
413 *result = (__u64)(__s64) *(short *) &x;
414 } else {
415 *result = (__u64) x;
416 }
417}
418
419static void misaligned_kernel_word_store(__u64 address, __u64 value)
420{
421 unsigned short x;
422 unsigned char *p, *q;
423 p = (unsigned char *) (int) address;
424 q = (unsigned char *) &x;
425
426 x = (__u16) value;
427 p[0] = q[0];
428 p[1] = q[1];
429}
430
431static int misaligned_load(struct pt_regs *regs,
432 __u32 opcode,
433 int displacement_not_indexed,
434 int width_shift,
435 int do_sign_extend)
436{
437 /* Return -1 for a fault, 0 for OK */
438 int error;
439 int destreg;
440 __u64 address;
441
442 error = generate_and_check_address(regs, opcode,
443 displacement_not_indexed, width_shift, &address);
444 if (error < 0) {
445 return error;
446 }
447
448 destreg = (opcode >> 4) & 0x3f;
449#if defined(CONFIG_SH64_USER_MISALIGNED_FIXUP)
450 if (user_mode(regs)) {
451 __u64 buffer;
452
453 if (!access_ok(VERIFY_READ, (unsigned long) address, 1UL<<width_shift)) {
454 return -1;
455 }
456
457 if (__copy_user(&buffer, (const void *)(int)address, (1 << width_shift)) > 0) {
458 return -1; /* fault */
459 }
460 switch (width_shift) {
461 case 1:
462 if (do_sign_extend) {
463 regs->regs[destreg] = (__u64)(__s64) *(__s16 *) &buffer;
464 } else {
465 regs->regs[destreg] = (__u64) *(__u16 *) &buffer;
466 }
467 break;
468 case 2:
469 regs->regs[destreg] = (__u64)(__s64) *(__s32 *) &buffer;
470 break;
471 case 3:
472 regs->regs[destreg] = buffer;
473 break;
474 default:
475 printk("Unexpected width_shift %d in misaligned_load, PC=%08lx\n",
476 width_shift, (unsigned long) regs->pc);
477 break;
478 }
479 } else
480#endif
481 {
482 /* kernel mode - we can take short cuts since if we fault, it's a genuine bug */
483 __u64 lo, hi;
484
485 switch (width_shift) {
486 case 1:
487 misaligned_kernel_word_load(address, do_sign_extend, &regs->regs[destreg]);
488 break;
489 case 2:
490 asm ("ldlo.l %1, 0, %0" : "=r" (lo) : "r" (address));
491 asm ("ldhi.l %1, 3, %0" : "=r" (hi) : "r" (address));
492 regs->regs[destreg] = lo | hi;
493 break;
494 case 3:
495 asm ("ldlo.q %1, 0, %0" : "=r" (lo) : "r" (address));
496 asm ("ldhi.q %1, 7, %0" : "=r" (hi) : "r" (address));
497 regs->regs[destreg] = lo | hi;
498 break;
499
500 default:
501 printk("Unexpected width_shift %d in misaligned_load, PC=%08lx\n",
502 width_shift, (unsigned long) regs->pc);
503 break;
504 }
505 }
506
507 return 0;
508
509}
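The kernel-mode path above uses SH-5's ldlo/ldhi pairs to fetch and merge the two aligned words straddling the address. Portable C gets the same effect byte-wise, as in this user-space sketch; it is the analogue of misaligned_kernel_word_load() widened to 32 bits (the buffer contents are invented, and the printed value assumes little-endian):

#include <stdio.h>
#include <string.h>
#include <stdint.h>

/* Byte-wise copy sidesteps alignment traps entirely; compilers emit
 * safe byte or merge sequences for the memcpy. */
static uint32_t load32_unaligned(const unsigned char *p)
{
	uint32_t x;
	memcpy(&x, p, sizeof(x));
	return x;
}

int main(void)
{
	unsigned char buf[8] = { 0, 0x78, 0x56, 0x34, 0x12, 0, 0, 0 };

	/* buf + 1 is deliberately misaligned. */
	printf("0x%08x\n", (unsigned)load32_unaligned(buf + 1));
	return 0;	/* prints 0x12345678 on little-endian */
}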
510
511static int misaligned_store(struct pt_regs *regs,
512 __u32 opcode,
513 int displacement_not_indexed,
514 int width_shift)
515{
516 /* Return -1 for a fault, 0 for OK */
517 int error;
518 int srcreg;
519 __u64 address;
520
521 error = generate_and_check_address(regs, opcode,
522 displacement_not_indexed, width_shift, &address);
523 if (error < 0) {
524 return error;
525 }
526
527 srcreg = (opcode >> 4) & 0x3f;
528#if defined(CONFIG_SH64_USER_MISALIGNED_FIXUP)
529 if (user_mode(regs)) {
530 __u64 buffer;
531
532 if (!access_ok(VERIFY_WRITE, (unsigned long) address, 1UL<<width_shift)) {
533 return -1;
534 }
535
536 switch (width_shift) {
537 case 1:
538 *(__u16 *) &buffer = (__u16) regs->regs[srcreg];
539 break;
540 case 2:
541 *(__u32 *) &buffer = (__u32) regs->regs[srcreg];
542 break;
543 case 3:
544 buffer = regs->regs[srcreg];
545 break;
546 default:
547 printk("Unexpected width_shift %d in misaligned_store, PC=%08lx\n",
548 width_shift, (unsigned long) regs->pc);
549 break;
550 }
551
552 if (__copy_user((void *)(int)address, &buffer, (1 << width_shift)) > 0) {
553 return -1; /* fault */
554 }
555 } else
556#endif
557 {
558 /* kernel mode - we can take short cuts since if we fault, it's a genuine bug */
559 __u64 val = regs->regs[srcreg];
560
561 switch (width_shift) {
562 case 1:
563 misaligned_kernel_word_store(address, val);
564 break;
565 case 2:
566 asm ("stlo.l %1, 0, %0" : : "r" (val), "r" (address));
567 asm ("sthi.l %1, 3, %0" : : "r" (val), "r" (address));
568 break;
569 case 3:
570 asm ("stlo.q %1, 0, %0" : : "r" (val), "r" (address));
571 asm ("sthi.q %1, 7, %0" : : "r" (val), "r" (address));
572 break;
573
574 default:
575 printk("Unexpected width_shift %d in misaligned_store, PC=%08lx\n",
576 width_shift, (unsigned long) regs->pc);
577 break;
578 }
579 }
580
581 return 0;
582
583}
584
585#if defined(CONFIG_SH64_USER_MISALIGNED_FIXUP)
586/* Never need to fix up misaligned FPU accesses within the kernel since that's a real
587 error. */
588static int misaligned_fpu_load(struct pt_regs *regs,
589 __u32 opcode,
590 int displacement_not_indexed,
591 int width_shift,
592 int do_paired_load)
593{
594 /* Return -1 for a fault, 0 for OK */
595 int error;
596 int destreg;
597 __u64 address;
598
599 error = generate_and_check_address(regs, opcode,
600 displacement_not_indexed, width_shift, &address);
601 if (error < 0) {
602 return error;
603 }
604
605 destreg = (opcode >> 4) & 0x3f;
606 if (user_mode(regs)) {
607 __u64 buffer;
608 __u32 buflo, bufhi;
609
610 if (!access_ok(VERIFY_READ, (unsigned long) address, 1UL<<width_shift)) {
611 return -1;
612 }
613
614 if (__copy_user(&buffer, (const void *)(int)address, (1 << width_shift)) > 0) {
615 return -1; /* fault */
616 }
617 /* 'current' may be the current owner of the FPU state, so
618 context switch the registers into memory so they can be
619 indexed by register number. */
620 if (last_task_used_math == current) {
621 grab_fpu();
622 fpsave(&current->thread.fpu.hard);
623 release_fpu();
624 last_task_used_math = NULL;
625 regs->sr |= SR_FD;
626 }
627
628 buflo = *(__u32*) &buffer;
629 bufhi = *(1 + (__u32*) &buffer);
630
631 switch (width_shift) {
632 case 2:
633 current->thread.fpu.hard.fp_regs[destreg] = buflo;
634 break;
635 case 3:
636 if (do_paired_load) {
637 current->thread.fpu.hard.fp_regs[destreg] = buflo;
638 current->thread.fpu.hard.fp_regs[destreg+1] = bufhi;
639 } else {
640#if defined(CONFIG_LITTLE_ENDIAN)
641 current->thread.fpu.hard.fp_regs[destreg] = bufhi;
642 current->thread.fpu.hard.fp_regs[destreg+1] = buflo;
643#else
644 current->thread.fpu.hard.fp_regs[destreg] = buflo;
645 current->thread.fpu.hard.fp_regs[destreg+1] = bufhi;
646#endif
647 }
648 break;
649 default:
650 printk("Unexpected width_shift %d in misaligned_fpu_load, PC=%08lx\n",
651 width_shift, (unsigned long) regs->pc);
652 break;
653 }
654 return 0;
655 } else {
656 die ("Misaligned FPU load inside kernel", regs, 0);
657 return -1;
658 }
659
660
661}
662
663static int misaligned_fpu_store(struct pt_regs *regs,
664 __u32 opcode,
665 int displacement_not_indexed,
666 int width_shift,
667 int do_paired_load)
668{
669 /* Return -1 for a fault, 0 for OK */
670 int error;
671 int srcreg;
672 __u64 address;
673
674 error = generate_and_check_address(regs, opcode,
675 displacement_not_indexed, width_shift, &address);
676 if (error < 0) {
677 return error;
678 }
679
680 srcreg = (opcode >> 4) & 0x3f;
681 if (user_mode(regs)) {
682 __u64 buffer;
683 /* Initialise these to NaNs. */
684 __u32 buflo=0xffffffffUL, bufhi=0xffffffffUL;
685
686 if (!access_ok(VERIFY_WRITE, (unsigned long) address, 1UL<<width_shift)) {
687 return -1;
688 }
689
690 /* 'current' may be the current owner of the FPU state, so
691 context switch the registers into memory so they can be
692 indexed by register number. */
693 if (last_task_used_math == current) {
694 grab_fpu();
695 fpsave(&current->thread.fpu.hard);
696 release_fpu();
697 last_task_used_math = NULL;
698 regs->sr |= SR_FD;
699 }
700
701 switch (width_shift) {
702 case 2:
703 buflo = current->thread.fpu.hard.fp_regs[srcreg];
704 break;
705 case 3:
706 if (do_paired_load) {
707 buflo = current->thread.fpu.hard.fp_regs[srcreg];
708 bufhi = current->thread.fpu.hard.fp_regs[srcreg+1];
709 } else {
710#if defined(CONFIG_LITTLE_ENDIAN)
711 bufhi = current->thread.fpu.hard.fp_regs[srcreg];
712 buflo = current->thread.fpu.hard.fp_regs[srcreg+1];
713#else
714 buflo = current->thread.fpu.hard.fp_regs[srcreg];
715 bufhi = current->thread.fpu.hard.fp_regs[srcreg+1];
716#endif
717 }
718 break;
719 default:
720 printk("Unexpected width_shift %d in misaligned_fpu_store, PC=%08lx\n",
721 width_shift, (unsigned long) regs->pc);
722 break;
723 }
724
725 *(__u32*) &buffer = buflo;
726 *(1 + (__u32*) &buffer) = bufhi;
727 if (__copy_user((void *)(int)address, &buffer, (1 << width_shift)) > 0) {
728 return -1; /* fault */
729 }
730 return 0;
731 } else {
732 die ("Misaligned FPU load inside kernel", regs, 0);
733 return -1;
734 }
735}
736#endif
737
738static int misaligned_fixup(struct pt_regs *regs)
739{
740 unsigned long opcode;
741 int error;
742 int major, minor;
743
744#if !defined(CONFIG_SH64_USER_MISALIGNED_FIXUP)
745 /* Never fixup user mode misaligned accesses without this option enabled. */
746 return -1;
747#else
748 if (!user_mode_unaligned_fixup_enable) return -1;
749#endif
750
751 error = read_opcode(regs->pc, &opcode, user_mode(regs));
752 if (error < 0) {
753 return error;
754 }
755 major = (opcode >> 26) & 0x3f;
756 minor = (opcode >> 16) & 0xf;
757
758#if defined(CONFIG_SH64_USER_MISALIGNED_FIXUP)
759 if (user_mode(regs) && (user_mode_unaligned_fixup_count > 0)) {
760 --user_mode_unaligned_fixup_count;
761 /* Only do 'count' worth of these reports, to remove a potential DoS against syslog */
762 printk("Fixing up unaligned userspace access in \"%s\" pid=%d pc=0x%08x ins=0x%08lx\n",
763 current->comm, task_pid_nr(current), (__u32)regs->pc, opcode);
764 } else
765#endif
766 if (!user_mode(regs) && (kernel_mode_unaligned_fixup_count > 0)) {
767 --kernel_mode_unaligned_fixup_count;
768 if (in_interrupt()) {
769 printk("Fixing up unaligned kernelspace access in interrupt pc=0x%08x ins=0x%08lx\n",
770 (__u32)regs->pc, opcode);
771 } else {
772 printk("Fixing up unaligned kernelspace access in \"%s\" pid=%d pc=0x%08x ins=0x%08lx\n",
773 current->comm, task_pid_nr(current), (__u32)regs->pc, opcode);
774 }
775 }
776
777
778 switch (major) {
779 case (0x84>>2): /* LD.W */
780 error = misaligned_load(regs, opcode, 1, 1, 1);
781 break;
782 case (0xb0>>2): /* LD.UW */
783 error = misaligned_load(regs, opcode, 1, 1, 0);
784 break;
785 case (0x88>>2): /* LD.L */
786 error = misaligned_load(regs, opcode, 1, 2, 1);
787 break;
788 case (0x8c>>2): /* LD.Q */
789 error = misaligned_load(regs, opcode, 1, 3, 0);
790 break;
791
792 case (0xa4>>2): /* ST.W */
793 error = misaligned_store(regs, opcode, 1, 1);
794 break;
795 case (0xa8>>2): /* ST.L */
796 error = misaligned_store(regs, opcode, 1, 2);
797 break;
798 case (0xac>>2): /* ST.Q */
799 error = misaligned_store(regs, opcode, 1, 3);
800 break;
801
802 case (0x40>>2): /* indexed loads */
803 switch (minor) {
804 case 0x1: /* LDX.W */
805 error = misaligned_load(regs, opcode, 0, 1, 1);
806 break;
807 case 0x5: /* LDX.UW */
808 error = misaligned_load(regs, opcode, 0, 1, 0);
809 break;
810 case 0x2: /* LDX.L */
811 error = misaligned_load(regs, opcode, 0, 2, 1);
812 break;
813 case 0x3: /* LDX.Q */
814 error = misaligned_load(regs, opcode, 0, 3, 0);
815 break;
816 default:
817 error = -1;
818 break;
819 }
820 break;
821
822 case (0x60>>2): /* indexed stores */
823 switch (minor) {
824 case 0x1: /* STX.W */
825 error = misaligned_store(regs, opcode, 0, 1);
826 break;
827 case 0x2: /* STX.L */
828 error = misaligned_store(regs, opcode, 0, 2);
829 break;
830 case 0x3: /* STX.Q */
831 error = misaligned_store(regs, opcode, 0, 3);
832 break;
833 default:
834 error = -1;
835 break;
836 }
837 break;
838
839#if defined(CONFIG_SH64_USER_MISALIGNED_FIXUP)
840 case (0x94>>2): /* FLD.S */
841 error = misaligned_fpu_load(regs, opcode, 1, 2, 0);
842 break;
843 case (0x98>>2): /* FLD.P */
844 error = misaligned_fpu_load(regs, opcode, 1, 3, 1);
845 break;
846 case (0x9c>>2): /* FLD.D */
847 error = misaligned_fpu_load(regs, opcode, 1, 3, 0);
848 break;
849 case (0x1c>>2): /* floating indexed loads */
850 switch (minor) {
851 case 0x8: /* FLDX.S */
852 error = misaligned_fpu_load(regs, opcode, 0, 2, 0);
853 break;
854 case 0xd: /* FLDX.P */
855 error = misaligned_fpu_load(regs, opcode, 0, 3, 1);
856 break;
857 case 0x9: /* FLDX.D */
858 error = misaligned_fpu_load(regs, opcode, 0, 3, 0);
859 break;
860 default:
861 error = -1;
862 break;
863 }
864 break;
865	case (0xb4>>2): /* FST.S */
866 error = misaligned_fpu_store(regs, opcode, 1, 2, 0);
867 break;
868	case (0xb8>>2): /* FST.P */
869 error = misaligned_fpu_store(regs, opcode, 1, 3, 1);
870 break;
871	case (0xbc>>2): /* FST.D */
872 error = misaligned_fpu_store(regs, opcode, 1, 3, 0);
873 break;
874 case (0x3c>>2): /* floating indexed stores */
875 switch (minor) {
876 case 0x8: /* FSTX.S */
877 error = misaligned_fpu_store(regs, opcode, 0, 2, 0);
878 break;
879 case 0xd: /* FSTX.P */
880 error = misaligned_fpu_store(regs, opcode, 0, 3, 1);
881 break;
882 case 0x9: /* FSTX.D */
883 error = misaligned_fpu_store(regs, opcode, 0, 3, 0);
884 break;
885 default:
886 error = -1;
887 break;
888 }
889 break;
890#endif
891
892 default:
893 /* Fault */
894 error = -1;
895 break;
896 }
897
898 if (error < 0) {
899 return error;
900 } else {
901 regs->pc += 4; /* Skip the instruction that's just been emulated */
902 return 0;
903 }
904
905}
906
907static ctl_table unaligned_table[] = {
908 {
909 .ctl_name = CTL_UNNUMBERED,
910 .procname = "kernel_reports",
911 .data = &kernel_mode_unaligned_fixup_count,
912 .maxlen = sizeof(int),
913 .mode = 0644,
914 .proc_handler = &proc_dointvec
915 },
916#if defined(CONFIG_SH64_USER_MISALIGNED_FIXUP)
917 {
918 .ctl_name = CTL_UNNUMBERED,
919 .procname = "user_reports",
920 .data = &user_mode_unaligned_fixup_count,
921 .maxlen = sizeof(int),
922 .mode = 0644,
923 .proc_handler = &proc_dointvec
924 },
925 {
926 .ctl_name = CTL_UNNUMBERED,
927 .procname = "user_enable",
928 .data = &user_mode_unaligned_fixup_enable,
929 .maxlen = sizeof(int),
930 .mode = 0644,
931 .proc_handler = &proc_dointvec},
932#endif
933 {}
934};
935
936static ctl_table unaligned_root[] = {
937 {
938 .ctl_name = CTL_UNNUMBERED,
939 .procname = "unaligned_fixup",
940 .mode = 0555,
941		.child		= unaligned_table
942 },
943 {}
944};
945
946static ctl_table sh64_root[] = {
947 {
948 .ctl_name = CTL_UNNUMBERED,
949 .procname = "sh64",
950 .mode = 0555,
951 .child = unaligned_root
952 },
953 {}
954};
955static struct ctl_table_header *sysctl_header;
956static int __init init_sysctl(void)
957{
958 sysctl_header = register_sysctl_table(sh64_root);
959 return 0;
960}
961
962__initcall(init_sysctl);
963
964
965asmlinkage void do_debug_interrupt(unsigned long code, struct pt_regs *regs)
966{
967 u64 peek_real_address_q(u64 addr);
968 u64 poke_real_address_q(u64 addr, u64 val);
969 unsigned long long DM_EXP_CAUSE_PHY = 0x0c100010;
970 unsigned long long exp_cause;
971 /* It's not worth ioremapping the debug module registers for the amount
972 of access we make to them - just go direct to their physical
973 addresses. */
974 exp_cause = peek_real_address_q(DM_EXP_CAUSE_PHY);
975 if (exp_cause & ~4) {
976 printk("DM.EXP_CAUSE had unexpected bits set (=%08lx)\n",
977 (unsigned long)(exp_cause & 0xffffffff));
978 }
979 show_state();
980 /* Clear all DEBUGINT causes */
981 poke_real_address_q(DM_EXP_CAUSE_PHY, 0x0);
982}
diff --git a/arch/sh64/kernel/unwind.c b/arch/sh64/kernel/unwind.c
deleted file mode 100644
index 1214c78e3584..000000000000
--- a/arch/sh64/kernel/unwind.c
+++ /dev/null
@@ -1,326 +0,0 @@
1/*
2 * arch/sh64/kernel/unwind.c
3 *
4 * Copyright (C) 2004 Paul Mundt
5 * Copyright (C) 2004 Richard Curnow
6 *
7 * This file is subject to the terms and conditions of the GNU General Public
8 * License. See the file "COPYING" in the main directory of this archive
9 * for more details.
10 */
11#include <linux/kallsyms.h>
12#include <linux/kernel.h>
13#include <linux/types.h>
14#include <linux/errno.h>
15#include <asm/page.h>
16#include <asm/ptrace.h>
17#include <asm/processor.h>
18#include <asm/io.h>
19
20static unsigned long regcache[63];
21
22/*
23 * Finding the previous stack frame isn't as straightforward as it is
24 * on some other platforms. In the sh64 case, we don't have "linked" stack
25 * frames, so we need to do a bit of work to determine the previous frame,
26 * and in turn, the previous r14/r18 pair.
27 *
28 * There are generally a few cases which determine where we can find out
29 * the r14/r18 values. In the general case, this can be determined by poking
30 * around the prologue of the symbol PC is in (note that we absolutely must
31 * have frame pointer support as well as the kernel symbol table mapped,
32 * otherwise we can't even get this far).
33 *
34 * In other cases, such as the interrupt/exception path, we can poke around
35 * the sp/fp.
36 *
37 * Notably, this entire approach is somewhat error prone, and in the event
38 * that the previous frame cannot be determined, that's all we can do.
39 * Either way, this still leaves us with a more correct backtrace than what
40 * we would be able to come up with by walking the stack (which is garbage
41 * for anything beyond the first frame).
42 * -- PFM.
43 */
44static int lookup_prev_stack_frame(unsigned long fp, unsigned long pc,
45 unsigned long *pprev_fp, unsigned long *pprev_pc,
46 struct pt_regs *regs)
47{
48 const char *sym;
49 char namebuf[128];
50 unsigned long offset;
51 unsigned long prologue = 0;
52 unsigned long fp_displacement = 0;
53 unsigned long fp_prev = 0;
54 unsigned long offset_r14 = 0, offset_r18 = 0;
55 int i, found_prologue_end = 0;
56
57 sym = kallsyms_lookup(pc, NULL, &offset, NULL, namebuf);
58 if (!sym)
59 return -EINVAL;
60
61 prologue = pc - offset;
62 if (!prologue)
63 return -EINVAL;
64
65 /* Validate fp, to avoid risk of dereferencing a bad pointer later.
66 Assume 128Mb since that's the amount of RAM on a Cayman. Modify
67 when there is an SH-5 board with more. */
68 if ((fp < (unsigned long) phys_to_virt(__MEMORY_START)) ||
69 (fp >= (unsigned long)(phys_to_virt(__MEMORY_START)) + 128*1024*1024) ||
70 ((fp & 7) != 0)) {
71 return -EINVAL;
72 }
73
74 /*
75	 * Depth to walk; the limit is completely arbitrary.
76 */
77 for (i = 0; i < 100; i++, prologue += sizeof(unsigned long)) {
78 unsigned long op;
79 u8 major, minor;
80 u8 src, dest, disp;
81
82 op = *(unsigned long *)prologue;
83
84 major = (op >> 26) & 0x3f;
85 src = (op >> 20) & 0x3f;
86 minor = (op >> 16) & 0xf;
87 disp = (op >> 10) & 0x3f;
88 dest = (op >> 4) & 0x3f;
89
90 /*
91 * Stack frame creation happens in a number of ways.. in the
92 * general case when the stack frame is less than 511 bytes,
93 * it's generally created by an addi or addi.l:
94 *
95 * addi/addi.l r15, -FRAME_SIZE, r15
96 *
97 * in the event that the frame size is bigger than this, it's
98 * typically created using a movi/sub pair as follows:
99 *
100 * movi FRAME_SIZE, rX
101 * sub r15, rX, r15
102 */
103
104 switch (major) {
105 case (0x00 >> 2):
106 switch (minor) {
107 case 0x8: /* add.l */
108 case 0x9: /* add */
109 /* Look for r15, r63, r14 */
110 if (src == 15 && disp == 63 && dest == 14)
111 found_prologue_end = 1;
112
113 break;
114 case 0xa: /* sub.l */
115 case 0xb: /* sub */
116 if (src != 15 || dest != 15)
117 continue;
118
119 fp_displacement -= regcache[disp];
120 fp_prev = fp - fp_displacement;
121 break;
122 }
123 break;
124 case (0xa8 >> 2): /* st.l */
125 if (src != 15)
126 continue;
127
128 switch (dest) {
129 case 14:
130 if (offset_r14 || fp_displacement == 0)
131 continue;
132
133 offset_r14 = (u64)(((((s64)op >> 10) & 0x3ff) << 54) >> 54);
134 offset_r14 *= sizeof(unsigned long);
135 offset_r14 += fp_displacement;
136 break;
137 case 18:
138 if (offset_r18 || fp_displacement == 0)
139 continue;
140
141 offset_r18 = (u64)(((((s64)op >> 10) & 0x3ff) << 54) >> 54);
142 offset_r18 *= sizeof(unsigned long);
143 offset_r18 += fp_displacement;
144 break;
145 }
146
147 break;
148 case (0xcc >> 2): /* movi */
149 if (dest >= 63) {
150 printk(KERN_NOTICE "%s: Invalid dest reg %d "
151 "specified in movi handler. Failed "
152 "opcode was 0x%lx: ", __FUNCTION__,
153 dest, op);
154
155 continue;
156 }
157
158 /* Sign extend */
159 regcache[dest] =
160				((((s64)(u64)op >> 10) & 0xffff) << 48) >> 48;
161 break;
162 case (0xd0 >> 2): /* addi */
163 case (0xd4 >> 2): /* addi.l */
164 /* Look for r15, -FRAME_SIZE, r15 */
165 if (src != 15 || dest != 15)
166 continue;
167
168 /* Sign extended frame size.. */
169 fp_displacement +=
170 (u64)(((((s64)op >> 10) & 0x3ff) << 54) >> 54);
171 fp_prev = fp - fp_displacement;
172 break;
173 }
174
175 if (found_prologue_end && offset_r14 && (offset_r18 || *pprev_pc) && fp_prev)
176 break;
177 }
178
179 if (offset_r14 == 0 || fp_prev == 0) {
180 if (!offset_r14)
181 pr_debug("Unable to find r14 offset\n");
182 if (!fp_prev)
183 pr_debug("Unable to find previous fp\n");
184
185 return -EINVAL;
186 }
187
188	/* For the innermost leaf function, there might not be an offset_r18 */
189 if (!*pprev_pc && (offset_r18 == 0))
190 return -EINVAL;
191
192 *pprev_fp = *(unsigned long *)(fp_prev + offset_r14);
193
194 if (offset_r18)
195 *pprev_pc = *(unsigned long *)(fp_prev + offset_r18);
196
197 *pprev_pc &= ~1;
198
199 return 0;
200}
201
202/* Don't put this on the stack since we'll want to call sh64_unwind
203 * when we're close to underflowing the stack anyway. */
204static struct pt_regs here_regs;
205
206extern const char syscall_ret;
207extern const char ret_from_syscall;
208extern const char ret_from_exception;
209extern const char ret_from_irq;
210
211static void sh64_unwind_inner(struct pt_regs *regs);
212
213static void unwind_nested (unsigned long pc, unsigned long fp)
214{
215 if ((fp >= __MEMORY_START) &&
216 ((fp & 7) == 0)) {
217 sh64_unwind_inner((struct pt_regs *) fp);
218 }
219}
220
221static void sh64_unwind_inner(struct pt_regs *regs)
222{
223 unsigned long pc, fp;
224 int ofs = 0;
225 int first_pass;
226
227 pc = regs->pc & ~1;
228 fp = regs->regs[14];
229
230 first_pass = 1;
231 for (;;) {
232 int cond;
233 unsigned long next_fp, next_pc;
234
235 if (pc == ((unsigned long) &syscall_ret & ~1)) {
236 printk("SYSCALL\n");
237 unwind_nested(pc,fp);
238 return;
239 }
240
241 if (pc == ((unsigned long) &ret_from_syscall & ~1)) {
242 printk("SYSCALL (PREEMPTED)\n");
243 unwind_nested(pc,fp);
244 return;
245 }
246
247 /* In this case, the PC is discovered by lookup_prev_stack_frame but
248 it has 4 taken off it to look like the 'caller' */
249 if (pc == ((unsigned long) &ret_from_exception & ~1)) {
250 printk("EXCEPTION\n");
251 unwind_nested(pc,fp);
252 return;
253 }
254
255 if (pc == ((unsigned long) &ret_from_irq & ~1)) {
256 printk("IRQ\n");
257 unwind_nested(pc,fp);
258 return;
259 }
260
261 cond = ((pc >= __MEMORY_START) && (fp >= __MEMORY_START) &&
262 ((pc & 3) == 0) && ((fp & 7) == 0));
263
264 pc -= ofs;
265
266 printk("[<%08lx>] ", pc);
267 print_symbol("%s\n", pc);
268
269 if (first_pass) {
270 /* If the innermost frame is a leaf function, it's
271 * possible that r18 is never saved out to the stack.
272 */
273 next_pc = regs->regs[18];
274 } else {
275 next_pc = 0;
276 }
277
278 if (lookup_prev_stack_frame(fp, pc, &next_fp, &next_pc, regs) == 0) {
279 ofs = sizeof(unsigned long);
280 pc = next_pc & ~1;
281 fp = next_fp;
282 } else {
283 printk("Unable to lookup previous stack frame\n");
284 break;
285 }
286 first_pass = 0;
287 }
288
289 printk("\n");
290
291}
292
293void sh64_unwind(struct pt_regs *regs)
294{
295 if (!regs) {
296 /*
297 * Fetch current regs if we have no other saved state to back
298 * trace from.
299 */
300 regs = &here_regs;
301
302 __asm__ __volatile__ ("ori r14, 0, %0" : "=r" (regs->regs[14]));
303 __asm__ __volatile__ ("ori r15, 0, %0" : "=r" (regs->regs[15]));
304 __asm__ __volatile__ ("ori r18, 0, %0" : "=r" (regs->regs[18]));
305
306 __asm__ __volatile__ ("gettr tr0, %0" : "=r" (regs->tregs[0]));
307 __asm__ __volatile__ ("gettr tr1, %0" : "=r" (regs->tregs[1]));
308 __asm__ __volatile__ ("gettr tr2, %0" : "=r" (regs->tregs[2]));
309 __asm__ __volatile__ ("gettr tr3, %0" : "=r" (regs->tregs[3]));
310 __asm__ __volatile__ ("gettr tr4, %0" : "=r" (regs->tregs[4]));
311 __asm__ __volatile__ ("gettr tr5, %0" : "=r" (regs->tregs[5]));
312 __asm__ __volatile__ ("gettr tr6, %0" : "=r" (regs->tregs[6]));
313 __asm__ __volatile__ ("gettr tr7, %0" : "=r" (regs->tregs[7]));
314
315 __asm__ __volatile__ (
316 "pta 0f, tr0\n\t"
317 "blink tr0, %0\n\t"
318 "0: nop"
319 : "=r" (regs->pc)
320 );
321 }
322
323 printk("\nCall Trace:\n");
324 sh64_unwind_inner(regs);
325}
326
diff --git a/arch/sh64/kernel/vmlinux.lds.S b/arch/sh64/kernel/vmlinux.lds.S
deleted file mode 100644
index f533a064da5f..000000000000
--- a/arch/sh64/kernel/vmlinux.lds.S
+++ /dev/null
@@ -1,140 +0,0 @@
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * arch/sh64/kernel/vmlinux.lds.S
7 *
8 * ld script to make ST50 Linux kernel
9 *
10 * Copyright (C) 2000, 2001 Paolo Alberelli
11 *
12 * benedict.gaster@superh.com: 2nd May 2002
13 * Add definition of empty_zero_page to be the first page of kernel image.
14 *
15 * benedict.gaster@superh.com: 3rd May 2002
16 * Added support for ramdisk, removing statically linked romfs at the same time.
17 *
18 * lethal@linux-sh.org: 9th May 2003
19 * Kill off GLOBAL_NAME() usage and other CDC-isms.
20 *
21 * lethal@linux-sh.org: 19th May 2003
22 * Remove support for ancient toolchains.
23 */
24
25#include <asm/page.h>
26#include <asm/cache.h>
27#include <asm/processor.h>
28#include <asm/thread_info.h>
29
30#define LOAD_OFFSET CONFIG_CACHED_MEMORY_OFFSET
31#include <asm-generic/vmlinux.lds.h>
32
33OUTPUT_ARCH(sh:sh5)
34
35#define C_PHYS(x) AT (ADDR(x) - LOAD_OFFSET)
36
37ENTRY(__start)
38SECTIONS
39{
40 . = CONFIG_CACHED_MEMORY_OFFSET + CONFIG_MEMORY_START + PAGE_SIZE;
41 _text = .; /* Text and read-only data */
42 text = .; /* Text and read-only data */
43
44 .empty_zero_page : C_PHYS(.empty_zero_page) {
45 *(.empty_zero_page)
46 } = 0
47
48 .text : C_PHYS(.text) {
49 *(.text.head)
50 TEXT_TEXT
51 *(.text64)
52 *(.text..SHmedia32)
53 SCHED_TEXT
54 LOCK_TEXT
55 *(.fixup)
56 *(.gnu.warning)
57#ifdef CONFIG_LITTLE_ENDIAN
58 } = 0x6ff0fff0
59#else
60 } = 0xf0fff06f
61#endif
62
63 /* We likely want __ex_table to be Cache Line aligned */
64 . = ALIGN(L1_CACHE_BYTES); /* Exception table */
65 __start___ex_table = .;
66 __ex_table : C_PHYS(__ex_table) { *(__ex_table) }
67 __stop___ex_table = .;
68
69 _etext = .; /* End of text section */
70
71 NOTES
72
73 RODATA
74
75 .data : C_PHYS(.data) { /* Data */
76 DATA_DATA
77 CONSTRUCTORS
78 }
79
80 . = ALIGN(PAGE_SIZE);
81 .data.page_aligned : C_PHYS(.data.page_aligned) { *(.data.page_aligned) }
82
83 PERCPU(PAGE_SIZE)
84
85 . = ALIGN(L1_CACHE_BYTES);
86 .data.cacheline_aligned : C_PHYS(.data.cacheline_aligned) { *(.data.cacheline_aligned) }
87
88 _edata = .; /* End of data section */
89
90 . = ALIGN(THREAD_SIZE); /* init_task: structure size aligned */
91 .data.init_task : C_PHYS(.data.init_task) { *(.data.init_task) }
92
93 . = ALIGN(PAGE_SIZE); /* Init code and data */
94 __init_begin = .;
95 _sinittext = .;
96 .init.text : C_PHYS(.init.text) { *(.init.text) }
97 _einittext = .;
98 .init.data : C_PHYS(.init.data) { *(.init.data) }
99 . = ALIGN(L1_CACHE_BYTES); /* Better if Cache Line aligned */
100 __setup_start = .;
101 .init.setup : C_PHYS(.init.setup) { *(.init.setup) }
102 __setup_end = .;
103 __initcall_start = .;
104 .initcall.init : C_PHYS(.initcall.init) {
105 INITCALLS
106 }
107 __initcall_end = .;
108 __con_initcall_start = .;
109 .con_initcall.init : C_PHYS(.con_initcall.init) { *(.con_initcall.init) }
110 __con_initcall_end = .;
111 SECURITY_INIT
112
113#ifdef CONFIG_BLK_DEV_INITRD
114 __initramfs_start = .;
115 .init.ramfs : C_PHYS(.init.ramfs) { *(.init.ramfs) }
116 __initramfs_end = .;
117#endif
118
119 . = ALIGN(PAGE_SIZE);
120 __init_end = .;
121
122 /* Align to the biggest single data representation, head and tail */
123 . = ALIGN(8);
124 __bss_start = .; /* BSS */
125 .bss : C_PHYS(.bss) {
126 *(.bss)
127 }
128 . = ALIGN(8);
129 _end = . ;
130
131 /* Sections to be discarded */
132 /DISCARD/ : {
133 *(.exit.text)
134 *(.exit.data)
135 *(.exitcall.exit)
136 }
137
138 STABS_DEBUG
139 DWARF_DEBUG
140}