Diffstat (limited to 'arch/sh/kernel')
-rw-r--r--  arch/sh/kernel/Makefile | 30
-rw-r--r--  arch/sh/kernel/Makefile_32 | 26
-rw-r--r--  arch/sh/kernel/Makefile_64 | 22
-rw-r--r--  arch/sh/kernel/cpu/Makefile | 6
-rw-r--r--  arch/sh/kernel/cpu/init.c | 74
-rw-r--r--  arch/sh/kernel/cpu/irq/Makefile | 4
-rw-r--r--  arch/sh/kernel/cpu/irq/intc-sh5.c | 257
-rw-r--r--  arch/sh/kernel/cpu/irq/intc.c | 31
-rw-r--r--  arch/sh/kernel/cpu/sh2/entry.S | 19
-rw-r--r--  arch/sh/kernel/cpu/sh2/setup-sh7619.c | 2
-rw-r--r--  arch/sh/kernel/cpu/sh2a/Makefile | 4
-rw-r--r--  arch/sh/kernel/cpu/sh2a/clock-sh7203.c | 89
-rw-r--r--  arch/sh/kernel/cpu/sh2a/fpu.c | 633
-rw-r--r--  arch/sh/kernel/cpu/sh2a/probe.c | 22
-rw-r--r--  arch/sh/kernel/cpu/sh2a/setup-sh7203.c | 319
-rw-r--r--  arch/sh/kernel/cpu/sh2a/setup-sh7206.c | 2
-rw-r--r--  arch/sh/kernel/cpu/sh3/Makefile | 2
-rw-r--r--  arch/sh/kernel/cpu/sh3/clock-sh7712.c | 71
-rw-r--r--  arch/sh/kernel/cpu/sh3/entry.S | 24
-rw-r--r--  arch/sh/kernel/cpu/sh3/ex.S | 2
-rw-r--r--  arch/sh/kernel/cpu/sh3/probe.c | 9
-rw-r--r--  arch/sh/kernel/cpu/sh3/setup-sh7705.c | 10
-rw-r--r--  arch/sh/kernel/cpu/sh3/setup-sh770x.c | 11
-rw-r--r--  arch/sh/kernel/cpu/sh3/setup-sh7710.c | 16
-rw-r--r--  arch/sh/kernel/cpu/sh3/setup-sh7720.c | 78
-rw-r--r--  arch/sh/kernel/cpu/sh4/Makefile | 2
-rw-r--r--  arch/sh/kernel/cpu/sh4/fpu.c | 537
-rw-r--r--  arch/sh/kernel/cpu/sh4/probe.c | 2
-rw-r--r--  arch/sh/kernel/cpu/sh4/setup-sh7750.c | 18
-rw-r--r--  arch/sh/kernel/cpu/sh4/setup-sh7760.c | 13
-rw-r--r--  arch/sh/kernel/cpu/sh4/softfloat.c | 892
-rw-r--r--  arch/sh/kernel/cpu/sh4a/Makefile | 2
-rw-r--r--  arch/sh/kernel/cpu/sh4a/clock-sh7763.c | 126
-rw-r--r--  arch/sh/kernel/cpu/sh4a/setup-sh7722.c | 10
-rw-r--r--  arch/sh/kernel/cpu/sh4a/setup-sh7763.c | 390
-rw-r--r--  arch/sh/kernel/cpu/sh4a/setup-sh7780.c | 13
-rw-r--r--  arch/sh/kernel/cpu/sh4a/setup-sh7785.c | 19
-rw-r--r--  arch/sh/kernel/cpu/sh4a/setup-shx3.c | 14
-rw-r--r--  arch/sh/kernel/cpu/sh5/Makefile | 7
-rw-r--r--  arch/sh/kernel/cpu/sh5/entry.S | 2101
-rw-r--r--  arch/sh/kernel/cpu/sh5/fpu.c | 166
-rw-r--r--  arch/sh/kernel/cpu/sh5/probe.c | 76
-rw-r--r--  arch/sh/kernel/cpu/sh5/switchto.S | 198
-rw-r--r--  arch/sh/kernel/cpu/sh5/unwind.c | 326
-rw-r--r--  arch/sh/kernel/dump_task.c | 31
-rw-r--r--  arch/sh/kernel/early_printk.c | 12
-rw-r--r--  arch/sh/kernel/entry-common.S | 27
-rw-r--r--  arch/sh/kernel/head_32.S (renamed from arch/sh/kernel/head.S) | 6
-rw-r--r--  arch/sh/kernel/head_64.S | 356
-rw-r--r--  arch/sh/kernel/init_task.c | 4
-rw-r--r--  arch/sh/kernel/io.c | 67
-rw-r--r--  arch/sh/kernel/module.c | 62
-rw-r--r--  arch/sh/kernel/process_32.c (renamed from arch/sh/kernel/process.c) | 93
-rw-r--r--  arch/sh/kernel/process_64.c | 701
-rw-r--r--  arch/sh/kernel/ptrace_32.c (renamed from arch/sh/kernel/ptrace.c) | 21
-rw-r--r--  arch/sh/kernel/ptrace_64.c | 341
-rw-r--r--  arch/sh/kernel/setup.c | 39
-rw-r--r--  arch/sh/kernel/sh_ksyms_32.c (renamed from arch/sh/kernel/sh_ksyms.c) | 0
-rw-r--r--  arch/sh/kernel/sh_ksyms_64.c | 55
-rw-r--r--  arch/sh/kernel/signal_32.c (renamed from arch/sh/kernel/signal.c) | 18
-rw-r--r--  arch/sh/kernel/signal_64.c | 751
-rw-r--r--  arch/sh/kernel/sys_sh.c | 100
-rw-r--r--  arch/sh/kernel/sys_sh32.c | 84
-rw-r--r--  arch/sh/kernel/sys_sh64.c | 66
-rw-r--r--  arch/sh/kernel/syscalls_32.S (renamed from arch/sh/kernel/syscalls.S) | 0
-rw-r--r--  arch/sh/kernel/syscalls_64.S | 381
-rw-r--r--  arch/sh/kernel/time_32.c (renamed from arch/sh/kernel/time.c) | 0
-rw-r--r--  arch/sh/kernel/time_64.c | 519
-rw-r--r--  arch/sh/kernel/timers/timer-cmt.c | 4
-rw-r--r--  arch/sh/kernel/timers/timer-tmu.c | 1
-rw-r--r--  arch/sh/kernel/traps.c | 949
-rw-r--r--  arch/sh/kernel/traps_32.c | 919
-rw-r--r--  arch/sh/kernel/traps_64.c | 975
-rw-r--r--  arch/sh/kernel/vmlinux.lds.S | 139
-rw-r--r--  arch/sh/kernel/vmlinux_32.lds.S | 152
-rw-r--r--  arch/sh/kernel/vmlinux_64.lds.S | 164
76 files changed, 11952 insertions(+), 1760 deletions(-)
diff --git a/arch/sh/kernel/Makefile b/arch/sh/kernel/Makefile
index 4b81d9c47b00..349d833deab5 100644
--- a/arch/sh/kernel/Makefile
+++ b/arch/sh/kernel/Makefile
@@ -1,25 +1,5 @@
-#
-# Makefile for the Linux/SuperH kernel.
-#
-
-extra-y	:= head.o init_task.o vmlinux.lds
-
-obj-y	:= debugtraps.o io.o io_generic.o irq.o machvec.o process.o ptrace.o \
-	   semaphore.o setup.o signal.o sys_sh.o syscalls.o \
-	   time.o topology.o traps.o
-
-obj-y				+= cpu/ timers/
-obj-$(CONFIG_VSYSCALL)		+= vsyscall/
-obj-$(CONFIG_SMP)		+= smp.o
-obj-$(CONFIG_CF_ENABLER)	+= cf-enabler.o
-obj-$(CONFIG_SH_STANDARD_BIOS)	+= sh_bios.o
-obj-$(CONFIG_SH_KGDB)		+= kgdb_stub.o kgdb_jmp.o
-obj-$(CONFIG_SH_CPU_FREQ)	+= cpufreq.o
-obj-$(CONFIG_MODULES)		+= sh_ksyms.o module.o
-obj-$(CONFIG_EARLY_PRINTK)	+= early_printk.o
-obj-$(CONFIG_KEXEC)		+= machine_kexec.o relocate_kernel.o
-obj-$(CONFIG_CRASH_DUMP)	+= crash_dump.o
-obj-$(CONFIG_PM)		+= pm.o
-obj-$(CONFIG_STACKTRACE)	+= stacktrace.o
-
-EXTRA_CFLAGS += -Werror
+ifeq ($(CONFIG_SUPERH32),y)
+include ${srctree}/arch/sh/kernel/Makefile_32
+else
+include ${srctree}/arch/sh/kernel/Makefile_64
+endif
diff --git a/arch/sh/kernel/Makefile_32 b/arch/sh/kernel/Makefile_32
new file mode 100644
index 000000000000..c89289831053
--- /dev/null
+++ b/arch/sh/kernel/Makefile_32
@@ -0,0 +1,26 @@
+#
+# Makefile for the Linux/SuperH kernel.
+#
+
+extra-y	:= head_32.o init_task.o vmlinux.lds
+
+obj-y	:= debugtraps.o io.o io_generic.o irq.o machvec.o process_32.o \
+	   ptrace_32.o semaphore.o setup.o signal_32.o sys_sh.o sys_sh32.o \
+	   syscalls_32.o time_32.o topology.o traps.o traps_32.o
+
+obj-y				+= cpu/ timers/
+obj-$(CONFIG_VSYSCALL)		+= vsyscall/
+obj-$(CONFIG_SMP)		+= smp.o
+obj-$(CONFIG_CF_ENABLER)	+= cf-enabler.o
+obj-$(CONFIG_SH_STANDARD_BIOS)	+= sh_bios.o
+obj-$(CONFIG_SH_KGDB)		+= kgdb_stub.o kgdb_jmp.o
+obj-$(CONFIG_SH_CPU_FREQ)	+= cpufreq.o
+obj-$(CONFIG_MODULES)		+= sh_ksyms_32.o module.o
+obj-$(CONFIG_EARLY_PRINTK)	+= early_printk.o
+obj-$(CONFIG_KEXEC)		+= machine_kexec.o relocate_kernel.o
+obj-$(CONFIG_CRASH_DUMP)	+= crash_dump.o
+obj-$(CONFIG_PM)		+= pm.o
+obj-$(CONFIG_STACKTRACE)	+= stacktrace.o
+obj-$(CONFIG_BINFMT_ELF)	+= dump_task.o
+
+EXTRA_CFLAGS += -Werror
diff --git a/arch/sh/kernel/Makefile_64 b/arch/sh/kernel/Makefile_64
new file mode 100644
index 000000000000..1ef21cc087f3
--- /dev/null
+++ b/arch/sh/kernel/Makefile_64
@@ -0,0 +1,22 @@
+extra-y	:= head_64.o init_task.o vmlinux.lds
+
+obj-y	:= debugtraps.o io.o io_generic.o irq.o machvec.o process_64.o \
+	   ptrace_64.o semaphore.o setup.o signal_64.o sys_sh.o sys_sh64.o \
+	   syscalls_64.o time_64.o topology.o traps.o traps_64.o
+
+obj-y				+= cpu/ timers/
+obj-$(CONFIG_VSYSCALL)		+= vsyscall/
+obj-$(CONFIG_SMP)		+= smp.o
+obj-$(CONFIG_CF_ENABLER)	+= cf-enabler.o
+obj-$(CONFIG_SH_STANDARD_BIOS)	+= sh_bios.o
+obj-$(CONFIG_SH_KGDB)		+= kgdb_stub.o kgdb_jmp.o
+obj-$(CONFIG_SH_CPU_FREQ)	+= cpufreq.o
+obj-$(CONFIG_MODULES)		+= sh_ksyms_64.o module.o
+obj-$(CONFIG_EARLY_PRINTK)	+= early_printk.o
+obj-$(CONFIG_KEXEC)		+= machine_kexec.o relocate_kernel.o
+obj-$(CONFIG_CRASH_DUMP)	+= crash_dump.o
+obj-$(CONFIG_PM)		+= pm.o
+obj-$(CONFIG_STACKTRACE)	+= stacktrace.o
+obj-$(CONFIG_BINFMT_ELF)	+= dump_task.o
+
+EXTRA_CFLAGS += -Werror
diff --git a/arch/sh/kernel/cpu/Makefile b/arch/sh/kernel/cpu/Makefile
index d055a3ea6b4b..f471d242774e 100644
--- a/arch/sh/kernel/cpu/Makefile
+++ b/arch/sh/kernel/cpu/Makefile
@@ -6,8 +6,14 @@ obj-$(CONFIG_CPU_SH2) = sh2/
 obj-$(CONFIG_CPU_SH2A)	= sh2a/
 obj-$(CONFIG_CPU_SH3)	= sh3/
 obj-$(CONFIG_CPU_SH4)	= sh4/
+obj-$(CONFIG_CPU_SH5)	= sh5/
+
+# Special cases for family ancestry.
+
 obj-$(CONFIG_CPU_SH4A)	+= sh4a/
 
+# Common interfaces.
+
 obj-$(CONFIG_UBC_WAKEUP)	+= ubc.o
 obj-$(CONFIG_SH_ADC)		+= adc.o
 
diff --git a/arch/sh/kernel/cpu/init.c b/arch/sh/kernel/cpu/init.c
index c217c4bf0085..80a31329ead9 100644
--- a/arch/sh/kernel/cpu/init.c
+++ b/arch/sh/kernel/cpu/init.c
@@ -13,6 +13,7 @@
 #include <linux/init.h>
 #include <linux/kernel.h>
 #include <linux/mm.h>
+#include <linux/log2.h>
 #include <asm/mmu_context.h>
 #include <asm/processor.h>
 #include <asm/uaccess.h>
@@ -20,9 +21,12 @@
 #include <asm/system.h>
 #include <asm/cacheflush.h>
 #include <asm/cache.h>
+#include <asm/elf.h>
 #include <asm/io.h>
-#include <asm/ubc.h>
 #include <asm/smp.h>
+#ifdef CONFIG_SUPERH32
+#include <asm/ubc.h>
+#endif
 
 /*
  * Generic wrapper for command line arguments to disable on-chip
@@ -61,25 +65,12 @@ static void __init speculative_execution_init(void)
 /*
  * Generic first-level cache init
  */
-static void __init cache_init(void)
+#ifdef CONFIG_SUPERH32
+static void __uses_jump_to_uncached cache_init(void)
 {
 	unsigned long ccr, flags;
 
-	/* First setup the rest of the I-cache info */
-	current_cpu_data.icache.entry_mask = current_cpu_data.icache.way_incr -
-				      current_cpu_data.icache.linesz;
-
-	current_cpu_data.icache.way_size = current_cpu_data.icache.sets *
-				    current_cpu_data.icache.linesz;
-
-	/* And the D-cache too */
-	current_cpu_data.dcache.entry_mask = current_cpu_data.dcache.way_incr -
-				      current_cpu_data.dcache.linesz;
-
-	current_cpu_data.dcache.way_size = current_cpu_data.dcache.sets *
-				    current_cpu_data.dcache.linesz;
-
-	jump_to_P2();
+	jump_to_uncached();
 	ccr = ctrl_inl(CCR);
 
 	/*
@@ -156,7 +147,31 @@ static void __init cache_init(void)
 #endif
 
 	ctrl_outl(flags, CCR);
-	back_to_P1();
+	back_to_cached();
+}
+#else
+#define cache_init()	do { } while (0)
+#endif
+
+#define CSHAPE(totalsize, linesize, assoc) \
+	((totalsize & ~0xff) | (linesize << 4) | assoc)
+
+#define CACHE_DESC_SHAPE(desc)	\
+	CSHAPE((desc).way_size * (desc).ways, ilog2((desc).linesz), (desc).ways)
+
+static void detect_cache_shape(void)
+{
+	l1d_cache_shape = CACHE_DESC_SHAPE(current_cpu_data.dcache);
+
+	if (current_cpu_data.dcache.flags & SH_CACHE_COMBINED)
+		l1i_cache_shape = l1d_cache_shape;
+	else
+		l1i_cache_shape = CACHE_DESC_SHAPE(current_cpu_data.icache);
+
+	if (current_cpu_data.flags & CPU_HAS_L2_CACHE)
+		l2_cache_shape = CACHE_DESC_SHAPE(current_cpu_data.scache);
+	else
+		l2_cache_shape = -1; /* No S-cache */
 }
 
 #ifdef CONFIG_SH_DSP
@@ -228,14 +243,32 @@ asmlinkage void __cpuinit sh_cpu_init(void)
 	if (current_cpu_data.type == CPU_SH_NONE)
 		panic("Unknown CPU");
 
+	/* First setup the rest of the I-cache info */
+	current_cpu_data.icache.entry_mask = current_cpu_data.icache.way_incr -
+				      current_cpu_data.icache.linesz;
+
+	current_cpu_data.icache.way_size = current_cpu_data.icache.sets *
+				    current_cpu_data.icache.linesz;
+
+	/* And the D-cache too */
+	current_cpu_data.dcache.entry_mask = current_cpu_data.dcache.way_incr -
+				      current_cpu_data.dcache.linesz;
+
+	current_cpu_data.dcache.way_size = current_cpu_data.dcache.sets *
+				    current_cpu_data.dcache.linesz;
+
 	/* Init the cache */
 	cache_init();
 
-	if (raw_smp_processor_id() == 0)
+	if (raw_smp_processor_id() == 0) {
 		shm_align_mask = max_t(unsigned long,
 				       current_cpu_data.dcache.way_size - 1,
 				       PAGE_SIZE - 1);
 
+		/* Boot CPU sets the cache shape */
+		detect_cache_shape();
+	}
+
 	/* Disable the FPU */
 	if (fpu_disabled) {
 		printk("FPU Disabled\n");
@@ -273,7 +306,10 @@ asmlinkage void __cpuinit sh_cpu_init(void)
 	 * like PTRACE_SINGLESTEP or doing hardware watchpoints in GDB.  So ..
 	 * we wake it up and hope that all is well.
 	 */
+#ifdef CONFIG_SUPERH32
 	if (raw_smp_processor_id() == 0)
 		ubc_wakeup();
+#endif
+
 	speculative_execution_init();
 }
diff --git a/arch/sh/kernel/cpu/irq/Makefile b/arch/sh/kernel/cpu/irq/Makefile
index 8da8e178f09c..cc1836e47a5d 100644
--- a/arch/sh/kernel/cpu/irq/Makefile
+++ b/arch/sh/kernel/cpu/irq/Makefile
@@ -1,7 +1,9 @@
 #
 # Makefile for the Linux/SuperH CPU-specifc IRQ handlers.
 #
-obj-y	+= imask.o intc.o
+obj-y	+= intc.o
 
+obj-$(CONFIG_SUPERH32)			+= imask.o
+obj-$(CONFIG_CPU_SH5)			+= intc-sh5.o
 obj-$(CONFIG_CPU_HAS_IPR_IRQ)		+= ipr.o
 obj-$(CONFIG_CPU_HAS_MASKREG_IRQ)	+= maskreg.o
diff --git a/arch/sh/kernel/cpu/irq/intc-sh5.c b/arch/sh/kernel/cpu/irq/intc-sh5.c
new file mode 100644
index 000000000000..43ee7a9a4f0b
--- /dev/null
+++ b/arch/sh/kernel/cpu/irq/intc-sh5.c
@@ -0,0 +1,257 @@
+/*
+ * arch/sh/kernel/cpu/irq/intc-sh5.c
+ *
+ * Interrupt Controller support for SH5 INTC.
+ *
+ * Copyright (C) 2000, 2001  Paolo Alberelli
+ * Copyright (C) 2003  Paul Mundt
+ *
+ * Per-interrupt selective. IRLM=0 (Fixed priority) is not
+ * supported being useless without a cascaded interrupt
+ * controller.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/bitops.h>
+#include <asm/cpu/irq.h>
+#include <asm/page.h>
+
+/*
+ * Maybe the generic Peripheral block could move to a more
+ * generic include file. INTC Block will be defined here
+ * and only here to make INTC self-contained in a single
+ * file.
+ */
+#define INTC_BLOCK_OFFSET	0x01000000
+
+/* Base */
+#define INTC_BASE		PHYS_PERIPHERAL_BLOCK + \
+				INTC_BLOCK_OFFSET
+
+/* Address */
+#define INTC_ICR_SET		(intc_virt + 0x0)
+#define INTC_ICR_CLEAR		(intc_virt + 0x8)
+#define INTC_INTPRI_0		(intc_virt + 0x10)
+#define INTC_INTSRC_0		(intc_virt + 0x50)
+#define INTC_INTSRC_1		(intc_virt + 0x58)
+#define INTC_INTREQ_0		(intc_virt + 0x60)
+#define INTC_INTREQ_1		(intc_virt + 0x68)
+#define INTC_INTENB_0		(intc_virt + 0x70)
+#define INTC_INTENB_1		(intc_virt + 0x78)
+#define INTC_INTDSB_0		(intc_virt + 0x80)
+#define INTC_INTDSB_1		(intc_virt + 0x88)
+
+#define INTC_ICR_IRLM		0x1
+#define INTC_INTPRI_PREGS	8	/* 8 Priority Registers */
+#define INTC_INTPRI_PPREG	8	/* 8 Priorities per Register */
+
+
+/*
+ * Mapper between the vector ordinal and the IRQ number
+ * passed to kernel/device drivers.
+ */
+int intc_evt_to_irq[(0xE20/0x20)+1] = {
+	-1, -1, -1, -1, -1, -1, -1, -1,	/* 0x000 - 0x0E0 */
+	-1, -1, -1, -1, -1, -1, -1, -1,	/* 0x100 - 0x1E0 */
+	 0,  0,  0,  0,  0,  1,  0,  0,	/* 0x200 - 0x2E0 */
+	 2,  0,  0,  3,  0,  0,  0, -1,	/* 0x300 - 0x3E0 */
+	32, 33, 34, 35, 36, 37, 38, -1,	/* 0x400 - 0x4E0 */
+	-1, -1, -1, 63, -1, -1, -1, -1,	/* 0x500 - 0x5E0 */
+	-1, -1, 18, 19, 20, 21, 22, -1,	/* 0x600 - 0x6E0 */
+	39, 40, 41, 42, -1, -1, -1, -1,	/* 0x700 - 0x7E0 */
+	 4,  5,  6,  7, -1, -1, -1, -1,	/* 0x800 - 0x8E0 */
+	-1, -1, -1, -1, -1, -1, -1, -1,	/* 0x900 - 0x9E0 */
+	12, 13, 14, 15, 16, 17, -1, -1,	/* 0xA00 - 0xAE0 */
+	-1, -1, -1, -1, -1, -1, -1, -1,	/* 0xB00 - 0xBE0 */
+	-1, -1, -1, -1, -1, -1, -1, -1,	/* 0xC00 - 0xCE0 */
+	-1, -1, -1, -1, -1, -1, -1, -1,	/* 0xD00 - 0xDE0 */
+	-1, -1				/* 0xE00 - 0xE20 */
+};
+
+/*
+ * Opposite mapper.
+ */
+static int IRQ_to_vectorN[NR_INTC_IRQS] = {
+	0x12, 0x15, 0x18, 0x1B, 0x40, 0x41, 0x42, 0x43, /*  0- 7 */
+	  -1,   -1,   -1,   -1, 0x50, 0x51, 0x52, 0x53, /*  8-15 */
+	0x54, 0x55, 0x32, 0x33, 0x34, 0x35, 0x36,   -1, /* 16-23 */
+	  -1,   -1,   -1,   -1,   -1,   -1,   -1,   -1, /* 24-31 */
+	0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x38, /* 32-39 */
+	0x39, 0x3A, 0x3B,   -1,   -1,   -1,   -1,   -1, /* 40-47 */
+	  -1,   -1,   -1,   -1,   -1,   -1,   -1,   -1, /* 48-55 */
+	  -1,   -1,   -1,   -1,   -1,   -1,   -1, 0x2B, /* 56-63 */
+
+};
+
+static unsigned long intc_virt;
+
+static unsigned int startup_intc_irq(unsigned int irq);
+static void shutdown_intc_irq(unsigned int irq);
+static void enable_intc_irq(unsigned int irq);
+static void disable_intc_irq(unsigned int irq);
+static void mask_and_ack_intc(unsigned int);
+static void end_intc_irq(unsigned int irq);
+
+static struct hw_interrupt_type intc_irq_type = {
+	.typename = "INTC",
+	.startup = startup_intc_irq,
+	.shutdown = shutdown_intc_irq,
+	.enable = enable_intc_irq,
+	.disable = disable_intc_irq,
+	.ack = mask_and_ack_intc,
+	.end = end_intc_irq
+};
+
+static int irlm;		/* IRL mode */
+
+static unsigned int startup_intc_irq(unsigned int irq)
+{
+	enable_intc_irq(irq);
+	return 0; /* never anything pending */
+}
+
+static void shutdown_intc_irq(unsigned int irq)
+{
+	disable_intc_irq(irq);
+}
+
+static void enable_intc_irq(unsigned int irq)
+{
+	unsigned long reg;
+	unsigned long bitmask;
+
+	if ((irq <= IRQ_IRL3) && (irlm == NO_PRIORITY))
+		printk("Trying to use straight IRL0-3 with an encoding platform.\n");
+
+	if (irq < 32) {
+		reg = INTC_INTENB_0;
+		bitmask = 1 << irq;
+	} else {
+		reg = INTC_INTENB_1;
+		bitmask = 1 << (irq - 32);
+	}
+
+	ctrl_outl(bitmask, reg);
+}
+
+static void disable_intc_irq(unsigned int irq)
+{
+	unsigned long reg;
+	unsigned long bitmask;
+
+	if (irq < 32) {
+		reg = INTC_INTDSB_0;
+		bitmask = 1 << irq;
+	} else {
+		reg = INTC_INTDSB_1;
+		bitmask = 1 << (irq - 32);
+	}
+
+	ctrl_outl(bitmask, reg);
+}
+
+static void mask_and_ack_intc(unsigned int irq)
+{
+	disable_intc_irq(irq);
+}
+
+static void end_intc_irq(unsigned int irq)
+{
+	enable_intc_irq(irq);
+}
+
+/* For future use, if we ever support IRLM=0) */
+void make_intc_irq(unsigned int irq)
+{
+	disable_irq_nosync(irq);
+	irq_desc[irq].chip = &intc_irq_type;
+	disable_intc_irq(irq);
+}
+
+#if defined(CONFIG_PROC_FS) && defined(CONFIG_SYSCTL)
+int intc_irq_describe(char* p, int irq)
+{
+	if (irq < NR_INTC_IRQS)
+		return sprintf(p, "(0x%3x)", IRQ_to_vectorN[irq]*0x20);
+	else
+		return 0;
+}
+#endif
+
+void __init plat_irq_setup(void)
+{
+	unsigned long long __dummy0, __dummy1=~0x00000000100000f0;
+	unsigned long reg;
+	unsigned long data;
+	int i;
+
+	intc_virt = onchip_remap(INTC_BASE, 1024, "INTC");
+	if (!intc_virt) {
+		panic("Unable to remap INTC\n");
+	}
+
+
+	/* Set default: per-line enable/disable, priority driven ack/eoi */
+	for (i = 0; i < NR_INTC_IRQS; i++) {
+		if (platform_int_priority[i] != NO_PRIORITY) {
+			irq_desc[i].chip = &intc_irq_type;
+		}
+	}
+
+
+	/* Disable all interrupts and set all priorities to 0 to avoid trouble */
+	ctrl_outl(-1, INTC_INTDSB_0);
+	ctrl_outl(-1, INTC_INTDSB_1);
+
+	for (reg = INTC_INTPRI_0, i = 0; i < INTC_INTPRI_PREGS; i++, reg += 8)
+		ctrl_outl( NO_PRIORITY, reg);
+
+
+	/* Set IRLM */
+	/* If all the priorities are set to 'no priority', then
+	 * assume we are using encoded mode.
+	 */
+	irlm = platform_int_priority[IRQ_IRL0] + platform_int_priority[IRQ_IRL1] + \
+		platform_int_priority[IRQ_IRL2] + platform_int_priority[IRQ_IRL3];
+
+	if (irlm == NO_PRIORITY) {
+		/* IRLM = 0 */
+		reg = INTC_ICR_CLEAR;
+		i = IRQ_INTA;
+		printk("Trying to use encoded IRL0-3. IRLs unsupported.\n");
+	} else {
+		/* IRLM = 1 */
+		reg = INTC_ICR_SET;
+		i = IRQ_IRL0;
+	}
+	ctrl_outl(INTC_ICR_IRLM, reg);
+
+	/* Set interrupt priorities according to platform description */
+	for (data = 0, reg = INTC_INTPRI_0; i < NR_INTC_IRQS; i++) {
+		data |= platform_int_priority[i] << ((i % INTC_INTPRI_PPREG) * 4);
+		if ((i % INTC_INTPRI_PPREG) == (INTC_INTPRI_PPREG - 1)) {
+			/* Upon the 7th, set Priority Register */
+			ctrl_outl(data, reg);
+			data = 0;
+			reg += 8;
+		}
+	}
+
+	/*
+	 * And now let interrupts come in.
+	 * sti() is not enough, we need to
+	 * lower priority, too.
+	 */
+	__asm__ __volatile__("getcon	" __SR ", %0\n\t"
+			     "and	%0, %1, %0\n\t"
+			     "putcon	%0, " __SR "\n\t"
+			     : "=&r" (__dummy0)
+			     : "r" (__dummy1));
+}
diff --git a/arch/sh/kernel/cpu/irq/intc.c b/arch/sh/kernel/cpu/irq/intc.c
index 6ac018c15e03..84806b2027f8 100644
--- a/arch/sh/kernel/cpu/irq/intc.c
+++ b/arch/sh/kernel/cpu/irq/intc.c
@@ -335,31 +335,6 @@ static intc_enum __init intc_grp_id(struct intc_desc *desc,
 	return 0;
 }
 
-static unsigned int __init intc_prio_value(struct intc_desc *desc,
-					   intc_enum enum_id, int do_grps)
-{
-	struct intc_prio *p = desc->priorities;
-	unsigned int i;
-
-	for (i = 0; p && enum_id && i < desc->nr_priorities; i++) {
-		p = desc->priorities + i;
-
-		if (p->enum_id != enum_id)
-			continue;
-
-		return p->priority;
-	}
-
-	if (do_grps)
-		return intc_prio_value(desc, intc_grp_id(desc, enum_id), 0);
-
-	/* default to the lowest priority possible if no priority is set
-	 * - this needs to be at least 2 for 5-bit priorities on 7780
-	 */
-
-	return 2;
-}
-
 static unsigned int __init intc_mask_data(struct intc_desc *desc,
 					  struct intc_desc_int *d,
 					  intc_enum enum_id, int do_grps)
@@ -518,8 +493,10 @@ static void __init intc_register_irq(struct intc_desc *desc,
 				      handle_level_irq, "level");
 	set_irq_chip_data(irq, (void *)data[primary]);
 
-	/* record the desired priority level */
-	intc_prio_level[irq] = intc_prio_value(desc, enum_id, 1);
+	/* set priority level
+	 * - this needs to be at least 2 for 5-bit priorities on 7780
+	 */
+	intc_prio_level[irq] = 2;
 
 	/* enable secondary masking method if present */
 	if (data[!primary])
diff --git a/arch/sh/kernel/cpu/sh2/entry.S b/arch/sh/kernel/cpu/sh2/entry.S
index ee8f1fe84b08..7a26569e7956 100644
--- a/arch/sh/kernel/cpu/sh2/entry.S
+++ b/arch/sh/kernel/cpu/sh2/entry.S
@@ -149,6 +149,14 @@ ENTRY(exception_handler)
 	mov	#32,r8
 	cmp/hs	r8,r9
 	bt	trap_entry	! 64 > vec >= 32  is trap
+
+#if defined(CONFIG_SH_FPU)
+	mov	#13,r8
+	cmp/eq	r8,r9
+	bt	10f		! fpu
+	nop
+#endif
+
 	mov.l	4f,r8
 	mov	r9,r4
 	shll2	r9
@@ -158,6 +166,10 @@ ENTRY(exception_handler)
 	cmp/eq	r9,r8
 	bf	3f
 	mov.l	8f,r8		! unhandled exception
+#if defined(CONFIG_SH_FPU)
+10:
+	mov.l	9f, r8		! unhandled exception
+#endif
 3:
 	mov.l	5f,r10
 	jmp	@r8
@@ -177,7 +189,10 @@ interrupt_entry:
 6:	.long	ret_from_irq
 7:	.long	do_IRQ
 8:	.long	do_exception_error
-
+#ifdef CONFIG_SH_FPU
+9:	.long	fpu_error_trap_handler
+#endif
+
 trap_entry:
 	mov	#0x30,r8
 	cmp/ge	r8,r9		! vector 0x20-0x2f is systemcall
@@ -250,7 +265,7 @@ ENTRY(sh_bios_handler)
 1:	.long	gdb_vbr_vector
 #endif /* CONFIG_SH_STANDARD_BIOS */
 
-ENTRY(address_error_handler)
+ENTRY(address_error_trap_handler)
 	mov	r15,r4		! regs
 	add	#4,r4
 	mov	#OFF_PC,r0
diff --git a/arch/sh/kernel/cpu/sh2/setup-sh7619.c b/arch/sh/kernel/cpu/sh2/setup-sh7619.c
index ec6adc3f306f..b230eb278cef 100644
--- a/arch/sh/kernel/cpu/sh2/setup-sh7619.c
+++ b/arch/sh/kernel/cpu/sh2/setup-sh7619.c
@@ -65,7 +65,7 @@ static struct intc_prio_reg prio_registers[] __initdata = {
 };
 
 static DECLARE_INTC_DESC(intc_desc, "sh7619", vectors, groups,
-			 NULL, NULL, prio_registers, NULL);
+			 NULL, prio_registers, NULL);
 
 static struct plat_sci_port sci_platform_data[] = {
 	{
diff --git a/arch/sh/kernel/cpu/sh2a/Makefile b/arch/sh/kernel/cpu/sh2a/Makefile
index 965fa2572b23..b279cdc3a233 100644
--- a/arch/sh/kernel/cpu/sh2a/Makefile
+++ b/arch/sh/kernel/cpu/sh2a/Makefile
@@ -6,4 +6,8 @@ obj-y := common.o probe.o opcode_helper.o
 
 common-y	+= $(addprefix ../sh2/, ex.o entry.o)
 
+obj-$(CONFIG_SH_FPU)	+= fpu.o
+
 obj-$(CONFIG_CPU_SUBTYPE_SH7206)	+= setup-sh7206.o clock-sh7206.o
+obj-$(CONFIG_CPU_SUBTYPE_SH7203)	+= setup-sh7203.o clock-sh7203.o
+obj-$(CONFIG_CPU_SUBTYPE_SH7263)	+= setup-sh7203.o clock-sh7203.o
diff --git a/arch/sh/kernel/cpu/sh2a/clock-sh7203.c b/arch/sh/kernel/cpu/sh2a/clock-sh7203.c
new file mode 100644
index 000000000000..3feb95a4fcbc
--- /dev/null
+++ b/arch/sh/kernel/cpu/sh2a/clock-sh7203.c
@@ -0,0 +1,89 @@
1/*
2 * arch/sh/kernel/cpu/sh2a/clock-sh7203.c
3 *
4 * SH7203 support for the clock framework
5 *
6 * Copyright (C) 2007 Kieran Bingham (MPC-Data Ltd)
7 *
8 * Based on clock-sh7263.c
9 * Copyright (C) 2006 Yoshinori Sato
10 *
11 * Based on clock-sh4.c
12 * Copyright (C) 2005 Paul Mundt
13 *
14 * This file is subject to the terms and conditions of the GNU General Public
15 * License. See the file "COPYING" in the main directory of this archive
16 * for more details.
17 */
18#include <linux/init.h>
19#include <linux/kernel.h>
20#include <asm/clock.h>
21#include <asm/freq.h>
22#include <asm/io.h>
23
24const static int pll1rate[]={8,12,16,0};
25const static int pfc_divisors[]={1,2,3,4,6,8,12};
26#define ifc_divisors pfc_divisors
27
28#if (CONFIG_SH_CLK_MD == 0)
29#define PLL2 (1)
30#elif (CONFIG_SH_CLK_MD == 1)
31#define PLL2 (2)
32#elif (CONFIG_SH_CLK_MD == 2)
33#define PLL2 (4)
34#elif (CONFIG_SH_CLK_MD == 3)
35#define PLL2 (4)
36#else
37#error "Illegal Clock Mode!"
38#endif
39
40static void master_clk_init(struct clk *clk)
41{
42 clk->rate *= pll1rate[(ctrl_inw(FREQCR) >> 8) & 0x0003] * PLL2 ;
43}
44
45static struct clk_ops sh7203_master_clk_ops = {
46 .init = master_clk_init,
47};
48
49static void module_clk_recalc(struct clk *clk)
50{
51 int idx = (ctrl_inw(FREQCR) & 0x0007);
52 clk->rate = clk->parent->rate / pfc_divisors[idx];
53}
54
55static struct clk_ops sh7203_module_clk_ops = {
56 .recalc = module_clk_recalc,
57};
58
59static void bus_clk_recalc(struct clk *clk)
60{
61 int idx = (ctrl_inw(FREQCR) & 0x0007);
62 clk->rate = clk->parent->rate / pfc_divisors[idx-2];
63}
64
65static struct clk_ops sh7203_bus_clk_ops = {
66 .recalc = bus_clk_recalc,
67};
68
69static void cpu_clk_recalc(struct clk *clk)
70{
71 clk->rate = clk->parent->rate;
72}
73
74static struct clk_ops sh7203_cpu_clk_ops = {
75 .recalc = cpu_clk_recalc,
76};
77
78static struct clk_ops *sh7203_clk_ops[] = {
79 &sh7203_master_clk_ops,
80 &sh7203_module_clk_ops,
81 &sh7203_bus_clk_ops,
82 &sh7203_cpu_clk_ops,
83};
84
85void __init arch_init_clk_ops(struct clk_ops **ops, int idx)
86{
87 if (idx < ARRAY_SIZE(sh7203_clk_ops))
88 *ops = sh7203_clk_ops[idx];
89}
diff --git a/arch/sh/kernel/cpu/sh2a/fpu.c b/arch/sh/kernel/cpu/sh2a/fpu.c
new file mode 100644
index 000000000000..ff99562456fb
--- /dev/null
+++ b/arch/sh/kernel/cpu/sh2a/fpu.c
@@ -0,0 +1,633 @@
1/*
2 * Save/restore floating point context for signal handlers.
3 *
4 * Copyright (C) 1999, 2000 Kaz Kojima & Niibe Yutaka
5 *
6 * This file is subject to the terms and conditions of the GNU General Public
7 * License. See the file "COPYING" in the main directory of this archive
8 * for more details.
9 *
10 * FIXME! These routines can be optimized in big endian case.
11 */
12#include <linux/sched.h>
13#include <linux/signal.h>
14#include <asm/processor.h>
15#include <asm/io.h>
16
17/* The PR (precision) bit in the FP Status Register must be clear when
18 * an frchg instruction is executed, otherwise the instruction is undefined.
19 * Executing frchg with PR set causes a trap on some SH4 implementations.
20 */
21
22#define FPSCR_RCHG 0x00000000
23
24
25/*
26 * Save FPU registers onto task structure.
27 * Assume called with FPU enabled (SR.FD=0).
28 */
29void
30save_fpu(struct task_struct *tsk, struct pt_regs *regs)
31{
32 unsigned long dummy;
33
34 clear_tsk_thread_flag(tsk, TIF_USEDFPU);
35 enable_fpu();
36 asm volatile("sts.l fpul, @-%0\n\t"
37 "sts.l fpscr, @-%0\n\t"
38 "fmov.s fr15, @-%0\n\t"
39 "fmov.s fr14, @-%0\n\t"
40 "fmov.s fr13, @-%0\n\t"
41 "fmov.s fr12, @-%0\n\t"
42 "fmov.s fr11, @-%0\n\t"
43 "fmov.s fr10, @-%0\n\t"
44 "fmov.s fr9, @-%0\n\t"
45 "fmov.s fr8, @-%0\n\t"
46 "fmov.s fr7, @-%0\n\t"
47 "fmov.s fr6, @-%0\n\t"
48 "fmov.s fr5, @-%0\n\t"
49 "fmov.s fr4, @-%0\n\t"
50 "fmov.s fr3, @-%0\n\t"
51 "fmov.s fr2, @-%0\n\t"
52 "fmov.s fr1, @-%0\n\t"
53 "fmov.s fr0, @-%0\n\t"
54 "lds %3, fpscr\n\t"
55 : "=r" (dummy)
56 : "0" ((char *)(&tsk->thread.fpu.hard.status)),
57 "r" (FPSCR_RCHG),
58 "r" (FPSCR_INIT)
59 : "memory");
60
61 disable_fpu();
62 release_fpu(regs);
63}
64
65static void
66restore_fpu(struct task_struct *tsk)
67{
68 unsigned long dummy;
69
70 enable_fpu();
71 asm volatile("fmov.s @%0+, fr0\n\t"
72 "fmov.s @%0+, fr1\n\t"
73 "fmov.s @%0+, fr2\n\t"
74 "fmov.s @%0+, fr3\n\t"
75 "fmov.s @%0+, fr4\n\t"
76 "fmov.s @%0+, fr5\n\t"
77 "fmov.s @%0+, fr6\n\t"
78 "fmov.s @%0+, fr7\n\t"
79 "fmov.s @%0+, fr8\n\t"
80 "fmov.s @%0+, fr9\n\t"
81 "fmov.s @%0+, fr10\n\t"
82 "fmov.s @%0+, fr11\n\t"
83 "fmov.s @%0+, fr12\n\t"
84 "fmov.s @%0+, fr13\n\t"
85 "fmov.s @%0+, fr14\n\t"
86 "fmov.s @%0+, fr15\n\t"
87 "lds.l @%0+, fpscr\n\t"
88 "lds.l @%0+, fpul\n\t"
89 : "=r" (dummy)
90 : "0" (&tsk->thread.fpu), "r" (FPSCR_RCHG)
91 : "memory");
92 disable_fpu();
93}
94
95/*
96 * Load the FPU with signalling NANS. This bit pattern we're using
97 * has the property that no matter wether considered as single or as
98 * double precission represents signaling NANS.
99 */
100
101static void
102fpu_init(void)
103{
104 enable_fpu();
105 asm volatile("lds %0, fpul\n\t"
106 "fsts fpul, fr0\n\t"
107 "fsts fpul, fr1\n\t"
108 "fsts fpul, fr2\n\t"
109 "fsts fpul, fr3\n\t"
110 "fsts fpul, fr4\n\t"
111 "fsts fpul, fr5\n\t"
112 "fsts fpul, fr6\n\t"
113 "fsts fpul, fr7\n\t"
114 "fsts fpul, fr8\n\t"
115 "fsts fpul, fr9\n\t"
116 "fsts fpul, fr10\n\t"
117 "fsts fpul, fr11\n\t"
118 "fsts fpul, fr12\n\t"
119 "fsts fpul, fr13\n\t"
120 "fsts fpul, fr14\n\t"
121 "fsts fpul, fr15\n\t"
122 "lds %2, fpscr\n\t"
123 : /* no output */
124 : "r" (0), "r" (FPSCR_RCHG), "r" (FPSCR_INIT));
125 disable_fpu();
126}
127
128/*
129 * Emulate arithmetic ops on denormalized number for some FPU insns.
130 */
131
132/* denormalized float * float */
133static int denormal_mulf(int hx, int hy)
134{
135 unsigned int ix, iy;
136 unsigned long long m, n;
137 int exp, w;
138
139 ix = hx & 0x7fffffff;
140 iy = hy & 0x7fffffff;
141 if (iy < 0x00800000 || ix == 0)
142 return ((hx ^ hy) & 0x80000000);
143
144 exp = (iy & 0x7f800000) >> 23;
145 ix &= 0x007fffff;
146 iy = (iy & 0x007fffff) | 0x00800000;
147 m = (unsigned long long)ix * iy;
148 n = m;
149 w = -1;
150 while (n) { n >>= 1; w++; }
151
152 /* FIXME: use guard bits */
153 exp += w - 126 - 46;
154 if (exp > 0)
155 ix = ((int) (m >> (w - 23)) & 0x007fffff) | (exp << 23);
156 else if (exp + 22 >= 0)
157 ix = (int) (m >> (w - 22 - exp)) & 0x007fffff;
158 else
159 ix = 0;
160
161 ix |= (hx ^ hy) & 0x80000000;
162 return ix;
163}
164
165/* denormalized double * double */
166static void mult64(unsigned long long x, unsigned long long y,
167 unsigned long long *highp, unsigned long long *lowp)
168{
169 unsigned long long sub0, sub1, sub2, sub3;
170 unsigned long long high, low;
171
172 sub0 = (x >> 32) * (unsigned long) (y >> 32);
173 sub1 = (x & 0xffffffffLL) * (unsigned long) (y >> 32);
174 sub2 = (x >> 32) * (unsigned long) (y & 0xffffffffLL);
175 sub3 = (x & 0xffffffffLL) * (unsigned long) (y & 0xffffffffLL);
176 low = sub3;
177 high = 0LL;
178 sub3 += (sub1 << 32);
179 if (low > sub3)
180 high++;
181 low = sub3;
182 sub3 += (sub2 << 32);
183 if (low > sub3)
184 high++;
185 low = sub3;
186 high += (sub1 >> 32) + (sub2 >> 32);
187 high += sub0;
188 *lowp = low;
189 *highp = high;
190}
191
192static inline long long rshift64(unsigned long long mh,
193 unsigned long long ml, int n)
194{
195 if (n >= 64)
196 return mh >> (n - 64);
197 return (mh << (64 - n)) | (ml >> n);
198}
199
200static long long denormal_muld(long long hx, long long hy)
201{
202 unsigned long long ix, iy;
203 unsigned long long mh, ml, nh, nl;
204 int exp, w;
205
206 ix = hx & 0x7fffffffffffffffLL;
207 iy = hy & 0x7fffffffffffffffLL;
208 if (iy < 0x0010000000000000LL || ix == 0)
209 return ((hx ^ hy) & 0x8000000000000000LL);
210
211 exp = (iy & 0x7ff0000000000000LL) >> 52;
212 ix &= 0x000fffffffffffffLL;
213 iy = (iy & 0x000fffffffffffffLL) | 0x0010000000000000LL;
214 mult64(ix, iy, &mh, &ml);
215 nh = mh;
216 nl = ml;
217 w = -1;
218 if (nh) {
219 while (nh) { nh >>= 1; w++;}
220 w += 64;
221 } else
222 while (nl) { nl >>= 1; w++;}
223
224 /* FIXME: use guard bits */
225 exp += w - 1022 - 52 * 2;
226 if (exp > 0)
227 ix = (rshift64(mh, ml, w - 52) & 0x000fffffffffffffLL)
228 | ((long long)exp << 52);
229 else if (exp + 51 >= 0)
230 ix = rshift64(mh, ml, w - 51 - exp) & 0x000fffffffffffffLL;
231 else
232 ix = 0;
233
234 ix |= (hx ^ hy) & 0x8000000000000000LL;
235 return ix;
236}
237
238/* ix - iy where iy: denormal and ix, iy >= 0 */
239static int denormal_subf1(unsigned int ix, unsigned int iy)
240{
241 int frac;
242 int exp;
243
244 if (ix < 0x00800000)
245 return ix - iy;
246
247 exp = (ix & 0x7f800000) >> 23;
248 if (exp - 1 > 31)
249 return ix;
250 iy >>= exp - 1;
251 if (iy == 0)
252 return ix;
253
254 frac = (ix & 0x007fffff) | 0x00800000;
255 frac -= iy;
256 while (frac < 0x00800000) {
257 if (--exp == 0)
258 return frac;
259 frac <<= 1;
260 }
261
262 return (exp << 23) | (frac & 0x007fffff);
263}
264
265/* ix + iy where iy: denormal and ix, iy >= 0 */
266static int denormal_addf1(unsigned int ix, unsigned int iy)
267{
268 int frac;
269 int exp;
270
271 if (ix < 0x00800000)
272 return ix + iy;
273
274 exp = (ix & 0x7f800000) >> 23;
275 if (exp - 1 > 31)
276 return ix;
277 iy >>= exp - 1;
278 if (iy == 0)
279 return ix;
280
281 frac = (ix & 0x007fffff) | 0x00800000;
282 frac += iy;
283 if (frac >= 0x01000000) {
284 frac >>= 1;
285 ++exp;
286 }
287
288 return (exp << 23) | (frac & 0x007fffff);
289}
290
291static int denormal_addf(int hx, int hy)
292{
293 unsigned int ix, iy;
294 int sign;
295
296 if ((hx ^ hy) & 0x80000000) {
297 sign = hx & 0x80000000;
298 ix = hx & 0x7fffffff;
299 iy = hy & 0x7fffffff;
300 if (iy < 0x00800000) {
301 ix = denormal_subf1(ix, iy);
302 if (ix < 0) {
303 ix = -ix;
304 sign ^= 0x80000000;
305 }
306 } else {
307 ix = denormal_subf1(iy, ix);
308 sign ^= 0x80000000;
309 }
310 } else {
311 sign = hx & 0x80000000;
312 ix = hx & 0x7fffffff;
313 iy = hy & 0x7fffffff;
314 if (iy < 0x00800000)
315 ix = denormal_addf1(ix, iy);
316 else
317 ix = denormal_addf1(iy, ix);
318 }
319
320 return sign | ix;
321}
322
323/* ix - iy where iy: denormal and ix, iy >= 0 */
324static long long denormal_subd1(unsigned long long ix, unsigned long long iy)
325{
326 long long frac;
327 int exp;
328
329 if (ix < 0x0010000000000000LL)
330 return ix - iy;
331
332 exp = (ix & 0x7ff0000000000000LL) >> 52;
333 if (exp - 1 > 63)
334 return ix;
335 iy >>= exp - 1;
336 if (iy == 0)
337 return ix;
338
339 frac = (ix & 0x000fffffffffffffLL) | 0x0010000000000000LL;
340 frac -= iy;
341 while (frac < 0x0010000000000000LL) {
342 if (--exp == 0)
343 return frac;
344 frac <<= 1;
345 }
346
347 return ((long long)exp << 52) | (frac & 0x000fffffffffffffLL);
348}
349
350/* ix + iy where iy: denormal and ix, iy >= 0 */
351static long long denormal_addd1(unsigned long long ix, unsigned long long iy)
352{
353 long long frac;
354 long long exp;
355
356 if (ix < 0x0010000000000000LL)
357 return ix + iy;
358
359 exp = (ix & 0x7ff0000000000000LL) >> 52;
360 if (exp - 1 > 63)
361 return ix;
362 iy >>= exp - 1;
363 if (iy == 0)
364 return ix;
365
366 frac = (ix & 0x000fffffffffffffLL) | 0x0010000000000000LL;
367 frac += iy;
368 if (frac >= 0x0020000000000000LL) {
369 frac >>= 1;
370 ++exp;
371 }
372
373 return (exp << 52) | (frac & 0x000fffffffffffffLL);
374}
375
376static long long denormal_addd(long long hx, long long hy)
377{
378 unsigned long long ix, iy;
379 long long sign;
380
381 if ((hx ^ hy) & 0x8000000000000000LL) {
382 sign = hx & 0x8000000000000000LL;
383 ix = hx & 0x7fffffffffffffffLL;
384 iy = hy & 0x7fffffffffffffffLL;
385 if (iy < 0x0010000000000000LL) {
386 ix = denormal_subd1(ix, iy);
387 if (ix < 0) {
388 ix = -ix;
389 sign ^= 0x8000000000000000LL;
390 }
391 } else {
392 ix = denormal_subd1(iy, ix);
393 sign ^= 0x8000000000000000LL;
394 }
395 } else {
396 sign = hx & 0x8000000000000000LL;
397 ix = hx & 0x7fffffffffffffffLL;
398 iy = hy & 0x7fffffffffffffffLL;
399 if (iy < 0x0010000000000000LL)
400 ix = denormal_addd1(ix, iy);
401 else
402 ix = denormal_addd1(iy, ix);
403 }
404
405 return sign | ix;
406}
407
408/**
409 * denormal_to_double - Given denormalized float number,
410 * store double float
411 *
412 * @fpu: Pointer to sh_fpu_hard structure
413 * @n: Index to FP register
414 */
415static void
416denormal_to_double (struct sh_fpu_hard_struct *fpu, int n)
417{
418 unsigned long du, dl;
419 unsigned long x = fpu->fpul;
420 int exp = 1023 - 126;
421
422 if (x != 0 && (x & 0x7f800000) == 0) {
423 du = (x & 0x80000000);
424 while ((x & 0x00800000) == 0) {
425 x <<= 1;
426 exp--;
427 }
428 x &= 0x007fffff;
429 du |= (exp << 20) | (x >> 3);
430 dl = x << 29;
431
432 fpu->fp_regs[n] = du;
433 fpu->fp_regs[n+1] = dl;
434 }
435}
436
437/**
438 * ieee_fpe_handler - Handle denormalized number exception
439 *
440 * @regs: Pointer to register structure
441 *
442 * Returns 1 when it's handled (should not cause exception).
443 */
444static int
445ieee_fpe_handler (struct pt_regs *regs)
446{
447 unsigned short insn = *(unsigned short *) regs->pc;
448 unsigned short finsn;
449 unsigned long nextpc;
450 int nib[4] = {
451 (insn >> 12) & 0xf,
452 (insn >> 8) & 0xf,
453 (insn >> 4) & 0xf,
454 insn & 0xf};
455
456 if (nib[0] == 0xb ||
457 (nib[0] == 0x4 && nib[2] == 0x0 && nib[3] == 0xb)) /* bsr & jsr */
458 regs->pr = regs->pc + 4;
459 if (nib[0] == 0xa || nib[0] == 0xb) { /* bra & bsr */
460 nextpc = regs->pc + 4 + ((short) ((insn & 0xfff) << 4) >> 3);
461 finsn = *(unsigned short *) (regs->pc + 2);
462 } else if (nib[0] == 0x8 && nib[1] == 0xd) { /* bt/s */
463 if (regs->sr & 1)
464 nextpc = regs->pc + 4 + ((char) (insn & 0xff) << 1);
465 else
466 nextpc = regs->pc + 4;
467 finsn = *(unsigned short *) (regs->pc + 2);
468 } else if (nib[0] == 0x8 && nib[1] == 0xf) { /* bf/s */
469 if (regs->sr & 1)
470 nextpc = regs->pc + 4;
471 else
472 nextpc = regs->pc + 4 + ((char) (insn & 0xff) << 1);
473 finsn = *(unsigned short *) (regs->pc + 2);
474 } else if (nib[0] == 0x4 && nib[3] == 0xb &&
475 (nib[2] == 0x0 || nib[2] == 0x2)) { /* jmp & jsr */
476 nextpc = regs->regs[nib[1]];
477 finsn = *(unsigned short *) (regs->pc + 2);
478 } else if (nib[0] == 0x0 && nib[3] == 0x3 &&
479 (nib[2] == 0x0 || nib[2] == 0x2)) { /* braf & bsrf */
480 nextpc = regs->pc + 4 + regs->regs[nib[1]];
481 finsn = *(unsigned short *) (regs->pc + 2);
482 } else if (insn == 0x000b) { /* rts */
483 nextpc = regs->pr;
484 finsn = *(unsigned short *) (regs->pc + 2);
485 } else {
486 nextpc = regs->pc + 2;
487 finsn = insn;
488 }
489
490#define FPSCR_FPU_ERROR (1 << 17)
491
492 if ((finsn & 0xf1ff) == 0xf0ad) { /* fcnvsd */
493 struct task_struct *tsk = current;
494
495 if ((tsk->thread.fpu.hard.fpscr & FPSCR_FPU_ERROR)) {
496 /* FPU error */
497 denormal_to_double (&tsk->thread.fpu.hard,
498 (finsn >> 8) & 0xf);
499 } else
500 return 0;
501
502 regs->pc = nextpc;
503 return 1;
504 } else if ((finsn & 0xf00f) == 0xf002) { /* fmul */
505 struct task_struct *tsk = current;
506 int fpscr;
507 int n, m, prec;
508 unsigned int hx, hy;
509
510 n = (finsn >> 8) & 0xf;
511 m = (finsn >> 4) & 0xf;
512 hx = tsk->thread.fpu.hard.fp_regs[n];
513 hy = tsk->thread.fpu.hard.fp_regs[m];
514 fpscr = tsk->thread.fpu.hard.fpscr;
515 prec = fpscr & (1 << 19);
516
517 if ((fpscr & FPSCR_FPU_ERROR)
518 && (prec && ((hx & 0x7fffffff) < 0x00100000
519 || (hy & 0x7fffffff) < 0x00100000))) {
520 long long llx, lly;
521
522 /* FPU error because of denormal */
523 llx = ((long long) hx << 32)
524 | tsk->thread.fpu.hard.fp_regs[n+1];
525 lly = ((long long) hy << 32)
526 | tsk->thread.fpu.hard.fp_regs[m+1];
527 if ((hx & 0x7fffffff) >= 0x00100000)
528 llx = denormal_muld(lly, llx);
529 else
530 llx = denormal_muld(llx, lly);
531 tsk->thread.fpu.hard.fp_regs[n] = llx >> 32;
532 tsk->thread.fpu.hard.fp_regs[n+1] = llx & 0xffffffff;
533 } else if ((fpscr & FPSCR_FPU_ERROR)
534 && (!prec && ((hx & 0x7fffffff) < 0x00800000
535 || (hy & 0x7fffffff) < 0x00800000))) {
536 /* FPU error because of denormal */
537 if ((hx & 0x7fffffff) >= 0x00800000)
538 hx = denormal_mulf(hy, hx);
539 else
540 hx = denormal_mulf(hx, hy);
541 tsk->thread.fpu.hard.fp_regs[n] = hx;
542 } else
543 return 0;
544
545 regs->pc = nextpc;
546 return 1;
547 } else if ((finsn & 0xf00e) == 0xf000) { /* fadd, fsub */
548 struct task_struct *tsk = current;
549 int fpscr;
550 int n, m, prec;
551 unsigned int hx, hy;
552
553 n = (finsn >> 8) & 0xf;
554 m = (finsn >> 4) & 0xf;
555 hx = tsk->thread.fpu.hard.fp_regs[n];
556 hy = tsk->thread.fpu.hard.fp_regs[m];
557 fpscr = tsk->thread.fpu.hard.fpscr;
558 prec = fpscr & (1 << 19);
559
560 if ((fpscr & FPSCR_FPU_ERROR)
561 && (prec && ((hx & 0x7fffffff) < 0x00100000
562 || (hy & 0x7fffffff) < 0x00100000))) {
563 long long llx, lly;
564
565 /* FPU error because of denormal */
566 llx = ((long long) hx << 32)
567 | tsk->thread.fpu.hard.fp_regs[n+1];
568 lly = ((long long) hy << 32)
569 | tsk->thread.fpu.hard.fp_regs[m+1];
570 if ((finsn & 0xf00f) == 0xf000)
571 llx = denormal_addd(llx, lly);
572 else
573 llx = denormal_addd(llx, lly ^ (1LL << 63));
574 tsk->thread.fpu.hard.fp_regs[n] = llx >> 32;
575 tsk->thread.fpu.hard.fp_regs[n+1] = llx & 0xffffffff;
576 } else if ((fpscr & FPSCR_FPU_ERROR)
577 && (!prec && ((hx & 0x7fffffff) < 0x00800000
578 || (hy & 0x7fffffff) < 0x00800000))) {
579 /* FPU error because of denormal */
580 if ((finsn & 0xf00f) == 0xf000)
581 hx = denormal_addf(hx, hy);
582 else
583 hx = denormal_addf(hx, hy ^ 0x80000000);
584 tsk->thread.fpu.hard.fp_regs[n] = hx;
585 } else
586 return 0;
587
588 regs->pc = nextpc;
589 return 1;
590 }
591
592 return 0;
593}
594
595BUILD_TRAP_HANDLER(fpu_error)
596{
597 struct task_struct *tsk = current;
598 TRAP_HANDLER_DECL;
599
600 save_fpu(tsk, regs);
601 if (ieee_fpe_handler(regs)) {
602 tsk->thread.fpu.hard.fpscr &=
603 ~(FPSCR_CAUSE_MASK | FPSCR_FLAG_MASK);
604 grab_fpu(regs);
605 restore_fpu(tsk);
606 set_tsk_thread_flag(tsk, TIF_USEDFPU);
607 return;
608 }
609
610 force_sig(SIGFPE, tsk);
611}
612
613BUILD_TRAP_HANDLER(fpu_state_restore)
614{
615 struct task_struct *tsk = current;
616 TRAP_HANDLER_DECL;
617
618 grab_fpu(regs);
619 if (!user_mode(regs)) {
620 printk(KERN_ERR "BUG: FPU is used in kernel mode.\n");
621 return;
622 }
623
624 if (used_math()) {
625 /* Using the FPU again. */
626 restore_fpu(tsk);
627 } else {
628 /* First time FPU user. */
629 fpu_init();
630 set_used_math();
631 }
632 set_tsk_thread_flag(tsk, TIF_USEDFPU);
633}
diff --git a/arch/sh/kernel/cpu/sh2a/probe.c b/arch/sh/kernel/cpu/sh2a/probe.c
index 6d02465704b9..6910e2664468 100644
--- a/arch/sh/kernel/cpu/sh2a/probe.c
+++ b/arch/sh/kernel/cpu/sh2a/probe.c
@@ -3,25 +3,36 @@
  *
  * CPU Subtype Probing for SH-2A.
  *
- * Copyright (C) 2004, 2005 Paul Mundt
+ * Copyright (C) 2004 - 2007  Paul Mundt
  *
  * This file is subject to the terms and conditions of the GNU General Public
  * License.  See the file "COPYING" in the main directory of this archive
  * for more details.
  */
-
 #include <linux/init.h>
 #include <asm/processor.h>
 #include <asm/cache.h>
 
 int __init detect_cpu_and_cache_system(void)
 {
-	/* Just SH7206 for now .. */
-	boot_cpu_data.type		= CPU_SH7206;
+	/* All SH-2A CPUs have support for 16 and 32-bit opcodes.. */
 	boot_cpu_data.flags		|= CPU_HAS_OP32;
 
+#if defined(CONFIG_CPU_SUBTYPE_SH7203)
+	boot_cpu_data.type		= CPU_SH7203;
+	/* SH7203 has an FPU.. */
+	boot_cpu_data.flags		|= CPU_HAS_FPU;
+#elif defined(CONFIG_CPU_SUBTYPE_SH7263)
+	boot_cpu_data.type		= CPU_SH7263;
+	boot_cpu_data.flags		|= CPU_HAS_FPU;
+#elif defined(CONFIG_CPU_SUBTYPE_SH7206)
+	boot_cpu_data.type		= CPU_SH7206;
+	/* While SH7206 has a DSP.. */
+	boot_cpu_data.flags		|= CPU_HAS_DSP;
+#endif
+
 	boot_cpu_data.dcache.ways	= 4;
 	boot_cpu_data.dcache.way_incr	= (1 << 11);
 	boot_cpu_data.dcache.sets	= 128;
 	boot_cpu_data.dcache.entry_shift = 4;
 	boot_cpu_data.dcache.linesz	= L1_CACHE_BYTES;
@@ -37,4 +48,3 @@ int __init detect_cpu_and_cache_system(void)
 
 	return 0;
 }
-
diff --git a/arch/sh/kernel/cpu/sh2a/setup-sh7203.c b/arch/sh/kernel/cpu/sh2a/setup-sh7203.c
new file mode 100644
index 000000000000..db6ef5cecde1
--- /dev/null
+++ b/arch/sh/kernel/cpu/sh2a/setup-sh7203.c
@@ -0,0 +1,319 @@
1/*
2 * SH7203 and SH7263 Setup
3 *
4 * Copyright (C) 2007 Paul Mundt
5 *
6 * This file is subject to the terms and conditions of the GNU General Public
7 * License. See the file "COPYING" in the main directory of this archive
8 * for more details.
9 */
10#include <linux/platform_device.h>
11#include <linux/init.h>
12#include <linux/serial.h>
13#include <asm/sci.h>
14
15enum {
16 UNUSED = 0,
17
18 /* interrupt sources */
19 IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7,
20 PINT0, PINT1, PINT2, PINT3, PINT4, PINT5, PINT6, PINT7,
21 DMAC0_DEI, DMAC0_HEI, DMAC1_DEI, DMAC1_HEI,
22 DMAC2_DEI, DMAC2_HEI, DMAC3_DEI, DMAC3_HEI,
23 DMAC4_DEI, DMAC4_HEI, DMAC5_DEI, DMAC5_HEI,
24 DMAC6_DEI, DMAC6_HEI, DMAC7_DEI, DMAC7_HEI,
25 USB, LCDC, CMT0, CMT1, BSC, WDT,
26 MTU2_TGI0A, MTU2_TGI0B, MTU2_TGI0C, MTU2_TGI0D,
27 MTU2_TCI0V, MTU2_TGI0E, MTU2_TGI0F,
28 MTU2_TGI1A, MTU2_TGI1B, MTU2_TCI1V, MTU2_TCI1U,
29 MTU2_TGI2A, MTU2_TGI2B, MTU2_TCI2V, MTU2_TCI2U,
30 MTU2_TGI3A, MTU2_TGI3B, MTU2_TGI3C, MTU2_TGI3D, MTU2_TCI3V,
31 MTU2_TGI4A, MTU2_TGI4B, MTU2_TGI4C, MTU2_TGI4D, MTU2_TCI4V,
32 ADC_ADI,
33 IIC30_STPI, IIC30_NAKI, IIC30_RXI, IIC30_TXI, IIC30_TEI,
34 IIC31_STPI, IIC31_NAKI, IIC31_RXI, IIC31_TXI, IIC31_TEI,
35 IIC32_STPI, IIC32_NAKI, IIC32_RXI, IIC32_TXI, IIC32_TEI,
36 IIC33_STPI, IIC33_NAKI, IIC33_RXI, IIC33_TXI, IIC33_TEI,
37 SCIF0_BRI, SCIF0_ERI, SCIF0_RXI, SCIF0_TXI,
38 SCIF1_BRI, SCIF1_ERI, SCIF1_RXI, SCIF1_TXI,
39 SCIF2_BRI, SCIF2_ERI, SCIF2_RXI, SCIF2_TXI,
40 SCIF3_BRI, SCIF3_ERI, SCIF3_RXI, SCIF3_TXI,
41 SSU0_SSERI, SSU0_SSRXI, SSU0_SSTXI,
42 SSU1_SSERI, SSU1_SSRXI, SSU1_SSTXI,
43 SSI0_SSII, SSI1_SSII, SSI2_SSII, SSI3_SSII,
44
45 /* ROM-DEC, SDHI, SRC, and IEB are SH7263 specific */
46 ROMDEC_ISY, ROMDEC_IERR, ROMDEC_IARG, ROMDEC_ISEC, ROMDEC_IBUF,
47 ROMDEC_IREADY,
48
49 FLCTL_FLSTEI, FLCTL_FLTENDI, FLCTL_FLTREQ0I, FLCTL_FLTREQ1I,
50
51 SDHI3, SDHI0, SDHI1,
52
53 RTC_ARM, RTC_PRD, RTC_CUP,
54 RCAN0_ERS, RCAN0_OVR, RCAN0_RM0, RCAN0_RM1, RCAN0_SLE,
55 RCAN1_ERS, RCAN1_OVR, RCAN1_RM0, RCAN1_RM1, RCAN1_SLE,
56
57 SRC_OVF, SRC_ODFI, SRC_IDEI, IEBI,
58
59 /* interrupt groups */
60 PINT, DMAC0, DMAC1, DMAC2, DMAC3, DMAC4, DMAC5, DMAC6, DMAC7,
61 MTU0_ABCD, MTU0_VEF, MTU1_AB, MTU1_VU, MTU2_AB, MTU2_VU,
62 MTU3_ABCD, MTU4_ABCD,
63 IIC30, IIC31, IIC32, IIC33, SCIF0, SCIF1, SCIF2, SCIF3,
64 SSU0, SSU1, ROMDEC, SDHI, FLCTL, RTC, RCAN0, RCAN1, SRC
65};
66
67static struct intc_vect vectors[] __initdata = {
68 INTC_IRQ(IRQ0, 64), INTC_IRQ(IRQ1, 65),
69 INTC_IRQ(IRQ2, 66), INTC_IRQ(IRQ3, 67),
70 INTC_IRQ(IRQ4, 68), INTC_IRQ(IRQ5, 69),
71 INTC_IRQ(IRQ6, 70), INTC_IRQ(IRQ7, 71),
72 INTC_IRQ(PINT0, 80), INTC_IRQ(PINT1, 81),
73 INTC_IRQ(PINT2, 82), INTC_IRQ(PINT3, 83),
74 INTC_IRQ(PINT4, 84), INTC_IRQ(PINT5, 85),
75 INTC_IRQ(PINT6, 86), INTC_IRQ(PINT7, 87),
76 INTC_IRQ(DMAC0_DEI, 108), INTC_IRQ(DMAC0_HEI, 109),
77 INTC_IRQ(DMAC1_DEI, 112), INTC_IRQ(DMAC1_HEI, 113),
78 INTC_IRQ(DMAC2_DEI, 116), INTC_IRQ(DMAC2_HEI, 117),
79 INTC_IRQ(DMAC3_DEI, 120), INTC_IRQ(DMAC3_HEI, 121),
80 INTC_IRQ(DMAC4_DEI, 124), INTC_IRQ(DMAC4_HEI, 125),
81 INTC_IRQ(DMAC5_DEI, 128), INTC_IRQ(DMAC5_HEI, 129),
82 INTC_IRQ(DMAC6_DEI, 132), INTC_IRQ(DMAC6_HEI, 133),
83 INTC_IRQ(DMAC7_DEI, 136), INTC_IRQ(DMAC7_HEI, 137),
84 INTC_IRQ(USB, 140), INTC_IRQ(LCDC, 141),
85 INTC_IRQ(CMT0, 142), INTC_IRQ(CMT1, 143),
86 INTC_IRQ(BSC, 144), INTC_IRQ(WDT, 145),
87 INTC_IRQ(MTU2_TGI0A, 146), INTC_IRQ(MTU2_TGI0B, 147),
88 INTC_IRQ(MTU2_TGI0C, 148), INTC_IRQ(MTU2_TGI0D, 149),
89 INTC_IRQ(MTU2_TCI0V, 150),
90 INTC_IRQ(MTU2_TGI0E, 151), INTC_IRQ(MTU2_TGI0F, 152),
91 INTC_IRQ(MTU2_TGI1A, 153), INTC_IRQ(MTU2_TGI1B, 154),
92 INTC_IRQ(MTU2_TCI1V, 155), INTC_IRQ(MTU2_TCI1U, 156),
93 INTC_IRQ(MTU2_TGI2A, 157), INTC_IRQ(MTU2_TGI2B, 158),
94 INTC_IRQ(MTU2_TCI2V, 159), INTC_IRQ(MTU2_TCI2U, 160),
95 INTC_IRQ(MTU2_TGI3A, 161), INTC_IRQ(MTU2_TGI3B, 162),
96 INTC_IRQ(MTU2_TGI3C, 163), INTC_IRQ(MTU2_TGI3D, 164),
97 INTC_IRQ(MTU2_TCI3V, 165),
98 INTC_IRQ(MTU2_TGI4A, 166), INTC_IRQ(MTU2_TGI4B, 167),
99 INTC_IRQ(MTU2_TGI4C, 168), INTC_IRQ(MTU2_TGI4D, 169),
100 INTC_IRQ(MTU2_TCI4V, 170),
101 INTC_IRQ(ADC_ADI, 171),
102 INTC_IRQ(IIC30_STPI, 172), INTC_IRQ(IIC30_NAKI, 173),
103 INTC_IRQ(IIC30_RXI, 174), INTC_IRQ(IIC30_TXI, 175),
104 INTC_IRQ(IIC30_TEI, 176),
105 INTC_IRQ(IIC31_STPI, 177), INTC_IRQ(IIC31_NAKI, 178),
106 INTC_IRQ(IIC31_RXI, 179), INTC_IRQ(IIC31_TXI, 180),
107 INTC_IRQ(IIC31_TEI, 181),
108 INTC_IRQ(IIC32_STPI, 182), INTC_IRQ(IIC32_NAKI, 183),
109 INTC_IRQ(IIC32_RXI, 184), INTC_IRQ(IIC32_TXI, 185),
110 INTC_IRQ(IIC32_TEI, 186),
111 INTC_IRQ(IIC33_STPI, 187), INTC_IRQ(IIC33_NAKI, 188),
112 INTC_IRQ(IIC33_RXI, 189), INTC_IRQ(IIC33_TXI, 190),
113 INTC_IRQ(IIC33_TEI, 191),
114 INTC_IRQ(SCIF0_BRI, 192), INTC_IRQ(SCIF0_ERI, 193),
115 INTC_IRQ(SCIF0_RXI, 194), INTC_IRQ(SCIF0_TXI, 195),
116 INTC_IRQ(SCIF1_BRI, 196), INTC_IRQ(SCIF1_ERI, 197),
117 INTC_IRQ(SCIF1_RXI, 198), INTC_IRQ(SCIF1_TXI, 199),
118 INTC_IRQ(SCIF2_BRI, 200), INTC_IRQ(SCIF2_ERI, 201),
119 INTC_IRQ(SCIF2_RXI, 202), INTC_IRQ(SCIF2_TXI, 203),
120 INTC_IRQ(SCIF3_BRI, 204), INTC_IRQ(SCIF3_ERI, 205),
121 INTC_IRQ(SCIF3_RXI, 206), INTC_IRQ(SCIF3_TXI, 207),
122 INTC_IRQ(SSU0_SSERI, 208), INTC_IRQ(SSU0_SSRXI, 209),
123 INTC_IRQ(SSU0_SSTXI, 210),
124 INTC_IRQ(SSU1_SSERI, 211), INTC_IRQ(SSU1_SSRXI, 212),
125 INTC_IRQ(SSU1_SSTXI, 213),
126 INTC_IRQ(SSI0_SSII, 214), INTC_IRQ(SSI1_SSII, 215),
127 INTC_IRQ(SSI2_SSII, 216), INTC_IRQ(SSI3_SSII, 217),
128 INTC_IRQ(FLCTL_FLSTEI, 224), INTC_IRQ(FLCTL_FLTENDI, 225),
129 INTC_IRQ(FLCTL_FLTREQ0I, 226), INTC_IRQ(FLCTL_FLTREQ1I, 227),
130 INTC_IRQ(RTC_ARM, 231), INTC_IRQ(RTC_PRD, 232),
131 INTC_IRQ(RTC_CUP, 233),
132 INTC_IRQ(RCAN0_ERS, 234), INTC_IRQ(RCAN0_OVR, 235),
133 INTC_IRQ(RCAN0_RM0, 236), INTC_IRQ(RCAN0_RM1, 237),
134 INTC_IRQ(RCAN0_SLE, 238),
135 INTC_IRQ(RCAN1_ERS, 239), INTC_IRQ(RCAN1_OVR, 240),
136 INTC_IRQ(RCAN1_RM0, 241), INTC_IRQ(RCAN1_RM1, 242),
137 INTC_IRQ(RCAN1_SLE, 243),
138
139 /* SH7263-specific trash */
140#ifdef CONFIG_CPU_SUBTYPE_SH7263
141 INTC_IRQ(ROMDEC_ISY, 218), INTC_IRQ(ROMDEC_IERR, 219),
142 INTC_IRQ(ROMDEC_IARG, 220), INTC_IRQ(ROMDEC_ISEC, 221),
143 INTC_IRQ(ROMDEC_IBUF, 222), INTC_IRQ(ROMDEC_IREADY, 223),
144
145 INTC_IRQ(SDHI3, 228), INTC_IRQ(SDHI0, 229), INTC_IRQ(SDHI1, 230),
146
147 INTC_IRQ(SRC_OVF, 244), INTC_IRQ(SRC_ODFI, 245),
148 INTC_IRQ(SRC_IDEI, 246),
149
150 INTC_IRQ(IEBI, 247),
151#endif
152};
153
154static struct intc_group groups[] __initdata = {
155 INTC_GROUP(PINT, PINT0, PINT1, PINT2, PINT3,
156 PINT4, PINT5, PINT6, PINT7),
157 INTC_GROUP(DMAC0, DMAC0_DEI, DMAC0_HEI),
158 INTC_GROUP(DMAC1, DMAC1_DEI, DMAC1_HEI),
159 INTC_GROUP(DMAC2, DMAC2_DEI, DMAC2_HEI),
160 INTC_GROUP(DMAC3, DMAC3_DEI, DMAC3_HEI),
161 INTC_GROUP(DMAC4, DMAC4_DEI, DMAC4_HEI),
162 INTC_GROUP(DMAC5, DMAC5_DEI, DMAC5_HEI),
163 INTC_GROUP(DMAC6, DMAC6_DEI, DMAC6_HEI),
164 INTC_GROUP(DMAC7, DMAC7_DEI, DMAC7_HEI),
165 INTC_GROUP(MTU0_ABCD, MTU2_TGI0A, MTU2_TGI0B, MTU2_TGI0C, MTU2_TGI0D),
166 INTC_GROUP(MTU0_VEF, MTU2_TCI0V, MTU2_TGI0E, MTU2_TGI0F),
167 INTC_GROUP(MTU1_AB, MTU2_TGI1A, MTU2_TGI1B),
168 INTC_GROUP(MTU1_VU, MTU2_TCI1V, MTU2_TCI1U),
169 INTC_GROUP(MTU2_AB, MTU2_TGI2A, MTU2_TGI2B),
170 INTC_GROUP(MTU2_VU, MTU2_TCI2V, MTU2_TCI2U),
171 INTC_GROUP(MTU3_ABCD, MTU2_TGI3A, MTU2_TGI3B, MTU2_TGI3C, MTU2_TGI3D),
172 INTC_GROUP(MTU4_ABCD, MTU2_TGI4A, MTU2_TGI4B, MTU2_TGI4C, MTU2_TGI4D),
173 INTC_GROUP(IIC30, IIC30_STPI, IIC30_NAKI, IIC30_RXI, IIC30_TXI,
174 IIC30_TEI),
175 INTC_GROUP(IIC31, IIC31_STPI, IIC31_NAKI, IIC31_RXI, IIC31_TXI,
176 IIC31_TEI),
177 INTC_GROUP(IIC32, IIC32_STPI, IIC32_NAKI, IIC32_RXI, IIC32_TXI,
178 IIC32_TEI),
179 INTC_GROUP(IIC33, IIC33_STPI, IIC33_NAKI, IIC33_RXI, IIC33_TXI,
180 IIC33_TEI),
181 INTC_GROUP(SCIF0, SCIF0_BRI, SCIF0_ERI, SCIF0_RXI, SCIF0_TXI),
182 INTC_GROUP(SCIF1, SCIF1_BRI, SCIF1_ERI, SCIF1_RXI, SCIF1_TXI),
183 INTC_GROUP(SCIF2, SCIF2_BRI, SCIF2_ERI, SCIF2_RXI, SCIF2_TXI),
184 INTC_GROUP(SCIF3, SCIF3_BRI, SCIF3_ERI, SCIF3_RXI, SCIF3_TXI),
185 INTC_GROUP(SSU0, SSU0_SSERI, SSU0_SSRXI, SSU0_SSTXI),
186 INTC_GROUP(SSU1, SSU1_SSERI, SSU1_SSRXI, SSU1_SSTXI),
187 INTC_GROUP(FLCTL, FLCTL_FLSTEI, FLCTL_FLTENDI, FLCTL_FLTREQ0I,
188 FLCTL_FLTREQ1I),
189 INTC_GROUP(RTC, RTC_ARM, RTC_PRD, RTC_CUP),
190 INTC_GROUP(RCAN0, RCAN0_ERS, RCAN0_OVR, RCAN0_RM0, RCAN0_RM1,
191 RCAN0_SLE),
192 INTC_GROUP(RCAN1, RCAN1_ERS, RCAN1_OVR, RCAN1_RM0, RCAN1_RM1,
193 RCAN1_SLE),
194
195#ifdef CONFIG_CPU_SUBTYPE_SH7263
196 INTC_GROUP(ROMDEC, ROMDEC_ISY, ROMDEC_IERR, ROMDEC_IARG,
197 ROMDEC_ISEC, ROMDEC_IBUF, ROMDEC_IREADY),
198 INTC_GROUP(SDHI, SDHI3, SDHI0, SDHI1),
199 INTC_GROUP(SRC, SRC_OVF, SRC_ODFI, SRC_IDEI),
200#endif
201};
202
203static struct intc_prio_reg prio_registers[] __initdata = {
204 { 0xfffe0818, 0, 16, 4, /* IPR01 */ { IRQ0, IRQ1, IRQ2, IRQ3 } },
205 { 0xfffe081a, 0, 16, 4, /* IPR02 */ { IRQ4, IRQ5, IRQ6, IRQ7 } },
206 { 0xfffe0820, 0, 16, 4, /* IPR05 */ { PINT, 0, 0, 0 } },
207 { 0xfffe0c00, 0, 16, 4, /* IPR06 */ { DMAC0, DMAC1, DMAC2, DMAC3 } },
208 { 0xfffe0c02, 0, 16, 4, /* IPR07 */ { DMAC4, DMAC5, DMAC6, DMAC7 } },
209 { 0xfffe0c04, 0, 16, 4, /* IPR08 */ { USB, LCDC, CMT0, CMT1 } },
210 { 0xfffe0c06, 0, 16, 4, /* IPR09 */ { BSC, WDT, MTU0_ABCD, MTU0_VEF } },
211 { 0xfffe0c08, 0, 16, 4, /* IPR10 */ { MTU1_AB, MTU1_VU, MTU2_AB,
212 MTU2_VU } },
213 { 0xfffe0c0a, 0, 16, 4, /* IPR11 */ { MTU3_ABCD, MTU2_TCI3V, MTU4_ABCD,
214 MTU2_TCI4V } },
215 { 0xfffe0c0c, 0, 16, 4, /* IPR12 */ { ADC_ADI, IIC30, IIC31, IIC32 } },
216 { 0xfffe0c0e, 0, 16, 4, /* IPR13 */ { IIC33, SCIF0, SCIF1, SCIF2 } },
217 { 0xfffe0c10, 0, 16, 4, /* IPR14 */ { SCIF3, SSU0, SSU1, SSI0_SSII } },
218#ifdef CONFIG_CPU_SUBTYPE_SH7203
219 { 0xfffe0c12, 0, 16, 4, /* IPR15 */ { SSI1_SSII, SSI2_SSII,
220 SSI3_SSII, 0 } },
221 { 0xfffe0c14, 0, 16, 4, /* IPR16 */ { FLCTL, 0, RTC, RCAN0 } },
222 { 0xfffe0c16, 0, 16, 4, /* IPR17 */ { RCAN1, 0, 0, 0 } },
223#else
224 { 0xfffe0c12, 0, 16, 4, /* IPR15 */ { SSI1_SSII, SSI2_SSII,
225 SSI3_SSII, ROMDEC } },
226 { 0xfffe0c14, 0, 16, 4, /* IPR16 */ { FLCTL, SDHI, RTC, RCAN0 } },
227 { 0xfffe0c16, 0, 16, 4, /* IPR17 */ { RCAN1, SRC, IEBI, 0 } },
228#endif
229};
230
231static struct intc_mask_reg mask_registers[] __initdata = {
232 { 0xfffe0808, 0, 16, /* PINTER */
233 { 0, 0, 0, 0, 0, 0, 0, 0,
234 PINT7, PINT6, PINT5, PINT4, PINT3, PINT2, PINT1, PINT0 } },
235};
236
237static DECLARE_INTC_DESC(intc_desc, "sh7203", vectors, groups,
238 mask_registers, prio_registers, NULL);
239
240static struct plat_sci_port sci_platform_data[] = {
241 {
242 .mapbase = 0xfffe8000,
243 .flags = UPF_BOOT_AUTOCONF,
244 .type = PORT_SCIF,
245 .irqs = { 193, 194, 195, 192 },
246 }, {
247 .mapbase = 0xfffe8800,
248 .flags = UPF_BOOT_AUTOCONF,
249 .type = PORT_SCIF,
250 .irqs = { 197, 198, 199, 196 },
251 }, {
252 .mapbase = 0xfffe9000,
253 .flags = UPF_BOOT_AUTOCONF,
254 .type = PORT_SCIF,
255 .irqs = { 201, 202, 203, 200 },
256 }, {
257 .mapbase = 0xfffe9800,
258 .flags = UPF_BOOT_AUTOCONF,
259 .type = PORT_SCIF,
260 .irqs = { 205, 206, 207, 204 },
261 }, {
262 .flags = 0,
263 }
264};
265
266static struct platform_device sci_device = {
267 .name = "sh-sci",
268 .id = -1,
269 .dev = {
270 .platform_data = sci_platform_data,
271 },
272};
273
274static struct resource rtc_resources[] = {
275 [0] = {
276 .start = 0xffff2000,
277 .end = 0xffff2000 + 0x58 - 1,
278 .flags = IORESOURCE_IO,
279 },
280 [1] = {
281 /* Period IRQ */
282 .start = 232,
283 .flags = IORESOURCE_IRQ,
284 },
285 [2] = {
286 /* Carry IRQ */
287 .start = 233,
288 .flags = IORESOURCE_IRQ,
289 },
290 [3] = {
291 /* Alarm IRQ */
292 .start = 231,
293 .flags = IORESOURCE_IRQ,
294 },
295};
296
297static struct platform_device rtc_device = {
298 .name = "sh-rtc",
299 .id = -1,
300 .num_resources = ARRAY_SIZE(rtc_resources),
301 .resource = rtc_resources,
302};
303
304static struct platform_device *sh7203_devices[] __initdata = {
305 &sci_device,
306 &rtc_device,
307};
308
309static int __init sh7203_devices_setup(void)
310{
311 return platform_add_devices(sh7203_devices,
312 ARRAY_SIZE(sh7203_devices));
313}
314__initcall(sh7203_devices_setup);
315
316void __init plat_irq_setup(void)
317{
318 register_intc_controller(&intc_desc);
319}
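
For reference, each intc_prio_reg entry above packs one 16-bit IPR register as four 4-bit priority fields, listed MSB-first. A minimal sketch of that layout, assuming the { set_reg, clr_reg, reg_width, field_width, ids } ordering used by this tree's intc code (names here are illustrative, not the shared driver's internals):

/* Sketch: field position of ids[i] within an IPR-style register. */
struct ipr_desc {
	unsigned long set_reg;	/* e.g. 0xfffe0818 (IPR01) */
	unsigned long clr_reg;	/* 0 when the hardware has no clear reg */
	int reg_width;		/* 16 */
	int field_width;	/* 4 */
	int ids[4];		/* MSB-first, e.g. { IRQ0, IRQ1, IRQ2, IRQ3 } */
};

static int ipr_field_shift(const struct ipr_desc *d, int i)
{
	/* fields are packed from the top: ids[0] sits in bits 15..12,
	 * ids[1] in bits 11..8, and so on */
	return d->reg_width - d->field_width * (i + 1);
}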
diff --git a/arch/sh/kernel/cpu/sh2a/setup-sh7206.c b/arch/sh/kernel/cpu/sh2a/setup-sh7206.c
index bd745aa87222..a564425b905f 100644
--- a/arch/sh/kernel/cpu/sh2a/setup-sh7206.c
+++ b/arch/sh/kernel/cpu/sh2a/setup-sh7206.c
@@ -167,7 +167,7 @@ static struct intc_mask_reg mask_registers[] __initdata = {
167}; 167};
168 168
169static DECLARE_INTC_DESC(intc_desc, "sh7206", vectors, groups, 169static DECLARE_INTC_DESC(intc_desc, "sh7206", vectors, groups,
170 NULL, mask_registers, prio_registers, NULL); 170 mask_registers, prio_registers, NULL);
171 171
172static struct plat_sci_port sci_platform_data[] = { 172static struct plat_sci_port sci_platform_data[] = {
173 { 173 {
diff --git a/arch/sh/kernel/cpu/sh3/Makefile b/arch/sh/kernel/cpu/sh3/Makefile
index 646eb6933614..3ae4d9111f19 100644
--- a/arch/sh/kernel/cpu/sh3/Makefile
+++ b/arch/sh/kernel/cpu/sh3/Makefile
@@ -13,6 +13,7 @@ obj-$(CONFIG_CPU_SUBTYPE_SH7709) += setup-sh770x.o
13obj-$(CONFIG_CPU_SUBTYPE_SH7710) += setup-sh7710.o 13obj-$(CONFIG_CPU_SUBTYPE_SH7710) += setup-sh7710.o
14obj-$(CONFIG_CPU_SUBTYPE_SH7712) += setup-sh7710.o 14obj-$(CONFIG_CPU_SUBTYPE_SH7712) += setup-sh7710.o
15obj-$(CONFIG_CPU_SUBTYPE_SH7720) += setup-sh7720.o 15obj-$(CONFIG_CPU_SUBTYPE_SH7720) += setup-sh7720.o
16obj-$(CONFIG_CPU_SUBTYPE_SH7721) += setup-sh7720.o
16 17
17# Primary on-chip clocks (common) 18# Primary on-chip clocks (common)
18clock-$(CONFIG_CPU_SH3) := clock-sh3.o 19clock-$(CONFIG_CPU_SH3) := clock-sh3.o
@@ -21,5 +22,6 @@ clock-$(CONFIG_CPU_SUBTYPE_SH7706) := clock-sh7706.o
21clock-$(CONFIG_CPU_SUBTYPE_SH7709) := clock-sh7709.o 22clock-$(CONFIG_CPU_SUBTYPE_SH7709) := clock-sh7709.o
22clock-$(CONFIG_CPU_SUBTYPE_SH7710) := clock-sh7710.o 23clock-$(CONFIG_CPU_SUBTYPE_SH7710) := clock-sh7710.o
23clock-$(CONFIG_CPU_SUBTYPE_SH7720) := clock-sh7710.o 24clock-$(CONFIG_CPU_SUBTYPE_SH7720) := clock-sh7710.o
25clock-$(CONFIG_CPU_SUBTYPE_SH7712) := clock-sh7712.o
24 26
25obj-y += $(clock-y) 27obj-y += $(clock-y)
diff --git a/arch/sh/kernel/cpu/sh3/clock-sh7712.c b/arch/sh/kernel/cpu/sh3/clock-sh7712.c
new file mode 100644
index 000000000000..54f54df51ef0
--- /dev/null
+++ b/arch/sh/kernel/cpu/sh3/clock-sh7712.c
@@ -0,0 +1,71 @@
1/*
2 * arch/sh/kernel/cpu/sh3/clock-sh7712.c
3 *
4 * SH7712 support for the clock framework
5 *
6 * Copyright (C) 2007 Andrew Murray <amurray@mpc-data.co.uk>
7 *
8 * Based on arch/sh/kernel/cpu/sh3/clock-sh3.c
9 * Copyright (C) 2005 Paul Mundt
10 *
11 * This file is subject to the terms and conditions of the GNU General Public
12 * License. See the file "COPYING" in the main directory of this archive
13 * for more details.
14 */
15#include <linux/init.h>
16#include <linux/kernel.h>
17#include <asm/clock.h>
18#include <asm/freq.h>
19#include <asm/io.h>
20
21static int multipliers[] = { 1, 2, 3 };
22static int divisors[] = { 1, 2, 3, 4, 6 };
23
24static void master_clk_init(struct clk *clk)
25{
26 int frqcr = ctrl_inw(FRQCR);
27 int idx = (frqcr & 0x0300) >> 8;
28
29 clk->rate *= multipliers[idx];
30}
31
32static struct clk_ops sh7712_master_clk_ops = {
33 .init = master_clk_init,
34};
35
36static void module_clk_recalc(struct clk *clk)
37{
38 int frqcr = ctrl_inw(FRQCR);
39 int idx = frqcr & 0x0007;
40
41 clk->rate = clk->parent->rate / divisors[idx];
42}
43
44static struct clk_ops sh7712_module_clk_ops = {
45 .recalc = module_clk_recalc,
46};
47
48static void cpu_clk_recalc(struct clk *clk)
49{
50 int frqcr = ctrl_inw(FRQCR);
51 int idx = (frqcr & 0x0030) >> 4;
52
53 clk->rate = clk->parent->rate / divisors[idx];
54}
55
56static struct clk_ops sh7712_cpu_clk_ops = {
57 .recalc = cpu_clk_recalc,
58};
59
60static struct clk_ops *sh7712_clk_ops[] = {
61 &sh7712_master_clk_ops,
62 &sh7712_module_clk_ops,
63 &sh7712_cpu_clk_ops,
64};
65
66void __init arch_init_clk_ops(struct clk_ops **ops, int idx)
67{
68 if (idx < ARRAY_SIZE(sh7712_clk_ops))
69 *ops = sh7712_clk_ops[idx];
70}
71
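As a worked example of the FRQCR decode above (the register value is hypothetical, purely to show the arithmetic; the three fields are the usual SH STC/IFC/PFC selectors):

/* Hypothetical FRQCR = 0x0112: STC=1, IFC=1, PFC=2. */
static void sh7712_clk_example(void)
{
	unsigned int frqcr = 0x0112;
	unsigned long input = 33333333;	/* 33.33 MHz input clock */
	unsigned long master, cpu, module;

	master = input * multipliers[(frqcr & 0x0300) >> 8];	/* x2 */
	cpu    = master / divisors[(frqcr & 0x0030) >> 4];	/* /2 */
	module = master / divisors[frqcr & 0x0007];		/* /3 */
	/* master ~ 66.7 MHz, cpu ~ 33.3 MHz, module ~ 22.2 MHz */
}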
diff --git a/arch/sh/kernel/cpu/sh3/entry.S b/arch/sh/kernel/cpu/sh3/entry.S
index 0d12a124055c..4004073f98cd 100644
--- a/arch/sh/kernel/cpu/sh3/entry.S
+++ b/arch/sh/kernel/cpu/sh3/entry.S
@@ -13,8 +13,9 @@
13#include <linux/linkage.h> 13#include <linux/linkage.h>
14#include <asm/asm-offsets.h> 14#include <asm/asm-offsets.h>
15#include <asm/thread_info.h> 15#include <asm/thread_info.h>
16#include <asm/cpu/mmu_context.h>
17#include <asm/unistd.h> 16#include <asm/unistd.h>
17#include <asm/cpu/mmu_context.h>
18#include <asm/page.h>
18 19
19! NOTE: 20! NOTE:
20! GNU as (as of 2.9.1) changes bf/s into bt/s and bra, when the address 21! GNU as (as of 2.9.1) changes bf/s into bt/s and bra, when the address
@@ -409,6 +410,27 @@ ENTRY(handle_exception)
409 ! Using k0, k1 for scratch registers (r0_bank1, r1_bank), 410 ! Using k0, k1 for scratch registers (r0_bank1, r1_bank),
410 ! save all registers onto stack. 411 ! save all registers onto stack.
411 ! 412 !
413
414#ifdef CONFIG_GUSA
415 ! Check for roll back gRB (User and Kernel)
416 mov r15, k0
417 shll k0
418 bf/s 1f
419 shll k0
420 bf/s 1f
421 stc spc, k1
422 stc r0_bank, k0
423 cmp/hs k0, k1 ! test k1 (saved PC) >= k0 (saved r0)
424 bt/s 2f
425 stc r1_bank, k1
426
427 add #-2, k0
428 add r15, k0
429 ldc k0, spc ! PC = saved r0 + r15 - 2
4302: mov k1, r15 ! SP = r1
4311:
432#endif
433
412 stc ssr, k0 ! Is it from kernel space? 434 stc ssr, k0 ! Is it from kernel space?
413 shll k0 ! Check MD bit (bit30) by shifting it into... 435 shll k0 ! Check MD bit (bit30) by shifting it into...
414 shll k0 ! ...the T bit 436 shll k0 ! ...the T bit
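
The gUSA ("g" user-space atomicity) check added above rolls an interrupted atomic sequence back to its start: userspace saves SP in r1, points r0 at the end of the region, and loads the negative region length into r15, which is exactly what the two shll tests on r15 detect. A hedged sketch of the matching userspace sequence, modelled on the atomic-grb.h style helpers of the same era (not copied from them):

/* gUSA-style atomic add; the region is the 6 bytes between the
 * "mov #-6, r15" and label 1, so an exception inside it rolls the
 * PC back and the sequence is re-entered from scratch. */
static inline void gusa_atomic_add(int i, int *v)
{
	int tmp;

	__asm__ __volatile__ (
		"   .align 2		\n\t"
		"   mova   1f, r0	\n\t"	/* r0 = end of region */
		"   mov    r15, r1	\n\t"	/* r1 = saved sp */
		"   mov    #-6, r15	\n\t"	/* enter: r15 = -size */
		"   mov.l  @%1, %0	\n\t"	/* load old value */
		"   add    %2, %0	\n\t"
		"   mov.l  %0, @%1	\n\t"	/* store new value */
		"1: mov    r1, r15	\n\t"	/* leave: restore sp */
		: "=&r" (tmp), "+r" (v)
		: "r" (i)
		: "memory", "r0", "r1");
}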
diff --git a/arch/sh/kernel/cpu/sh3/ex.S b/arch/sh/kernel/cpu/sh3/ex.S
index b6abf38d3a8d..11b6d9c6edae 100644
--- a/arch/sh/kernel/cpu/sh3/ex.S
+++ b/arch/sh/kernel/cpu/sh3/ex.S
@@ -36,7 +36,7 @@ ENTRY(exception_handling_table)
36 .long exception_error ! address error store /* 100 */ 36 .long exception_error ! address error store /* 100 */
37#endif 37#endif
38#if defined(CONFIG_SH_FPU) 38#if defined(CONFIG_SH_FPU)
39 .long do_fpu_error /* 120 */ 39 .long fpu_error_trap_handler /* 120 */
40#else 40#else
41 .long exception_error /* 120 */ 41 .long exception_error /* 120 */
42#endif 42#endif
diff --git a/arch/sh/kernel/cpu/sh3/probe.c b/arch/sh/kernel/cpu/sh3/probe.c
index bf579e061e09..fcc80bb7bee7 100644
--- a/arch/sh/kernel/cpu/sh3/probe.c
+++ b/arch/sh/kernel/cpu/sh3/probe.c
@@ -16,11 +16,11 @@
16#include <asm/cache.h> 16#include <asm/cache.h>
17#include <asm/io.h> 17#include <asm/io.h>
18 18
19int __init detect_cpu_and_cache_system(void) 19int __uses_jump_to_uncached detect_cpu_and_cache_system(void)
20{ 20{
21 unsigned long addr0, addr1, data0, data1, data2, data3; 21 unsigned long addr0, addr1, data0, data1, data2, data3;
22 22
23 jump_to_P2(); 23 jump_to_uncached();
24 /* 24 /*
25 * Check if the entry shadows or not. 25 * Check if the entry shadows or not.
26 * When shadowed, it's 128-entry system. 26 * When shadowed, it's 128-entry system.
@@ -48,7 +48,7 @@ int __init detect_cpu_and_cache_system(void)
48 ctrl_outl(data0&~SH_CACHE_VALID, addr0); 48 ctrl_outl(data0&~SH_CACHE_VALID, addr0);
49 ctrl_outl(data2&~SH_CACHE_VALID, addr1); 49 ctrl_outl(data2&~SH_CACHE_VALID, addr1);
50 50
51 back_to_P1(); 51 back_to_cached();
52 52
53 boot_cpu_data.dcache.ways = 4; 53 boot_cpu_data.dcache.ways = 4;
54 boot_cpu_data.dcache.entry_shift = 4; 54 boot_cpu_data.dcache.entry_shift = 4;
@@ -84,6 +84,9 @@ int __init detect_cpu_and_cache_system(void)
84#if defined(CONFIG_CPU_SUBTYPE_SH7720) 84#if defined(CONFIG_CPU_SUBTYPE_SH7720)
85 boot_cpu_data.type = CPU_SH7720; 85 boot_cpu_data.type = CPU_SH7720;
86#endif 86#endif
87#if defined(CONFIG_CPU_SUBTYPE_SH7721)
88 boot_cpu_data.type = CPU_SH7721;
89#endif
87#if defined(CONFIG_CPU_SUBTYPE_SH7705) 90#if defined(CONFIG_CPU_SUBTYPE_SH7705)
88 boot_cpu_data.type = CPU_SH7705; 91 boot_cpu_data.type = CPU_SH7705;
89 92
diff --git a/arch/sh/kernel/cpu/sh3/setup-sh7705.c b/arch/sh/kernel/cpu/sh3/setup-sh7705.c
index f6c65f2659e9..dd0a20a685f7 100644
--- a/arch/sh/kernel/cpu/sh3/setup-sh7705.c
+++ b/arch/sh/kernel/cpu/sh3/setup-sh7705.c
@@ -66,12 +66,6 @@ static struct intc_group groups[] __initdata = {
66 INTC_GROUP(SCIF2, SCIF2_ERI, SCIF2_RXI, SCIF2_TXI), 66 INTC_GROUP(SCIF2, SCIF2_ERI, SCIF2_RXI, SCIF2_TXI),
67}; 67};
68 68
69static struct intc_prio priorities[] __initdata = {
70 INTC_PRIO(DMAC, 7),
71 INTC_PRIO(SCIF2, 3),
72 INTC_PRIO(SCIF0, 3),
73};
74
75static struct intc_prio_reg prio_registers[] __initdata = { 69static struct intc_prio_reg prio_registers[] __initdata = {
76 { 0xfffffee2, 0, 16, 4, /* IPRA */ { TMU0, TMU1, TMU2, RTC } }, 70 { 0xfffffee2, 0, 16, 4, /* IPRA */ { TMU0, TMU1, TMU2, RTC } },
77 { 0xfffffee4, 0, 16, 4, /* IPRB */ { WDT, REF_RCMI, 0, 0 } }, 71 { 0xfffffee4, 0, 16, 4, /* IPRB */ { WDT, REF_RCMI, 0, 0 } },
@@ -85,7 +79,7 @@ static struct intc_prio_reg prio_registers[] __initdata = {
85}; 79};
86 80
87static DECLARE_INTC_DESC(intc_desc, "sh7705", vectors, groups, 81static DECLARE_INTC_DESC(intc_desc, "sh7705", vectors, groups,
88 priorities, NULL, prio_registers, NULL); 82 NULL, prio_registers, NULL);
89 83
90static struct intc_vect vectors_irq[] __initdata = { 84static struct intc_vect vectors_irq[] __initdata = {
91 INTC_VECT(IRQ0, 0x600), INTC_VECT(IRQ1, 0x620), 85 INTC_VECT(IRQ0, 0x600), INTC_VECT(IRQ1, 0x620),
@@ -93,7 +87,7 @@ static struct intc_vect vectors_irq[] __initdata = {
93}; 87};
94 88
95static DECLARE_INTC_DESC(intc_desc_irq, "sh7705-irq", vectors_irq, NULL, 89static DECLARE_INTC_DESC(intc_desc_irq, "sh7705-irq", vectors_irq, NULL,
96 priorities, NULL, prio_registers, NULL); 90 NULL, prio_registers, NULL);
97 91
98static struct plat_sci_port sci_platform_data[] = { 92static struct plat_sci_port sci_platform_data[] = {
99 { 93 {
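The same two-step change repeats across the sh3/sh4 setup files in this series: the static intc_prio table goes away and DECLARE_INTC_DESC() drops its priorities argument, so priorities now come only from the IPR registers (or their reset defaults). Schematically, old call form versus new (illustrative, trimmed from the hunks above):

/* old: vectors, groups, priorities, mask_regs, prio_regs, sense_regs */
static DECLARE_INTC_DESC(intc_desc, "sh7705", vectors, groups,
			 priorities, NULL, prio_registers, NULL);

/* new: vectors, groups, mask_regs, prio_regs, sense_regs */
static DECLARE_INTC_DESC(intc_desc, "sh7705", vectors, groups,
			 NULL, prio_registers, NULL);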
diff --git a/arch/sh/kernel/cpu/sh3/setup-sh770x.c b/arch/sh/kernel/cpu/sh3/setup-sh770x.c
index 60b04b1f9453..969804bb523b 100644
--- a/arch/sh/kernel/cpu/sh3/setup-sh770x.c
+++ b/arch/sh/kernel/cpu/sh3/setup-sh770x.c
@@ -81,13 +81,6 @@ static struct intc_group groups[] __initdata = {
81 INTC_GROUP(SCIF2, SCIF2_ERI, SCIF2_RXI, SCIF2_BRI, SCIF2_TXI), 81 INTC_GROUP(SCIF2, SCIF2_ERI, SCIF2_RXI, SCIF2_BRI, SCIF2_TXI),
82}; 82};
83 83
84static struct intc_prio priorities[] __initdata = {
85 INTC_PRIO(DMAC, 7),
86 INTC_PRIO(SCI, 3),
87 INTC_PRIO(SCIF2, 3),
88 INTC_PRIO(SCIF0, 3),
89};
90
91static struct intc_prio_reg prio_registers[] __initdata = { 84static struct intc_prio_reg prio_registers[] __initdata = {
92 { 0xfffffee2, 0, 16, 4, /* IPRA */ { TMU0, TMU1, TMU2, RTC } }, 85 { 0xfffffee2, 0, 16, 4, /* IPRA */ { TMU0, TMU1, TMU2, RTC } },
93 { 0xfffffee4, 0, 16, 4, /* IPRB */ { WDT, REF, SCI, 0 } }, 86 { 0xfffffee4, 0, 16, 4, /* IPRB */ { WDT, REF, SCI, 0 } },
@@ -109,7 +102,7 @@ static struct intc_prio_reg prio_registers[] __initdata = {
109}; 102};
110 103
111static DECLARE_INTC_DESC(intc_desc, "sh770x", vectors, groups, 104static DECLARE_INTC_DESC(intc_desc, "sh770x", vectors, groups,
112 priorities, NULL, prio_registers, NULL); 105 NULL, prio_registers, NULL);
113 106
114#if defined(CONFIG_CPU_SUBTYPE_SH7706) || \ 107#if defined(CONFIG_CPU_SUBTYPE_SH7706) || \
115 defined(CONFIG_CPU_SUBTYPE_SH7707) || \ 108 defined(CONFIG_CPU_SUBTYPE_SH7707) || \
@@ -120,7 +113,7 @@ static struct intc_vect vectors_irq[] __initdata = {
120}; 113};
121 114
122static DECLARE_INTC_DESC(intc_desc_irq, "sh770x-irq", vectors_irq, NULL, 115static DECLARE_INTC_DESC(intc_desc_irq, "sh770x-irq", vectors_irq, NULL,
123 priorities, NULL, prio_registers, NULL); 116 NULL, prio_registers, NULL);
124#endif 117#endif
125 118
126static struct resource rtc_resources[] = { 119static struct resource rtc_resources[] = {
diff --git a/arch/sh/kernel/cpu/sh3/setup-sh7710.c b/arch/sh/kernel/cpu/sh3/setup-sh7710.c
index 84e5629fa841..0cc0e2bf135d 100644
--- a/arch/sh/kernel/cpu/sh3/setup-sh7710.c
+++ b/arch/sh/kernel/cpu/sh3/setup-sh7710.c
@@ -73,18 +73,6 @@ static struct intc_group groups[] __initdata = {
73 INTC_GROUP(SIOF1, SIOF1_ERI, SIOF1_TXI, SIOF1_RXI, SIOF1_CCI), 73 INTC_GROUP(SIOF1, SIOF1_ERI, SIOF1_TXI, SIOF1_RXI, SIOF1_CCI),
74}; 74};
75 75
76static struct intc_prio priorities[] __initdata = {
77 INTC_PRIO(DMAC1, 7),
78 INTC_PRIO(DMAC2, 7),
79 INTC_PRIO(SCIF0, 3),
80 INTC_PRIO(SCIF1, 3),
81 INTC_PRIO(SIOF0, 3),
82 INTC_PRIO(SIOF1, 3),
83 INTC_PRIO(EDMAC0, 5),
84 INTC_PRIO(EDMAC1, 5),
85 INTC_PRIO(EDMAC2, 5),
86};
87
88static struct intc_prio_reg prio_registers[] __initdata = { 76static struct intc_prio_reg prio_registers[] __initdata = {
89 { 0xfffffee2, 0, 16, 4, /* IPRA */ { TMU0, TMU1, TMU2, RTC } }, 77 { 0xfffffee2, 0, 16, 4, /* IPRA */ { TMU0, TMU1, TMU2, RTC } },
90 { 0xfffffee4, 0, 16, 4, /* IPRB */ { WDT, REF, 0, 0 } }, 78 { 0xfffffee4, 0, 16, 4, /* IPRB */ { WDT, REF, 0, 0 } },
@@ -101,7 +89,7 @@ static struct intc_prio_reg prio_registers[] __initdata = {
101}; 89};
102 90
103static DECLARE_INTC_DESC(intc_desc, "sh7710", vectors, groups, 91static DECLARE_INTC_DESC(intc_desc, "sh7710", vectors, groups,
104 priorities, NULL, prio_registers, NULL); 92 NULL, prio_registers, NULL);
105 93
106static struct intc_vect vectors_irq[] __initdata = { 94static struct intc_vect vectors_irq[] __initdata = {
107 INTC_VECT(IRQ0, 0x600), INTC_VECT(IRQ1, 0x620), 95 INTC_VECT(IRQ0, 0x600), INTC_VECT(IRQ1, 0x620),
@@ -109,7 +97,7 @@ static struct intc_vect vectors_irq[] __initdata = {
109}; 97};
110 98
111static DECLARE_INTC_DESC(intc_desc_irq, "sh7710-irq", vectors_irq, NULL, 99static DECLARE_INTC_DESC(intc_desc_irq, "sh7710-irq", vectors_irq, NULL,
112 priorities, NULL, prio_registers, NULL); 100 NULL, prio_registers, NULL);
113 101
114static struct resource rtc_resources[] = { 102static struct resource rtc_resources[] = {
115 [0] = { 103 [0] = {
diff --git a/arch/sh/kernel/cpu/sh3/setup-sh7720.c b/arch/sh/kernel/cpu/sh3/setup-sh7720.c
index a0929b8a95ae..3855ea4c21c8 100644
--- a/arch/sh/kernel/cpu/sh3/setup-sh7720.c
+++ b/arch/sh/kernel/cpu/sh3/setup-sh7720.c
@@ -85,9 +85,62 @@ static struct platform_device sci_device = {
85 }, 85 },
86}; 86};
87 87
88static struct resource usb_ohci_resources[] = {
89 [0] = {
90 .start = 0xA4428000,
91 .end = 0xA44280FF,
92 .flags = IORESOURCE_MEM,
93 },
94 [1] = {
95 .start = 67,
96 .end = 67,
97 .flags = IORESOURCE_IRQ,
98 },
99};
100
101static u64 usb_ohci_dma_mask = 0xffffffffUL;
102static struct platform_device usb_ohci_device = {
103 .name = "sh_ohci",
104 .id = -1,
105 .dev = {
106 .dma_mask = &usb_ohci_dma_mask,
107 .coherent_dma_mask = 0xffffffff,
108 },
109 .num_resources = ARRAY_SIZE(usb_ohci_resources),
110 .resource = usb_ohci_resources,
111};
112
113static struct resource usbf_resources[] = {
114 [0] = {
115 .name = "sh_udc",
116 .start = 0xA4420000,
117 .end = 0xA44200FF,
118 .flags = IORESOURCE_MEM,
119 },
120 [1] = {
121 .name = "sh_udc",
122 .start = 65,
123 .end = 65,
124 .flags = IORESOURCE_IRQ,
125 },
126};
127
128static struct platform_device usbf_device = {
129 .name = "sh_udc",
130 .id = -1,
131 .dev = {
132 .dma_mask = NULL,
133 .coherent_dma_mask = 0xffffffff,
134 },
135 .num_resources = ARRAY_SIZE(usbf_resources),
136 .resource = usbf_resources,
137};
138
88static struct platform_device *sh7720_devices[] __initdata = { 139static struct platform_device *sh7720_devices[] __initdata = {
89 &rtc_device, 140 &rtc_device,
90 &sci_device, 141 &sci_device,
142 &usb_ohci_device,
143 &usbf_device,
91}; 144};
92 145
93static int __init sh7720_devices_setup(void) 146static int __init sh7720_devices_setup(void)
@@ -127,8 +180,11 @@ static struct intc_vect vectors[] __initdata = {
127 INTC_VECT(USBF_SPD, 0x6e0), INTC_VECT(DMAC1_DEI0, 0x800), 180 INTC_VECT(USBF_SPD, 0x6e0), INTC_VECT(DMAC1_DEI0, 0x800),
128 INTC_VECT(DMAC1_DEI1, 0x820), INTC_VECT(DMAC1_DEI2, 0x840), 181 INTC_VECT(DMAC1_DEI1, 0x820), INTC_VECT(DMAC1_DEI2, 0x840),
129 INTC_VECT(DMAC1_DEI3, 0x860), INTC_VECT(LCDC, 0x900), 182 INTC_VECT(DMAC1_DEI3, 0x860), INTC_VECT(LCDC, 0x900),
130 INTC_VECT(SSL, 0x980), INTC_VECT(USBFI0, 0xa20), 183#if defined(CONFIG_CPU_SUBTYPE_SH7720)
131 INTC_VECT(USBFI1, 0xa40), INTC_VECT(USBHI, 0xa60), 184 INTC_VECT(SSL, 0x980),
185#endif
186 INTC_VECT(USBFI0, 0xa20), INTC_VECT(USBFI1, 0xa40),
187 INTC_VECT(USBHI, 0xa60),
132 INTC_VECT(DMAC2_DEI4, 0xb80), INTC_VECT(DMAC2_DEI5, 0xba0), 188 INTC_VECT(DMAC2_DEI4, 0xb80), INTC_VECT(DMAC2_DEI5, 0xba0),
133 INTC_VECT(ADC, 0xbe0), INTC_VECT(SCIF0, 0xc00), 189 INTC_VECT(ADC, 0xbe0), INTC_VECT(SCIF0, 0xc00),
134 INTC_VECT(SCIF1, 0xc20), INTC_VECT(PINT07, 0xc80), 190 INTC_VECT(SCIF1, 0xc20), INTC_VECT(PINT07, 0xc80),
@@ -153,22 +209,16 @@ static struct intc_group groups[] __initdata = {
153 INTC_GROUP(MMC, MMCI0, MMCI1, MMCI2, MMCI3), 209 INTC_GROUP(MMC, MMCI0, MMCI1, MMCI2, MMCI3),
154}; 210};
155 211
156static struct intc_prio priorities[] __initdata = {
157 INTC_PRIO(SCIF0, 2),
158 INTC_PRIO(SCIF1, 2),
159 INTC_PRIO(DMAC1, 1),
160 INTC_PRIO(DMAC2, 1),
161 INTC_PRIO(RTC, 2),
162 INTC_PRIO(TMU, 2),
163 INTC_PRIO(TPU, 2),
164};
165
166static struct intc_prio_reg prio_registers[] __initdata = { 212static struct intc_prio_reg prio_registers[] __initdata = {
167 { 0xA414FEE2UL, 0, 16, 4, /* IPRA */ { TMU0, TMU1, TMU2, RTC } }, 213 { 0xA414FEE2UL, 0, 16, 4, /* IPRA */ { TMU0, TMU1, TMU2, RTC } },
168 { 0xA414FEE4UL, 0, 16, 4, /* IPRB */ { WDT, REF_RCMI, SIM, 0 } }, 214 { 0xA414FEE4UL, 0, 16, 4, /* IPRB */ { WDT, REF_RCMI, SIM, 0 } },
169 { 0xA4140016UL, 0, 16, 4, /* IPRC */ { IRQ3, IRQ2, IRQ1, IRQ0 } }, 215 { 0xA4140016UL, 0, 16, 4, /* IPRC */ { IRQ3, IRQ2, IRQ1, IRQ0 } },
170 { 0xA4140018UL, 0, 16, 4, /* IPRD */ { USBF_SPD, TMU_SUNI, IRQ5, IRQ4 } }, 216 { 0xA4140018UL, 0, 16, 4, /* IPRD */ { USBF_SPD, TMU_SUNI, IRQ5, IRQ4 } },
217#if defined(CONFIG_CPU_SUBTYPE_SH7720)
171 { 0xA414001AUL, 0, 16, 4, /* IPRE */ { DMAC1, 0, LCDC, SSL } }, 218 { 0xA414001AUL, 0, 16, 4, /* IPRE */ { DMAC1, 0, LCDC, SSL } },
219#else
220 { 0xA414001AUL, 0, 16, 4, /* IPRE */ { DMAC1, 0, LCDC, 0 } },
221#endif
172 { 0xA4080000UL, 0, 16, 4, /* IPRF */ { ADC, DMAC2, USBFI, CMT } }, 222 { 0xA4080000UL, 0, 16, 4, /* IPRF */ { ADC, DMAC2, USBFI, CMT } },
173 { 0xA4080002UL, 0, 16, 4, /* IPRG */ { SCIF0, SCIF1, 0, 0 } }, 223 { 0xA4080002UL, 0, 16, 4, /* IPRG */ { SCIF0, SCIF1, 0, 0 } },
174 { 0xA4080004UL, 0, 16, 4, /* IPRH */ { PINT07, PINT815, TPU, IIC } }, 224 { 0xA4080004UL, 0, 16, 4, /* IPRH */ { PINT07, PINT815, TPU, IIC } },
@@ -177,7 +227,7 @@ static struct intc_prio_reg prio_registers[] __initdata = {
177}; 227};
178 228
179static DECLARE_INTC_DESC(intc_desc, "sh7720", vectors, groups, 229static DECLARE_INTC_DESC(intc_desc, "sh7720", vectors, groups,
180 priorities, NULL, prio_registers, NULL); 230 NULL, prio_registers, NULL);
181 231
182static struct intc_sense_reg sense_registers[] __initdata = { 232static struct intc_sense_reg sense_registers[] __initdata = {
183 { INTC_ICR1, 16, 2, { 0, 0, IRQ5, IRQ4, IRQ3, IRQ2, IRQ1, IRQ0 } }, 233 { INTC_ICR1, 16, 2, { 0, 0, IRQ5, IRQ4, IRQ3, IRQ2, IRQ1, IRQ0 } },
@@ -190,7 +240,7 @@ static struct intc_vect vectors_irq[] __initdata = {
190}; 240};
191 241
192static DECLARE_INTC_DESC(intc_irq_desc, "sh7720-irq", vectors_irq, 242static DECLARE_INTC_DESC(intc_irq_desc, "sh7720-irq", vectors_irq,
193 NULL, priorities, NULL, prio_registers, sense_registers); 243 NULL, NULL, prio_registers, sense_registers);
194 244
195void __init plat_irq_setup_pins(int mode) 245void __init plat_irq_setup_pins(int mode)
196{ 246{
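
With the OHCI and UDC platform devices added above, the corresponding drivers find their register window and interrupt through the normal platform API; a hedged sketch of the driver-side lookup (function name illustrative, error handling trimmed):

#include <linux/platform_device.h>
#include <linux/ioport.h>

static int example_probe(struct platform_device *pdev)
{
	struct resource *mem;
	int irq;

	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); /* 0xA4428000.. */
	irq = platform_get_irq(pdev, 0);		      /* 67 above */
	if (!mem || irq < 0)
		return -ENODEV;
	/* then ioremap(mem->start, mem->end - mem->start + 1) and
	 * request_irq(irq, ...) as usual */
	return 0;
}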
diff --git a/arch/sh/kernel/cpu/sh4/Makefile b/arch/sh/kernel/cpu/sh4/Makefile
index dadd6bffc128..d608557c7a3f 100644
--- a/arch/sh/kernel/cpu/sh4/Makefile
+++ b/arch/sh/kernel/cpu/sh4/Makefile
@@ -5,7 +5,7 @@
5obj-y := probe.o common.o 5obj-y := probe.o common.o
6common-y += $(addprefix ../sh3/, entry.o ex.o) 6common-y += $(addprefix ../sh3/, entry.o ex.o)
7 7
8obj-$(CONFIG_SH_FPU) += fpu.o 8obj-$(CONFIG_SH_FPU) += fpu.o softfloat.o
9obj-$(CONFIG_SH_STORE_QUEUES) += sq.o 9obj-$(CONFIG_SH_STORE_QUEUES) += sq.o
10 10
11# CPU subtype setup 11# CPU subtype setup
diff --git a/arch/sh/kernel/cpu/sh4/fpu.c b/arch/sh/kernel/cpu/sh4/fpu.c
index c5a4fc77fa06..817f9939cda6 100644
--- a/arch/sh/kernel/cpu/sh4/fpu.c
+++ b/arch/sh/kernel/cpu/sh4/fpu.c
@@ -1,7 +1,4 @@
1/* $Id: fpu.c,v 1.4 2004/01/13 05:52:11 kkojima Exp $ 1/*
2 *
3 * linux/arch/sh/kernel/fpu.c
4 *
5 * Save/restore floating point context for signal handlers. 2 * Save/restore floating point context for signal handlers.
6 * 3 *
7 * This file is subject to the terms and conditions of the GNU General Public 4 * This file is subject to the terms and conditions of the GNU General Public
@@ -9,15 +6,16 @@
9 * for more details. 6 * for more details.
10 * 7 *
11 * Copyright (C) 1999, 2000 Kaz Kojima & Niibe Yutaka 8 * Copyright (C) 1999, 2000 Kaz Kojima & Niibe Yutaka
9 * Copyright (C) 2006 ST Microelectronics Ltd. (denorm support)
12 * 10 *
13 * FIXME! These routines can be optimized in big endian case. 11 * FIXME! These routines have not been tested for big endian case.
14 */ 12 */
15
16#include <linux/sched.h> 13#include <linux/sched.h>
17#include <linux/signal.h> 14#include <linux/signal.h>
15#include <linux/io.h>
16#include <asm/cpu/fpu.h>
18#include <asm/processor.h> 17#include <asm/processor.h>
19#include <asm/system.h> 18#include <asm/system.h>
20#include <asm/io.h>
21 19
22/* The PR (precision) bit in the FP Status Register must be clear when 20/* The PR (precision) bit in the FP Status Register must be clear when
23 * an frchg instruction is executed, otherwise the instruction is undefined. 21 * an frchg instruction is executed, otherwise the instruction is undefined.
@@ -25,177 +23,184 @@
25 */ 23 */
26 24
27#define FPSCR_RCHG 0x00000000 25#define FPSCR_RCHG 0x00000000
26extern unsigned long long float64_div(unsigned long long a,
27 unsigned long long b);
28extern unsigned long int float32_div(unsigned long int a, unsigned long int b);
29extern unsigned long long float64_mul(unsigned long long a,
30 unsigned long long b);
31extern unsigned long int float32_mul(unsigned long int a, unsigned long int b);
32extern unsigned long long float64_add(unsigned long long a,
33 unsigned long long b);
34extern unsigned long int float32_add(unsigned long int a, unsigned long int b);
35extern unsigned long long float64_sub(unsigned long long a,
36 unsigned long long b);
37extern unsigned long int float32_sub(unsigned long int a, unsigned long int b);
28 38
39static unsigned int fpu_exception_flags;
29 40
30/* 41/*
31 * Save FPU registers onto task structure. 42 * Save FPU registers onto task structure.
32 * Assume called with FPU enabled (SR.FD=0). 43 * Assume called with FPU enabled (SR.FD=0).
33 */ 44 */
34void 45void save_fpu(struct task_struct *tsk, struct pt_regs *regs)
35save_fpu(struct task_struct *tsk, struct pt_regs *regs)
36{ 46{
37 unsigned long dummy; 47 unsigned long dummy;
38 48
39 clear_tsk_thread_flag(tsk, TIF_USEDFPU); 49 clear_tsk_thread_flag(tsk, TIF_USEDFPU);
40 enable_fpu(); 50 enable_fpu();
41 asm volatile("sts.l fpul, @-%0\n\t" 51 asm volatile ("sts.l fpul, @-%0\n\t"
42 "sts.l fpscr, @-%0\n\t" 52 "sts.l fpscr, @-%0\n\t"
43 "lds %2, fpscr\n\t" 53 "lds %2, fpscr\n\t"
44 "frchg\n\t" 54 "frchg\n\t"
45 "fmov.s fr15, @-%0\n\t" 55 "fmov.s fr15, @-%0\n\t"
46 "fmov.s fr14, @-%0\n\t" 56 "fmov.s fr14, @-%0\n\t"
47 "fmov.s fr13, @-%0\n\t" 57 "fmov.s fr13, @-%0\n\t"
48 "fmov.s fr12, @-%0\n\t" 58 "fmov.s fr12, @-%0\n\t"
49 "fmov.s fr11, @-%0\n\t" 59 "fmov.s fr11, @-%0\n\t"
50 "fmov.s fr10, @-%0\n\t" 60 "fmov.s fr10, @-%0\n\t"
51 "fmov.s fr9, @-%0\n\t" 61 "fmov.s fr9, @-%0\n\t"
52 "fmov.s fr8, @-%0\n\t" 62 "fmov.s fr8, @-%0\n\t"
53 "fmov.s fr7, @-%0\n\t" 63 "fmov.s fr7, @-%0\n\t"
54 "fmov.s fr6, @-%0\n\t" 64 "fmov.s fr6, @-%0\n\t"
55 "fmov.s fr5, @-%0\n\t" 65 "fmov.s fr5, @-%0\n\t"
56 "fmov.s fr4, @-%0\n\t" 66 "fmov.s fr4, @-%0\n\t"
57 "fmov.s fr3, @-%0\n\t" 67 "fmov.s fr3, @-%0\n\t"
58 "fmov.s fr2, @-%0\n\t" 68 "fmov.s fr2, @-%0\n\t"
59 "fmov.s fr1, @-%0\n\t" 69 "fmov.s fr1, @-%0\n\t"
60 "fmov.s fr0, @-%0\n\t" 70 "fmov.s fr0, @-%0\n\t"
61 "frchg\n\t" 71 "frchg\n\t"
62 "fmov.s fr15, @-%0\n\t" 72 "fmov.s fr15, @-%0\n\t"
63 "fmov.s fr14, @-%0\n\t" 73 "fmov.s fr14, @-%0\n\t"
64 "fmov.s fr13, @-%0\n\t" 74 "fmov.s fr13, @-%0\n\t"
65 "fmov.s fr12, @-%0\n\t" 75 "fmov.s fr12, @-%0\n\t"
66 "fmov.s fr11, @-%0\n\t" 76 "fmov.s fr11, @-%0\n\t"
67 "fmov.s fr10, @-%0\n\t" 77 "fmov.s fr10, @-%0\n\t"
68 "fmov.s fr9, @-%0\n\t" 78 "fmov.s fr9, @-%0\n\t"
69 "fmov.s fr8, @-%0\n\t" 79 "fmov.s fr8, @-%0\n\t"
70 "fmov.s fr7, @-%0\n\t" 80 "fmov.s fr7, @-%0\n\t"
71 "fmov.s fr6, @-%0\n\t" 81 "fmov.s fr6, @-%0\n\t"
72 "fmov.s fr5, @-%0\n\t" 82 "fmov.s fr5, @-%0\n\t"
73 "fmov.s fr4, @-%0\n\t" 83 "fmov.s fr4, @-%0\n\t"
74 "fmov.s fr3, @-%0\n\t" 84 "fmov.s fr3, @-%0\n\t"
75 "fmov.s fr2, @-%0\n\t" 85 "fmov.s fr2, @-%0\n\t"
76 "fmov.s fr1, @-%0\n\t" 86 "fmov.s fr1, @-%0\n\t"
77 "fmov.s fr0, @-%0\n\t" 87 "fmov.s fr0, @-%0\n\t"
78 "lds %3, fpscr\n\t" 88 "lds %3, fpscr\n\t":"=r" (dummy)
79 : "=r" (dummy) 89 :"0"((char *)(&tsk->thread.fpu.hard.status)),
80 : "0" ((char *)(&tsk->thread.fpu.hard.status)), 90 "r"(FPSCR_RCHG), "r"(FPSCR_INIT)
81 "r" (FPSCR_RCHG), 91 :"memory");
82 "r" (FPSCR_INIT) 92
83 : "memory"); 93 disable_fpu();
84 94 release_fpu(regs);
85 disable_fpu();
86 release_fpu(regs);
87} 95}
88 96
89static void 97static void restore_fpu(struct task_struct *tsk)
90restore_fpu(struct task_struct *tsk)
91{ 98{
92 unsigned long dummy; 99 unsigned long dummy;
93 100
94 enable_fpu(); 101 enable_fpu();
95 asm volatile("lds %2, fpscr\n\t" 102 asm volatile ("lds %2, fpscr\n\t"
96 "fmov.s @%0+, fr0\n\t" 103 "fmov.s @%0+, fr0\n\t"
97 "fmov.s @%0+, fr1\n\t" 104 "fmov.s @%0+, fr1\n\t"
98 "fmov.s @%0+, fr2\n\t" 105 "fmov.s @%0+, fr2\n\t"
99 "fmov.s @%0+, fr3\n\t" 106 "fmov.s @%0+, fr3\n\t"
100 "fmov.s @%0+, fr4\n\t" 107 "fmov.s @%0+, fr4\n\t"
101 "fmov.s @%0+, fr5\n\t" 108 "fmov.s @%0+, fr5\n\t"
102 "fmov.s @%0+, fr6\n\t" 109 "fmov.s @%0+, fr6\n\t"
103 "fmov.s @%0+, fr7\n\t" 110 "fmov.s @%0+, fr7\n\t"
104 "fmov.s @%0+, fr8\n\t" 111 "fmov.s @%0+, fr8\n\t"
105 "fmov.s @%0+, fr9\n\t" 112 "fmov.s @%0+, fr9\n\t"
106 "fmov.s @%0+, fr10\n\t" 113 "fmov.s @%0+, fr10\n\t"
107 "fmov.s @%0+, fr11\n\t" 114 "fmov.s @%0+, fr11\n\t"
108 "fmov.s @%0+, fr12\n\t" 115 "fmov.s @%0+, fr12\n\t"
109 "fmov.s @%0+, fr13\n\t" 116 "fmov.s @%0+, fr13\n\t"
110 "fmov.s @%0+, fr14\n\t" 117 "fmov.s @%0+, fr14\n\t"
111 "fmov.s @%0+, fr15\n\t" 118 "fmov.s @%0+, fr15\n\t"
112 "frchg\n\t" 119 "frchg\n\t"
113 "fmov.s @%0+, fr0\n\t" 120 "fmov.s @%0+, fr0\n\t"
114 "fmov.s @%0+, fr1\n\t" 121 "fmov.s @%0+, fr1\n\t"
115 "fmov.s @%0+, fr2\n\t" 122 "fmov.s @%0+, fr2\n\t"
116 "fmov.s @%0+, fr3\n\t" 123 "fmov.s @%0+, fr3\n\t"
117 "fmov.s @%0+, fr4\n\t" 124 "fmov.s @%0+, fr4\n\t"
118 "fmov.s @%0+, fr5\n\t" 125 "fmov.s @%0+, fr5\n\t"
119 "fmov.s @%0+, fr6\n\t" 126 "fmov.s @%0+, fr6\n\t"
120 "fmov.s @%0+, fr7\n\t" 127 "fmov.s @%0+, fr7\n\t"
121 "fmov.s @%0+, fr8\n\t" 128 "fmov.s @%0+, fr8\n\t"
122 "fmov.s @%0+, fr9\n\t" 129 "fmov.s @%0+, fr9\n\t"
123 "fmov.s @%0+, fr10\n\t" 130 "fmov.s @%0+, fr10\n\t"
124 "fmov.s @%0+, fr11\n\t" 131 "fmov.s @%0+, fr11\n\t"
125 "fmov.s @%0+, fr12\n\t" 132 "fmov.s @%0+, fr12\n\t"
126 "fmov.s @%0+, fr13\n\t" 133 "fmov.s @%0+, fr13\n\t"
127 "fmov.s @%0+, fr14\n\t" 134 "fmov.s @%0+, fr14\n\t"
128 "fmov.s @%0+, fr15\n\t" 135 "fmov.s @%0+, fr15\n\t"
129 "frchg\n\t" 136 "frchg\n\t"
130 "lds.l @%0+, fpscr\n\t" 137 "lds.l @%0+, fpscr\n\t"
131 "lds.l @%0+, fpul\n\t" 138 "lds.l @%0+, fpul\n\t"
132 : "=r" (dummy) 139 :"=r" (dummy)
133 : "0" (&tsk->thread.fpu), "r" (FPSCR_RCHG) 140 :"0"(&tsk->thread.fpu), "r"(FPSCR_RCHG)
134 : "memory"); 141 :"memory");
135 disable_fpu(); 142 disable_fpu();
136} 143}
137 144
138/* 145/*
139 * Load the FPU with signalling NANS. This bit pattern we're using 146 * Load the FPU with signalling NANS. This bit pattern we're using
140 * has the property that no matter whether considered as single or as 147 * has the property that no matter whether considered as single or as
141 * double precision represents signaling NANS. 148 * double precision represents signaling NANS.
142 */ 149 */
143 150
144static void 151static void fpu_init(void)
145fpu_init(void)
146{ 152{
147 enable_fpu(); 153 enable_fpu();
148 asm volatile("lds %0, fpul\n\t" 154 asm volatile ( "lds %0, fpul\n\t"
149 "lds %1, fpscr\n\t" 155 "lds %1, fpscr\n\t"
150 "fsts fpul, fr0\n\t" 156 "fsts fpul, fr0\n\t"
151 "fsts fpul, fr1\n\t" 157 "fsts fpul, fr1\n\t"
152 "fsts fpul, fr2\n\t" 158 "fsts fpul, fr2\n\t"
153 "fsts fpul, fr3\n\t" 159 "fsts fpul, fr3\n\t"
154 "fsts fpul, fr4\n\t" 160 "fsts fpul, fr4\n\t"
155 "fsts fpul, fr5\n\t" 161 "fsts fpul, fr5\n\t"
156 "fsts fpul, fr6\n\t" 162 "fsts fpul, fr6\n\t"
157 "fsts fpul, fr7\n\t" 163 "fsts fpul, fr7\n\t"
158 "fsts fpul, fr8\n\t" 164 "fsts fpul, fr8\n\t"
159 "fsts fpul, fr9\n\t" 165 "fsts fpul, fr9\n\t"
160 "fsts fpul, fr10\n\t" 166 "fsts fpul, fr10\n\t"
161 "fsts fpul, fr11\n\t" 167 "fsts fpul, fr11\n\t"
162 "fsts fpul, fr12\n\t" 168 "fsts fpul, fr12\n\t"
163 "fsts fpul, fr13\n\t" 169 "fsts fpul, fr13\n\t"
164 "fsts fpul, fr14\n\t" 170 "fsts fpul, fr14\n\t"
165 "fsts fpul, fr15\n\t" 171 "fsts fpul, fr15\n\t"
166 "frchg\n\t" 172 "frchg\n\t"
167 "fsts fpul, fr0\n\t" 173 "fsts fpul, fr0\n\t"
168 "fsts fpul, fr1\n\t" 174 "fsts fpul, fr1\n\t"
169 "fsts fpul, fr2\n\t" 175 "fsts fpul, fr2\n\t"
170 "fsts fpul, fr3\n\t" 176 "fsts fpul, fr3\n\t"
171 "fsts fpul, fr4\n\t" 177 "fsts fpul, fr4\n\t"
172 "fsts fpul, fr5\n\t" 178 "fsts fpul, fr5\n\t"
173 "fsts fpul, fr6\n\t" 179 "fsts fpul, fr6\n\t"
174 "fsts fpul, fr7\n\t" 180 "fsts fpul, fr7\n\t"
175 "fsts fpul, fr8\n\t" 181 "fsts fpul, fr8\n\t"
176 "fsts fpul, fr9\n\t" 182 "fsts fpul, fr9\n\t"
177 "fsts fpul, fr10\n\t" 183 "fsts fpul, fr10\n\t"
178 "fsts fpul, fr11\n\t" 184 "fsts fpul, fr11\n\t"
179 "fsts fpul, fr12\n\t" 185 "fsts fpul, fr12\n\t"
180 "fsts fpul, fr13\n\t" 186 "fsts fpul, fr13\n\t"
181 "fsts fpul, fr14\n\t" 187 "fsts fpul, fr14\n\t"
182 "fsts fpul, fr15\n\t" 188 "fsts fpul, fr15\n\t"
183 "frchg\n\t" 189 "frchg\n\t"
184 "lds %2, fpscr\n\t" 190 "lds %2, fpscr\n\t"
185 : /* no output */ 191 : /* no output */
186 : "r" (0), "r" (FPSCR_RCHG), "r" (FPSCR_INIT)); 192 :"r" (0), "r"(FPSCR_RCHG), "r"(FPSCR_INIT));
187 disable_fpu(); 193 disable_fpu();
188} 194}
189 195
190/** 196/**
191 * denormal_to_double - Given denormalized float number, 197 * denormal_to_double - Given denormalized float number,
192 * store double float 198 * store double float
193 * 199 *
194 * @fpu: Pointer to sh_fpu_hard structure 200 * @fpu: Pointer to sh_fpu_hard structure
195 * @n: Index to FP register 201 * @n: Index to FP register
196 */ 202 */
197static void 203static void denormal_to_double(struct sh_fpu_hard_struct *fpu, int n)
198denormal_to_double (struct sh_fpu_hard_struct *fpu, int n)
199{ 204{
200 unsigned long du, dl; 205 unsigned long du, dl;
201 unsigned long x = fpu->fpul; 206 unsigned long x = fpu->fpul;
@@ -212,7 +217,7 @@ denormal_to_double (struct sh_fpu_hard_struct *fpu, int n)
212 dl = x << 29; 217 dl = x << 29;
213 218
214 fpu->fp_regs[n] = du; 219 fpu->fp_regs[n] = du;
215 fpu->fp_regs[n+1] = dl; 220 fpu->fp_regs[n + 1] = dl;
216 } 221 }
217} 222}
218 223
@@ -223,68 +228,191 @@ denormal_to_double (struct sh_fpu_hard_struct *fpu, int n)
223 * 228 *
224 * Returns 1 when it's handled (should not cause exception). 229 * Returns 1 when it's handled (should not cause exception).
225 */ 230 */
226static int 231static int ieee_fpe_handler(struct pt_regs *regs)
227ieee_fpe_handler (struct pt_regs *regs)
228{ 232{
229 unsigned short insn = *(unsigned short *) regs->pc; 233 unsigned short insn = *(unsigned short *)regs->pc;
230 unsigned short finsn; 234 unsigned short finsn;
231 unsigned long nextpc; 235 unsigned long nextpc;
232 int nib[4] = { 236 int nib[4] = {
233 (insn >> 12) & 0xf, 237 (insn >> 12) & 0xf,
234 (insn >> 8) & 0xf, 238 (insn >> 8) & 0xf,
235 (insn >> 4) & 0xf, 239 (insn >> 4) & 0xf,
236 insn & 0xf}; 240 insn & 0xf
237 241 };
238 if (nib[0] == 0xb || 242
239 (nib[0] == 0x4 && nib[2] == 0x0 && nib[3] == 0xb)) /* bsr & jsr */ 243 if (nib[0] == 0xb || (nib[0] == 0x4 && nib[2] == 0x0 && nib[3] == 0xb))
240 regs->pr = regs->pc + 4; 244 regs->pr = regs->pc + 4; /* bsr & jsr */
241 245
242 if (nib[0] == 0xa || nib[0] == 0xb) { /* bra & bsr */ 246 if (nib[0] == 0xa || nib[0] == 0xb) {
243 nextpc = regs->pc + 4 + ((short) ((insn & 0xfff) << 4) >> 3); 247 /* bra & bsr */
244 finsn = *(unsigned short *) (regs->pc + 2); 248 nextpc = regs->pc + 4 + ((short)((insn & 0xfff) << 4) >> 3);
245 } else if (nib[0] == 0x8 && nib[1] == 0xd) { /* bt/s */ 249 finsn = *(unsigned short *)(regs->pc + 2);
250 } else if (nib[0] == 0x8 && nib[1] == 0xd) {
251 /* bt/s */
246 if (regs->sr & 1) 252 if (regs->sr & 1)
247 nextpc = regs->pc + 4 + ((char) (insn & 0xff) << 1); 253 nextpc = regs->pc + 4 + ((char)(insn & 0xff) << 1);
248 else 254 else
249 nextpc = regs->pc + 4; 255 nextpc = regs->pc + 4;
250 finsn = *(unsigned short *) (regs->pc + 2); 256 finsn = *(unsigned short *)(regs->pc + 2);
251 } else if (nib[0] == 0x8 && nib[1] == 0xf) { /* bf/s */ 257 } else if (nib[0] == 0x8 && nib[1] == 0xf) {
258 /* bf/s */
252 if (regs->sr & 1) 259 if (regs->sr & 1)
253 nextpc = regs->pc + 4; 260 nextpc = regs->pc + 4;
254 else 261 else
255 nextpc = regs->pc + 4 + ((char) (insn & 0xff) << 1); 262 nextpc = regs->pc + 4 + ((char)(insn & 0xff) << 1);
256 finsn = *(unsigned short *) (regs->pc + 2); 263 finsn = *(unsigned short *)(regs->pc + 2);
257 } else if (nib[0] == 0x4 && nib[3] == 0xb && 264 } else if (nib[0] == 0x4 && nib[3] == 0xb &&
258 (nib[2] == 0x0 || nib[2] == 0x2)) { /* jmp & jsr */ 265 (nib[2] == 0x0 || nib[2] == 0x2)) {
266 /* jmp & jsr */
259 nextpc = regs->regs[nib[1]]; 267 nextpc = regs->regs[nib[1]];
260 finsn = *(unsigned short *) (regs->pc + 2); 268 finsn = *(unsigned short *)(regs->pc + 2);
261 } else if (nib[0] == 0x0 && nib[3] == 0x3 && 269 } else if (nib[0] == 0x0 && nib[3] == 0x3 &&
262 (nib[2] == 0x0 || nib[2] == 0x2)) { /* braf & bsrf */ 270 (nib[2] == 0x0 || nib[2] == 0x2)) {
271 /* braf & bsrf */
263 nextpc = regs->pc + 4 + regs->regs[nib[1]]; 272 nextpc = regs->pc + 4 + regs->regs[nib[1]];
264 finsn = *(unsigned short *) (regs->pc + 2); 273 finsn = *(unsigned short *)(regs->pc + 2);
265 } else if (insn == 0x000b) { /* rts */ 274 } else if (insn == 0x000b) {
275 /* rts */
266 nextpc = regs->pr; 276 nextpc = regs->pr;
267 finsn = *(unsigned short *) (regs->pc + 2); 277 finsn = *(unsigned short *)(regs->pc + 2);
268 } else { 278 } else {
269 nextpc = regs->pc + instruction_size(insn); 279 nextpc = regs->pc + instruction_size(insn);
270 finsn = insn; 280 finsn = insn;
271 } 281 }
272 282
273 if ((finsn & 0xf1ff) == 0xf0ad) { /* fcnvsd */ 283 if ((finsn & 0xf1ff) == 0xf0ad) {
284 /* fcnvsd */
274 struct task_struct *tsk = current; 285 struct task_struct *tsk = current;
275 286
276 save_fpu(tsk, regs); 287 save_fpu(tsk, regs);
277 if ((tsk->thread.fpu.hard.fpscr & (1 << 17))) { 288 if ((tsk->thread.fpu.hard.fpscr & FPSCR_CAUSE_ERROR))
278 /* FPU error */ 289 /* FPU error */
279 denormal_to_double (&tsk->thread.fpu.hard, 290 denormal_to_double(&tsk->thread.fpu.hard,
280 (finsn >> 8) & 0xf); 291 (finsn >> 8) & 0xf);
281 tsk->thread.fpu.hard.fpscr &= 292 else
282 ~(FPSCR_CAUSE_MASK | FPSCR_FLAG_MASK); 293 return 0;
283 grab_fpu(regs); 294
284 restore_fpu(tsk); 295 regs->pc = nextpc;
285 set_tsk_thread_flag(tsk, TIF_USEDFPU); 296 return 1;
297 } else if ((finsn & 0xf00f) == 0xf002) {
298 /* fmul */
299 struct task_struct *tsk = current;
300 int fpscr;
301 int n, m, prec;
302 unsigned int hx, hy;
303
304 n = (finsn >> 8) & 0xf;
305 m = (finsn >> 4) & 0xf;
306 hx = tsk->thread.fpu.hard.fp_regs[n];
307 hy = tsk->thread.fpu.hard.fp_regs[m];
308 fpscr = tsk->thread.fpu.hard.fpscr;
309 prec = fpscr & FPSCR_DBL_PRECISION;
310
311 if ((fpscr & FPSCR_CAUSE_ERROR)
312 && (prec && ((hx & 0x7fffffff) < 0x00100000
313 || (hy & 0x7fffffff) < 0x00100000))) {
314 long long llx, lly;
315
316 /* FPU error because of denormal (doubles) */
317 llx = ((long long)hx << 32)
318 | tsk->thread.fpu.hard.fp_regs[n + 1];
319 lly = ((long long)hy << 32)
320 | tsk->thread.fpu.hard.fp_regs[m + 1];
321 llx = float64_mul(llx, lly);
322 tsk->thread.fpu.hard.fp_regs[n] = llx >> 32;
323 tsk->thread.fpu.hard.fp_regs[n + 1] = llx & 0xffffffff;
324 } else if ((fpscr & FPSCR_CAUSE_ERROR)
325 && (!prec && ((hx & 0x7fffffff) < 0x00800000
326 || (hy & 0x7fffffff) < 0x00800000))) {
327 /* FPU error because of denormal (floats) */
328 hx = float32_mul(hx, hy);
329 tsk->thread.fpu.hard.fp_regs[n] = hx;
330 } else
331 return 0;
332
333 regs->pc = nextpc;
334 return 1;
335 } else if ((finsn & 0xf00e) == 0xf000) {
336 /* fadd, fsub */
337 struct task_struct *tsk = current;
338 int fpscr;
339 int n, m, prec;
340 unsigned int hx, hy;
341
342 n = (finsn >> 8) & 0xf;
343 m = (finsn >> 4) & 0xf;
344 hx = tsk->thread.fpu.hard.fp_regs[n];
345 hy = tsk->thread.fpu.hard.fp_regs[m];
346 fpscr = tsk->thread.fpu.hard.fpscr;
347 prec = fpscr & FPSCR_DBL_PRECISION;
348
349 if ((fpscr & FPSCR_CAUSE_ERROR)
350 && (prec && ((hx & 0x7fffffff) < 0x00100000
351 || (hy & 0x7fffffff) < 0x00100000))) {
352 long long llx, lly;
353
354 /* FPU error because of denormal (doubles) */
355 llx = ((long long)hx << 32)
356 | tsk->thread.fpu.hard.fp_regs[n + 1];
357 lly = ((long long)hy << 32)
358 | tsk->thread.fpu.hard.fp_regs[m + 1];
359 if ((finsn & 0xf00f) == 0xf000)
360 llx = float64_add(llx, lly);
361 else
362 llx = float64_sub(llx, lly);
363 tsk->thread.fpu.hard.fp_regs[n] = llx >> 32;
364 tsk->thread.fpu.hard.fp_regs[n + 1] = llx & 0xffffffff;
365 } else if ((fpscr & FPSCR_CAUSE_ERROR)
366 && (!prec && ((hx & 0x7fffffff) < 0x00800000
367 || (hy & 0x7fffffff) < 0x00800000))) {
368 /* FPU error because of denormal (floats) */
369 if ((finsn & 0xf00f) == 0xf000)
370 hx = float32_add(hx, hy);
371 else
372 hx = float32_sub(hx, hy);
373 tsk->thread.fpu.hard.fp_regs[n] = hx;
374 } else
375 return 0;
376
377 regs->pc = nextpc;
378 return 1;
379 } else if ((finsn & 0xf003) == 0xf003) {
380 /* fdiv */
381 struct task_struct *tsk = current;
382 int fpscr;
383 int n, m, prec;
384 unsigned int hx, hy;
385
386 n = (finsn >> 8) & 0xf;
387 m = (finsn >> 4) & 0xf;
388 hx = tsk->thread.fpu.hard.fp_regs[n];
389 hy = tsk->thread.fpu.hard.fp_regs[m];
390 fpscr = tsk->thread.fpu.hard.fpscr;
391 prec = fpscr & FPSCR_DBL_PRECISION;
392
393 if ((fpscr & FPSCR_CAUSE_ERROR)
394 && (prec && ((hx & 0x7fffffff) < 0x00100000
395 || (hy & 0x7fffffff) < 0x00100000))) {
396 long long llx, lly;
397
398 /* FPU error because of denormal (doubles) */
399 llx = ((long long)hx << 32)
400 | tsk->thread.fpu.hard.fp_regs[n + 1];
401 lly = ((long long)hy << 32)
402 | tsk->thread.fpu.hard.fp_regs[m + 1];
403
404 llx = float64_div(llx, lly);
405
406 tsk->thread.fpu.hard.fp_regs[n] = llx >> 32;
407 tsk->thread.fpu.hard.fp_regs[n + 1] = llx & 0xffffffff;
408 } else if ((fpscr & FPSCR_CAUSE_ERROR)
409 && (!prec && ((hx & 0x7fffffff) < 0x00800000
410 || (hy & 0x7fffffff) < 0x00800000))) {
411 /* FPU error because of denormal (floats) */
412 hx = float32_div(hx, hy);
413 tsk->thread.fpu.hard.fp_regs[n] = hx;
286 } else 414 } else
287 force_sig(SIGFPE, tsk); 415 return 0;
288 416
289 regs->pc = nextpc; 417 regs->pc = nextpc;
290 return 1; 418 return 1;
@@ -293,27 +421,48 @@ ieee_fpe_handler (struct pt_regs *regs)
293 return 0; 421 return 0;
294} 422}
295 423
296asmlinkage void 424void float_raise(unsigned int flags)
297do_fpu_error(unsigned long r4, unsigned long r5, unsigned long r6, 425{
298 unsigned long r7, struct pt_regs __regs) 426 fpu_exception_flags |= flags;
427}
428
429int float_rounding_mode(void)
299{ 430{
300 struct pt_regs *regs = RELOC_HIDE(&__regs, 0);
301 struct task_struct *tsk = current; 431 struct task_struct *tsk = current;
432 int roundingMode = FPSCR_ROUNDING_MODE(tsk->thread.fpu.hard.fpscr);
433 return roundingMode;
434}
302 435
303 if (ieee_fpe_handler(regs)) 436BUILD_TRAP_HANDLER(fpu_error)
304 return; 437{
438 struct task_struct *tsk = current;
439 TRAP_HANDLER_DECL;
305 440
306 regs->pc += 2;
307 save_fpu(tsk, regs); 441 save_fpu(tsk, regs);
442 fpu_exception_flags = 0;
443 if (ieee_fpe_handler(regs)) {
444 tsk->thread.fpu.hard.fpscr &=
445 ~(FPSCR_CAUSE_MASK | FPSCR_FLAG_MASK);
446 tsk->thread.fpu.hard.fpscr |= fpu_exception_flags;
447 /* Set the FPSCR flag as well as cause bits - simply
448 * replicate the cause */
449 tsk->thread.fpu.hard.fpscr |= (fpu_exception_flags >> 10);
450 grab_fpu(regs);
451 restore_fpu(tsk);
452 set_tsk_thread_flag(tsk, TIF_USEDFPU);
453 if ((((tsk->thread.fpu.hard.fpscr & FPSCR_ENABLE_MASK) >> 7) &
454 (fpu_exception_flags >> 2)) == 0) {
455 return;
456 }
457 }
458
308 force_sig(SIGFPE, tsk); 459 force_sig(SIGFPE, tsk);
309} 460}
310 461
311asmlinkage void 462BUILD_TRAP_HANDLER(fpu_state_restore)
312do_fpu_state_restore(unsigned long r4, unsigned long r5, unsigned long r6,
313 unsigned long r7, struct pt_regs __regs)
314{ 463{
315 struct pt_regs *regs = RELOC_HIDE(&__regs, 0);
316 struct task_struct *tsk = current; 464 struct task_struct *tsk = current;
465 TRAP_HANDLER_DECL;
317 466
318 grab_fpu(regs); 467 grab_fpu(regs);
319 if (!user_mode(regs)) { 468 if (!user_mode(regs)) {
@@ -324,7 +473,7 @@ do_fpu_state_restore(unsigned long r4, unsigned long r5, unsigned long r6,
324 if (used_math()) { 473 if (used_math()) {
325 /* Using the FPU again. */ 474 /* Using the FPU again. */
326 restore_fpu(tsk); 475 restore_fpu(tsk);
327 } else { 476 } else {
328 /* First time FPU user. */ 477 /* First time FPU user. */
329 fpu_init(); 478 fpu_init();
330 set_used_math(); 479 set_used_math();
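
The magnitude tests the handler uses above work because an IEEE denormal has an all-zero exponent field, so its encoding (sign bit masked off) is numerically below the smallest normal number. A short sketch of the same tests (the kernel's versions omit the non-zero-fraction check, so zeros also take the emulation path, which is harmless):

/* single precision: exponent in bits 30..23, least normal 0x00800000 */
static int is_denormal32(unsigned int h)
{
	return (h & 0x7fffffff) < 0x00800000 && (h & 0x007fffff) != 0;
}

/* double precision: exponent in bits 30..20 of the high word,
 * least normal high word 0x00100000 */
static int is_denormal64(unsigned int hi, unsigned int lo)
{
	return (hi & 0x7fffffff) < 0x00100000 && ((hi & 0xfffff) | lo) != 0;
}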
diff --git a/arch/sh/kernel/cpu/sh4/probe.c b/arch/sh/kernel/cpu/sh4/probe.c
index bc9c28a69bf1..f2b9238cda04 100644
--- a/arch/sh/kernel/cpu/sh4/probe.c
+++ b/arch/sh/kernel/cpu/sh4/probe.c
@@ -98,6 +98,8 @@ int __init detect_cpu_and_cache_system(void)
98 case 0x200A: 98 case 0x200A:
99 if (prr == 0x61) 99 if (prr == 0x61)
100 boot_cpu_data.type = CPU_SH7781; 100 boot_cpu_data.type = CPU_SH7781;
101 else if (prr == 0xa1)
102 boot_cpu_data.type = CPU_SH7763;
101 else 103 else
102 boot_cpu_data.type = CPU_SH7780; 104 boot_cpu_data.type = CPU_SH7780;
103 105
diff --git a/arch/sh/kernel/cpu/sh4/setup-sh7750.c b/arch/sh/kernel/cpu/sh4/setup-sh7750.c
index 523f68a9ce0e..ae3603aca615 100644
--- a/arch/sh/kernel/cpu/sh4/setup-sh7750.c
+++ b/arch/sh/kernel/cpu/sh4/setup-sh7750.c
@@ -126,12 +126,6 @@ static struct intc_group groups[] __initdata = {
126 INTC_GROUP(REF, REF_RCMI, REF_ROVI), 126 INTC_GROUP(REF, REF_RCMI, REF_ROVI),
127}; 127};
128 128
129static struct intc_prio priorities[] __initdata = {
130 INTC_PRIO(SCIF, 3),
131 INTC_PRIO(SCI1, 3),
132 INTC_PRIO(DMAC, 7),
133};
134
135static struct intc_prio_reg prio_registers[] __initdata = { 129static struct intc_prio_reg prio_registers[] __initdata = {
136 { 0xffd00004, 0, 16, 4, /* IPRA */ { TMU0, TMU1, TMU2, RTC } }, 130 { 0xffd00004, 0, 16, 4, /* IPRA */ { TMU0, TMU1, TMU2, RTC } },
137 { 0xffd00008, 0, 16, 4, /* IPRB */ { WDT, REF, SCI1, 0 } }, 131 { 0xffd00008, 0, 16, 4, /* IPRB */ { WDT, REF, SCI1, 0 } },
@@ -143,7 +137,7 @@ static struct intc_prio_reg prio_registers[] __initdata = {
143}; 137};
144 138
145static DECLARE_INTC_DESC(intc_desc, "sh7750", vectors, groups, 139static DECLARE_INTC_DESC(intc_desc, "sh7750", vectors, groups,
146 priorities, NULL, prio_registers, NULL); 140 NULL, prio_registers, NULL);
147 141
148/* SH7750, SH7750S, SH7751 and SH7091 all have 4-channel DMA controllers */ 142/* SH7750, SH7750S, SH7751 and SH7091 all have 4-channel DMA controllers */
149#if defined(CONFIG_CPU_SUBTYPE_SH7750) || \ 143#if defined(CONFIG_CPU_SUBTYPE_SH7750) || \
@@ -163,7 +157,7 @@ static struct intc_group groups_dma4[] __initdata = {
163 157
164static DECLARE_INTC_DESC(intc_desc_dma4, "sh7750_dma4", 158static DECLARE_INTC_DESC(intc_desc_dma4, "sh7750_dma4",
165 vectors_dma4, groups_dma4, 159 vectors_dma4, groups_dma4,
166 priorities, NULL, prio_registers, NULL); 160 NULL, prio_registers, NULL);
167#endif 161#endif
168 162
169/* SH7750R and SH7751R both have 8-channel DMA controllers */ 163/* SH7750R and SH7751R both have 8-channel DMA controllers */
@@ -184,7 +178,7 @@ static struct intc_group groups_dma8[] __initdata = {
184 178
185static DECLARE_INTC_DESC(intc_desc_dma8, "sh7750_dma8", 179static DECLARE_INTC_DESC(intc_desc_dma8, "sh7750_dma8",
186 vectors_dma8, groups_dma8, 180 vectors_dma8, groups_dma8,
187 priorities, NULL, prio_registers, NULL); 181 NULL, prio_registers, NULL);
188#endif 182#endif
189 183
190/* SH7750R, SH7751 and SH7751R all have two extra timer channels */ 184/* SH7750R, SH7751 and SH7751R all have two extra timer channels */
@@ -205,7 +199,7 @@ static struct intc_mask_reg mask_registers[] __initdata = {
205}; 199};
206 200
207static DECLARE_INTC_DESC(intc_desc_tmu34, "sh7750_tmu34", 201static DECLARE_INTC_DESC(intc_desc_tmu34, "sh7750_tmu34",
208 vectors_tmu34, NULL, priorities, 202 vectors_tmu34, NULL,
209 mask_registers, prio_registers, NULL); 203 mask_registers, prio_registers, NULL);
210#endif 204#endif
211 205
@@ -216,7 +210,7 @@ static struct intc_vect vectors_irlm[] __initdata = {
216}; 210};
217 211
218static DECLARE_INTC_DESC(intc_desc_irlm, "sh7750_irlm", vectors_irlm, NULL, 212static DECLARE_INTC_DESC(intc_desc_irlm, "sh7750_irlm", vectors_irlm, NULL,
219 priorities, NULL, prio_registers, NULL); 213 NULL, prio_registers, NULL);
220 214
221/* SH7751 and SH7751R both have PCI */ 215/* SH7751 and SH7751R both have PCI */
222#if defined(CONFIG_CPU_SUBTYPE_SH7751) || defined(CONFIG_CPU_SUBTYPE_SH7751R) 216#if defined(CONFIG_CPU_SUBTYPE_SH7751) || defined(CONFIG_CPU_SUBTYPE_SH7751R)
@@ -233,7 +227,7 @@ static struct intc_group groups_pci[] __initdata = {
233}; 227};
234 228
235static DECLARE_INTC_DESC(intc_desc_pci, "sh7750_pci", vectors_pci, groups_pci, 229static DECLARE_INTC_DESC(intc_desc_pci, "sh7750_pci", vectors_pci, groups_pci,
236 priorities, mask_registers, prio_registers, NULL); 230 mask_registers, prio_registers, NULL);
237#endif 231#endif
238 232
239#if defined(CONFIG_CPU_SUBTYPE_SH7750) || \ 233#if defined(CONFIG_CPU_SUBTYPE_SH7750) || \
diff --git a/arch/sh/kernel/cpu/sh4/setup-sh7760.c b/arch/sh/kernel/cpu/sh4/setup-sh7760.c
index 7a898cb1d940..85f81579b97e 100644
--- a/arch/sh/kernel/cpu/sh4/setup-sh7760.c
+++ b/arch/sh/kernel/cpu/sh4/setup-sh7760.c
@@ -92,15 +92,6 @@ static struct intc_group groups[] __initdata = {
92 INTC_GROUP(REF, REF_RCMI, REF_ROVI), 92 INTC_GROUP(REF, REF_RCMI, REF_ROVI),
93}; 93};
94 94
95static struct intc_prio priorities[] __initdata = {
96 INTC_PRIO(SCIF0, 3),
97 INTC_PRIO(SCIF1, 3),
98 INTC_PRIO(SCIF2, 3),
99 INTC_PRIO(SIM, 3),
100 INTC_PRIO(DMAC, 7),
101 INTC_PRIO(DMABRG, 13),
102};
103
104static struct intc_mask_reg mask_registers[] __initdata = { 95static struct intc_mask_reg mask_registers[] __initdata = {
105 { 0xfe080040, 0xfe080060, 32, /* INTMSK00 / INTMSKCLR00 */ 96 { 0xfe080040, 0xfe080060, 32, /* INTMSK00 / INTMSKCLR00 */
106 { IRQ4, IRQ5, IRQ6, IRQ7, 0, 0, HCAN20, HCAN21, 97 { IRQ4, IRQ5, IRQ6, IRQ7, 0, 0, HCAN20, HCAN21,
@@ -132,7 +123,7 @@ static struct intc_prio_reg prio_registers[] __initdata = {
132}; 123};
133 124
134static DECLARE_INTC_DESC(intc_desc, "sh7760", vectors, groups, 125static DECLARE_INTC_DESC(intc_desc, "sh7760", vectors, groups,
135 priorities, mask_registers, prio_registers, NULL); 126 mask_registers, prio_registers, NULL);
136 127
137static struct intc_vect vectors_irq[] __initdata = { 128static struct intc_vect vectors_irq[] __initdata = {
138 INTC_VECT(IRL0, 0x240), INTC_VECT(IRL1, 0x2a0), 129 INTC_VECT(IRL0, 0x240), INTC_VECT(IRL1, 0x2a0),
@@ -140,7 +131,7 @@ static struct intc_vect vectors_irq[] __initdata = {
140}; 131};
141 132
142static DECLARE_INTC_DESC(intc_desc_irq, "sh7760-irq", vectors_irq, groups, 133static DECLARE_INTC_DESC(intc_desc_irq, "sh7760-irq", vectors_irq, groups,
143 priorities, mask_registers, prio_registers, NULL); 134 mask_registers, prio_registers, NULL);
144 135
145static struct plat_sci_port sci_platform_data[] = { 136static struct plat_sci_port sci_platform_data[] = {
146 { 137 {
diff --git a/arch/sh/kernel/cpu/sh4/softfloat.c b/arch/sh/kernel/cpu/sh4/softfloat.c
new file mode 100644
index 000000000000..7b2d337ee412
--- /dev/null
+++ b/arch/sh/kernel/cpu/sh4/softfloat.c
@@ -0,0 +1,892 @@
1/*
2 * Floating point emulation support for subnormalised numbers on SH4
3 * architecture. This file is derived from the SoftFloat IEC/IEEE
4 * Floating-point Arithmetic Package, Release 2, the original license of
5 * which is reproduced below.
6 *
7 * ========================================================================
8 *
9 * This C source file is part of the SoftFloat IEC/IEEE Floating-point
10 * Arithmetic Package, Release 2.
11 *
12 * Written by John R. Hauser. This work was made possible in part by the
13 * International Computer Science Institute, located at Suite 600, 1947 Center
14 * Street, Berkeley, California 94704. Funding was partially provided by the
15 * National Science Foundation under grant MIP-9311980. The original version
16 * of this code was written as part of a project to build a fixed-point vector
17 * processor in collaboration with the University of California at Berkeley,
18 * overseen by Profs. Nelson Morgan and John Wawrzynek. More information
19 * is available through the web page `http://HTTP.CS.Berkeley.EDU/~jhauser/
20 * arithmetic/softfloat.html'.
21 *
22 * THIS SOFTWARE IS DISTRIBUTED AS IS, FOR FREE. Although reasonable effort
23 * has been made to avoid it, THIS SOFTWARE MAY CONTAIN FAULTS THAT WILL AT
24 * TIMES RESULT IN INCORRECT BEHAVIOR. USE OF THIS SOFTWARE IS RESTRICTED TO
25 * PERSONS AND ORGANIZATIONS WHO CAN AND WILL TAKE FULL RESPONSIBILITY FOR ANY
26 * AND ALL LOSSES, COSTS, OR OTHER PROBLEMS ARISING FROM ITS USE.
27 *
28 * Derivative works are acceptable, even for commercial purposes, so long as
29 * (1) they include prominent notice that the work is derivative, and (2) they
30 * include prominent notice akin to these three paragraphs for those parts of
31 * this code that are retained.
32 *
33 * ========================================================================
34 *
35 * SH4 modifications by Ismail Dhaoui <ismail.dhaoui@st.com>
36 * and Kamel Khelifi <kamel.khelifi@st.com>
37 */
38#include <linux/kernel.h>
39#include <asm/cpu/fpu.h>
40
41#define LIT64( a ) a##LL
42
43typedef char flag;
44typedef unsigned char uint8;
45typedef signed char int8;
46typedef int uint16;
47typedef int int16;
48typedef unsigned int uint32;
49typedef signed int int32;
50
51typedef unsigned long long int bits64;
52typedef signed long long int sbits64;
53
54typedef unsigned char bits8;
55typedef signed char sbits8;
56typedef unsigned short int bits16;
57typedef signed short int sbits16;
58typedef unsigned int bits32;
59typedef signed int sbits32;
60
61typedef unsigned long long int uint64;
62typedef signed long long int int64;
63
64typedef unsigned long int float32;
65typedef unsigned long long float64;
66
67extern void float_raise(unsigned int flags); /* in fpu.c */
68extern int float_rounding_mode(void); /* in fpu.c */
69
70inline bits64 extractFloat64Frac(float64 a);
71inline flag extractFloat64Sign(float64 a);
72inline int16 extractFloat64Exp(float64 a);
73inline int16 extractFloat32Exp(float32 a);
74inline flag extractFloat32Sign(float32 a);
75inline bits32 extractFloat32Frac(float32 a);
76inline float64 packFloat64(flag zSign, int16 zExp, bits64 zSig);
77inline void shift64RightJamming(bits64 a, int16 count, bits64 * zPtr);
78inline float32 packFloat32(flag zSign, int16 zExp, bits32 zSig);
79inline void shift32RightJamming(bits32 a, int16 count, bits32 * zPtr);
80float64 float64_sub(float64 a, float64 b);
81float32 float32_sub(float32 a, float32 b);
82float32 float32_add(float32 a, float32 b);
83float64 float64_add(float64 a, float64 b);
84float64 float64_div(float64 a, float64 b);
85float32 float32_div(float32 a, float32 b);
86float32 float32_mul(float32 a, float32 b);
87float64 float64_mul(float64 a, float64 b);
88inline void add128(bits64 a0, bits64 a1, bits64 b0, bits64 b1, bits64 * z0Ptr,
89 bits64 * z1Ptr);
90inline void sub128(bits64 a0, bits64 a1, bits64 b0, bits64 b1, bits64 * z0Ptr,
91 bits64 * z1Ptr);
92inline void mul64To128(bits64 a, bits64 b, bits64 * z0Ptr, bits64 * z1Ptr);
93
94static int8 countLeadingZeros32(bits32 a);
95static int8 countLeadingZeros64(bits64 a);
96static float64 normalizeRoundAndPackFloat64(flag zSign, int16 zExp,
97 bits64 zSig);
98static float64 subFloat64Sigs(float64 a, float64 b, flag zSign);
99static float64 addFloat64Sigs(float64 a, float64 b, flag zSign);
100static float32 roundAndPackFloat32(flag zSign, int16 zExp, bits32 zSig);
101static float32 normalizeRoundAndPackFloat32(flag zSign, int16 zExp,
102 bits32 zSig);
103static float64 roundAndPackFloat64(flag zSign, int16 zExp, bits64 zSig);
104static float32 subFloat32Sigs(float32 a, float32 b, flag zSign);
105static float32 addFloat32Sigs(float32 a, float32 b, flag zSign);
106static void normalizeFloat64Subnormal(bits64 aSig, int16 * zExpPtr,
107 bits64 * zSigPtr);
108static bits64 estimateDiv128To64(bits64 a0, bits64 a1, bits64 b);
109static void normalizeFloat32Subnormal(bits32 aSig, int16 * zExpPtr,
110 bits32 * zSigPtr);
111
112inline bits64 extractFloat64Frac(float64 a)
113{
114 return a & LIT64(0x000FFFFFFFFFFFFF);
115}
116
117inline flag extractFloat64Sign(float64 a)
118{
119 return a >> 63;
120}
121
122inline int16 extractFloat64Exp(float64 a)
123{
124 return (a >> 52) & 0x7FF;
125}
126
127inline int16 extractFloat32Exp(float32 a)
128{
129 return (a >> 23) & 0xFF;
130}
131
132inline flag extractFloat32Sign(float32 a)
133{
134 return a >> 31;
135}
136
137inline bits32 extractFloat32Frac(float32 a)
138{
139 return a & 0x007FFFFF;
140}
141
142inline float64 packFloat64(flag zSign, int16 zExp, bits64 zSig)
143{
144 return (((bits64) zSign) << 63) + (((bits64) zExp) << 52) + zSig;
145}
146
147inline void shift64RightJamming(bits64 a, int16 count, bits64 * zPtr)
148{
149 bits64 z;
150
151 if (count == 0) {
152 z = a;
153 } else if (count < 64) {
154 z = (a >> count) | ((a << ((-count) & 63)) != 0);
155 } else {
156 z = (a != 0);
157 }
158 *zPtr = z;
159}
160
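The "jamming" above is the IEEE sticky bit: any bits shifted out are ORed into the result's LSB so later rounding can still distinguish an exact value from an inexact one. A small illustration (values arbitrary):

/* 0x240 >> 8 is 0x2, but the discarded 0x40 is non-zero, so the
 * jammed result is 0x3: the sticky LSB records the lost precision. */
static void jamming_example(void)
{
	bits64 z;

	shift64RightJamming(0x240, 8, &z);	/* z == 0x3, not 0x2 */
}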
161static int8 countLeadingZeros32(bits32 a)
162{
163 static const int8 countLeadingZerosHigh[] = {
164 8, 7, 6, 6, 5, 5, 5, 5, 4, 4, 4, 4, 4, 4, 4, 4,
165 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
166 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
167 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
168 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
169 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
170 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
171 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
172 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
173 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
174 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
175 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
176 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
177 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
178 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
179 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
180 };
181 int8 shiftCount;
182
183 shiftCount = 0;
184 if (a < 0x10000) {
185 shiftCount += 16;
186 a <<= 16;
187 }
188 if (a < 0x1000000) {
189 shiftCount += 8;
190 a <<= 8;
191 }
192 shiftCount += countLeadingZerosHigh[a >> 24];
193 return shiftCount;
194
195}
196
197static int8 countLeadingZeros64(bits64 a)
198{
199 int8 shiftCount;
200
201 shiftCount = 0;
202 if (a < ((bits64) 1) << 32) {
203 shiftCount += 32;
204 } else {
205 a >>= 32;
206 }
207 shiftCount += countLeadingZeros32(a);
208 return shiftCount;
209
210}
211
212static float64 normalizeRoundAndPackFloat64(flag zSign, int16 zExp, bits64 zSig)
213{
214 int8 shiftCount;
215
216 shiftCount = countLeadingZeros64(zSig) - 1;
217 return roundAndPackFloat64(zSign, zExp - shiftCount,
218 zSig << shiftCount);
219
220}
221
222static float64 subFloat64Sigs(float64 a, float64 b, flag zSign)
223{
224 int16 aExp, bExp, zExp;
225 bits64 aSig, bSig, zSig;
226 int16 expDiff;
227
228 aSig = extractFloat64Frac(a);
229 aExp = extractFloat64Exp(a);
230 bSig = extractFloat64Frac(b);
231 bExp = extractFloat64Exp(b);
232 expDiff = aExp - bExp;
233 aSig <<= 10;
234 bSig <<= 10;
235 if (0 < expDiff)
236 goto aExpBigger;
237 if (expDiff < 0)
238 goto bExpBigger;
239 if (aExp == 0) {
240 aExp = 1;
241 bExp = 1;
242 }
243 if (bSig < aSig)
244 goto aBigger;
245 if (aSig < bSig)
246 goto bBigger;
247 return packFloat64(float_rounding_mode() == FPSCR_RM_ZERO, 0, 0);
248 bExpBigger:
249 if (bExp == 0x7FF) {
250 return packFloat64(zSign ^ 1, 0x7FF, 0);
251 }
252 if (aExp == 0) {
253 ++expDiff;
254 } else {
255 aSig |= LIT64(0x4000000000000000);
256 }
257 shift64RightJamming(aSig, -expDiff, &aSig);
258 bSig |= LIT64(0x4000000000000000);
259 bBigger:
260 zSig = bSig - aSig;
261 zExp = bExp;
262 zSign ^= 1;
263 goto normalizeRoundAndPack;
264 aExpBigger:
265 if (aExp == 0x7FF) {
266 return a;
267 }
268 if (bExp == 0) {
269 --expDiff;
270 } else {
271 bSig |= LIT64(0x4000000000000000);
272 }
273 shift64RightJamming(bSig, expDiff, &bSig);
274 aSig |= LIT64(0x4000000000000000);
275 aBigger:
276 zSig = aSig - bSig;
277 zExp = aExp;
278 normalizeRoundAndPack:
279 --zExp;
280 return normalizeRoundAndPackFloat64(zSign, zExp, zSig);
281
282}
283static float64 addFloat64Sigs(float64 a, float64 b, flag zSign)
284{
285 int16 aExp, bExp, zExp;
286 bits64 aSig, bSig, zSig;
287 int16 expDiff;
288
289 aSig = extractFloat64Frac(a);
290 aExp = extractFloat64Exp(a);
291 bSig = extractFloat64Frac(b);
292 bExp = extractFloat64Exp(b);
293 expDiff = aExp - bExp;
294 aSig <<= 9;
295 bSig <<= 9;
296 if (0 < expDiff) {
297 if (aExp == 0x7FF) {
298 return a;
299 }
300 if (bExp == 0) {
301 --expDiff;
302 } else {
303 bSig |= LIT64(0x2000000000000000);
304 }
305 shift64RightJamming(bSig, expDiff, &bSig);
306 zExp = aExp;
307 } else if (expDiff < 0) {
308 if (bExp == 0x7FF) {
309 return packFloat64(zSign, 0x7FF, 0);
310 }
311 if (aExp == 0) {
312 ++expDiff;
313 } else {
314 aSig |= LIT64(0x2000000000000000);
315 }
316 shift64RightJamming(aSig, -expDiff, &aSig);
317 zExp = bExp;
318 } else {
319 if (aExp == 0x7FF) {
320 return a;
321 }
322 if (aExp == 0)
323 return packFloat64(zSign, 0, (aSig + bSig) >> 9);
324 zSig = LIT64(0x4000000000000000) + aSig + bSig;
325 zExp = aExp;
326 goto roundAndPack;
327 }
328 aSig |= LIT64(0x2000000000000000);
329 zSig = (aSig + bSig) << 1;
330 --zExp;
331 if ((sbits64) zSig < 0) {
332 zSig = aSig + bSig;
333 ++zExp;
334 }
335 roundAndPack:
336 return roundAndPackFloat64(zSign, zExp, zSig);
337
338}
339
340inline float32 packFloat32(flag zSign, int16 zExp, bits32 zSig)
341{
342 return (((bits32) zSign) << 31) + (((bits32) zExp) << 23) + zSig;
343}
344
345inline void shift32RightJamming(bits32 a, int16 count, bits32 * zPtr)
346{
347 bits32 z;
348 if (count == 0) {
349 z = a;
350 } else if (count < 32) {
351 z = (a >> count) | ((a << ((-count) & 31)) != 0);
352 } else {
353 z = (a != 0);
354 }
355 *zPtr = z;
356}
357
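/*
 * Illustrative note: on entry zSig carries 7 guard bits below the
 * 24-bit result significand. Adding 0x40 (half an ulp) and shifting
 * right by 7 implements round-to-nearest; the mask applied afterwards
 * clears bit 0 on exact halfway cases (roundBits == 0x40) so that ties
 * round to even. roundAndPackFloat64 below does the same with 10 guard
 * bits and an increment of 0x200.
 */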
358static float32 roundAndPackFloat32(flag zSign, int16 zExp, bits32 zSig)
359{
360 flag roundNearestEven;
361 int8 roundIncrement, roundBits;
362 flag isTiny;
363
364 /* SH4 has only 2 rounding modes - round to nearest and round to zero */
365 roundNearestEven = (float_rounding_mode() == FPSCR_RM_NEAREST);
366 roundIncrement = 0x40;
367 if (!roundNearestEven) {
368 roundIncrement = 0;
369 }
370 roundBits = zSig & 0x7F;
371 if (0xFD <= (bits16) zExp) {
372 if ((0xFD < zExp)
373 || ((zExp == 0xFD)
374 && ((sbits32) (zSig + roundIncrement) < 0))
375 ) {
376 float_raise(FPSCR_CAUSE_OVERFLOW | FPSCR_CAUSE_INEXACT);
377 return packFloat32(zSign, 0xFF,
378 0) - (roundIncrement == 0);
379 }
380 if (zExp < 0) {
381 isTiny = (zExp < -1)
382 || (zSig + roundIncrement < 0x80000000);
383 shift32RightJamming(zSig, -zExp, &zSig);
384 zExp = 0;
385 roundBits = zSig & 0x7F;
386 if (isTiny && roundBits)
387 float_raise(FPSCR_CAUSE_UNDERFLOW);
388 }
389 }
390 if (roundBits)
391 float_raise(FPSCR_CAUSE_INEXACT);
392 zSig = (zSig + roundIncrement) >> 7;
393 zSig &= ~(((roundBits ^ 0x40) == 0) & roundNearestEven);
394 if (zSig == 0)
395 zExp = 0;
396 return packFloat32(zSign, zExp, zSig);
397
398}
399
400static float32 normalizeRoundAndPackFloat32(flag zSign, int16 zExp, bits32 zSig)
401{
402 int8 shiftCount;
403
404 shiftCount = countLeadingZeros32(zSig) - 1;
405 return roundAndPackFloat32(zSign, zExp - shiftCount,
406 zSig << shiftCount);
407}
408
409static float64 roundAndPackFloat64(flag zSign, int16 zExp, bits64 zSig)
410{
411 flag roundNearestEven;
412 int16 roundIncrement, roundBits;
413 flag isTiny;
414
415 /* SH4 has only 2 rounding modes - round to nearest and round to zero */
416 roundNearestEven = (float_rounding_mode() == FPSCR_RM_NEAREST);
417 roundIncrement = 0x200;
418 if (!roundNearestEven) {
419 roundIncrement = 0;
420 }
421 roundBits = zSig & 0x3FF;
422 if (0x7FD <= (bits16) zExp) {
423 if ((0x7FD < zExp)
424 || ((zExp == 0x7FD)
425 && ((sbits64) (zSig + roundIncrement) < 0))
426 ) {
427 float_raise(FPSCR_CAUSE_OVERFLOW | FPSCR_CAUSE_INEXACT);
428 return packFloat64(zSign, 0x7FF,
429 0) - (roundIncrement == 0);
430 }
431 if (zExp < 0) {
432 isTiny = (zExp < -1)
433 || (zSig + roundIncrement <
434 LIT64(0x8000000000000000));
435 shift64RightJamming(zSig, -zExp, &zSig);
436 zExp = 0;
437 roundBits = zSig & 0x3FF;
438 if (isTiny && roundBits)
439 float_raise(FPSCR_CAUSE_UNDERFLOW);
440 }
441 }
442 if (roundBits)
443 float_raise(FPSCR_CAUSE_INEXACT);
444 zSig = (zSig + roundIncrement) >> 10;
445 zSig &= ~(((roundBits ^ 0x200) == 0) & roundNearestEven);
446 if (zSig == 0)
447 zExp = 0;
448 return packFloat64(zSign, zExp, zSig);
449
450}
451
452static float32 subFloat32Sigs(float32 a, float32 b, flag zSign)
453{
454 int16 aExp, bExp, zExp;
455 bits32 aSig, bSig, zSig;
456 int16 expDiff;
457
458 aSig = extractFloat32Frac(a);
459 aExp = extractFloat32Exp(a);
460 bSig = extractFloat32Frac(b);
461 bExp = extractFloat32Exp(b);
462 expDiff = aExp - bExp;
463 aSig <<= 7;
464 bSig <<= 7;
465 if (0 < expDiff)
466 goto aExpBigger;
467 if (expDiff < 0)
468 goto bExpBigger;
469 if (aExp == 0) {
470 aExp = 1;
471 bExp = 1;
472 }
473 if (bSig < aSig)
474 goto aBigger;
475 if (aSig < bSig)
476 goto bBigger;
477 return packFloat32(float_rounding_mode() == FPSCR_RM_ZERO, 0, 0);
478 bExpBigger:
479 if (bExp == 0xFF) {
480 return packFloat32(zSign ^ 1, 0xFF, 0);
481 }
482 if (aExp == 0) {
483 ++expDiff;
484 } else {
485 aSig |= 0x40000000;
486 }
487 shift32RightJamming(aSig, -expDiff, &aSig);
488 bSig |= 0x40000000;
489 bBigger:
490 zSig = bSig - aSig;
491 zExp = bExp;
492 zSign ^= 1;
493 goto normalizeRoundAndPack;
494 aExpBigger:
495 if (aExp == 0xFF) {
496 return a;
497 }
498 if (bExp == 0) {
499 --expDiff;
500 } else {
501 bSig |= 0x40000000;
502 }
503 shift32RightJamming(bSig, expDiff, &bSig);
504 aSig |= 0x40000000;
505 aBigger:
506 zSig = aSig - bSig;
507 zExp = aExp;
508 normalizeRoundAndPack:
509 --zExp;
510 return normalizeRoundAndPackFloat32(zSign, zExp, zSig);
511
512}
513
514static float32 addFloat32Sigs(float32 a, float32 b, flag zSign)
515{
516 int16 aExp, bExp, zExp;
517 bits32 aSig, bSig, zSig;
518 int16 expDiff;
519
520 aSig = extractFloat32Frac(a);
521 aExp = extractFloat32Exp(a);
522 bSig = extractFloat32Frac(b);
523 bExp = extractFloat32Exp(b);
524 expDiff = aExp - bExp;
525 aSig <<= 6;
526 bSig <<= 6;
527 if (0 < expDiff) {
528 if (aExp == 0xFF) {
529 return a;
530 }
531 if (bExp == 0) {
532 --expDiff;
533 } else {
534 bSig |= 0x20000000;
535 }
536 shift32RightJamming(bSig, expDiff, &bSig);
537 zExp = aExp;
538 } else if (expDiff < 0) {
539 if (bExp == 0xFF) {
540 return packFloat32(zSign, 0xFF, 0);
541 }
542 if (aExp == 0) {
543 ++expDiff;
544 } else {
545 aSig |= 0x20000000;
546 }
547 shift32RightJamming(aSig, -expDiff, &aSig);
548 zExp = bExp;
549 } else {
550 if (aExp == 0xFF) {
551 return a;
552 }
553 if (aExp == 0)
554 return packFloat32(zSign, 0, (aSig + bSig) >> 6);
555 zSig = 0x40000000 + aSig + bSig;
556 zExp = aExp;
557 goto roundAndPack;
558 }
559 aSig |= 0x20000000;
560 zSig = (aSig + bSig) << 1;
561 --zExp;
562 if ((sbits32) zSig < 0) {
563 zSig = aSig + bSig;
564 ++zExp;
565 }
566 roundAndPack:
567 return roundAndPackFloat32(zSign, zExp, zSig);
568
569}
570
571float64 float64_sub(float64 a, float64 b)
572{
573 flag aSign, bSign;
574
575 aSign = extractFloat64Sign(a);
576 bSign = extractFloat64Sign(b);
577 if (aSign == bSign) {
578 return subFloat64Sigs(a, b, aSign);
579 } else {
580 return addFloat64Sigs(a, b, aSign);
581 }
582
583}
584
585float32 float32_sub(float32 a, float32 b)
586{
587 flag aSign, bSign;
588
589 aSign = extractFloat32Sign(a);
590 bSign = extractFloat32Sign(b);
591 if (aSign == bSign) {
592 return subFloat32Sigs(a, b, aSign);
593 } else {
594 return addFloat32Sigs(a, b, aSign);
595 }
596
597}
598
599float32 float32_add(float32 a, float32 b)
600{
601 flag aSign, bSign;
602
603 aSign = extractFloat32Sign(a);
604 bSign = extractFloat32Sign(b);
605 if (aSign == bSign) {
606 return addFloat32Sigs(a, b, aSign);
607 } else {
608 return subFloat32Sigs(a, b, aSign);
609 }
610
611}
612
613float64 float64_add(float64 a, float64 b)
614{
615 flag aSign, bSign;
616
617 aSign = extractFloat64Sign(a);
618 bSign = extractFloat64Sign(b);
619 if (aSign == bSign) {
620 return addFloat64Sigs(a, b, aSign);
621 } else {
622 return subFloat64Sigs(a, b, aSign);
623 }
624}
625
626static void
627normalizeFloat64Subnormal(bits64 aSig, int16 * zExpPtr, bits64 * zSigPtr)
628{
629 int8 shiftCount;
630
631 shiftCount = countLeadingZeros64(aSig) - 11;
632 *zSigPtr = aSig << shiftCount;
633 *zExpPtr = 1 - shiftCount;
634}
635
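/*
 * Illustrative note: the 128-bit add detects the carry out of the low
 * halves with the unsigned wrap-around test (z1 < a1); sub128 likewise
 * detects the borrow with (a1 < b1).
 */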
636inline void add128(bits64 a0, bits64 a1, bits64 b0, bits64 b1, bits64 * z0Ptr,
637 bits64 * z1Ptr)
638{
639 bits64 z1;
640
641 z1 = a1 + b1;
642 *z1Ptr = z1;
643 *z0Ptr = a0 + b0 + (z1 < a1);
644}
645
646inline void
647sub128(bits64 a0, bits64 a1, bits64 b0, bits64 b1, bits64 * z0Ptr,
648 bits64 * z1Ptr)
649{
650 *z1Ptr = a1 - b1;
651 *z0Ptr = a0 - b0 - (a1 < b1);
652}
653
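/*
 * Illustrative note: per the SoftFloat algorithm this appears to
 * follow, estimateDiv128To64 returns an approximation to the 64-bit
 * quotient of a0:a1 / b (b must have its top bit set) that is never
 * below and at most 2 above the truncated quotient; float64_div
 * corrects for the possible overestimate by multiplying back and
 * decrementing.
 */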
654static bits64 estimateDiv128To64(bits64 a0, bits64 a1, bits64 b)
655{
656 bits64 b0, b1;
657 bits64 rem0, rem1, term0, term1;
658 bits64 z;
659 if (b <= a0)
660 return LIT64(0xFFFFFFFFFFFFFFFF);
661 b0 = b >> 32;
662 z = (b0 << 32 <= a0) ? LIT64(0xFFFFFFFF00000000) : (a0 / b0) << 32;
663 mul64To128(b, z, &term0, &term1);
664 sub128(a0, a1, term0, term1, &rem0, &rem1);
665 while (((sbits64) rem0) < 0) {
666 z -= LIT64(0x100000000);
667 b1 = b << 32;
668 add128(rem0, rem1, b0, b1, &rem0, &rem1);
669 }
670 rem0 = (rem0 << 32) | (rem1 >> 32);
671 z |= (b0 << 32 <= rem0) ? 0xFFFFFFFF : rem0 / b0;
672 return z;
673}
674
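/*
 * Illustrative note: mul64To128 builds the full 128-bit product from
 * four 32x32->64 partial products. E.g. for a = b = 2^32 only
 * aHigh*bHigh is nonzero, yielding z0 = 1, z1 = 0, i.e. 2^64.
 */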
675inline void mul64To128(bits64 a, bits64 b, bits64 * z0Ptr, bits64 * z1Ptr)
676{
677 bits32 aHigh, aLow, bHigh, bLow;
678 bits64 z0, zMiddleA, zMiddleB, z1;
679
680 aLow = a;
681 aHigh = a >> 32;
682 bLow = b;
683 bHigh = b >> 32;
684 z1 = ((bits64) aLow) * bLow;
685 zMiddleA = ((bits64) aLow) * bHigh;
686 zMiddleB = ((bits64) aHigh) * bLow;
687 z0 = ((bits64) aHigh) * bHigh;
688 zMiddleA += zMiddleB;
689 z0 += (((bits64) (zMiddleA < zMiddleB)) << 32) + (zMiddleA >> 32);
690 zMiddleA <<= 32;
691 z1 += zMiddleA;
692 z0 += (z1 < zMiddleA);
693 *z1Ptr = z1;
694 *z0Ptr = z0;
695
696}
697
698static void normalizeFloat32Subnormal(bits32 aSig, int16 * zExpPtr,
699 bits32 * zSigPtr)
700{
701 int8 shiftCount;
702
703 shiftCount = countLeadingZeros32(aSig) - 8;
704 *zSigPtr = aSig << shiftCount;
705 *zExpPtr = 1 - shiftCount;
706
707}
708
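/*
 * Illustrative note: the operands are prescaled so that aSig/bSig lies
 * in (1/4, 1); when 2*aSig >= bSig the ratio would reach 1/2, so aSig
 * is halved and the exponent bumped, keeping the 64-bit quotient
 * estimate in [2^62, 2^63), the form roundAndPackFloat64 expects.
 */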
709float64 float64_div(float64 a, float64 b)
710{
711 flag aSign, bSign, zSign;
712 int16 aExp, bExp, zExp;
713 bits64 aSig, bSig, zSig;
714 bits64 rem0, rem1;
715 bits64 term0, term1;
716
717 aSig = extractFloat64Frac(a);
718 aExp = extractFloat64Exp(a);
719 aSign = extractFloat64Sign(a);
720 bSig = extractFloat64Frac(b);
721 bExp = extractFloat64Exp(b);
722 bSign = extractFloat64Sign(b);
723 zSign = aSign ^ bSign;
724 if (aExp == 0x7FF) {
725 if (bExp == 0x7FF) { /* NaN and Inf/Inf operands are not special-cased here */
726 }
727 return packFloat64(zSign, 0x7FF, 0);
728 }
729 if (bExp == 0x7FF) {
730 return packFloat64(zSign, 0, 0);
731 }
732 if (bExp == 0) {
733 if (bSig == 0) {
734 if ((aExp | aSig) == 0) {
735 float_raise(FPSCR_CAUSE_INVALID);
736 }
737 return packFloat64(zSign, 0x7FF, 0);
738 }
739 normalizeFloat64Subnormal(bSig, &bExp, &bSig);
740 }
741 if (aExp == 0) {
742 if (aSig == 0)
743 return packFloat64(zSign, 0, 0);
744 normalizeFloat64Subnormal(aSig, &aExp, &aSig);
745 }
746 zExp = aExp - bExp + 0x3FD;
747 aSig = (aSig | LIT64(0x0010000000000000)) << 10;
748 bSig = (bSig | LIT64(0x0010000000000000)) << 11;
749 if (bSig <= (aSig + aSig)) {
750 aSig >>= 1;
751 ++zExp;
752 }
753 zSig = estimateDiv128To64(aSig, 0, bSig);
754 if ((zSig & 0x1FF) <= 2) {
755 mul64To128(bSig, zSig, &term0, &term1);
756 sub128(aSig, 0, term0, term1, &rem0, &rem1);
757 while ((sbits64) rem0 < 0) {
758 --zSig;
759 add128(rem0, rem1, 0, bSig, &rem0, &rem1);
760 }
761 zSig |= (rem1 != 0);
762 }
763 return roundAndPackFloat64(zSign, zExp, zSig);
764
765}
766
767float32 float32_div(float32 a, float32 b)
768{
769 flag aSign, bSign, zSign;
770 int16 aExp, bExp, zExp;
771 bits32 aSig, bSig, zSig;
772
773 aSig = extractFloat32Frac(a);
774 aExp = extractFloat32Exp(a);
775 aSign = extractFloat32Sign(a);
776 bSig = extractFloat32Frac(b);
777 bExp = extractFloat32Exp(b);
778 bSign = extractFloat32Sign(b);
779 zSign = aSign ^ bSign;
780 if (aExp == 0xFF) {
781 if (bExp == 0xFF) { /* NaN and Inf/Inf operands are not special-cased here */
782 }
783 return packFloat32(zSign, 0xFF, 0);
784 }
785 if (bExp == 0xFF) {
786 return packFloat32(zSign, 0, 0);
787 }
788 if (bExp == 0) {
789 if (bSig == 0) { /* note: 0/0 does not raise FPSCR_CAUSE_INVALID here, unlike float64_div */
790 return packFloat32(zSign, 0xFF, 0);
791 }
792 normalizeFloat32Subnormal(bSig, &bExp, &bSig);
793 }
794 if (aExp == 0) {
795 if (aSig == 0)
796 return packFloat32(zSign, 0, 0);
797 normalizeFloat32Subnormal(aSig, &aExp, &aSig);
798 }
799 zExp = aExp - bExp + 0x7D;
800 aSig = (aSig | 0x00800000) << 7;
801 bSig = (bSig | 0x00800000) << 8;
802 if (bSig <= (aSig + aSig)) {
803 aSig >>= 1;
804 ++zExp;
805 }
806 zSig = (((bits64) aSig) << 32) / bSig;
807 if ((zSig & 0x3F) == 0) {
808 zSig |= (((bits64) bSig) * zSig != ((bits64) aSig) << 32);
809 }
810 return roundAndPackFloat32(zSign, zExp, zSig);
811
812}
813
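/*
 * Illustrative note: the significand product below has its leading bit
 * at position 61 or 62 of the 64-bit result; the low 32 bits are jammed
 * into a sticky bit, and the conditional shift then normalizes the
 * leading bit to position 30, the form roundAndPackFloat32 expects.
 */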
814float32 float32_mul(float32 a, float32 b)
815{
816 char aSign, bSign, zSign;
817 int aExp, bExp, zExp;
818 unsigned int aSig, bSig;
819 unsigned long long zSig64;
820 unsigned int zSig;
821
822 aSig = extractFloat32Frac(a);
823 aExp = extractFloat32Exp(a);
824 aSign = extractFloat32Sign(a);
825 bSig = extractFloat32Frac(b);
826 bExp = extractFloat32Exp(b);
827 bSign = extractFloat32Sign(b);
828 zSign = aSign ^ bSign;
829 if (aExp == 0) {
830 if (aSig == 0)
831 return packFloat32(zSign, 0, 0);
832 normalizeFloat32Subnormal(aSig, &aExp, &aSig);
833 }
834 if (bExp == 0) {
835 if (bSig == 0)
836 return packFloat32(zSign, 0, 0);
837 normalizeFloat32Subnormal(bSig, &bExp, &bSig);
838 }
839 if ((bExp == 0xff && bSig == 0) || (aExp == 0xff && aSig == 0))
840 return roundAndPackFloat32(zSign, 0xff, 0);
841
842 zExp = aExp + bExp - 0x7F;
843 aSig = (aSig | 0x00800000) << 7;
844 bSig = (bSig | 0x00800000) << 8;
845 shift64RightJamming(((unsigned long long)aSig) * bSig, 32, &zSig64);
846 zSig = zSig64;
847 if (0 <= (signed int)(zSig << 1)) {
848 zSig <<= 1;
849 --zExp;
850 }
851 return roundAndPackFloat32(zSign, zExp, zSig);
852
853}
854
855float64 float64_mul(float64 a, float64 b)
856{
857 char aSign, bSign, zSign;
858 int aExp, bExp, zExp;
859 unsigned long long int aSig, bSig, zSig0, zSig1;
860
861 aSig = extractFloat64Frac(a);
862 aExp = extractFloat64Exp(a);
863 aSign = extractFloat64Sign(a);
864 bSig = extractFloat64Frac(b);
865 bExp = extractFloat64Exp(b);
866 bSign = extractFloat64Sign(b);
867 zSign = aSign ^ bSign;
868
869 if (aExp == 0) {
870 if (aSig == 0)
871 return packFloat64(zSign, 0, 0);
872 normalizeFloat64Subnormal(aSig, &aExp, &aSig);
873 }
874 if (bExp == 0) {
875 if (bSig == 0)
876 return packFloat64(zSign, 0, 0);
877 normalizeFloat64Subnormal(bSig, &bExp, &bSig);
878 }
879 if ((aExp == 0x7ff && aSig == 0) || (bExp == 0x7ff && bSig == 0))
880 return roundAndPackFloat64(zSign, 0x7ff, 0);
881
882 zExp = aExp + bExp - 0x3FF;
883 aSig = (aSig | 0x0010000000000000LL) << 10;
884 bSig = (bSig | 0x0010000000000000LL) << 11;
885 mul64To128(aSig, bSig, &zSig0, &zSig1);
886 zSig0 |= (zSig1 != 0);
887 if (0 <= (signed long long int)(zSig0 << 1)) {
888 zSig0 <<= 1;
889 --zExp;
890 }
891 return roundAndPackFloat64(zSign, zExp, zSig0);
892}
diff --git a/arch/sh/kernel/cpu/sh4a/Makefile b/arch/sh/kernel/cpu/sh4a/Makefile
index 24539873943a..08ac6387bf17 100644
--- a/arch/sh/kernel/cpu/sh4a/Makefile
+++ b/arch/sh/kernel/cpu/sh4a/Makefile
@@ -3,6 +3,7 @@
3#
4
5# CPU subtype setup
6obj-$(CONFIG_CPU_SUBTYPE_SH7763) += setup-sh7763.o
6obj-$(CONFIG_CPU_SUBTYPE_SH7770) += setup-sh7770.o
7obj-$(CONFIG_CPU_SUBTYPE_SH7780) += setup-sh7780.o
8obj-$(CONFIG_CPU_SUBTYPE_SH7785) += setup-sh7785.o
@@ -14,6 +15,7 @@ obj-$(CONFIG_CPU_SUBTYPE_SHX3) += setup-shx3.o
14smp-$(CONFIG_CPU_SUBTYPE_SHX3) := smp-shx3.o
15
16# Primary on-chip clocks (common)
18clock-$(CONFIG_CPU_SUBTYPE_SH7763) := clock-sh7763.o
17clock-$(CONFIG_CPU_SUBTYPE_SH7770) := clock-sh7770.o
18clock-$(CONFIG_CPU_SUBTYPE_SH7780) := clock-sh7780.o
19clock-$(CONFIG_CPU_SUBTYPE_SH7785) := clock-sh7785.o
diff --git a/arch/sh/kernel/cpu/sh4a/clock-sh7763.c b/arch/sh/kernel/cpu/sh4a/clock-sh7763.c
new file mode 100644
index 000000000000..45889d412c80
--- /dev/null
+++ b/arch/sh/kernel/cpu/sh4a/clock-sh7763.c
@@ -0,0 +1,126 @@
1/*
2 * arch/sh/kernel/cpu/sh4a/clock-sh7763.c
3 *
4 * SH7763 support for the clock framework
5 *
6 * Copyright (C) 2005 Paul Mundt
7 * Copyright (C) 2007 Yoshihiro Shimoda
8 *
9 * This file is subject to the terms and conditions of the GNU General Public
10 * License. See the file "COPYING" in the main directory of this archive
11 * for more details.
12 */
13#include <linux/init.h>
14#include <linux/kernel.h>
15#include <asm/clock.h>
16#include <asm/freq.h>
17#include <asm/io.h>
18
19static int bfc_divisors[] = { 1, 1, 1, 8, 1, 1, 1, 1 };
20static int p0fc_divisors[] = { 1, 1, 1, 8, 1, 1, 1, 1 };
21static int p1fc_divisors[] = { 1, 1, 1, 16, 1, 1, 1, 1 };
22static int cfc_divisors[] = { 1, 1, 4, 1, 1, 1, 1, 1 };
23
24static void master_clk_init(struct clk *clk)
25{
26 clk->rate *= p0fc_divisors[(ctrl_inl(FRQCR) >> 4) & 0x07];
27}
28
29static struct clk_ops sh7763_master_clk_ops = {
30 .init = master_clk_init,
31};
32
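/*
 * Illustrative note: FRQCR bits [6:4] index p0fc_divisors for both
 * master_clk_init above and module_clk_recalc below; e.g. a field
 * value of 3 selects a divide-by-8 ratio.
 */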
33static void module_clk_recalc(struct clk *clk)
34{
35 int idx = ((ctrl_inl(FRQCR) >> 4) & 0x07);
36 clk->rate = clk->parent->rate / p0fc_divisors[idx];
37}
38
39static struct clk_ops sh7763_module_clk_ops = {
40 .recalc = module_clk_recalc,
41};
42
43static void bus_clk_recalc(struct clk *clk)
44{
45 int idx = ((ctrl_inl(FRQCR) >> 16) & 0x07);
46 clk->rate = clk->parent->rate / bfc_divisors[idx];
47}
48
49static struct clk_ops sh7763_bus_clk_ops = {
50 .recalc = bus_clk_recalc,
51};
52
53static void cpu_clk_recalc(struct clk *clk)
54{
55 clk->rate = clk->parent->rate;
56}
57
58static struct clk_ops sh7763_cpu_clk_ops = {
59 .recalc = cpu_clk_recalc,
60};
61
62static struct clk_ops *sh7763_clk_ops[] = {
63 &sh7763_master_clk_ops,
64 &sh7763_module_clk_ops,
65 &sh7763_bus_clk_ops,
66 &sh7763_cpu_clk_ops,
67};
68
69void __init arch_init_clk_ops(struct clk_ops **ops, int idx)
70{
71 if (idx < ARRAY_SIZE(sh7763_clk_ops))
72 *ops = sh7763_clk_ops[idx];
73}
74
75static void shyway_clk_recalc(struct clk *clk)
76{
77 int idx = ((ctrl_inl(FRQCR) >> 20) & 0x07);
78 clk->rate = clk->parent->rate / cfc_divisors[idx];
79}
80
81static struct clk_ops sh7763_shyway_clk_ops = {
82 .recalc = shyway_clk_recalc,
83};
84
85static struct clk sh7763_shyway_clk = {
86 .name = "shyway_clk",
87 .flags = CLK_ALWAYS_ENABLED,
88 .ops = &sh7763_shyway_clk_ops,
89};
90
91/*
92 * Additional SH7763-specific on-chip clocks that aren't already part of the
93 * clock framework
94 */
95static struct clk *sh7763_onchip_clocks[] = {
96 &sh7763_shyway_clk,
97};
98
99static int __init sh7763_clk_init(void)
100{
101 struct clk *clk = clk_get(NULL, "master_clk");
102 int i;
103
104 for (i = 0; i < ARRAY_SIZE(sh7763_onchip_clocks); i++) {
105 struct clk *clkp = sh7763_onchip_clocks[i];
106
107 clkp->parent = clk;
108 clk_register(clkp);
109 clk_enable(clkp);
110 }
111
112 /*
113 * Now that we have the rest of the clocks registered, we need to
114 * force the parent clock to propagate so that these clocks will
115 * automatically figure out their rate. We cheat by handing the
116 * parent clock its current rate and forcing child propagation.
117 */
118 clk_set_rate(clk, clk_get_rate(clk));
119
120 clk_put(clk);
121
122 return 0;
123}
124
125arch_initcall(sh7763_clk_init);
126
diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7722.c b/arch/sh/kernel/cpu/sh4a/setup-sh7722.c
index b9c6547c4a90..73c778d40d13 100644
--- a/arch/sh/kernel/cpu/sh4a/setup-sh7722.c
+++ b/arch/sh/kernel/cpu/sh4a/setup-sh7722.c
@@ -157,14 +157,6 @@ static struct intc_group groups[] __initdata = {
157 INTC_GROUP(SDHI, SDHI0, SDHI1, SDHI2, SDHI3),
158};
159
160static struct intc_prio priorities[] __initdata = {
161 INTC_PRIO(SCIF0, 3),
162 INTC_PRIO(SCIF1, 3),
163 INTC_PRIO(SCIF2, 3),
164 INTC_PRIO(TMU0, 2),
165 INTC_PRIO(TMU1, 2),
166};
167
168static struct intc_mask_reg mask_registers[] __initdata = {
169 { 0xa4080080, 0xa40800c0, 8, /* IMR0 / IMCR0 */
170 { } },
@@ -217,7 +209,7 @@ static struct intc_sense_reg sense_registers[] __initdata = {
217 { IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7 } },
218};
219
220static DECLARE_INTC_DESC(intc_desc, "sh7722", vectors, groups, priorities,
212static DECLARE_INTC_DESC(intc_desc, "sh7722", vectors, groups,
221 mask_registers, prio_registers, sense_registers);
222
223void __init plat_irq_setup(void)
diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7763.c b/arch/sh/kernel/cpu/sh4a/setup-sh7763.c
new file mode 100644
index 000000000000..eabd5386812d
--- /dev/null
+++ b/arch/sh/kernel/cpu/sh4a/setup-sh7763.c
@@ -0,0 +1,390 @@
1/*
2 * SH7763 Setup
3 *
4 * Copyright (C) 2006 Paul Mundt
5 * Copyright (C) 2007 Yoshihiro Shimoda
6 *
7 * This file is subject to the terms and conditions of the GNU General Public
8 * License. See the file "COPYING" in the main directory of this archive
9 * for more details.
10 */
11#include <linux/platform_device.h>
12#include <linux/init.h>
13#include <linux/serial.h>
14#include <linux/io.h>
15#include <asm/sci.h>
16
17static struct resource rtc_resources[] = {
18 [0] = {
19 .start = 0xffe80000,
20 .end = 0xffe80000 + 0x58 - 1,
21 .flags = IORESOURCE_IO,
22 },
23 [1] = {
24 /* Period IRQ */
25 .start = 21,
26 .flags = IORESOURCE_IRQ,
27 },
28 [2] = {
29 /* Carry IRQ */
30 .start = 22,
31 .flags = IORESOURCE_IRQ,
32 },
33 [3] = {
34 /* Alarm IRQ */
35 .start = 20,
36 .flags = IORESOURCE_IRQ,
37 },
38};
39
40static struct platform_device rtc_device = {
41 .name = "sh-rtc",
42 .id = -1,
43 .num_resources = ARRAY_SIZE(rtc_resources),
44 .resource = rtc_resources,
45};
46
47static struct plat_sci_port sci_platform_data[] = {
48 {
49 .mapbase = 0xffe00000,
50 .flags = UPF_BOOT_AUTOCONF,
51 .type = PORT_SCIF,
52 .irqs = { 40, 41, 43, 42 },
53 }, {
54 .mapbase = 0xffe08000,
55 .flags = UPF_BOOT_AUTOCONF,
56 .type = PORT_SCIF,
57 .irqs = { 76, 77, 79, 78 },
58 }, {
59 .flags = 0,
60 }
61};
62
63static struct platform_device sci_device = {
64 .name = "sh-sci",
65 .id = -1,
66 .dev = {
67 .platform_data = sci_platform_data,
68 },
69};
70
71static struct resource usb_ohci_resources[] = {
72 [0] = {
73 .start = 0xffec8000,
74 .end = 0xffec80ff,
75 .flags = IORESOURCE_MEM,
76 },
77 [1] = {
78 .start = 83,
79 .end = 83,
80 .flags = IORESOURCE_IRQ,
81 },
82};
83
84static u64 usb_ohci_dma_mask = 0xffffffffUL;
85static struct platform_device usb_ohci_device = {
86 .name = "sh_ohci",
87 .id = -1,
88 .dev = {
89 .dma_mask = &usb_ohci_dma_mask,
90 .coherent_dma_mask = 0xffffffff,
91 },
92 .num_resources = ARRAY_SIZE(usb_ohci_resources),
93 .resource = usb_ohci_resources,
94};
95
96static struct resource usbf_resources[] = {
97 [0] = {
98 .start = 0xffec0000,
99 .end = 0xffec00ff,
100 .flags = IORESOURCE_MEM,
101 },
102 [1] = {
103 .start = 84,
104 .end = 84,
105 .flags = IORESOURCE_IRQ,
106 },
107};
108
109static struct platform_device usbf_device = {
110 .name = "sh_udc",
111 .id = -1,
112 .dev = {
113 .dma_mask = NULL,
114 .coherent_dma_mask = 0xffffffff,
115 },
116 .num_resources = ARRAY_SIZE(usbf_resources),
117 .resource = usbf_resources,
118};
119
120static struct platform_device *sh7763_devices[] __initdata = {
121 &rtc_device,
122 &sci_device,
123 &usb_ohci_device,
124 &usbf_device,
125};
126
127static int __init sh7763_devices_setup(void)
128{
129 return platform_add_devices(sh7763_devices,
130 ARRAY_SIZE(sh7763_devices));
131}
132__initcall(sh7763_devices_setup);
133
134enum {
135 UNUSED = 0,
136
137 /* interrupt sources */
138
139 IRL_LLLL, IRL_LLLH, IRL_LLHL, IRL_LLHH,
140 IRL_LHLL, IRL_LHLH, IRL_LHHL, IRL_LHHH,
141 IRL_HLLL, IRL_HLLH, IRL_HLHL, IRL_HLHH,
142 IRL_HHLL, IRL_HHLH, IRL_HHHL,
143
144 IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7,
145 RTC_ATI, RTC_PRI, RTC_CUI,
146 WDT, TMU0, TMU1, TMU2, TMU2_TICPI,
147 HUDI, LCDC,
148 DMAC0_DMINT0, DMAC0_DMINT1, DMAC0_DMINT2, DMAC0_DMINT3, DMAC0_DMAE,
149 SCIF0_ERI, SCIF0_RXI, SCIF0_BRI, SCIF0_TXI,
150 DMAC0_DMINT4, DMAC0_DMINT5,
151 IIC0, IIC1,
152 CMT,
153 GEINT0, GEINT1, GEINT2,
154 HAC,
155 PCISERR, PCIINTA, PCIINTB, PCIINTC, PCIINTD,
156 PCIERR, PCIPWD3, PCIPWD2, PCIPWD1, PCIPWD0,
157 STIF0, STIF1,
158 SCIF1_ERI, SCIF1_RXI, SCIF1_BRI, SCIF1_TXI,
159 SIOF0, SIOF1, SIOF2,
160 USBH, USBFI0, USBFI1,
161 TPU, PCC,
162 MMCIF_FSTAT, MMCIF_TRAN, MMCIF_ERR, MMCIF_FRDY,
163 SIM_ERI, SIM_RXI, SIM_TXI, SIM_TEND,
164 TMU3, TMU4, TMU5, ADC, SSI0, SSI1, SSI2, SSI3,
165 SCIF2_ERI, SCIF2_RXI, SCIF2_BRI, SCIF2_TXI,
166 GPIO_CH0, GPIO_CH1, GPIO_CH2, GPIO_CH3,
167
168 /* interrupt groups */
169
170 TMU012, TMU345, RTC, DMAC, SCIF0, GETHER, PCIC5,
171 SCIF1, USBF, MMCIF, SIM, SCIF2, GPIO,
172};
173
174static struct intc_vect vectors[] __initdata = {
175 INTC_VECT(RTC_ATI, 0x480), INTC_VECT(RTC_PRI, 0x4a0),
176 INTC_VECT(RTC_CUI, 0x4c0),
177 INTC_VECT(WDT, 0x560), INTC_VECT(TMU0, 0x580),
178 INTC_VECT(TMU1, 0x5a0), INTC_VECT(TMU2, 0x5c0),
179 INTC_VECT(TMU2_TICPI, 0x5e0), INTC_VECT(HUDI, 0x600),
180 INTC_VECT(LCDC, 0x620),
181 INTC_VECT(DMAC0_DMINT0, 0x640), INTC_VECT(DMAC0_DMINT1, 0x660),
182 INTC_VECT(DMAC0_DMINT2, 0x680), INTC_VECT(DMAC0_DMINT3, 0x6a0),
183 INTC_VECT(DMAC0_DMAE, 0x6c0),
184 INTC_VECT(SCIF0_ERI, 0x700), INTC_VECT(SCIF0_RXI, 0x720),
185 INTC_VECT(SCIF0_BRI, 0x740), INTC_VECT(SCIF0_TXI, 0x760),
186 INTC_VECT(DMAC0_DMINT4, 0x780), INTC_VECT(DMAC0_DMINT5, 0x7a0),
187 INTC_VECT(IIC0, 0x8A0), INTC_VECT(IIC1, 0x8C0),
188 INTC_VECT(CMT, 0x900), INTC_VECT(GEINT0, 0x920),
189 INTC_VECT(GEINT1, 0x940), INTC_VECT(GEINT2, 0x960),
190 INTC_VECT(HAC, 0x980),
191 INTC_VECT(PCISERR, 0xa00), INTC_VECT(PCIINTA, 0xa20),
192 INTC_VECT(PCIINTB, 0xa40), INTC_VECT(PCIINTC, 0xa60),
193 INTC_VECT(PCIINTD, 0xa80), INTC_VECT(PCIERR, 0xaa0),
194 INTC_VECT(PCIPWD3, 0xac0), INTC_VECT(PCIPWD2, 0xae0),
195 INTC_VECT(PCIPWD1, 0xb00), INTC_VECT(PCIPWD0, 0xb20),
196 INTC_VECT(STIF0, 0xb40), INTC_VECT(STIF1, 0xb60),
197 INTC_VECT(SCIF1_ERI, 0xb80), INTC_VECT(SCIF1_RXI, 0xba0),
198 INTC_VECT(SCIF1_BRI, 0xbc0), INTC_VECT(SCIF1_TXI, 0xbe0),
199 INTC_VECT(SIOF0, 0xc00), INTC_VECT(SIOF1, 0xc20),
200 INTC_VECT(USBH, 0xc60), INTC_VECT(USBFI0, 0xc80),
201 INTC_VECT(USBFI1, 0xca0),
202 INTC_VECT(TPU, 0xcc0), INTC_VECT(PCC, 0xce0),
203 INTC_VECT(MMCIF_FSTAT, 0xd00), INTC_VECT(MMCIF_TRAN, 0xd20),
204 INTC_VECT(MMCIF_ERR, 0xd40), INTC_VECT(MMCIF_FRDY, 0xd60),
205 INTC_VECT(SIM_ERI, 0xd80), INTC_VECT(SIM_RXI, 0xda0),
206 INTC_VECT(SIM_TXI, 0xdc0), INTC_VECT(SIM_TEND, 0xde0),
207 INTC_VECT(TMU3, 0xe00), INTC_VECT(TMU4, 0xe20),
208 INTC_VECT(TMU5, 0xe40), INTC_VECT(ADC, 0xe60),
209 INTC_VECT(SSI0, 0xe80), INTC_VECT(SSI1, 0xea0),
210 INTC_VECT(SSI2, 0xec0), INTC_VECT(SSI3, 0xee0),
211 INTC_VECT(SCIF2_ERI, 0xf00), INTC_VECT(SCIF2_RXI, 0xf20),
212 INTC_VECT(SCIF2_BRI, 0xf40), INTC_VECT(SCIF2_TXI, 0xf60),
213 INTC_VECT(GPIO_CH0, 0xf80), INTC_VECT(GPIO_CH1, 0xfa0),
214 INTC_VECT(GPIO_CH2, 0xfc0), INTC_VECT(GPIO_CH3, 0xfe0),
215};
216
217static struct intc_group groups[] __initdata = {
218 INTC_GROUP(TMU012, TMU0, TMU1, TMU2, TMU2_TICPI),
219 INTC_GROUP(TMU345, TMU3, TMU4, TMU5),
220 INTC_GROUP(RTC, RTC_ATI, RTC_PRI, RTC_CUI),
221 INTC_GROUP(DMAC, DMAC0_DMINT0, DMAC0_DMINT1, DMAC0_DMINT2,
222 DMAC0_DMINT3, DMAC0_DMINT4, DMAC0_DMINT5, DMAC0_DMAE),
223 INTC_GROUP(SCIF0, SCIF0_ERI, SCIF0_RXI, SCIF0_BRI, SCIF0_TXI),
224 INTC_GROUP(GETHER, GEINT0, GEINT1, GEINT2),
225 INTC_GROUP(PCIC5, PCIERR, PCIPWD3, PCIPWD2, PCIPWD1, PCIPWD0),
226 INTC_GROUP(SCIF1, SCIF1_ERI, SCIF1_RXI, SCIF1_BRI, SCIF1_TXI),
227 INTC_GROUP(USBF, USBFI0, USBFI1),
228 INTC_GROUP(MMCIF, MMCIF_FSTAT, MMCIF_TRAN, MMCIF_ERR, MMCIF_FRDY),
229 INTC_GROUP(SIM, SIM_ERI, SIM_RXI, SIM_TXI, SIM_TEND),
230 INTC_GROUP(SCIF2, SCIF2_ERI, SCIF2_RXI, SCIF2_BRI, SCIF2_TXI),
231 INTC_GROUP(GPIO, GPIO_CH0, GPIO_CH1, GPIO_CH2, GPIO_CH3),
232};
233
234static struct intc_prio priorities[] __initdata = {
235 INTC_PRIO(SCIF0, 3),
236 INTC_PRIO(SCIF1, 3),
237 INTC_PRIO(SCIF2, 3),
238};
239
240static struct intc_mask_reg mask_registers[] __initdata = {
241 { 0xffd40038, 0xffd4003c, 32, /* INT2MSKR / INT2MSKCR */
242 { 0, 0, 0, 0, 0, 0, GPIO, 0,
243 SSI0, MMCIF, 0, SIOF0, PCIC5, PCIINTD, PCIINTC, PCIINTB,
244 PCIINTA, PCISERR, HAC, CMT, 0, 0, 0, DMAC,
245 HUDI, 0, WDT, SCIF1, SCIF0, RTC, TMU345, TMU012 } },
246 { 0xffd400d0, 0xffd400d4, 32, /* INT2MSKR1 / INT2MSKCR1 */
247 { 0, 0, 0, 0, 0, 0, SCIF2, USBF,
248 0, 0, STIF1, STIF0, 0, 0, USBH, GETHER,
249 PCC, 0, 0, ADC, TPU, SIM, SIOF2, SIOF1,
250 LCDC, 0, IIC1, IIC0, SSI3, SSI2, SSI1, 0 } },
251};
252
253static struct intc_prio_reg prio_registers[] __initdata = {
254 { 0xffd40000, 0, 32, 8, /* INT2PRI0 */ { TMU0, TMU1,
255 TMU2, TMU2_TICPI } },
256 { 0xffd40004, 0, 32, 8, /* INT2PRI1 */ { TMU3, TMU4, TMU5, RTC } },
257 { 0xffd40008, 0, 32, 8, /* INT2PRI2 */ { SCIF0, SCIF1, WDT } },
258 { 0xffd4000c, 0, 32, 8, /* INT2PRI3 */ { HUDI, DMAC, ADC } },
259 { 0xffd40010, 0, 32, 8, /* INT2PRI4 */ { CMT, HAC,
260 PCISERR, PCIINTA } },
261 { 0xffd40014, 0, 32, 8, /* INT2PRI5 */ { PCIINTB, PCIINTC,
262 PCIINTD, PCIC5 } },
263 { 0xffd40018, 0, 32, 8, /* INT2PRI6 */ { SIOF0, USBF, MMCIF, SSI0 } },
264 { 0xffd4001c, 0, 32, 8, /* INT2PRI7 */ { SCIF2, GPIO } },
265 { 0xffd400a0, 0, 32, 8, /* INT2PRI8 */ { SSI3, SSI2, SSI1, 0 } },
266 { 0xffd400a4, 0, 32, 8, /* INT2PRI9 */ { LCDC, 0, IIC1, IIC0 } },
267 { 0xffd400a8, 0, 32, 8, /* INT2PRI10 */ { TPU, SIM, SIOF2, SIOF1 } },
268 { 0xffd400ac, 0, 32, 8, /* INT2PRI11 */ { PCC } },
269 { 0xffd400b0, 0, 32, 8, /* INT2PRI12 */ { 0, 0, USBH, GETHER } },
270 { 0xffd400b4, 0, 32, 8, /* INT2PRI13 */ { 0, 0, STIF1, STIF0 } },
271};
272
273static DECLARE_INTC_DESC(intc_desc, "sh7763", vectors, groups, priorities,
274 mask_registers, prio_registers, NULL);
275
276/* Support for external interrupt pins in IRQ mode */
277
278static struct intc_vect irq_vectors[] __initdata = {
279 INTC_VECT(IRQ0, 0x240), INTC_VECT(IRQ1, 0x280),
280 INTC_VECT(IRQ2, 0x2c0), INTC_VECT(IRQ3, 0x300),
281 INTC_VECT(IRQ4, 0x340), INTC_VECT(IRQ5, 0x380),
282 INTC_VECT(IRQ6, 0x3c0), INTC_VECT(IRQ7, 0x200),
283};
284
285static struct intc_mask_reg irq_mask_registers[] __initdata = {
286 { 0xffd00044, 0xffd00064, 32, /* INTMSK0 / INTMSKCLR0 */
287 { IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7 } },
288};
289
290static struct intc_prio_reg irq_prio_registers[] __initdata = {
291 { 0xffd00010, 0, 32, 4, /* INTPRI */ { IRQ0, IRQ1, IRQ2, IRQ3,
292 IRQ4, IRQ5, IRQ6, IRQ7 } },
293};
294
295static struct intc_sense_reg irq_sense_registers[] __initdata = {
296 { 0xffd0001c, 32, 2, /* ICR1 */ { IRQ0, IRQ1, IRQ2, IRQ3,
297 IRQ4, IRQ5, IRQ6, IRQ7 } },
298};
299
300static DECLARE_INTC_DESC(intc_irq_desc, "sh7763-irq", irq_vectors,
301 NULL, NULL, irq_mask_registers, irq_prio_registers,
302 irq_sense_registers);
303
304/* External interrupt pins in IRL mode */
305
306static struct intc_vect irl_vectors[] __initdata = {
307 INTC_VECT(IRL_LLLL, 0x200), INTC_VECT(IRL_LLLH, 0x220),
308 INTC_VECT(IRL_LLHL, 0x240), INTC_VECT(IRL_LLHH, 0x260),
309 INTC_VECT(IRL_LHLL, 0x280), INTC_VECT(IRL_LHLH, 0x2a0),
310 INTC_VECT(IRL_LHHL, 0x2c0), INTC_VECT(IRL_LHHH, 0x2e0),
311 INTC_VECT(IRL_HLLL, 0x300), INTC_VECT(IRL_HLLH, 0x320),
312 INTC_VECT(IRL_HLHL, 0x340), INTC_VECT(IRL_HLHH, 0x360),
313 INTC_VECT(IRL_HHLL, 0x380), INTC_VECT(IRL_HHLH, 0x3a0),
314 INTC_VECT(IRL_HHHL, 0x3c0),
315};
316
317static struct intc_mask_reg irl3210_mask_registers[] __initdata = {
318 { 0xffd40080, 0xffd40084, 32, /* INTMSK2 / INTMSKCLR2 */
319 { IRL_LLLL, IRL_LLLH, IRL_LLHL, IRL_LLHH,
320 IRL_LHLL, IRL_LHLH, IRL_LHHL, IRL_LHHH,
321 IRL_HLLL, IRL_HLLH, IRL_HLHL, IRL_HLHH,
322 IRL_HHLL, IRL_HHLH, IRL_HHHL, } },
323};
324
325static struct intc_mask_reg irl7654_mask_registers[] __initdata = {
326 { 0xffd40080, 0xffd40084, 32, /* INTMSK2 / INTMSKCLR2 */
327 { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
328 IRL_LLLL, IRL_LLLH, IRL_LLHL, IRL_LLHH,
329 IRL_LHLL, IRL_LHLH, IRL_LHHL, IRL_LHHH,
330 IRL_HLLL, IRL_HLLH, IRL_HLHL, IRL_HLHH,
331 IRL_HHLL, IRL_HHLH, IRL_HHHL, } },
332};
333
334static DECLARE_INTC_DESC(intc_irl7654_desc, "sh7763-irl7654", irl_vectors,
335 NULL, NULL, irl7654_mask_registers, NULL, NULL);
336
337static DECLARE_INTC_DESC(intc_irl3210_desc, "sh7763-irl3210", irl_vectors,
338 NULL, NULL, irl3210_mask_registers, NULL, NULL);
339
340#define INTC_ICR0 0xffd00000
341#define INTC_INTMSK0 0xffd00044
342#define INTC_INTMSK1 0xffd00048
343#define INTC_INTMSK2 0xffd40080
344#define INTC_INTMSKCLR1 0xffd00068
345#define INTC_INTMSKCLR2 0xffd40084
346
347void __init plat_irq_setup(void)
348{
349 /* disable IRQ7-0 */
350 ctrl_outl(0xff000000, INTC_INTMSK0);
351
352 /* disable IRL3-0 + IRL7-4 */
353 ctrl_outl(0xc0000000, INTC_INTMSK1);
354 ctrl_outl(0xfffefffe, INTC_INTMSK2);
355
356 register_intc_controller(&intc_desc);
357}
358
359void __init plat_irq_setup_pins(int mode)
360{
361 switch (mode) {
362 case IRQ_MODE_IRQ:
363 /* select IRQ mode for IRL3-0 + IRL7-4 */
364 ctrl_outl(ctrl_inl(INTC_ICR0) | 0x00c00000, INTC_ICR0);
365 register_intc_controller(&intc_irq_desc);
366 break;
367 case IRQ_MODE_IRL7654:
368 /* enable IRL7-4 but don't provide any masking */
369 ctrl_outl(0x40000000, INTC_INTMSKCLR1);
370 ctrl_outl(0x0000fffe, INTC_INTMSKCLR2);
371 break;
372 case IRQ_MODE_IRL3210:
373 /* enable IRL0-3 but don't provide any masking */
374 ctrl_outl(0x80000000, INTC_INTMSKCLR1);
375 ctrl_outl(0xfffe0000, INTC_INTMSKCLR2);
376 break;
377 case IRQ_MODE_IRL7654_MASK:
378 /* enable IRL7-4 and mask using cpu intc controller */
379 ctrl_outl(0x40000000, INTC_INTMSKCLR1);
380 register_intc_controller(&intc_irl7654_desc);
381 break;
382 case IRQ_MODE_IRL3210_MASK:
383 /* enable IRL0-3 and mask using cpu intc controller */
384 ctrl_outl(0x80000000, INTC_INTMSKCLR1);
385 register_intc_controller(&intc_irl3210_desc);
386 break;
387 default:
388 BUG();
389 }
390}
diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7780.c b/arch/sh/kernel/cpu/sh4a/setup-sh7780.c
index e8fd33ff0605..293004b526ff 100644
--- a/arch/sh/kernel/cpu/sh4a/setup-sh7780.c
+++ b/arch/sh/kernel/cpu/sh4a/setup-sh7780.c
@@ -168,11 +168,6 @@ static struct intc_group groups[] __initdata = {
168 INTC_GROUP(GPIO, GPIOI0, GPIOI1, GPIOI2, GPIOI3),
169};
170
171static struct intc_prio priorities[] __initdata = {
172 INTC_PRIO(SCIF0, 3),
173 INTC_PRIO(SCIF1, 3),
174};
175
176static struct intc_mask_reg mask_registers[] __initdata = {
177 { 0xffd40038, 0xffd4003c, 32, /* INT2MSKR / INT2MSKCR */
178 { 0, 0, 0, 0, 0, 0, GPIO, FLCTL,
@@ -195,7 +190,7 @@ static struct intc_prio_reg prio_registers[] __initdata = {
195 { 0xffd4001c, 0, 32, 8, /* INT2PRI7 */ { FLCTL, GPIO } },
196};
197
198static DECLARE_INTC_DESC(intc_desc, "sh7780", vectors, groups, priorities,
193static DECLARE_INTC_DESC(intc_desc, "sh7780", vectors, groups,
199 mask_registers, prio_registers, NULL);
200
201/* Support for external interrupt pins in IRQ mode */
@@ -223,7 +218,7 @@ static struct intc_sense_reg irq_sense_registers[] __initdata = {
223};
224
225static DECLARE_INTC_DESC(intc_irq_desc, "sh7780-irq", irq_vectors,
226 NULL, NULL, irq_mask_registers, irq_prio_registers,
221 NULL, irq_mask_registers, irq_prio_registers,
227 irq_sense_registers);
228
229/* External interrupt pins in IRL mode */
@@ -257,10 +252,10 @@ static struct intc_mask_reg irl7654_mask_registers[] __initdata = {
257};
258
259static DECLARE_INTC_DESC(intc_irl7654_desc, "sh7780-irl7654", irl_vectors,
260 NULL, NULL, irl7654_mask_registers, NULL, NULL);
255 NULL, irl7654_mask_registers, NULL, NULL);
261
262static DECLARE_INTC_DESC(intc_irl3210_desc, "sh7780-irl3210", irl_vectors,
263 NULL, NULL, irl3210_mask_registers, NULL, NULL);
258 NULL, irl3210_mask_registers, NULL, NULL);
264
265#define INTC_ICR0 0xffd00000
266#define INTC_INTMSK0 0xffd00044
diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7785.c b/arch/sh/kernel/cpu/sh4a/setup-sh7785.c
index 39b215d6cee5..74b60e96cdf4 100644
--- a/arch/sh/kernel/cpu/sh4a/setup-sh7785.c
+++ b/arch/sh/kernel/cpu/sh4a/setup-sh7785.c
@@ -178,15 +178,6 @@ static struct intc_group groups[] __initdata = {
178 INTC_GROUP(GPIO, GPIOI0, GPIOI1, GPIOI2, GPIOI3),
179};
180
181static struct intc_prio priorities[] __initdata = {
182 INTC_PRIO(SCIF0, 3),
183 INTC_PRIO(SCIF1, 3),
184 INTC_PRIO(SCIF2, 3),
185 INTC_PRIO(SCIF3, 3),
186 INTC_PRIO(SCIF4, 3),
187 INTC_PRIO(SCIF5, 3),
188};
189
190static struct intc_mask_reg mask_registers[] __initdata = {
191 { 0xffd00044, 0xffd00064, 32, /* INTMSK0 / INTMSKCLR0 */
192 { IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7 } },
@@ -227,7 +218,7 @@ static struct intc_prio_reg prio_registers[] __initdata = {
227 { 0xffd40024, 0, 32, 8, /* INT2PRI9 */ { DU, GDTA, } },
228};
229
230static DECLARE_INTC_DESC(intc_desc, "sh7785", vectors, groups, priorities,
221static DECLARE_INTC_DESC(intc_desc, "sh7785", vectors, groups,
231 mask_registers, prio_registers, NULL);
232
233/* Support for external interrupt pins in IRQ mode */
@@ -248,11 +239,11 @@ static struct intc_sense_reg sense_registers[] __initdata = {
248};
249
250static DECLARE_INTC_DESC(intc_desc_irq0123, "sh7785-irq0123", vectors_irq0123,
251 NULL, NULL, mask_registers, prio_registers,
242 NULL, mask_registers, prio_registers,
252 sense_registers);
253
254static DECLARE_INTC_DESC(intc_desc_irq4567, "sh7785-irq4567", vectors_irq4567,
255 NULL, NULL, mask_registers, prio_registers,
246 NULL, mask_registers, prio_registers,
256 sense_registers);
257
258/* External interrupt pins in IRL mode */
@@ -280,10 +271,10 @@ static struct intc_vect vectors_irl4567[] __initdata = {
280};
281
282static DECLARE_INTC_DESC(intc_desc_irl0123, "sh7785-irl0123", vectors_irl0123,
283 NULL, NULL, mask_registers, NULL, NULL);
274 NULL, mask_registers, NULL, NULL);
284
285static DECLARE_INTC_DESC(intc_desc_irl4567, "sh7785-irl4567", vectors_irl4567,
286 NULL, NULL, mask_registers, NULL, NULL);
277 NULL, mask_registers, NULL, NULL);
287
288#define INTC_ICR0 0xffd00000
289#define INTC_INTMSK0 0xffd00044
diff --git a/arch/sh/kernel/cpu/sh4a/setup-shx3.c b/arch/sh/kernel/cpu/sh4a/setup-shx3.c
index c6cdd7e3b049..4dc958b6b314 100644
--- a/arch/sh/kernel/cpu/sh4a/setup-shx3.c
+++ b/arch/sh/kernel/cpu/sh4a/setup-shx3.c
@@ -165,13 +165,6 @@ static struct intc_group groups[] __initdata = {
165 INTC_GROUP(DTU3, DTU3_TEND, DTU3_AE, DTU3_TMISS),
166};
167
168static struct intc_prio priorities[] __initdata = {
169 INTC_PRIO(SCIF0, 3),
170 INTC_PRIO(SCIF1, 3),
171 INTC_PRIO(SCIF2, 3),
172 INTC_PRIO(SCIF3, 3),
173};
174
175static struct intc_mask_reg mask_registers[] __initdata = {
176 { 0xfe410030, 0xfe410050, 32, /* CnINTMSK0 / CnINTMSKCLR0 */
177 { IRQ0, IRQ1, IRQ2, IRQ3 } },
@@ -218,7 +211,7 @@ static struct intc_prio_reg prio_registers[] __initdata = {
218 INTICI3, INTICI2, INTICI1, INTICI0 }, INTC_SMP(4, 4) },
219};
220
221static DECLARE_INTC_DESC(intc_desc, "shx3", vectors, groups, priorities,
214static DECLARE_INTC_DESC(intc_desc, "shx3", vectors, groups,
222 mask_registers, prio_registers, NULL);
223
224/* Support for external interrupt pins in IRQ mode */
@@ -232,8 +225,7 @@ static struct intc_sense_reg sense_registers[] __initdata = {
232};
233
234static DECLARE_INTC_DESC(intc_desc_irq, "shx3-irq", vectors_irq, groups,
235 priorities, mask_registers, prio_registers,
228 mask_registers, prio_registers, sense_registers);
236 sense_registers);
237
238/* External interrupt pins in IRL mode */
239static struct intc_vect vectors_irl[] __initdata = {
@@ -248,7 +240,7 @@ static struct intc_vect vectors_irl[] __initdata = {
248};
249
250static DECLARE_INTC_DESC(intc_desc_irl, "shx3-irl", vectors_irl, groups,
251 priorities, mask_registers, prio_registers, NULL);
243 mask_registers, prio_registers, NULL);
252
253void __init plat_irq_setup_pins(int mode)
254{
diff --git a/arch/sh/kernel/cpu/sh5/Makefile b/arch/sh/kernel/cpu/sh5/Makefile
new file mode 100644
index 000000000000..8646363e9ded
--- /dev/null
+++ b/arch/sh/kernel/cpu/sh5/Makefile
@@ -0,0 +1,7 @@
1#
2# Makefile for the Linux/SuperH SH-5 backends.
3#
4obj-y := entry.o probe.o switchto.o
5
6obj-$(CONFIG_SH_FPU) += fpu.o
7obj-$(CONFIG_KALLSYMS) += unwind.o
diff --git a/arch/sh/kernel/cpu/sh5/entry.S b/arch/sh/kernel/cpu/sh5/entry.S
new file mode 100644
index 000000000000..ba8750176d91
--- /dev/null
+++ b/arch/sh/kernel/cpu/sh5/entry.S
@@ -0,0 +1,2101 @@
1/*
2 * arch/sh/kernel/cpu/sh5/entry.S
3 *
4 * Copyright (C) 2000, 2001 Paolo Alberelli
5 * Copyright (C) 2004 - 2007 Paul Mundt
6 * Copyright (C) 2003, 2004 Richard Curnow
7 *
8 * This file is subject to the terms and conditions of the GNU General Public
9 * License. See the file "COPYING" in the main directory of this archive
10 * for more details.
11 */
12#include <linux/errno.h>
13#include <linux/sys.h>
14#include <asm/cpu/registers.h>
15#include <asm/processor.h>
16#include <asm/unistd.h>
17#include <asm/thread_info.h>
18#include <asm/asm-offsets.h>
19
20/*
21 * SR fields.
22 */
23#define SR_ASID_MASK 0x00ff0000
24#define SR_FD_MASK 0x00008000
25#define SR_SS 0x08000000
26#define SR_BL 0x10000000
27#define SR_MD 0x40000000
28
29/*
30 * Event code.
31 */
32#define EVENT_INTERRUPT 0
33#define EVENT_FAULT_TLB 1
34#define EVENT_FAULT_NOT_TLB 2
35#define EVENT_DEBUG 3
36
37/* EXPEVT values */
38#define RESET_CAUSE 0x20
39#define DEBUGSS_CAUSE 0x980
40
41/*
42 * Frame layout. Quad index.
43 */
44#define FRAME_T(x) FRAME_TBASE+(x*8)
45#define FRAME_R(x) FRAME_RBASE+(x*8)
46#define FRAME_S(x) FRAME_SBASE+(x*8)
47#define FSPC 0
48#define FSSR 1
49#define FSYSCALL_ID 2
50
51/* Arrange the save frame to be a multiple of 32 bytes long */
52#define FRAME_SBASE 0
53#define FRAME_RBASE (FRAME_SBASE+(3*8)) /* SYSCALL_ID - SSR - SPC */
54#define FRAME_TBASE (FRAME_RBASE+(63*8)) /* r0 - r62 */
55#define FRAME_PBASE (FRAME_TBASE+(8*8)) /* tr0 -tr7 */
56#define FRAME_SIZE (FRAME_PBASE+(2*8)) /* pad0-pad1 */
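/* Illustrative check: 3 + 63 + 8 + 2 quadwords = 76 * 8 = 608 bytes,
 * which is indeed a multiple of 32. */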
57
58#define FP_FRAME_SIZE FP_FRAME_BASE+(33*8) /* dr0 - dr31 + fpscr */
59#define FP_FRAME_BASE 0
60
61#define SAVED_R2 0*8
62#define SAVED_R3 1*8
63#define SAVED_R4 2*8
64#define SAVED_R5 3*8
65#define SAVED_R18 4*8
66#define SAVED_R6 5*8
67#define SAVED_TR0 6*8
68
69/* These are the registers saved in the TLB path that aren't saved in the first
70 level of the normal one. */
71#define TLB_SAVED_R25 7*8
72#define TLB_SAVED_TR1 8*8
73#define TLB_SAVED_TR2 9*8
74#define TLB_SAVED_TR3 10*8
75#define TLB_SAVED_TR4 11*8
76/* Save R0/R1: the PT-migrating compiler currently dishonours -ffixed-r0 and -ffixed-r1,
77 causing breakage otherwise. */
78#define TLB_SAVED_R0 12*8
79#define TLB_SAVED_R1 13*8
80
81#define CLI() \
82 getcon SR, r6; \
83 ori r6, 0xf0, r6; \
84 putcon r6, SR;
85
86#define STI() \
87 getcon SR, r6; \
88 andi r6, ~0xf0, r6; \
89 putcon r6, SR;
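/* Illustrative note: the 0xf0 field in SR is the interrupt mask level;
 * CLI() raises it to the maximum (mask everything), while STI() clears
 * it (accept all interrupt levels). */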
90
91#ifdef CONFIG_PREEMPT
92# define preempt_stop() CLI()
93#else
94# define preempt_stop()
95# define resume_kernel restore_all
96#endif
97
98 .section .data, "aw"
99
100#define FAST_TLBMISS_STACK_CACHELINES 4
101#define FAST_TLBMISS_STACK_QUADWORDS (4*FAST_TLBMISS_STACK_CACHELINES)
102
103/* Register back-up area for all exceptions */
104 .balign 32
105 /* Allow for 16 quadwords to be pushed by fast tlbmiss handling
106 * register saves etc. */
107 .fill FAST_TLBMISS_STACK_QUADWORDS, 8, 0x0
108/* This is 32 byte aligned by construction */
109/* Register back-up area for all exceptions */
110reg_save_area:
111 .quad 0
112 .quad 0
113 .quad 0
114 .quad 0
115
116 .quad 0
117 .quad 0
118 .quad 0
119 .quad 0
120
121 .quad 0
122 .quad 0
123 .quad 0
124 .quad 0
125
126 .quad 0
127 .quad 0
128
129/* Save area for RESVEC exceptions. We cannot use reg_save_area because of
130 * reentrancy. Note this area may be accessed via physical address.
131 * Align so this fits a whole single cache line, for ease of purging.
132 */
133 .balign 32,0,32
134resvec_save_area:
135 .quad 0
136 .quad 0
137 .quad 0
138 .quad 0
139 .quad 0
140 .balign 32,0,32
141
142/* Jump table of 3rd level handlers */
143trap_jtable:
144 .long do_exception_error /* 0x000 */
145 .long do_exception_error /* 0x020 */
146 .long tlb_miss_load /* 0x040 */
147 .long tlb_miss_store /* 0x060 */
148 ! ARTIFICIAL pseudo-EXPEVT setting
149 .long do_debug_interrupt /* 0x080 */
150 .long tlb_miss_load /* 0x0A0 */
151 .long tlb_miss_store /* 0x0C0 */
152 .long do_address_error_load /* 0x0E0 */
153 .long do_address_error_store /* 0x100 */
154#ifdef CONFIG_SH_FPU
155 .long do_fpu_error /* 0x120 */
156#else
157 .long do_exception_error /* 0x120 */
158#endif
159 .long do_exception_error /* 0x140 */
160 .long system_call /* 0x160 */
161 .long do_reserved_inst /* 0x180 */
162 .long do_illegal_slot_inst /* 0x1A0 */
163 .long do_exception_error /* 0x1C0 - NMI */
164 .long do_exception_error /* 0x1E0 */
165 .rept 15
166 .long do_IRQ /* 0x200 - 0x3C0 */
167 .endr
168 .long do_exception_error /* 0x3E0 */
169 .rept 32
170 .long do_IRQ /* 0x400 - 0x7E0 */
171 .endr
172 .long fpu_error_or_IRQA /* 0x800 */
173 .long fpu_error_or_IRQB /* 0x820 */
174 .long do_IRQ /* 0x840 */
175 .long do_IRQ /* 0x860 */
176 .rept 6
177 .long do_exception_error /* 0x880 - 0x920 */
178 .endr
179 .long do_software_break_point /* 0x940 */
180 .long do_exception_error /* 0x960 */
181 .long do_single_step /* 0x980 */
182
183 .rept 3
184 .long do_exception_error /* 0x9A0 - 0x9E0 */
185 .endr
186 .long do_IRQ /* 0xA00 */
187 .long do_IRQ /* 0xA20 */
188 .long itlb_miss_or_IRQ /* 0xA40 */
189 .long do_IRQ /* 0xA60 */
190 .long do_IRQ /* 0xA80 */
191 .long itlb_miss_or_IRQ /* 0xAA0 */
192 .long do_exception_error /* 0xAC0 */
193 .long do_address_error_exec /* 0xAE0 */
194 .rept 8
195 .long do_exception_error /* 0xB00 - 0xBE0 */
196 .endr
197 .rept 18
198 .long do_IRQ /* 0xC00 - 0xE20 */
199 .endr
200
201 .section .text64, "ax"
202
203/*
204 * --- Exception/Interrupt/Event Handling Section
205 */
206
207/*
208 * VBR and RESVEC blocks.
209 *
210 * First level handler for VBR-based exceptions.
211 *
212 * To avoid waste of space, align to the maximum text block size.
213 * This is assumed to be at most 128 bytes or 32 instructions.
214 * DO NOT EXCEED 32 instructions on the first level handlers !
215 *
216 * Also note that RESVEC is contained within the VBR block
217 * where the room left (1KB - TEXT_SIZE) allows placing
218 * the RESVEC block (at most 512B + TEXT_SIZE).
219 *
220 * So first (and only) level handler for RESVEC-based exceptions.
221 *
222 * Where the fault/interrupt is handled (not_a_tlb_miss, tlb_miss
223 * and interrupt) register space is very tight until we save onto
224 * the stack frame, which is done in handle_exception().
225 *
226 */
227
228#define TEXT_SIZE 128
229#define BLOCK_SIZE 1664 /* Dynamic check, 13*128 */
230
231 .balign TEXT_SIZE
232LVBR_block:
233 .space 256, 0 /* Power-on class handler, */
234 /* not required here */
235not_a_tlb_miss:
236 synco /* TAKum03020 (but probably a good idea anyway.) */
237 /* Save original stack pointer into KCR1 */
238 putcon SP, KCR1
239
240 /* Save other original registers into reg_save_area */
241 movi reg_save_area, SP
242 st.q SP, SAVED_R2, r2
243 st.q SP, SAVED_R3, r3
244 st.q SP, SAVED_R4, r4
245 st.q SP, SAVED_R5, r5
246 st.q SP, SAVED_R6, r6
247 st.q SP, SAVED_R18, r18
248 gettr tr0, r3
249 st.q SP, SAVED_TR0, r3
250
251 /* Set args for Non-debug, Not a TLB miss class handler */
252 getcon EXPEVT, r2
253 movi ret_from_exception, r3
254 ori r3, 1, r3
255 movi EVENT_FAULT_NOT_TLB, r4
256 or SP, ZERO, r5
257 getcon KCR1, SP
258 pta handle_exception, tr0
259 blink tr0, ZERO
260
261 .balign 256
262 ! VBR+0x200
263 nop
264 .balign 256
265 ! VBR+0x300
266 nop
267 .balign 256
268 /*
269 * Instead of the natural .balign 1024 place RESVEC here
270 * respecting the final 1KB alignment.
271 */
272 .balign TEXT_SIZE
273 /*
274 * Instead of '.space 1024-TEXT_SIZE' place the RESVEC
275 * block making sure the final alignment is correct.
276 */
277tlb_miss:
278 synco /* TAKum03020 (but probably a good idea anyway.) */
279 putcon SP, KCR1
280 movi reg_save_area, SP
281 /* SP is guaranteed 32-byte aligned. */
282 st.q SP, TLB_SAVED_R0 , r0
283 st.q SP, TLB_SAVED_R1 , r1
284 st.q SP, SAVED_R2 , r2
285 st.q SP, SAVED_R3 , r3
286 st.q SP, SAVED_R4 , r4
287 st.q SP, SAVED_R5 , r5
288 st.q SP, SAVED_R6 , r6
289 st.q SP, SAVED_R18, r18
290
291 /* Save R25 for safety; as/ld may want to use it to achieve the call to
292 * the code in mm/tlbmiss.c */
293 st.q SP, TLB_SAVED_R25, r25
294 gettr tr0, r2
295 gettr tr1, r3
296 gettr tr2, r4
297 gettr tr3, r5
298 gettr tr4, r18
299 st.q SP, SAVED_TR0 , r2
300 st.q SP, TLB_SAVED_TR1 , r3
301 st.q SP, TLB_SAVED_TR2 , r4
302 st.q SP, TLB_SAVED_TR3 , r5
303 st.q SP, TLB_SAVED_TR4 , r18
304
305 pt do_fast_page_fault, tr0
306 getcon SSR, r2
307 getcon EXPEVT, r3
308 getcon TEA, r4
309 shlri r2, 30, r2
310 andi r2, 1, r2 /* r2 = SSR.MD */
311 blink tr0, LINK
312
313 pt fixup_to_invoke_general_handler, tr1
314
315 /* If the fast path handler fixed the fault, just drop through quickly
316 to the restore code right away to return to the excepting context.
317 */
318 beqi/u r2, 0, tr1
319
320fast_tlb_miss_restore:
321 ld.q SP, SAVED_TR0, r2
322 ld.q SP, TLB_SAVED_TR1, r3
323 ld.q SP, TLB_SAVED_TR2, r4
324
325 ld.q SP, TLB_SAVED_TR3, r5
326 ld.q SP, TLB_SAVED_TR4, r18
327
328 ptabs r2, tr0
329 ptabs r3, tr1
330 ptabs r4, tr2
331 ptabs r5, tr3
332 ptabs r18, tr4
333
334 ld.q SP, TLB_SAVED_R0, r0
335 ld.q SP, TLB_SAVED_R1, r1
336 ld.q SP, SAVED_R2, r2
337 ld.q SP, SAVED_R3, r3
338 ld.q SP, SAVED_R4, r4
339 ld.q SP, SAVED_R5, r5
340 ld.q SP, SAVED_R6, r6
341 ld.q SP, SAVED_R18, r18
342 ld.q SP, TLB_SAVED_R25, r25
343
344 getcon KCR1, SP
345 rte
346 nop /* for safety, in case the code is run on sh5-101 cut1.x */
347
348fixup_to_invoke_general_handler:
349
350 /* OK, new method. Restore stuff that's not expected to get saved into
351 the 'first-level' reg save area, then just fall through to setting
352 up the registers and calling the second-level handler. */
353
354 /* 2nd level expects r2,3,4,5,6,18,tr0 to be saved. So we must restore
355 r25,tr1-4 and save r6 to get into the right state. */
356
357 ld.q SP, TLB_SAVED_TR1, r3
358 ld.q SP, TLB_SAVED_TR2, r4
359 ld.q SP, TLB_SAVED_TR3, r5
360 ld.q SP, TLB_SAVED_TR4, r18
361 ld.q SP, TLB_SAVED_R25, r25
362
363 ld.q SP, TLB_SAVED_R0, r0
364 ld.q SP, TLB_SAVED_R1, r1
365
366 ptabs/u r3, tr1
367 ptabs/u r4, tr2
368 ptabs/u r5, tr3
369 ptabs/u r18, tr4
370
371 /* Set args for Non-debug, TLB miss class handler */
372 getcon EXPEVT, r2
373 movi ret_from_exception, r3
374 ori r3, 1, r3
375 movi EVENT_FAULT_TLB, r4
376 or SP, ZERO, r5
377 getcon KCR1, SP
378 pta handle_exception, tr0
379 blink tr0, ZERO
380
381/* NB TAKE GREAT CARE HERE TO ENSURE THAT THE INTERRUPT CODE
382 DOES END UP AT VBR+0x600 */
383 nop
384 nop
385 nop
386 nop
387 nop
388 nop
389
390 .balign 256
391 /* VBR + 0x600 */
392
393interrupt:
394 synco /* TAKum03020 (but probably a good idea anyway.) */
395 /* Save original stack pointer into KCR1 */
396 putcon SP, KCR1
397
398 /* Save other original registers into reg_save_area */
399 movi reg_save_area, SP
400 st.q SP, SAVED_R2, r2
401 st.q SP, SAVED_R3, r3
402 st.q SP, SAVED_R4, r4
403 st.q SP, SAVED_R5, r5
404 st.q SP, SAVED_R6, r6
405 st.q SP, SAVED_R18, r18
406 gettr tr0, r3
407 st.q SP, SAVED_TR0, r3
408
409 /* Set args for interrupt class handler */
410 getcon INTEVT, r2
411 movi ret_from_irq, r3
412 ori r3, 1, r3
413 movi EVENT_INTERRUPT, r4
414 or SP, ZERO, r5
415 getcon KCR1, SP
416 pta handle_exception, tr0
417 blink tr0, ZERO
418 .balign TEXT_SIZE /* let's waste the bare minimum */
419
420LVBR_block_end: /* Marker. Used for total checking */
421
422 .balign 256
423LRESVEC_block:
424 /* Panic handler. Called with MMU off. Possible causes/actions:
425 * - Reset: Jump to program start.
426 * - Single Step: Turn off Single Step & return.
427 * - Others: Call panic handler, passing PC as arg.
428 * (this may need to be extended...)
429 */
430reset_or_panic:
431 synco /* TAKum03020 (but probably a good idea anyway.) */
432 putcon SP, DCR
433 /* First save r0-1 and tr0, as we need to use these */
434 movi resvec_save_area-CONFIG_PAGE_OFFSET, SP
435 st.q SP, 0, r0
436 st.q SP, 8, r1
437 gettr tr0, r0
438 st.q SP, 32, r0
439
440 /* Check cause */
441 getcon EXPEVT, r0
442 movi RESET_CAUSE, r1
443 sub r1, r0, r1 /* r1=0 if reset */
444 movi _stext-CONFIG_PAGE_OFFSET, r0
445 ori r0, 1, r0
446 ptabs r0, tr0
447 beqi r1, 0, tr0 /* Jump to start address if reset */
448
449 getcon EXPEVT, r0
450 movi DEBUGSS_CAUSE, r1
451 sub r1, r0, r1 /* r1=0 if single step */
452 pta single_step_panic, tr0
453 beqi r1, 0, tr0 /* jump if single step */
454
455 /* Now jump to where we save the registers. */
456 movi panic_stash_regs-CONFIG_PAGE_OFFSET, r1
457 ptabs r1, tr0
458 blink tr0, r63
459
460single_step_panic:
461	/* We are in a handler with Single Step set. We need to resume the
462	 * handler by turning on the MMU and turning off Single Step. */
463 getcon SSR, r0
464 movi SR_MMU, r1
465 or r0, r1, r0
466 movi ~SR_SS, r1
467 and r0, r1, r0
468 putcon r0, SSR
469 /* Restore EXPEVT, as the rte won't do this */
470 getcon PEXPEVT, r0
471 putcon r0, EXPEVT
472 /* Restore regs */
473 ld.q SP, 32, r0
474 ptabs r0, tr0
475 ld.q SP, 0, r0
476 ld.q SP, 8, r1
477 getcon DCR, SP
478 synco
479 rte
480
481
482 .balign 256
483debug_exception:
484 synco /* TAKum03020 (but probably a good idea anyway.) */
485 /*
486 * Single step/software_break_point first level handler.
487 * Called with MMU off, so the first thing we do is enable it
488 * by doing an rte with appropriate SSR.
489 */
490 putcon SP, DCR
491 /* Save SSR & SPC, together with R0 & R1, as we need to use 2 regs. */
492 movi resvec_save_area-CONFIG_PAGE_OFFSET, SP
493
494 /* With the MMU off, we are bypassing the cache, so purge any
495 * data that will be made stale by the following stores.
496 */
497 ocbp SP, 0
498 synco
499
500 st.q SP, 0, r0
501 st.q SP, 8, r1
502 getcon SPC, r0
503 st.q SP, 16, r0
504 getcon SSR, r0
505 st.q SP, 24, r0
506
507 /* Enable MMU, block exceptions, set priv mode, disable single step */
508 movi SR_MMU | SR_BL | SR_MD, r1
509 or r0, r1, r0
510 movi ~SR_SS, r1
511 and r0, r1, r0
512 putcon r0, SSR
513 /* Force control to debug_exception_2 when rte is executed */
514	movi	debug_exception_2, r0
515 ori r0, 1, r0 /* force SHmedia, just in case */
516 putcon r0, SPC
517 getcon DCR, SP
518 synco
519 rte
520debug_exception_2:
521 /* Restore saved regs */
522 putcon SP, KCR1
523 movi resvec_save_area, SP
524 ld.q SP, 24, r0
525 putcon r0, SSR
526 ld.q SP, 16, r0
527 putcon r0, SPC
528 ld.q SP, 0, r0
529 ld.q SP, 8, r1
530
531 /* Save other original registers into reg_save_area */
532 movi reg_save_area, SP
533 st.q SP, SAVED_R2, r2
534 st.q SP, SAVED_R3, r3
535 st.q SP, SAVED_R4, r4
536 st.q SP, SAVED_R5, r5
537 st.q SP, SAVED_R6, r6
538 st.q SP, SAVED_R18, r18
539 gettr tr0, r3
540 st.q SP, SAVED_TR0, r3
541
542 /* Set args for debug class handler */
543 getcon EXPEVT, r2
544 movi ret_from_exception, r3
545 ori r3, 1, r3
546 movi EVENT_DEBUG, r4
547 or SP, ZERO, r5
548 getcon KCR1, SP
549 pta handle_exception, tr0
550 blink tr0, ZERO
551
552 .balign 256
553debug_interrupt:
554 /* !!! WE COME HERE IN REAL MODE !!! */
555	/* Hook up the debug interrupt to allow various debugging options to be
556 * hooked into its handler. */
557 /* Save original stack pointer into KCR1 */
558 synco
559 putcon SP, KCR1
560 movi resvec_save_area-CONFIG_PAGE_OFFSET, SP
561 ocbp SP, 0
562 ocbp SP, 32
563 synco
564
565	/* Save other original registers into resvec_save_area through real addresses */
566 st.q SP, SAVED_R2, r2
567 st.q SP, SAVED_R3, r3
568 st.q SP, SAVED_R4, r4
569 st.q SP, SAVED_R5, r5
570 st.q SP, SAVED_R6, r6
571 st.q SP, SAVED_R18, r18
572 gettr tr0, r3
573 st.q SP, SAVED_TR0, r3
574
575 /* move (spc,ssr)->(pspc,pssr). The rte will shift
576 them back again, so that they look like the originals
577 as far as the real handler code is concerned. */
578 getcon spc, r6
579 putcon r6, pspc
580 getcon ssr, r6
581 putcon r6, pssr
582
583 ! construct useful SR for handle_exception
584 movi 3, r6
585 shlli r6, 30, r6
586 getcon sr, r18
587 or r18, r6, r6
588 putcon r6, ssr
589
590 ! SSR is now the current SR with the MD and MMU bits set
591 ! i.e. the rte will switch back to priv mode and put
592 ! the mmu back on
593
594 ! construct spc
595 movi handle_exception, r18
596 ori r18, 1, r18 ! for safety (do we need this?)
597 putcon r18, spc
598
599 /* Set args for Non-debug, Not a TLB miss class handler */
600
601 ! EXPEVT==0x80 is unused, so 'steal' this value to put the
602 ! debug interrupt handler in the vectoring table
603 movi 0x80, r2
604 movi ret_from_exception, r3
605 ori r3, 1, r3
606 movi EVENT_FAULT_NOT_TLB, r4
607
608 or SP, ZERO, r5
609 movi CONFIG_PAGE_OFFSET, r6
610 add r6, r5, r5
611 getcon KCR1, SP
612
613 synco ! for safety
614 rte ! -> handle_exception, switch back to priv mode again
615
616LRESVEC_block_end: /* Marker. Unused. */
617
618 .balign TEXT_SIZE
619
620/*
621 * Second level handler for VBR-based exceptions. Pre-handler.
622 * Common to all stack-frame-sensitive handlers.
623 *
624 * Inputs:
625 * (KCR0) Current [current task union]
626 * (KCR1) Original SP
627 * (r2) INTEVT/EXPEVT
628 * (r3) appropriate return address
629 * (r4)	Event (0 = interrupt, 1 = TLB miss fault, 2 = Not TLB miss fault, 3 = debug)
630 * (r5) Pointer to reg_save_area
631 * (SP) Original SP
632 *
633 * Available registers:
634 * (r6)
635 * (r18)
636 * (tr0)
637 *
638 */
639handle_exception:
640 /* Common 2nd level handler. */
641
642 /* First thing we need an appropriate stack pointer */
643 getcon SSR, r6
644 shlri r6, 30, r6
645 andi r6, 1, r6
646 pta stack_ok, tr0
647 bne r6, ZERO, tr0 /* Original stack pointer is fine */
648
649 /* Set stack pointer for user fault */
650 getcon KCR0, SP
651 movi THREAD_SIZE, r6 /* Point to the end */
652 add SP, r6, SP
653
654stack_ok:
655
656/* DEBUG : check for underflow/overflow of the kernel stack */
657 pta no_underflow, tr0
658 getcon KCR0, r6
659 movi 1024, r18
660 add r6, r18, r6
661	bge	SP, r6, tr0	! below 1k from bottom of stack => danger zone
662
663/* Just panic to cause a crash. */
664bad_sp:
665 ld.b r63, 0, r6
666 nop
667
668no_underflow:
669 pta bad_sp, tr0
670 getcon kcr0, r6
671 movi THREAD_SIZE, r18
672 add r18, r6, r6
673 bgt SP, r6, tr0 ! sp above the stack
674
675 /* Make some room for the BASIC frame. */
676 movi -(FRAME_SIZE), r6
677 add SP, r6, SP
678
679/* Could do this with no stalling if we had another spare register, but the
680 code below will be OK. */
681 ld.q r5, SAVED_R2, r6
682 ld.q r5, SAVED_R3, r18
683 st.q SP, FRAME_R(2), r6
684 ld.q r5, SAVED_R4, r6
685 st.q SP, FRAME_R(3), r18
686 ld.q r5, SAVED_R5, r18
687 st.q SP, FRAME_R(4), r6
688 ld.q r5, SAVED_R6, r6
689 st.q SP, FRAME_R(5), r18
690 ld.q r5, SAVED_R18, r18
691 st.q SP, FRAME_R(6), r6
692 ld.q r5, SAVED_TR0, r6
693 st.q SP, FRAME_R(18), r18
694 st.q SP, FRAME_T(0), r6
695
696 /* Keep old SP around */
697 getcon KCR1, r6
698
699 /* Save the rest of the general purpose registers */
700 st.q SP, FRAME_R(0), r0
701 st.q SP, FRAME_R(1), r1
702 st.q SP, FRAME_R(7), r7
703 st.q SP, FRAME_R(8), r8
704 st.q SP, FRAME_R(9), r9
705 st.q SP, FRAME_R(10), r10
706 st.q SP, FRAME_R(11), r11
707 st.q SP, FRAME_R(12), r12
708 st.q SP, FRAME_R(13), r13
709 st.q SP, FRAME_R(14), r14
710
711 /* SP is somewhere else */
712 st.q SP, FRAME_R(15), r6
713
714 st.q SP, FRAME_R(16), r16
715 st.q SP, FRAME_R(17), r17
716 /* r18 is saved earlier. */
717 st.q SP, FRAME_R(19), r19
718 st.q SP, FRAME_R(20), r20
719 st.q SP, FRAME_R(21), r21
720 st.q SP, FRAME_R(22), r22
721 st.q SP, FRAME_R(23), r23
722 st.q SP, FRAME_R(24), r24
723 st.q SP, FRAME_R(25), r25
724 st.q SP, FRAME_R(26), r26
725 st.q SP, FRAME_R(27), r27
726 st.q SP, FRAME_R(28), r28
727 st.q SP, FRAME_R(29), r29
728 st.q SP, FRAME_R(30), r30
729 st.q SP, FRAME_R(31), r31
730 st.q SP, FRAME_R(32), r32
731 st.q SP, FRAME_R(33), r33
732 st.q SP, FRAME_R(34), r34
733 st.q SP, FRAME_R(35), r35
734 st.q SP, FRAME_R(36), r36
735 st.q SP, FRAME_R(37), r37
736 st.q SP, FRAME_R(38), r38
737 st.q SP, FRAME_R(39), r39
738 st.q SP, FRAME_R(40), r40
739 st.q SP, FRAME_R(41), r41
740 st.q SP, FRAME_R(42), r42
741 st.q SP, FRAME_R(43), r43
742 st.q SP, FRAME_R(44), r44
743 st.q SP, FRAME_R(45), r45
744 st.q SP, FRAME_R(46), r46
745 st.q SP, FRAME_R(47), r47
746 st.q SP, FRAME_R(48), r48
747 st.q SP, FRAME_R(49), r49
748 st.q SP, FRAME_R(50), r50
749 st.q SP, FRAME_R(51), r51
750 st.q SP, FRAME_R(52), r52
751 st.q SP, FRAME_R(53), r53
752 st.q SP, FRAME_R(54), r54
753 st.q SP, FRAME_R(55), r55
754 st.q SP, FRAME_R(56), r56
755 st.q SP, FRAME_R(57), r57
756 st.q SP, FRAME_R(58), r58
757 st.q SP, FRAME_R(59), r59
758 st.q SP, FRAME_R(60), r60
759 st.q SP, FRAME_R(61), r61
760 st.q SP, FRAME_R(62), r62
761
762 /*
763 * Save the S* registers.
764 */
765 getcon SSR, r61
766 st.q SP, FRAME_S(FSSR), r61
767 getcon SPC, r62
768 st.q SP, FRAME_S(FSPC), r62
769 movi -1, r62 /* Reset syscall_nr */
770 st.q SP, FRAME_S(FSYSCALL_ID), r62
771
772 /* Save the rest of the target registers */
773 gettr tr1, r6
774 st.q SP, FRAME_T(1), r6
775 gettr tr2, r6
776 st.q SP, FRAME_T(2), r6
777 gettr tr3, r6
778 st.q SP, FRAME_T(3), r6
779 gettr tr4, r6
780 st.q SP, FRAME_T(4), r6
781 gettr tr5, r6
782 st.q SP, FRAME_T(5), r6
783 gettr tr6, r6
784 st.q SP, FRAME_T(6), r6
785 gettr tr7, r6
786 st.q SP, FRAME_T(7), r6
787
788 ! setup FP so that unwinder can wind back through nested kernel mode
789 ! exceptions
790 add SP, ZERO, r14
791
792#ifdef CONFIG_POOR_MANS_STRACE
793 /* We've pushed all the registers now, so only r2-r4 hold anything
794 * useful. Move them into callee save registers */
795 or r2, ZERO, r28
796 or r3, ZERO, r29
797 or r4, ZERO, r30
798
799 /* Preserve r2 as the event code */
800 movi evt_debug, r3
801 ori r3, 1, r3
802 ptabs r3, tr0
803
804 or SP, ZERO, r6
805 getcon TRA, r5
806 blink tr0, LINK
807
808 or r28, ZERO, r2
809 or r29, ZERO, r3
810 or r30, ZERO, r4
811#endif
812
813 /* For syscall and debug race condition, get TRA now */
814 getcon TRA, r5
815
816 /* We are in a safe position to turn SR.BL off, but set IMASK=0xf
817 * Also set FD, to catch FPU usage in the kernel.
818 *
819 * benedict.gaster@superh.com 29/07/2002
820 *
821 * On all SH5-101 revisions it is unsafe to raise the IMASK and at the
822 * same time change BL from 1->0, as any pending interrupt of a level
823	 * same time change BL from 1->0, as any pending interrupt of a level
824 * taken unexpectedly.
825 *
826 * To avoid this we raise the IMASK and then issue another PUTCON to
827 * enable interrupts.
828 */
829 getcon SR, r6
830 movi SR_IMASK | SR_FD, r7
831 or r6, r7, r6
832 putcon r6, SR
833 movi SR_UNBLOCK_EXC, r7
834 and r6, r7, r6
835 putcon r6, SR
836
837
838 /* Now call the appropriate 3rd level handler */
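	/* trap_jtable holds one 32-bit entry per 0x20-spaced event code:
	 * code >> 3 gives the byte offset of the entry (0x20 / 8 == 4),
	 * and a further >> 2 recovers the entry number handed on in r2.
	 * Roughly, in C: handler = trap_jtable[code >> 5]; */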
839 or r3, ZERO, LINK
840 movi trap_jtable, r3
841 shlri r2, 3, r2
842 ldx.l r2, r3, r3
843 shlri r2, 2, r2
844 ptabs r3, tr0
845 or SP, ZERO, r3
846 blink tr0, ZERO
847
848/*
849 * Second level handler for VBR-based exceptions. Post-handlers.
850 *
851 * Post-handlers for interrupts (ret_from_irq), exceptions
852 * (ret_from_exception) and common reentrance doors (restore_all
853 * to get back to the original context, ret_from_syscall loop to
854 * check kernel exiting).
855 *
856 * ret_with_reschedule and work_notifysig are inner labels of
857 * the ret_from_syscall loop.
858 *
859 * Common to all stack-frame-sensitive handlers.
860 *
861 * Inputs:
862 * (SP) struct pt_regs *, original register's frame pointer (basic)
863 *
864 */
865 .global ret_from_irq
866ret_from_irq:
867#ifdef CONFIG_POOR_MANS_STRACE
868 pta evt_debug_ret_from_irq, tr0
869 ori SP, 0, r2
870 blink tr0, LINK
871#endif
872 ld.q SP, FRAME_S(FSSR), r6
873 shlri r6, 30, r6
874 andi r6, 1, r6
875 pta resume_kernel, tr0
876 bne r6, ZERO, tr0 /* no further checks */
877 STI()
878 pta ret_with_reschedule, tr0
879 blink tr0, ZERO /* Do not check softirqs */
880
881 .global ret_from_exception
882ret_from_exception:
883 preempt_stop()
884
885#ifdef CONFIG_POOR_MANS_STRACE
886 pta evt_debug_ret_from_exc, tr0
887 ori SP, 0, r2
888 blink tr0, LINK
889#endif
890
891 ld.q SP, FRAME_S(FSSR), r6
892 shlri r6, 30, r6
893 andi r6, 1, r6
894 pta resume_kernel, tr0
895 bne r6, ZERO, tr0 /* no further checks */
896
897 /* Check softirqs */
898
899#ifdef CONFIG_PREEMPT
900 pta ret_from_syscall, tr0
901 blink tr0, ZERO
902
903resume_kernel:
904 pta restore_all, tr0
905
906 getcon KCR0, r6
907 ld.l r6, TI_PRE_COUNT, r7
908 beq/u r7, ZERO, tr0
909
910need_resched:
911 ld.l r6, TI_FLAGS, r7
912 movi (1 << TIF_NEED_RESCHED), r8
913 and r8, r7, r8
914 bne r8, ZERO, tr0
915
916 getcon SR, r7
917 andi r7, 0xf0, r7
918 bne r7, ZERO, tr0
919
920 movi ((PREEMPT_ACTIVE >> 16) & 65535), r8
921 shori (PREEMPT_ACTIVE & 65535), r8
922 st.l r6, TI_PRE_COUNT, r8
923
924 STI()
925 movi schedule, r7
926 ori r7, 1, r7
927 ptabs r7, tr1
928 blink tr1, LINK
929
930 st.l r6, TI_PRE_COUNT, ZERO
931 CLI()
932
933 pta need_resched, tr1
934 blink tr1, ZERO
935#endif
936
937 .global ret_from_syscall
938ret_from_syscall:
939
940ret_with_reschedule:
941 getcon KCR0, r6 ! r6 contains current_thread_info
942 ld.l r6, TI_FLAGS, r7 ! r7 contains current_thread_info->flags
943
944 movi _TIF_NEED_RESCHED, r8
945 and r8, r7, r8
946 pta work_resched, tr0
947 bne r8, ZERO, tr0
948
949 pta restore_all, tr1
950
951 movi (_TIF_SIGPENDING | _TIF_RESTORE_SIGMASK), r8
952 and r8, r7, r8
953 pta work_notifysig, tr0
954 bne r8, ZERO, tr0
955
956 blink tr1, ZERO
957
958work_resched:
959 pta ret_from_syscall, tr0
960 gettr tr0, LINK
961 movi schedule, r6
962 ptabs r6, tr0
963 blink tr0, ZERO /* Call schedule(), return on top */
964
965work_notifysig:
966 gettr tr1, LINK
967
968 movi do_signal, r6
969 ptabs r6, tr0
970 or SP, ZERO, r2
971 or ZERO, ZERO, r3
972 blink tr0, LINK /* Call do_signal(regs, 0), return here */
973
974restore_all:
975 /* Do prefetches */
976
977 ld.q SP, FRAME_T(0), r6
978 ld.q SP, FRAME_T(1), r7
979 ld.q SP, FRAME_T(2), r8
980 ld.q SP, FRAME_T(3), r9
981 ptabs r6, tr0
982 ptabs r7, tr1
983 ptabs r8, tr2
984 ptabs r9, tr3
985 ld.q SP, FRAME_T(4), r6
986 ld.q SP, FRAME_T(5), r7
987 ld.q SP, FRAME_T(6), r8
988 ld.q SP, FRAME_T(7), r9
989 ptabs r6, tr4
990 ptabs r7, tr5
991 ptabs r8, tr6
992 ptabs r9, tr7
993
994 ld.q SP, FRAME_R(0), r0
995 ld.q SP, FRAME_R(1), r1
996 ld.q SP, FRAME_R(2), r2
997 ld.q SP, FRAME_R(3), r3
998 ld.q SP, FRAME_R(4), r4
999 ld.q SP, FRAME_R(5), r5
1000 ld.q SP, FRAME_R(6), r6
1001 ld.q SP, FRAME_R(7), r7
1002 ld.q SP, FRAME_R(8), r8
1003 ld.q SP, FRAME_R(9), r9
1004 ld.q SP, FRAME_R(10), r10
1005 ld.q SP, FRAME_R(11), r11
1006 ld.q SP, FRAME_R(12), r12
1007 ld.q SP, FRAME_R(13), r13
1008 ld.q SP, FRAME_R(14), r14
1009
1010 ld.q SP, FRAME_R(16), r16
1011 ld.q SP, FRAME_R(17), r17
1012 ld.q SP, FRAME_R(18), r18
1013 ld.q SP, FRAME_R(19), r19
1014 ld.q SP, FRAME_R(20), r20
1015 ld.q SP, FRAME_R(21), r21
1016 ld.q SP, FRAME_R(22), r22
1017 ld.q SP, FRAME_R(23), r23
1018 ld.q SP, FRAME_R(24), r24
1019 ld.q SP, FRAME_R(25), r25
1020 ld.q SP, FRAME_R(26), r26
1021 ld.q SP, FRAME_R(27), r27
1022 ld.q SP, FRAME_R(28), r28
1023 ld.q SP, FRAME_R(29), r29
1024 ld.q SP, FRAME_R(30), r30
1025 ld.q SP, FRAME_R(31), r31
1026 ld.q SP, FRAME_R(32), r32
1027 ld.q SP, FRAME_R(33), r33
1028 ld.q SP, FRAME_R(34), r34
1029 ld.q SP, FRAME_R(35), r35
1030 ld.q SP, FRAME_R(36), r36
1031 ld.q SP, FRAME_R(37), r37
1032 ld.q SP, FRAME_R(38), r38
1033 ld.q SP, FRAME_R(39), r39
1034 ld.q SP, FRAME_R(40), r40
1035 ld.q SP, FRAME_R(41), r41
1036 ld.q SP, FRAME_R(42), r42
1037 ld.q SP, FRAME_R(43), r43
1038 ld.q SP, FRAME_R(44), r44
1039 ld.q SP, FRAME_R(45), r45
1040 ld.q SP, FRAME_R(46), r46
1041 ld.q SP, FRAME_R(47), r47
1042 ld.q SP, FRAME_R(48), r48
1043 ld.q SP, FRAME_R(49), r49
1044 ld.q SP, FRAME_R(50), r50
1045 ld.q SP, FRAME_R(51), r51
1046 ld.q SP, FRAME_R(52), r52
1047 ld.q SP, FRAME_R(53), r53
1048 ld.q SP, FRAME_R(54), r54
1049 ld.q SP, FRAME_R(55), r55
1050 ld.q SP, FRAME_R(56), r56
1051 ld.q SP, FRAME_R(57), r57
1052 ld.q SP, FRAME_R(58), r58
1053
1054 getcon SR, r59
1055 movi SR_BLOCK_EXC, r60
1056 or r59, r60, r59
1057 putcon r59, SR /* SR.BL = 1, keep nesting out */
1058 ld.q SP, FRAME_S(FSSR), r61
1059 ld.q SP, FRAME_S(FSPC), r62
1060 movi SR_ASID_MASK, r60
1061 and r59, r60, r59
1062 andc r61, r60, r61 /* Clear out older ASID */
1063 or r59, r61, r61 /* Retain current ASID */
1064 putcon r61, SSR
1065 putcon r62, SPC
1066
1067 /* Ignore FSYSCALL_ID */
1068
1069 ld.q SP, FRAME_R(59), r59
1070 ld.q SP, FRAME_R(60), r60
1071 ld.q SP, FRAME_R(61), r61
1072 ld.q SP, FRAME_R(62), r62
1073
1074 /* Last touch */
1075 ld.q SP, FRAME_R(15), SP
1076 rte
1077 nop
1078
1079/*
1080 * Third level handlers for VBR-based exceptions. Adapting args to
1081 * and/or deflecting to fourth level handlers.
1082 *
1083 * Fourth level handlers interface.
1084 * Most are C-coded handlers directly pointed by the trap_jtable.
1085 * (Third = Fourth level)
1086 * Inputs:
1087 * (r2) fault/interrupt code, entry number (e.g. NMI = 14,
1088 * IRL0-3 (0000) = 16, RTLBMISS = 2, SYSCALL = 11, etc ...)
1089 * (r3) struct pt_regs *, original register's frame pointer
1090 * (r4) Event (0 = interrupt, 1 = TLB miss fault, 2 = Not TLB miss fault)
1091 * (r5) TRA control register (for syscall/debug benefit only)
1092 * (LINK) return address
1093 * (SP) = r3
1094 *
1095 * Kernel TLB fault handlers will get a slightly different interface.
1096 * (r2) struct pt_regs *, original register's frame pointer
1097 * (r3) writeaccess, whether it's a store fault as opposed to load fault
1098 * (r4) execaccess, whether it's a ITLB fault as opposed to DTLB fault
1099 * (r5) Effective Address of fault
1100 * (LINK) return address
1101 * (SP) = r2
1102 *
1103 * fpu_error_or_IRQA/fpu_error_or_IRQB are helpers to deflect to the right cause.
1104 *
1105 */
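/* Viewed from C, a fourth level handler reached via trap_jtable thus has
 * roughly this shape (an illustrative prototype, not a declaration shared
 * with the C code):
 *
 *	asmlinkage void handler(int evt_no, struct pt_regs *regs,
 *				int event, int tra);
 *
 * with each handler consuming only the arguments it needs.
 */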
1106tlb_miss_load:
1107 or SP, ZERO, r2
1108 or ZERO, ZERO, r3 /* Read */
1109 or ZERO, ZERO, r4 /* Data */
1110 getcon TEA, r5
1111 pta call_do_page_fault, tr0
1112 beq ZERO, ZERO, tr0
1113
1114tlb_miss_store:
1115 or SP, ZERO, r2
1116 movi 1, r3 /* Write */
1117 or ZERO, ZERO, r4 /* Data */
1118 getcon TEA, r5
1119 pta call_do_page_fault, tr0
1120 beq ZERO, ZERO, tr0
1121
1122itlb_miss_or_IRQ:
1123 pta its_IRQ, tr0
1124 beqi/u r4, EVENT_INTERRUPT, tr0
1125 or SP, ZERO, r2
1126 or ZERO, ZERO, r3 /* Read */
1127 movi 1, r4 /* Text */
1128 getcon TEA, r5
1129 /* Fall through */
1130
1131call_do_page_fault:
1132 movi do_page_fault, r6
1133 ptabs r6, tr0
1134 blink tr0, ZERO
1135
1136fpu_error_or_IRQA:
1137 pta its_IRQ, tr0
1138 beqi/l r4, EVENT_INTERRUPT, tr0
1139#ifdef CONFIG_SH_FPU
1140 movi do_fpu_state_restore, r6
1141#else
1142 movi do_exception_error, r6
1143#endif
1144 ptabs r6, tr0
1145 blink tr0, ZERO
1146
1147fpu_error_or_IRQB:
1148 pta its_IRQ, tr0
1149 beqi/l r4, EVENT_INTERRUPT, tr0
1150#ifdef CONFIG_SH_FPU
1151 movi do_fpu_state_restore, r6
1152#else
1153 movi do_exception_error, r6
1154#endif
1155 ptabs r6, tr0
1156 blink tr0, ZERO
1157
1158its_IRQ:
1159 movi do_IRQ, r6
1160 ptabs r6, tr0
1161 blink tr0, ZERO
1162
1163/*
1164 * system_call/unknown_trap third level handler:
1165 *
1166 * Inputs:
1167 * (r2) fault/interrupt code, entry number (TRAP = 11)
1168 * (r3) struct pt_regs *, original register's frame pointer
1169 * (r4) Not used. Event (0=interrupt, 1=TLB miss fault, 2=Not TLB miss fault)
1170 * (r5) TRA Control Reg (0x00xyzzzz: x=1 SYSCALL, y = #args, z=nr)
1171 * (SP) = r3
1172 * (LINK) return address: ret_from_exception
1173 * (*r3) Syscall parms: SC#, arg0, arg1, ..., arg5 in order (Saved r2/r7)
1174 *
1175 * Outputs:
1176 * (*r3) Syscall reply (Saved r2)
1177 * (LINK) In case of syscall only it can be scrapped.
1178 * Common second level post handler will be ret_from_syscall.
1179 * Common (non-trace) exit point to that is syscall_ret (saving
1180 * result to r2). Common bad exit point is syscall_bad (returning
1181 * ENOSYS then saved to r2).
1182 *
1183 */
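/* As a concrete example, the default signal restorers later in this file
 * build exactly such a TRA value: "movi 0x10, r9; shori __NR_sigreturn, r9;
 * trapa r9" leaves 0x10zzzz in TRA, i.e. x=1 (syscall) with the syscall
 * number in the low bits.
 */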
1184
1185unknown_trap:
1186 /* Unknown Trap or User Trace */
1187 movi do_unknown_trapa, r6
1188 ptabs r6, tr0
1189 ld.q r3, FRAME_R(9), r2 /* r2 = #arg << 16 | syscall # */
1190 andi r2, 0x1ff, r2 /* r2 = syscall # */
1191 blink tr0, LINK
1192
1193 pta syscall_ret, tr0
1194 blink tr0, ZERO
1195
1196	/* New syscall implementation */
1197system_call:
1198 pta unknown_trap, tr0
1199 or r5, ZERO, r4 /* TRA (=r5) -> r4 */
1200 shlri r4, 20, r4
1201 bnei r4, 1, tr0 /* unknown_trap if not 0x1yzzzz */
1202
1203 /* It's a system call */
1204 st.q r3, FRAME_S(FSYSCALL_ID), r5 /* ID (0x1yzzzz) -> stack */
1205 andi r5, 0x1ff, r5 /* syscall # -> r5 */
1206
1207 STI()
1208
1209 pta syscall_allowed, tr0
1210 movi NR_syscalls - 1, r4 /* Last valid */
1211 bgeu/l r4, r5, tr0
1212
1213syscall_bad:
1214 /* Return ENOSYS ! */
1215 movi -(ENOSYS), r2 /* Fall-through */
1216
1217 .global syscall_ret
1218syscall_ret:
1219 st.q SP, FRAME_R(9), r2 /* Expecting SP back to BASIC frame */
1220
1221#ifdef CONFIG_POOR_MANS_STRACE
1222 /* nothing useful in registers at this point */
1223
1224 movi evt_debug2, r5
1225 ori r5, 1, r5
1226 ptabs r5, tr0
1227 ld.q SP, FRAME_R(9), r2
1228 or SP, ZERO, r3
1229 blink tr0, LINK
1230#endif
1231
1232 ld.q SP, FRAME_S(FSPC), r2
1233 addi r2, 4, r2 /* Move PC, being pre-execution event */
1234 st.q SP, FRAME_S(FSPC), r2
1235 pta ret_from_syscall, tr0
1236 blink tr0, ZERO
1237
1238
1239/* A different return path for ret_from_fork, because with later kernels
1240 * we now need to call schedule_tail. Since prev is already loaded into
1241 * r2 by switch_to(), we can call it straight away.
1242 */
1243
1244.global ret_from_fork
1245ret_from_fork:
1246
1247 movi schedule_tail,r5
1248 ori r5, 1, r5
1249 ptabs r5, tr0
1250 blink tr0, LINK
1251
1252#ifdef CONFIG_POOR_MANS_STRACE
1253 /* nothing useful in registers at this point */
1254
1255 movi evt_debug2, r5
1256 ori r5, 1, r5
1257 ptabs r5, tr0
1258 ld.q SP, FRAME_R(9), r2
1259 or SP, ZERO, r3
1260 blink tr0, LINK
1261#endif
1262
1263 ld.q SP, FRAME_S(FSPC), r2
1264 addi r2, 4, r2 /* Move PC, being pre-execution event */
1265 st.q SP, FRAME_S(FSPC), r2
1266 pta ret_from_syscall, tr0
1267 blink tr0, ZERO
1268
1269
1270
1271syscall_allowed:
1272 /* Use LINK to deflect the exit point, default is syscall_ret */
1273 pta syscall_ret, tr0
1274 gettr tr0, LINK
1275 pta syscall_notrace, tr0
1276
1277 getcon KCR0, r2
1278 ld.l r2, TI_FLAGS, r4
1279 movi (_TIF_SYSCALL_TRACE | _TIF_SINGLESTEP | _TIF_SYSCALL_AUDIT), r6
1280 and r6, r4, r6
1281 beq/l r6, ZERO, tr0
1282
1283 /* Trace it by calling syscall_trace before and after */
1284 movi syscall_trace, r4
1285 or SP, ZERO, r2
1286 or ZERO, ZERO, r3
1287 ptabs r4, tr0
1288 blink tr0, LINK
1289
1290 /* Reload syscall number as r5 is trashed by syscall_trace */
1291 ld.q SP, FRAME_S(FSYSCALL_ID), r5
1292 andi r5, 0x1ff, r5
1293
1294 pta syscall_ret_trace, tr0
1295 gettr tr0, LINK
1296
1297syscall_notrace:
1298 /* Now point to the appropriate 4th level syscall handler */
1299 movi sys_call_table, r4
1300 shlli r5, 2, r5
1301 ldx.l r4, r5, r5
1302 ptabs r5, tr0
1303
1304 /* Prepare original args */
1305 ld.q SP, FRAME_R(2), r2
1306 ld.q SP, FRAME_R(3), r3
1307 ld.q SP, FRAME_R(4), r4
1308 ld.q SP, FRAME_R(5), r5
1309 ld.q SP, FRAME_R(6), r6
1310 ld.q SP, FRAME_R(7), r7
1311
1312 /* And now the trick for those syscalls requiring regs * ! */
1313 or SP, ZERO, r8
1314
1315 /* Call it */
1316 blink tr0, ZERO /* LINK is already properly set */
1317
1318syscall_ret_trace:
1319 /* We get back here only if under trace */
1320 st.q SP, FRAME_R(9), r2 /* Save return value */
1321
1322 movi syscall_trace, LINK
1323 or SP, ZERO, r2
1324 movi 1, r3
1325 ptabs LINK, tr0
1326 blink tr0, LINK
1327
1328 /* This needs to be done after any syscall tracing */
1329 ld.q SP, FRAME_S(FSPC), r2
1330 addi r2, 4, r2 /* Move PC, being pre-execution event */
1331 st.q SP, FRAME_S(FSPC), r2
1332
1333 pta ret_from_syscall, tr0
1334 blink tr0, ZERO /* Resume normal return sequence */
1335
1336/*
1337 * --- Switch to running under a particular ASID and return the previous ASID value
1338 * --- The caller is assumed to have done a cli before calling this.
1339 *
1340 * Input r2 : new ASID
1341 * Output r2 : old ASID
1342 */
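/* Note: the new ASID is staged in SSR and installed via an rte through
 * SPC rather than written straight into SR, so the address-space switch
 * takes effect atomically with the branch to the local label below
 * (a reading of the code, not a documented constraint).
 */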
1343
1344 .global switch_and_save_asid
1345switch_and_save_asid:
1346 getcon sr, r0
1347 movi 255, r4
1348 shlli r4, 16, r4 /* r4 = mask to select ASID */
1349 and r0, r4, r3 /* r3 = shifted old ASID */
1350 andi r2, 255, r2 /* mask down new ASID */
1351 shlli r2, 16, r2 /* align new ASID against SR.ASID */
1352 andc r0, r4, r0 /* efface old ASID from SR */
1353 or r0, r2, r0 /* insert the new ASID */
1354 putcon r0, ssr
1355 movi 1f, r0
1356 putcon r0, spc
1357 rte
1358 nop
13591:
1360 ptabs LINK, tr0
1361 shlri r3, 16, r2 /* r2 = old ASID */
1362 blink tr0, r63
1363
1364 .global route_to_panic_handler
1365route_to_panic_handler:
1366 /* Switch to real mode, goto panic_handler, don't return. Useful for
1367 last-chance debugging, e.g. if no output wants to go to the console.
1368 */
1369
1370 movi panic_handler - CONFIG_PAGE_OFFSET, r1
1371 ptabs r1, tr0
1372 pta 1f, tr1
1373 gettr tr1, r0
1374 putcon r0, spc
1375 getcon sr, r0
1376 movi 1, r1
1377 shlli r1, 31, r1
1378 andc r0, r1, r0
1379 putcon r0, ssr
1380 rte
1381 nop
13821: /* Now in real mode */
1383 blink tr0, r63
1384 nop
1385
1386 .global peek_real_address_q
1387peek_real_address_q:
1388 /* Two args:
1389 r2 : real mode address to peek
1390 r2(out) : result quadword
1391
1392 This is provided as a cheapskate way of manipulating device
1393 registers for debugging (to avoid the need to onchip_remap the debug
1394 module, and to avoid the need to onchip_remap the watchpoint
1395 controller in a way that identity maps sufficient bits to avoid the
1396 SH5-101 cut2 silicon defect).
1397
1398 This code is not performance critical
1399 */
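	/* Illustrative use from C (see detect_cpu_and_cache_system() in
	   cpu/sh5/probe.c, added in this same change):

	       unsigned long long cir = peek_real_address_q(0x0d000008);
	 */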
1400
1401 add.l r2, r63, r2 /* sign extend address */
1402 getcon sr, r0 /* r0 = saved original SR */
1403 movi 1, r1
1404 shlli r1, 28, r1
1405	or	r0, r1, r1	/* r1 = r0 with block bit set */
1406 putcon r1, sr /* now in critical section */
1407 movi 1, r36
1408 shlli r36, 31, r36
1409 andc r1, r36, r1 /* turn sr.mmu off in real mode section */
1410
1411 putcon r1, ssr
1412 movi .peek0 - CONFIG_PAGE_OFFSET, r36 /* real mode target address */
1413 movi 1f, r37 /* virtual mode return addr */
1414 putcon r36, spc
1415
1416 synco
1417 rte
1418 nop
1419
1420.peek0: /* come here in real mode, don't touch caches!!
1421 still in critical section (sr.bl==1) */
1422 putcon r0, ssr
1423 putcon r37, spc
1424 /* Here's the actual peek. If the address is bad, all bets are now off
1425 * what will happen (handlers invoked in real-mode = bad news) */
1426 ld.q r2, 0, r2
1427 synco
1428 rte /* Back to virtual mode */
1429 nop
1430
14311:
1432 ptabs LINK, tr0
1433 blink tr0, r63
1434
1435 .global poke_real_address_q
1436poke_real_address_q:
1437 /* Two args:
1438 r2 : real mode address to poke
1439 r3 : quadword value to write.
1440
1441 This is provided as a cheapskate way of manipulating device
1442 registers for debugging (to avoid the need to onchip_remap the debug
1443 module, and to avoid the need to onchip_remap the watchpoint
1444 controller in a way that identity maps sufficient bits to avoid the
1445 SH5-101 cut2 silicon defect).
1446
1447 This code is not performance critical
1448 */
1449
1450 add.l r2, r63, r2 /* sign extend address */
1451 getcon sr, r0 /* r0 = saved original SR */
1452 movi 1, r1
1453 shlli r1, 28, r1
1454	or	r0, r1, r1	/* r1 = r0 with block bit set */
1455 putcon r1, sr /* now in critical section */
1456 movi 1, r36
1457 shlli r36, 31, r36
1458 andc r1, r36, r1 /* turn sr.mmu off in real mode section */
1459
1460 putcon r1, ssr
1461 movi .poke0-CONFIG_PAGE_OFFSET, r36 /* real mode target address */
1462 movi 1f, r37 /* virtual mode return addr */
1463 putcon r36, spc
1464
1465 synco
1466 rte
1467 nop
1468
1469.poke0: /* come here in real mode, don't touch caches!!
1470 still in critical section (sr.bl==1) */
1471 putcon r0, ssr
1472 putcon r37, spc
1473 /* Here's the actual poke. If the address is bad, all bets are now off
1474 * what will happen (handlers invoked in real-mode = bad news) */
1475 st.q r2, 0, r3
1476 synco
1477 rte /* Back to virtual mode */
1478 nop
1479
14801:
1481 ptabs LINK, tr0
1482 blink tr0, r63
1483
1484/*
1485 * --- User Access Handling Section
1486 */
1487
1488/*
1489 * User Access support. It has all moved to non-inlined assembler
1490 * functions in here.
1491 *
1492 * __kernel_size_t __copy_user(void *__to, const void *__from,
1493 * __kernel_size_t __n)
1494 *
1495 * Inputs:
1496 * (r2) target address
1497 * (r3) source address
1498 * (r4) size in bytes
1499 *
1500 * Outputs:
1501 * (*r2) target data
1502 * (r2) non-copied bytes
1503 *
1504 * If a fault occurs on the user pointer, bail out early and return the
1505 * number of bytes not copied in r2.
1506 * Strategy : for large blocks, call a real memcpy function which can
1507 * move >1 byte at a time using unaligned ld/st instructions, and can
1508 * manipulate the cache using prefetch + alloco to improve the speed
1509 * further. If a fault occurs in that function, just revert to the
1510 * byte-by-byte approach used for small blocks; this is rare so the
1511 * performance hit for that case does not matter.
1512 *
1513 * For small blocks it's not worth the overhead of setting up and calling
1514 * the memcpy routine; do the copy a byte at a time.
1515 *
1516 */
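/* A minimal usage sketch from C (kbuf, ubuf and len are hypothetical
 * names, not symbols from this file):
 *
 *	long uncopied = __copy_user(kbuf, ubuf, len);
 *	if (uncopied)
 *		ret = -EFAULT;
 *
 * where a nonzero return reports how many bytes were left uncopied and a
 * zero return means the whole copy succeeded.
 */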
1517 .global __copy_user
1518__copy_user:
1519 pta __copy_user_byte_by_byte, tr1
1520 movi 16, r0 ! this value is a best guess, should tune it by benchmarking
1521 bge/u r0, r4, tr1
1522 pta copy_user_memcpy, tr0
1523 addi SP, -32, SP
1524 /* Save arguments in case we have to fix-up unhandled page fault */
1525 st.q SP, 0, r2
1526 st.q SP, 8, r3
1527 st.q SP, 16, r4
1528 st.q SP, 24, r35 ! r35 is callee-save
1529 /* Save LINK in a register to reduce RTS time later (otherwise
1530 ld SP,*,LINK;ptabs LINK;trn;blink trn,r63 becomes a critical path) */
1531 ori LINK, 0, r35
1532 blink tr0, LINK
1533
1534 /* Copy completed normally if we get back here */
1535 ptabs r35, tr0
1536 ld.q SP, 24, r35
1537 /* don't restore r2-r4, pointless */
1538 /* set result=r2 to zero as the copy must have succeeded. */
1539 or r63, r63, r2
1540 addi SP, 32, SP
1541 blink tr0, r63 ! RTS
1542
1543 .global __copy_user_fixup
1544__copy_user_fixup:
1545 /* Restore stack frame */
1546 ori r35, 0, LINK
1547 ld.q SP, 24, r35
1548 ld.q SP, 16, r4
1549 ld.q SP, 8, r3
1550 ld.q SP, 0, r2
1551 addi SP, 32, SP
1552 /* Fall through to original code, in the 'same' state we entered with */
1553
1554/* The slow byte-by-byte method is used if the fast copy traps due to a bad
1555 user address. In that rare case, the speed drop can be tolerated. */
1556__copy_user_byte_by_byte:
1557 pta ___copy_user_exit, tr1
1558 pta ___copy_user1, tr0
1559 beq/u r4, r63, tr1 /* early exit for zero length copy */
1560 sub r2, r3, r0
1561 addi r0, -1, r0
1562
1563___copy_user1:
1564 ld.b r3, 0, r5 /* Fault address 1 */
1565
1566 /* Could rewrite this to use just 1 add, but the second comes 'free'
1567 due to load latency */
1568 addi r3, 1, r3
1569 addi r4, -1, r4 /* No real fixup required */
1570___copy_user2:
1571 stx.b r3, r0, r5 /* Fault address 2 */
1572 bne r4, ZERO, tr0
1573
1574___copy_user_exit:
1575 or r4, ZERO, r2
1576 ptabs LINK, tr0
1577 blink tr0, ZERO
1578
1579/*
1580 * __kernel_size_t __clear_user(void *addr, __kernel_size_t size)
1581 *
1582 * Inputs:
1583 * (r2) target address
1584 * (r3) size in bytes
1585 *
1586 * Outputs:
1587 * (*r2) zero-ed target data
1588 * (r2) non-zero-ed bytes
1589 */
1590 .global __clear_user
1591__clear_user:
1592 pta ___clear_user_exit, tr1
1593 pta ___clear_user1, tr0
1594 beq/u r3, r63, tr1
1595
1596___clear_user1:
1597 st.b r2, 0, ZERO /* Fault address */
1598 addi r2, 1, r2
1599 addi r3, -1, r3 /* No real fixup required */
1600 bne r3, ZERO, tr0
1601
1602___clear_user_exit:
1603 or r3, ZERO, r2
1604 ptabs LINK, tr0
1605 blink tr0, ZERO
1606
1607
1608/*
1609 * int __strncpy_from_user(unsigned long __dest, unsigned long __src,
1610 * int __count)
1611 *
1612 * Inputs:
1613 * (r2) target address
1614 * (r3) source address
1615 * (r4) maximum size in bytes
1616 *
1617 * Outputs:
1618 * (*r2) copied data
1619 * (r2) -EFAULT (in case of faulting)
1620 * copied data (otherwise)
1621 */
1622 .global __strncpy_from_user
1623__strncpy_from_user:
1624 pta ___strncpy_from_user1, tr0
1625 pta ___strncpy_from_user_done, tr1
1626 or r4, ZERO, r5 /* r5 = original count */
1627 beq/u r4, r63, tr1 /* early exit if r4==0 */
1628 movi -(EFAULT), r6 /* r6 = reply, no real fixup */
1629 or ZERO, ZERO, r7 /* r7 = data, clear top byte of data */
1630
1631___strncpy_from_user1:
1632 ld.b r3, 0, r7 /* Fault address: only in reading */
1633 st.b r2, 0, r7
1634 addi r2, 1, r2
1635 addi r3, 1, r3
1636 beq/u ZERO, r7, tr1
1637 addi r4, -1, r4 /* return real number of copied bytes */
1638 bne/l ZERO, r4, tr0
1639
1640___strncpy_from_user_done:
1641 sub r5, r4, r6 /* If done, return copied */
1642
1643___strncpy_from_user_exit:
1644 or r6, ZERO, r2
1645 ptabs LINK, tr0
1646 blink tr0, ZERO
1647
1648/*
1649 * extern long __strnlen_user(const char *__s, long __n)
1650 *
1651 * Inputs:
1652 * (r2) source address
1653 * (r3) source size in bytes
1654 *
1655 * Outputs:
1656 * (r2) -EFAULT (in case of faulting)
1657 * string length (otherwise)
1658 */
1659 .global __strnlen_user
1660__strnlen_user:
1661 pta ___strnlen_user_set_reply, tr0
1662 pta ___strnlen_user1, tr1
1663 or ZERO, ZERO, r5 /* r5 = counter */
1664 movi -(EFAULT), r6 /* r6 = reply, no real fixup */
1665 or ZERO, ZERO, r7 /* r7 = data, clear top byte of data */
1666 beq r3, ZERO, tr0
1667
1668___strnlen_user1:
1669 ldx.b r2, r5, r7 /* Fault address: only in reading */
1670 addi r3, -1, r3 /* No real fixup */
1671 addi r5, 1, r5
1672 beq r3, ZERO, tr0
1673 bne r7, ZERO, tr1
1674! The line below used to be active. It led to a junk byte lying between each pair
1675! of entries in the argv & envp structures in memory. Whilst the program saw the right data
1676! via the argv and envp arguments to main, it meant the 'flat' representation visible through
1677! /proc/$pid/cmdline was corrupt, causing trouble with ps, for example.
1678! addi r5, 1, r5 /* Include '\0' */
1679
1680___strnlen_user_set_reply:
1681 or r5, ZERO, r6 /* If done, return counter */
1682
1683___strnlen_user_exit:
1684 or r6, ZERO, r2
1685 ptabs LINK, tr0
1686 blink tr0, ZERO
1687
1688/*
1689 * extern long __get_user_asm_?(void *val, long addr)
1690 *
1691 * Inputs:
1692 * (r2) dest address
1693 * (r3) source address (in User Space)
1694 *
1695 * Outputs:
1696 * (r2) -EFAULT (faulting)
1697 * 0 (not faulting)
1698 */
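/* Usage sketch (illustrative): the C-level accessors expand to roughly
 *
 *	long err = __get_user_asm_l(&kval, uaddr);
 *
 * returning 0 on success, or -EFAULT via the __ex_table fixup when the
 * user address faults; kval and uaddr are hypothetical names.
 */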
1699 .global __get_user_asm_b
1700__get_user_asm_b:
1701 or r2, ZERO, r4
1702 movi -(EFAULT), r2 /* r2 = reply, no real fixup */
1703
1704___get_user_asm_b1:
1705 ld.b r3, 0, r5 /* r5 = data */
1706 st.b r4, 0, r5
1707 or ZERO, ZERO, r2
1708
1709___get_user_asm_b_exit:
1710 ptabs LINK, tr0
1711 blink tr0, ZERO
1712
1713
1714 .global __get_user_asm_w
1715__get_user_asm_w:
1716 or r2, ZERO, r4
1717 movi -(EFAULT), r2 /* r2 = reply, no real fixup */
1718
1719___get_user_asm_w1:
1720 ld.w r3, 0, r5 /* r5 = data */
1721 st.w r4, 0, r5
1722 or ZERO, ZERO, r2
1723
1724___get_user_asm_w_exit:
1725 ptabs LINK, tr0
1726 blink tr0, ZERO
1727
1728
1729 .global __get_user_asm_l
1730__get_user_asm_l:
1731 or r2, ZERO, r4
1732 movi -(EFAULT), r2 /* r2 = reply, no real fixup */
1733
1734___get_user_asm_l1:
1735 ld.l r3, 0, r5 /* r5 = data */
1736 st.l r4, 0, r5
1737 or ZERO, ZERO, r2
1738
1739___get_user_asm_l_exit:
1740 ptabs LINK, tr0
1741 blink tr0, ZERO
1742
1743
1744 .global __get_user_asm_q
1745__get_user_asm_q:
1746 or r2, ZERO, r4
1747 movi -(EFAULT), r2 /* r2 = reply, no real fixup */
1748
1749___get_user_asm_q1:
1750 ld.q r3, 0, r5 /* r5 = data */
1751 st.q r4, 0, r5
1752 or ZERO, ZERO, r2
1753
1754___get_user_asm_q_exit:
1755 ptabs LINK, tr0
1756 blink tr0, ZERO
1757
1758/*
1759 * extern long __put_user_asm_?(void *pval, long addr)
1760 *
1761 * Inputs:
1762 * (r2) kernel pointer to value
1763 * (r3) dest address (in User Space)
1764 *
1765 * Outputs:
1766 * (r2) -EFAULT (faulting)
1767 * 0 (not faulting)
1768 */
1769 .global __put_user_asm_b
1770__put_user_asm_b:
1771 ld.b r2, 0, r4 /* r4 = data */
1772 movi -(EFAULT), r2 /* r2 = reply, no real fixup */
1773
1774___put_user_asm_b1:
1775 st.b r3, 0, r4
1776 or ZERO, ZERO, r2
1777
1778___put_user_asm_b_exit:
1779 ptabs LINK, tr0
1780 blink tr0, ZERO
1781
1782
1783 .global __put_user_asm_w
1784__put_user_asm_w:
1785 ld.w r2, 0, r4 /* r4 = data */
1786 movi -(EFAULT), r2 /* r2 = reply, no real fixup */
1787
1788___put_user_asm_w1:
1789 st.w r3, 0, r4
1790 or ZERO, ZERO, r2
1791
1792___put_user_asm_w_exit:
1793 ptabs LINK, tr0
1794 blink tr0, ZERO
1795
1796
1797 .global __put_user_asm_l
1798__put_user_asm_l:
1799 ld.l r2, 0, r4 /* r4 = data */
1800 movi -(EFAULT), r2 /* r2 = reply, no real fixup */
1801
1802___put_user_asm_l1:
1803 st.l r3, 0, r4
1804 or ZERO, ZERO, r2
1805
1806___put_user_asm_l_exit:
1807 ptabs LINK, tr0
1808 blink tr0, ZERO
1809
1810
1811 .global __put_user_asm_q
1812__put_user_asm_q:
1813 ld.q r2, 0, r4 /* r4 = data */
1814 movi -(EFAULT), r2 /* r2 = reply, no real fixup */
1815
1816___put_user_asm_q1:
1817 st.q r3, 0, r4
1818 or ZERO, ZERO, r2
1819
1820___put_user_asm_q_exit:
1821 ptabs LINK, tr0
1822 blink tr0, ZERO
1823
1824panic_stash_regs:
1825	/* The idea is: when we get an unhandled panic, we dump the registers
1826	   to a known memory location, then just sit in a tight loop.
1827 This allows the human to look at the memory region through the GDB
1828 session (assuming the debug module's SHwy initiator isn't locked up
1829 or anything), to hopefully analyze the cause of the panic. */
1830
1831 /* On entry, former r15 (SP) is in DCR
1832	   former r0 is at resvec_save_area + 0
1833	   former r1 is at resvec_save_area + 8
1834	   former tr0 is at resvec_save_area + 32
1835 DCR is the only register whose value is lost altogether.
1836 */
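	/* Dump area layout, as laid down by the stores below: r0..r63 at
	   offsets 0x000-0x1f8, tr0..tr7 at 0x200-0x238, then
	   sr, ssr, pssr, spc, pspc, intevt, expevt, pexpevt, tra, tea,
	   kcr0, kcr1, vbr and resvec from 0x240 onwards. */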
1837
1838 movi 0xffffffff80000000, r0 ! phy of dump area
1839 ld.q SP, 0x000, r1 ! former r0
1840 st.q r0, 0x000, r1
1841 ld.q SP, 0x008, r1 ! former r1
1842 st.q r0, 0x008, r1
1843 st.q r0, 0x010, r2
1844 st.q r0, 0x018, r3
1845 st.q r0, 0x020, r4
1846 st.q r0, 0x028, r5
1847 st.q r0, 0x030, r6
1848 st.q r0, 0x038, r7
1849 st.q r0, 0x040, r8
1850 st.q r0, 0x048, r9
1851 st.q r0, 0x050, r10
1852 st.q r0, 0x058, r11
1853 st.q r0, 0x060, r12
1854 st.q r0, 0x068, r13
1855 st.q r0, 0x070, r14
1856 getcon dcr, r14
1857 st.q r0, 0x078, r14
1858 st.q r0, 0x080, r16
1859 st.q r0, 0x088, r17
1860 st.q r0, 0x090, r18
1861 st.q r0, 0x098, r19
1862 st.q r0, 0x0a0, r20
1863 st.q r0, 0x0a8, r21
1864 st.q r0, 0x0b0, r22
1865 st.q r0, 0x0b8, r23
1866 st.q r0, 0x0c0, r24
1867 st.q r0, 0x0c8, r25
1868 st.q r0, 0x0d0, r26
1869 st.q r0, 0x0d8, r27
1870 st.q r0, 0x0e0, r28
1871 st.q r0, 0x0e8, r29
1872 st.q r0, 0x0f0, r30
1873 st.q r0, 0x0f8, r31
1874 st.q r0, 0x100, r32
1875 st.q r0, 0x108, r33
1876 st.q r0, 0x110, r34
1877 st.q r0, 0x118, r35
1878 st.q r0, 0x120, r36
1879 st.q r0, 0x128, r37
1880 st.q r0, 0x130, r38
1881 st.q r0, 0x138, r39
1882 st.q r0, 0x140, r40
1883 st.q r0, 0x148, r41
1884 st.q r0, 0x150, r42
1885 st.q r0, 0x158, r43
1886 st.q r0, 0x160, r44
1887 st.q r0, 0x168, r45
1888 st.q r0, 0x170, r46
1889 st.q r0, 0x178, r47
1890 st.q r0, 0x180, r48
1891 st.q r0, 0x188, r49
1892 st.q r0, 0x190, r50
1893 st.q r0, 0x198, r51
1894 st.q r0, 0x1a0, r52
1895 st.q r0, 0x1a8, r53
1896 st.q r0, 0x1b0, r54
1897 st.q r0, 0x1b8, r55
1898 st.q r0, 0x1c0, r56
1899 st.q r0, 0x1c8, r57
1900 st.q r0, 0x1d0, r58
1901 st.q r0, 0x1d8, r59
1902 st.q r0, 0x1e0, r60
1903 st.q r0, 0x1e8, r61
1904 st.q r0, 0x1f0, r62
1905 st.q r0, 0x1f8, r63 ! bogus, but for consistency's sake...
1906
1907 ld.q SP, 0x020, r1 ! former tr0
1908 st.q r0, 0x200, r1
1909 gettr tr1, r1
1910 st.q r0, 0x208, r1
1911 gettr tr2, r1
1912 st.q r0, 0x210, r1
1913 gettr tr3, r1
1914 st.q r0, 0x218, r1
1915 gettr tr4, r1
1916 st.q r0, 0x220, r1
1917 gettr tr5, r1
1918 st.q r0, 0x228, r1
1919 gettr tr6, r1
1920 st.q r0, 0x230, r1
1921 gettr tr7, r1
1922 st.q r0, 0x238, r1
1923
1924 getcon sr, r1
1925 getcon ssr, r2
1926 getcon pssr, r3
1927 getcon spc, r4
1928 getcon pspc, r5
1929 getcon intevt, r6
1930 getcon expevt, r7
1931 getcon pexpevt, r8
1932 getcon tra, r9
1933 getcon tea, r10
1934 getcon kcr0, r11
1935 getcon kcr1, r12
1936 getcon vbr, r13
1937 getcon resvec, r14
1938
1939 st.q r0, 0x240, r1
1940 st.q r0, 0x248, r2
1941 st.q r0, 0x250, r3
1942 st.q r0, 0x258, r4
1943 st.q r0, 0x260, r5
1944 st.q r0, 0x268, r6
1945 st.q r0, 0x270, r7
1946 st.q r0, 0x278, r8
1947 st.q r0, 0x280, r9
1948 st.q r0, 0x288, r10
1949 st.q r0, 0x290, r11
1950 st.q r0, 0x298, r12
1951 st.q r0, 0x2a0, r13
1952 st.q r0, 0x2a8, r14
1953
1954 getcon SPC,r2
1955 getcon SSR,r3
1956 getcon EXPEVT,r4
1957 /* Prepare to jump to C - physical address */
1958 movi panic_handler-CONFIG_PAGE_OFFSET, r1
1959 ori r1, 1, r1
1960 ptabs r1, tr0
1961 getcon DCR, SP
1962 blink tr0, ZERO
1963 nop
1964 nop
1965 nop
1966 nop
1967
1968
1969
1970
1971/*
1972 * --- Signal Handling Section
1973 */
1974
1975/*
1976 * extern long long _sa_default_rt_restorer
1977 * extern long long _sa_default_restorer
1978 *
1979 * or, better,
1980 *
1981 * extern void _sa_default_rt_restorer(void)
1982 * extern void _sa_default_restorer(void)
1983 *
1984 * Code prototypes to do a sys_rt_sigreturn() or sys_sigreturn()
1985 * from user space. Copied into user space by signal management.
1986 * Both must be quad aligned and 2 quad long (4 instructions).
1987 *
1988 */
1989 .balign 8
1990 .global sa_default_rt_restorer
1991sa_default_rt_restorer:
1992 movi 0x10, r9
1993 shori __NR_rt_sigreturn, r9
1994 trapa r9
1995 nop
1996
1997 .balign 8
1998 .global sa_default_restorer
1999sa_default_restorer:
2000 movi 0x10, r9
2001 shori __NR_sigreturn, r9
2002 trapa r9
2003 nop
2004
2005/*
2006 * --- __ex_table Section
2007 */
2008
2009/*
2010 * User Access Exception Table.
2011 */
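/*
 * Each entry pairs an instruction that may fault on a user pointer with
 * the address to resume at. On such a fault the handler does, roughly:
 *
 *	if (regs->pc == entry->insn)
 *		regs->pc = entry->fixup;
 *
 * so a bad user pointer yields an error return instead of an oops.
 */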
2012 .section __ex_table, "a"
2013
2014 .global asm_uaccess_start /* Just a marker */
2015asm_uaccess_start:
2016
2017 .long ___copy_user1, ___copy_user_exit
2018 .long ___copy_user2, ___copy_user_exit
2019 .long ___clear_user1, ___clear_user_exit
2020 .long ___strncpy_from_user1, ___strncpy_from_user_exit
2021 .long ___strnlen_user1, ___strnlen_user_exit
2022 .long ___get_user_asm_b1, ___get_user_asm_b_exit
2023 .long ___get_user_asm_w1, ___get_user_asm_w_exit
2024 .long ___get_user_asm_l1, ___get_user_asm_l_exit
2025 .long ___get_user_asm_q1, ___get_user_asm_q_exit
2026 .long ___put_user_asm_b1, ___put_user_asm_b_exit
2027 .long ___put_user_asm_w1, ___put_user_asm_w_exit
2028 .long ___put_user_asm_l1, ___put_user_asm_l_exit
2029 .long ___put_user_asm_q1, ___put_user_asm_q_exit
2030
2031 .global asm_uaccess_end /* Just a marker */
2032asm_uaccess_end:
2033
2034
2035
2036
2037/*
2038 * --- .text.init Section
2039 */
2040
2041 .section .text.init, "ax"
2042
2043/*
2044 * void trap_init (void)
2045 *
2046 */
2047 .global trap_init
2048trap_init:
2049 addi SP, -24, SP /* Room to save r28/r29/r30 */
2050 st.q SP, 0, r28
2051 st.q SP, 8, r29
2052 st.q SP, 16, r30
2053
2054 /* Set VBR and RESVEC */
2055 movi LVBR_block, r19
2056 andi r19, -4, r19 /* reset MMUOFF + reserved */
2057 /* For RESVEC exceptions we force the MMU off, which means we need the
2058 physical address. */
2059 movi LRESVEC_block-CONFIG_PAGE_OFFSET, r20
2060 andi r20, -4, r20 /* reset reserved */
2061 ori r20, 1, r20 /* set MMUOFF */
2062 putcon r19, VBR
2063 putcon r20, RESVEC
2064
2065 /* Sanity check */
2066 movi LVBR_block_end, r21
2067 andi r21, -4, r21
2068 movi BLOCK_SIZE, r29 /* r29 = expected size */
2069 or r19, ZERO, r30
2070 add r19, r29, r19
2071
2072 /*
2073 * Ugly, but better loop forever now than crash afterwards.
2074 * We should print a message, but if we touch LVBR or
2075 * LRESVEC blocks we should not be surprised if we get stuck
2076 * in trap_init().
2077 */
2078 pta trap_init_loop, tr1
2079 gettr tr1, r28 /* r28 = trap_init_loop */
2080 sub r21, r30, r30 /* r30 = actual size */
2081
2082 /*
2083 * VBR/RESVEC handlers overlap by being bigger than
2084 * allowed. Very bad. Just loop forever.
2085 * (r28) panic/loop address
2086 * (r29) expected size
2087 * (r30) actual size
2088 */
2089trap_init_loop:
2090 bne r19, r21, tr1
2091
2092 /* Now that exception vectors are set up reset SR.BL */
2093 getcon SR, r22
2094 movi SR_UNBLOCK_EXC, r23
2095 and r22, r23, r22
2096 putcon r22, SR
2097
2098 addi SP, 24, SP
2099 ptabs LINK, tr0
2100 blink tr0, ZERO
2101
diff --git a/arch/sh/kernel/cpu/sh5/fpu.c b/arch/sh/kernel/cpu/sh5/fpu.c
new file mode 100644
index 000000000000..30b76a94abf2
--- /dev/null
+++ b/arch/sh/kernel/cpu/sh5/fpu.c
@@ -0,0 +1,166 @@
1/*
2 * arch/sh/kernel/cpu/sh5/fpu.c
3 *
4 * Copyright (C) 2001 Manuela Cirronis, Paolo Alberelli
5 * Copyright (C) 2002 STMicroelectronics Limited
6 * Author : Stuart Menefy
7 *
8 * Started from SH4 version:
9 * Copyright (C) 1999, 2000 Kaz Kojima & Niibe Yutaka
10 *
11 * This file is subject to the terms and conditions of the GNU General Public
12 * License. See the file "COPYING" in the main directory of this archive
13 * for more details.
14 */
15#include <linux/sched.h>
16#include <linux/signal.h>
17#include <asm/processor.h>
18#include <asm/user.h>
19#include <asm/io.h>
20
21/*
22 * Initially load the FPU with signalling NaNs. This bit pattern
23 * has the property that no matter whether considered as single or as
24 * double precision, it still represents a signalling NaN.
25 */
26#define sNAN64 0xFFFFFFFFFFFFFFFFULL
27#define sNAN32 0xFFFFFFFFUL
28
29static union sh_fpu_union init_fpuregs = {
30 .hard = {
31 .fp_regs = { [0 ... 63] = sNAN32 },
32 .fpscr = FPSCR_INIT
33 }
34};
35
36void save_fpu(struct task_struct *tsk, struct pt_regs *regs)
37{
38 asm volatile("fst.p %0, (0*8), fp0\n\t"
39 "fst.p %0, (1*8), fp2\n\t"
40 "fst.p %0, (2*8), fp4\n\t"
41 "fst.p %0, (3*8), fp6\n\t"
42 "fst.p %0, (4*8), fp8\n\t"
43 "fst.p %0, (5*8), fp10\n\t"
44 "fst.p %0, (6*8), fp12\n\t"
45 "fst.p %0, (7*8), fp14\n\t"
46 "fst.p %0, (8*8), fp16\n\t"
47 "fst.p %0, (9*8), fp18\n\t"
48 "fst.p %0, (10*8), fp20\n\t"
49 "fst.p %0, (11*8), fp22\n\t"
50 "fst.p %0, (12*8), fp24\n\t"
51 "fst.p %0, (13*8), fp26\n\t"
52 "fst.p %0, (14*8), fp28\n\t"
53 "fst.p %0, (15*8), fp30\n\t"
54 "fst.p %0, (16*8), fp32\n\t"
55 "fst.p %0, (17*8), fp34\n\t"
56 "fst.p %0, (18*8), fp36\n\t"
57 "fst.p %0, (19*8), fp38\n\t"
58 "fst.p %0, (20*8), fp40\n\t"
59 "fst.p %0, (21*8), fp42\n\t"
60 "fst.p %0, (22*8), fp44\n\t"
61 "fst.p %0, (23*8), fp46\n\t"
62 "fst.p %0, (24*8), fp48\n\t"
63 "fst.p %0, (25*8), fp50\n\t"
64 "fst.p %0, (26*8), fp52\n\t"
65 "fst.p %0, (27*8), fp54\n\t"
66 "fst.p %0, (28*8), fp56\n\t"
67 "fst.p %0, (29*8), fp58\n\t"
68 "fst.p %0, (30*8), fp60\n\t"
69 "fst.p %0, (31*8), fp62\n\t"
70
71 "fgetscr fr63\n\t"
72 "fst.s %0, (32*8), fr63\n\t"
73 : /* no output */
74 : "r" (&tsk->thread.fpu.hard)
75 : "memory");
76}
77
78static inline void
79fpload(struct sh_fpu_hard_struct *fpregs)
80{
81 asm volatile("fld.p %0, (0*8), fp0\n\t"
82 "fld.p %0, (1*8), fp2\n\t"
83 "fld.p %0, (2*8), fp4\n\t"
84 "fld.p %0, (3*8), fp6\n\t"
85 "fld.p %0, (4*8), fp8\n\t"
86 "fld.p %0, (5*8), fp10\n\t"
87 "fld.p %0, (6*8), fp12\n\t"
88 "fld.p %0, (7*8), fp14\n\t"
89 "fld.p %0, (8*8), fp16\n\t"
90 "fld.p %0, (9*8), fp18\n\t"
91 "fld.p %0, (10*8), fp20\n\t"
92 "fld.p %0, (11*8), fp22\n\t"
93 "fld.p %0, (12*8), fp24\n\t"
94 "fld.p %0, (13*8), fp26\n\t"
95 "fld.p %0, (14*8), fp28\n\t"
96 "fld.p %0, (15*8), fp30\n\t"
97 "fld.p %0, (16*8), fp32\n\t"
98 "fld.p %0, (17*8), fp34\n\t"
99 "fld.p %0, (18*8), fp36\n\t"
100 "fld.p %0, (19*8), fp38\n\t"
101 "fld.p %0, (20*8), fp40\n\t"
102 "fld.p %0, (21*8), fp42\n\t"
103 "fld.p %0, (22*8), fp44\n\t"
104 "fld.p %0, (23*8), fp46\n\t"
105 "fld.p %0, (24*8), fp48\n\t"
106 "fld.p %0, (25*8), fp50\n\t"
107 "fld.p %0, (26*8), fp52\n\t"
108 "fld.p %0, (27*8), fp54\n\t"
109 "fld.p %0, (28*8), fp56\n\t"
110 "fld.p %0, (29*8), fp58\n\t"
111 "fld.p %0, (30*8), fp60\n\t"
112
113 "fld.s %0, (32*8), fr63\n\t"
114 "fputscr fr63\n\t"
115
116 "fld.p %0, (31*8), fp62\n\t"
117 : /* no output */
118 : "r" (fpregs) );
119}
120
121void fpinit(struct sh_fpu_hard_struct *fpregs)
122{
123 *fpregs = init_fpuregs.hard;
124}
125
126asmlinkage void
127do_fpu_error(unsigned long ex, struct pt_regs *regs)
128{
129 struct task_struct *tsk = current;
130
131 regs->pc += 4;
132
133 tsk->thread.trap_no = 11;
134 tsk->thread.error_code = 0;
135 force_sig(SIGFPE, tsk);
136}
137
138
139asmlinkage void
140do_fpu_state_restore(unsigned long ex, struct pt_regs *regs)
141{
142 void die(const char *str, struct pt_regs *regs, long err);
143
144	if (!user_mode(regs))
145 die("FPU used in kernel", regs, ex);
146
147 regs->sr &= ~SR_FD;
148
149 if (last_task_used_math == current)
150 return;
151
152 enable_fpu();
153 if (last_task_used_math != NULL)
154		/* Another process's FPU state; save it away */
155 save_fpu(last_task_used_math, regs);
156
157 last_task_used_math = current;
158 if (used_math()) {
159 fpload(&current->thread.fpu.hard);
160 } else {
161 /* First time FPU user. */
162 fpload(&init_fpuregs.hard);
163 set_used_math();
164 }
165 disable_fpu();
166}
diff --git a/arch/sh/kernel/cpu/sh5/probe.c b/arch/sh/kernel/cpu/sh5/probe.c
new file mode 100644
index 000000000000..15d167fd0ae7
--- /dev/null
+++ b/arch/sh/kernel/cpu/sh5/probe.c
@@ -0,0 +1,76 @@
1/*
2 * arch/sh/kernel/cpu/sh5/probe.c
3 *
4 * CPU Subtype Probing for SH-5.
5 *
6 * Copyright (C) 2000, 2001 Paolo Alberelli
7 * Copyright (C) 2003 - 2007 Paul Mundt
8 *
9 * This file is subject to the terms and conditions of the GNU General Public
10 * License. See the file "COPYING" in the main directory of this archive
11 * for more details.
12 */
13#include <linux/init.h>
14#include <linux/io.h>
15#include <linux/string.h>
16#include <asm/processor.h>
17#include <asm/cache.h>
18
19int __init detect_cpu_and_cache_system(void)
20{
21 unsigned long long cir;
22
23 /* Do peeks in real mode to avoid having to set up a mapping for the
24 WPC registers. On SH5-101 cut2, such a mapping would be exposed to
25 an address translation erratum which would make it hard to set up
26 correctly. */
27 cir = peek_real_address_q(0x0d000008);
28 if ((cir & 0xffff) == 0x5103) {
29 boot_cpu_data.type = CPU_SH5_103;
30 } else if (((cir >> 32) & 0xffff) == 0x51e2) {
31 /* CPU.VCR aliased at CIR address on SH5-101 */
32 boot_cpu_data.type = CPU_SH5_101;
33 } else {
34 boot_cpu_data.type = CPU_SH_NONE;
35 }
36
37 /*
38 * First, setup some sane values for the I-cache.
39 */
40 boot_cpu_data.icache.ways = 4;
41 boot_cpu_data.icache.sets = 256;
42 boot_cpu_data.icache.linesz = L1_CACHE_BYTES;
43
44#if 0
45 /*
46 * FIXME: This can probably be cleaned up a bit as well.. for example,
47 * do we really need the way shift _and_ the way_step_shift ?? Judging
48 * by the existing code, I would guess no.. is there any valid reason
49 * why we need to be tracking this around?
50 */
51 boot_cpu_data.icache.way_shift = 13;
52 boot_cpu_data.icache.entry_shift = 5;
53 boot_cpu_data.icache.set_shift = 4;
54 boot_cpu_data.icache.way_step_shift = 16;
55 boot_cpu_data.icache.asid_shift = 2;
56
57 /*
58 * way offset = cache size / associativity, so just don't factor in
59 * associativity in the first place..
60 */
61 boot_cpu_data.icache.way_ofs = boot_cpu_data.icache.sets *
62 boot_cpu_data.icache.linesz;
63
64 boot_cpu_data.icache.asid_mask = 0x3fc;
65 boot_cpu_data.icache.idx_mask = 0x1fe0;
66 boot_cpu_data.icache.epn_mask = 0xffffe000;
67#endif
68
69 boot_cpu_data.icache.flags = 0;
70
71 /* A trivial starting point.. */
72 memcpy(&boot_cpu_data.dcache,
73 &boot_cpu_data.icache, sizeof(struct cache_info));
74
75 return 0;
76}
diff --git a/arch/sh/kernel/cpu/sh5/switchto.S b/arch/sh/kernel/cpu/sh5/switchto.S
new file mode 100644
index 000000000000..45c351b0f1ba
--- /dev/null
+++ b/arch/sh/kernel/cpu/sh5/switchto.S
@@ -0,0 +1,198 @@
1/*
2 * arch/sh/kernel/cpu/sh5/switchto.S
3 *
4 * sh64 context switch
5 *
6 * Copyright (C) 2004 Richard Curnow
7 *
8 * This file is subject to the terms and conditions of the GNU General Public
9 * License. See the file "COPYING" in the main directory of this archive
10 * for more details.
11*/
12
13 .section .text..SHmedia32,"ax"
14 .little
15
16 .balign 32
17
18 .type sh64_switch_to,@function
19 .global sh64_switch_to
20 .global __sh64_switch_to_end
21sh64_switch_to:
22
23/* Incoming args
24 r2 - prev
25 r3 - &prev->thread
26 r4 - next
27 r5 - &next->thread
28
29 Outgoing results
30 r2 - last (=prev) : this just stays in r2 throughout
31
32 Want to create a full (struct pt_regs) on the stack to allow backtracing
33 functions to work. However, we only need to populate the callee-save
34 register slots in this structure; since we're a function our ancestors must
35 have themselves preserved all caller saved state in the stack. This saves
36 some wasted effort since we won't need to look at the values.
37
38 In particular, all caller-save registers are immediately available for
39 scratch use.
40
41*/
42
43#define FRAME_SIZE (76*8 + 8)
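
/* A reading of the frame layout below (not an authoritative spec): 8 bytes
 * to save the link register and frame pointer, then room for a full
 * struct pt_regs (pc, sr, syscall_nr, 63 general registers and 8 target
 * registers), with a little slack.
 */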
44
45 movi FRAME_SIZE, r0
46 sub.l r15, r0, r15
47 ! Do normal-style register save to support backtrace
48
49 st.l r15, 0, r18 ! save link reg
50 st.l r15, 4, r14 ! save fp
51 add.l r15, r63, r14 ! setup frame pointer
52
53 ! hopefully this looks normal to the backtrace now.
54
55 addi.l r15, 8, r1 ! base of pt_regs
56 addi.l r1, 24, r0 ! base of pt_regs.regs
57 addi.l r0, (63*8), r8 ! base of pt_regs.trregs
58
59 /* Note : to be fixed?
60 struct pt_regs is really designed for holding the state on entry
61 to an exception, i.e. pc,sr,regs etc. However, for the context
62 switch state, some of this is not required. But the unwinder takes
63 struct pt_regs * as an arg so we have to build this structure
64 to allow unwinding switched tasks in show_state() */
65
66 st.q r0, ( 9*8), r9
67 st.q r0, (10*8), r10
68 st.q r0, (11*8), r11
69 st.q r0, (12*8), r12
70 st.q r0, (13*8), r13
71 st.q r0, (14*8), r14 ! for unwind, want to look as though we took a trap at
72 ! the point where the process is left in suspended animation, i.e. current
73 ! fp here, not the saved one.
74 st.q r0, (16*8), r16
75
76 st.q r0, (24*8), r24
77 st.q r0, (25*8), r25
78 st.q r0, (26*8), r26
79 st.q r0, (27*8), r27
80 st.q r0, (28*8), r28
81 st.q r0, (29*8), r29
82 st.q r0, (30*8), r30
83 st.q r0, (31*8), r31
84 st.q r0, (32*8), r32
85 st.q r0, (33*8), r33
86 st.q r0, (34*8), r34
87 st.q r0, (35*8), r35
88
89 st.q r0, (44*8), r44
90 st.q r0, (45*8), r45
91 st.q r0, (46*8), r46
92 st.q r0, (47*8), r47
93 st.q r0, (48*8), r48
94 st.q r0, (49*8), r49
95 st.q r0, (50*8), r50
96 st.q r0, (51*8), r51
97 st.q r0, (52*8), r52
98 st.q r0, (53*8), r53
99 st.q r0, (54*8), r54
100 st.q r0, (55*8), r55
101 st.q r0, (56*8), r56
102 st.q r0, (57*8), r57
103 st.q r0, (58*8), r58
104 st.q r0, (59*8), r59
105
106 ! do this early as pta->gettr has no pipeline forwarding (=> 5 cycle latency)
107	! Use a local label to avoid creating a symbol that will confuse the
108 ! backtrace
109 pta .Lsave_pc, tr0
110
111 gettr tr5, r45
112 gettr tr6, r46
113 gettr tr7, r47
114 st.q r8, (5*8), r45
115 st.q r8, (6*8), r46
116 st.q r8, (7*8), r47
117
118 ! Now switch context
119 gettr tr0, r9
120 st.l r3, 0, r15 ! prev->thread.sp
121 st.l r3, 8, r1 ! prev->thread.kregs
122 st.l r3, 4, r9 ! prev->thread.pc
123 st.q r1, 0, r9 ! save prev->thread.pc into pt_regs->pc
124
125 ! Load PC for next task (init value or save_pc later)
126 ld.l r5, 4, r18 ! next->thread.pc
127 ! Switch stacks
128 ld.l r5, 0, r15 ! next->thread.sp
129 ptabs r18, tr0
130
131 ! Update current
132 ld.l r4, 4, r9 ! next->thread_info (2nd element of next task_struct)
133 putcon r9, kcr0 ! current = next->thread_info
134
135 ! go to save_pc for a reschedule, or the initial thread.pc for a new process
136 blink tr0, r63
137
138 ! Restore (when we come back to a previously saved task)
139.Lsave_pc:
140 addi.l r15, 32, r0 ! r0 = next's regs
141 addi.l r0, (63*8), r8 ! r8 = next's tr_regs
142
143 ld.q r8, (5*8), r45
144 ld.q r8, (6*8), r46
145 ld.q r8, (7*8), r47
146 ptabs r45, tr5
147 ptabs r46, tr6
148 ptabs r47, tr7
149
150 ld.q r0, ( 9*8), r9
151 ld.q r0, (10*8), r10
152 ld.q r0, (11*8), r11
153 ld.q r0, (12*8), r12
154 ld.q r0, (13*8), r13
155 ld.q r0, (14*8), r14
156 ld.q r0, (16*8), r16
157
158 ld.q r0, (24*8), r24
159 ld.q r0, (25*8), r25
160 ld.q r0, (26*8), r26
161 ld.q r0, (27*8), r27
162 ld.q r0, (28*8), r28
163 ld.q r0, (29*8), r29
164 ld.q r0, (30*8), r30
165 ld.q r0, (31*8), r31
166 ld.q r0, (32*8), r32
167 ld.q r0, (33*8), r33
168 ld.q r0, (34*8), r34
169 ld.q r0, (35*8), r35
170
171 ld.q r0, (44*8), r44
172 ld.q r0, (45*8), r45
173 ld.q r0, (46*8), r46
174 ld.q r0, (47*8), r47
175 ld.q r0, (48*8), r48
176 ld.q r0, (49*8), r49
177 ld.q r0, (50*8), r50
178 ld.q r0, (51*8), r51
179 ld.q r0, (52*8), r52
180 ld.q r0, (53*8), r53
181 ld.q r0, (54*8), r54
182 ld.q r0, (55*8), r55
183 ld.q r0, (56*8), r56
184 ld.q r0, (57*8), r57
185 ld.q r0, (58*8), r58
186 ld.q r0, (59*8), r59
187
188 ! epilogue
189 ld.l r15, 0, r18
190 ld.l r15, 4, r14
191 ptabs r18, tr0
192 movi FRAME_SIZE, r0
193 add r15, r0, r15
194 blink tr0, r63
195__sh64_switch_to_end:
196.LFE1:
197 .size sh64_switch_to,.LFE1-sh64_switch_to
198
diff --git a/arch/sh/kernel/cpu/sh5/unwind.c b/arch/sh/kernel/cpu/sh5/unwind.c
new file mode 100644
index 000000000000..119c20afd4e5
--- /dev/null
+++ b/arch/sh/kernel/cpu/sh5/unwind.c
@@ -0,0 +1,326 @@
1/*
2 * arch/sh/kernel/cpu/sh5/unwind.c
3 *
4 * Copyright (C) 2004 Paul Mundt
5 * Copyright (C) 2004 Richard Curnow
6 *
7 * This file is subject to the terms and conditions of the GNU General Public
8 * License. See the file "COPYING" in the main directory of this archive
9 * for more details.
10 */
11#include <linux/kallsyms.h>
12#include <linux/kernel.h>
13#include <linux/types.h>
14#include <linux/errno.h>
15#include <asm/page.h>
16#include <asm/ptrace.h>
17#include <asm/processor.h>
18#include <asm/io.h>
19
20static u8 regcache[63];
21
22/*
23 * Finding the previous stack frame isn't as straightforward as it is
24 * on some other platforms. In the sh64 case, we don't have "linked" stack
25 * frames, so we need to do a bit of work to determine the previous frame,
26 * and in turn, the previous r14/r18 pair.
27 *
28 * There are generally a few cases which determine where we can find out
29 * the r14/r18 values. In the general case, this can be determined by poking
30 * around the prologue of the symbol PC is in (note that we absolutely must
31 * have frame pointer support as well as the kernel symbol table mapped,
32 * otherwise we can't even get this far).
33 *
34 * In other cases, such as the interrupt/exception path, we can poke around
35 * the sp/fp.
36 *
37 * Notably, this entire approach is somewhat error prone, and in the event
38 * that the previous frame cannot be determined, we simply give up.
39 * Either way, this still leaves us with a more correct backtrace than what
40 * we would be able to come up with by walking the stack (which is garbage
41 * for anything beyond the first frame).
42 * -- PFM.
43 */
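/*
 * Contract, as exercised by sh64_unwind_inner() below: the caller may
 * pre-seed *pprev_pc for a leaf innermost frame whose r18 was never
 * spilled; on success *pprev_fp receives the caller's frame pointer
 * and, when an r18 save was found, *pprev_pc the caller's PC.
 */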
44static int lookup_prev_stack_frame(unsigned long fp, unsigned long pc,
45 unsigned long *pprev_fp, unsigned long *pprev_pc,
46 struct pt_regs *regs)
47{
48 const char *sym;
49 char namebuf[128];
50 unsigned long offset;
51 unsigned long prologue = 0;
52 unsigned long fp_displacement = 0;
53 unsigned long fp_prev = 0;
54 unsigned long offset_r14 = 0, offset_r18 = 0;
55 int i, found_prologue_end = 0;
56
57 sym = kallsyms_lookup(pc, NULL, &offset, NULL, namebuf);
58 if (!sym)
59 return -EINVAL;
60
61 prologue = pc - offset;
62 if (!prologue)
63 return -EINVAL;
64
65 /* Validate fp, to avoid risk of dereferencing a bad pointer later.
66 Assume 128Mb since that's the amount of RAM on a Cayman. Modify
67 when there is an SH-5 board with more. */
68 if ((fp < (unsigned long) phys_to_virt(__MEMORY_START)) ||
69 (fp >= (unsigned long)(phys_to_virt(__MEMORY_START)) + 128*1024*1024) ||
70 ((fp & 7) != 0)) {
71 return -EINVAL;
72 }
73
74 /*
75 * Depth to walk; the 100-iteration limit is completely arbitrary.
76 */
77 for (i = 0; i < 100; i++, prologue += sizeof(unsigned long)) {
78 unsigned long op;
79 u8 major, minor;
80 u8 src, dest, disp;
81
82 op = *(unsigned long *)prologue;
83
84 major = (op >> 26) & 0x3f;
85 src = (op >> 20) & 0x3f;
86 minor = (op >> 16) & 0xf;
87 disp = (op >> 10) & 0x3f;
88 dest = (op >> 4) & 0x3f;
89
90 /*
91 * Stack frame creation happens in a number of ways. In the
92 * general case, when the stack frame is less than 511 bytes,
93 * it's created by an addi or addi.l:
94 *
95 * addi/addi.l r15, -FRAME_SIZE, r15
96 *
97 * in the event that the frame size is bigger than this, it's
98 * typically created using a movi/sub pair as follows:
99 *
100 * movi FRAME_SIZE, rX
101 * sub r15, rX, r15
102 */
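		/*
		 * A worked example (hypothetical prologue for a 64-byte frame):
		 *
		 *	addi.l	r15, -64, r15	! fp_displacement = -64, fp_prev = fp + 64
		 *	st.l	r15, 0, r18	! offset_r18 = 0*4 + (-64) = -64
		 *	st.l	r15, 4, r14	! offset_r14 = 1*4 + (-64) = -60
		 *	add.l	r15, r63, r14	! found_prologue_end
		 *
		 * (st.l encodes its displacement pre-scaled by 4, hence
		 * fields 0 and 1.)  The saved pair is then fetched from
		 * fp_prev - 64 and fp_prev - 60, i.e. the current fp + 0
		 * and fp + 4 -- exactly where the standard prologue
		 * spilled r18 and r14.
		 */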
103
104 switch (major) {
105 case (0x00 >> 2):
106 switch (minor) {
107 case 0x8: /* add.l */
108 case 0x9: /* add */
109 /* Look for r15, r63, r14 */
110 if (src == 15 && disp == 63 && dest == 14)
111 found_prologue_end = 1;
112
113 break;
114 case 0xa: /* sub.l */
115 case 0xb: /* sub */
116 if (src != 15 || dest != 15)
117 continue;
118
119 fp_displacement -= regcache[disp];
120 fp_prev = fp - fp_displacement;
121 break;
122 }
123 break;
124 case (0xa8 >> 2): /* st.l */
125 if (src != 15)
126 continue;
127
128 switch (dest) {
129 case 14:
130 if (offset_r14 || fp_displacement == 0)
131 continue;
132
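				/* Sign-extend the 10-bit displacement
				   field, then scale it to bytes. */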
133 offset_r14 = (u64)(((((s64)op >> 10) & 0x3ff) << 54) >> 54);
134 offset_r14 *= sizeof(unsigned long);
135 offset_r14 += fp_displacement;
136 break;
137 case 18:
138 if (offset_r18 || fp_displacement == 0)
139 continue;
140
141 offset_r18 = (u64)(((((s64)op >> 10) & 0x3ff) << 54) >> 54);
142 offset_r18 *= sizeof(unsigned long);
143 offset_r18 += fp_displacement;
144 break;
145 }
146
147 break;
148 case (0xcc >> 2): /* movi */
149 if (dest >= 63) {
150 printk(KERN_NOTICE "%s: Invalid dest reg %d "
151 "specified in movi handler. Failed "
152 "opcode was 0x%lx: ", __FUNCTION__,
153 dest, op);
154
155 continue;
156 }
157
158 /* Sign extend */
159 regcache[dest] =
160 ((((s64)(u64)op >> 10) & 0xffff) << 54) >> 54;
161 break;
162 case (0xd0 >> 2): /* addi */
163 case (0xd4 >> 2): /* addi.l */
164 /* Look for r15, -FRAME_SIZE, r15 */
165 if (src != 15 || dest != 15)
166 continue;
167
168 /* Sign extended frame size.. */
169 fp_displacement +=
170 (u64)(((((s64)op >> 10) & 0x3ff) << 54) >> 54);
171 fp_prev = fp - fp_displacement;
172 break;
173 }
174
175 if (found_prologue_end && offset_r14 && (offset_r18 || *pprev_pc) && fp_prev)
176 break;
177 }
178
179 if (offset_r14 == 0 || fp_prev == 0) {
180 if (!offset_r14)
181 pr_debug("Unable to find r14 offset\n");
182 if (!fp_prev)
183 pr_debug("Unable to find previous fp\n");
184
185 return -EINVAL;
186 }
187
188 /* For the innermost leaf function, there might not be an offset_r18 */
189 if (!*pprev_pc && (offset_r18 == 0))
190 return -EINVAL;
191
192 *pprev_fp = *(unsigned long *)(fp_prev + offset_r14);
193
194 if (offset_r18)
195 *pprev_pc = *(unsigned long *)(fp_prev + offset_r18);
196
197 *pprev_pc &= ~1;
198
199 return 0;
200}
201
202/* Don't put this on the stack since we'll want to call sh64_unwind
203 * when we're close to overflowing the stack anyway. */
204static struct pt_regs here_regs;
205
206extern const char syscall_ret;
207extern const char ret_from_syscall;
208extern const char ret_from_exception;
209extern const char ret_from_irq;
210
211static void sh64_unwind_inner(struct pt_regs *regs);
212
213static void unwind_nested(unsigned long pc, unsigned long fp)
214{
215 if ((fp >= __MEMORY_START) &&
216 ((fp & 7) == 0)) {
217 sh64_unwind_inner((struct pt_regs *) fp);
218 }
219}
220
221static void sh64_unwind_inner(struct pt_regs *regs)
222{
223 unsigned long pc, fp;
224 int ofs = 0;
225 int first_pass;
226
227 pc = regs->pc & ~1;
228 fp = regs->regs[14];
229
230 first_pass = 1;
231 for (;;) {
232 int cond;
233 unsigned long next_fp, next_pc;
234
235 if (pc == ((unsigned long) &syscall_ret & ~1)) {
236 printk("SYSCALL\n");
237 unwind_nested(pc,fp);
238 return;
239 }
240
241 if (pc == ((unsigned long) &ret_from_syscall & ~1)) {
242 printk("SYSCALL (PREEMPTED)\n");
243 unwind_nested(pc,fp);
244 return;
245 }
246
247 /* In this case, the PC is discovered by lookup_prev_stack_frame but
248 it has 4 taken off it to look like the 'caller' */
249 if (pc == ((unsigned long) &ret_from_exception & ~1)) {
250 printk("EXCEPTION\n");
251 unwind_nested(pc,fp);
252 return;
253 }
254
255 if (pc == ((unsigned long) &ret_from_irq & ~1)) {
256 printk("IRQ\n");
257 unwind_nested(pc,fp);
258 return;
259 }
260
261 cond = ((pc >= __MEMORY_START) && (fp >= __MEMORY_START) &&
262 ((pc & 3) == 0) && ((fp & 7) == 0));
263
264 pc -= ofs;
265
266 printk("[<%08lx>] ", pc);
267 print_symbol("%s\n", pc);
268
269 if (first_pass) {
270 /* If the innermost frame is a leaf function, it's
271 * possible that r18 is never saved out to the stack.
272 */
273 next_pc = regs->regs[18];
274 } else {
275 next_pc = 0;
276 }
277
278 if (lookup_prev_stack_frame(fp, pc, &next_fp, &next_pc, regs) == 0) {
279 ofs = sizeof(unsigned long);
280 pc = next_pc & ~1;
281 fp = next_fp;
282 } else {
283 printk("Unable to lookup previous stack frame\n");
284 break;
285 }
286 first_pass = 0;
287 }
288
289 printk("\n");
290
291}
292
293void sh64_unwind(struct pt_regs *regs)
294{
295 if (!regs) {
296 /*
297 * Fetch current regs if we have no other saved state to back
298 * trace from.
299 */
300 regs = &here_regs;
301
302 __asm__ __volatile__ ("ori r14, 0, %0" : "=r" (regs->regs[14]));
303 __asm__ __volatile__ ("ori r15, 0, %0" : "=r" (regs->regs[15]));
304 __asm__ __volatile__ ("ori r18, 0, %0" : "=r" (regs->regs[18]));
305
306 __asm__ __volatile__ ("gettr tr0, %0" : "=r" (regs->tregs[0]));
307 __asm__ __volatile__ ("gettr tr1, %0" : "=r" (regs->tregs[1]));
308 __asm__ __volatile__ ("gettr tr2, %0" : "=r" (regs->tregs[2]));
309 __asm__ __volatile__ ("gettr tr3, %0" : "=r" (regs->tregs[3]));
310 __asm__ __volatile__ ("gettr tr4, %0" : "=r" (regs->tregs[4]));
311 __asm__ __volatile__ ("gettr tr5, %0" : "=r" (regs->tregs[5]));
312 __asm__ __volatile__ ("gettr tr6, %0" : "=r" (regs->tregs[6]));
313 __asm__ __volatile__ ("gettr tr7, %0" : "=r" (regs->tregs[7]));
314
315 __asm__ __volatile__ (
316 "pta 0f, tr0\n\t"
317 "blink tr0, %0\n\t"
318 "0: nop"
319 : "=r" (regs->pc)
320 );
321 }
322
323 printk("\nCall Trace:\n");
324 sh64_unwind_inner(regs);
325}
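/*
 * Typical use, as a sketch: sh64_unwind(NULL) from a debug or panic
 * path traces the current context via the register snapshot above,
 * while passing the pt_regs saved by an exception frame traces the
 * interrupted context instead.
 */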
326
diff --git a/arch/sh/kernel/dump_task.c b/arch/sh/kernel/dump_task.c
new file mode 100644
index 000000000000..4a8a4083ff0b
--- /dev/null
+++ b/arch/sh/kernel/dump_task.c
@@ -0,0 +1,31 @@
1#include <linux/elfcore.h>
2#include <linux/sched.h>
3
4/*
5 * Capture the user space registers if the task is not running (in user space)
6 */
7int dump_task_regs(struct task_struct *tsk, elf_gregset_t *regs)
8{
9 struct pt_regs ptregs;
10
11 ptregs = *task_pt_regs(tsk);
12 elf_core_copy_regs(regs, &ptregs);
13
14 return 1;
15}
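/* (Both helpers in this file are the arch hooks behind the ELF core
 * dump code in fs/binfmt_elf.c -- the ELF_CORE_COPY_TASK_REGS and
 * ELF_CORE_COPY_FPREGS macros, at least in kernels of this vintage --
 * used for threads other than the faulting one.) */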
16
17int dump_task_fpu(struct task_struct *tsk, elf_fpregset_t *fpu)
18{
19 int fpvalid = 0;
20
21#if defined(CONFIG_SH_FPU)
22 fpvalid = !!tsk_used_math(tsk);
23 if (fpvalid) {
24 unlazy_fpu(tsk, task_pt_regs(tsk));
25 memcpy(fpu, &tsk->thread.fpu.hard, sizeof(*fpu));
26 }
27#endif
28
29 return fpvalid;
30}
31
diff --git a/arch/sh/kernel/early_printk.c b/arch/sh/kernel/early_printk.c
index 2f30977558ad..957f25611543 100644
--- a/arch/sh/kernel/early_printk.c
+++ b/arch/sh/kernel/early_printk.c
@@ -63,7 +63,8 @@ static struct console bios_console = {
63#include <linux/serial_core.h> 63#include <linux/serial_core.h>
64#include "../../../drivers/serial/sh-sci.h" 64#include "../../../drivers/serial/sh-sci.h"
65 65
66#if defined(CONFIG_CPU_SUBTYPE_SH7720) 66#if defined(CONFIG_CPU_SUBTYPE_SH7720) || \
67 defined(CONFIG_CPU_SUBTYPE_SH7721)
67#define EPK_SCSMR_VALUE 0x000 68#define EPK_SCSMR_VALUE 0x000
68#define EPK_SCBRR_VALUE 0x00C 69#define EPK_SCBRR_VALUE 0x00C
69#define EPK_FIFO_SIZE 64 70#define EPK_FIFO_SIZE 64
@@ -117,7 +118,8 @@ static struct console scif_console = {
117}; 118};
118 119
119#if !defined(CONFIG_SH_STANDARD_BIOS) 120#if !defined(CONFIG_SH_STANDARD_BIOS)
120#if defined(CONFIG_CPU_SUBTYPE_SH7720) 121#if defined(CONFIG_CPU_SUBTYPE_SH7720) || \
122 defined(CONFIG_CPU_SUBTYPE_SH7721)
121static void scif_sercon_init(char *s) 123static void scif_sercon_init(char *s)
122{ 124{
123 sci_out(&scif_port, SCSCR, 0x0000); /* clear TE and RE */ 125 sci_out(&scif_port, SCSCR, 0x0000); /* clear TE and RE */
@@ -208,10 +210,12 @@ static int __init setup_early_printk(char *buf)
208 if (!strncmp(buf, "serial", 6)) { 210 if (!strncmp(buf, "serial", 6)) {
209 early_console = &scif_console; 211 early_console = &scif_console;
210 212
211#if (defined(CONFIG_CPU_SH4) || defined(CONFIG_CPU_SUBTYPE_SH7720)) && \ 213#if !defined(CONFIG_SH_STANDARD_BIOS)
212 !defined(CONFIG_SH_STANDARD_BIOS) 214#if defined(CONFIG_CPU_SH4) || defined(CONFIG_CPU_SUBTYPE_SH7720) || \
215 defined(CONFIG_CPU_SUBTYPE_SH7721)
213 scif_sercon_init(buf + 6); 216 scif_sercon_init(buf + 6);
214#endif 217#endif
218#endif
215 } 219 }
216#endif 220#endif
217 221
diff --git a/arch/sh/kernel/entry-common.S b/arch/sh/kernel/entry-common.S
index e0317ed080c3..926b2e7b11c1 100644
--- a/arch/sh/kernel/entry-common.S
+++ b/arch/sh/kernel/entry-common.S
@@ -176,25 +176,6 @@ work_notifysig:
176 jmp @r1 176 jmp @r1
177 lds r0, pr 177 lds r0, pr
178work_resched: 178work_resched:
179#if defined(CONFIG_GUSA) && !defined(CONFIG_PREEMPT)
180 ! gUSA handling
181 mov.l @(OFF_SP,r15), r0 ! get user space stack pointer
182 mov r0, r1
183 shll r0
184 bf/s 1f
185 shll r0
186 bf/s 1f
187 mov #OFF_PC, r0
188 ! SP >= 0xc0000000 : gUSA mark
189 mov.l @(r0,r15), r2 ! get user space PC (program counter)
190 mov.l @(OFF_R0,r15), r3 ! end point
191 cmp/hs r3, r2 ! r2 >= r3?
192 bt 1f
193 add r3, r1 ! rewind point #2
194 mov.l r1, @(r0,r15) ! reset PC to rewind point #2
195 !
1961:
197#endif
198 mov.l 1f, r1 179 mov.l 1f, r1
199 jsr @r1 ! schedule 180 jsr @r1 ! schedule
200 nop 181 nop
@@ -224,7 +205,7 @@ work_resched:
224syscall_exit_work: 205syscall_exit_work:
225 ! r0: current_thread_info->flags 206 ! r0: current_thread_info->flags
226 ! r8: current_thread_info 207 ! r8: current_thread_info
227 tst #_TIF_SYSCALL_TRACE | _TIF_SINGLESTEP, r0 208 tst #_TIF_SYSCALL_TRACE | _TIF_SINGLESTEP | _TIF_SYSCALL_AUDIT, r0
228 bt/s work_pending 209 bt/s work_pending
229 tst #_TIF_NEED_RESCHED, r0 210 tst #_TIF_NEED_RESCHED, r0
230#ifdef CONFIG_TRACE_IRQFLAGS 211#ifdef CONFIG_TRACE_IRQFLAGS
@@ -234,6 +215,8 @@ syscall_exit_work:
234#endif 215#endif
235 sti 216 sti
236 ! XXX setup arguments... 217 ! XXX setup arguments...
218 mov r15, r4
219 mov #1, r5
237 mov.l 4f, r0 ! do_syscall_trace 220 mov.l 4f, r0 ! do_syscall_trace
238 jsr @r0 221 jsr @r0
239 nop 222 nop
@@ -244,6 +227,8 @@ syscall_exit_work:
244syscall_trace_entry: 227syscall_trace_entry:
245 ! Yes it is traced. 228 ! Yes it is traced.
246 ! XXX setup arguments... 229 ! XXX setup arguments...
230 mov r15, r4
231 mov #0, r5
247 mov.l 4f, r11 ! Call do_syscall_trace which notifies 232 mov.l 4f, r11 ! Call do_syscall_trace which notifies
248 jsr @r11 ! superior (will chomp R[0-7]) 233 jsr @r11 ! superior (will chomp R[0-7])
249 nop 234 nop
@@ -366,7 +351,7 @@ ENTRY(system_call)
366 ! 351 !
367 get_current_thread_info r8, r10 352 get_current_thread_info r8, r10
368 mov.l @(TI_FLAGS,r8), r8 353 mov.l @(TI_FLAGS,r8), r8
369 mov #_TIF_SYSCALL_TRACE, r10 354 mov #(_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT), r10
370 tst r10, r8 355 tst r10, r8
371 bf syscall_trace_entry 356 bf syscall_trace_entry
372 ! 357 !
diff --git a/arch/sh/kernel/head.S b/arch/sh/kernel/head_32.S
index 3338239717f1..d67d7ed09f22 100644
--- a/arch/sh/kernel/head.S
+++ b/arch/sh/kernel/head_32.S
@@ -32,7 +32,11 @@ ENTRY(empty_zero_page)
32 .long 1 /* LOADER_TYPE */ 32 .long 1 /* LOADER_TYPE */
33 .long 0x00360000 /* INITRD_START */ 33 .long 0x00360000 /* INITRD_START */
34 .long 0x000a0000 /* INITRD_SIZE */ 34 .long 0x000a0000 /* INITRD_SIZE */
35 .long 0 35#ifdef CONFIG_32BIT
36 .long 0x53453f00 + 32 /* "SE?" = 32 bit */
37#else
38 .long 0x53453f00 + 29 /* "SE?" = 29 bit */
39#endif
361: 401:
37 .skip PAGE_SIZE - empty_zero_page - 1b 41 .skip PAGE_SIZE - empty_zero_page - 1b
38 42
diff --git a/arch/sh/kernel/head_64.S b/arch/sh/kernel/head_64.S
new file mode 100644
index 000000000000..f42d4c0feb76
--- /dev/null
+++ b/arch/sh/kernel/head_64.S
@@ -0,0 +1,356 @@
1/*
2 * arch/sh/kernel/head_64.S
3 *
4 * Copyright (C) 2000, 2001 Paolo Alberelli
5 * Copyright (C) 2003, 2004 Paul Mundt
6 *
7 * This file is subject to the terms and conditions of the GNU General Public
8 * License. See the file "COPYING" in the main directory of this archive
9 * for more details.
10 */
11#include <asm/page.h>
12#include <asm/cache.h>
13#include <asm/tlb.h>
14#include <asm/cpu/registers.h>
15#include <asm/cpu/mmu_context.h>
16#include <asm/thread_info.h>
17
18/*
19 * MMU defines: TLB boundaries.
20 */
21
22#define MMUIR_FIRST ITLB_FIXED
23#define MMUIR_END ITLB_LAST_VAR_UNRESTRICTED+TLB_STEP
24#define MMUIR_STEP TLB_STEP
25
26#define MMUDR_FIRST DTLB_FIXED
27#define MMUDR_END DTLB_LAST_VAR_UNRESTRICTED+TLB_STEP
28#define MMUDR_STEP TLB_STEP
29
30/* Safety check : CONFIG_PAGE_OFFSET has to be a multiple of 512Mb */
31#if (CONFIG_PAGE_OFFSET & ((1UL<<29)-1))
32#error "CONFIG_PAGE_OFFSET must be a multiple of 512Mb"
33#endif
34
35/*
36 * MMU defines: Fixed TLBs.
37 */
38/* Deal safely with the case where the base of RAM is not 512Mb aligned */
39
40#define ALIGN_512M_MASK (0xffffffffe0000000)
41#define ALIGNED_EFFECTIVE ((CONFIG_PAGE_OFFSET + CONFIG_MEMORY_START) & ALIGN_512M_MASK)
42#define ALIGNED_PHYSICAL (CONFIG_MEMORY_START & ALIGN_512M_MASK)
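/* Example with hypothetical values: CONFIG_PAGE_OFFSET = 0x80000000 and
   CONFIG_MEMORY_START = 0x0c000000 give ALIGNED_EFFECTIVE = 0x80000000
   and ALIGNED_PHYSICAL = 0, so a single 512Mb translation still covers
   a kernel loaded 192Mb into the region. */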
43
44#define MMUIR_TEXT_H (0x0000000000000003 | ALIGNED_EFFECTIVE)
45 /* Enabled, Shared, ASID 0, Eff. Add. 0xA0000000 */
46
47#define MMUIR_TEXT_L (0x000000000000009a | ALIGNED_PHYSICAL)
48 /* 512 Mb, Cacheable, Write-back, execute, Not User, Ph. Add. */
49
50#define MMUDR_CACHED_H 0x0000000000000003 | ALIGNED_EFFECTIVE
51 /* Enabled, Shared, ASID 0, Eff. Add. 0xA0000000 */
52#define MMUDR_CACHED_L 0x000000000000015a | ALIGNED_PHYSICAL
53 /* 512 Mb, Cacheable, Write-back, read/write, Not User, Ph. Add. */
54
55#ifdef CONFIG_CACHE_OFF
56#define ICCR0_INIT_VAL ICCR0_OFF /* ICACHE off */
57#else
58#define ICCR0_INIT_VAL ICCR0_ON | ICCR0_ICI /* ICE + ICI */
59#endif
60#define ICCR1_INIT_VAL ICCR1_NOLOCK /* No locking */
61
62#if defined (CONFIG_CACHE_OFF)
63#define OCCR0_INIT_VAL OCCR0_OFF /* D-cache: off */
64#elif defined (CONFIG_CACHE_WRITETHROUGH)
65#define OCCR0_INIT_VAL OCCR0_ON | OCCR0_OCI | OCCR0_WT /* D-cache: on, */
66 /* WT, invalidate */
67#elif defined (CONFIG_CACHE_WRITEBACK)
68#define OCCR0_INIT_VAL OCCR0_ON | OCCR0_OCI | OCCR0_WB /* D-cache: on, */
69 /* WB, invalidate */
70#else
71#error preprocessor flag CONFIG_CACHE_... not recognized!
72#endif
73
74#define OCCR1_INIT_VAL OCCR1_NOLOCK /* No locking */
75
76 .section .empty_zero_page, "aw"
77 .global empty_zero_page
78
79empty_zero_page:
80 .long 1 /* MOUNT_ROOT_RDONLY */
81 .long 0 /* RAMDISK_FLAGS */
82 .long 0x0200 /* ORIG_ROOT_DEV */
83 .long 1 /* LOADER_TYPE */
84 .long 0x00800000 /* INITRD_START */
85 .long 0x00800000 /* INITRD_SIZE */
86 .long 0
87
88 .text
89 .balign 4096,0,4096
90
91 .section .data, "aw"
92 .balign PAGE_SIZE
93
94 .section .data, "aw"
95 .balign PAGE_SIZE
96
97 .global mmu_pdtp_cache
98mmu_pdtp_cache:
99 .space PAGE_SIZE, 0
100
101 .global empty_bad_page
102empty_bad_page:
103 .space PAGE_SIZE, 0
104
105 .global empty_bad_pte_table
106empty_bad_pte_table:
107 .space PAGE_SIZE, 0
108
109 .global fpu_in_use
110fpu_in_use: .quad 0
111
112
113 .section .text.head, "ax"
114 .balign L1_CACHE_BYTES
115/*
116 * Condition at the entry of __stext:
117 * . Reset state:
118 * . SR.FD = 1 (FPU disabled)
119 * . SR.BL = 1 (Exceptions disabled)
120 * . SR.MD = 1 (Privileged Mode)
121 * . SR.MMU = 0 (MMU Disabled)
122 * . SR.CD = 0 (CTC User Visible)
123 * . SR.IMASK = Undefined (Interrupt Mask)
124 *
125 * Operations supposed to be performed by __stext:
126 * . prevent speculative fetch onto device memory while MMU is off
127 * . reflect the SH5 ABI as much as possible (r15, r26, r27, r18)
128 * . first, save CPU state and set it to something harmless
129 * . any CPU detection and/or endianness settings (?)
130 * . initialize EMI/LMI (but not TMU/RTC/INTC/SCIF): TBD
131 * . set initial TLB entries for cached and uncached regions
132 * (no fine granularity paging)
133 * . set initial cache state
134 * . enable MMU and caches
135 * . set CPU to a consistent state
136 * . registers (including stack pointer and current/KCR0)
137 * . NOT expecting to set Exception handling nor VBR/RESVEC/DCR
138 * at this stage. This is all left to later Linux initialization steps.
139 * . initialize FPU
140 * . clear BSS
141 * . jump into start_kernel()
142 * . be prepared for the hopeless case where start_kernel() returns.
143 *
144 */
145 .global _stext
146_stext:
147 /*
148 * Prevent speculative fetch on device memory due to
149 * uninitialized target registers.
150 */
151 ptabs/u ZERO, tr0
152 ptabs/u ZERO, tr1
153 ptabs/u ZERO, tr2
154 ptabs/u ZERO, tr3
155 ptabs/u ZERO, tr4
156 ptabs/u ZERO, tr5
157 ptabs/u ZERO, tr6
158 ptabs/u ZERO, tr7
159 synci
160
161 /*
162 * Read/Set CPU state. After this block:
163 * r29 = Initial SR
164 */
165 getcon SR, r29
166 movi SR_HARMLESS, r20
167 putcon r20, SR
168
169 /*
170 * Initialize EMI/LMI. To Be Done.
171 */
172
173 /*
174 * CPU detection and/or endianness settings (?). To Be Done.
175 * Pure PIC code here, please ! Just save state into r30.
176 * After this block:
177 * r30 = CPU type/Platform Endianness
178 */
179
180 /*
181 * Set initial TLB entries for cached and uncached regions.
182 * Note: PTA/BLINK is PIC code, PTABS/BLINK isn't !
183 */
184 /* Clear ITLBs */
185 pta clear_ITLB, tr1
186 movi MMUIR_FIRST, r21
187 movi MMUIR_END, r22
188clear_ITLB:
189 putcfg r21, 0, ZERO /* Clear MMUIR[n].PTEH.V */
190 addi r21, MMUIR_STEP, r21
191 bne r21, r22, tr1
192
193 /* Clear DTLBs */
194 pta clear_DTLB, tr1
195 movi MMUDR_FIRST, r21
196 movi MMUDR_END, r22
197clear_DTLB:
198 putcfg r21, 0, ZERO /* Clear MMUDR[n].PTEH.V */
199 addi r21, MMUDR_STEP, r21
200 bne r21, r22, tr1
201
202 /* Map one big (512Mb) page for ITLB */
203 movi MMUIR_FIRST, r21
204 movi MMUIR_TEXT_L, r22 /* PTEL first */
205 add.l r22, r63, r22 /* Sign extend */
206 putcfg r21, 1, r22 /* Set MMUIR[0].PTEL */
207 movi MMUIR_TEXT_H, r22 /* PTEH last */
208 add.l r22, r63, r22 /* Sign extend */
209 putcfg r21, 0, r22 /* Set MMUIR[0].PTEH */
210
211 /* Map one big CACHED (512Mb) page for DTLB */
212 movi MMUDR_FIRST, r21
213 movi MMUDR_CACHED_L, r22 /* PTEL first */
214 add.l r22, r63, r22 /* Sign extend */
215 putcfg r21, 1, r22 /* Set MMUDR[0].PTEL */
216 movi MMUDR_CACHED_H, r22 /* PTEH last */
217 add.l r22, r63, r22 /* Sign extend */
218 putcfg r21, 0, r22 /* Set MMUDR[0].PTEH */
219
220#ifdef CONFIG_EARLY_PRINTK
221 /*
222 * Setup a DTLB translation for SCIF phys.
223 */
224 addi r21, MMUDR_STEP, r21
225 movi 0x0a03, r22 /* SCIF phys */
226 shori 0x0148, r22
227 putcfg r21, 1, r22 /* PTEL first */
228 movi 0xfa03, r22 /* 0xfa030000, fixed SCIF virt */
229 shori 0x0003, r22
230 putcfg r21, 0, r22 /* PTEH last */
231#endif
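	/* The movi/shori pairs above build a 32-bit constant 16 bits at
	   a time; movi sign-extends its immediate, so the 0xfa03xxxx
	   virtual address comes out sign-extended as NEFF requires. */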
232
233 /*
234 * Set cache behaviours.
235 */
236 /* ICache */
237 movi ICCR_BASE, r21
238 movi ICCR0_INIT_VAL, r22
239 movi ICCR1_INIT_VAL, r23
240 putcfg r21, ICCR_REG0, r22
241 putcfg r21, ICCR_REG1, r23
242
243 /* OCache */
244 movi OCCR_BASE, r21
245 movi OCCR0_INIT_VAL, r22
246 movi OCCR1_INIT_VAL, r23
247 putcfg r21, OCCR_REG0, r22
248 putcfg r21, OCCR_REG1, r23
249
250
251 /*
252 * Enable Caches and MMU. Do the first non-PIC jump.
253 * Now head.S global variables, constants and externs
254 * can be used.
255 */
256 getcon SR, r21
257 movi SR_ENABLE_MMU, r22
258 or r21, r22, r21
259 putcon r21, SSR
260 movi hyperspace, r22
261 ori r22, 1, r22 /* Make it SHmedia, not required but..*/
262 putcon r22, SPC
263 synco
264 rte /* And now go into the hyperspace ... */
265hyperspace: /* ... that's the next instruction ! */
266
267 /*
268 * Set CPU to a consistent state.
269 * r31 = FPU support flag
270 * tr0/tr7 in use. Others give a chance to loop somewhere safe
271 */
272 movi start_kernel, r32
273 ori r32, 1, r32
274
275 ptabs r32, tr0 /* r32 = _start_kernel address */
276 pta/u hopeless, tr1
277 pta/u hopeless, tr2
278 pta/u hopeless, tr3
279 pta/u hopeless, tr4
280 pta/u hopeless, tr5
281 pta/u hopeless, tr6
282 pta/u hopeless, tr7
283 gettr tr1, r28 /* r28 = hopeless address */
284
285 /* Set initial stack pointer */
286 movi init_thread_union, SP
287 putcon SP, KCR0 /* Set current to init_task */
288 movi THREAD_SIZE, r22 /* Point to the end */
289 add SP, r22, SP
290
291 /*
292 * Initialize FPU.
293 * Keep FPU flag in r31. After this block:
294 * r31 = FPU flag
295 */
296 movi fpu_in_use, r31 /* Temporary */
297
298#ifdef CONFIG_SH_FPU
299 getcon SR, r21
300 movi SR_ENABLE_FPU, r22
301 and r21, r22, r22
302 putcon r22, SR /* Try to enable */
303 getcon SR, r22
304 xor r21, r22, r21
305 shlri r21, 15, r21 /* Supposedly 0/1 */
306 st.q r31, 0 , r21 /* Set fpu_in_use */
307#else
308 movi 0, r21
309 st.q r31, 0 , r21 /* Set fpu_in_use */
310#endif
311 or r21, ZERO, r31 /* Set FPU flag at last */
312
313#ifndef CONFIG_SH_NO_BSS_INIT
314/* Don't clear BSS if running on slow platforms such as an RTL simulation,
315 remote memory via SHdebug link, etc. For these the memory can be guaranteed
316 to be all zero on boot anyway. */
317 /*
318 * Clear bss
319 */
320 pta clear_quad, tr1
321 movi __bss_start, r22
322 movi _end, r23
323clear_quad:
324 st.q r22, 0, ZERO
325 addi r22, 8, r22
326 bne r22, r23, tr1 /* Both quad aligned, see vmlinux.lds.S */
327#endif
328 pta/u hopeless, tr1
329
330 /* Say goodbye to head.S, but be prepared in case we wrongly get back ... */
331 blink tr0, LINK
332
333 /* If we ever get back here through LINK/tr1-tr7 */
334 pta/u hopeless, tr7
335
336hopeless:
337 /*
338 * Something's badly wrong here. Loop endlessly,
339 * there's nothing more we can do about it.
340 *
341 * Note on hopeless: it can be jumped into either
342 * before or after jumping into hyperspace. The only
343 * requirement is that it be reached PIC (via PTA)
344 * beforehand, and by any means (PTA/PTABS) afterwards.
345 * Given the virtual-to-physical mapping, a simulator
346 * or emulator can easily tell which side we came from
347 * just by looking at the hopeless (PC) address.
348 *
349 * For debugging purposes:
350 * (r28) hopeless/loop address
351 * (r29) Original SR
352 * (r30) CPU type/Platform endianness
353 * (r31) FPU Support
354 * (r32) _start_kernel address
355 */
356 blink tr7, ZERO
diff --git a/arch/sh/kernel/init_task.c b/arch/sh/kernel/init_task.c
index 4b449c4a6bad..f9bcc606127e 100644
--- a/arch/sh/kernel/init_task.c
+++ b/arch/sh/kernel/init_task.c
@@ -11,8 +11,8 @@ static struct fs_struct init_fs = INIT_FS;
11static struct files_struct init_files = INIT_FILES; 11static struct files_struct init_files = INIT_FILES;
12static struct signal_struct init_signals = INIT_SIGNALS(init_signals); 12static struct signal_struct init_signals = INIT_SIGNALS(init_signals);
13static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand); 13static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
14struct pt_regs fake_swapper_regs;
14struct mm_struct init_mm = INIT_MM(init_mm); 15struct mm_struct init_mm = INIT_MM(init_mm);
15
16EXPORT_SYMBOL(init_mm); 16EXPORT_SYMBOL(init_mm);
17 17
18/* 18/*
@@ -22,7 +22,7 @@ EXPORT_SYMBOL(init_mm);
22 * way process stacks are handled. This is done by having a special 22 * way process stacks are handled. This is done by having a special
23 * "init_task" linker map entry.. 23 * "init_task" linker map entry..
24 */ 24 */
25union thread_union init_thread_union 25union thread_union init_thread_union
26 __attribute__((__section__(".data.init_task"))) = 26 __attribute__((__section__(".data.init_task"))) =
27 { INIT_THREAD_INFO(init_task) }; 27 { INIT_THREAD_INFO(init_task) };
28 28
diff --git a/arch/sh/kernel/io.c b/arch/sh/kernel/io.c
index 501fe03e3715..71c9fde2fd90 100644
--- a/arch/sh/kernel/io.c
+++ b/arch/sh/kernel/io.c
@@ -61,73 +61,6 @@ void memset_io(volatile void __iomem *dst, int c, unsigned long count)
61} 61}
62EXPORT_SYMBOL(memset_io); 62EXPORT_SYMBOL(memset_io);
63 63
64void __raw_readsl(unsigned long addr, void *datap, int len)
65{
66 u32 *data;
67
68 for (data = datap; (len != 0) && (((u32)data & 0x1f) != 0); len--)
69 *data++ = ctrl_inl(addr);
70
71 if (likely(len >= (0x20 >> 2))) {
72 int tmp2, tmp3, tmp4, tmp5, tmp6;
73
74 __asm__ __volatile__(
75 "1: \n\t"
76 "mov.l @%7, r0 \n\t"
77 "mov.l @%7, %2 \n\t"
78#ifdef CONFIG_CPU_SH4
79 "movca.l r0, @%0 \n\t"
80#else
81 "mov.l r0, @%0 \n\t"
82#endif
83 "mov.l @%7, %3 \n\t"
84 "mov.l @%7, %4 \n\t"
85 "mov.l @%7, %5 \n\t"
86 "mov.l @%7, %6 \n\t"
87 "mov.l @%7, r7 \n\t"
88 "mov.l @%7, r0 \n\t"
89 "mov.l %2, @(0x04,%0) \n\t"
90 "mov #0x20>>2, %2 \n\t"
91 "mov.l %3, @(0x08,%0) \n\t"
92 "sub %2, %1 \n\t"
93 "mov.l %4, @(0x0c,%0) \n\t"
94 "cmp/hi %1, %2 ! T if 32 > len \n\t"
95 "mov.l %5, @(0x10,%0) \n\t"
96 "mov.l %6, @(0x14,%0) \n\t"
97 "mov.l r7, @(0x18,%0) \n\t"
98 "mov.l r0, @(0x1c,%0) \n\t"
99 "bf.s 1b \n\t"
100 " add #0x20, %0 \n\t"
101 : "=&r" (data), "=&r" (len),
102 "=&r" (tmp2), "=&r" (tmp3), "=&r" (tmp4),
103 "=&r" (tmp5), "=&r" (tmp6)
104 : "r"(addr), "0" (data), "1" (len)
105 : "r0", "r7", "t", "memory");
106 }
107
108 for (; len != 0; len--)
109 *data++ = ctrl_inl(addr);
110}
111EXPORT_SYMBOL(__raw_readsl);
112
113void __raw_writesl(unsigned long addr, const void *data, int len)
114{
115 if (likely(len != 0)) {
116 int tmp1;
117
118 __asm__ __volatile__ (
119 "1: \n\t"
120 "mov.l @%0+, %1 \n\t"
121 "dt %3 \n\t"
122 "bf.s 1b \n\t"
123 " mov.l %1, @%4 \n\t"
124 : "=&r" (data), "=&r" (tmp1)
125 : "0" (data), "r" (len), "r"(addr)
126 : "t", "memory");
127 }
128}
129EXPORT_SYMBOL(__raw_writesl);
130
131void __iomem *ioport_map(unsigned long port, unsigned int nr) 64void __iomem *ioport_map(unsigned long port, unsigned int nr)
132{ 65{
133 return sh_mv.mv_ioport_map(port, nr); 66 return sh_mv.mv_ioport_map(port, nr);
diff --git a/arch/sh/kernel/module.c b/arch/sh/kernel/module.c
index 142a4e5b7ebc..b3d0a03b4c76 100644
--- a/arch/sh/kernel/module.c
+++ b/arch/sh/kernel/module.c
@@ -1,5 +1,15 @@
1/* Kernel module help for SH. 1/* Kernel module help for SH.
2 2
3 SHcompact version by Kaz Kojima and Paul Mundt.
4
5 SHmedia bits:
6
7 Copyright 2004 SuperH (UK) Ltd
8 Author: Richard Curnow
9
10 Based on the sh version, and on code from the sh64-specific parts of
11 modutils, originally written by Richard Curnow and Ben Gaster.
12
3 This program is free software; you can redistribute it and/or modify 13 This program is free software; you can redistribute it and/or modify
4 it under the terms of the GNU General Public License as published by 14 it under the terms of the GNU General Public License as published by
5 the Free Software Foundation; either version 2 of the License, or 15 the Free Software Foundation; either version 2 of the License, or
@@ -21,12 +31,6 @@
21#include <linux/string.h> 31#include <linux/string.h>
22#include <linux/kernel.h> 32#include <linux/kernel.h>
23 33
24#if 0
25#define DEBUGP printk
26#else
27#define DEBUGP(fmt...)
28#endif
29
30void *module_alloc(unsigned long size) 34void *module_alloc(unsigned long size)
31{ 35{
32 if (size == 0) 36 if (size == 0)
@@ -52,6 +56,7 @@ int module_frob_arch_sections(Elf_Ehdr *hdr,
52 return 0; 56 return 0;
53} 57}
54 58
59#ifdef CONFIG_SUPERH32
55#define COPY_UNALIGNED_WORD(sw, tw, align) \ 60#define COPY_UNALIGNED_WORD(sw, tw, align) \
56{ \ 61{ \
57 void *__s = &(sw), *__t = &(tw); \ 62 void *__s = &(sw), *__t = &(tw); \
@@ -74,6 +79,10 @@ int module_frob_arch_sections(Elf_Ehdr *hdr,
74 break; \ 79 break; \
75 } \ 80 } \
76} 81}
82#else
83/* One thing SHmedia doesn't screw up! */
84#define COPY_UNALIGNED_WORD(sw, tw, align) { (tw) = (sw); }
85#endif
77 86
78int apply_relocate_add(Elf32_Shdr *sechdrs, 87int apply_relocate_add(Elf32_Shdr *sechdrs,
79 const char *strtab, 88 const char *strtab,
@@ -89,8 +98,8 @@ int apply_relocate_add(Elf32_Shdr *sechdrs,
89 uint32_t value; 98 uint32_t value;
90 int align; 99 int align;
91 100
92 DEBUGP("Applying relocate section %u to %u\n", relsec, 101 pr_debug("Applying relocate section %u to %u\n", relsec,
93 sechdrs[relsec].sh_info); 102 sechdrs[relsec].sh_info);
94 for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) { 103 for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
95 /* This is where to make the change */ 104 /* This is where to make the change */
96 location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr 105 location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
@@ -102,17 +111,44 @@ int apply_relocate_add(Elf32_Shdr *sechdrs,
102 relocation = sym->st_value + rel[i].r_addend; 111 relocation = sym->st_value + rel[i].r_addend;
103 align = (int)location & 3; 112 align = (int)location & 3;
104 113
114#ifdef CONFIG_SUPERH64
115 /* For text addresses, bit2 of the st_other field indicates
116 * whether the symbol is SHmedia (1) or SHcompact (0). If
117 * SHmedia, the LSB of the symbol needs to be asserted
118 * for the CPU to be in SHmedia mode when it starts executing
119 * the branch target. */
120 relocation |= (sym->st_other & 4);
121#endif
122
105 switch (ELF32_R_TYPE(rel[i].r_info)) { 123 switch (ELF32_R_TYPE(rel[i].r_info)) {
106 case R_SH_DIR32: 124 case R_SH_DIR32:
107 COPY_UNALIGNED_WORD (*location, value, align); 125 COPY_UNALIGNED_WORD (*location, value, align);
108 value += relocation; 126 value += relocation;
109 COPY_UNALIGNED_WORD (value, *location, align); 127 COPY_UNALIGNED_WORD (value, *location, align);
110 break; 128 break;
111 case R_SH_REL32: 129 case R_SH_REL32:
112 relocation = (relocation - (Elf32_Addr) location); 130 relocation = (relocation - (Elf32_Addr) location);
113 COPY_UNALIGNED_WORD (*location, value, align); 131 COPY_UNALIGNED_WORD (*location, value, align);
114 value += relocation; 132 value += relocation;
115 COPY_UNALIGNED_WORD (value, *location, align); 133 COPY_UNALIGNED_WORD (value, *location, align);
134 break;
135 case R_SH_IMM_LOW16:
136 *location = (*location & ~0x3fffc00) |
137 ((relocation & 0xffff) << 10);
138 break;
139 case R_SH_IMM_MEDLOW16:
140 *location = (*location & ~0x3fffc00) |
141 (((relocation >> 16) & 0xffff) << 10);
142 break;
143 case R_SH_IMM_LOW16_PCREL:
144 relocation -= (Elf32_Addr) location;
145 *location = (*location & ~0x3fffc00) |
146 ((relocation & 0xffff) << 10);
147 break;
148 case R_SH_IMM_MEDLOW16_PCREL:
149 relocation -= (Elf32_Addr) location;
150 *location = (*location & ~0x3fffc00) |
151 (((relocation >> 16) & 0xffff) << 10);
116 break; 152 break;
117 default: 153 default:
118 printk(KERN_ERR "module %s: Unknown relocation: %u\n", 154 printk(KERN_ERR "module %s: Unknown relocation: %u\n",
diff --git a/arch/sh/kernel/process.c b/arch/sh/kernel/process_32.c
index 6d7f2b07e491..9ab1926b9d10 100644
--- a/arch/sh/kernel/process.c
+++ b/arch/sh/kernel/process_32.c
@@ -230,34 +230,6 @@ int dump_fpu(struct pt_regs *regs, elf_fpregset_t *fpu)
230 return fpvalid; 230 return fpvalid;
231} 231}
232 232
233/*
234 * Capture the user space registers if the task is not running (in user space)
235 */
236int dump_task_regs(struct task_struct *tsk, elf_gregset_t *regs)
237{
238 struct pt_regs ptregs;
239
240 ptregs = *task_pt_regs(tsk);
241 elf_core_copy_regs(regs, &ptregs);
242
243 return 1;
244}
245
246int dump_task_fpu(struct task_struct *tsk, elf_fpregset_t *fpu)
247{
248 int fpvalid = 0;
249
250#if defined(CONFIG_SH_FPU)
251 fpvalid = !!tsk_used_math(tsk);
252 if (fpvalid) {
253 unlazy_fpu(tsk, task_pt_regs(tsk));
254 memcpy(fpu, &tsk->thread.fpu.hard, sizeof(*fpu));
255 }
256#endif
257
258 return fpvalid;
259}
260
261asmlinkage void ret_from_fork(void); 233asmlinkage void ret_from_fork(void);
262 234
263int copy_thread(int nr, unsigned long clone_flags, unsigned long usp, 235int copy_thread(int nr, unsigned long clone_flags, unsigned long usp,
@@ -350,25 +322,6 @@ struct task_struct *__switch_to(struct task_struct *prev,
350 unlazy_fpu(prev, task_pt_regs(prev)); 322 unlazy_fpu(prev, task_pt_regs(prev));
351#endif 323#endif
352 324
353#if defined(CONFIG_GUSA) && defined(CONFIG_PREEMPT)
354 {
355 struct pt_regs *regs;
356
357 preempt_disable();
358 regs = task_pt_regs(prev);
359 if (user_mode(regs) && regs->regs[15] >= 0xc0000000) {
360 int offset = (int)regs->regs[15];
361
362 /* Reset stack pointer: clear critical region mark */
363 regs->regs[15] = regs->regs[1];
364 if (regs->pc < regs->regs[0])
365 /* Go to rewind point */
366 regs->pc = regs->regs[0] + offset;
367 }
368 preempt_enable_no_resched();
369 }
370#endif
371
372#ifdef CONFIG_MMU 325#ifdef CONFIG_MMU
373 /* 326 /*
374 * Restore the kernel mode register 327 * Restore the kernel mode register
@@ -510,49 +463,3 @@ asmlinkage void break_point_trap(void)
510 463
511 force_sig(SIGTRAP, current); 464 force_sig(SIGTRAP, current);
512} 465}
513
514/*
515 * Generic trap handler.
516 */
517asmlinkage void debug_trap_handler(unsigned long r4, unsigned long r5,
518 unsigned long r6, unsigned long r7,
519 struct pt_regs __regs)
520{
521 struct pt_regs *regs = RELOC_HIDE(&__regs, 0);
522
523 /* Rewind */
524 regs->pc -= instruction_size(ctrl_inw(regs->pc - 4));
525
526 if (notify_die(DIE_TRAP, "debug trap", regs, 0, regs->tra & 0xff,
527 SIGTRAP) == NOTIFY_STOP)
528 return;
529
530 force_sig(SIGTRAP, current);
531}
532
533/*
534 * Special handler for BUG() traps.
535 */
536asmlinkage void bug_trap_handler(unsigned long r4, unsigned long r5,
537 unsigned long r6, unsigned long r7,
538 struct pt_regs __regs)
539{
540 struct pt_regs *regs = RELOC_HIDE(&__regs, 0);
541
542 /* Rewind */
543 regs->pc -= instruction_size(ctrl_inw(regs->pc - 4));
544
545 if (notify_die(DIE_TRAP, "bug trap", regs, 0, TRAPA_BUG_OPCODE & 0xff,
546 SIGTRAP) == NOTIFY_STOP)
547 return;
548
549#ifdef CONFIG_BUG
550 if (__kernel_text_address(instruction_pointer(regs))) {
551 u16 insn = *(u16 *)instruction_pointer(regs);
552 if (insn == TRAPA_BUG_OPCODE)
553 handle_BUG(regs);
554 }
555#endif
556
557 force_sig(SIGTRAP, current);
558}
diff --git a/arch/sh/kernel/process_64.c b/arch/sh/kernel/process_64.c
new file mode 100644
index 000000000000..cff3b7dc9c56
--- /dev/null
+++ b/arch/sh/kernel/process_64.c
@@ -0,0 +1,701 @@
1/*
2 * arch/sh/kernel/process_64.c
3 *
4 * This file handles the architecture-dependent parts of process handling..
5 *
6 * Copyright (C) 2000, 2001 Paolo Alberelli
7 * Copyright (C) 2003 - 2007 Paul Mundt
8 * Copyright (C) 2003, 2004 Richard Curnow
9 *
10 * Started from SH3/4 version:
11 * Copyright (C) 1999, 2000 Niibe Yutaka & Kaz Kojima
12 *
13 * In turn started from i386 version:
14 * Copyright (C) 1995 Linus Torvalds
15 *
16 * This file is subject to the terms and conditions of the GNU General Public
17 * License. See the file "COPYING" in the main directory of this archive
18 * for more details.
19 */
20#include <linux/mm.h>
21#include <linux/fs.h>
22#include <linux/ptrace.h>
23#include <linux/reboot.h>
24#include <linux/init.h>
25#include <linux/module.h>
26#include <linux/proc_fs.h>
27#include <linux/io.h>
28#include <asm/uaccess.h>
29#include <asm/pgtable.h>
30#include <asm/mmu_context.h>
31
32struct task_struct *last_task_used_math = NULL;
33
34static int hlt_counter = 1;
35
36#define HARD_IDLE_TIMEOUT (HZ / 3)
37
38void disable_hlt(void)
39{
40 hlt_counter++;
41}
42
43void enable_hlt(void)
44{
45 hlt_counter--;
46}
47
48static int __init nohlt_setup(char *__unused)
49{
50 hlt_counter = 1;
51 return 1;
52}
53
54static int __init hlt_setup(char *__unused)
55{
56 hlt_counter = 0;
57 return 1;
58}
59
60__setup("nohlt", nohlt_setup);
61__setup("hlt", hlt_setup);
62
63static inline void hlt(void)
64{
65 __asm__ __volatile__ ("sleep" : : : "memory");
66}
67
68/*
69 * The idle loop on a uniprocessor SH..
70 */
71void cpu_idle(void)
72{
73 /* endless idle loop with no priority at all */
74 while (1) {
75 if (hlt_counter) {
76 while (!need_resched())
77 cpu_relax();
78 } else {
79 local_irq_disable();
80 while (!need_resched()) {
81 local_irq_enable();
82 hlt();
83 local_irq_disable();
84 }
85 local_irq_enable();
86 }
87 preempt_enable_no_resched();
88 schedule();
89 preempt_disable();
90 }
91
92}
93
94void machine_restart(char * __unused)
95{
96 extern void phys_stext(void);
97
98 phys_stext();
99}
100
101void machine_halt(void)
102{
103 for (;;);
104}
105
106void machine_power_off(void)
107{
108#if 0
109 /* Disable watchdog timer */
110 ctrl_outl(0xa5000000, WTCSR);
111 /* Configure deep standby on sleep */
112 ctrl_outl(0x03, STBCR);
113#endif
114
115 __asm__ __volatile__ (
116 "sleep\n\t"
117 "synci\n\t"
118 "nop;nop;nop;nop\n\t"
119 );
120
121 panic("Unexpected wakeup!\n");
122}
123
124void (*pm_power_off)(void) = machine_power_off;
125EXPORT_SYMBOL(pm_power_off);
126
127void show_regs(struct pt_regs * regs)
128{
129 unsigned long long ah, al, bh, bl, ch, cl;
130
131 printk("\n");
132
133 ah = (regs->pc) >> 32;
134 al = (regs->pc) & 0xffffffff;
135 bh = (regs->regs[18]) >> 32;
136 bl = (regs->regs[18]) & 0xffffffff;
137 ch = (regs->regs[15]) >> 32;
138 cl = (regs->regs[15]) & 0xffffffff;
139 printk("PC : %08Lx%08Lx LINK: %08Lx%08Lx SP : %08Lx%08Lx\n",
140 ah, al, bh, bl, ch, cl);
141
142 ah = (regs->sr) >> 32;
143 al = (regs->sr) & 0xffffffff;
144 asm volatile ("getcon " __TEA ", %0" : "=r" (bh));
145 asm volatile ("getcon " __TEA ", %0" : "=r" (bl));
146 bh = (bh) >> 32;
147 bl = (bl) & 0xffffffff;
148 asm volatile ("getcon " __KCR0 ", %0" : "=r" (ch));
149 asm volatile ("getcon " __KCR0 ", %0" : "=r" (cl));
150 ch = (ch) >> 32;
151 cl = (cl) & 0xffffffff;
152 printk("SR : %08Lx%08Lx TEA : %08Lx%08Lx KCR0: %08Lx%08Lx\n",
153 ah, al, bh, bl, ch, cl);
154
155 ah = (regs->regs[0]) >> 32;
156 al = (regs->regs[0]) & 0xffffffff;
157 bh = (regs->regs[1]) >> 32;
158 bl = (regs->regs[1]) & 0xffffffff;
159 ch = (regs->regs[2]) >> 32;
160 cl = (regs->regs[2]) & 0xffffffff;
161 printk("R0 : %08Lx%08Lx R1 : %08Lx%08Lx R2 : %08Lx%08Lx\n",
162 ah, al, bh, bl, ch, cl);
163
164 ah = (regs->regs[3]) >> 32;
165 al = (regs->regs[3]) & 0xffffffff;
166 bh = (regs->regs[4]) >> 32;
167 bl = (regs->regs[4]) & 0xffffffff;
168 ch = (regs->regs[5]) >> 32;
169 cl = (regs->regs[5]) & 0xffffffff;
170 printk("R3 : %08Lx%08Lx R4 : %08Lx%08Lx R5 : %08Lx%08Lx\n",
171 ah, al, bh, bl, ch, cl);
172
173 ah = (regs->regs[6]) >> 32;
174 al = (regs->regs[6]) & 0xffffffff;
175 bh = (regs->regs[7]) >> 32;
176 bl = (regs->regs[7]) & 0xffffffff;
177 ch = (regs->regs[8]) >> 32;
178 cl = (regs->regs[8]) & 0xffffffff;
179 printk("R6 : %08Lx%08Lx R7 : %08Lx%08Lx R8 : %08Lx%08Lx\n",
180 ah, al, bh, bl, ch, cl);
181
182 ah = (regs->regs[9]) >> 32;
183 al = (regs->regs[9]) & 0xffffffff;
184 bh = (regs->regs[10]) >> 32;
185 bl = (regs->regs[10]) & 0xffffffff;
186 ch = (regs->regs[11]) >> 32;
187 cl = (regs->regs[11]) & 0xffffffff;
188 printk("R9 : %08Lx%08Lx R10 : %08Lx%08Lx R11 : %08Lx%08Lx\n",
189 ah, al, bh, bl, ch, cl);
190
191 ah = (regs->regs[12]) >> 32;
192 al = (regs->regs[12]) & 0xffffffff;
193 bh = (regs->regs[13]) >> 32;
194 bl = (regs->regs[13]) & 0xffffffff;
195 ch = (regs->regs[14]) >> 32;
196 cl = (regs->regs[14]) & 0xffffffff;
197 printk("R12 : %08Lx%08Lx R13 : %08Lx%08Lx R14 : %08Lx%08Lx\n",
198 ah, al, bh, bl, ch, cl);
199
200 ah = (regs->regs[16]) >> 32;
201 al = (regs->regs[16]) & 0xffffffff;
202 bh = (regs->regs[17]) >> 32;
203 bl = (regs->regs[17]) & 0xffffffff;
204 ch = (regs->regs[19]) >> 32;
205 cl = (regs->regs[19]) & 0xffffffff;
206 printk("R16 : %08Lx%08Lx R17 : %08Lx%08Lx R19 : %08Lx%08Lx\n",
207 ah, al, bh, bl, ch, cl);
208
209 ah = (regs->regs[20]) >> 32;
210 al = (regs->regs[20]) & 0xffffffff;
211 bh = (regs->regs[21]) >> 32;
212 bl = (regs->regs[21]) & 0xffffffff;
213 ch = (regs->regs[22]) >> 32;
214 cl = (regs->regs[22]) & 0xffffffff;
215 printk("R20 : %08Lx%08Lx R21 : %08Lx%08Lx R22 : %08Lx%08Lx\n",
216 ah, al, bh, bl, ch, cl);
217
218 ah = (regs->regs[23]) >> 32;
219 al = (regs->regs[23]) & 0xffffffff;
220 bh = (regs->regs[24]) >> 32;
221 bl = (regs->regs[24]) & 0xffffffff;
222 ch = (regs->regs[25]) >> 32;
223 cl = (regs->regs[25]) & 0xffffffff;
224 printk("R23 : %08Lx%08Lx R24 : %08Lx%08Lx R25 : %08Lx%08Lx\n",
225 ah, al, bh, bl, ch, cl);
226
227 ah = (regs->regs[26]) >> 32;
228 al = (regs->regs[26]) & 0xffffffff;
229 bh = (regs->regs[27]) >> 32;
230 bl = (regs->regs[27]) & 0xffffffff;
231 ch = (regs->regs[28]) >> 32;
232 cl = (regs->regs[28]) & 0xffffffff;
233 printk("R26 : %08Lx%08Lx R27 : %08Lx%08Lx R28 : %08Lx%08Lx\n",
234 ah, al, bh, bl, ch, cl);
235
236 ah = (regs->regs[29]) >> 32;
237 al = (regs->regs[29]) & 0xffffffff;
238 bh = (regs->regs[30]) >> 32;
239 bl = (regs->regs[30]) & 0xffffffff;
240 ch = (regs->regs[31]) >> 32;
241 cl = (regs->regs[31]) & 0xffffffff;
242 printk("R29 : %08Lx%08Lx R30 : %08Lx%08Lx R31 : %08Lx%08Lx\n",
243 ah, al, bh, bl, ch, cl);
244
245 ah = (regs->regs[32]) >> 32;
246 al = (regs->regs[32]) & 0xffffffff;
247 bh = (regs->regs[33]) >> 32;
248 bl = (regs->regs[33]) & 0xffffffff;
249 ch = (regs->regs[34]) >> 32;
250 cl = (regs->regs[34]) & 0xffffffff;
251 printk("R32 : %08Lx%08Lx R33 : %08Lx%08Lx R34 : %08Lx%08Lx\n",
252 ah, al, bh, bl, ch, cl);
253
254 ah = (regs->regs[35]) >> 32;
255 al = (regs->regs[35]) & 0xffffffff;
256 bh = (regs->regs[36]) >> 32;
257 bl = (regs->regs[36]) & 0xffffffff;
258 ch = (regs->regs[37]) >> 32;
259 cl = (regs->regs[37]) & 0xffffffff;
260 printk("R35 : %08Lx%08Lx R36 : %08Lx%08Lx R37 : %08Lx%08Lx\n",
261 ah, al, bh, bl, ch, cl);
262
263 ah = (regs->regs[38]) >> 32;
264 al = (regs->regs[38]) & 0xffffffff;
265 bh = (regs->regs[39]) >> 32;
266 bl = (regs->regs[39]) & 0xffffffff;
267 ch = (regs->regs[40]) >> 32;
268 cl = (regs->regs[40]) & 0xffffffff;
269 printk("R38 : %08Lx%08Lx R39 : %08Lx%08Lx R40 : %08Lx%08Lx\n",
270 ah, al, bh, bl, ch, cl);
271
272 ah = (regs->regs[41]) >> 32;
273 al = (regs->regs[41]) & 0xffffffff;
274 bh = (regs->regs[42]) >> 32;
275 bl = (regs->regs[42]) & 0xffffffff;
276 ch = (regs->regs[43]) >> 32;
277 cl = (regs->regs[43]) & 0xffffffff;
278 printk("R41 : %08Lx%08Lx R42 : %08Lx%08Lx R43 : %08Lx%08Lx\n",
279 ah, al, bh, bl, ch, cl);
280
281 ah = (regs->regs[44]) >> 32;
282 al = (regs->regs[44]) & 0xffffffff;
283 bh = (regs->regs[45]) >> 32;
284 bl = (regs->regs[45]) & 0xffffffff;
285 ch = (regs->regs[46]) >> 32;
286 cl = (regs->regs[46]) & 0xffffffff;
287 printk("R44 : %08Lx%08Lx R45 : %08Lx%08Lx R46 : %08Lx%08Lx\n",
288 ah, al, bh, bl, ch, cl);
289
290 ah = (regs->regs[47]) >> 32;
291 al = (regs->regs[47]) & 0xffffffff;
292 bh = (regs->regs[48]) >> 32;
293 bl = (regs->regs[48]) & 0xffffffff;
294 ch = (regs->regs[49]) >> 32;
295 cl = (regs->regs[49]) & 0xffffffff;
296 printk("R47 : %08Lx%08Lx R48 : %08Lx%08Lx R49 : %08Lx%08Lx\n",
297 ah, al, bh, bl, ch, cl);
298
299 ah = (regs->regs[50]) >> 32;
300 al = (regs->regs[50]) & 0xffffffff;
301 bh = (regs->regs[51]) >> 32;
302 bl = (regs->regs[51]) & 0xffffffff;
303 ch = (regs->regs[52]) >> 32;
304 cl = (regs->regs[52]) & 0xffffffff;
305 printk("R50 : %08Lx%08Lx R51 : %08Lx%08Lx R52 : %08Lx%08Lx\n",
306 ah, al, bh, bl, ch, cl);
307
308 ah = (regs->regs[53]) >> 32;
309 al = (regs->regs[53]) & 0xffffffff;
310 bh = (regs->regs[54]) >> 32;
311 bl = (regs->regs[54]) & 0xffffffff;
312 ch = (regs->regs[55]) >> 32;
313 cl = (regs->regs[55]) & 0xffffffff;
314 printk("R53 : %08Lx%08Lx R54 : %08Lx%08Lx R55 : %08Lx%08Lx\n",
315 ah, al, bh, bl, ch, cl);
316
317 ah = (regs->regs[56]) >> 32;
318 al = (regs->regs[56]) & 0xffffffff;
319 bh = (regs->regs[57]) >> 32;
320 bl = (regs->regs[57]) & 0xffffffff;
321 ch = (regs->regs[58]) >> 32;
322 cl = (regs->regs[58]) & 0xffffffff;
323 printk("R56 : %08Lx%08Lx R57 : %08Lx%08Lx R58 : %08Lx%08Lx\n",
324 ah, al, bh, bl, ch, cl);
325
326 ah = (regs->regs[59]) >> 32;
327 al = (regs->regs[59]) & 0xffffffff;
328 bh = (regs->regs[60]) >> 32;
329 bl = (regs->regs[60]) & 0xffffffff;
330 ch = (regs->regs[61]) >> 32;
331 cl = (regs->regs[61]) & 0xffffffff;
332 printk("R59 : %08Lx%08Lx R60 : %08Lx%08Lx R61 : %08Lx%08Lx\n",
333 ah, al, bh, bl, ch, cl);
334
335 ah = (regs->regs[62]) >> 32;
336 al = (regs->regs[62]) & 0xffffffff;
337 bh = (regs->tregs[0]) >> 32;
338 bl = (regs->tregs[0]) & 0xffffffff;
339 ch = (regs->tregs[1]) >> 32;
340 cl = (regs->tregs[1]) & 0xffffffff;
341 printk("R62 : %08Lx%08Lx T0 : %08Lx%08Lx T1 : %08Lx%08Lx\n",
342 ah, al, bh, bl, ch, cl);
343
344 ah = (regs->tregs[2]) >> 32;
345 al = (regs->tregs[2]) & 0xffffffff;
346 bh = (regs->tregs[3]) >> 32;
347 bl = (regs->tregs[3]) & 0xffffffff;
348 ch = (regs->tregs[4]) >> 32;
349 cl = (regs->tregs[4]) & 0xffffffff;
350 printk("T2 : %08Lx%08Lx T3 : %08Lx%08Lx T4 : %08Lx%08Lx\n",
351 ah, al, bh, bl, ch, cl);
352
353 ah = (regs->tregs[5]) >> 32;
354 al = (regs->tregs[5]) & 0xffffffff;
355 bh = (regs->tregs[6]) >> 32;
356 bl = (regs->tregs[6]) & 0xffffffff;
357 ch = (regs->tregs[7]) >> 32;
358 cl = (regs->tregs[7]) & 0xffffffff;
359 printk("T5 : %08Lx%08Lx T6 : %08Lx%08Lx T7 : %08Lx%08Lx\n",
360 ah, al, bh, bl, ch, cl);
361
362 /*
363 * If we're in kernel mode, dump the stack too..
364 */
365 if (!user_mode(regs)) {
366 void show_stack(struct task_struct *tsk, unsigned long *sp);
367 unsigned long sp = regs->regs[15] & 0xffffffff;
368 struct task_struct *tsk = get_current();
369
370 tsk->thread.kregs = regs;
371
372 show_stack(tsk, (unsigned long *)sp);
373 }
374}
375
376struct task_struct * alloc_task_struct(void)
377{
378 /* Get task descriptor pages */
379 return (struct task_struct *)
380 __get_free_pages(GFP_KERNEL, get_order(THREAD_SIZE));
381}
382
383void free_task_struct(struct task_struct *p)
384{
385 free_pages((unsigned long) p, get_order(THREAD_SIZE));
386}
387
388/*
389 * Create a kernel thread
390 */
391ATTRIB_NORET void kernel_thread_helper(void *arg, int (*fn)(void *))
392{
393 do_exit(fn(arg));
394}
395
396/*
397 * This is the mechanism for creating a new kernel thread.
398 *
399 * NOTE! Only a kernel-only process (i.e. the swapper or direct descendants
400 * who haven't done an "execve()") should use this: it will work within
401 * a system call from a "real" process, but the process memory space will
402 * not be freed until both the parent and the child have exited.
403 */
404int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags)
405{
406 struct pt_regs regs;
407
408 memset(&regs, 0, sizeof(regs));
409 regs.regs[2] = (unsigned long)arg;
410 regs.regs[3] = (unsigned long)fn;
411
412 regs.pc = (unsigned long)kernel_thread_helper;
413 regs.sr = (1 << 30);
414
415 return do_fork(flags | CLONE_VM | CLONE_UNTRACED, 0,
416 &regs, 0, NULL, NULL);
417}
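/* Usage sketch (hypothetical worker, not part of this file):
 *
 *	static int worker(void *unused)
 *	{
 *		do_useful_work();
 *		return 0;
 *	}
 *
 *	kernel_thread(worker, NULL, CLONE_FS | CLONE_FILES);
 *
 * The child starts in kernel_thread_helper(), which calls worker(NULL)
 * and hands its return value to do_exit().
 */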
418
419/*
420 * Free current thread data structures etc..
421 */
422void exit_thread(void)
423{
424 /*
425 * See arch/sparc/kernel/process.c for the precedent for doing
426 * this -- RPC.
427 *
428 * The SH-5 FPU save/restore approach relies on
429 * last_task_used_math pointing to a live task_struct. When
430 * another task tries to use the FPU for the 1st time, the FPUDIS
431 * trap handling (see arch/sh/kernel/cpu/sh5/fpu.c) will save the
432 * existing FPU state to the FP regs field within
433 * last_task_used_math before re-loading the new task's FPU state
434 * (or initialising it if the FPU has been used before). So if
435 * last_task_used_math is stale, and its page has already been
436 * re-allocated for another use, the consequences are rather
437 * grim. Unless we null it here, there is no other path through
438 * which it would get safely nulled.
439 */
440#ifdef CONFIG_SH_FPU
441 if (last_task_used_math == current) {
442 last_task_used_math = NULL;
443 }
444#endif
445}
446
447void flush_thread(void)
448{
449
450 /* Called by fs/exec.c (flush_old_exec) to remove traces of a
451 * previously running executable. */
452#ifdef CONFIG_SH_FPU
453 if (last_task_used_math == current) {
454 last_task_used_math = NULL;
455 }
456 /* Force FPU state to be reinitialised after exec */
457 clear_used_math();
458#endif
459
460 /* if we are a kernel thread, about to change to user thread,
461 * update kreg
462 */
463	if (current->thread.kregs == &fake_swapper_regs) {
464 current->thread.kregs =
465 ((struct pt_regs *)(THREAD_SIZE + (unsigned long) current) - 1);
466 current->thread.uregs = current->thread.kregs;
467 }
468}
469
470void release_thread(struct task_struct *dead_task)
471{
472 /* do nothing */
473}
474
475/* Fill in the fpu structure for a core dump.. */
476int dump_fpu(struct pt_regs *regs, elf_fpregset_t *fpu)
477{
478#ifdef CONFIG_SH_FPU
479 int fpvalid;
480 struct task_struct *tsk = current;
481
482 fpvalid = !!tsk_used_math(tsk);
483 if (fpvalid) {
484 if (current == last_task_used_math) {
485 enable_fpu();
486 save_fpu(tsk, regs);
487 disable_fpu();
488 last_task_used_math = 0;
489 regs->sr |= SR_FD;
490 }
491
492 memcpy(fpu, &tsk->thread.fpu.hard, sizeof(*fpu));
493 }
494
495 return fpvalid;
496#else
497 return 0; /* Task didn't use the fpu at all. */
498#endif
499}
500
501asmlinkage void ret_from_fork(void);
502
503int copy_thread(int nr, unsigned long clone_flags, unsigned long usp,
504 unsigned long unused,
505 struct task_struct *p, struct pt_regs *regs)
506{
507 struct pt_regs *childregs;
508 unsigned long long se; /* Sign extension */
509
510#ifdef CONFIG_SH_FPU
511 if(last_task_used_math == current) {
512 enable_fpu();
513 save_fpu(current, regs);
514 disable_fpu();
515 last_task_used_math = NULL;
516 regs->sr |= SR_FD;
517 }
518#endif
519 /* Copy from sh version */
520 childregs = (struct pt_regs *)(THREAD_SIZE + task_stack_page(p)) - 1;
521
522 *childregs = *regs;
523
524 if (user_mode(regs)) {
525 childregs->regs[15] = usp;
526 p->thread.uregs = childregs;
527 } else {
528 childregs->regs[15] = (unsigned long)task_stack_page(p) + THREAD_SIZE;
529 }
530
531 childregs->regs[9] = 0; /* Set return value for child */
532 childregs->sr |= SR_FD; /* Invalidate FPU flag */
533
534 p->thread.sp = (unsigned long) childregs;
535 p->thread.pc = (unsigned long) ret_from_fork;
536
537 /*
538 * Sign extend the edited stack.
539 * Note that thread.pc and thread.sp will stay
540 * 32-bit wide and context switch must take care
541 * of NEFF sign extension.
542 */
543
544 se = childregs->regs[15];
545 se = (se & NEFF_SIGN) ? (se | NEFF_MASK) : se;
546 childregs->regs[15] = se;
547
548 return 0;
549}
550
551asmlinkage int sys_fork(unsigned long r2, unsigned long r3,
552 unsigned long r4, unsigned long r5,
553 unsigned long r6, unsigned long r7,
554 struct pt_regs *pregs)
555{
556 return do_fork(SIGCHLD, pregs->regs[15], pregs, 0, 0, 0);
557}
558
559asmlinkage int sys_clone(unsigned long clone_flags, unsigned long newsp,
560 unsigned long r4, unsigned long r5,
561 unsigned long r6, unsigned long r7,
562 struct pt_regs *pregs)
563{
564 if (!newsp)
565 newsp = pregs->regs[15];
566 return do_fork(clone_flags, newsp, pregs, 0, 0, 0);
567}
568
569/*
570 * This is trivial, and on the face of it looks like it
571 * could equally well be done in user mode.
572 *
573 * Not so, for quite unobvious reasons - register pressure.
574 * In user mode vfork() cannot have a stack frame, and if
575 * done by calling the "clone()" system call directly, you
576 * do not have enough call-clobbered registers to hold all
577 * the information you need.
578 */
579asmlinkage int sys_vfork(unsigned long r2, unsigned long r3,
580 unsigned long r4, unsigned long r5,
581 unsigned long r6, unsigned long r7,
582 struct pt_regs *pregs)
583{
584 return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, pregs->regs[15], pregs, 0, 0, 0);
585}
586
587/*
588 * sys_execve() executes a new program.
589 */
590asmlinkage int sys_execve(char *ufilename, char **uargv,
591 char **uenvp, unsigned long r5,
592 unsigned long r6, unsigned long r7,
593 struct pt_regs *pregs)
594{
595 int error;
596 char *filename;
597
598 lock_kernel();
599 filename = getname((char __user *)ufilename);
600 error = PTR_ERR(filename);
601 if (IS_ERR(filename))
602 goto out;
603
604 error = do_execve(filename,
605 (char __user * __user *)uargv,
606 (char __user * __user *)uenvp,
607 pregs);
608 if (error == 0) {
609 task_lock(current);
610 current->ptrace &= ~PT_DTRACE;
611 task_unlock(current);
612 }
613 putname(filename);
614out:
615 unlock_kernel();
616 return error;
617}
618
619/*
620 * These bracket the sleeping functions..
621 */
622extern void interruptible_sleep_on(wait_queue_head_t *q);
623
624#define mid_sched ((unsigned long) interruptible_sleep_on)
625
626static int in_sh64_switch_to(unsigned long pc)
627{
628 extern char __sh64_switch_to_end;
629 /* For a sleeping task, the PC is somewhere in the middle of the function,
630 so we don't have to worry about masking the LSB off */
631 return (pc >= (unsigned long) sh64_switch_to) &&
632 (pc < (unsigned long) &__sh64_switch_to_end);
633}
634
635unsigned long get_wchan(struct task_struct *p)
636{
637 unsigned long schedule_fp;
638 unsigned long sh64_switch_to_fp;
639 unsigned long schedule_caller_pc;
640 unsigned long pc;
641
642 if (!p || p == current || p->state == TASK_RUNNING)
643 return 0;
644
645 /*
646 * The same comment as on the Alpha applies here, too ...
647 */
648 pc = thread_saved_pc(p);
649
650#ifdef CONFIG_FRAME_POINTER
651 if (in_sh64_switch_to(pc)) {
652 sh64_switch_to_fp = (long) p->thread.sp;
653 /* r14 is saved at offset 4 in the sh64_switch_to frame */
654 schedule_fp = *(unsigned long *) (long)(sh64_switch_to_fp + 4);
655
656 /* and the caller of 'schedule' is (currently!) saved at offset 24
657 in the frame of schedule (from disasm) */
658 schedule_caller_pc = *(unsigned long *) (long)(schedule_fp + 24);
659 return schedule_caller_pc;
660 }
661#endif
662 return pc;
663}
664
665/* Provide a /proc/asids file that lists out the
666 ASIDs currently associated with the processes. (If the DM.PC register is
667 examined through the debug link, this shows ASID + PC. To make use of this,
668 the PID->ASID relationship needs to be known. This is primarily for
669 debugging.)
670 */
671
672#if defined(CONFIG_SH64_PROC_ASIDS)
673static int
674asids_proc_info(char *buf, char **start, off_t fpos, int length, int *eof, void *data)
675{
676	int len = 0;
677 struct task_struct *p;
678 read_lock(&tasklist_lock);
679 for_each_process(p) {
680 int pid = p->pid;
681
682 if (!pid)
683 continue;
684 if (p->mm)
685 len += sprintf(buf+len, "%5d : %02lx\n", pid,
686 asid_cache(smp_processor_id()));
687 else
688 len += sprintf(buf+len, "%5d : (none)\n", pid);
689 }
690 read_unlock(&tasklist_lock);
691 *eof = 1;
692 return len;
693}
694
695static int __init register_proc_asids(void)
696{
697 create_proc_read_entry("asids", 0, NULL, asids_proc_info, NULL);
698 return 0;
699}
700__initcall(register_proc_asids);
701#endif
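
Given the two format strings above, reading the file produces one line per process; with PID and ASID values that are purely illustrative, the output would look roughly like:

    1 : 03
  112 : 1a
  113 : (none)

Kernel threads have no mm, hence the "(none)" case.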
diff --git a/arch/sh/kernel/ptrace.c b/arch/sh/kernel/ptrace_32.c
index ac725f0aeb72..ce0664a58b49 100644
--- a/arch/sh/kernel/ptrace.c
+++ b/arch/sh/kernel/ptrace_32.c
@@ -6,7 +6,7 @@
6 * edited by Linus Torvalds 6 * edited by Linus Torvalds
7 * 7 *
8 * SuperH version: Copyright (C) 1999, 2000 Kaz Kojima & Niibe Yutaka 8 * SuperH version: Copyright (C) 1999, 2000 Kaz Kojima & Niibe Yutaka
9 * 9 * Audit support: Yuichi Nakamura <ynakam@hitachisoft.jp>
10 */ 10 */
11#include <linux/kernel.h> 11#include <linux/kernel.h>
12#include <linux/sched.h> 12#include <linux/sched.h>
@@ -19,6 +19,7 @@
19#include <linux/security.h> 19#include <linux/security.h>
20#include <linux/signal.h> 20#include <linux/signal.h>
21#include <linux/io.h> 21#include <linux/io.h>
22#include <linux/audit.h>
22#include <asm/uaccess.h> 23#include <asm/uaccess.h>
23#include <asm/pgtable.h> 24#include <asm/pgtable.h>
24#include <asm/system.h> 25#include <asm/system.h>
@@ -248,15 +249,20 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
248 return ret; 249 return ret;
249} 250}
250 251
251asmlinkage void do_syscall_trace(void) 252asmlinkage void do_syscall_trace(struct pt_regs *regs, int entryexit)
252{ 253{
253 struct task_struct *tsk = current; 254 struct task_struct *tsk = current;
254 255
256 if (unlikely(current->audit_context) && entryexit)
257 audit_syscall_exit(AUDITSC_RESULT(regs->regs[0]),
258 regs->regs[0]);
259
255 if (!test_thread_flag(TIF_SYSCALL_TRACE) && 260 if (!test_thread_flag(TIF_SYSCALL_TRACE) &&
256 !test_thread_flag(TIF_SINGLESTEP)) 261 !test_thread_flag(TIF_SINGLESTEP))
257 return; 262 goto out;
258 if (!(tsk->ptrace & PT_PTRACED)) 263 if (!(tsk->ptrace & PT_PTRACED))
259 return; 264 goto out;
265
260 /* the 0x80 provides a way for the tracing parent to distinguish 266 /* the 0x80 provides a way for the tracing parent to distinguish
261 between a syscall stop and SIGTRAP delivery */ 267 between a syscall stop and SIGTRAP delivery */
262 ptrace_notify(SIGTRAP | ((current->ptrace & PT_TRACESYSGOOD) && 268 ptrace_notify(SIGTRAP | ((current->ptrace & PT_TRACESYSGOOD) &&
@@ -271,4 +277,11 @@ asmlinkage void do_syscall_trace(void)
271 send_sig(tsk->exit_code, tsk, 1); 277 send_sig(tsk->exit_code, tsk, 1);
272 tsk->exit_code = 0; 278 tsk->exit_code = 0;
273 } 279 }
280
281out:
282 if (unlikely(current->audit_context) && !entryexit)
283 audit_syscall_entry(AUDIT_ARCH_SH, regs->regs[3],
284 regs->regs[4], regs->regs[5],
285 regs->regs[6], regs->regs[7]);
286
274} 287}
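
The 0x80 bit in the ptrace_notify() call is only reported when the tracer has requested PT_TRACESYSGOOD. A hedged tracer-side sketch using the standard ptrace(2) API, nothing SH-specific, where pid is assumed to be an already-attached tracee:

#include <sys/types.h>
#include <sys/ptrace.h>
#include <sys/wait.h>
#include <signal.h>

/* Resume the tracee and wait until it stops at a syscall boundary. */
static int wait_for_syscall_stop(pid_t pid)
{
	int status;

	ptrace(PTRACE_SETOPTIONS, pid, 0, PTRACE_O_TRACESYSGOOD);
	for (;;) {
		ptrace(PTRACE_SYSCALL, pid, 0, 0);
		if (waitpid(pid, &status, 0) < 0)
			return -1;
		/* SIGTRAP | 0x80 marks a syscall stop, not a real SIGTRAP. */
		if (WIFSTOPPED(status) &&
		    WSTOPSIG(status) == (SIGTRAP | 0x80))
			return 0;
	}
}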
diff --git a/arch/sh/kernel/ptrace_64.c b/arch/sh/kernel/ptrace_64.c
new file mode 100644
index 000000000000..f6fbdfa6876d
--- /dev/null
+++ b/arch/sh/kernel/ptrace_64.c
@@ -0,0 +1,341 @@
1/*
2 * arch/sh/kernel/ptrace_64.c
3 *
4 * Copyright (C) 2000, 2001 Paolo Alberelli
5 * Copyright (C) 2003 - 2007 Paul Mundt
6 *
7 * Started from SH3/4 version:
8 * SuperH version: Copyright (C) 1999, 2000 Kaz Kojima & Niibe Yutaka
9 *
10 * Original x86 implementation:
11 * By Ross Biro 1/23/92
12 * edited by Linus Torvalds
13 *
14 * This file is subject to the terms and conditions of the GNU General Public
15 * License. See the file "COPYING" in the main directory of this archive
16 * for more details.
17 */
18#include <linux/kernel.h>
19#include <linux/rwsem.h>
20#include <linux/sched.h>
21#include <linux/mm.h>
22#include <linux/smp.h>
23#include <linux/smp_lock.h>
24#include <linux/errno.h>
25#include <linux/ptrace.h>
26#include <linux/user.h>
27#include <linux/signal.h>
28#include <linux/syscalls.h>
29#include <linux/audit.h>
30#include <asm/io.h>
31#include <asm/uaccess.h>
32#include <asm/pgtable.h>
33#include <asm/system.h>
34#include <asm/processor.h>
35#include <asm/mmu_context.h>
36
37/* This mask defines the bits of the SR which the user is not allowed to
38 change, which are everything except S, Q, M, PR, SZ, FR. */
39#define SR_MASK (0xffff8cfd)
40
41/*
42 * This does not yet catch signals sent when the child dies;
43 * that is handled in exit.c or in signal.c.
44 */
45
46/*
47 * This routine will get a word from the user area in the process kernel stack.
48 */
49static inline int get_stack_long(struct task_struct *task, int offset)
50{
51 unsigned char *stack;
52
53 stack = (unsigned char *)(task->thread.uregs);
54 stack += offset;
55 return (*((int *)stack));
56}
57
58static inline unsigned long
59get_fpu_long(struct task_struct *task, unsigned long addr)
60{
61 unsigned long tmp;
62 struct pt_regs *regs;
63 regs = (struct pt_regs*)((unsigned char *)task + THREAD_SIZE) - 1;
64
65 if (!tsk_used_math(task)) {
66 if (addr == offsetof(struct user_fpu_struct, fpscr)) {
67 tmp = FPSCR_INIT;
68 } else {
69 tmp = 0xffffffffUL; /* matches initial value in fpu.c */
70 }
71 return tmp;
72 }
73
74 if (last_task_used_math == task) {
75 enable_fpu();
76 save_fpu(task, regs);
77 disable_fpu();
78		last_task_used_math = NULL;
79 regs->sr |= SR_FD;
80 }
81
82 tmp = ((long *)&task->thread.fpu)[addr / sizeof(unsigned long)];
83 return tmp;
84}
85
86/*
87 * This routine will put a word into the user area in the process kernel stack.
88 */
89static inline int put_stack_long(struct task_struct *task, int offset,
90 unsigned long data)
91{
92 unsigned char *stack;
93
94 stack = (unsigned char *)(task->thread.uregs);
95 stack += offset;
96 *(unsigned long *) stack = data;
97 return 0;
98}
99
100static inline int
101put_fpu_long(struct task_struct *task, unsigned long addr, unsigned long data)
102{
103 struct pt_regs *regs;
104
105 regs = (struct pt_regs*)((unsigned char *)task + THREAD_SIZE) - 1;
106
107 if (!tsk_used_math(task)) {
108 fpinit(&task->thread.fpu.hard);
109 set_stopped_child_used_math(task);
110 } else if (last_task_used_math == task) {
111 enable_fpu();
112 save_fpu(task, regs);
113 disable_fpu();
114		last_task_used_math = NULL;
115 regs->sr |= SR_FD;
116 }
117
118 ((long *)&task->thread.fpu)[addr / sizeof(unsigned long)] = data;
119 return 0;
120}
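
get_fpu_long() and put_fpu_long() open-code the same lazy-FPU flush: if the child still owns the FPU, its live registers must be spilled back to task->thread.fpu before a word there can be read or patched. A hedged sketch of the shared pattern (flush_child_fpu() is a hypothetical name for illustration, not an existing kernel API):

/* Hypothetical helper; mirrors the open-coded sequence above. */
static void flush_child_fpu(struct task_struct *task, struct pt_regs *regs)
{
	if (last_task_used_math != task)
		return;			/* state already in task->thread.fpu */

	enable_fpu();
	save_fpu(task, regs);		/* spill live FPU registers */
	disable_fpu();
	last_task_used_math = NULL;
	regs->sr |= SR_FD;		/* next FPU use will re-fault */
}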
121
122
123long arch_ptrace(struct task_struct *child, long request, long addr, long data)
124{
125 int ret;
126
127 switch (request) {
128 /* when I and D space are separate, these will need to be fixed. */
129 case PTRACE_PEEKTEXT: /* read word at location addr. */
130 case PTRACE_PEEKDATA:
131 ret = generic_ptrace_peekdata(child, addr, data);
132 break;
133
134 /* read the word at location addr in the USER area. */
135 case PTRACE_PEEKUSR: {
136 unsigned long tmp;
137
138 ret = -EIO;
139 if ((addr & 3) || addr < 0)
140 break;
141
142 if (addr < sizeof(struct pt_regs))
143 tmp = get_stack_long(child, addr);
144 else if ((addr >= offsetof(struct user, fpu)) &&
145 (addr < offsetof(struct user, u_fpvalid))) {
146 tmp = get_fpu_long(child, addr - offsetof(struct user, fpu));
147 } else if (addr == offsetof(struct user, u_fpvalid)) {
148 tmp = !!tsk_used_math(child);
149 } else {
150 break;
151 }
152 ret = put_user(tmp, (unsigned long *)data);
153 break;
154 }
155
156 /* when I and D space are separate, this will have to be fixed. */
157 case PTRACE_POKETEXT: /* write the word at location addr. */
158 case PTRACE_POKEDATA:
159 ret = generic_ptrace_pokedata(child, addr, data);
160 break;
161
162 case PTRACE_POKEUSR:
163 /* write the word at location addr in the USER area. We must
164 disallow any changes to certain SR bits or u_fpvalid, since
165 this could crash the kernel or result in a security
166 loophole. */
167 ret = -EIO;
168 if ((addr & 3) || addr < 0)
169 break;
170
171 if (addr < sizeof(struct pt_regs)) {
172 /* Ignore change of top 32 bits of SR */
173 if (addr == offsetof (struct pt_regs, sr)+4)
174 {
175 ret = 0;
176 break;
177 }
178 /* If lower 32 bits of SR, ignore non-user bits */
179 if (addr == offsetof (struct pt_regs, sr))
180 {
181 long cursr = get_stack_long(child, addr);
182 data &= ~(SR_MASK);
183 data |= (cursr & SR_MASK);
184 }
185 ret = put_stack_long(child, addr, data);
186 }
187 else if ((addr >= offsetof(struct user, fpu)) &&
188 (addr < offsetof(struct user, u_fpvalid))) {
189 ret = put_fpu_long(child, addr - offsetof(struct user, fpu), data);
190 }
191 break;
192
193 case PTRACE_SYSCALL: /* continue and stop at next (return from) syscall */
194 case PTRACE_CONT: { /* restart after signal. */
195 ret = -EIO;
196 if (!valid_signal(data))
197 break;
198 if (request == PTRACE_SYSCALL)
199 set_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
200 else
201 clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
202 child->exit_code = data;
203 wake_up_process(child);
204 ret = 0;
205 break;
206 }
207
208/*
209 * Make the child exit. The best we can do is send it a SIGKILL.
210 * Perhaps it should be put in the status that it wants to
211 * exit.
212 */
213 case PTRACE_KILL: {
214 ret = 0;
215 if (child->exit_state == EXIT_ZOMBIE) /* already dead */
216 break;
217 child->exit_code = SIGKILL;
218 wake_up_process(child);
219 break;
220 }
221
222 case PTRACE_SINGLESTEP: { /* set the trap flag. */
223 struct pt_regs *regs;
224
225 ret = -EIO;
226 if (!valid_signal(data))
227 break;
228 clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
229 if ((child->ptrace & PT_DTRACE) == 0) {
230 /* Spurious delayed TF traps may occur */
231 child->ptrace |= PT_DTRACE;
232 }
233
234 regs = child->thread.uregs;
235
236 regs->sr |= SR_SSTEP; /* auto-resetting upon exception */
237
238 child->exit_code = data;
239 /* give it a chance to run. */
240 wake_up_process(child);
241 ret = 0;
242 break;
243 }
244
245 default:
246 ret = ptrace_request(child, request, addr, data);
247 break;
248 }
249 return ret;
250}
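
From user space, the USER-area offsets decoded above map directly onto offsetof() into struct user. A hedged sketch that reads the stopped child's FPSCR via PTRACE_PEEKUSR, assuming the SH-5 struct user and struct user_fpu_struct layouts are visible through <sys/user.h>:

#include <sys/types.h>
#include <sys/ptrace.h>
#include <sys/user.h>
#include <stddef.h>

static long peek_fpscr(pid_t pid)
{
	long off = offsetof(struct user, fpu) +
		   offsetof(struct user_fpu_struct, fpscr);

	/* The kernel returns -EIO for offsets that are not word-aligned. */
	return ptrace(PTRACE_PEEKUSR, pid, (void *)off, 0);
}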
251
252asmlinkage int sh64_ptrace(long request, long pid, long addr, long data)
253{
254#define WPC_DBRMODE 0x0d104008
255 static int first_call = 1;
256
257 lock_kernel();
258 if (first_call) {
259 /* Set WPC.DBRMODE to 0. This makes all debug events get
260 * delivered through RESVEC, i.e. into the handlers in entry.S.
261 * (If the kernel was downloaded using a remote gdb, WPC.DBRMODE
262 * would normally be left set to 1, which makes debug events get
263 * delivered through DBRVEC, i.e. into the remote gdb's
264 * handlers. This prevents ptrace getting them, and confuses
265 * the remote gdb.) */
266		printk(KERN_NOTICE "DBRMODE set to 0 to permit native debugging\n");
267 poke_real_address_q(WPC_DBRMODE, 0);
268 first_call = 0;
269 }
270 unlock_kernel();
271
272 return sys_ptrace(request, pid, addr, data);
273}
274
275asmlinkage void syscall_trace(struct pt_regs *regs, int entryexit)
276{
277 struct task_struct *tsk = current;
278
279 if (unlikely(current->audit_context) && entryexit)
280 audit_syscall_exit(AUDITSC_RESULT(regs->regs[9]),
281 regs->regs[9]);
282
283 if (!test_thread_flag(TIF_SYSCALL_TRACE) &&
284 !test_thread_flag(TIF_SINGLESTEP))
285 goto out;
286 if (!(tsk->ptrace & PT_PTRACED))
287 goto out;
288
289 ptrace_notify(SIGTRAP | ((current->ptrace & PT_TRACESYSGOOD) &&
290 !test_thread_flag(TIF_SINGLESTEP) ? 0x80 : 0));
291
292 /*
293 * this isn't the same as continuing with a signal, but it will do
294 * for normal use. strace only continues with a signal if the
295 * stopping signal is not SIGTRAP. -brl
296 */
297 if (tsk->exit_code) {
298 send_sig(tsk->exit_code, tsk, 1);
299 tsk->exit_code = 0;
300 }
301
302out:
303 if (unlikely(current->audit_context) && !entryexit)
304 audit_syscall_entry(AUDIT_ARCH_SH, regs->regs[1],
305 regs->regs[2], regs->regs[3],
306 regs->regs[4], regs->regs[5]);
307}
308
309/* Called with interrupts disabled */
310asmlinkage void do_single_step(unsigned long long vec, struct pt_regs *regs)
311{
312 /* This is called after a single step exception (DEBUGSS).
313	   There is no need to change the PC: DEBUGSS is a post-execution
314	   exception, and entry.S does not do anything to the PC for it.
315 We need to clear the Single Step setting in SR to avoid
316 continually stepping. */
317 local_irq_enable();
318 regs->sr &= ~SR_SSTEP;
319 force_sig(SIGTRAP, current);
320}
321
322/* Called with interrupts disabled */
323asmlinkage void do_software_break_point(unsigned long long vec,
324 struct pt_regs *regs)
325{
326 /* We need to forward step the PC, to counteract the backstep done
327 in signal.c. */
328 local_irq_enable();
329 force_sig(SIGTRAP, current);
330 regs->pc += 4;
331}
332
333/*
334 * Called by kernel/ptrace.c when detaching..
335 *
336 * Make sure single step bits etc are not set.
337 */
338void ptrace_disable(struct task_struct *child)
339{
340 /* nothing to do.. */
341}
diff --git a/arch/sh/kernel/setup.c b/arch/sh/kernel/setup.c
index 4156aac8c27d..855cdf9d85b1 100644
--- a/arch/sh/kernel/setup.c
+++ b/arch/sh/kernel/setup.c
@@ -26,6 +26,7 @@
26#include <asm/uaccess.h> 26#include <asm/uaccess.h>
27#include <asm/io.h> 27#include <asm/io.h>
28#include <asm/page.h> 28#include <asm/page.h>
29#include <asm/elf.h>
29#include <asm/sections.h> 30#include <asm/sections.h>
30#include <asm/irq.h> 31#include <asm/irq.h>
31#include <asm/setup.h> 32#include <asm/setup.h>
@@ -78,12 +79,25 @@ EXPORT_SYMBOL(memory_start);
78unsigned long memory_end = 0; 79unsigned long memory_end = 0;
79EXPORT_SYMBOL(memory_end); 80EXPORT_SYMBOL(memory_end);
80 81
82int l1i_cache_shape, l1d_cache_shape, l2_cache_shape;
83
81static int __init early_parse_mem(char *p) 84static int __init early_parse_mem(char *p)
82{ 85{
83 unsigned long size; 86 unsigned long size;
84 87
85 memory_start = (unsigned long)PAGE_OFFSET+__MEMORY_START; 88 memory_start = (unsigned long)__va(__MEMORY_START);
86 size = memparse(p, &p); 89 size = memparse(p, &p);
90
91 if (size > __MEMORY_SIZE) {
92 static char msg[] __initdata = KERN_ERR
93 "Using mem= to increase the size of kernel memory "
94 "is not allowed.\n"
95 " Recompile the kernel with the correct value for "
96 "CONFIG_MEMORY_SIZE.\n";
97 printk(msg);
98 return 0;
99 }
100
87 memory_end = memory_start + size; 101 memory_end = memory_start + size;
88 102
89 return 0; 103 return 0;
@@ -243,7 +257,7 @@ void __init setup_arch(char **cmdline_p)
243 data_resource.start = virt_to_phys(_etext); 257 data_resource.start = virt_to_phys(_etext);
244 data_resource.end = virt_to_phys(_edata)-1; 258 data_resource.end = virt_to_phys(_edata)-1;
245 259
246 memory_start = (unsigned long)PAGE_OFFSET+__MEMORY_START; 260 memory_start = (unsigned long)__va(__MEMORY_START);
247 if (!memory_end) 261 if (!memory_end)
248 memory_end = memory_start + __MEMORY_SIZE; 262 memory_end = memory_start + __MEMORY_SIZE;
249 263
@@ -294,20 +308,23 @@ void __init setup_arch(char **cmdline_p)
294} 308}
295 309
296static const char *cpu_name[] = { 310static const char *cpu_name[] = {
311 [CPU_SH7203] = "SH7203", [CPU_SH7263] = "SH7263",
297 [CPU_SH7206] = "SH7206", [CPU_SH7619] = "SH7619", 312 [CPU_SH7206] = "SH7206", [CPU_SH7619] = "SH7619",
298 [CPU_SH7705] = "SH7705", [CPU_SH7706] = "SH7706", 313 [CPU_SH7705] = "SH7705", [CPU_SH7706] = "SH7706",
299 [CPU_SH7707] = "SH7707", [CPU_SH7708] = "SH7708", 314 [CPU_SH7707] = "SH7707", [CPU_SH7708] = "SH7708",
300 [CPU_SH7709] = "SH7709", [CPU_SH7710] = "SH7710", 315 [CPU_SH7709] = "SH7709", [CPU_SH7710] = "SH7710",
301 [CPU_SH7712] = "SH7712", [CPU_SH7720] = "SH7720", 316 [CPU_SH7712] = "SH7712", [CPU_SH7720] = "SH7720",
302 [CPU_SH7729] = "SH7729", [CPU_SH7750] = "SH7750", 317 [CPU_SH7721] = "SH7721", [CPU_SH7729] = "SH7729",
303 [CPU_SH7750S] = "SH7750S", [CPU_SH7750R] = "SH7750R", 318 [CPU_SH7750] = "SH7750", [CPU_SH7750S] = "SH7750S",
304 [CPU_SH7751] = "SH7751", [CPU_SH7751R] = "SH7751R", 319 [CPU_SH7750R] = "SH7750R", [CPU_SH7751] = "SH7751",
305 [CPU_SH7760] = "SH7760", 320 [CPU_SH7751R] = "SH7751R", [CPU_SH7760] = "SH7760",
306 [CPU_SH4_202] = "SH4-202", [CPU_SH4_501] = "SH4-501", 321 [CPU_SH4_202] = "SH4-202", [CPU_SH4_501] = "SH4-501",
307 [CPU_SH7770] = "SH7770", [CPU_SH7780] = "SH7780", 322 [CPU_SH7763] = "SH7763", [CPU_SH7770] = "SH7770",
308 [CPU_SH7781] = "SH7781", [CPU_SH7343] = "SH7343", 323 [CPU_SH7780] = "SH7780", [CPU_SH7781] = "SH7781",
309 [CPU_SH7785] = "SH7785", [CPU_SH7722] = "SH7722", 324 [CPU_SH7343] = "SH7343", [CPU_SH7785] = "SH7785",
310 [CPU_SHX3] = "SH-X3", [CPU_SH_NONE] = "Unknown" 325 [CPU_SH7722] = "SH7722", [CPU_SHX3] = "SH-X3",
326 [CPU_SH5_101] = "SH5-101", [CPU_SH5_103] = "SH5-103",
327 [CPU_SH_NONE] = "Unknown"
311}; 328};
312 329
313const char *get_cpu_subtype(struct sh_cpuinfo *c) 330const char *get_cpu_subtype(struct sh_cpuinfo *c)
@@ -410,7 +427,7 @@ static void *c_next(struct seq_file *m, void *v, loff_t *pos)
410static void c_stop(struct seq_file *m, void *v) 427static void c_stop(struct seq_file *m, void *v)
411{ 428{
412} 429}
413struct seq_operations cpuinfo_op = { 430const struct seq_operations cpuinfo_op = {
414 .start = c_start, 431 .start = c_start,
415 .next = c_next, 432 .next = c_next,
416 .stop = c_stop, 433 .stop = c_stop,
diff --git a/arch/sh/kernel/sh_ksyms.c b/arch/sh/kernel/sh_ksyms_32.c
index e1a6de9088b5..e1a6de9088b5 100644
--- a/arch/sh/kernel/sh_ksyms.c
+++ b/arch/sh/kernel/sh_ksyms_32.c
diff --git a/arch/sh/kernel/sh_ksyms_64.c b/arch/sh/kernel/sh_ksyms_64.c
new file mode 100644
index 000000000000..8004c38d3d37
--- /dev/null
+++ b/arch/sh/kernel/sh_ksyms_64.c
@@ -0,0 +1,55 @@
1/*
2 * arch/sh/kernel/sh_ksyms_64.c
3 *
4 * Copyright (C) 2000, 2001 Paolo Alberelli
5 *
6 * This file is subject to the terms and conditions of the GNU General Public
7 * License. See the file "COPYING" in the main directory of this archive
8 * for more details.
9 */
10#include <linux/rwsem.h>
11#include <linux/module.h>
12#include <linux/smp.h>
13#include <linux/user.h>
14#include <linux/elfcore.h>
15#include <linux/sched.h>
16#include <linux/in6.h>
17#include <linux/interrupt.h>
18#include <linux/screen_info.h>
19#include <asm/semaphore.h>
20#include <asm/processor.h>
21#include <asm/uaccess.h>
22#include <asm/checksum.h>
23#include <asm/io.h>
24#include <asm/delay.h>
25#include <asm/irq.h>
26
27extern int dump_fpu(struct pt_regs *, elf_fpregset_t *);
28
29/* platform dependent support */
30EXPORT_SYMBOL(dump_fpu);
31EXPORT_SYMBOL(kernel_thread);
32
33/* Networking helper routines. */
34EXPORT_SYMBOL(csum_partial_copy_nocheck);
35
36#ifdef CONFIG_VT
37EXPORT_SYMBOL(screen_info);
38#endif
39
40EXPORT_SYMBOL(__down);
41EXPORT_SYMBOL(__down_trylock);
42EXPORT_SYMBOL(__up);
43EXPORT_SYMBOL(__put_user_asm_l);
44EXPORT_SYMBOL(__get_user_asm_l);
45EXPORT_SYMBOL(__copy_user);
46EXPORT_SYMBOL(memcpy);
47EXPORT_SYMBOL(__udelay);
48EXPORT_SYMBOL(__ndelay);
49
50/* Ugh. These come in from libgcc.a at link time. */
51#define DECLARE_EXPORT(name) extern void name(void); EXPORT_SYMBOL(name)
52
53DECLARE_EXPORT(__sdivsi3);
54DECLARE_EXPORT(__muldi3);
55DECLARE_EXPORT(__udivsi3);
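
For reference, DECLARE_EXPORT() just pairs a dummy forward declaration with the export so that EXPORT_SYMBOL() has a declaration in scope; the first invocation above expands to:

extern void __sdivsi3(void);
EXPORT_SYMBOL(__sdivsi3);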
diff --git a/arch/sh/kernel/signal.c b/arch/sh/kernel/signal_32.c
index ca754fd42437..f6b5fbfe75c4 100644
--- a/arch/sh/kernel/signal.c
+++ b/arch/sh/kernel/signal_32.c
@@ -507,24 +507,6 @@ handle_signal(unsigned long sig, struct k_sigaction *ka, siginfo_t *info,
507 ctrl_inw(regs->pc - 4)); 507 ctrl_inw(regs->pc - 4));
508 break; 508 break;
509 } 509 }
510#ifdef CONFIG_GUSA
511 } else {
512 /* gUSA handling */
513 preempt_disable();
514
515 if (regs->regs[15] >= 0xc0000000) {
516 int offset = (int)regs->regs[15];
517
518 /* Reset stack pointer: clear critical region mark */
519 regs->regs[15] = regs->regs[1];
520 if (regs->pc < regs->regs[0])
521 /* Go to rewind point #1 */
522 regs->pc = regs->regs[0] + offset -
523 instruction_size(ctrl_inw(regs->pc-4));
524 }
525
526 preempt_enable_no_resched();
527#endif
528 } 510 }
529 511
530 /* Set up the stack frame */ 512 /* Set up the stack frame */
diff --git a/arch/sh/kernel/signal_64.c b/arch/sh/kernel/signal_64.c
new file mode 100644
index 000000000000..80bde19d445b
--- /dev/null
+++ b/arch/sh/kernel/signal_64.c
@@ -0,0 +1,751 @@
1/*
2 * arch/sh/kernel/signal_64.c
3 *
4 * Copyright (C) 2000, 2001 Paolo Alberelli
5 * Copyright (C) 2003 Paul Mundt
6 * Copyright (C) 2004 Richard Curnow
7 *
8 * This file is subject to the terms and conditions of the GNU General Public
9 * License. See the file "COPYING" in the main directory of this archive
10 * for more details.
11 */
12#include <linux/rwsem.h>
13#include <linux/sched.h>
14#include <linux/mm.h>
15#include <linux/smp.h>
16#include <linux/kernel.h>
17#include <linux/signal.h>
18#include <linux/errno.h>
19#include <linux/wait.h>
20#include <linux/personality.h>
21#include <linux/freezer.h>
22#include <linux/ptrace.h>
23#include <linux/unistd.h>
24#include <linux/stddef.h>
25#include <asm/ucontext.h>
26#include <asm/uaccess.h>
27#include <asm/pgtable.h>
28#include <asm/cacheflush.h>
29
30#define REG_RET 9
31#define REG_ARG1 2
32#define REG_ARG2 3
33#define REG_ARG3 4
34#define REG_SP 15
35#define REG_PR 18
36#define REF_REG_RET regs->regs[REG_RET]
37#define REF_REG_SP regs->regs[REG_SP]
38#define DEREF_REG_PR regs->regs[REG_PR]
39
40#define DEBUG_SIG 0
41
42#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
43
44asmlinkage int do_signal(struct pt_regs *regs, sigset_t *oldset);
45
46/*
47 * Atomically swap in the new signal mask, and wait for a signal.
48 */
49
50asmlinkage int
51sys_sigsuspend(old_sigset_t mask,
52 unsigned long r3, unsigned long r4, unsigned long r5,
53 unsigned long r6, unsigned long r7,
54 struct pt_regs * regs)
55{
56 sigset_t saveset;
57
58 mask &= _BLOCKABLE;
59 spin_lock_irq(&current->sighand->siglock);
60 saveset = current->blocked;
61 siginitset(&current->blocked, mask);
62 recalc_sigpending();
63 spin_unlock_irq(&current->sighand->siglock);
64
65 REF_REG_RET = -EINTR;
66 while (1) {
67 current->state = TASK_INTERRUPTIBLE;
68 schedule();
69 regs->pc += 4; /* because sys_sigreturn decrements the pc */
70 if (do_signal(regs, &saveset)) {
71 /* pc now points at signal handler. Need to decrement
72 it because entry.S will increment it. */
73 regs->pc -= 4;
74 return -EINTR;
75 }
76 }
77}
78
79asmlinkage int
80sys_rt_sigsuspend(sigset_t *unewset, size_t sigsetsize,
81 unsigned long r4, unsigned long r5, unsigned long r6,
82 unsigned long r7,
83 struct pt_regs * regs)
84{
85 sigset_t saveset, newset;
86
87 /* XXX: Don't preclude handling different sized sigset_t's. */
88 if (sigsetsize != sizeof(sigset_t))
89 return -EINVAL;
90
91 if (copy_from_user(&newset, unewset, sizeof(newset)))
92 return -EFAULT;
93 sigdelsetmask(&newset, ~_BLOCKABLE);
94 spin_lock_irq(&current->sighand->siglock);
95 saveset = current->blocked;
96 current->blocked = newset;
97 recalc_sigpending();
98 spin_unlock_irq(&current->sighand->siglock);
99
100 REF_REG_RET = -EINTR;
101 while (1) {
102 current->state = TASK_INTERRUPTIBLE;
103 schedule();
104 regs->pc += 4; /* because sys_sigreturn decrements the pc */
105 if (do_signal(regs, &saveset)) {
106 /* pc now points at signal handler. Need to decrement
107 it because entry.S will increment it. */
108 regs->pc -= 4;
109 return -EINTR;
110 }
111 }
112}
113
114asmlinkage int
115sys_sigaction(int sig, const struct old_sigaction __user *act,
116 struct old_sigaction __user *oact)
117{
118 struct k_sigaction new_ka, old_ka;
119 int ret;
120
121 if (act) {
122 old_sigset_t mask;
123 if (!access_ok(VERIFY_READ, act, sizeof(*act)) ||
124 __get_user(new_ka.sa.sa_handler, &act->sa_handler) ||
125 __get_user(new_ka.sa.sa_restorer, &act->sa_restorer))
126 return -EFAULT;
127 __get_user(new_ka.sa.sa_flags, &act->sa_flags);
128 __get_user(mask, &act->sa_mask);
129 siginitset(&new_ka.sa.sa_mask, mask);
130 }
131
132 ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
133
134 if (!ret && oact) {
135 if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) ||
136 __put_user(old_ka.sa.sa_handler, &oact->sa_handler) ||
137 __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer))
138 return -EFAULT;
139 __put_user(old_ka.sa.sa_flags, &oact->sa_flags);
140 __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask);
141 }
142
143 return ret;
144}
145
146asmlinkage int
147sys_sigaltstack(const stack_t __user *uss, stack_t __user *uoss,
148 unsigned long r4, unsigned long r5, unsigned long r6,
149 unsigned long r7,
150 struct pt_regs * regs)
151{
152 return do_sigaltstack(uss, uoss, REF_REG_SP);
153}
154
155
156/*
157 * Do a signal return; undo the signal stack.
158 */
159
160struct sigframe
161{
162 struct sigcontext sc;
163 unsigned long extramask[_NSIG_WORDS-1];
164 long long retcode[2];
165};
166
167struct rt_sigframe
168{
169 struct siginfo __user *pinfo;
170 void *puc;
171 struct siginfo info;
172 struct ucontext uc;
173 long long retcode[2];
174};
175
176#ifdef CONFIG_SH_FPU
177static inline int
178restore_sigcontext_fpu(struct pt_regs *regs, struct sigcontext __user *sc)
179{
180 int err = 0;
181 int fpvalid;
182
183 err |= __get_user (fpvalid, &sc->sc_fpvalid);
184 conditional_used_math(fpvalid);
185	if (!fpvalid)
186 return err;
187
188 if (current == last_task_used_math) {
189 last_task_used_math = NULL;
190 regs->sr |= SR_FD;
191 }
192
193 err |= __copy_from_user(&current->thread.fpu.hard, &sc->sc_fpregs[0],
194 (sizeof(long long) * 32) + (sizeof(int) * 1));
195
196 return err;
197}
198
199static inline int
200setup_sigcontext_fpu(struct pt_regs *regs, struct sigcontext __user *sc)
201{
202 int err = 0;
203 int fpvalid;
204
205 fpvalid = !!used_math();
206 err |= __put_user(fpvalid, &sc->sc_fpvalid);
207	if (!fpvalid)
208 return err;
209
210 if (current == last_task_used_math) {
211 enable_fpu();
212 save_fpu(current, regs);
213 disable_fpu();
214 last_task_used_math = NULL;
215 regs->sr |= SR_FD;
216 }
217
218 err |= __copy_to_user(&sc->sc_fpregs[0], &current->thread.fpu.hard,
219 (sizeof(long long) * 32) + (sizeof(int) * 1));
220 clear_used_math();
221
222 return err;
223}
224#else
225static inline int
226restore_sigcontext_fpu(struct pt_regs *regs, struct sigcontext __user *sc)
227{
228 return 0;
229}
230static inline int
231setup_sigcontext_fpu(struct pt_regs *regs, struct sigcontext __user *sc)
232{
233 return 0;
234}
235#endif
236
237static int
238restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc, long long *r2_p)
239{
240 unsigned int err = 0;
241 unsigned long long current_sr, new_sr;
242#define SR_MASK 0xffff8cfd
243
244#define COPY(x) err |= __get_user(regs->x, &sc->sc_##x)
245
246 COPY(regs[0]); COPY(regs[1]); COPY(regs[2]); COPY(regs[3]);
247 COPY(regs[4]); COPY(regs[5]); COPY(regs[6]); COPY(regs[7]);
248 COPY(regs[8]); COPY(regs[9]); COPY(regs[10]); COPY(regs[11]);
249 COPY(regs[12]); COPY(regs[13]); COPY(regs[14]); COPY(regs[15]);
250 COPY(regs[16]); COPY(regs[17]); COPY(regs[18]); COPY(regs[19]);
251 COPY(regs[20]); COPY(regs[21]); COPY(regs[22]); COPY(regs[23]);
252 COPY(regs[24]); COPY(regs[25]); COPY(regs[26]); COPY(regs[27]);
253 COPY(regs[28]); COPY(regs[29]); COPY(regs[30]); COPY(regs[31]);
254 COPY(regs[32]); COPY(regs[33]); COPY(regs[34]); COPY(regs[35]);
255 COPY(regs[36]); COPY(regs[37]); COPY(regs[38]); COPY(regs[39]);
256 COPY(regs[40]); COPY(regs[41]); COPY(regs[42]); COPY(regs[43]);
257 COPY(regs[44]); COPY(regs[45]); COPY(regs[46]); COPY(regs[47]);
258 COPY(regs[48]); COPY(regs[49]); COPY(regs[50]); COPY(regs[51]);
259 COPY(regs[52]); COPY(regs[53]); COPY(regs[54]); COPY(regs[55]);
260 COPY(regs[56]); COPY(regs[57]); COPY(regs[58]); COPY(regs[59]);
261 COPY(regs[60]); COPY(regs[61]); COPY(regs[62]);
262 COPY(tregs[0]); COPY(tregs[1]); COPY(tregs[2]); COPY(tregs[3]);
263 COPY(tregs[4]); COPY(tregs[5]); COPY(tregs[6]); COPY(tregs[7]);
264
265	/* Prevent the signal handler from manipulating SR in a way that can
266	   crash the kernel, i.e. only allow S, Q, M, PR, SZ, FR to be
267	   modified */
268 current_sr = regs->sr;
269 err |= __get_user(new_sr, &sc->sc_sr);
270 regs->sr &= SR_MASK;
271 regs->sr |= (new_sr & ~SR_MASK);
272
273 COPY(pc);
274
275#undef COPY
276
277 /* Must do this last in case it sets regs->sr.fd (i.e. after rest of sr
278 * has been restored above.) */
279 err |= restore_sigcontext_fpu(regs, sc);
280
281 regs->syscall_nr = -1; /* disable syscall checks */
282 err |= __get_user(*r2_p, &sc->sc_regs[REG_RET]);
283 return err;
284}
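
The SR merge above is worth a worked example: SR_MASK selects the privileged bits, so only the bits clear in the mask (S, Q, M, PR, SZ, FR) can be influenced by the signal frame. A minimal sketch of the same arithmetic:

#define SR_MASK 0xffff8cfd		/* same mask as above */

/* Mirror of the merge in restore_sigcontext(). */
static unsigned long long merge_sr(unsigned long long cur_sr,
				   unsigned long long new_sr)
{
	return (cur_sr & SR_MASK) | (new_sr & ~SR_MASK);
}

/*
 * With SR_MASK = 0xffff8cfd, ~SR_MASK = 0x00007302; even if a handler
 * stores new_sr = ~0ULL in its frame, only the bits in 0x7302 change.
 */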
285
286asmlinkage int sys_sigreturn(unsigned long r2, unsigned long r3,
287 unsigned long r4, unsigned long r5,
288 unsigned long r6, unsigned long r7,
289 struct pt_regs * regs)
290{
291 struct sigframe __user *frame = (struct sigframe __user *) (long) REF_REG_SP;
292 sigset_t set;
293 long long ret;
294
295 if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
296 goto badframe;
297
298 if (__get_user(set.sig[0], &frame->sc.oldmask)
299 || (_NSIG_WORDS > 1
300 && __copy_from_user(&set.sig[1], &frame->extramask,
301 sizeof(frame->extramask))))
302 goto badframe;
303
304 sigdelsetmask(&set, ~_BLOCKABLE);
305
306 spin_lock_irq(&current->sighand->siglock);
307 current->blocked = set;
308 recalc_sigpending();
309 spin_unlock_irq(&current->sighand->siglock);
310
311 if (restore_sigcontext(regs, &frame->sc, &ret))
312 goto badframe;
313 regs->pc -= 4;
314
315 return (int) ret;
316
317badframe:
318 force_sig(SIGSEGV, current);
319 return 0;
320}
321
322asmlinkage int sys_rt_sigreturn(unsigned long r2, unsigned long r3,
323 unsigned long r4, unsigned long r5,
324 unsigned long r6, unsigned long r7,
325 struct pt_regs * regs)
326{
327 struct rt_sigframe __user *frame = (struct rt_sigframe __user *) (long) REF_REG_SP;
328 sigset_t set;
329 stack_t __user st;
330 long long ret;
331
332 if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
333 goto badframe;
334
335 if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set)))
336 goto badframe;
337
338 sigdelsetmask(&set, ~_BLOCKABLE);
339 spin_lock_irq(&current->sighand->siglock);
340 current->blocked = set;
341 recalc_sigpending();
342 spin_unlock_irq(&current->sighand->siglock);
343
344 if (restore_sigcontext(regs, &frame->uc.uc_mcontext, &ret))
345 goto badframe;
346 regs->pc -= 4;
347
348 if (__copy_from_user(&st, &frame->uc.uc_stack, sizeof(st)))
349 goto badframe;
350 /* It is more difficult to avoid calling this function than to
351 call it and ignore errors. */
352 do_sigaltstack(&st, NULL, REF_REG_SP);
353
354 return (int) ret;
355
356badframe:
357 force_sig(SIGSEGV, current);
358 return 0;
359}
360
361/*
362 * Set up a signal frame.
363 */
364
365static int
366setup_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs,
367 unsigned long mask)
368{
369 int err = 0;
370
371	/* Do this first; otherwise, if this sets sr->fd, that value isn't preserved. */
372 err |= setup_sigcontext_fpu(regs, sc);
373
374#define COPY(x) err |= __put_user(regs->x, &sc->sc_##x)
375
376 COPY(regs[0]); COPY(regs[1]); COPY(regs[2]); COPY(regs[3]);
377 COPY(regs[4]); COPY(regs[5]); COPY(regs[6]); COPY(regs[7]);
378 COPY(regs[8]); COPY(regs[9]); COPY(regs[10]); COPY(regs[11]);
379 COPY(regs[12]); COPY(regs[13]); COPY(regs[14]); COPY(regs[15]);
380 COPY(regs[16]); COPY(regs[17]); COPY(regs[18]); COPY(regs[19]);
381 COPY(regs[20]); COPY(regs[21]); COPY(regs[22]); COPY(regs[23]);
382 COPY(regs[24]); COPY(regs[25]); COPY(regs[26]); COPY(regs[27]);
383 COPY(regs[28]); COPY(regs[29]); COPY(regs[30]); COPY(regs[31]);
384 COPY(regs[32]); COPY(regs[33]); COPY(regs[34]); COPY(regs[35]);
385 COPY(regs[36]); COPY(regs[37]); COPY(regs[38]); COPY(regs[39]);
386 COPY(regs[40]); COPY(regs[41]); COPY(regs[42]); COPY(regs[43]);
387 COPY(regs[44]); COPY(regs[45]); COPY(regs[46]); COPY(regs[47]);
388 COPY(regs[48]); COPY(regs[49]); COPY(regs[50]); COPY(regs[51]);
389 COPY(regs[52]); COPY(regs[53]); COPY(regs[54]); COPY(regs[55]);
390 COPY(regs[56]); COPY(regs[57]); COPY(regs[58]); COPY(regs[59]);
391 COPY(regs[60]); COPY(regs[61]); COPY(regs[62]);
392 COPY(tregs[0]); COPY(tregs[1]); COPY(tregs[2]); COPY(tregs[3]);
393 COPY(tregs[4]); COPY(tregs[5]); COPY(tregs[6]); COPY(tregs[7]);
394 COPY(sr); COPY(pc);
395
396#undef COPY
397
398 err |= __put_user(mask, &sc->oldmask);
399
400 return err;
401}
402
403/*
404 * Determine which stack to use..
405 */
406static inline void __user *
407get_sigframe(struct k_sigaction *ka, unsigned long sp, size_t frame_size)
408{
409	if ((ka->sa.sa_flags & SA_ONSTACK) != 0 && !sas_ss_flags(sp))
410 sp = current->sas_ss_sp + current->sas_ss_size;
411
412 return (void __user *)((sp - frame_size) & -8ul);
413}
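
The trailing "& -8ul" rounds the frame address down to an 8-byte boundary. A worked example of the computation:

/*
 *	sp = 0x7ffffff5, frame_size = 0x68
 *	sp - frame_size       = 0x7fffff8d
 *	0x7fffff8d & -8ul     = 0x7fffff88	(8-byte aligned, rounded down)
 */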
414
415void sa_default_restorer(void); /* See comments below */
416void sa_default_rt_restorer(void); /* See comments below */
417
418static void setup_frame(int sig, struct k_sigaction *ka,
419 sigset_t *set, struct pt_regs *regs)
420{
421 struct sigframe __user *frame;
422 int err = 0;
423 int signal;
424
425 frame = get_sigframe(ka, regs->regs[REG_SP], sizeof(*frame));
426
427 if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
428 goto give_sigsegv;
429
430 signal = current_thread_info()->exec_domain
431 && current_thread_info()->exec_domain->signal_invmap
432 && sig < 32
433 ? current_thread_info()->exec_domain->signal_invmap[sig]
434 : sig;
435
436 err |= setup_sigcontext(&frame->sc, regs, set->sig[0]);
437
438	/* Bail out early on error, as i386 does */
439 if (err)
440 goto give_sigsegv;
441
442	if (_NSIG_WORDS > 1)
443		err |= __copy_to_user(frame->extramask, &set->sig[1],
444				      sizeof(frame->extramask));
445
446	/* Bail out early on error, as i386 does */
447 if (err)
448 goto give_sigsegv;
449
450 /* Set up to return from userspace. If provided, use a stub
451 already in userspace. */
452 if (ka->sa.sa_flags & SA_RESTORER) {
453 DEREF_REG_PR = (unsigned long) ka->sa.sa_restorer | 0x1;
454
455 /*
456 * On SH5 all edited pointers are subject to NEFF
457 */
458 DEREF_REG_PR = (DEREF_REG_PR & NEFF_SIGN) ?
459 (DEREF_REG_PR | NEFF_MASK) : DEREF_REG_PR;
460 } else {
461 /*
462 * Different approach on SH5.
463 * . Endianness independent asm code gets placed in entry.S .
464 * This is limited to four ASM instructions corresponding
465 * to two long longs in size.
466 * . err checking is done on the else branch only
467 * . flush_icache_range() is called upon __put_user() only
468 * . all edited pointers are subject to NEFF
469 * . being code, linker turns ShMedia bit on, always
470 * dereference index -1.
471 */
472 DEREF_REG_PR = (unsigned long) frame->retcode | 0x01;
473 DEREF_REG_PR = (DEREF_REG_PR & NEFF_SIGN) ?
474 (DEREF_REG_PR | NEFF_MASK) : DEREF_REG_PR;
475
476		if (__copy_to_user(frame->retcode, (void *)
477			((unsigned long)sa_default_restorer & ~1UL), 16) != 0)
478 goto give_sigsegv;
479
480 /* Cohere the trampoline with the I-cache. */
481 flush_cache_sigtramp(DEREF_REG_PR-1);
482 }
483
484 /*
485 * Set up registers for signal handler.
486 * All edited pointers are subject to NEFF.
487 */
488 regs->regs[REG_SP] = (unsigned long) frame;
489 regs->regs[REG_SP] = (regs->regs[REG_SP] & NEFF_SIGN) ?
490 (regs->regs[REG_SP] | NEFF_MASK) : regs->regs[REG_SP];
491 regs->regs[REG_ARG1] = signal; /* Arg for signal handler */
492
493 /* FIXME:
494 The glibc profiling support for SH-5 needs to be passed a sigcontext
495 so it can retrieve the PC. At some point during 2003 the glibc
496 support was changed to receive the sigcontext through the 2nd
497 argument, but there are still versions of libc.so in use that use
498 the 3rd argument. Until libc.so is stabilised, pass the sigcontext
499 through both 2nd and 3rd arguments.
500 */
501
502 regs->regs[REG_ARG2] = (unsigned long long)(unsigned long)(signed long)&frame->sc;
503 regs->regs[REG_ARG3] = (unsigned long long)(unsigned long)(signed long)&frame->sc;
504
505 regs->pc = (unsigned long) ka->sa.sa_handler;
506 regs->pc = (regs->pc & NEFF_SIGN) ? (regs->pc | NEFF_MASK) : regs->pc;
507
508 set_fs(USER_DS);
509
510#if DEBUG_SIG
511 /* Broken %016Lx */
512 printk("SIG deliver (#%d,%s:%d): sp=%p pc=%08Lx%08Lx link=%08Lx%08Lx\n",
513 signal,
514 current->comm, current->pid, frame,
515 regs->pc >> 32, regs->pc & 0xffffffff,
516 DEREF_REG_PR >> 32, DEREF_REG_PR & 0xffffffff);
517#endif
518
519 return;
520
521give_sigsegv:
522 force_sigsegv(sig, current);
523}
524
525static void setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
526 sigset_t *set, struct pt_regs *regs)
527{
528 struct rt_sigframe __user *frame;
529 int err = 0;
530 int signal;
531
532 frame = get_sigframe(ka, regs->regs[REG_SP], sizeof(*frame));
533
534 if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
535 goto give_sigsegv;
536
537 signal = current_thread_info()->exec_domain
538 && current_thread_info()->exec_domain->signal_invmap
539 && sig < 32
540 ? current_thread_info()->exec_domain->signal_invmap[sig]
541 : sig;
542
543 err |= __put_user(&frame->info, &frame->pinfo);
544 err |= __put_user(&frame->uc, &frame->puc);
545 err |= copy_siginfo_to_user(&frame->info, info);
546
547	/* Bail out early on error, as i386 does */
548 if (err)
549 goto give_sigsegv;
550
551 /* Create the ucontext. */
552 err |= __put_user(0, &frame->uc.uc_flags);
553 err |= __put_user(0, &frame->uc.uc_link);
554 err |= __put_user((void *)current->sas_ss_sp,
555 &frame->uc.uc_stack.ss_sp);
556 err |= __put_user(sas_ss_flags(regs->regs[REG_SP]),
557 &frame->uc.uc_stack.ss_flags);
558 err |= __put_user(current->sas_ss_size, &frame->uc.uc_stack.ss_size);
559 err |= setup_sigcontext(&frame->uc.uc_mcontext,
560 regs, set->sig[0]);
561 err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
562
563	/* Bail out early on error, as i386 does */
564 if (err)
565 goto give_sigsegv;
566
567 /* Set up to return from userspace. If provided, use a stub
568 already in userspace. */
569 if (ka->sa.sa_flags & SA_RESTORER) {
570 DEREF_REG_PR = (unsigned long) ka->sa.sa_restorer | 0x1;
571
572 /*
573 * On SH5 all edited pointers are subject to NEFF
574 */
575 DEREF_REG_PR = (DEREF_REG_PR & NEFF_SIGN) ?
576 (DEREF_REG_PR | NEFF_MASK) : DEREF_REG_PR;
577 } else {
578 /*
579 * Different approach on SH5.
580 * . Endianness independent asm code gets placed in entry.S .
581 * This is limited to four ASM instructions corresponding
582 * to two long longs in size.
583 * . err checking is done on the else branch only
584 * . flush_icache_range() is called upon __put_user() only
585 * . all edited pointers are subject to NEFF
586 * . being code, linker turns ShMedia bit on, always
587 * dereference index -1.
588 */
589
590 DEREF_REG_PR = (unsigned long) frame->retcode | 0x01;
591 DEREF_REG_PR = (DEREF_REG_PR & NEFF_SIGN) ?
592 (DEREF_REG_PR | NEFF_MASK) : DEREF_REG_PR;
593
594		if (__copy_to_user(frame->retcode, (void *)
595			((unsigned long)sa_default_rt_restorer & ~1UL), 16) != 0)
596 goto give_sigsegv;
597
598 flush_icache_range(DEREF_REG_PR-1, DEREF_REG_PR-1+15);
599 }
600
601 /*
602 * Set up registers for signal handler.
603 * All edited pointers are subject to NEFF.
604 */
605 regs->regs[REG_SP] = (unsigned long) frame;
606 regs->regs[REG_SP] = (regs->regs[REG_SP] & NEFF_SIGN) ?
607 (regs->regs[REG_SP] | NEFF_MASK) : regs->regs[REG_SP];
608 regs->regs[REG_ARG1] = signal; /* Arg for signal handler */
609 regs->regs[REG_ARG2] = (unsigned long long)(unsigned long)(signed long)&frame->info;
610 regs->regs[REG_ARG3] = (unsigned long long)(unsigned long)(signed long)&frame->uc.uc_mcontext;
611 regs->pc = (unsigned long) ka->sa.sa_handler;
612 regs->pc = (regs->pc & NEFF_SIGN) ? (regs->pc | NEFF_MASK) : regs->pc;
613
614 set_fs(USER_DS);
615
616#if DEBUG_SIG
617 /* Broken %016Lx */
618 printk("SIG deliver (#%d,%s:%d): sp=%p pc=%08Lx%08Lx link=%08Lx%08Lx\n",
619 signal,
620 current->comm, current->pid, frame,
621 regs->pc >> 32, regs->pc & 0xffffffff,
622 DEREF_REG_PR >> 32, DEREF_REG_PR & 0xffffffff);
623#endif
624
625 return;
626
627give_sigsegv:
628 force_sigsegv(sig, current);
629}
630
631/*
632 * OK, we're invoking a handler
633 */
634
635static void
636handle_signal(unsigned long sig, siginfo_t *info, struct k_sigaction *ka,
637 sigset_t *oldset, struct pt_regs * regs)
638{
639 /* Are we from a system call? */
640 if (regs->syscall_nr >= 0) {
641 /* If so, check system call restarting.. */
642 switch (regs->regs[REG_RET]) {
643 case -ERESTART_RESTARTBLOCK:
644 case -ERESTARTNOHAND:
645 regs->regs[REG_RET] = -EINTR;
646 break;
647
648 case -ERESTARTSYS:
649 if (!(ka->sa.sa_flags & SA_RESTART)) {
650 regs->regs[REG_RET] = -EINTR;
651 break;
652 }
653 /* fallthrough */
654 case -ERESTARTNOINTR:
655 /* Decode syscall # */
656 regs->regs[REG_RET] = regs->syscall_nr;
657 regs->pc -= 4;
658 }
659 }
660
661 /* Set up the stack frame */
662 if (ka->sa.sa_flags & SA_SIGINFO)
663 setup_rt_frame(sig, ka, info, oldset, regs);
664 else
665 setup_frame(sig, ka, oldset, regs);
666
667 spin_lock_irq(&current->sighand->siglock);
668	sigorsets(&current->blocked, &current->blocked, &ka->sa.sa_mask);
669	if (!(ka->sa.sa_flags & SA_NODEFER))
670		sigaddset(&current->blocked, sig);
671 recalc_sigpending();
672 spin_unlock_irq(&current->sighand->siglock);
673}
674
675/*
676 * Note that 'init' is a special process: it doesn't get signals it doesn't
677 * want to handle. Thus you cannot kill init even with a SIGKILL, even by
678 * mistake.
679 *
680 * Note that we go through the signals twice: once to check the signals that
681 * the kernel can handle, and then we build all the user-level signal handling
682 * stack-frames in one go after that.
683 */
684int do_signal(struct pt_regs *regs, sigset_t *oldset)
685{
686 siginfo_t info;
687 int signr;
688 struct k_sigaction ka;
689
690 /*
691 * We want the common case to go fast, which
692 * is why we may in certain cases get here from
693 * kernel mode. Just return without doing anything
694 * if so.
695 */
696 if (!user_mode(regs))
697 return 1;
698
699 if (try_to_freeze())
700 goto no_signal;
701
702 if (test_thread_flag(TIF_RESTORE_SIGMASK))
703 oldset = &current->saved_sigmask;
704 else if (!oldset)
705 oldset = &current->blocked;
706
707 signr = get_signal_to_deliver(&info, &ka, regs, 0);
708
709 if (signr > 0) {
710 /* Whee! Actually deliver the signal. */
711 handle_signal(signr, &info, &ka, oldset, regs);
712
713 /*
714 * If a signal was successfully delivered, the saved sigmask
715 * is in its frame, and we can clear the TIF_RESTORE_SIGMASK
716 * flag.
717 */
718 if (test_thread_flag(TIF_RESTORE_SIGMASK))
719 clear_thread_flag(TIF_RESTORE_SIGMASK);
720
721 return 1;
722 }
723
724no_signal:
725 /* Did we come from a system call? */
726 if (regs->syscall_nr >= 0) {
727 /* Restart the system call - no handlers present */
728 switch (regs->regs[REG_RET]) {
729 case -ERESTARTNOHAND:
730 case -ERESTARTSYS:
731 case -ERESTARTNOINTR:
732 /* Decode Syscall # */
733 regs->regs[REG_RET] = regs->syscall_nr;
734 regs->pc -= 4;
735 break;
736
737 case -ERESTART_RESTARTBLOCK:
738 regs->regs[REG_RET] = __NR_restart_syscall;
739 regs->pc -= 4;
740 break;
741 }
742 }
743
744 /* No signal to deliver -- put the saved sigmask back */
745 if (test_thread_flag(TIF_RESTORE_SIGMASK)) {
746 clear_thread_flag(TIF_RESTORE_SIGMASK);
747 sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL);
748 }
749
750 return 0;
751}
diff --git a/arch/sh/kernel/sys_sh.c b/arch/sh/kernel/sys_sh.c
index d545a686a201..59cd2859ce9b 100644
--- a/arch/sh/kernel/sys_sh.c
+++ b/arch/sh/kernel/sys_sh.c
@@ -7,7 +7,6 @@
7 * 7 *
8 * Taken from i386 version. 8 * Taken from i386 version.
9 */ 9 */
10
11#include <linux/errno.h> 10#include <linux/errno.h>
12#include <linux/sched.h> 11#include <linux/sched.h>
13#include <linux/mm.h> 12#include <linux/mm.h>
@@ -27,28 +26,7 @@
27#include <asm/uaccess.h> 26#include <asm/uaccess.h>
28#include <asm/unistd.h> 27#include <asm/unistd.h>
29 28
30/*
31 * sys_pipe() is the normal C calling standard for creating
32 * a pipe. It's not the way Unix traditionally does this, though.
33 */
34asmlinkage int sys_pipe(unsigned long r4, unsigned long r5,
35 unsigned long r6, unsigned long r7,
36 struct pt_regs __regs)
37{
38 struct pt_regs *regs = RELOC_HIDE(&__regs, 0);
39 int fd[2];
40 int error;
41
42 error = do_pipe(fd);
43 if (!error) {
44 regs->regs[1] = fd[1];
45 return fd[0];
46 }
47 return error;
48}
49
50unsigned long shm_align_mask = PAGE_SIZE - 1; /* Sane caches */ 29unsigned long shm_align_mask = PAGE_SIZE - 1; /* Sane caches */
51
52EXPORT_SYMBOL(shm_align_mask); 30EXPORT_SYMBOL(shm_align_mask);
53 31
54#ifdef CONFIG_MMU 32#ifdef CONFIG_MMU
@@ -140,7 +118,7 @@ full_search:
140#endif /* CONFIG_MMU */ 118#endif /* CONFIG_MMU */
141 119
142static inline long 120static inline long
143do_mmap2(unsigned long addr, unsigned long len, unsigned long prot, 121do_mmap2(unsigned long addr, unsigned long len, unsigned long prot,
144 unsigned long flags, int fd, unsigned long pgoff) 122 unsigned long flags, int fd, unsigned long pgoff)
145{ 123{
146 int error = -EBADF; 124 int error = -EBADF;
@@ -195,12 +173,13 @@ asmlinkage int sys_ipc(uint call, int first, int second,
195 if (call <= SEMCTL) 173 if (call <= SEMCTL)
196 switch (call) { 174 switch (call) {
197 case SEMOP: 175 case SEMOP:
198 return sys_semtimedop(first, (struct sembuf __user *)ptr, 176 return sys_semtimedop(first,
177 (struct sembuf __user *)ptr,
199 second, NULL); 178 second, NULL);
200 case SEMTIMEDOP: 179 case SEMTIMEDOP:
201 return sys_semtimedop(first, (struct sembuf __user *)ptr, 180 return sys_semtimedop(first,
202 second, 181 (struct sembuf __user *)ptr, second,
203 (const struct timespec __user *)fifth); 182 (const struct timespec __user *)fifth);
204 case SEMGET: 183 case SEMGET:
205 return sys_semget (first, second, third); 184 return sys_semget (first, second, third);
206 case SEMCTL: { 185 case SEMCTL: {
@@ -215,25 +194,28 @@ asmlinkage int sys_ipc(uint call, int first, int second,
215 return -EINVAL; 194 return -EINVAL;
216 } 195 }
217 196
218 if (call <= MSGCTL) 197 if (call <= MSGCTL)
219 switch (call) { 198 switch (call) {
220 case MSGSND: 199 case MSGSND:
221 return sys_msgsnd (first, (struct msgbuf __user *) ptr, 200 return sys_msgsnd (first, (struct msgbuf __user *) ptr,
222 second, third); 201 second, third);
223 case MSGRCV: 202 case MSGRCV:
224 switch (version) { 203 switch (version) {
225 case 0: { 204 case 0:
205 {
226 struct ipc_kludge tmp; 206 struct ipc_kludge tmp;
207
227 if (!ptr) 208 if (!ptr)
228 return -EINVAL; 209 return -EINVAL;
229 210
230 if (copy_from_user(&tmp, 211 if (copy_from_user(&tmp,
231 (struct ipc_kludge __user *) ptr, 212 (struct ipc_kludge __user *) ptr,
232 sizeof (tmp))) 213 sizeof (tmp)))
233 return -EFAULT; 214 return -EFAULT;
215
234 return sys_msgrcv (first, tmp.msgp, second, 216 return sys_msgrcv (first, tmp.msgp, second,
235 tmp.msgtyp, third); 217 tmp.msgtyp, third);
236 } 218 }
237 default: 219 default:
238 return sys_msgrcv (first, 220 return sys_msgrcv (first,
239 (struct msgbuf __user *) ptr, 221 (struct msgbuf __user *) ptr,
@@ -247,7 +229,7 @@ asmlinkage int sys_ipc(uint call, int first, int second,
247 default: 229 default:
248 return -EINVAL; 230 return -EINVAL;
249 } 231 }
250 if (call <= SHMCTL) 232 if (call <= SHMCTL)
251 switch (call) { 233 switch (call) {
252 case SHMAT: 234 case SHMAT:
253 switch (version) { 235 switch (version) {
@@ -265,7 +247,7 @@ asmlinkage int sys_ipc(uint call, int first, int second,
265 return do_shmat (first, (char __user *) ptr, 247 return do_shmat (first, (char __user *) ptr,
266 second, (ulong *) third); 248 second, (ulong *) third);
267 } 249 }
268 case SHMDT: 250 case SHMDT:
269 return sys_shmdt ((char __user *)ptr); 251 return sys_shmdt ((char __user *)ptr);
270 case SHMGET: 252 case SHMGET:
271 return sys_shmget (first, second, third); 253 return sys_shmget (first, second, third);
@@ -275,7 +257,7 @@ asmlinkage int sys_ipc(uint call, int first, int second,
275 default: 257 default:
276 return -EINVAL; 258 return -EINVAL;
277 } 259 }
278 260
279 return -EINVAL; 261 return -EINVAL;
280} 262}
281 263
@@ -289,49 +271,3 @@ asmlinkage int sys_uname(struct old_utsname * name)
289 up_read(&uts_sem); 271 up_read(&uts_sem);
290 return err?-EFAULT:0; 272 return err?-EFAULT:0;
291} 273}
292
293asmlinkage ssize_t sys_pread_wrapper(unsigned int fd, char * buf,
294 size_t count, long dummy, loff_t pos)
295{
296 return sys_pread64(fd, buf, count, pos);
297}
298
299asmlinkage ssize_t sys_pwrite_wrapper(unsigned int fd, const char * buf,
300 size_t count, long dummy, loff_t pos)
301{
302 return sys_pwrite64(fd, buf, count, pos);
303}
304
305asmlinkage int sys_fadvise64_64_wrapper(int fd, u32 offset0, u32 offset1,
306 u32 len0, u32 len1, int advice)
307{
308#ifdef __LITTLE_ENDIAN__
309 return sys_fadvise64_64(fd, (u64)offset1 << 32 | offset0,
310 (u64)len1 << 32 | len0, advice);
311#else
312 return sys_fadvise64_64(fd, (u64)offset0 << 32 | offset1,
313 (u64)len0 << 32 | len1, advice);
314#endif
315}
316
317#if defined(CONFIG_CPU_SH2) || defined(CONFIG_CPU_SH2A)
318#define SYSCALL_ARG3 "trapa #0x23"
319#else
320#define SYSCALL_ARG3 "trapa #0x13"
321#endif
322
323/*
324 * Do a system call from kernel instead of calling sys_execve so we
325 * end up with proper pt_regs.
326 */
327int kernel_execve(const char *filename, char *const argv[], char *const envp[])
328{
329 register long __sc0 __asm__ ("r3") = __NR_execve;
330 register long __sc4 __asm__ ("r4") = (long) filename;
331 register long __sc5 __asm__ ("r5") = (long) argv;
332 register long __sc6 __asm__ ("r6") = (long) envp;
333 __asm__ __volatile__ (SYSCALL_ARG3 : "=z" (__sc0)
334 : "0" (__sc0), "r" (__sc4), "r" (__sc5), "r" (__sc6)
335 : "memory");
336 return __sc0;
337}
diff --git a/arch/sh/kernel/sys_sh32.c b/arch/sh/kernel/sys_sh32.c
new file mode 100644
index 000000000000..125e493ead82
--- /dev/null
+++ b/arch/sh/kernel/sys_sh32.c
@@ -0,0 +1,84 @@
1#include <linux/errno.h>
2#include <linux/sched.h>
3#include <linux/mm.h>
4#include <linux/smp.h>
5#include <linux/sem.h>
6#include <linux/msg.h>
7#include <linux/shm.h>
8#include <linux/stat.h>
9#include <linux/syscalls.h>
10#include <linux/mman.h>
11#include <linux/file.h>
12#include <linux/utsname.h>
13#include <linux/module.h>
14#include <linux/fs.h>
15#include <linux/ipc.h>
16#include <asm/cacheflush.h>
17#include <asm/uaccess.h>
18#include <asm/unistd.h>
19
20/*
21 * sys_pipe() is the normal C calling standard for creating
22 * a pipe. It's not the way Unix traditionally does this, though.
23 */
24asmlinkage int sys_pipe(unsigned long r4, unsigned long r5,
25 unsigned long r6, unsigned long r7,
26 struct pt_regs __regs)
27{
28 struct pt_regs *regs = RELOC_HIDE(&__regs, 0);
29 int fd[2];
30 int error;
31
32 error = do_pipe(fd);
33 if (!error) {
34 regs->regs[1] = fd[1];
35 return fd[0];
36 }
37 return error;
38}
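
Note the two-register return convention: fd[0] comes back as the ordinary syscall return value in r0, while fd[1] is handed back in r1, so the user-space stub has to store both registers into the caller's array. A hedged sketch of such a stub, modeled on the kernel_execve() trapa sequence further down in this file; it is illustrative only, not the actual libc code, and it assumes the usual 0x10 + argument-count trap encoding (hence #0x10 for the zero-argument pipe), with error handling elided:

#include <asm/unistd.h>

int my_pipe(int fildes[2])			/* hypothetical stub */
{
	register long sc3 __asm__("r3") = __NR_pipe;	/* syscall number */
	register long fd0 __asm__("r0");		/* fd[0] on return */
	register long fd1 __asm__("r1");		/* fd[1] on return */

	__asm__ __volatile__("trapa #0x10"
			     : "=r"(fd0), "=r"(fd1)
			     : "r"(sc3)
			     : "memory");
	fildes[0] = fd0;
	fildes[1] = fd1;
	return 0;
}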
39
40asmlinkage ssize_t sys_pread_wrapper(unsigned int fd, char * buf,
41 size_t count, long dummy, loff_t pos)
42{
43 return sys_pread64(fd, buf, count, pos);
44}
45
46asmlinkage ssize_t sys_pwrite_wrapper(unsigned int fd, const char * buf,
47 size_t count, long dummy, loff_t pos)
48{
49 return sys_pwrite64(fd, buf, count, pos);
50}
51
52asmlinkage int sys_fadvise64_64_wrapper(int fd, u32 offset0, u32 offset1,
53 u32 len0, u32 len1, int advice)
54{
55#ifdef __LITTLE_ENDIAN__
56 return sys_fadvise64_64(fd, (u64)offset1 << 32 | offset0,
57 (u64)len1 << 32 | len0, advice);
58#else
59 return sys_fadvise64_64(fd, (u64)offset0 << 32 | offset1,
60 (u64)len0 << 32 | len1, advice);
61#endif
62}
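
The wrapper above reassembles a 64-bit value from two 32-bit register halves, with the pairing order fixed by the ABI endianness. A small worked example for the little-endian case, where offset0 carries the low half:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t offset0 = 0x89abcdef;	/* low half (little-endian ABI) */
	uint32_t offset1 = 0x01234567;	/* high half */
	uint64_t offset  = (uint64_t)offset1 << 32 | offset0;

	printf("%016llx\n", (unsigned long long)offset);
	/* prints 0123456789abcdef */
	return 0;
}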
63
64#if defined(CONFIG_CPU_SH2) || defined(CONFIG_CPU_SH2A)
65#define SYSCALL_ARG3 "trapa #0x23"
66#else
67#define SYSCALL_ARG3 "trapa #0x13"
68#endif
69
70/*
71 * Do a system call from kernel instead of calling sys_execve so we
72 * end up with proper pt_regs.
73 */
74int kernel_execve(const char *filename, char *const argv[], char *const envp[])
75{
76 register long __sc0 __asm__ ("r3") = __NR_execve;
77 register long __sc4 __asm__ ("r4") = (long) filename;
78 register long __sc5 __asm__ ("r5") = (long) argv;
79 register long __sc6 __asm__ ("r6") = (long) envp;
80 __asm__ __volatile__ (SYSCALL_ARG3 : "=z" (__sc0)
81 : "0" (__sc0), "r" (__sc4), "r" (__sc5), "r" (__sc6)
82 : "memory");
83 return __sc0;
84}
diff --git a/arch/sh/kernel/sys_sh64.c b/arch/sh/kernel/sys_sh64.c
new file mode 100644
index 000000000000..578004d71e02
--- /dev/null
+++ b/arch/sh/kernel/sys_sh64.c
@@ -0,0 +1,66 @@
1/*
2 * arch/sh/kernel/sys_sh64.c
3 *
4 * Copyright (C) 2000, 2001 Paolo Alberelli
5 *
6 * This file contains various random system calls that
7 * have a non-standard calling sequence on the Linux/SH5
8 * platform.
9 *
10 * This file is subject to the terms and conditions of the GNU General Public
11 * License. See the file "COPYING" in the main directory of this archive
12 * for more details.
13 */
14#include <linux/errno.h>
15#include <linux/rwsem.h>
16#include <linux/sched.h>
17#include <linux/mm.h>
18#include <linux/fs.h>
19#include <linux/smp.h>
20#include <linux/sem.h>
21#include <linux/msg.h>
22#include <linux/shm.h>
23#include <linux/stat.h>
24#include <linux/mman.h>
25#include <linux/file.h>
26#include <linux/utsname.h>
27#include <linux/syscalls.h>
28#include <linux/ipc.h>
29#include <asm/uaccess.h>
30#include <asm/ptrace.h>
31#include <asm/unistd.h>
32
33/*
34 * sys_pipe() is the normal C calling standard for creating
35 * a pipe. It's not the way Unix traditionally does this, though.
36 */
37asmlinkage int sys_pipe(unsigned long * fildes)
38{
39 int fd[2];
40 int error;
41
42 error = do_pipe(fd);
43 if (!error) {
44 if (copy_to_user(fildes, fd, 2*sizeof(int)))
45 error = -EFAULT;
46 }
47 return error;
48}
49
50/*
51 * Do a system call from kernel instead of calling sys_execve so we
52 * end up with proper pt_regs.
53 */
54int kernel_execve(const char *filename, char *const argv[], char *const envp[])
55{
56 register unsigned long __sc0 __asm__ ("r9") = ((0x13 << 16) | __NR_execve);
57 register unsigned long __sc2 __asm__ ("r2") = (unsigned long) filename;
58 register unsigned long __sc3 __asm__ ("r3") = (unsigned long) argv;
59 register unsigned long __sc4 __asm__ ("r4") = (unsigned long) envp;
60 __asm__ __volatile__ ("trapa %1 !\t\t\t execve(%2,%3,%4)"
61 : "=r" (__sc0)
62 : "r" (__sc0), "r" (__sc2), "r" (__sc3), "r" (__sc4) );
63 __asm__ __volatile__ ("!dummy %0 %1 %2 %3"
64 : : "r" (__sc0), "r" (__sc2), "r" (__sc3), "r" (__sc4) : "memory");
65 return __sc0;
66}
diff --git a/arch/sh/kernel/syscalls.S b/arch/sh/kernel/syscalls_32.S
index 10bec45415ba..10bec45415ba 100644
--- a/arch/sh/kernel/syscalls.S
+++ b/arch/sh/kernel/syscalls_32.S
diff --git a/arch/sh/kernel/syscalls_64.S b/arch/sh/kernel/syscalls_64.S
new file mode 100644
index 000000000000..98a93efe3691
--- /dev/null
+++ b/arch/sh/kernel/syscalls_64.S
@@ -0,0 +1,381 @@
1/*
2 * arch/sh/kernel/syscalls_64.S
3 *
4 * Copyright (C) 2000, 2001 Paolo Alberelli
5 * Copyright (C) 2004 - 2007 Paul Mundt
6 * Copyright (C) 2003, 2004 Richard Curnow
7 *
8 * This file is subject to the terms and conditions of the GNU General Public
9 * License. See the file "COPYING" in the main directory of this archive
10 * for more details.
11 */
12
13#include <linux/sys.h>
14
15 .section .data, "aw"
16 .balign 32
17
18/*
19 * System calls jump table
20 */
21 .globl sys_call_table
22sys_call_table:
23 .long sys_restart_syscall /* 0 - old "setup()" system call */
24 .long sys_exit
25 .long sys_fork
26 .long sys_read
27 .long sys_write
28 .long sys_open /* 5 */
29 .long sys_close
30 .long sys_waitpid
31 .long sys_creat
32 .long sys_link
33 .long sys_unlink /* 10 */
34 .long sys_execve
35 .long sys_chdir
36 .long sys_time
37 .long sys_mknod
38 .long sys_chmod /* 15 */
39 .long sys_lchown16
40 .long sys_ni_syscall /* old break syscall holder */
41 .long sys_stat
42 .long sys_lseek
43 .long sys_getpid /* 20 */
44 .long sys_mount
45 .long sys_oldumount
46 .long sys_setuid16
47 .long sys_getuid16
48 .long sys_stime /* 25 */
49 .long sh64_ptrace
50 .long sys_alarm
51 .long sys_fstat
52 .long sys_pause
53 .long sys_utime /* 30 */
54 .long sys_ni_syscall /* old stty syscall holder */
55 .long sys_ni_syscall /* old gtty syscall holder */
56 .long sys_access
57 .long sys_nice
58 .long sys_ni_syscall /* 35 */ /* old ftime syscall holder */
59 .long sys_sync
60 .long sys_kill
61 .long sys_rename
62 .long sys_mkdir
63 .long sys_rmdir /* 40 */
64 .long sys_dup
65 .long sys_pipe
66 .long sys_times
67 .long sys_ni_syscall /* old prof syscall holder */
68 .long sys_brk /* 45 */
69 .long sys_setgid16
70 .long sys_getgid16
71 .long sys_signal
72 .long sys_geteuid16
73 .long sys_getegid16 /* 50 */
74 .long sys_acct
75 .long sys_umount /* recycled never used phys() */
76 .long sys_ni_syscall /* old lock syscall holder */
77 .long sys_ioctl
78 .long sys_fcntl /* 55 */
79 .long sys_ni_syscall /* old mpx syscall holder */
80 .long sys_setpgid
81 .long sys_ni_syscall /* old ulimit syscall holder */
82 .long sys_ni_syscall /* sys_olduname */
83 .long sys_umask /* 60 */
84 .long sys_chroot
85 .long sys_ustat
86 .long sys_dup2
87 .long sys_getppid
88 .long sys_getpgrp /* 65 */
89 .long sys_setsid
90 .long sys_sigaction
91 .long sys_sgetmask
92 .long sys_ssetmask
93 .long sys_setreuid16 /* 70 */
94 .long sys_setregid16
95 .long sys_sigsuspend
96 .long sys_sigpending
97 .long sys_sethostname
98 .long sys_setrlimit /* 75 */
99 .long sys_old_getrlimit
100 .long sys_getrusage
101 .long sys_gettimeofday
102 .long sys_settimeofday
103 .long sys_getgroups16 /* 80 */
104 .long sys_setgroups16
105 .long sys_ni_syscall /* sys_oldselect */
106 .long sys_symlink
107 .long sys_lstat
108 .long sys_readlink /* 85 */
109 .long sys_uselib
110 .long sys_swapon
111 .long sys_reboot
112 .long old_readdir
113 .long old_mmap /* 90 */
114 .long sys_munmap
115 .long sys_truncate
116 .long sys_ftruncate
117 .long sys_fchmod
118 .long sys_fchown16 /* 95 */
119 .long sys_getpriority
120 .long sys_setpriority
121 .long sys_ni_syscall /* old profil syscall holder */
122 .long sys_statfs
123 .long sys_fstatfs /* 100 */
124 .long sys_ni_syscall /* ioperm */
125 .long sys_socketcall /* Obsolete implementation of socket syscall */
126 .long sys_syslog
127 .long sys_setitimer
128 .long sys_getitimer /* 105 */
129 .long sys_newstat
130 .long sys_newlstat
131 .long sys_newfstat
132 .long sys_uname
133 .long sys_ni_syscall /* 110 */ /* iopl */
134 .long sys_vhangup
135 .long sys_ni_syscall /* idle */
136 .long sys_ni_syscall /* vm86old */
137 .long sys_wait4
138 .long sys_swapoff /* 115 */
139 .long sys_sysinfo
140 .long sys_ipc /* Obsolete ipc syscall implementation */
141 .long sys_fsync
142 .long sys_sigreturn
143 .long sys_clone /* 120 */
144 .long sys_setdomainname
145 .long sys_newuname
146 .long sys_ni_syscall /* sys_modify_ldt */
147 .long sys_adjtimex
148 .long sys_mprotect /* 125 */
149 .long sys_sigprocmask
150 .long sys_ni_syscall /* old "create_module" */
151 .long sys_init_module
152 .long sys_delete_module
153 .long sys_ni_syscall /* 130: old "get_kernel_syms" */
154 .long sys_quotactl
155 .long sys_getpgid
156 .long sys_fchdir
157 .long sys_bdflush
158 .long sys_sysfs /* 135 */
159 .long sys_personality
160 .long sys_ni_syscall /* for afs_syscall */
161 .long sys_setfsuid16
162 .long sys_setfsgid16
163 .long sys_llseek /* 140 */
164 .long sys_getdents
165 .long sys_select
166 .long sys_flock
167 .long sys_msync
168 .long sys_readv /* 145 */
169 .long sys_writev
170 .long sys_getsid
171 .long sys_fdatasync
172 .long sys_sysctl
173 .long sys_mlock /* 150 */
174 .long sys_munlock
175 .long sys_mlockall
176 .long sys_munlockall
177 .long sys_sched_setparam
178 .long sys_sched_getparam /* 155 */
179 .long sys_sched_setscheduler
180 .long sys_sched_getscheduler
181 .long sys_sched_yield
182 .long sys_sched_get_priority_max
183 .long sys_sched_get_priority_min /* 160 */
184 .long sys_sched_rr_get_interval
185 .long sys_nanosleep
186 .long sys_mremap
187 .long sys_setresuid16
188 .long sys_getresuid16 /* 165 */
189 .long sys_ni_syscall /* vm86 */
190 .long sys_ni_syscall /* old "query_module" */
191 .long sys_poll
192 .long sys_nfsservctl
193 .long sys_setresgid16 /* 170 */
194 .long sys_getresgid16
195 .long sys_prctl
196 .long sys_rt_sigreturn
197 .long sys_rt_sigaction
198 .long sys_rt_sigprocmask /* 175 */
199 .long sys_rt_sigpending
200 .long sys_rt_sigtimedwait
201 .long sys_rt_sigqueueinfo
202 .long sys_rt_sigsuspend
203 .long sys_pread64 /* 180 */
204 .long sys_pwrite64
205 .long sys_chown16
206 .long sys_getcwd
207 .long sys_capget
208 .long sys_capset /* 185 */
209 .long sys_sigaltstack
210 .long sys_sendfile
211 .long sys_ni_syscall /* streams1 */
212 .long sys_ni_syscall /* streams2 */
213 .long sys_vfork /* 190 */
214 .long sys_getrlimit
215 .long sys_mmap2
216 .long sys_truncate64
217 .long sys_ftruncate64
218 .long sys_stat64 /* 195 */
219 .long sys_lstat64
220 .long sys_fstat64
221 .long sys_lchown
222 .long sys_getuid
223 .long sys_getgid /* 200 */
224 .long sys_geteuid
225 .long sys_getegid
226 .long sys_setreuid
227 .long sys_setregid
228 .long sys_getgroups /* 205 */
229 .long sys_setgroups
230 .long sys_fchown
231 .long sys_setresuid
232 .long sys_getresuid
233 .long sys_setresgid /* 210 */
234 .long sys_getresgid
235 .long sys_chown
236 .long sys_setuid
237 .long sys_setgid
238 .long sys_setfsuid /* 215 */
239 .long sys_setfsgid
240 .long sys_pivot_root
241 .long sys_mincore
242 .long sys_madvise
243 /* Broken-out socket family (maintain backwards compatibility in syscall
244 numbering with 2.4) */
245 .long sys_socket /* 220 */
246 .long sys_bind
247 .long sys_connect
248 .long sys_listen
249 .long sys_accept
250 .long sys_getsockname /* 225 */
251 .long sys_getpeername
252 .long sys_socketpair
253 .long sys_send
254 .long sys_sendto
255 .long sys_recv /* 230 */
256 .long sys_recvfrom
257 .long sys_shutdown
258 .long sys_setsockopt
259 .long sys_getsockopt
260 .long sys_sendmsg /* 235 */
261 .long sys_recvmsg
262 /* Broken-out IPC family (maintain backwards compatibility in syscall
263 numbering with 2.4) */
264 .long sys_semop
265 .long sys_semget
266 .long sys_semctl
267 .long sys_msgsnd /* 240 */
268 .long sys_msgrcv
269 .long sys_msgget
270 .long sys_msgctl
271 .long sys_shmat
272 .long sys_shmdt /* 245 */
273 .long sys_shmget
274 .long sys_shmctl
275 /* Rest of syscalls listed in 2.4 i386 unistd.h */
276 .long sys_getdents64
277 .long sys_fcntl64
278 .long sys_ni_syscall /* 250 reserved for TUX */
279 .long sys_ni_syscall /* Reserved for Security */
280 .long sys_gettid
281 .long sys_readahead
282 .long sys_setxattr
283 .long sys_lsetxattr /* 255 */
284 .long sys_fsetxattr
285 .long sys_getxattr
286 .long sys_lgetxattr
287 .long sys_fgetxattr
288 .long sys_listxattr /* 260 */
289 .long sys_llistxattr
290 .long sys_flistxattr
291 .long sys_removexattr
292 .long sys_lremovexattr
293 .long sys_fremovexattr /* 265 */
294 .long sys_tkill
295 .long sys_sendfile64
296 .long sys_futex
297 .long sys_sched_setaffinity
298 .long sys_sched_getaffinity /* 270 */
299 .long sys_ni_syscall
300 .long sys_ni_syscall
301 .long sys_io_setup
302 .long sys_io_destroy
303 .long sys_io_getevents /* 275 */
304 .long sys_io_submit
305 .long sys_io_cancel
306 .long sys_fadvise64
307 .long sys_ni_syscall
308 .long sys_exit_group /* 280 */
309 /* Rest of new 2.6 syscalls */
310 .long sys_lookup_dcookie
311 .long sys_epoll_create
312 .long sys_epoll_ctl
313 .long sys_epoll_wait
314 .long sys_remap_file_pages /* 285 */
315 .long sys_set_tid_address
316 .long sys_timer_create
317 .long sys_timer_settime
318 .long sys_timer_gettime
319 .long sys_timer_getoverrun /* 290 */
320 .long sys_timer_delete
321 .long sys_clock_settime
322 .long sys_clock_gettime
323 .long sys_clock_getres
324 .long sys_clock_nanosleep /* 295 */
325 .long sys_statfs64
326 .long sys_fstatfs64
327 .long sys_tgkill
328 .long sys_utimes
329 .long sys_fadvise64_64 /* 300 */
330 .long sys_ni_syscall /* Reserved for vserver */
331 .long sys_ni_syscall /* Reserved for mbind */
332 .long sys_ni_syscall /* get_mempolicy */
333 .long sys_ni_syscall /* set_mempolicy */
334 .long sys_mq_open /* 305 */
335 .long sys_mq_unlink
336 .long sys_mq_timedsend
337 .long sys_mq_timedreceive
338 .long sys_mq_notify
339 .long sys_mq_getsetattr /* 310 */
340 .long sys_ni_syscall /* Reserved for kexec */
341 .long sys_waitid
342 .long sys_add_key
343 .long sys_request_key
344 .long sys_keyctl /* 315 */
345 .long sys_ioprio_set
346 .long sys_ioprio_get
347 .long sys_inotify_init
348 .long sys_inotify_add_watch
349 .long sys_inotify_rm_watch /* 320 */
350 .long sys_ni_syscall
351 .long sys_migrate_pages
352 .long sys_openat
353 .long sys_mkdirat
354 .long sys_mknodat /* 325 */
355 .long sys_fchownat
356 .long sys_futimesat
357 .long sys_fstatat64
358 .long sys_unlinkat
359 .long sys_renameat /* 330 */
360 .long sys_linkat
361 .long sys_symlinkat
362 .long sys_readlinkat
363 .long sys_fchmodat
364 .long sys_faccessat /* 335 */
365 .long sys_pselect6
366 .long sys_ppoll
367 .long sys_unshare
368 .long sys_set_robust_list
369 .long sys_get_robust_list /* 340 */
370 .long sys_splice
371 .long sys_sync_file_range
372 .long sys_tee
373 .long sys_vmsplice
374 .long sys_move_pages /* 345 */
375 .long sys_getcpu
376 .long sys_epoll_pwait
377 .long sys_utimensat
378 .long sys_signalfd
379 .long sys_timerfd /* 350 */
380 .long sys_eventfd
381 .long sys_fallocate
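
At dispatch time the kernel indexes this table by syscall number; the real SH-5 path does so in the entry code with registers passed straight through. A simplified, illustrative C model of just the table lookup and the out-of-range ENOSYS case (names here are invented):

    #include <stdio.h>

    typedef long (*syscall_fn)(long, long, long);

    static long sys_ni(long a, long b, long c)          { return -38; /* -ENOSYS */ }
    static long sys_getpid_toy(long a, long b, long c)  { return 1234; }

    /* A two-entry stand-in for sys_call_table. */
    static const syscall_fn toy_table[] = { sys_ni, sys_getpid_toy };
    #define NR_TOY (sizeof(toy_table) / sizeof(toy_table[0]))

    static long dispatch(unsigned long nr, long a, long b, long c)
    {
        if (nr >= NR_TOY)
            return -38;             /* unknown numbers get -ENOSYS */
        return toy_table[nr](a, b, c);
    }

    int main(void)
    {
        printf("%ld %ld\n", dispatch(1, 0, 0, 0), dispatch(99, 0, 0, 0));
        return 0;                   /* prints: 1234 -38 */
    }
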
diff --git a/arch/sh/kernel/time.c b/arch/sh/kernel/time_32.c
index 2bc04bfee738..2bc04bfee738 100644
--- a/arch/sh/kernel/time.c
+++ b/arch/sh/kernel/time_32.c
diff --git a/arch/sh/kernel/time_64.c b/arch/sh/kernel/time_64.c
new file mode 100644
index 000000000000..f819ba38a6ce
--- /dev/null
+++ b/arch/sh/kernel/time_64.c
@@ -0,0 +1,519 @@
1/*
2 * arch/sh/kernel/time_64.c
3 *
4 * Copyright (C) 2000, 2001 Paolo Alberelli
5 * Copyright (C) 2003 - 2007 Paul Mundt
6 * Copyright (C) 2003 Richard Curnow
7 *
8 * Original TMU/RTC code taken from sh version.
9 * Copyright (C) 1999 Tetsuya Okada & Niibe Yutaka
10 * Some code taken from i386 version.
11 * Copyright (C) 1991, 1992, 1995 Linus Torvalds
12 *
13 * This file is subject to the terms and conditions of the GNU General Public
14 * License. See the file "COPYING" in the main directory of this archive
15 * for more details.
16 */
17#include <linux/errno.h>
18#include <linux/rwsem.h>
19#include <linux/sched.h>
20#include <linux/kernel.h>
21#include <linux/param.h>
22#include <linux/string.h>
23#include <linux/mm.h>
24#include <linux/interrupt.h>
25#include <linux/time.h>
26#include <linux/delay.h>
27#include <linux/init.h>
28#include <linux/profile.h>
29#include <linux/smp.h>
30#include <linux/module.h>
31#include <linux/bcd.h>
32#include <linux/timex.h>
33#include <linux/irq.h>
34#include <linux/io.h>
35#include <linux/platform_device.h>
36#include <asm/cpu/registers.h> /* required by inline __asm__ stmt. */
37#include <asm/cpu/irq.h>
38#include <asm/addrspace.h>
39#include <asm/processor.h>
40#include <asm/uaccess.h>
41#include <asm/delay.h>
42
43#define TMU_TOCR_INIT 0x00
44#define TMU0_TCR_INIT 0x0020
45#define TMU_TSTR_INIT 1
46#define TMU_TSTR_OFF 0
47
48/* Real Time Clock */
49#define RTC_BLOCK_OFF 0x01040000
50#define RTC_BASE PHYS_PERIPHERAL_BLOCK + RTC_BLOCK_OFF
51#define RTC_RCR1_CIE 0x10 /* Carry Interrupt Enable */
52#define RTC_RCR1 (rtc_base + 0x38)
53
54/* Clock, Power and Reset Controller */
55#define CPRC_BLOCK_OFF 0x01010000
56#define CPRC_BASE PHYS_PERIPHERAL_BLOCK + CPRC_BLOCK_OFF
57
58#define FRQCR (cprc_base+0x0)
59#define WTCSR (cprc_base+0x0018)
60#define STBCR (cprc_base+0x0030)
61
62/* Time Management Unit */
63#define TMU_BLOCK_OFF 0x01020000
64#define TMU_BASE PHYS_PERIPHERAL_BLOCK + TMU_BLOCK_OFF
65#define TMU0_BASE tmu_base + 0x8 + (0xc * 0x0)
66#define TMU1_BASE tmu_base + 0x8 + (0xc * 0x1)
67#define TMU2_BASE tmu_base + 0x8 + (0xc * 0x2)
68
69#define TMU_TOCR tmu_base+0x0 /* Byte access */
70#define TMU_TSTR tmu_base+0x4 /* Byte access */
71
72#define TMU0_TCOR TMU0_BASE+0x0 /* Long access */
73#define TMU0_TCNT TMU0_BASE+0x4 /* Long access */
74#define TMU0_TCR TMU0_BASE+0x8 /* Word access */
75
76#define TICK_SIZE (tick_nsec / 1000)
77
78static unsigned long tmu_base, rtc_base;
79unsigned long cprc_base;
80
81/* Variables to allow interpolation of time of day to resolution better than a
82 * jiffy. */
83
84/* This is effectively protected by xtime_lock */
85static unsigned long ctc_last_interrupt;
86static unsigned long long usecs_per_jiffy = 1000000/HZ; /* Approximation */
87
88#define CTC_JIFFY_SCALE_SHIFT 40
89
90/* 2**CTC_JIFFY_SCALE_SHIFT / ctc_ticks_per_jiffy */
91static unsigned long long scaled_recip_ctc_ticks_per_jiffy;
92
93/* Estimate number of microseconds that have elapsed since the last timer tick,
94 by scaling the delta that has occurred in the CTC register.
95
96 WARNING WARNING WARNING : This algorithm relies on the CTC decrementing at
97 the CPU clock rate. If the CPU sleeps, the CTC stops counting. Bear this
98 in mind if enabling SLEEP_WORKS in process.c. In that case, this algorithm
99 probably needs to use TMU.TCNT0 instead. This will work even if the CPU is
100 sleeping, though will be coarser.
101
102 FIXME : What if usecs_per_tick is moving around too much, e.g. if an adjtime
103 is running or if the freq or tick arguments of adjtimex are modified after
104 we have calibrated the scaling factor? This will result in either a jump at
105 the end of a tick period, or a wrap backwards at the start of the next one,
106 if the application is reading the time of day often enough. I think we
107 ought to do better than this. For this reason, usecs_per_jiffy is left
108 separated out in the calculation below. This allows some future hook into
109 the adjtime-related stuff in kernel/timer.c to remove this hazard.
110
111*/
112
113static unsigned long usecs_since_tick(void)
114{
115 unsigned long long current_ctc;
116 long ctc_ticks_since_interrupt;
117 unsigned long long ull_ctc_ticks_since_interrupt;
118 unsigned long result;
119
120 unsigned long long mul1_out;
121 unsigned long long mul1_out_high;
122 unsigned long long mul2_out_low, mul2_out_high;
123
124 /* Read CTC register */
125 asm ("getcon cr62, %0" : "=r" (current_ctc));
126 /* Note, the CTC counts down on each CPU clock, not up.
127 Note(2), use long type to get correct wraparound arithmetic when
128 the counter crosses zero. */
129 ctc_ticks_since_interrupt = (long) ctc_last_interrupt - (long) current_ctc;
130 ull_ctc_ticks_since_interrupt = (unsigned long long) ctc_ticks_since_interrupt;
131
132 /* Inline assembly for the 32x32->64 multiplies */
133 asm volatile ("mulu.l %1, %2, %0" :
134 "=r" (mul1_out) :
135 "r" (ull_ctc_ticks_since_interrupt), "r" (usecs_per_jiffy));
136
137 mul1_out_high = mul1_out >> 32;
138
139 asm volatile ("mulu.l %1, %2, %0" :
140 "=r" (mul2_out_low) :
141 "r" (mul1_out), "r" (scaled_recip_ctc_ticks_per_jiffy));
142
143#if 1
144 asm volatile ("mulu.l %1, %2, %0" :
145 "=r" (mul2_out_high) :
146 "r" (mul1_out_high), "r" (scaled_recip_ctc_ticks_per_jiffy));
147#endif
148
149 result = (unsigned long) (((mul2_out_high << 32) + mul2_out_low) >> CTC_JIFFY_SCALE_SHIFT);
150
151 return result;
152}
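
The arithmetic above is a fixed-point division: usecs = ctc_delta * usecs_per_jiffy / ctc_ticks_per_jiffy, with the divide replaced by a multiply against the precomputed 2^40-scaled reciprocal. A self-contained sketch with invented figures (100 MHz CPU, HZ = 100); it folds the kernel's split 32x32 multiplies into single 64-bit ones, which is safe at these magnitudes:

    #include <stdio.h>
    #include <stdint.h>

    #define CTC_JIFFY_SCALE_SHIFT 40

    int main(void)
    {
        uint64_t ticks_per_jiffy = 100000000ULL / 100;  /* 1,000,000 */
        uint64_t usecs_per_jiffy = 1000000ULL / 100;    /* 10,000 */

        /* Precomputed once, as time_init() does further down. */
        uint64_t scaled_recip =
                (1ULL << CTC_JIFFY_SCALE_SHIFT) / ticks_per_jiffy;

        /* Halfway through a jiffy the CTC has decremented 500,000 times. */
        uint64_t ctc_delta = 500000;

        uint64_t usecs = (ctc_delta * usecs_per_jiffy * scaled_recip)
                                >> CTC_JIFFY_SCALE_SHIFT;

        /* Prints 4999: ~5 ms, one microsecond low from flooring
         * the reciprocal. */
        printf("%llu usecs since tick\n", (unsigned long long)usecs);
        return 0;
    }
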
153
154void do_gettimeofday(struct timeval *tv)
155{
156 unsigned long flags;
157 unsigned long seq;
158 unsigned long usec, sec;
159
160 do {
161 seq = read_seqbegin_irqsave(&xtime_lock, flags);
162 usec = usecs_since_tick();
163 sec = xtime.tv_sec;
164 usec += xtime.tv_nsec / 1000;
165 } while (read_seqretry_irqrestore(&xtime_lock, seq, flags));
166
167 while (usec >= 1000000) {
168 usec -= 1000000;
169 sec++;
170 }
171
172 tv->tv_sec = sec;
173 tv->tv_usec = usec;
174}
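
The loop above is the classic seqlock read side: sample the sequence, copy the data, and retry if a writer changed the sequence underneath. A toy userspace model of the idea (no memory barriers, so illustrative only):

    #include <stdio.h>

    static volatile unsigned seq;           /* odd while a write is in flight */
    static volatile long tod_sec, tod_usec;

    static void write_time(long sec, long usec)
    {
        seq++;                              /* now odd: readers must wait */
        tod_sec = sec;
        tod_usec = usec;
        seq++;                              /* even again: data consistent */
    }

    static void read_time(long *sec, long *usec)
    {
        unsigned s;
        do {
            while ((s = seq) & 1)
                ;                           /* writer active, spin */
            *sec = tod_sec;
            *usec = tod_usec;
        } while (seq != s);                 /* sequence moved: retry */
    }

    int main(void)
    {
        long s, u;
        write_time(946684800, 500000);
        read_time(&s, &u);
        printf("%ld.%06ld\n", s, u);        /* prints 946684800.500000 */
        return 0;
    }
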
175
176int do_settimeofday(struct timespec *tv)
177{
178 time_t wtm_sec, sec = tv->tv_sec;
179 long wtm_nsec, nsec = tv->tv_nsec;
180
181 if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
182 return -EINVAL;
183
184 write_seqlock_irq(&xtime_lock);
185 /*
186 * This is revolting. We need to set "xtime" correctly. However, the
187 * value in this location is the value at the most recent update of
188 * wall time. Discover what correction gettimeofday() would have
189 * made, and then undo it!
190 */
191 nsec -= 1000 * usecs_since_tick();
192
193 wtm_sec = wall_to_monotonic.tv_sec + (xtime.tv_sec - sec);
194 wtm_nsec = wall_to_monotonic.tv_nsec + (xtime.tv_nsec - nsec);
195
196 set_normalized_timespec(&xtime, sec, nsec);
197 set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec);
198
199 ntp_clear();
200 write_sequnlock_irq(&xtime_lock);
201 clock_was_set();
202
203 return 0;
204}
205EXPORT_SYMBOL(do_settimeofday);
206
207/* Dummy RTC ops */
208static void null_rtc_get_time(struct timespec *tv)
209{
210 tv->tv_sec = mktime(2000, 1, 1, 0, 0, 0);
211 tv->tv_nsec = 0;
212}
213
214static int null_rtc_set_time(const time_t secs)
215{
216 return 0;
217}
218
219void (*rtc_sh_get_time)(struct timespec *) = null_rtc_get_time;
220int (*rtc_sh_set_time)(const time_t) = null_rtc_set_time;
221
222/* last time the RTC clock got updated */
223static long last_rtc_update;
224
225/*
226 * timer_interrupt() needs to keep up the real-time clock,
227 * as well as call the "do_timer()" routine every clocktick
228 */
229static inline void do_timer_interrupt(void)
230{
231 unsigned long long current_ctc;
232 asm ("getcon cr62, %0" : "=r" (current_ctc));
233 ctc_last_interrupt = (unsigned long) current_ctc;
234
235 do_timer(1);
236#ifndef CONFIG_SMP
237 update_process_times(user_mode(get_irq_regs()));
238#endif
239 if (current->pid)
240 profile_tick(CPU_PROFILING);
241
242#ifdef CONFIG_HEARTBEAT
243 if (sh_mv.mv_heartbeat != NULL)
244 sh_mv.mv_heartbeat();
245#endif
246
247 /*
248 * If we have an externally synchronized Linux clock, then update
249 * RTC clock accordingly every ~11 minutes. Set_rtc_mmss() has to be
250 * called as close as possible to 500 ms before the new second starts.
251 */
252 if (ntp_synced() &&
253 xtime.tv_sec > last_rtc_update + 660 &&
254 (xtime.tv_nsec / 1000) >= 500000 - ((unsigned) TICK_SIZE) / 2 &&
255 (xtime.tv_nsec / 1000) <= 500000 + ((unsigned) TICK_SIZE) / 2) {
256 if (rtc_sh_set_time(xtime.tv_sec) == 0)
257 last_rtc_update = xtime.tv_sec;
258 else
259 /* do it again in 60 s */
260 last_rtc_update = xtime.tv_sec - 600;
261 }
262}
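
To put numbers on the window test above: with HZ = 100, tick_nsec is 10,000,000 so TICK_SIZE works out to 10,000 usec, and the RTC write is attempted only while xtime.tv_nsec/1000 lies between 495,000 and 505,000, i.e. within half a tick of the 500 ms point. The 660-second threshold gives the ~11 minute cadence, and backing last_rtc_update off by 600 seconds after a failed write retries about a minute later.
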
263
264/*
265 * This is the same as the above, except we _also_ save the current
266 * Time Stamp Counter value at the time of the timer interrupt, so that
267 * we later on can estimate the time of day more exactly.
268 */
269static irqreturn_t timer_interrupt(int irq, void *dev_id)
270{
271 unsigned long timer_status;
272
273 /* Clear UNF bit */
274 timer_status = ctrl_inw(TMU0_TCR);
275 timer_status &= ~0x100;
276 ctrl_outw(timer_status, TMU0_TCR);
277
278 /*
279 * Here we are in the timer irq handler. We just have irqs locally
280 * disabled but we don't know if the timer_bh is running on the other
281 * CPU. We need to avoid an SMP race with it. NOTE: we don't need
282 * the irq version of write_lock because, as just said, we have irqs
283 * locally disabled. -arca
284 */
285 write_lock(&xtime_lock);
286 do_timer_interrupt();
287 write_unlock(&xtime_lock);
288
289 return IRQ_HANDLED;
290}
291
292
293static __init unsigned int get_cpu_hz(void)
294{
295 unsigned int count;
296 unsigned long __dummy;
297 unsigned long ctc_val_init, ctc_val;
298
299 /*
300 ** Regardless of the toolchain, force the compiler to use the
301 ** arbitrary register r3 as a clock tick counter.
302 ** NOTE: r3 must be in accordance with sh64_rtc_interrupt()
303 */
304 register unsigned long long __rtc_irq_flag __asm__ ("r3");
305
306 local_irq_enable();
307 do {} while (ctrl_inb(rtc_base) != 0);
308 ctrl_outb(RTC_RCR1_CIE, RTC_RCR1); /* Enable carry interrupt */
309
310 /*
311 * r3 is arbitrary. CDC does not support "=z".
312 */
313 ctc_val_init = 0xffffffff;
314 ctc_val = ctc_val_init;
315
316 asm volatile("gettr tr0, %1\n\t"
317 "putcon %0, " __CTC "\n\t"
318 "and %2, r63, %2\n\t"
319 "pta $+4, tr0\n\t"
320 "beq/l %2, r63, tr0\n\t"
321 "ptabs %1, tr0\n\t"
322 "getcon " __CTC ", %0\n\t"
323 : "=r"(ctc_val), "=r" (__dummy), "=r" (__rtc_irq_flag)
324 : "0" (0));
325 local_irq_disable();
326 /*
327 * SH-3:
328 * CPU clock = 4 stages * loop
329 * tst rm,rm if id ex
330 * bt/s 1b if id ex
331 * add #1,rd if id ex
332 * (if) pipeline stall
333 * tst rm,rm if id ex
334 * ....
335 *
336 *
337 * SH-4:
338 * CPU clock = 6 stages * loop
339 * I don't know why.
340 * ....
341 *
342 * SH-5:
343 * Use CTC register to count. This approach returns the right value
344 * even if the I-cache is disabled (e.g. whilst debugging.)
345 *
346 */
347
348 count = ctc_val_init - ctc_val; /* CTC counts down */
349
350 /*
351 * The count really needs to be scaled by the ratio between a
352 * complete R64CNT wrap-around (128) and the rate at which the
353 * CUI interrupt is raised (64); hence the factor of two below.
354 */
355 return count*2;
356}
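
The factor of two implies a measurement window of half a second: a hypothetical 100 MHz CPU would see the CTC decrement by roughly 50,000,000 between enabling the carry interrupt and its arrival, and count*2 then recovers 100,000,000 Hz.
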
357
358static irqreturn_t sh64_rtc_interrupt(int irq, void *dev_id)
359{
360 struct pt_regs *regs = get_irq_regs();
361
362 ctrl_outb(0, RTC_RCR1); /* Disable Carry Interrupts */
363 regs->regs[3] = 1; /* Using r3 */
364
365 return IRQ_HANDLED;
366}
367
368static struct irqaction irq0 = {
369 .handler = timer_interrupt,
370 .flags = IRQF_DISABLED,
371 .mask = CPU_MASK_NONE,
372 .name = "timer",
373};
374static struct irqaction irq1 = {
375 .handler = sh64_rtc_interrupt,
376 .flags = IRQF_DISABLED,
377 .mask = CPU_MASK_NONE,
378 .name = "rtc",
379};
380
381void __init time_init(void)
382{
383 unsigned int cpu_clock, master_clock, bus_clock, module_clock;
384 unsigned long interval;
385 unsigned long frqcr, ifc, pfc;
386 static int ifc_table[] = { 2, 4, 6, 8, 10, 12, 16, 24 };
387#define bfc_table ifc_table /* Same */
388#define pfc_table ifc_table /* Same */
389
390 tmu_base = onchip_remap(TMU_BASE, 1024, "TMU");
391 if (!tmu_base) {
392 panic("Unable to remap TMU\n");
393 }
394
395 rtc_base = onchip_remap(RTC_BASE, 1024, "RTC");
396 if (!rtc_base) {
397 panic("Unable to remap RTC\n");
398 }
399
400 cprc_base = onchip_remap(CPRC_BASE, 1024, "CPRC");
401 if (!cprc_base) {
402 panic("Unable to remap CPRC\n");
403 }
404
405 rtc_sh_get_time(&xtime);
406
407 setup_irq(TIMER_IRQ, &irq0);
408 setup_irq(RTC_IRQ, &irq1);
409
410 /* Check how fast it is.. */
411 cpu_clock = get_cpu_hz();
412
413 /* Note careful order of operations to maintain reasonable precision and avoid overflow. */
414 scaled_recip_ctc_ticks_per_jiffy = ((1ULL << CTC_JIFFY_SCALE_SHIFT) / (unsigned long long)(cpu_clock / HZ));
415
416 free_irq(RTC_IRQ, NULL);
417
418 printk("CPU clock: %d.%02dMHz\n",
419 (cpu_clock / 1000000), (cpu_clock % 1000000)/10000);
420 {
421 unsigned short bfc;
422 frqcr = ctrl_inl(FRQCR);
423 ifc = ifc_table[(frqcr>> 6) & 0x0007];
424 bfc = bfc_table[(frqcr>> 3) & 0x0007];
425 pfc = pfc_table[(frqcr>> 12) & 0x0007];
426 master_clock = cpu_clock * ifc;
427 bus_clock = master_clock/bfc;
428 }
429
430 printk("Bus clock: %d.%02dMHz\n",
431 (bus_clock/1000000), (bus_clock % 1000000)/10000);
432 module_clock = master_clock/pfc;
433 printk("Module clock: %d.%02dMHz\n",
434 (module_clock/1000000), (module_clock % 1000000)/10000);
435 interval = (module_clock/(HZ*4));
436
437 printk("Interval = %ld\n", interval);
438
439 current_cpu_data.cpu_clock = cpu_clock;
440 current_cpu_data.master_clock = master_clock;
441 current_cpu_data.bus_clock = bus_clock;
442 current_cpu_data.module_clock = module_clock;
443
444 /* Start TMU0 */
445 ctrl_outb(TMU_TSTR_OFF, TMU_TSTR);
446 ctrl_outb(TMU_TOCR_INIT, TMU_TOCR);
447 ctrl_outw(TMU0_TCR_INIT, TMU0_TCR);
448 ctrl_outl(interval, TMU0_TCOR);
449 ctrl_outl(interval, TMU0_TCNT);
450 ctrl_outb(TMU_TSTR_INIT, TMU_TSTR);
451}
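
A sketch of the FRQCR decode done at the end of time_init(), using the same shifts and divider table but a made-up register value and CPU clock:

    #include <stdio.h>

    static const int ratio_table[] = { 2, 4, 6, 8, 10, 12, 16, 24 };

    int main(void)
    {
        /* Hypothetical FRQCR: ifc field 0, bfc field 1, pfc field 3. */
        unsigned long frqcr = (3UL << 12) | (1UL << 3) | (0UL << 6);
        unsigned int cpu_clock = 50000000;          /* invented: 50 MHz */

        int ifc = ratio_table[(frqcr >> 6) & 7];    /* 2 */
        int bfc = ratio_table[(frqcr >> 3) & 7];    /* 4 */
        int pfc = ratio_table[(frqcr >> 12) & 7];   /* 8 */

        unsigned int master = cpu_clock * ifc;      /* 100 MHz */

        /* Prints: master 100000000, bus 25000000, module 12500000 */
        printf("master %u, bus %u, module %u\n",
               master, master / bfc, master / pfc);
        return 0;
    }
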
452
453void enter_deep_standby(void)
454{
455 /* Disable watchdog timer */
456 ctrl_outl(0xa5000000, WTCSR);
457 /* Configure deep standby on sleep */
458 ctrl_outl(0x03, STBCR);
459
460#ifdef CONFIG_SH_ALPHANUMERIC
461 {
462 extern void mach_alphanum(int position, unsigned char value);
463 extern void mach_alphanum_brightness(int setting);
464 char halted[] = "Halted. ";
465 int i;
466 mach_alphanum_brightness(6); /* dimmest setting above off */
467 for (i=0; i<8; i++) {
468 mach_alphanum(i, halted[i]);
469 }
470 asm __volatile__ ("synco");
471 }
472#endif
473
474 asm __volatile__ ("sleep");
475 asm __volatile__ ("synci");
476 asm __volatile__ ("nop");
477 asm __volatile__ ("nop");
478 asm __volatile__ ("nop");
479 asm __volatile__ ("nop");
480 panic("Unexpected wakeup!\n");
481}
482
483static struct resource rtc_resources[] = {
484 [0] = {
485 /* RTC base, filled in by rtc_init */
486 .flags = IORESOURCE_IO,
487 },
488 [1] = {
489 /* Period IRQ */
490 .start = IRQ_PRI,
491 .flags = IORESOURCE_IRQ,
492 },
493 [2] = {
494 /* Carry IRQ */
495 .start = IRQ_CUI,
496 .flags = IORESOURCE_IRQ,
497 },
498 [3] = {
499 /* Alarm IRQ */
500 .start = IRQ_ATI,
501 .flags = IORESOURCE_IRQ,
502 },
503};
504
505static struct platform_device rtc_device = {
506 .name = "sh-rtc",
507 .id = -1,
508 .num_resources = ARRAY_SIZE(rtc_resources),
509 .resource = rtc_resources,
510};
511
512static int __init rtc_init(void)
513{
514 rtc_resources[0].start = rtc_base;
515 rtc_resources[0].end = rtc_resources[0].start + 0x58 - 1;
516
517 return platform_device_register(&rtc_device);
518}
519device_initcall(rtc_init);
diff --git a/arch/sh/kernel/timers/timer-cmt.c b/arch/sh/kernel/timers/timer-cmt.c
index 82de6895ade5..499e07beebe2 100644
--- a/arch/sh/kernel/timers/timer-cmt.c
+++ b/arch/sh/kernel/timers/timer-cmt.c
@@ -31,7 +31,9 @@
31#define cmt_clock_enable() do { ctrl_outb(ctrl_inb(STBCR3) & ~0x10, STBCR3); } while(0) 31#define cmt_clock_enable() do { ctrl_outb(ctrl_inb(STBCR3) & ~0x10, STBCR3); } while(0)
32#define CMT_CMCSR_INIT 0x0040 32#define CMT_CMCSR_INIT 0x0040
33#define CMT_CMCSR_CALIB 0x0000 33#define CMT_CMCSR_CALIB 0x0000
34#elif defined(CONFIG_CPU_SUBTYPE_SH7206) 34#elif defined(CONFIG_CPU_SUBTYPE_SH7203) || \
35 defined(CONFIG_CPU_SUBTYPE_SH7206) || \
36 defined(CONFIG_CPU_SUBTYPE_SH7263)
35#define CMT_CMSTR 0xfffec000 37#define CMT_CMSTR 0xfffec000
36#define CMT_CMCSR_0 0xfffec002 38#define CMT_CMCSR_0 0xfffec002
37#define CMT_CMCNT_0 0xfffec004 39#define CMT_CMCNT_0 0xfffec004
diff --git a/arch/sh/kernel/timers/timer-tmu.c b/arch/sh/kernel/timers/timer-tmu.c
index 628ec9a15e38..8935570008d2 100644
--- a/arch/sh/kernel/timers/timer-tmu.c
+++ b/arch/sh/kernel/timers/timer-tmu.c
@@ -174,6 +174,7 @@ static int tmu_timer_init(void)
174 tmu_timer_stop(); 174 tmu_timer_stop();
175 175
176#if !defined(CONFIG_CPU_SUBTYPE_SH7720) && \ 176#if !defined(CONFIG_CPU_SUBTYPE_SH7720) && \
177 !defined(CONFIG_CPU_SUBTYPE_SH7721) && \
177 !defined(CONFIG_CPU_SUBTYPE_SH7760) && \ 178 !defined(CONFIG_CPU_SUBTYPE_SH7760) && \
178 !defined(CONFIG_CPU_SUBTYPE_SH7785) && \ 179 !defined(CONFIG_CPU_SUBTYPE_SH7785) && \
179 !defined(CONFIG_CPU_SUBTYPE_SHX3) 180 !defined(CONFIG_CPU_SUBTYPE_SHX3)
diff --git a/arch/sh/kernel/traps.c b/arch/sh/kernel/traps.c
index cf99111cb33f..a3bdc68ef02c 100644
--- a/arch/sh/kernel/traps.c
+++ b/arch/sh/kernel/traps.c
@@ -1,947 +1,68 @@
1/*
2 * 'traps.c' handles hardware traps and faults after we have saved some
3 * state in 'entry.S'.
4 *
5 * SuperH version: Copyright (C) 1999 Niibe Yutaka
6 * Copyright (C) 2000 Philipp Rumpf
7 * Copyright (C) 2000 David Howells
8 * Copyright (C) 2002 - 2007 Paul Mundt
9 *
10 * This file is subject to the terms and conditions of the GNU General Public
11 * License. See the file "COPYING" in the main directory of this archive
12 * for more details.
13 */
14#include <linux/kernel.h>
15#include <linux/ptrace.h>
16#include <linux/init.h>
17#include <linux/spinlock.h>
18#include <linux/module.h>
19#include <linux/kallsyms.h>
20#include <linux/io.h>
21#include <linux/bug.h> 1#include <linux/bug.h>
22#include <linux/debug_locks.h> 2#include <linux/io.h>
3#include <linux/types.h>
23#include <linux/kdebug.h> 4#include <linux/kdebug.h>
24#include <linux/kexec.h> 5#include <linux/signal.h>
25#include <linux/limits.h> 6#include <linux/sched.h>
26#include <asm/system.h> 7#include <asm/system.h>
27#include <asm/uaccess.h>
28
29#ifdef CONFIG_SH_KGDB
30#include <asm/kgdb.h>
31#define CHK_REMOTE_DEBUG(regs) \
32{ \
33 if (kgdb_debug_hook && !user_mode(regs))\
34 (*kgdb_debug_hook)(regs); \
35}
36#else
37#define CHK_REMOTE_DEBUG(regs)
38#endif
39
40#ifdef CONFIG_CPU_SH2
41# define TRAP_RESERVED_INST 4
42# define TRAP_ILLEGAL_SLOT_INST 6
43# define TRAP_ADDRESS_ERROR 9
44# ifdef CONFIG_CPU_SH2A
45# define TRAP_DIVZERO_ERROR 17
46# define TRAP_DIVOVF_ERROR 18
47# endif
48#else
49#define TRAP_RESERVED_INST 12
50#define TRAP_ILLEGAL_SLOT_INST 13
51#endif
52
53static void dump_mem(const char *str, unsigned long bottom, unsigned long top)
54{
55 unsigned long p;
56 int i;
57
58 printk("%s(0x%08lx to 0x%08lx)\n", str, bottom, top);
59
60 for (p = bottom & ~31; p < top; ) {
61 printk("%04lx: ", p & 0xffff);
62
63 for (i = 0; i < 8; i++, p += 4) {
64 unsigned int val;
65
66 if (p < bottom || p >= top)
67 printk(" ");
68 else {
69 if (__get_user(val, (unsigned int __user *)p)) {
70 printk("\n");
71 return;
72 }
73 printk("%08x ", val);
74 }
75 }
76 printk("\n");
77 }
78}
79
80static DEFINE_SPINLOCK(die_lock);
81
82void die(const char * str, struct pt_regs * regs, long err)
83{
84 static int die_counter;
85
86 oops_enter();
87
88 console_verbose();
89 spin_lock_irq(&die_lock);
90 bust_spinlocks(1);
91
92 printk("%s: %04lx [#%d]\n", str, err & 0xffff, ++die_counter);
93
94 CHK_REMOTE_DEBUG(regs);
95 print_modules();
96 show_regs(regs);
97
98 printk("Process: %s (pid: %d, stack limit = %p)\n", current->comm,
99 task_pid_nr(current), task_stack_page(current) + 1);
100
101 if (!user_mode(regs) || in_interrupt())
102 dump_mem("Stack: ", regs->regs[15], THREAD_SIZE +
103 (unsigned long)task_stack_page(current));
104
105 bust_spinlocks(0);
106 add_taint(TAINT_DIE);
107 spin_unlock_irq(&die_lock);
108
109 if (kexec_should_crash(current))
110 crash_kexec(regs);
111
112 if (in_interrupt())
113 panic("Fatal exception in interrupt");
114
115 if (panic_on_oops)
116 panic("Fatal exception");
117
118 oops_exit();
119 do_exit(SIGSEGV);
120}
121
122static inline void die_if_kernel(const char *str, struct pt_regs *regs,
123 long err)
124{
125 if (!user_mode(regs))
126 die(str, regs, err);
127}
128
129/*
130 * try and fix up kernelspace address errors
131 * - userspace errors just cause EFAULT to be returned, resulting in SEGV
132 * - kernel/userspace interfaces cause a jump to an appropriate handler
133 * - other kernel errors are bad
134 * - return 0 if fixed-up, -EFAULT if non-fatal (to the kernel) fault
135 */
136static int die_if_no_fixup(const char * str, struct pt_regs * regs, long err)
137{
138 if (!user_mode(regs)) {
139 const struct exception_table_entry *fixup;
140 fixup = search_exception_tables(regs->pc);
141 if (fixup) {
142 regs->pc = fixup->fixup;
143 return 0;
144 }
145 die(str, regs, err);
146 }
147 return -EFAULT;
148}
149
150/*
151 * handle an instruction that does an unaligned memory access by emulating the
152 * desired behaviour
153 * - note that PC _may not_ point to the faulting instruction
154 * (if that instruction is in a branch delay slot)
155 * - return 0 if emulation okay, -EFAULT on existential error
156 */
157static int handle_unaligned_ins(u16 instruction, struct pt_regs *regs)
158{
159 int ret, index, count;
160 unsigned long *rm, *rn;
161 unsigned char *src, *dst;
162
163 index = (instruction>>8)&15; /* 0x0F00 */
164 rn = &regs->regs[index];
165
166 index = (instruction>>4)&15; /* 0x00F0 */
167 rm = &regs->regs[index];
168
169 count = 1<<(instruction&3);
170
171 ret = -EFAULT;
172 switch (instruction>>12) {
173 case 0: /* mov.[bwl] to/from memory via r0+rn */
174 if (instruction & 8) {
175 /* from memory */
176 src = (unsigned char*) *rm;
177 src += regs->regs[0];
178 dst = (unsigned char*) rn;
179 *(unsigned long*)dst = 0;
180
181#ifdef __LITTLE_ENDIAN__
182 if (copy_from_user(dst, src, count))
183 goto fetch_fault;
184
185 if ((count == 2) && dst[1] & 0x80) {
186 dst[2] = 0xff;
187 dst[3] = 0xff;
188 }
189#else
190 dst += 4-count;
191
192 if (__copy_user(dst, src, count))
193 goto fetch_fault;
194
195 if ((count == 2) && dst[2] & 0x80) {
196 dst[0] = 0xff;
197 dst[1] = 0xff;
198 }
199#endif
200 } else {
201 /* to memory */
202 src = (unsigned char*) rm;
203#if !defined(__LITTLE_ENDIAN__)
204 src += 4-count;
205#endif
206 dst = (unsigned char*) *rn;
207 dst += regs->regs[0];
208
209 if (copy_to_user(dst, src, count))
210 goto fetch_fault;
211 }
212 ret = 0;
213 break;
214
215 case 1: /* mov.l Rm,@(disp,Rn) */
216 src = (unsigned char*) rm;
217 dst = (unsigned char*) *rn;
218 dst += (instruction&0x000F)<<2;
219
220 if (copy_to_user(dst,src,4))
221 goto fetch_fault;
222 ret = 0;
223 break;
224
225 case 2: /* mov.[bwl] to memory, possibly with pre-decrement */
226 if (instruction & 4)
227 *rn -= count;
228 src = (unsigned char*) rm;
229 dst = (unsigned char*) *rn;
230#if !defined(__LITTLE_ENDIAN__)
231 src += 4-count;
232#endif
233 if (copy_to_user(dst, src, count))
234 goto fetch_fault;
235 ret = 0;
236 break;
237
238 case 5: /* mov.l @(disp,Rm),Rn */
239 src = (unsigned char*) *rm;
240 src += (instruction&0x000F)<<2;
241 dst = (unsigned char*) rn;
242 *(unsigned long*)dst = 0;
243
244 if (copy_from_user(dst,src,4))
245 goto fetch_fault;
246 ret = 0;
247 break;
248 8
249 case 6: /* mov.[bwl] from memory, possibly with post-increment */ 9#ifdef CONFIG_BUG
250 src = (unsigned char*) *rm; 10static void handle_BUG(struct pt_regs *regs)
251 if (instruction & 4)
252 *rm += count;
253 dst = (unsigned char*) rn;
254 *(unsigned long*)dst = 0;
255
256#ifdef __LITTLE_ENDIAN__
257 if (copy_from_user(dst, src, count))
258 goto fetch_fault;
259
260 if ((count == 2) && dst[1] & 0x80) {
261 dst[2] = 0xff;
262 dst[3] = 0xff;
263 }
264#else
265 dst += 4-count;
266
267 if (copy_from_user(dst, src, count))
268 goto fetch_fault;
269
270 if ((count == 2) && dst[2] & 0x80) {
271 dst[0] = 0xff;
272 dst[1] = 0xff;
273 }
274#endif
275 ret = 0;
276 break;
277
278 case 8:
279 switch ((instruction&0xFF00)>>8) {
280 case 0x81: /* mov.w R0,@(disp,Rn) */
281 src = (unsigned char*) &regs->regs[0];
282#if !defined(__LITTLE_ENDIAN__)
283 src += 2;
284#endif
285 dst = (unsigned char*) *rm; /* called Rn in the spec */
286 dst += (instruction&0x000F)<<1;
287
288 if (copy_to_user(dst, src, 2))
289 goto fetch_fault;
290 ret = 0;
291 break;
292
293 case 0x85: /* mov.w @(disp,Rm),R0 */
294 src = (unsigned char*) *rm;
295 src += (instruction&0x000F)<<1;
296 dst = (unsigned char*) &regs->regs[0];
297 *(unsigned long*)dst = 0;
298
299#if !defined(__LITTLE_ENDIAN__)
300 dst += 2;
301#endif
302
303 if (copy_from_user(dst, src, 2))
304 goto fetch_fault;
305
306#ifdef __LITTLE_ENDIAN__
307 if (dst[1] & 0x80) {
308 dst[2] = 0xff;
309 dst[3] = 0xff;
310 }
311#else
312 if (dst[2] & 0x80) {
313 dst[0] = 0xff;
314 dst[1] = 0xff;
315 }
316#endif
317 ret = 0;
318 break;
319 }
320 break;
321 }
322 return ret;
323
324 fetch_fault:
325 /* Argh. Address not only misaligned but also non-existent.
326 * Raise an EFAULT and see if it's trapped
327 */
328 return die_if_no_fixup("Fault in unaligned fixup", regs, 0);
329}
330
331/*
332 * emulate the instruction in the delay slot
333 * - fetches the instruction from PC+2
334 */
335static inline int handle_unaligned_delayslot(struct pt_regs *regs)
336{ 11{
337 u16 instruction; 12 enum bug_trap_type tt;
338 13 tt = report_bug(regs->pc, regs);
339 if (copy_from_user(&instruction, (u16 *)(regs->pc+2), 2)) { 14 if (tt == BUG_TRAP_TYPE_WARN) {
340 /* the instruction-fetch faulted */ 15 regs->pc += instruction_size(regs->pc);
341 if (user_mode(regs)) 16 return;
342 return -EFAULT;
343
344 /* kernel */
345 die("delay-slot-insn faulting in handle_unaligned_delayslot",
346 regs, 0);
347 } 17 }
348 18
349 return handle_unaligned_ins(instruction,regs); 19 die("Kernel BUG", regs, TRAPA_BUG_OPCODE & 0xff);
350} 20}
351 21
352/* 22int is_valid_bugaddr(unsigned long addr)
353 * handle an instruction that does an unaligned memory access
354 * - have to be careful of branch delay-slot instructions that fault
355 * SH3:
356 * - if the branch would be taken PC points to the branch
357 * - if the branch would not be taken, PC points to delay-slot
358 * SH4:
359 * - PC always points to delayed branch
360 * - return 0 if handled, -EFAULT if failed (may not return if in kernel)
361 */
362
363/* Macros to determine offset from current PC for branch instructions */
364/* Explicit type coercion is used to force sign extension where needed */
365#define SH_PC_8BIT_OFFSET(instr) ((((signed char)(instr))*2) + 4)
366#define SH_PC_12BIT_OFFSET(instr) ((((signed short)(instr<<4))>>3) + 4)
367
368/*
369 * XXX: SH-2A needs this too, but it needs an overhaul thanks to mixed 32-bit
370 * opcodes..
371 */
372#ifndef CONFIG_CPU_SH2A
373static int handle_unaligned_notify_count = 10;
374
375static int handle_unaligned_access(u16 instruction, struct pt_regs *regs)
376{ 23{
377 u_int rm; 24 return addr >= PAGE_OFFSET;
378 int ret, index;
379
380 index = (instruction>>8)&15; /* 0x0F00 */
381 rm = regs->regs[index];
382
383 /* shout about the first ten userspace fixups */
384 if (user_mode(regs) && handle_unaligned_notify_count>0) {
385 handle_unaligned_notify_count--;
386
387 printk(KERN_NOTICE "Fixing up unaligned userspace access "
388 "in \"%s\" pid=%d pc=0x%p ins=0x%04hx\n",
389 current->comm, task_pid_nr(current),
390 (u16 *)regs->pc, instruction);
391 }
392
393 ret = -EFAULT;
394 switch (instruction&0xF000) {
395 case 0x0000:
396 if (instruction==0x000B) {
397 /* rts */
398 ret = handle_unaligned_delayslot(regs);
399 if (ret==0)
400 regs->pc = regs->pr;
401 }
402 else if ((instruction&0x00FF)==0x0023) {
403 /* braf @Rm */
404 ret = handle_unaligned_delayslot(regs);
405 if (ret==0)
406 regs->pc += rm + 4;
407 }
408 else if ((instruction&0x00FF)==0x0003) {
409 /* bsrf @Rm */
410 ret = handle_unaligned_delayslot(regs);
411 if (ret==0) {
412 regs->pr = regs->pc + 4;
413 regs->pc += rm + 4;
414 }
415 }
416 else {
417 /* mov.[bwl] to/from memory via r0+rn */
418 goto simple;
419 }
420 break;
421
422 case 0x1000: /* mov.l Rm,@(disp,Rn) */
423 goto simple;
424
425 case 0x2000: /* mov.[bwl] to memory, possibly with pre-decrement */
426 goto simple;
427
428 case 0x4000:
429 if ((instruction&0x00FF)==0x002B) {
430 /* jmp @Rm */
431 ret = handle_unaligned_delayslot(regs);
432 if (ret==0)
433 regs->pc = rm;
434 }
435 else if ((instruction&0x00FF)==0x000B) {
436 /* jsr @Rm */
437 ret = handle_unaligned_delayslot(regs);
438 if (ret==0) {
439 regs->pr = regs->pc + 4;
440 regs->pc = rm;
441 }
442 }
443 else {
444 /* mov.[bwl] to/from memory via r0+rn */
445 goto simple;
446 }
447 break;
448
449 case 0x5000: /* mov.l @(disp,Rm),Rn */
450 goto simple;
451
452 case 0x6000: /* mov.[bwl] from memory, possibly with post-increment */
453 goto simple;
454
455 case 0x8000: /* bf lab, bf/s lab, bt lab, bt/s lab */
456 switch (instruction&0x0F00) {
457 case 0x0100: /* mov.w R0,@(disp,Rm) */
458 goto simple;
459 case 0x0500: /* mov.w @(disp,Rm),R0 */
460 goto simple;
461 case 0x0B00: /* bf lab - no delayslot*/
462 break;
463 case 0x0F00: /* bf/s lab */
464 ret = handle_unaligned_delayslot(regs);
465 if (ret==0) {
466#if defined(CONFIG_CPU_SH4) || defined(CONFIG_SH7705_CACHE_32KB)
467 if ((regs->sr & 0x00000001) != 0)
468 regs->pc += 4; /* next after slot */
469 else
470#endif
471 regs->pc += SH_PC_8BIT_OFFSET(instruction);
472 }
473 break;
474 case 0x0900: /* bt lab - no delayslot */
475 break;
476 case 0x0D00: /* bt/s lab */
477 ret = handle_unaligned_delayslot(regs);
478 if (ret==0) {
479#if defined(CONFIG_CPU_SH4) || defined(CONFIG_SH7705_CACHE_32KB)
480 if ((regs->sr & 0x00000001) == 0)
481 regs->pc += 4; /* next after slot */
482 else
483#endif
484 regs->pc += SH_PC_8BIT_OFFSET(instruction);
485 }
486 break;
487 }
488 break;
489
490 case 0xA000: /* bra label */
491 ret = handle_unaligned_delayslot(regs);
492 if (ret==0)
493 regs->pc += SH_PC_12BIT_OFFSET(instruction);
494 break;
495
496 case 0xB000: /* bsr label */
497 ret = handle_unaligned_delayslot(regs);
498 if (ret==0) {
499 regs->pr = regs->pc + 4;
500 regs->pc += SH_PC_12BIT_OFFSET(instruction);
501 }
502 break;
503 }
504 return ret;
505
506 /* handle non-delay-slot instruction */
507 simple:
508 ret = handle_unaligned_ins(instruction,regs);
509 if (ret==0)
510 regs->pc += instruction_size(instruction);
511 return ret;
512} 25}
513#endif /* CONFIG_CPU_SH2A */
514
515#ifdef CONFIG_CPU_HAS_SR_RB
516#define lookup_exception_vector(x) \
517 __asm__ __volatile__ ("stc r2_bank, %0\n\t" : "=r" ((x)))
518#else
519#define lookup_exception_vector(x) \
520 __asm__ __volatile__ ("mov r4, %0\n\t" : "=r" ((x)))
521#endif 26#endif
522 27
523/* 28/*
524 * Handle various address error exceptions: 29 * Generic trap handler.
525 * - instruction address error:
526 * misaligned PC
527 * PC >= 0x80000000 in user mode
528 * - data address error (read and write)
529 * misaligned data access
530 * access to >= 0x80000000 in user mode
531 * Unfortunately we can't distinguish between instruction address error
532 * and data address errors caused by read accesses.
533 */ 30 */
534asmlinkage void do_address_error(struct pt_regs *regs, 31BUILD_TRAP_HANDLER(debug)
535 unsigned long writeaccess,
536 unsigned long address)
537{ 32{
538 unsigned long error_code = 0; 33 TRAP_HANDLER_DECL;
539 mm_segment_t oldfs;
540 siginfo_t info;
541#ifndef CONFIG_CPU_SH2A
542 u16 instruction;
543 int tmp;
544#endif
545
546 /* Intentional ifdef */
547#ifdef CONFIG_CPU_HAS_SR_RB
548 lookup_exception_vector(error_code);
549#endif
550
551 oldfs = get_fs();
552
553 if (user_mode(regs)) {
554 int si_code = BUS_ADRERR;
555
556 local_irq_enable();
557 34
558 /* bad PC is not something we can fix */ 35 /* Rewind */
559 if (regs->pc & 1) { 36 regs->pc -= instruction_size(ctrl_inw(regs->pc - 4));
560 si_code = BUS_ADRALN;
561 goto uspace_segv;
562 }
563 37
564#ifndef CONFIG_CPU_SH2A 38 if (notify_die(DIE_TRAP, "debug trap", regs, 0, vec & 0xff,
565 set_fs(USER_DS); 39 SIGTRAP) == NOTIFY_STOP)
566 if (copy_from_user(&instruction, (u16 *)(regs->pc), 2)) { 40 return;
567 /* Argh. Fault on the instruction itself.
568 This should never happen on non-SMP systems
569 */
570 set_fs(oldfs);
571 goto uspace_segv;
572 }
573
574 tmp = handle_unaligned_access(instruction, regs);
575 set_fs(oldfs);
576
577 if (tmp==0)
578 return; /* sorted */
579#endif
580
581uspace_segv:
582 printk(KERN_NOTICE "Sending SIGBUS to \"%s\" due to unaligned "
583 "access (PC %lx PR %lx)\n", current->comm, regs->pc,
584 regs->pr);
585
586 info.si_signo = SIGBUS;
587 info.si_errno = 0;
588 info.si_code = si_code;
589 info.si_addr = (void __user *)address;
590 force_sig_info(SIGBUS, &info, current);
591 } else {
592 if (regs->pc & 1)
593 die("unaligned program counter", regs, error_code);
594
595#ifndef CONFIG_CPU_SH2A
596 set_fs(KERNEL_DS);
597 if (copy_from_user(&instruction, (u16 *)(regs->pc), 2)) {
598 /* Argh. Fault on the instruction itself.
599 This should never happen on non-SMP systems
600 */
601 set_fs(oldfs);
602 die("insn faulting in do_address_error", regs, 0);
603 }
604
605 handle_unaligned_access(instruction, regs);
606 set_fs(oldfs);
607#else
608 printk(KERN_NOTICE "Killing process \"%s\" due to unaligned "
609 "access\n", current->comm);
610 41
611 force_sig(SIGSEGV, current); 42 force_sig(SIGTRAP, current);
612#endif
613 }
614} 43}
615 44
616#ifdef CONFIG_SH_DSP
617/* 45/*
618 * SH-DSP support gerg@snapgear.com. 46 * Special handler for BUG() traps.
619 */ 47 */
620int is_dsp_inst(struct pt_regs *regs) 48BUILD_TRAP_HANDLER(bug)
621{ 49{
622 unsigned short inst = 0; 50 TRAP_HANDLER_DECL;
623
624 /*
625 * Safe guard if DSP mode is already enabled or we're lacking
626 * the DSP altogether.
627 */
628 if (!(current_cpu_data.flags & CPU_HAS_DSP) || (regs->sr & SR_DSP))
629 return 0;
630
631 get_user(inst, ((unsigned short *) regs->pc));
632
633 inst &= 0xf000;
634
635 /* Check for any type of DSP or support instruction */
636 if ((inst == 0xf000) || (inst == 0x4000))
637 return 1;
638
639 return 0;
640}
641#else
642#define is_dsp_inst(regs) (0)
643#endif /* CONFIG_SH_DSP */
644 51
645#ifdef CONFIG_CPU_SH2A 52 /* Rewind */
646asmlinkage void do_divide_error(unsigned long r4, unsigned long r5, 53 regs->pc -= instruction_size(ctrl_inw(regs->pc - 4));
647 unsigned long r6, unsigned long r7,
648 struct pt_regs __regs)
649{
650 siginfo_t info;
651
652 switch (r4) {
653 case TRAP_DIVZERO_ERROR:
654 info.si_code = FPE_INTDIV;
655 break;
656 case TRAP_DIVOVF_ERROR:
657 info.si_code = FPE_INTOVF;
658 break;
659 }
660
661 force_sig_info(SIGFPE, &info, current);
662}
663#endif
664
665/* arch/sh/kernel/cpu/sh4/fpu.c */
666extern int do_fpu_inst(unsigned short, struct pt_regs *);
667extern asmlinkage void do_fpu_state_restore(unsigned long r4, unsigned long r5,
668 unsigned long r6, unsigned long r7, struct pt_regs __regs);
669
670asmlinkage void do_reserved_inst(unsigned long r4, unsigned long r5,
671 unsigned long r6, unsigned long r7,
672 struct pt_regs __regs)
673{
674 struct pt_regs *regs = RELOC_HIDE(&__regs, 0);
675 unsigned long error_code;
676 struct task_struct *tsk = current;
677
678#ifdef CONFIG_SH_FPU_EMU
679 unsigned short inst = 0;
680 int err;
681
682 get_user(inst, (unsigned short*)regs->pc);
683
684 err = do_fpu_inst(inst, regs);
685 if (!err) {
686 regs->pc += instruction_size(inst);
687 return;
688 }
689 /* not a FPU inst. */
690#endif
691 54
692#ifdef CONFIG_SH_DSP 55 if (notify_die(DIE_TRAP, "bug trap", regs, 0, TRAPA_BUG_OPCODE & 0xff,
693 /* Check if it's a DSP instruction */ 56 SIGTRAP) == NOTIFY_STOP)
694 if (is_dsp_inst(regs)) {
695 /* Enable DSP mode, and restart instruction. */
696 regs->sr |= SR_DSP;
697 return; 57 return;
698 }
699#endif
700
701 lookup_exception_vector(error_code);
702
703 local_irq_enable();
704 CHK_REMOTE_DEBUG(regs);
705 force_sig(SIGILL, tsk);
706 die_if_no_fixup("reserved instruction", regs, error_code);
707}
708
709#ifdef CONFIG_SH_FPU_EMU
710static int emulate_branch(unsigned short inst, struct pt_regs* regs)
711{
712 /*
713 * bfs: 8fxx: PC+=d*2+4;
714 * bts: 8dxx: PC+=d*2+4;
715 * bra: axxx: PC+=D*2+4;
716 * bsr: bxxx: PC+=D*2+4 after PR=PC+4;
717 * braf:0x23: PC+=Rn*2+4;
718 * bsrf:0x03: PC+=Rn*2+4 after PR=PC+4;
719 * jmp: 4x2b: PC=Rn;
720 * jsr: 4x0b: PC=Rn after PR=PC+4;
721 * rts: 000b: PC=PR;
722 */
723 if ((inst & 0xfd00) == 0x8d00) {
724 regs->pc += SH_PC_8BIT_OFFSET(inst);
725 return 0;
726 }
727
728 if ((inst & 0xe000) == 0xa000) {
729 regs->pc += SH_PC_12BIT_OFFSET(inst);
730 return 0;
731 }
732
733 if ((inst & 0xf0df) == 0x0003) {
734 regs->pc += regs->regs[(inst & 0x0f00) >> 8] + 4;
735 return 0;
736 }
737
738 if ((inst & 0xf0df) == 0x400b) {
739 regs->pc = regs->regs[(inst & 0x0f00) >> 8];
740 return 0;
741 }
742
743 if ((inst & 0xffff) == 0x000b) {
744 regs->pc = regs->pr;
745 return 0;
746 }
747
748 return 1;
749}
750#endif
751
752asmlinkage void do_illegal_slot_inst(unsigned long r4, unsigned long r5,
753 unsigned long r6, unsigned long r7,
754 struct pt_regs __regs)
755{
756 struct pt_regs *regs = RELOC_HIDE(&__regs, 0);
757 unsigned long error_code;
758 struct task_struct *tsk = current;
759#ifdef CONFIG_SH_FPU_EMU
760 unsigned short inst = 0;
761
762 get_user(inst, (unsigned short *)regs->pc + 1);
763 if (!do_fpu_inst(inst, regs)) {
764 get_user(inst, (unsigned short *)regs->pc);
765 if (!emulate_branch(inst, regs))
766 return;
767 /* fault in branch.*/
768 }
769 /* not a FPU inst. */
770#endif
771
772 lookup_exception_vector(error_code);
773
774 local_irq_enable();
775 CHK_REMOTE_DEBUG(regs);
776 force_sig(SIGILL, tsk);
777 die_if_no_fixup("illegal slot instruction", regs, error_code);
778}
779
780asmlinkage void do_exception_error(unsigned long r4, unsigned long r5,
781 unsigned long r6, unsigned long r7,
782 struct pt_regs __regs)
783{
784 struct pt_regs *regs = RELOC_HIDE(&__regs, 0);
785 long ex;
786
787 lookup_exception_vector(ex);
788 die_if_kernel("exception", regs, ex);
789}
790
791#if defined(CONFIG_SH_STANDARD_BIOS)
792void *gdb_vbr_vector;
793
794static inline void __init gdb_vbr_init(void)
795{
796 register unsigned long vbr;
797
798 /*
799 * Read the old value of the VBR register to initialise
800 * the vector through which debug and BIOS traps are
801 * delegated by the Linux trap handler.
802 */
803 asm volatile("stc vbr, %0" : "=r" (vbr));
804
805 gdb_vbr_vector = (void *)(vbr + 0x100);
806 printk("Setting GDB trap vector to 0x%08lx\n",
807 (unsigned long)gdb_vbr_vector);
808}
809#endif
810
811void __cpuinit per_cpu_trap_init(void)
812{
813 extern void *vbr_base;
814
815#ifdef CONFIG_SH_STANDARD_BIOS
816 if (raw_smp_processor_id() == 0)
817 gdb_vbr_init();
818#endif
819
820 /* NOTE: The VBR value should be at P1
821 (or P2, the virtual "fixed" address space).
822 It definitely should not be a physical address. */
823
824 asm volatile("ldc %0, vbr"
825 : /* no output */
826 : "r" (&vbr_base)
827 : "memory");
828}
829
830void *set_exception_table_vec(unsigned int vec, void *handler)
831{
832 extern void *exception_handling_table[];
833 void *old_handler;
834
835 old_handler = exception_handling_table[vec];
836 exception_handling_table[vec] = handler;
837 return old_handler;
838}
839
840extern asmlinkage void address_error_handler(unsigned long r4, unsigned long r5,
841 unsigned long r6, unsigned long r7,
842 struct pt_regs __regs);
843
844void __init trap_init(void)
845{
846 set_exception_table_vec(TRAP_RESERVED_INST, do_reserved_inst);
847 set_exception_table_vec(TRAP_ILLEGAL_SLOT_INST, do_illegal_slot_inst);
848
849#if defined(CONFIG_CPU_SH4) && !defined(CONFIG_SH_FPU) || \
850 defined(CONFIG_SH_FPU_EMU)
851 /*
852 * For SH-4 lacking an FPU, treat floating point instructions as
853 * reserved. They'll be handled in the math-emu case, or faulted on
854 * otherwise.
855 */
856 set_exception_table_evt(0x800, do_reserved_inst);
857 set_exception_table_evt(0x820, do_illegal_slot_inst);
858#elif defined(CONFIG_SH_FPU)
859#ifdef CONFIG_CPU_SUBTYPE_SHX3
860 set_exception_table_evt(0xd80, do_fpu_state_restore);
861 set_exception_table_evt(0xda0, do_fpu_state_restore);
862#else
863 set_exception_table_evt(0x800, do_fpu_state_restore);
864 set_exception_table_evt(0x820, do_fpu_state_restore);
865#endif
866#endif
867
868#ifdef CONFIG_CPU_SH2
869 set_exception_table_vec(TRAP_ADDRESS_ERROR, address_error_handler);
870#endif
871#ifdef CONFIG_CPU_SH2A
872 set_exception_table_vec(TRAP_DIVZERO_ERROR, do_divide_error);
873 set_exception_table_vec(TRAP_DIVOVF_ERROR, do_divide_error);
874#endif
875
876 /* Setup VBR for boot cpu */
877 per_cpu_trap_init();
878}
879 58
880#ifdef CONFIG_BUG 59#ifdef CONFIG_BUG
881void handle_BUG(struct pt_regs *regs) 60 if (__kernel_text_address(instruction_pointer(regs))) {
882{ 61 opcode_t insn = *(opcode_t *)instruction_pointer(regs);
883 enum bug_trap_type tt; 62 if (insn == TRAPA_BUG_OPCODE)
884 tt = report_bug(regs->pc, regs); 63 handle_BUG(regs);
885 if (tt == BUG_TRAP_TYPE_WARN) {
886 regs->pc += 2;
887 return;
888 } 64 }
889
890 die("Kernel BUG", regs, TRAPA_BUG_OPCODE & 0xff);
891}
892
893int is_valid_bugaddr(unsigned long addr)
894{
895 return addr >= PAGE_OFFSET;
896}
897#endif
898
899void show_trace(struct task_struct *tsk, unsigned long *sp,
900 struct pt_regs *regs)
901{
902 unsigned long addr;
903
904 if (regs && user_mode(regs))
905 return;
906
907 printk("\nCall trace: ");
908#ifdef CONFIG_KALLSYMS
909 printk("\n");
910#endif 65#endif
911 66
912 while (!kstack_end(sp)) { 67 force_sig(SIGTRAP, current);
913 addr = *sp++;
914 if (kernel_text_address(addr))
915 print_ip_sym(addr);
916 }
917
918 printk("\n");
919
920 if (!tsk)
921 tsk = current;
922
923 debug_show_held_locks(tsk);
924}
925
926void show_stack(struct task_struct *tsk, unsigned long *sp)
927{
928 unsigned long stack;
929
930 if (!tsk)
931 tsk = current;
932 if (tsk == current)
933 sp = (unsigned long *)current_stack_pointer;
934 else
935 sp = (unsigned long *)tsk->thread.sp;
936
937 stack = (unsigned long)sp;
938 dump_mem("Stack: ", stack, THREAD_SIZE +
939 (unsigned long)task_stack_page(tsk));
940 show_trace(tsk, sp, NULL);
941}
942
943void dump_stack(void)
944{
945 show_stack(NULL, NULL);
946} 68}
947EXPORT_SYMBOL(dump_stack);
diff --git a/arch/sh/kernel/traps_32.c b/arch/sh/kernel/traps_32.c
new file mode 100644
index 000000000000..2e58f7a6b746
--- /dev/null
+++ b/arch/sh/kernel/traps_32.c
@@ -0,0 +1,919 @@
1/*
2 * 'traps.c' handles hardware traps and faults after we have saved some
3 * state in 'entry.S'.
4 *
5 * SuperH version: Copyright (C) 1999 Niibe Yutaka
6 * Copyright (C) 2000 Philipp Rumpf
7 * Copyright (C) 2000 David Howells
8 * Copyright (C) 2002 - 2007 Paul Mundt
9 *
10 * This file is subject to the terms and conditions of the GNU General Public
11 * License. See the file "COPYING" in the main directory of this archive
12 * for more details.
13 */
14#include <linux/kernel.h>
15#include <linux/ptrace.h>
16#include <linux/init.h>
17#include <linux/spinlock.h>
18#include <linux/module.h>
19#include <linux/kallsyms.h>
20#include <linux/io.h>
21#include <linux/bug.h>
22#include <linux/debug_locks.h>
23#include <linux/kdebug.h>
24#include <linux/kexec.h>
25#include <linux/limits.h>
26#include <asm/system.h>
27#include <asm/uaccess.h>
28
29#ifdef CONFIG_SH_KGDB
30#include <asm/kgdb.h>
31#define CHK_REMOTE_DEBUG(regs) \
32{ \
33 if (kgdb_debug_hook && !user_mode(regs))\
34 (*kgdb_debug_hook)(regs); \
35}
36#else
37#define CHK_REMOTE_DEBUG(regs)
38#endif
39
40#ifdef CONFIG_CPU_SH2
41# define TRAP_RESERVED_INST 4
42# define TRAP_ILLEGAL_SLOT_INST 6
43# define TRAP_ADDRESS_ERROR 9
44# ifdef CONFIG_CPU_SH2A
45# define TRAP_DIVZERO_ERROR 17
46# define TRAP_DIVOVF_ERROR 18
47# endif
48#else
49#define TRAP_RESERVED_INST 12
50#define TRAP_ILLEGAL_SLOT_INST 13
51#endif
52
53static void dump_mem(const char *str, unsigned long bottom, unsigned long top)
54{
55 unsigned long p;
56 int i;
57
58 printk("%s(0x%08lx to 0x%08lx)\n", str, bottom, top);
59
60 for (p = bottom & ~31; p < top; ) {
61 printk("%04lx: ", p & 0xffff);
62
63 for (i = 0; i < 8; i++, p += 4) {
64 unsigned int val;
65
66 if (p < bottom || p >= top)
67 printk(" ");
68 else {
69 if (__get_user(val, (unsigned int __user *)p)) {
70 printk("\n");
71 return;
72 }
73 printk("%08x ", val);
74 }
75 }
76 printk("\n");
77 }
78}
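
As a concrete example of the row alignment: with bottom = 0x8c401234, the first row starts at 0x8c401220 (bottom & ~31), the five words below 0x8c401234 print as blank columns, and values begin in the sixth column, so every row stays aligned on a 32-byte boundary.
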
79
80static DEFINE_SPINLOCK(die_lock);
81
82void die(const char * str, struct pt_regs * regs, long err)
83{
84 static int die_counter;
85
86 oops_enter();
87
88 console_verbose();
89 spin_lock_irq(&die_lock);
90 bust_spinlocks(1);
91
92 printk("%s: %04lx [#%d]\n", str, err & 0xffff, ++die_counter);
93
94 CHK_REMOTE_DEBUG(regs);
95 print_modules();
96 show_regs(regs);
97
98 printk("Process: %s (pid: %d, stack limit = %p)\n", current->comm,
99 task_pid_nr(current), task_stack_page(current) + 1);
100
101 if (!user_mode(regs) || in_interrupt())
102 dump_mem("Stack: ", regs->regs[15], THREAD_SIZE +
103 (unsigned long)task_stack_page(current));
104
105 bust_spinlocks(0);
106 add_taint(TAINT_DIE);
107 spin_unlock_irq(&die_lock);
108
109 if (kexec_should_crash(current))
110 crash_kexec(regs);
111
112 if (in_interrupt())
113 panic("Fatal exception in interrupt");
114
115 if (panic_on_oops)
116 panic("Fatal exception");
117
118 oops_exit();
119 do_exit(SIGSEGV);
120}
121
122static inline void die_if_kernel(const char *str, struct pt_regs *regs,
123 long err)
124{
125 if (!user_mode(regs))
126 die(str, regs, err);
127}
128
129/*
130 * try and fix up kernelspace address errors
131 * - userspace errors just cause EFAULT to be returned, resulting in SEGV
132 * - kernel/userspace interfaces cause a jump to an appropriate handler
133 * - other kernel errors are bad
134 * - return 0 if fixed-up, -EFAULT if non-fatal (to the kernel) fault
135 */
136static int die_if_no_fixup(const char * str, struct pt_regs * regs, long err)
137{
138 if (!user_mode(regs)) {
139 const struct exception_table_entry *fixup;
140 fixup = search_exception_tables(regs->pc);
141 if (fixup) {
142 regs->pc = fixup->fixup;
143 return 0;
144 }
145 die(str, regs, err);
146 }
147 return -EFAULT;
148}
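
search_exception_tables(), used by die_if_no_fixup() above, maps a faulting kernel PC to a recovery address. A toy model of that mechanism, with invented addresses and a linear search standing in for the kernel's sorted-table lookup:

    #include <stdio.h>

    struct toy_fixup {
        unsigned long insn;     /* instruction that is allowed to fault */
        unsigned long fixup;    /* where to resume if it does */
    };

    static const struct toy_fixup toy_table[] = {
        { 0x8c001000UL, 0x8c00f000UL },
        { 0x8c001008UL, 0x8c00f010UL },
    };

    static unsigned long toy_search(unsigned long pc)
    {
        unsigned i;
        for (i = 0; i < sizeof(toy_table) / sizeof(toy_table[0]); i++)
            if (toy_table[i].insn == pc)
                return toy_table[i].fixup;
        return 0;               /* no fixup: a genuine kernel fault */
    }

    int main(void)
    {
        unsigned long pc = 0x8c001008UL;
        unsigned long fix = toy_search(pc);

        if (fix)
            printf("pc %#lx resumes at %#lx\n", pc, fix);
        else
            printf("pc %#lx would die()\n", pc);
        return 0;
    }
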
149
150/*
151 * handle an instruction that does an unaligned memory access by emulating the
152 * desired behaviour
153 * - note that PC _may not_ point to the faulting instruction
154 * (if that instruction is in a branch delay slot)
155 * - return 0 if emulation okay, -EFAULT on existential error
156 */
157static int handle_unaligned_ins(u16 instruction, struct pt_regs *regs)
158{
159 int ret, index, count;
160 unsigned long *rm, *rn;
161 unsigned char *src, *dst;
162
163 index = (instruction>>8)&15; /* 0x0F00 */
164 rn = &regs->regs[index];
165
166 index = (instruction>>4)&15; /* 0x00F0 */
167 rm = &regs->regs[index];
168
169 count = 1<<(instruction&3);
170
171 ret = -EFAULT;
172 switch (instruction>>12) {
173 case 0: /* mov.[bwl] to/from memory via r0+rn */
174 if (instruction & 8) {
175 /* from memory */
176 src = (unsigned char*) *rm;
177 src += regs->regs[0];
178 dst = (unsigned char*) rn;
179 *(unsigned long*)dst = 0;
180
181#ifdef __LITTLE_ENDIAN__
182 if (copy_from_user(dst, src, count))
183 goto fetch_fault;
184
185 if ((count == 2) && dst[1] & 0x80) {
186 dst[2] = 0xff;
187 dst[3] = 0xff;
188 }
189#else
190 dst += 4-count;
191
192 if (__copy_user(dst, src, count))
193 goto fetch_fault;
194
195 if ((count == 2) && dst[2] & 0x80) {
196 dst[0] = 0xff;
197 dst[1] = 0xff;
198 }
199#endif
200 } else {
201 /* to memory */
202 src = (unsigned char*) rm;
203#if !defined(__LITTLE_ENDIAN__)
204 src += 4-count;
205#endif
206 dst = (unsigned char*) *rn;
207 dst += regs->regs[0];
208
209 if (copy_to_user(dst, src, count))
210 goto fetch_fault;
211 }
212 ret = 0;
213 break;
214
215 case 1: /* mov.l Rm,@(disp,Rn) */
216 src = (unsigned char*) rm;
217 dst = (unsigned char*) *rn;
218 dst += (instruction&0x000F)<<2;
219
220 if (copy_to_user(dst,src,4))
221 goto fetch_fault;
222 ret = 0;
223 break;
224
225 case 2: /* mov.[bwl] to memory, possibly with pre-decrement */
226 if (instruction & 4)
227 *rn -= count;
228 src = (unsigned char*) rm;
229 dst = (unsigned char*) *rn;
230#if !defined(__LITTLE_ENDIAN__)
231 src += 4-count;
232#endif
233 if (copy_to_user(dst, src, count))
234 goto fetch_fault;
235 ret = 0;
236 break;
237
238 case 5: /* mov.l @(disp,Rm),Rn */
239 src = (unsigned char*) *rm;
240 src += (instruction&0x000F)<<2;
241 dst = (unsigned char*) rn;
242 *(unsigned long*)dst = 0;
243
244 if (copy_from_user(dst,src,4))
245 goto fetch_fault;
246 ret = 0;
247 break;
248
249 case 6: /* mov.[bwl] from memory, possibly with post-increment */
250 src = (unsigned char*) *rm;
251 if (instruction & 4)
252 *rm += count;
253 dst = (unsigned char*) rn;
254 *(unsigned long*)dst = 0;
255
256#ifdef __LITTLE_ENDIAN__
257 if (copy_from_user(dst, src, count))
258 goto fetch_fault;
259
260 if ((count == 2) && dst[1] & 0x80) {
261 dst[2] = 0xff;
262 dst[3] = 0xff;
263 }
264#else
265 dst += 4-count;
266
267 if (copy_from_user(dst, src, count))
268 goto fetch_fault;
269
270 if ((count == 2) && dst[2] & 0x80) {
271 dst[0] = 0xff;
272 dst[1] = 0xff;
273 }
274#endif
275 ret = 0;
276 break;
277
278 case 8:
279 switch ((instruction&0xFF00)>>8) {
280 case 0x81: /* mov.w R0,@(disp,Rn) */
281 src = (unsigned char*) &regs->regs[0];
282#if !defined(__LITTLE_ENDIAN__)
283 src += 2;
284#endif
285 dst = (unsigned char*) *rm; /* called Rn in the spec */
286 dst += (instruction&0x000F)<<1;
287
288 if (copy_to_user(dst, src, 2))
289 goto fetch_fault;
290 ret = 0;
291 break;
292
293 case 0x85: /* mov.w @(disp,Rm),R0 */
294 src = (unsigned char*) *rm;
295 src += (instruction&0x000F)<<1;
296 dst = (unsigned char*) &regs->regs[0];
297 *(unsigned long*)dst = 0;
298
299#if !defined(__LITTLE_ENDIAN__)
300 dst += 2;
301#endif
302
303 if (copy_from_user(dst, src, 2))
304 goto fetch_fault;
305
306#ifdef __LITTLE_ENDIAN__
307 if (dst[1] & 0x80) {
308 dst[2] = 0xff;
309 dst[3] = 0xff;
310 }
311#else
312 if (dst[2] & 0x80) {
313 dst[0] = 0xff;
314 dst[1] = 0xff;
315 }
316#endif
317 ret = 0;
318 break;
319 }
320 break;
321 }
322 return ret;
323
324 fetch_fault:
325 /* Argh. Address not only misaligned but also non-existent.
326 * Raise an EFAULT and see if it's trapped
327 */
328 return die_if_no_fixup("Fault in unaligned fixup", regs, 0);
329}
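
An aside on the decode at the top of handle_unaligned_ins(): the register indices and the transfer width all come from fixed bit fields of the 16-bit opcode. A standalone sketch of the same extraction (illustrative only, not part of the kernel source; decode_mov is a made-up name):

#include <stdio.h>

/* Mirrors the index/count extraction in handle_unaligned_ins():
 * rn is bits 11:8, rm is bits 7:4, and the low two bits give the
 * width for mov.[bwl] (0 -> 1 byte, 1 -> 2 bytes, 2 -> 4 bytes). */
static void decode_mov(unsigned short ins)
{
	int rn    = (ins >> 8) & 15;
	int rm    = (ins >> 4) & 15;
	int count = 1 << (ins & 3);

	printf("rn=r%d rm=r%d count=%d\n", rn, rm, count);
}

int main(void)
{
	decode_mov(0x6122);	/* mov.l @r2,r1 -> rn=r1 rm=r2 count=4 */
	return 0;
}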
330
331/*
332 * emulate the instruction in the delay slot
333 * - fetches the instruction from PC+2
334 */
335static inline int handle_unaligned_delayslot(struct pt_regs *regs)
336{
337 u16 instruction;
338
339 if (copy_from_user(&instruction, (u16 *)(regs->pc+2), 2)) {
340 /* the instruction-fetch faulted */
341 if (user_mode(regs))
342 return -EFAULT;
343
344 /* kernel */
345 die("delay-slot-insn faulting in handle_unaligned_delayslot",
346 regs, 0);
347 }
348
349 return handle_unaligned_ins(instruction,regs);
350}
351
352/*
353 * handle an instruction that does an unaligned memory access
354 * - have to be careful of branch delay-slot instructions that fault
355 * SH3:
356 * - if the branch would be taken PC points to the branch
357 * - if the branch would not be taken, PC points to delay-slot
358 * SH4:
359 * - PC always points to delayed branch
360 * - return 0 if handled, -EFAULT if failed (may not return if in kernel)
361 */
362
363/* Macros to determine offset from current PC for branch instructions */
364/* Explicit type coercion is used to force sign extension where needed */
365#define SH_PC_8BIT_OFFSET(instr) ((((signed char)(instr))*2) + 4)
366#define SH_PC_12BIT_OFFSET(instr) ((((signed short)(instr<<4))>>3) + 4)
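
A standalone sketch of what these two macros evaluate to (illustrative, not kernel code; like the kernel it assumes a 16-bit short, an 8-bit signed char, the usual truncating conversion to short, and arithmetic right shift of negative values):

#include <assert.h>
#include <stdio.h>

#define SH_PC_8BIT_OFFSET(instr)  ((((signed char)(instr))*2) + 4)
#define SH_PC_12BIT_OFFSET(instr) ((((signed short)(instr<<4))>>3) + 4)

int main(void)
{
	/* bra +1 (0xA001): offset = 1*2 + 4 = 6, i.e. branch to PC+6 */
	assert(SH_PC_12BIT_OFFSET(0xA001) == 6);
	/* bra -1 (0xAFFF): the cast to short makes bit 15 the sign,
	 * so offset = -1*2 + 4 = 2 */
	assert(SH_PC_12BIT_OFFSET(0xAFFF) == 2);
	/* bt -128 (0x8980): offset = -128*2 + 4 = -252 */
	assert(SH_PC_8BIT_OFFSET(0x8980) == -252);
	printf("offsets behave as the comments above describe\n");
	return 0;
}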
367
368/*
369 * XXX: SH-2A needs this too, but it needs an overhaul thanks to mixed 32-bit
370 * opcodes..
371 */
372#ifndef CONFIG_CPU_SH2A
373static int handle_unaligned_notify_count = 10;
374
375static int handle_unaligned_access(u16 instruction, struct pt_regs *regs)
376{
377 u_int rm;
378 int ret, index;
379
380 index = (instruction>>8)&15; /* 0x0F00 */
381 rm = regs->regs[index];
382
383 /* shout about the first ten userspace fixups */
384 if (user_mode(regs) && handle_unaligned_notify_count>0) {
385 handle_unaligned_notify_count--;
386
387 printk(KERN_NOTICE "Fixing up unaligned userspace access "
388 "in \"%s\" pid=%d pc=0x%p ins=0x%04hx\n",
389 current->comm, task_pid_nr(current),
390 (u16 *)regs->pc, instruction);
391 }
392
393 ret = -EFAULT;
394 switch (instruction&0xF000) {
395 case 0x0000:
396 if (instruction==0x000B) {
397 /* rts */
398 ret = handle_unaligned_delayslot(regs);
399 if (ret==0)
400 regs->pc = regs->pr;
401 }
402 else if ((instruction&0x00FF)==0x0023) {
403 /* braf Rm */
404 ret = handle_unaligned_delayslot(regs);
405 if (ret==0)
406 regs->pc += rm + 4;
407 }
408 else if ((instruction&0x00FF)==0x0003) {
409 /* bsrf Rm */
410 ret = handle_unaligned_delayslot(regs);
411 if (ret==0) {
412 regs->pr = regs->pc + 4;
413 regs->pc += rm + 4;
414 }
415 }
416 else {
417 /* mov.[bwl] to/from memory via r0+rn */
418 goto simple;
419 }
420 break;
421
422 case 0x1000: /* mov.l Rm,@(disp,Rn) */
423 goto simple;
424
425 case 0x2000: /* mov.[bwl] to memory, possibly with pre-decrement */
426 goto simple;
427
428 case 0x4000:
429 if ((instruction&0x00FF)==0x002B) {
430 /* jmp @Rm */
431 ret = handle_unaligned_delayslot(regs);
432 if (ret==0)
433 regs->pc = rm;
434 }
435 else if ((instruction&0x00FF)==0x000B) {
436 /* jsr @Rm */
437 ret = handle_unaligned_delayslot(regs);
438 if (ret==0) {
439 regs->pr = regs->pc + 4;
440 regs->pc = rm;
441 }
442 }
443 else {
444 /* mov.[bwl] to/from memory via r0+rn */
445 goto simple;
446 }
447 break;
448
449 case 0x5000: /* mov.l @(disp,Rm),Rn */
450 goto simple;
451
452 case 0x6000: /* mov.[bwl] from memory, possibly with post-increment */
453 goto simple;
454
455 case 0x8000: /* bf lab, bf/s lab, bt lab, bt/s lab */
456 switch (instruction&0x0F00) {
457 case 0x0100: /* mov.w R0,@(disp,Rm) */
458 goto simple;
459 case 0x0500: /* mov.w @(disp,Rm),R0 */
460 goto simple;
461 case 0x0B00: /* bf lab - no delayslot*/
462 break;
463 case 0x0F00: /* bf/s lab */
464 ret = handle_unaligned_delayslot(regs);
465 if (ret==0) {
466#if defined(CONFIG_CPU_SH4) || defined(CONFIG_SH7705_CACHE_32KB)
467 if ((regs->sr & 0x00000001) != 0)
468 regs->pc += 4; /* next after slot */
469 else
470#endif
471 regs->pc += SH_PC_8BIT_OFFSET(instruction);
472 }
473 break;
474 case 0x0900: /* bt lab - no delayslot */
475 break;
476 case 0x0D00: /* bt/s lab */
477 ret = handle_unaligned_delayslot(regs);
478 if (ret==0) {
479#if defined(CONFIG_CPU_SH4) || defined(CONFIG_SH7705_CACHE_32KB)
480 if ((regs->sr & 0x00000001) == 0)
481 regs->pc += 4; /* next after slot */
482 else
483#endif
484 regs->pc += SH_PC_8BIT_OFFSET(instruction);
485 }
486 break;
487 }
488 break;
489
490 case 0xA000: /* bra label */
491 ret = handle_unaligned_delayslot(regs);
492 if (ret==0)
493 regs->pc += SH_PC_12BIT_OFFSET(instruction);
494 break;
495
496 case 0xB000: /* bsr label */
497 ret = handle_unaligned_delayslot(regs);
498 if (ret==0) {
499 regs->pr = regs->pc + 4;
500 regs->pc += SH_PC_12BIT_OFFSET(instruction);
501 }
502 break;
503 }
504 return ret;
505
506 /* handle non-delay-slot instruction */
507 simple:
508 ret = handle_unaligned_ins(instruction,regs);
509 if (ret==0)
510 regs->pc += instruction_size(instruction);
511 return ret;
512}
513#endif /* CONFIG_CPU_SH2A */
514
515#ifdef CONFIG_CPU_HAS_SR_RB
516#define lookup_exception_vector(x) \
517 __asm__ __volatile__ ("stc r2_bank, %0\n\t" : "=r" ((x)))
518#else
519#define lookup_exception_vector(x) \
520 __asm__ __volatile__ ("mov r4, %0\n\t" : "=r" ((x)))
521#endif
522
523/*
524 * Handle various address error exceptions:
525 * - instruction address error:
526 * misaligned PC
527 * PC >= 0x80000000 in user mode
528 * - data address error (read and write)
529 * misaligned data access
530 * access to >= 0x80000000 in user mode
531 * Unfortunately we can't distinguish between instruction address error
532 * and data address errors caused by read accesses.
533 */
534asmlinkage void do_address_error(struct pt_regs *regs,
535 unsigned long writeaccess,
536 unsigned long address)
537{
538 unsigned long error_code = 0;
539 mm_segment_t oldfs;
540 siginfo_t info;
541#ifndef CONFIG_CPU_SH2A
542 u16 instruction;
543 int tmp;
544#endif
545
546 /* Intentional ifdef */
547#ifdef CONFIG_CPU_HAS_SR_RB
548 lookup_exception_vector(error_code);
549#endif
550
551 oldfs = get_fs();
552
553 if (user_mode(regs)) {
554 int si_code = BUS_ADRERR;
555
556 local_irq_enable();
557
558 /* bad PC is not something we can fix */
559 if (regs->pc & 1) {
560 si_code = BUS_ADRALN;
561 goto uspace_segv;
562 }
563
564#ifndef CONFIG_CPU_SH2A
565 set_fs(USER_DS);
566 if (copy_from_user(&instruction, (u16 *)(regs->pc), 2)) {
567 /* Argh. Fault on the instruction itself.
568 This should never happen on non-SMP
569 */
570 set_fs(oldfs);
571 goto uspace_segv;
572 }
573
574 tmp = handle_unaligned_access(instruction, regs);
575 set_fs(oldfs);
576
577 if (tmp==0)
578 return; /* sorted */
579#endif
580
581uspace_segv:
582 printk(KERN_NOTICE "Sending SIGBUS to \"%s\" due to unaligned "
583 "access (PC %lx PR %lx)\n", current->comm, regs->pc,
584 regs->pr);
585
586 info.si_signo = SIGBUS;
587 info.si_errno = 0;
588 info.si_code = si_code;
589 info.si_addr = (void __user *)address;
590 force_sig_info(SIGBUS, &info, current);
591 } else {
592 if (regs->pc & 1)
593 die("unaligned program counter", regs, error_code);
594
595#ifndef CONFIG_CPU_SH2A
596 set_fs(KERNEL_DS);
597 if (copy_from_user(&instruction, (u16 *)(regs->pc), 2)) {
598 /* Argh. Fault on the instruction itself.
599 This should never happen on non-SMP
600 */
601 set_fs(oldfs);
602 die("insn faulting in do_address_error", regs, 0);
603 }
604
605 handle_unaligned_access(instruction, regs);
606 set_fs(oldfs);
607#else
608 printk(KERN_NOTICE "Killing process \"%s\" due to unaligned "
609 "access\n", current->comm);
610
611 force_sig(SIGSEGV, current);
612#endif
613 }
614}
615
616#ifdef CONFIG_SH_DSP
617/*
618 * SH-DSP support by gerg@snapgear.com.
619 */
620int is_dsp_inst(struct pt_regs *regs)
621{
622 unsigned short inst = 0;
623
624 /*
625 * Safe guard if DSP mode is already enabled or we're lacking
626 * the DSP altogether.
627 */
628 if (!(current_cpu_data.flags & CPU_HAS_DSP) || (regs->sr & SR_DSP))
629 return 0;
630
631 get_user(inst, ((unsigned short *) regs->pc));
632
633 inst &= 0xf000;
634
635 /* Check for any type of DSP or support instruction */
636 if ((inst == 0xf000) || (inst == 0x4000))
637 return 1;
638
639 return 0;
640}
641#else
642#define is_dsp_inst(regs) (0)
643#endif /* CONFIG_SH_DSP */
644
645#ifdef CONFIG_CPU_SH2A
646asmlinkage void do_divide_error(unsigned long r4, unsigned long r5,
647 unsigned long r6, unsigned long r7,
648 struct pt_regs __regs)
649{
650 siginfo_t info;
651
652 switch (r4) {
653 case TRAP_DIVZERO_ERROR:
654 info.si_code = FPE_INTDIV;
655 break;
656 case TRAP_DIVOVF_ERROR:
657 info.si_code = FPE_INTOVF;
658 break;
659 }
660
661 force_sig_info(SIGFPE, &info, current);
662}
663#endif
664
665asmlinkage void do_reserved_inst(unsigned long r4, unsigned long r5,
666 unsigned long r6, unsigned long r7,
667 struct pt_regs __regs)
668{
669 struct pt_regs *regs = RELOC_HIDE(&__regs, 0);
670 unsigned long error_code;
671 struct task_struct *tsk = current;
672
673#ifdef CONFIG_SH_FPU_EMU
674 unsigned short inst = 0;
675 int err;
676
677 get_user(inst, (unsigned short*)regs->pc);
678
679 err = do_fpu_inst(inst, regs);
680 if (!err) {
681 regs->pc += instruction_size(inst);
682 return;
683 }
684 /* not an FPU inst. */
685#endif
686
687#ifdef CONFIG_SH_DSP
688 /* Check if it's a DSP instruction */
689 if (is_dsp_inst(regs)) {
690 /* Enable DSP mode, and restart instruction. */
691 regs->sr |= SR_DSP;
692 return;
693 }
694#endif
695
696 lookup_exception_vector(error_code);
697
698 local_irq_enable();
699 CHK_REMOTE_DEBUG(regs);
700 force_sig(SIGILL, tsk);
701 die_if_no_fixup("reserved instruction", regs, error_code);
702}
703
704#ifdef CONFIG_SH_FPU_EMU
705static int emulate_branch(unsigned short inst, struct pt_regs* regs)
706{
707 /*
708 * bfs: 8fxx: PC+=d*2+4;
709 * bts: 8dxx: PC+=d*2+4;
710 * bra: axxx: PC+=D*2+4;
711 * bsr: bxxx: PC+=D*2+4 after PR=PC+4;
712 * braf:0x23: PC+=Rn+4;
713 * bsrf:0x03: PC+=Rn+4 after PR=PC+4;
714 * jmp: 4x2b: PC=Rn;
715 * jsr: 4x0b: PC=Rn after PR=PC+4;
716 * rts: 000b: PC=PR;
717 */
718 if ((inst & 0xfd00) == 0x8d00) {
719 regs->pc += SH_PC_8BIT_OFFSET(inst);
720 return 0;
721 }
722
723 if ((inst & 0xe000) == 0xa000) {
724 regs->pc += SH_PC_12BIT_OFFSET(inst);
725 return 0;
726 }
727
728 if ((inst & 0xf0df) == 0x0003) {
729 regs->pc += regs->regs[(inst & 0x0f00) >> 8] + 4;
730 return 0;
731 }
732
733 if ((inst & 0xf0df) == 0x400b) {
734 regs->pc = regs->regs[(inst & 0x0f00) >> 8];
735 return 0;
736 }
737
738 if ((inst & 0xffff) == 0x000b) {
739 regs->pc = regs->pr;
740 return 0;
741 }
742
743 return 1;
744}
745#endif
746
747asmlinkage void do_illegal_slot_inst(unsigned long r4, unsigned long r5,
748 unsigned long r6, unsigned long r7,
749 struct pt_regs __regs)
750{
751 struct pt_regs *regs = RELOC_HIDE(&__regs, 0);
752 unsigned long error_code;
753 struct task_struct *tsk = current;
754#ifdef CONFIG_SH_FPU_EMU
755 unsigned short inst = 0;
756
757 get_user(inst, (unsigned short *)regs->pc + 1);
758 if (!do_fpu_inst(inst, regs)) {
759 get_user(inst, (unsigned short *)regs->pc);
760 if (!emulate_branch(inst, regs))
761 return;
762 /* fault in branch.*/
763 }
764 /* not an FPU inst. */
765#endif
766
767 lookup_exception_vector(error_code);
768
769 local_irq_enable();
770 CHK_REMOTE_DEBUG(regs);
771 force_sig(SIGILL, tsk);
772 die_if_no_fixup("illegal slot instruction", regs, error_code);
773}
774
775asmlinkage void do_exception_error(unsigned long r4, unsigned long r5,
776 unsigned long r6, unsigned long r7,
777 struct pt_regs __regs)
778{
779 struct pt_regs *regs = RELOC_HIDE(&__regs, 0);
780 long ex;
781
782 lookup_exception_vector(ex);
783 die_if_kernel("exception", regs, ex);
784}
785
786#if defined(CONFIG_SH_STANDARD_BIOS)
787void *gdb_vbr_vector;
788
789static inline void __init gdb_vbr_init(void)
790{
791 register unsigned long vbr;
792
793 /*
794 * Read the old value of the VBR register to initialise
795 * the vector through which debug and BIOS traps are
796 * delegated by the Linux trap handler.
797 */
798 asm volatile("stc vbr, %0" : "=r" (vbr));
799
800 gdb_vbr_vector = (void *)(vbr + 0x100);
801 printk("Setting GDB trap vector to 0x%08lx\n",
802 (unsigned long)gdb_vbr_vector);
803}
804#endif
805
806void __cpuinit per_cpu_trap_init(void)
807{
808 extern void *vbr_base;
809
810#ifdef CONFIG_SH_STANDARD_BIOS
811 if (raw_smp_processor_id() == 0)
812 gdb_vbr_init();
813#endif
814
815 /* NOTE: The VBR value should be at P1
816 (or P2, the virtual "fixed" address space).
817 It should definitely not be a physical address. */
818
819 asm volatile("ldc %0, vbr"
820 : /* no output */
821 : "r" (&vbr_base)
822 : "memory");
823}
824
825void *set_exception_table_vec(unsigned int vec, void *handler)
826{
827 extern void *exception_handling_table[];
828 void *old_handler;
829
830 old_handler = exception_handling_table[vec];
831 exception_handling_table[vec] = handler;
832 return old_handler;
833}
834
835void __init trap_init(void)
836{
837 set_exception_table_vec(TRAP_RESERVED_INST, do_reserved_inst);
838 set_exception_table_vec(TRAP_ILLEGAL_SLOT_INST, do_illegal_slot_inst);
839
840#if defined(CONFIG_CPU_SH4) && !defined(CONFIG_SH_FPU) || \
841 defined(CONFIG_SH_FPU_EMU)
842 /*
843 * For SH-4 lacking an FPU, treat floating point instructions as
844 * reserved. They'll be handled in the math-emu case, or faulted on
845 * otherwise.
846 */
847 set_exception_table_evt(0x800, do_reserved_inst);
848 set_exception_table_evt(0x820, do_illegal_slot_inst);
849#elif defined(CONFIG_SH_FPU)
850#ifdef CONFIG_CPU_SUBTYPE_SHX3
851 set_exception_table_evt(0xd80, fpu_state_restore_trap_handler);
852 set_exception_table_evt(0xda0, fpu_state_restore_trap_handler);
853#else
854 set_exception_table_evt(0x800, fpu_state_restore_trap_handler);
855 set_exception_table_evt(0x820, fpu_state_restore_trap_handler);
856#endif
857#endif
858
859#ifdef CONFIG_CPU_SH2
860 set_exception_table_vec(TRAP_ADDRESS_ERROR, address_error_trap_handler);
861#endif
862#ifdef CONFIG_CPU_SH2A
863 set_exception_table_vec(TRAP_DIVZERO_ERROR, do_divide_error);
864 set_exception_table_vec(TRAP_DIVOVF_ERROR, do_divide_error);
865#endif
866
867 /* Setup VBR for boot cpu */
868 per_cpu_trap_init();
869}
870
871void show_trace(struct task_struct *tsk, unsigned long *sp,
872 struct pt_regs *regs)
873{
874 unsigned long addr;
875
876 if (regs && user_mode(regs))
877 return;
878
879 printk("\nCall trace: ");
880#ifdef CONFIG_KALLSYMS
881 printk("\n");
882#endif
883
884 while (!kstack_end(sp)) {
885 addr = *sp++;
886 if (kernel_text_address(addr))
887 print_ip_sym(addr);
888 }
889
890 printk("\n");
891
892 if (!tsk)
893 tsk = current;
894
895 debug_show_held_locks(tsk);
896}
897
898void show_stack(struct task_struct *tsk, unsigned long *sp)
899{
900 unsigned long stack;
901
902 if (!tsk)
903 tsk = current;
904 if (tsk == current)
905 sp = (unsigned long *)current_stack_pointer;
906 else
907 sp = (unsigned long *)tsk->thread.sp;
908
909 stack = (unsigned long)sp;
910 dump_mem("Stack: ", stack, THREAD_SIZE +
911 (unsigned long)task_stack_page(tsk));
912 show_trace(tsk, sp, NULL);
913}
914
915void dump_stack(void)
916{
917 show_stack(NULL, NULL);
918}
919EXPORT_SYMBOL(dump_stack);
diff --git a/arch/sh/kernel/traps_64.c b/arch/sh/kernel/traps_64.c
new file mode 100644
index 000000000000..c0b3c6f6edb5
--- /dev/null
+++ b/arch/sh/kernel/traps_64.c
@@ -0,0 +1,975 @@
1/*
2 * arch/sh/kernel/traps_64.c
3 *
4 * Copyright (C) 2000, 2001 Paolo Alberelli
5 * Copyright (C) 2003, 2004 Paul Mundt
6 * Copyright (C) 2003, 2004 Richard Curnow
7 *
8 * This file is subject to the terms and conditions of the GNU General Public
9 * License. See the file "COPYING" in the main directory of this archive
10 * for more details.
11 */
12#include <linux/sched.h>
13#include <linux/kernel.h>
14#include <linux/string.h>
15#include <linux/errno.h>
16#include <linux/ptrace.h>
17#include <linux/timer.h>
18#include <linux/mm.h>
19#include <linux/smp.h>
20#include <linux/init.h>
21#include <linux/delay.h>
22#include <linux/spinlock.h>
23#include <linux/kallsyms.h>
24#include <linux/interrupt.h>
25#include <linux/sysctl.h>
26#include <linux/module.h>
27#include <asm/system.h>
28#include <asm/uaccess.h>
29#include <asm/io.h>
30#include <asm/atomic.h>
31#include <asm/processor.h>
32#include <asm/pgtable.h>
33
34#undef DEBUG_EXCEPTION
35#ifdef DEBUG_EXCEPTION
36/* implemented in ../lib/dbg.c */
37extern void show_excp_regs(char *fname, int trapnr, int signr,
38 struct pt_regs *regs);
39#else
40#define show_excp_regs(a, b, c, d)
41#endif
42
43static void do_unhandled_exception(int trapnr, int signr, char *str, char *fn_name,
44 unsigned long error_code, struct pt_regs *regs, struct task_struct *tsk);
45
46#define DO_ERROR(trapnr, signr, str, name, tsk) \
47asmlinkage void do_##name(unsigned long error_code, struct pt_regs *regs) \
48{ \
49 do_unhandled_exception(trapnr, signr, str, __stringify(name), error_code, regs, current); \
50}
51
52static DEFINE_SPINLOCK(die_lock);
53
54void die(const char * str, struct pt_regs * regs, long err)
55{
56 console_verbose();
57 spin_lock_irq(&die_lock);
58 printk("%s: %lx\n", str, (err & 0xffffff));
59 show_regs(regs);
60 spin_unlock_irq(&die_lock);
61 do_exit(SIGSEGV);
62}
63
64static inline void die_if_kernel(const char * str, struct pt_regs * regs, long err)
65{
66 if (!user_mode(regs))
67 die(str, regs, err);
68}
69
70static void die_if_no_fixup(const char * str, struct pt_regs * regs, long err)
71{
72 if (!user_mode(regs)) {
73 const struct exception_table_entry *fixup;
74 fixup = search_exception_tables(regs->pc);
75 if (fixup) {
76 regs->pc = fixup->fixup;
77 return;
78 }
79 die(str, regs, err);
80 }
81}
82
83DO_ERROR(13, SIGILL, "illegal slot instruction", illegal_slot_inst, current)
84DO_ERROR(87, SIGSEGV, "address error (exec)", address_error_exec, current)
85
86
87/* Implement misaligned load/store handling for kernel (and optionally for user
88 mode too). Limitation : only SHmedia mode code is handled - there is no
89 handling at all for misaligned accesses occurring in SHcompact code yet. */
90
91static int misaligned_fixup(struct pt_regs *regs);
92
93asmlinkage void do_address_error_load(unsigned long error_code, struct pt_regs *regs)
94{
95 if (misaligned_fixup(regs) < 0) {
96 do_unhandled_exception(7, SIGSEGV, "address error(load)",
97 "do_address_error_load",
98 error_code, regs, current);
99 }
100 return;
101}
102
103asmlinkage void do_address_error_store(unsigned long error_code, struct pt_regs *regs)
104{
105 if (misaligned_fixup(regs) < 0) {
106 do_unhandled_exception(8, SIGSEGV, "address error(store)",
107 "do_address_error_store",
108 error_code, regs, current);
109 }
110 return;
111}
112
113#if defined(CONFIG_SH64_ID2815_WORKAROUND)
114
115#define OPCODE_INVALID 0
116#define OPCODE_USER_VALID 1
117#define OPCODE_PRIV_VALID 2
118
119/* getcon/putcon - requires checking which control register is referenced. */
120#define OPCODE_CTRL_REG 3
121
122/* Table of valid opcodes for SHmedia mode.
123 Form a 10-bit value by concatenating the major/minor opcodes i.e.
124 opcode[31:26,20:16]. The 6 MSBs of this value index into the following
125 array. The 4 LSBs select the bit-pair in the entry (bits 1:0 correspond to
126 LSBs==4'b0000 etc). */
127static unsigned long shmedia_opcode_table[64] = {
128 0x55554044,0x54445055,0x15141514,0x14541414,0x00000000,0x10001000,0x01110055,0x04050015,
129 0x00000444,0xc0000000,0x44545515,0x40405555,0x55550015,0x10005555,0x55555505,0x04050000,
130 0x00000555,0x00000404,0x00040445,0x15151414,0x00000000,0x00000000,0x00000000,0x00000000,
131 0x00000055,0x40404444,0x00000404,0xc0009495,0x00000000,0x00000000,0x00000000,0x00000000,
132 0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,
133 0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,
134 0x80005050,0x04005055,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,
135 0x81055554,0x00000404,0x55555555,0x55555555,0x00000000,0x00000000,0x00000000,0x00000000
136};
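
A minimal sketch of the lookup this comment describes, with sixteen 2-bit states packed per entry and the major opcode selecting the entry (an illustrative helper, not part of the file; opcode_class is a made-up name):

/* Classify a SHmedia opcode against a table packed as described above.
 * Returns one of the OPCODE_* values (0..3). */
static int opcode_class(const unsigned long table[64], unsigned long opcode)
{
	unsigned long major = (opcode >> 26) & 0x3f;	/* opcode[31:26] */
	unsigned long minor = (opcode >> 16) & 0xf;	/* opcode[20:16] */

	return (table[major] >> (minor << 1)) & 0x3;	/* 2-bit state */
}

Calling opcode_class(shmedia_opcode_table, opcode) reproduces the index/shift arithmetic that do_reserved_inst() performs inline below.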
137
138void do_reserved_inst(unsigned long error_code, struct pt_regs *regs)
139{
140 /* Workaround SH5-101 cut2 silicon defect #2815 :
141 in some situations, inter-mode branches from SHcompact -> SHmedia
142 which should take ITLBMISS or EXECPROT exceptions at the target
143 falsely take RESINST at the target instead. */
144
145 unsigned long opcode = 0x6ff4fff0; /* guaranteed reserved opcode */
146 unsigned long pc, aligned_pc;
147 int get_user_error;
148 int trapnr = 12;
149 int signr = SIGILL;
150 char *exception_name = "reserved_instruction";
151
152 pc = regs->pc;
153 if ((pc & 3) == 1) {
154 /* SHmedia : check for defect. This requires executable vmas
155 to be readable too. */
156 aligned_pc = pc & ~3;
157 if (!access_ok(VERIFY_READ, aligned_pc, sizeof(unsigned long))) {
158 get_user_error = -EFAULT;
159 } else {
160 get_user_error = __get_user(opcode, (unsigned long *)aligned_pc);
161 }
162 if (get_user_error >= 0) {
163 unsigned long index, shift;
164 unsigned long major, minor, combined;
165 unsigned long reserved_field;
166 reserved_field = opcode & 0xf; /* These bits are currently reserved as zero in all valid opcodes */
167 major = (opcode >> 26) & 0x3f;
168 minor = (opcode >> 16) & 0xf;
169 combined = (major << 4) | minor;
170 index = major;
171 shift = minor << 1;
172 if (reserved_field == 0) {
173 int opcode_state = (shmedia_opcode_table[index] >> shift) & 0x3;
174 switch (opcode_state) {
175 case OPCODE_INVALID:
176 /* Trap. */
177 break;
178 case OPCODE_USER_VALID:
179 /* Restart the instruction : the branch to the instruction will now be from an RTE
180 not from SHcompact so the silicon defect won't be triggered. */
181 return;
182 case OPCODE_PRIV_VALID:
183 if (!user_mode(regs)) {
184 /* Should only ever get here if a module has
185 SHcompact code inside it. If so, the same fix up is needed. */
186 return; /* same reason */
187 }
188 /* Otherwise, user mode trying to execute a privileged instruction -
189 fall through to trap. */
190 break;
191 case OPCODE_CTRL_REG:
192 /* If in privileged mode, return as above. */
193 if (!user_mode(regs)) return;
194 /* In user mode ... */
195 if (combined == 0x9f) { /* GETCON */
196 unsigned long regno = (opcode >> 20) & 0x3f;
197 if (regno >= 62) {
198 return;
199 }
200 /* Otherwise, reserved or privileged control register, => trap */
201 } else if (combined == 0x1bf) { /* PUTCON */
202 unsigned long regno = (opcode >> 4) & 0x3f;
203 if (regno >= 62) {
204 return;
205 }
206 /* Otherwise, reserved or privileged control register, => trap */
207 } else {
208 /* Trap */
209 }
210 break;
211 default:
212 /* Fall through to trap. */
213 break;
214 }
215 }
216 /* fall through to normal resinst processing */
217 } else {
218 /* Error trying to read opcode. This typically means a
219 real fault, not a RESINST any more. So change the
220 codes. */
221 trapnr = 87;
222 exception_name = "address error (exec)";
223 signr = SIGSEGV;
224 }
225 }
226
227 do_unhandled_exception(trapnr, signr, exception_name, "do_reserved_inst", error_code, regs, current);
228}
229
230#else /* CONFIG_SH64_ID2815_WORKAROUND */
231
232/* If the workaround isn't needed, this is just a straightforward reserved
233 instruction */
234DO_ERROR(12, SIGILL, "reserved instruction", reserved_inst, current)
235
236#endif /* CONFIG_SH64_ID2815_WORKAROUND */
237
238/* Called with interrupts disabled */
239asmlinkage void do_exception_error(unsigned long ex, struct pt_regs *regs)
240{
241 show_excp_regs(__FUNCTION__, -1, -1, regs);
242 die_if_kernel("exception", regs, ex);
243}
244
245int do_unknown_trapa(unsigned long scId, struct pt_regs *regs)
246{
247 /* Syscall debug */
248 printk("System call ID error: [0x1 #args:8 #syscall:16 0x%lx]\n", scId);
249
250 die_if_kernel("unknown trapa", regs, scId);
251
252 return -ENOSYS;
253}
254
255void show_stack(struct task_struct *tsk, unsigned long *sp)
256{
257#ifdef CONFIG_KALLSYMS
258 extern void sh64_unwind(struct pt_regs *regs);
259 struct pt_regs *regs;
260
261 regs = tsk ? tsk->thread.kregs : NULL;
262
263 sh64_unwind(regs);
264#else
265 printk(KERN_ERR "Can't backtrace on sh64 without CONFIG_KALLSYMS\n");
266#endif
267}
268
269void show_task(unsigned long *sp)
270{
271 show_stack(NULL, sp);
272}
273
274void dump_stack(void)
275{
276 show_task(NULL);
277}
278/* Needed by any user of WARN_ON in view of the defn in include/asm-sh/bug.h */
279EXPORT_SYMBOL(dump_stack);
280
281static void do_unhandled_exception(int trapnr, int signr, char *str, char *fn_name,
282 unsigned long error_code, struct pt_regs *regs, struct task_struct *tsk)
283{
284 show_excp_regs(fn_name, trapnr, signr, regs);
285 tsk->thread.error_code = error_code;
286 tsk->thread.trap_no = trapnr;
287
288 if (user_mode(regs))
289 force_sig(signr, tsk);
290
291 die_if_no_fixup(str, regs, error_code);
292}
293
294static int read_opcode(unsigned long long pc, unsigned long *result_opcode, int from_user_mode)
295{
296 int get_user_error;
297 unsigned long aligned_pc;
298 unsigned long opcode;
299
300 if ((pc & 3) == 1) {
301 /* SHmedia */
302 aligned_pc = pc & ~3;
303 if (from_user_mode) {
304 if (!access_ok(VERIFY_READ, aligned_pc, sizeof(unsigned long))) {
305 get_user_error = -EFAULT;
306 } else {
307 get_user_error = __get_user(opcode, (unsigned long *)aligned_pc);
308 *result_opcode = opcode;
309 }
310 return get_user_error;
311 } else {
312 /* If the fault was in the kernel, we can either read
313 * this directly, or if not, we fault.
314 */
315 *result_opcode = *(unsigned long *) aligned_pc;
316 return 0;
317 }
318 } else if ((pc & 1) == 0) {
319 /* SHcompact */
320 /* TODO : provide handling for this. We don't really support
321 user-mode SHcompact yet, and for a kernel fault, this would
322 have to come from a module built for SHcompact. */
323 return -EFAULT;
324 } else {
325 /* misaligned */
326 return -EFAULT;
327 }
328}
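
read_opcode() leans on the sh64 convention that the ISA mode is encoded in the low bits of the PC. Restated as a small sketch (illustrative naming, not kernel code):

/* (pc & 3) == 1 selects SHmedia (4-byte opcodes fetched from pc & ~3),
 * an even pc selects SHcompact (2-byte opcodes), and anything else is
 * a genuinely misaligned program counter. */
static const char *pc_mode(unsigned long long pc)
{
	if ((pc & 3) == 1)
		return "SHmedia";
	if ((pc & 1) == 0)
		return "SHcompact";
	return "misaligned";
}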
329
330static int address_is_sign_extended(__u64 a)
331{
332 __u64 b;
333#if (NEFF == 32)
334 b = (__u64)(__s64)(__s32)(a & 0xffffffffUL);
335 return (b == a) ? 1 : 0;
336#else
337#error "Sign extend check only works for NEFF==32"
338#endif
339}
340
341static int generate_and_check_address(struct pt_regs *regs,
342 __u32 opcode,
343 int displacement_not_indexed,
344 int width_shift,
345 __u64 *address)
346{
347 /* return -1 for fault, 0 for OK */
348
349 __u64 base_address, addr;
350 int basereg;
351
352 basereg = (opcode >> 20) & 0x3f;
353 base_address = regs->regs[basereg];
354 if (displacement_not_indexed) {
355 __s64 displacement;
356 displacement = (opcode >> 10) & 0x3ff;
357 displacement = ((displacement << 54) >> 54); /* sign extend */
358 addr = (__u64)((__s64)base_address + (displacement << width_shift));
359 } else {
360 __u64 offset;
361 int offsetreg;
362 offsetreg = (opcode >> 10) & 0x3f;
363 offset = regs->regs[offsetreg];
364 addr = base_address + offset;
365 }
366
367 /* Check sign extended */
368 if (!address_is_sign_extended(addr)) {
369 return -1;
370 }
371
372#if defined(CONFIG_SH64_USER_MISALIGNED_FIXUP)
373 /* Check accessible. For misaligned access in the kernel, assume the
374 address is always accessible (and if not, just fault when the
375 load/store gets done.) */
376 if (user_mode(regs)) {
377 if (addr >= TASK_SIZE) {
378 return -1;
379 }
380 /* Do access_ok check later - it depends on whether it's a load or a store. */
381 }
382#endif
383
384 *address = addr;
385 return 0;
386}
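
The (displacement << 54) >> 54 step above is the usual shift trick for sign-extending a narrow field. Isolated as a sketch (not kernel code, and like the original it assumes arithmetic right shift on signed 64-bit values):

/* Sign-extend the low 10 bits of 'field' to 64 bits: 0x3ff (all ones)
 * comes back as -1, while 0x1ff stays +511. */
static long long sign_extend_10bit(unsigned long long field)
{
	long long d = field & 0x3ff;

	return (d << 54) >> 54;	/* copy bit 9 into bits 63:10 */
}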
387
388/* Default value as for sh */
389#if defined(CONFIG_SH64_USER_MISALIGNED_FIXUP)
390static int user_mode_unaligned_fixup_count = 10;
391static int user_mode_unaligned_fixup_enable = 1;
392#endif
393
394static int kernel_mode_unaligned_fixup_count = 32;
395
396static void misaligned_kernel_word_load(__u64 address, int do_sign_extend, __u64 *result)
397{
398 unsigned short x;
399 unsigned char *p, *q;
400 p = (unsigned char *) (int) address;
401 q = (unsigned char *) &x;
402 q[0] = p[0];
403 q[1] = p[1];
404
405 if (do_sign_extend) {
406 *result = (__u64)(__s64) *(short *) &x;
407 } else {
408 *result = (__u64) x;
409 }
410}
411
412static void misaligned_kernel_word_store(__u64 address, __u64 value)
413{
414 unsigned short x;
415 unsigned char *p, *q;
416 p = (unsigned char *) (int) address;
417 q = (unsigned char *) &x;
418
419 x = (__u16) value;
420 p[0] = q[0];
421 p[1] = q[1];
422}
423
424static int misaligned_load(struct pt_regs *regs,
425 __u32 opcode,
426 int displacement_not_indexed,
427 int width_shift,
428 int do_sign_extend)
429{
430 /* Return -1 for a fault, 0 for OK */
431 int error;
432 int destreg;
433 __u64 address;
434
435 error = generate_and_check_address(regs, opcode,
436 displacement_not_indexed, width_shift, &address);
437 if (error < 0) {
438 return error;
439 }
440
441 destreg = (opcode >> 4) & 0x3f;
442#if defined(CONFIG_SH64_USER_MISALIGNED_FIXUP)
443 if (user_mode(regs)) {
444 __u64 buffer;
445
446 if (!access_ok(VERIFY_READ, (unsigned long) address, 1UL<<width_shift)) {
447 return -1;
448 }
449
450 if (__copy_user(&buffer, (const void *)(int)address, (1 << width_shift)) > 0) {
451 return -1; /* fault */
452 }
453 switch (width_shift) {
454 case 1:
455 if (do_sign_extend) {
456 regs->regs[destreg] = (__u64)(__s64) *(__s16 *) &buffer;
457 } else {
458 regs->regs[destreg] = (__u64) *(__u16 *) &buffer;
459 }
460 break;
461 case 2:
462 regs->regs[destreg] = (__u64)(__s64) *(__s32 *) &buffer;
463 break;
464 case 3:
465 regs->regs[destreg] = buffer;
466 break;
467 default:
468 printk("Unexpected width_shift %d in misaligned_load, PC=%08lx\n",
469 width_shift, (unsigned long) regs->pc);
470 break;
471 }
472 } else
473#endif
474 {
475 /* kernel mode - we can take short cuts since if we fault, it's a genuine bug */
476 __u64 lo, hi;
477
478 switch (width_shift) {
479 case 1:
480 misaligned_kernel_word_load(address, do_sign_extend, &regs->regs[destreg]);
481 break;
482 case 2:
483 asm ("ldlo.l %1, 0, %0" : "=r" (lo) : "r" (address));
484 asm ("ldhi.l %1, 3, %0" : "=r" (hi) : "r" (address));
485 regs->regs[destreg] = lo | hi;
486 break;
487 case 3:
488 asm ("ldlo.q %1, 0, %0" : "=r" (lo) : "r" (address));
489 asm ("ldhi.q %1, 7, %0" : "=r" (hi) : "r" (address));
490 regs->regs[destreg] = lo | hi;
491 break;
492
493 default:
494 printk("Unexpected width_shift %d in misaligned_load, PC=%08lx\n",
495 width_shift, (unsigned long) regs->pc);
496 break;
497 }
498 }
499
500 return 0;
501
502}
503
504static int misaligned_store(struct pt_regs *regs,
505 __u32 opcode,
506 int displacement_not_indexed,
507 int width_shift)
508{
509 /* Return -1 for a fault, 0 for OK */
510 int error;
511 int srcreg;
512 __u64 address;
513
514 error = generate_and_check_address(regs, opcode,
515 displacement_not_indexed, width_shift, &address);
516 if (error < 0) {
517 return error;
518 }
519
520 srcreg = (opcode >> 4) & 0x3f;
521#if defined(CONFIG_SH64_USER_MISALIGNED_FIXUP)
522 if (user_mode(regs)) {
523 __u64 buffer;
524
525 if (!access_ok(VERIFY_WRITE, (unsigned long) address, 1UL<<width_shift)) {
526 return -1;
527 }
528
529 switch (width_shift) {
530 case 1:
531 *(__u16 *) &buffer = (__u16) regs->regs[srcreg];
532 break;
533 case 2:
534 *(__u32 *) &buffer = (__u32) regs->regs[srcreg];
535 break;
536 case 3:
537 buffer = regs->regs[srcreg];
538 break;
539 default:
540 printk("Unexpected width_shift %d in misaligned_store, PC=%08lx\n",
541 width_shift, (unsigned long) regs->pc);
542 break;
543 }
544
545 if (__copy_user((void *)(int)address, &buffer, (1 << width_shift)) > 0) {
546 return -1; /* fault */
547 }
548 } else
549#endif
550 {
551 /* kernel mode - we can take short cuts since if we fault, it's a genuine bug */
552 __u64 val = regs->regs[srcreg];
553
554 switch (width_shift) {
555 case 1:
556 misaligned_kernel_word_store(address, val);
557 break;
558 case 2:
559 asm ("stlo.l %1, 0, %0" : : "r" (val), "r" (address));
560 asm ("sthi.l %1, 3, %0" : : "r" (val), "r" (address));
561 break;
562 case 3:
563 asm ("stlo.q %1, 0, %0" : : "r" (val), "r" (address));
564 asm ("sthi.q %1, 7, %0" : : "r" (val), "r" (address));
565 break;
566
567 default:
568 printk("Unexpected width_shift %d in misaligned_store, PC=%08lx\n",
569 width_shift, (unsigned long) regs->pc);
570 break;
571 }
572 }
573
574 return 0;
575
576}
577
578#if defined(CONFIG_SH64_USER_MISALIGNED_FIXUP)
579/* Never need to fix up misaligned FPU accesses within the kernel since that's a real
580 error. */
581static int misaligned_fpu_load(struct pt_regs *regs,
582 __u32 opcode,
583 int displacement_not_indexed,
584 int width_shift,
585 int do_paired_load)
586{
587 /* Return -1 for a fault, 0 for OK */
588 int error;
589 int destreg;
590 __u64 address;
591
592 error = generate_and_check_address(regs, opcode,
593 displacement_not_indexed, width_shift, &address);
594 if (error < 0) {
595 return error;
596 }
597
598 destreg = (opcode >> 4) & 0x3f;
599 if (user_mode(regs)) {
600 __u64 buffer;
601 __u32 buflo, bufhi;
602
603 if (!access_ok(VERIFY_READ, (unsigned long) address, 1UL<<width_shift)) {
604 return -1;
605 }
606
607 if (__copy_user(&buffer, (const void *)(int)address, (1 << width_shift)) > 0) {
608 return -1; /* fault */
609 }
610 /* 'current' may be the current owner of the FPU state, so
611 context switch the registers into memory so they can be
612 indexed by register number. */
613 if (last_task_used_math == current) {
614 enable_fpu();
615 save_fpu(current, regs);
616 disable_fpu();
617 last_task_used_math = NULL;
618 regs->sr |= SR_FD;
619 }
620
621 buflo = *(__u32*) &buffer;
622 bufhi = *(1 + (__u32*) &buffer);
623
624 switch (width_shift) {
625 case 2:
626 current->thread.fpu.hard.fp_regs[destreg] = buflo;
627 break;
628 case 3:
629 if (do_paired_load) {
630 current->thread.fpu.hard.fp_regs[destreg] = buflo;
631 current->thread.fpu.hard.fp_regs[destreg+1] = bufhi;
632 } else {
633#if defined(CONFIG_LITTLE_ENDIAN)
634 current->thread.fpu.hard.fp_regs[destreg] = bufhi;
635 current->thread.fpu.hard.fp_regs[destreg+1] = buflo;
636#else
637 current->thread.fpu.hard.fp_regs[destreg] = buflo;
638 current->thread.fpu.hard.fp_regs[destreg+1] = bufhi;
639#endif
640 }
641 break;
642 default:
643 printk("Unexpected width_shift %d in misaligned_fpu_load, PC=%08lx\n",
644 width_shift, (unsigned long) regs->pc);
645 break;
646 }
647 return 0;
648 } else {
649 die ("Misaligned FPU load inside kernel", regs, 0);
650 return -1;
651 }
652
653
654}
655
656static int misaligned_fpu_store(struct pt_regs *regs,
657 __u32 opcode,
658 int displacement_not_indexed,
659 int width_shift,
660 int do_paired_load)
661{
662 /* Return -1 for a fault, 0 for OK */
663 int error;
664 int srcreg;
665 __u64 address;
666
667 error = generate_and_check_address(regs, opcode,
668 displacement_not_indexed, width_shift, &address);
669 if (error < 0) {
670 return error;
671 }
672
673 srcreg = (opcode >> 4) & 0x3f;
674 if (user_mode(regs)) {
675 __u64 buffer;
676 /* Initialise these to NaNs. */
677 __u32 buflo=0xffffffffUL, bufhi=0xffffffffUL;
678
679 if (!access_ok(VERIFY_WRITE, (unsigned long) address, 1UL<<width_shift)) {
680 return -1;
681 }
682
683 /* 'current' may be the current owner of the FPU state, so
684 context switch the registers into memory so they can be
685 indexed by register number. */
686 if (last_task_used_math == current) {
687 enable_fpu();
688 save_fpu(current, regs);
689 disable_fpu();
690 last_task_used_math = NULL;
691 regs->sr |= SR_FD;
692 }
693
694 switch (width_shift) {
695 case 2:
696 buflo = current->thread.fpu.hard.fp_regs[srcreg];
697 break;
698 case 3:
699 if (do_paired_load) {
700 buflo = current->thread.fpu.hard.fp_regs[srcreg];
701 bufhi = current->thread.fpu.hard.fp_regs[srcreg+1];
702 } else {
703#if defined(CONFIG_LITTLE_ENDIAN)
704 bufhi = current->thread.fpu.hard.fp_regs[srcreg];
705 buflo = current->thread.fpu.hard.fp_regs[srcreg+1];
706#else
707 buflo = current->thread.fpu.hard.fp_regs[srcreg];
708 bufhi = current->thread.fpu.hard.fp_regs[srcreg+1];
709#endif
710 }
711 break;
712 default:
713 printk("Unexpected width_shift %d in misaligned_fpu_store, PC=%08lx\n",
714 width_shift, (unsigned long) regs->pc);
715 break;
716 }
717
718 *(__u32*) &buffer = buflo;
719 *(1 + (__u32*) &buffer) = bufhi;
720 if (__copy_user((void *)(int)address, &buffer, (1 << width_shift)) > 0) {
721 return -1; /* fault */
722 }
723 return 0;
724 } else {
725 die ("Misaligned FPU store inside kernel", regs, 0);
726 return -1;
727 }
728}
729#endif
730
731static int misaligned_fixup(struct pt_regs *regs)
732{
733 unsigned long opcode;
734 int error;
735 int major, minor;
736
737#if !defined(CONFIG_SH64_USER_MISALIGNED_FIXUP)
738 /* Never fixup user mode misaligned accesses without this option enabled. */
739 return -1;
740#else
741 if (!user_mode_unaligned_fixup_enable) return -1;
742#endif
743
744 error = read_opcode(regs->pc, &opcode, user_mode(regs));
745 if (error < 0) {
746 return error;
747 }
748 major = (opcode >> 26) & 0x3f;
749 minor = (opcode >> 16) & 0xf;
750
751#if defined(CONFIG_SH64_USER_MISALIGNED_FIXUP)
752 if (user_mode(regs) && (user_mode_unaligned_fixup_count > 0)) {
753 --user_mode_unaligned_fixup_count;
754 /* Only do 'count' worth of these reports, to remove a potential DoS against syslog */
755 printk("Fixing up unaligned userspace access in \"%s\" pid=%d pc=0x%08x ins=0x%08lx\n",
756 current->comm, task_pid_nr(current), (__u32)regs->pc, opcode);
757 } else
758#endif
759 if (!user_mode(regs) && (kernel_mode_unaligned_fixup_count > 0)) {
760 --kernel_mode_unaligned_fixup_count;
761 if (in_interrupt()) {
762 printk("Fixing up unaligned kernelspace access in interrupt pc=0x%08x ins=0x%08lx\n",
763 (__u32)regs->pc, opcode);
764 } else {
765 printk("Fixing up unaligned kernelspace access in \"%s\" pid=%d pc=0x%08x ins=0x%08lx\n",
766 current->comm, task_pid_nr(current), (__u32)regs->pc, opcode);
767 }
768 }
769
770
771 switch (major) {
772 case (0x84>>2): /* LD.W */
773 error = misaligned_load(regs, opcode, 1, 1, 1);
774 break;
775 case (0xb0>>2): /* LD.UW */
776 error = misaligned_load(regs, opcode, 1, 1, 0);
777 break;
778 case (0x88>>2): /* LD.L */
779 error = misaligned_load(regs, opcode, 1, 2, 1);
780 break;
781 case (0x8c>>2): /* LD.Q */
782 error = misaligned_load(regs, opcode, 1, 3, 0);
783 break;
784
785 case (0xa4>>2): /* ST.W */
786 error = misaligned_store(regs, opcode, 1, 1);
787 break;
788 case (0xa8>>2): /* ST.L */
789 error = misaligned_store(regs, opcode, 1, 2);
790 break;
791 case (0xac>>2): /* ST.Q */
792 error = misaligned_store(regs, opcode, 1, 3);
793 break;
794
795 case (0x40>>2): /* indexed loads */
796 switch (minor) {
797 case 0x1: /* LDX.W */
798 error = misaligned_load(regs, opcode, 0, 1, 1);
799 break;
800 case 0x5: /* LDX.UW */
801 error = misaligned_load(regs, opcode, 0, 1, 0);
802 break;
803 case 0x2: /* LDX.L */
804 error = misaligned_load(regs, opcode, 0, 2, 1);
805 break;
806 case 0x3: /* LDX.Q */
807 error = misaligned_load(regs, opcode, 0, 3, 0);
808 break;
809 default:
810 error = -1;
811 break;
812 }
813 break;
814
815 case (0x60>>2): /* indexed stores */
816 switch (minor) {
817 case 0x1: /* STX.W */
818 error = misaligned_store(regs, opcode, 0, 1);
819 break;
820 case 0x2: /* STX.L */
821 error = misaligned_store(regs, opcode, 0, 2);
822 break;
823 case 0x3: /* STX.Q */
824 error = misaligned_store(regs, opcode, 0, 3);
825 break;
826 default:
827 error = -1;
828 break;
829 }
830 break;
831
832#if defined(CONFIG_SH64_USER_MISALIGNED_FIXUP)
833 case (0x94>>2): /* FLD.S */
834 error = misaligned_fpu_load(regs, opcode, 1, 2, 0);
835 break;
836 case (0x98>>2): /* FLD.P */
837 error = misaligned_fpu_load(regs, opcode, 1, 3, 1);
838 break;
839 case (0x9c>>2): /* FLD.D */
840 error = misaligned_fpu_load(regs, opcode, 1, 3, 0);
841 break;
842 case (0x1c>>2): /* floating indexed loads */
843 switch (minor) {
844 case 0x8: /* FLDX.S */
845 error = misaligned_fpu_load(regs, opcode, 0, 2, 0);
846 break;
847 case 0xd: /* FLDX.P */
848 error = misaligned_fpu_load(regs, opcode, 0, 3, 1);
849 break;
850 case 0x9: /* FLDX.D */
851 error = misaligned_fpu_load(regs, opcode, 0, 3, 0);
852 break;
853 default:
854 error = -1;
855 break;
856 }
857 break;
858 case (0xb4>>2): /* FST.S */
859 error = misaligned_fpu_store(regs, opcode, 1, 2, 0);
860 break;
861 case (0xb8>>2): /* FST.P */
862 error = misaligned_fpu_store(regs, opcode, 1, 3, 1);
863 break;
864 case (0xbc>>2): /* FST.D */
865 error = misaligned_fpu_store(regs, opcode, 1, 3, 0);
866 break;
867 case (0x3c>>2): /* floating indexed stores */
868 switch (minor) {
869 case 0x8: /* FSTX.S */
870 error = misaligned_fpu_store(regs, opcode, 0, 2, 0);
871 break;
872 case 0xd: /* FSTX.P */
873 error = misaligned_fpu_store(regs, opcode, 0, 3, 1);
874 break;
875 case 0x9: /* FSTX.D */
876 error = misaligned_fpu_store(regs, opcode, 0, 3, 0);
877 break;
878 default:
879 error = -1;
880 break;
881 }
882 break;
883#endif
884
885 default:
886 /* Fault */
887 error = -1;
888 break;
889 }
890
891 if (error < 0) {
892 return error;
893 } else {
894 regs->pc += 4; /* Skip the instruction that's just been emulated */
895 return 0;
896 }
897
898}
899
900static ctl_table unaligned_table[] = {
901 {
902 .ctl_name = CTL_UNNUMBERED,
903 .procname = "kernel_reports",
904 .data = &kernel_mode_unaligned_fixup_count,
905 .maxlen = sizeof(int),
906 .mode = 0644,
907 .proc_handler = &proc_dointvec
908 },
909#if defined(CONFIG_SH64_USER_MISALIGNED_FIXUP)
910 {
911 .ctl_name = CTL_UNNUMBERED,
912 .procname = "user_reports",
913 .data = &user_mode_unaligned_fixup_count,
914 .maxlen = sizeof(int),
915 .mode = 0644,
916 .proc_handler = &proc_dointvec
917 },
918 {
919 .ctl_name = CTL_UNNUMBERED,
920 .procname = "user_enable",
921 .data = &user_mode_unaligned_fixup_enable,
922 .maxlen = sizeof(int),
923 .mode = 0644,
924 .proc_handler = &proc_dointvec},
925#endif
926 {}
927};
928
929static ctl_table unaligned_root[] = {
930 {
931 .ctl_name = CTL_UNNUMBERED,
932 .procname = "unaligned_fixup",
933 .mode = 0555,
934 .child = unaligned_table
935 },
936 {}
937};
938
939static ctl_table sh64_root[] = {
940 {
941 .ctl_name = CTL_UNNUMBERED,
942 .procname = "sh64",
943 .mode = 0555,
944 .child = unaligned_root
945 },
946 {}
947};
948static struct ctl_table_header *sysctl_header;
949static int __init init_sysctl(void)
950{
951 sysctl_header = register_sysctl_table(sh64_root);
952 return 0;
953}
954
955__initcall(init_sysctl);
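
Once init_sysctl() has run, the tables above surface as /proc/sys/sh64/unaligned_fixup/{kernel_reports,user_reports,user_enable}. A minimal userspace sketch of flipping the user-mode enable (illustrative; assumes that procfs path and a kernel built with CONFIG_SH64_USER_MISALIGNED_FIXUP):

#include <stdio.h>

int main(void)
{
	/* user_enable gates user-mode fixups; the *_reports files hold
	 * the remaining printk budgets set up above. */
	FILE *f = fopen("/proc/sys/sh64/unaligned_fixup/user_enable", "w");

	if (!f)
		return 1;
	fputs("1\n", f);
	fclose(f);
	return 0;
}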
956
957
958asmlinkage void do_debug_interrupt(unsigned long code, struct pt_regs *regs)
959{
960 u64 peek_real_address_q(u64 addr);
961 u64 poke_real_address_q(u64 addr, u64 val);
962 unsigned long long DM_EXP_CAUSE_PHY = 0x0c100010;
963 unsigned long long exp_cause;
964 /* It's not worth ioremapping the debug module registers for the amount
965 of access we make to them - just go direct to their physical
966 addresses. */
967 exp_cause = peek_real_address_q(DM_EXP_CAUSE_PHY);
968 if (exp_cause & ~4) {
969 printk("DM.EXP_CAUSE had unexpected bits set (=%08lx)\n",
970 (unsigned long)(exp_cause & 0xffffffff));
971 }
972 show_state();
973 /* Clear all DEBUGINT causes */
974 poke_real_address_q(DM_EXP_CAUSE_PHY, 0x0);
975}
diff --git a/arch/sh/kernel/vmlinux.lds.S b/arch/sh/kernel/vmlinux.lds.S
index 0956fb3681a3..d7d4991f32af 100644
--- a/arch/sh/kernel/vmlinux.lds.S
+++ b/arch/sh/kernel/vmlinux.lds.S
@@ -1,138 +1,5 @@
1/*
2 * ld script to make SuperH Linux kernel
3 * Written by Niibe Yutaka
4 */
5#include <asm/thread_info.h>
6#include <asm/cache.h>
7#include <asm-generic/vmlinux.lds.h>
8
9#ifdef CONFIG_CPU_LITTLE_ENDIAN
10OUTPUT_FORMAT("elf32-sh-linux", "elf32-sh-linux", "elf32-sh-linux")
11#else
12OUTPUT_FORMAT("elf32-shbig-linux", "elf32-shbig-linux", "elf32-shbig-linux")
13#endif
14OUTPUT_ARCH(sh)
15ENTRY(_start)
16SECTIONS
17{
18 . = CONFIG_PAGE_OFFSET + CONFIG_MEMORY_START + CONFIG_ZERO_PAGE_OFFSET;
19 _text = .; /* Text and read-only data */
20
21 .empty_zero_page : {
22 *(.empty_zero_page)
23 } = 0
24
25 .text : {
26 *(.text.head)
27 TEXT_TEXT
28 SCHED_TEXT
29 LOCK_TEXT
30 KPROBES_TEXT
31 *(.fixup)
32 *(.gnu.warning)
33 } = 0x0009
34
35 . = ALIGN(16); /* Exception table */
36 __start___ex_table = .;
37 __ex_table : { *(__ex_table) }
38 __stop___ex_table = .;
39
40 _etext = .; /* End of text section */
41
42 BUG_TABLE
43 NOTES
44 RO_DATA(PAGE_SIZE)
45
46 . = ALIGN(THREAD_SIZE);
47 .data : { /* Data */
48 *(.data.init_task)
49
50 . = ALIGN(L1_CACHE_BYTES);
51 *(.data.cacheline_aligned)
52
53 . = ALIGN(L1_CACHE_BYTES);
54 *(.data.read_mostly)
55
56 . = ALIGN(PAGE_SIZE);
57 *(.data.page_aligned)
58
59 __nosave_begin = .;
60 *(.data.nosave)
61 . = ALIGN(PAGE_SIZE);
62 __nosave_end = .;
63
64 DATA_DATA
65 CONSTRUCTORS
66 }
67
68 _edata = .; /* End of data section */
69
70 . = ALIGN(PAGE_SIZE); /* Init code and data */
71 __init_begin = .;
72 _sinittext = .;
73 .init.text : { *(.init.text) }
74 _einittext = .;
75 .init.data : { *(.init.data) }
76
77 . = ALIGN(16);
78 __setup_start = .;
79 .init.setup : { *(.init.setup) }
80 __setup_end = .;
81
82 __initcall_start = .;
83 .initcall.init : {
84 INITCALLS
85 }
86 __initcall_end = .;
87 __con_initcall_start = .;
88 .con_initcall.init : { *(.con_initcall.init) }
89 __con_initcall_end = .;
90
91 SECURITY_INIT
92
93#ifdef CONFIG_BLK_DEV_INITRD
94 . = ALIGN(PAGE_SIZE);
95 __initramfs_start = .;
96 .init.ramfs : { *(.init.ramfs) }
97 __initramfs_end = .;
98#endif
99
100 . = ALIGN(4);
101 __machvec_start = .;
102 .machvec.init : { *(.machvec.init) }
103 __machvec_end = .;
104
105 PERCPU(PAGE_SIZE)
106
107 /*
108 * .exit.text is discarded at runtime, not link time, to deal with
109 * references from __bug_table
110 */
111 .exit.text : { *(.exit.text) }
112 .exit.data : { *(.exit.data) }
113
114 . = ALIGN(PAGE_SIZE);
115 .bss : {
116 __init_end = .;
117 __bss_start = .; /* BSS */
118 *(.bss.page_aligned)
119 *(.bss)
120 *(COMMON)
121 . = ALIGN(4);
122 _ebss = .; /* uClinux MTD sucks */
123 _end = . ;
124 }
125
126 /*
127 * When something in the kernel is NOT compiled as a module, the
128 * module cleanup code and data are put into these segments. Both
129 * can then be thrown away, as cleanup code is never called unless
130 * it's a module.
131 */
132 /DISCARD/ : {
133 *(.exitcall.exit)
134 }
135
136 STABS_DEBUG
137 DWARF_DEBUG
138}
1#ifdef CONFIG_SUPERH32
2# include "vmlinux_32.lds.S"
3#else
4# include "vmlinux_64.lds.S"
5#endif
diff --git a/arch/sh/kernel/vmlinux_32.lds.S b/arch/sh/kernel/vmlinux_32.lds.S
new file mode 100644
index 000000000000..d549fac6d3e7
--- /dev/null
+++ b/arch/sh/kernel/vmlinux_32.lds.S
@@ -0,0 +1,152 @@
1/*
2 * ld script to make SuperH Linux kernel
3 * Written by Niibe Yutaka
4 */
5#include <asm/thread_info.h>
6#include <asm/cache.h>
7#include <asm-generic/vmlinux.lds.h>
8
9#ifdef CONFIG_CPU_LITTLE_ENDIAN
10OUTPUT_FORMAT("elf32-sh-linux", "elf32-sh-linux", "elf32-sh-linux")
11#else
12OUTPUT_FORMAT("elf32-shbig-linux", "elf32-shbig-linux", "elf32-shbig-linux")
13#endif
14OUTPUT_ARCH(sh)
15ENTRY(_start)
16SECTIONS
17{
18#ifdef CONFIG_32BIT
19 . = CONFIG_PAGE_OFFSET + CONFIG_ZERO_PAGE_OFFSET;
20#else
21 . = CONFIG_PAGE_OFFSET + CONFIG_MEMORY_START + CONFIG_ZERO_PAGE_OFFSET;
22#endif
23
24 _text = .; /* Text and read-only data */
25
26 .empty_zero_page : {
27 *(.empty_zero_page)
28 } = 0
29
30 .text : {
31 *(.text.head)
32 TEXT_TEXT
33 SCHED_TEXT
34 LOCK_TEXT
35 KPROBES_TEXT
36 *(.fixup)
37 *(.gnu.warning)
38 } = 0x0009
39
40 . = ALIGN(16); /* Exception table */
41 __start___ex_table = .;
42 __ex_table : { *(__ex_table) }
43 __stop___ex_table = .;
44
45 _etext = .; /* End of text section */
46
47 BUG_TABLE
48 NOTES
49 RO_DATA(PAGE_SIZE)
50
51 /*
52 * Code which must be executed uncached and the associated data
53 */
54 . = ALIGN(PAGE_SIZE);
55 __uncached_start = .;
56 .uncached.text : { *(.uncached.text) }
57 .uncached.data : { *(.uncached.data) }
58 __uncached_end = .;
59
60 . = ALIGN(THREAD_SIZE);
61 .data : { /* Data */
62 *(.data.init_task)
63
64 . = ALIGN(L1_CACHE_BYTES);
65 *(.data.cacheline_aligned)
66
67 . = ALIGN(L1_CACHE_BYTES);
68 *(.data.read_mostly)
69
70 . = ALIGN(PAGE_SIZE);
71 *(.data.page_aligned)
72
73 __nosave_begin = .;
74 *(.data.nosave)
75 . = ALIGN(PAGE_SIZE);
76 __nosave_end = .;
77
78 DATA_DATA
79 CONSTRUCTORS
80 }
81
82 _edata = .; /* End of data section */
83
84 . = ALIGN(PAGE_SIZE); /* Init code and data */
85 __init_begin = .;
86 _sinittext = .;
87 .init.text : { *(.init.text) }
88 _einittext = .;
89 .init.data : { *(.init.data) }
90
91 . = ALIGN(16);
92 __setup_start = .;
93 .init.setup : { *(.init.setup) }
94 __setup_end = .;
95
96 __initcall_start = .;
97 .initcall.init : {
98 INITCALLS
99 }
100 __initcall_end = .;
101 __con_initcall_start = .;
102 .con_initcall.init : { *(.con_initcall.init) }
103 __con_initcall_end = .;
104
105 SECURITY_INIT
106
107#ifdef CONFIG_BLK_DEV_INITRD
108 . = ALIGN(PAGE_SIZE);
109 __initramfs_start = .;
110 .init.ramfs : { *(.init.ramfs) }
111 __initramfs_end = .;
112#endif
113
114 . = ALIGN(4);
115 __machvec_start = .;
116 .machvec.init : { *(.machvec.init) }
117 __machvec_end = .;
118
119 PERCPU(PAGE_SIZE)
120
121 /*
122 * .exit.text is discarded at runtime, not link time, to deal with
123 * references from __bug_table
124 */
125 .exit.text : { *(.exit.text) }
126 .exit.data : { *(.exit.data) }
127
128 . = ALIGN(PAGE_SIZE);
129 .bss : {
130 __init_end = .;
131 __bss_start = .; /* BSS */
132 *(.bss.page_aligned)
133 *(.bss)
134 *(COMMON)
135 . = ALIGN(4);
136 _ebss = .; /* uClinux MTD sucks */
137 _end = . ;
138 }
139
140 /*
141 * When something in the kernel is NOT compiled as a module, the
142 * module cleanup code and data are put into these segments. Both
143 * can then be thrown away, as cleanup code is never called unless
144 * it's a module.
145 */
146 /DISCARD/ : {
147 *(.exitcall.exit)
148 }
149
150 STABS_DEBUG
151 DWARF_DEBUG
152}
diff --git a/arch/sh/kernel/vmlinux_64.lds.S b/arch/sh/kernel/vmlinux_64.lds.S
new file mode 100644
index 000000000000..2fd0f7401484
--- /dev/null
+++ b/arch/sh/kernel/vmlinux_64.lds.S
@@ -0,0 +1,164 @@
1/*
2 * ld script to make SH64 Linux kernel
3 *
4 * Copyright (C) 2000, 2001 Paolo Alberelli
5 *
6 * benedict.gaster@superh.com: 2nd May 2002
7 * Add definition of empty_zero_page to be the first page of kernel image.
8 *
9 * benedict.gaster@superh.com: 3rd May 2002
10 * Added support for ramdisk, removing statically linked romfs at the
11 * same time.
12 *
13 * lethal@linux-sh.org: 9th May 2003
14 * Kill off GLOBAL_NAME() usage and other CDC-isms.
15 *
16 * lethal@linux-sh.org: 19th May 2003
17 * Remove support for ancient toolchains.
18 *
19 * This file is subject to the terms and conditions of the GNU General Public
20 * License. See the file "COPYING" in the main directory of this archive
21 * for more details.
22 */
23#include <asm/page.h>
24#include <asm/cache.h>
25#include <asm/thread_info.h>
26
27#define LOAD_OFFSET CONFIG_PAGE_OFFSET
28#include <asm-generic/vmlinux.lds.h>
29
30OUTPUT_ARCH(sh:sh5)
31
32#define C_PHYS(x) AT (ADDR(x) - LOAD_OFFSET)
33
34ENTRY(__start)
35SECTIONS
36{
37 . = CONFIG_PAGE_OFFSET + CONFIG_MEMORY_START + PAGE_SIZE;
38 _text = .; /* Text and read-only data */
39
40 .empty_zero_page : C_PHYS(.empty_zero_page) {
41 *(.empty_zero_page)
42 } = 0
43
44 .text : C_PHYS(.text) {
45 *(.text.head)
46 TEXT_TEXT
47 *(.text64)
48 *(.text..SHmedia32)
49 SCHED_TEXT
50 LOCK_TEXT
51 KPROBES_TEXT
52 *(.fixup)
53 *(.gnu.warning)
54#ifdef CONFIG_LITTLE_ENDIAN
55 } = 0x6ff0fff0
56#else
57 } = 0xf0fff06f
58#endif
59
60 /* We likely want __ex_table to be Cache Line aligned */
61 . = ALIGN(L1_CACHE_BYTES); /* Exception table */
62 __start___ex_table = .;
63 __ex_table : C_PHYS(__ex_table) { *(__ex_table) }
64 __stop___ex_table = .;
65
66 _etext = .; /* End of text section */
67
68 BUG_TABLE
69 NOTES
70 RO_DATA(PAGE_SIZE)
71
72 . = ALIGN(THREAD_SIZE);
73 .data : C_PHYS(.data) { /* Data */
74 *(.data.init_task)
75
76 . = ALIGN(L1_CACHE_BYTES);
77 *(.data.cacheline_aligned)
78
79 . = ALIGN(L1_CACHE_BYTES);
80 *(.data.read_mostly)
81
82 . = ALIGN(PAGE_SIZE);
83 *(.data.page_aligned)
84
85 __nosave_begin = .;
86 *(.data.nosave)
87 . = ALIGN(PAGE_SIZE);
88 __nosave_end = .;
89
90 DATA_DATA
91 CONSTRUCTORS
92 }
93
94 _edata = .; /* End of data section */
95
96 . = ALIGN(PAGE_SIZE); /* Init code and data */
97 __init_begin = .;
98 _sinittext = .;
99 .init.text : C_PHYS(.init.text) { *(.init.text) }
100 _einittext = .;
101 .init.data : C_PHYS(.init.data) { *(.init.data) }
102 . = ALIGN(L1_CACHE_BYTES); /* Better if Cache Line aligned */
103 __setup_start = .;
104 .init.setup : C_PHYS(.init.setup) { *(.init.setup) }
105 __setup_end = .;
106 __initcall_start = .;
107 .initcall.init : C_PHYS(.initcall.init) {
108 INITCALLS
109 }
110 __initcall_end = .;
111 __con_initcall_start = .;
112 .con_initcall.init : C_PHYS(.con_initcall.init) {
113 *(.con_initcall.init)
114 }
115 __con_initcall_end = .;
116
117 SECURITY_INIT
118
119#ifdef CONFIG_BLK_DEV_INITRD
120 . = ALIGN(PAGE_SIZE);
121 __initramfs_start = .;
122 .init.ramfs : C_PHYS(.init.ramfs) { *(.init.ramfs) }
123 __initramfs_end = .;
124#endif
125
126 . = ALIGN(8);
127 __machvec_start = .;
128 .machvec.init : C_PHYS(.machvec.init) { *(.machvec.init) }
129 __machvec_end = .;
130
131 PERCPU(PAGE_SIZE)
132
133 /*
134 * .exit.text is discarded at runtime, not link time, to deal with
135 * references from __bug_table
136 */
137 .exit.text : C_PHYS(.exit.text) { *(.exit.text) }
138 .exit.data : C_PHYS(.exit.data) { *(.exit.data) }
139
140 . = ALIGN(PAGE_SIZE);
141 .bss : C_PHYS(.bss) {
142 __init_end = .;
143 __bss_start = .; /* BSS */
144 *(.bss.page_aligned)
145 *(.bss)
146 *(COMMON)
147 . = ALIGN(4);
148 _ebss = .; /* uClinux MTD sucks */
149 _end = . ;
150 }
151
152 /*
153 * When something in the kernel is NOT compiled as a module, the
154 * module cleanup code and data are put into these segments. Both
155 * can then be thrown away, as cleanup code is never called unless
156 * it's a module.
157 */
158 /DISCARD/ : {
159 *(.exitcall.exit)
160 }
161
162 STABS_DEBUG
163 DWARF_DEBUG
164}