aboutsummaryrefslogtreecommitdiffstats
path: root/arch/sh/kernel
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@ppc970.osdl.org>2005-04-16 18:20:36 -0400
committerLinus Torvalds <torvalds@ppc970.osdl.org>2005-04-16 18:20:36 -0400
commit1da177e4c3f41524e886b7f1b8a0c1fc7321cac2 (patch)
tree0bba044c4ce775e45a88a51686b5d9f90697ea9d /arch/sh/kernel
Linux-2.6.12-rc2v2.6.12-rc2
Initial git repository build. I'm not bothering with the full history, even though we have it. We can create a separate "historical" git archive of that later if we want to, and in the meantime it's about 3.2GB when imported into git - space that would just make the early git days unnecessarily complicated, when we don't have a lot of good infrastructure for it. Let it rip!
Diffstat (limited to 'arch/sh/kernel')
-rw-r--r--arch/sh/kernel/Makefile22
-rw-r--r--arch/sh/kernel/asm-offsets.c32
-rw-r--r--arch/sh/kernel/cf-enabler.c158
-rw-r--r--arch/sh/kernel/cpu/Makefile16
-rw-r--r--arch/sh/kernel/cpu/adc.c36
-rw-r--r--arch/sh/kernel/cpu/bus.c195
-rw-r--r--arch/sh/kernel/cpu/init.c222
-rw-r--r--arch/sh/kernel/cpu/irq_imask.c116
-rw-r--r--arch/sh/kernel/cpu/irq_ipr.c339
-rw-r--r--arch/sh/kernel/cpu/rtc.c136
-rw-r--r--arch/sh/kernel/cpu/sh2/Makefile6
-rw-r--r--arch/sh/kernel/cpu/sh2/probe.c39
-rw-r--r--arch/sh/kernel/cpu/sh3/Makefile6
-rw-r--r--arch/sh/kernel/cpu/sh3/ex.S199
-rw-r--r--arch/sh/kernel/cpu/sh3/probe.c97
-rw-r--r--arch/sh/kernel/cpu/sh4/Makefile10
-rw-r--r--arch/sh/kernel/cpu/sh4/ex.S384
-rw-r--r--arch/sh/kernel/cpu/sh4/fpu.c335
-rw-r--r--arch/sh/kernel/cpu/sh4/irq_intc2.c222
-rw-r--r--arch/sh/kernel/cpu/sh4/probe.c138
-rw-r--r--arch/sh/kernel/cpu/sh4/sq.c453
-rw-r--r--arch/sh/kernel/cpu/ubc.S59
-rw-r--r--arch/sh/kernel/cpufreq.c218
-rw-r--r--arch/sh/kernel/early_printk.c137
-rw-r--r--arch/sh/kernel/entry.S1149
-rw-r--r--arch/sh/kernel/head.S76
-rw-r--r--arch/sh/kernel/init_task.c36
-rw-r--r--arch/sh/kernel/io.c59
-rw-r--r--arch/sh/kernel/io_generic.c243
-rw-r--r--arch/sh/kernel/irq.c106
-rw-r--r--arch/sh/kernel/kgdb_jmp.S33
-rw-r--r--arch/sh/kernel/kgdb_stub.c1491
-rw-r--r--arch/sh/kernel/module.c146
-rw-r--r--arch/sh/kernel/process.c531
-rw-r--r--arch/sh/kernel/ptrace.c320
-rw-r--r--arch/sh/kernel/semaphore.c139
-rw-r--r--arch/sh/kernel/setup.c649
-rw-r--r--arch/sh/kernel/sh_bios.c75
-rw-r--r--arch/sh/kernel/sh_ksyms.c126
-rw-r--r--arch/sh/kernel/signal.c607
-rw-r--r--arch/sh/kernel/smp.c199
-rw-r--r--arch/sh/kernel/sys_sh.c289
-rw-r--r--arch/sh/kernel/time.c657
-rw-r--r--arch/sh/kernel/traps.c712
-rw-r--r--arch/sh/kernel/vmlinux.lds.S155
45 files changed, 11373 insertions, 0 deletions
diff --git a/arch/sh/kernel/Makefile b/arch/sh/kernel/Makefile
new file mode 100644
index 000000000000..8b819698df14
--- /dev/null
+++ b/arch/sh/kernel/Makefile
@@ -0,0 +1,22 @@
1#
2# Makefile for the Linux/SuperH kernel.
3#
4
5extra-y := head.o init_task.o vmlinux.lds
6
7obj-y := process.o signal.o entry.o traps.o irq.o \
8 ptrace.o setup.o time.o sys_sh.o semaphore.o \
9 io.o io_generic.o sh_ksyms.o
10
11obj-y += cpu/
12
13obj-$(CONFIG_SMP) += smp.o
14obj-$(CONFIG_CF_ENABLER) += cf-enabler.o
15obj-$(CONFIG_SH_STANDARD_BIOS) += sh_bios.o
16obj-$(CONFIG_SH_KGDB) += kgdb_stub.o kgdb_jmp.o
17obj-$(CONFIG_SH_CPU_FREQ) += cpufreq.o
18obj-$(CONFIG_MODULES) += module.o
19obj-$(CONFIG_EARLY_PRINTK) += early_printk.o
20
21USE_STANDARD_AS_RULE := true
22
diff --git a/arch/sh/kernel/asm-offsets.c b/arch/sh/kernel/asm-offsets.c
new file mode 100644
index 000000000000..dc6725c51a89
--- /dev/null
+++ b/arch/sh/kernel/asm-offsets.c
@@ -0,0 +1,32 @@
1/*
2 * This program is used to generate definitions needed by
3 * assembly language modules.
4 *
5 * We use the technique used in the OSF Mach kernel code:
6 * generate asm statements containing #defines,
7 * compile this file to assembler, and then extract the
8 * #defines from the assembly-language output.
9 */
10
11#include <linux/stddef.h>
12#include <linux/types.h>
13#include <linux/mm.h>
14#include <asm/thread_info.h>
15
16#define DEFINE(sym, val) \
17 asm volatile("\n->" #sym " %0 " #val : : "i" (val))
18
19#define BLANK() asm volatile("\n->" : : )
20
21int main(void)
22{
23 /* offsets into the thread_info struct */
24 DEFINE(TI_TASK, offsetof(struct thread_info, task));
25 DEFINE(TI_EXEC_DOMAIN, offsetof(struct thread_info, exec_domain));
26 DEFINE(TI_FLAGS, offsetof(struct thread_info, flags));
27 DEFINE(TI_CPU, offsetof(struct thread_info, cpu));
28 DEFINE(TI_PRE_COUNT, offsetof(struct thread_info, preempt_count));
29 DEFINE(TI_RESTART_BLOCK,offsetof(struct thread_info, restart_block));
30
31 return 0;
32}
diff --git a/arch/sh/kernel/cf-enabler.c b/arch/sh/kernel/cf-enabler.c
new file mode 100644
index 000000000000..7a3b18faa277
--- /dev/null
+++ b/arch/sh/kernel/cf-enabler.c
@@ -0,0 +1,158 @@
1/* $Id: cf-enabler.c,v 1.4 2004/02/22 22:44:36 kkojima Exp $
2 *
3 * linux/drivers/block/cf-enabler.c
4 *
5 * Copyright (C) 1999 Niibe Yutaka
6 * Copyright (C) 2000 Toshiharu Nozawa
7 * Copyright (C) 2001 A&D Co., Ltd.
8 *
9 * Enable the CF configuration.
10 */
11
12#include <linux/config.h>
13#include <linux/init.h>
14
15#include <asm/io.h>
16#include <asm/irq.h>
17
18/*
19 * You can connect Compact Flash directly to the bus of SuperH.
20 * This is the enabler for that.
21 *
22 * SIM: How generic is this really? It looks pretty board, or at
23 * least SH sub-type, specific to me.
24 * I know it doesn't work on the Overdrive!
25 */
26
27/*
28 * 0xB8000000 : Attribute
29 * 0xB8001000 : Common Memory
30 * 0xBA000000 : I/O
31 */
32#if defined(CONFIG_IDE) && defined(CONFIG_CPU_SH4)
33/* SH4 can't access PCMCIA interface through P2 area.
34 * we must remap it with appropreate attribute bit of the page set.
35 * this part is based on Greg Banks' hd64465_ss.c implementation - Masahiro Abe */
36#include <linux/mm.h>
37#include <linux/vmalloc.h>
38
39#if defined(CONFIG_CF_AREA6)
40#define slot_no 0
41#else
42#define slot_no 1
43#endif
44
45/* defined in mm/ioremap.c */
46extern void * p3_ioremap(unsigned long phys_addr, unsigned long size, unsigned long flags);
47
48/* use this pointer to access to directly connected compact flash io area*/
49void *cf_io_base;
50
51static int __init allocate_cf_area(void)
52{
53 pgprot_t prot;
54 unsigned long paddrbase, psize;
55
56 /* open I/O area window */
57 paddrbase = virt_to_phys((void*)CONFIG_CF_BASE_ADDR);
58 psize = PAGE_SIZE;
59 prot = PAGE_KERNEL_PCC(slot_no, _PAGE_PCC_IO16);
60 cf_io_base = p3_ioremap(paddrbase, psize, prot.pgprot);
61 if (!cf_io_base) {
62 printk("allocate_cf_area : can't open CF I/O window!\n");
63 return -ENOMEM;
64 }
65/* printk("p3_ioremap(paddr=0x%08lx, psize=0x%08lx, prot=0x%08lx)=0x%08lx\n",
66 paddrbase, psize, prot.pgprot, cf_io_base);*/
67
68 /* XXX : do we need attribute and common-memory area also? */
69
70 return 0;
71}
72#endif
73
74static int __init cf_init_default(void)
75{
76/* You must have enabled the card, and set the level interrupt
77 * before reaching this point. Possibly in boot ROM or boot loader.
78 */
79#if defined(CONFIG_IDE) && defined(CONFIG_CPU_SH4)
80 allocate_cf_area();
81#endif
82#if defined(CONFIG_SH_UNKNOWN)
83 /* This should be done in each board's init_xxx_irq. */
84 make_imask_irq(14);
85 disable_irq(14);
86#endif
87 return 0;
88}
89
90#if defined(CONFIG_SH_SOLUTION_ENGINE)
91#include <asm/se/se.h>
92
93/*
94 * SolutionEngine
95 *
96 * 0xB8400000 : Common Memory
97 * 0xB8500000 : Attribute
98 * 0xB8600000 : I/O
99 */
100
101static int __init cf_init_se(void)
102{
103 if ((ctrl_inw(MRSHPC_CSR) & 0x000c) != 0)
104 return 0; /* Not detected */
105
106 if ((ctrl_inw(MRSHPC_CSR) & 0x0080) == 0) {
107 ctrl_outw(0x0674, MRSHPC_CPWCR); /* Card Vcc is 3.3v? */
108 } else {
109 ctrl_outw(0x0678, MRSHPC_CPWCR); /* Card Vcc is 5V */
110 }
111
112 /*
113 * PC-Card window open
114 * flag == COMMON/ATTRIBUTE/IO
115 */
116 /* common window open */
117 ctrl_outw(0x8a84, MRSHPC_MW0CR1);/* window 0xb8400000 */
118 if((ctrl_inw(MRSHPC_CSR) & 0x4000) != 0)
119 /* common mode & bus width 16bit SWAP = 1*/
120 ctrl_outw(0x0b00, MRSHPC_MW0CR2);
121 else
122 /* common mode & bus width 16bit SWAP = 0*/
123 ctrl_outw(0x0300, MRSHPC_MW0CR2);
124
125 /* attribute window open */
126 ctrl_outw(0x8a85, MRSHPC_MW1CR1);/* window 0xb8500000 */
127 if ((ctrl_inw(MRSHPC_CSR) & 0x4000) != 0)
128 /* attribute mode & bus width 16bit SWAP = 1*/
129 ctrl_outw(0x0a00, MRSHPC_MW1CR2);
130 else
131 /* attribute mode & bus width 16bit SWAP = 0*/
132 ctrl_outw(0x0200, MRSHPC_MW1CR2);
133
134 /* I/O window open */
135 ctrl_outw(0x8a86, MRSHPC_IOWCR1);/* I/O window 0xb8600000 */
136 ctrl_outw(0x0008, MRSHPC_CDCR); /* I/O card mode */
137 if ((ctrl_inw(MRSHPC_CSR) & 0x4000) != 0)
138 ctrl_outw(0x0a00, MRSHPC_IOWCR2); /* bus width 16bit SWAP = 1*/
139 else
140 ctrl_outw(0x0200, MRSHPC_IOWCR2); /* bus width 16bit SWAP = 0*/
141
142 ctrl_outw(0x2000, MRSHPC_ICR);
143 ctrl_outb(0x00, PA_MRSHPC_MW2 + 0x206);
144 ctrl_outb(0x42, PA_MRSHPC_MW2 + 0x200);
145 return 0;
146}
147#endif
148
149int __init cf_init(void)
150{
151#if defined(CONFIG_SH_SOLUTION_ENGINE)
152 if (MACH_SE)
153 return cf_init_se();
154#endif
155 return cf_init_default();
156}
157
158__initcall (cf_init);
diff --git a/arch/sh/kernel/cpu/Makefile b/arch/sh/kernel/cpu/Makefile
new file mode 100644
index 000000000000..cd43714df61a
--- /dev/null
+++ b/arch/sh/kernel/cpu/Makefile
@@ -0,0 +1,16 @@
1#
2# Makefile for the Linux/SuperH CPU-specifc backends.
3#
4
5obj-y := irq_ipr.o irq_imask.o init.o bus.o
6
7obj-$(CONFIG_CPU_SH2) += sh2/
8obj-$(CONFIG_CPU_SH3) += sh3/
9obj-$(CONFIG_CPU_SH4) += sh4/
10
11obj-$(CONFIG_SH_RTC) += rtc.o
12obj-$(CONFIG_UBC_WAKEUP) += ubc.o
13obj-$(CONFIG_SH_ADC) += adc.o
14
15USE_STANDARD_AS_RULE := true
16
diff --git a/arch/sh/kernel/cpu/adc.c b/arch/sh/kernel/cpu/adc.c
new file mode 100644
index 000000000000..da3d6877f93d
--- /dev/null
+++ b/arch/sh/kernel/cpu/adc.c
@@ -0,0 +1,36 @@
1/*
2 * linux/arch/sh/kernel/adc.c -- SH3 on-chip ADC support
3 *
4 * Copyright (C) 2004 Andriy Skulysh <askulysh@image.kiev.ua>
5 */
6
7#include <linux/module.h>
8#include <asm/adc.h>
9#include <asm/io.h>
10
11
12int adc_single(unsigned int channel)
13{
14 int off;
15 unsigned char csr;
16
17 if (channel >= 8) return -1;
18
19 off = (channel & 0x03) << 2;
20
21 csr = ctrl_inb(ADCSR);
22 csr = channel | ADCSR_ADST | ADCSR_CKS;
23 ctrl_outb(csr, ADCSR);
24
25 do {
26 csr = ctrl_inb(ADCSR);
27 } while ((csr & ADCSR_ADF) == 0);
28
29 csr &= ~(ADCSR_ADF | ADCSR_ADST);
30 ctrl_outb(csr, ADCSR);
31
32 return (((ctrl_inb(ADDRAH + off) << 8) |
33 ctrl_inb(ADDRAL + off)) >> 6);
34}
35
36EXPORT_SYMBOL(adc_single);
diff --git a/arch/sh/kernel/cpu/bus.c b/arch/sh/kernel/cpu/bus.c
new file mode 100644
index 000000000000..ace82f4b4a59
--- /dev/null
+++ b/arch/sh/kernel/cpu/bus.c
@@ -0,0 +1,195 @@
1/*
2 * arch/sh/kernel/cpu/bus.c
3 *
4 * Virtual bus for SuperH.
5 *
6 * Copyright (C) 2004 Paul Mundt
7 *
8 * Shamelessly cloned from arch/arm/mach-omap/bus.c, which was written
9 * by:
10 *
11 * Copyright (C) 2003 - 2004 Nokia Corporation
12 * Written by Tony Lindgren <tony@atomide.com>
13 * Portions of code based on sa1111.c.
14 *
15 * This program is free software; you can redistribute it and/or modify it
16 * under the terms of the GNU General Public License as published by the
17 * Free Software Foundation; either version 2 of the License, or (at your
18 * option) any later version.
19 */
20#include <linux/kernel.h>
21#include <linux/device.h>
22#include <linux/init.h>
23#include <linux/module.h>
24#include <asm/bus-sh.h>
25
26static int sh_bus_match(struct device *dev, struct device_driver *drv)
27{
28 struct sh_driver *shdrv = to_sh_driver(drv);
29 struct sh_dev *shdev = to_sh_dev(dev);
30
31 return shdev->dev_id == shdrv->dev_id;
32}
33
34static int sh_bus_suspend(struct device *dev, u32 state)
35{
36 struct sh_dev *shdev = to_sh_dev(dev);
37 struct sh_driver *shdrv = to_sh_driver(dev->driver);
38
39 if (shdrv && shdrv->suspend)
40 return shdrv->suspend(shdev, state);
41
42 return 0;
43}
44
45static int sh_bus_resume(struct device *dev)
46{
47 struct sh_dev *shdev = to_sh_dev(dev);
48 struct sh_driver *shdrv = to_sh_driver(dev->driver);
49
50 if (shdrv && shdrv->resume)
51 return shdrv->resume(shdev);
52
53 return 0;
54}
55
56static struct device sh_bus_devices[SH_NR_BUSES] = {
57 {
58 .bus_id = SH_BUS_NAME_VIRT,
59 },
60};
61
62struct bus_type sh_bus_types[SH_NR_BUSES] = {
63 {
64 .name = SH_BUS_NAME_VIRT,
65 .match = sh_bus_match,
66 .suspend = sh_bus_suspend,
67 .resume = sh_bus_resume,
68 },
69};
70
71static int sh_device_probe(struct device *dev)
72{
73 struct sh_dev *shdev = to_sh_dev(dev);
74 struct sh_driver *shdrv = to_sh_driver(dev->driver);
75
76 if (shdrv && shdrv->probe)
77 return shdrv->probe(shdev);
78
79 return -ENODEV;
80}
81
82static int sh_device_remove(struct device *dev)
83{
84 struct sh_dev *shdev = to_sh_dev(dev);
85 struct sh_driver *shdrv = to_sh_driver(dev->driver);
86
87 if (shdrv && shdrv->remove)
88 return shdrv->remove(shdev);
89
90 return 0;
91}
92
93int sh_device_register(struct sh_dev *dev)
94{
95 if (!dev)
96 return -EINVAL;
97
98 if (dev->bus_id < 0 || dev->bus_id >= SH_NR_BUSES) {
99 printk(KERN_ERR "%s: bus_id invalid: %s bus: %d\n",
100 __FUNCTION__, dev->name, dev->bus_id);
101 return -EINVAL;
102 }
103
104 dev->dev.parent = &sh_bus_devices[dev->bus_id];
105 dev->dev.bus = &sh_bus_types[dev->bus_id];
106
107 /* This is needed for USB OHCI to work */
108 if (dev->dma_mask)
109 dev->dev.dma_mask = dev->dma_mask;
110
111 snprintf(dev->dev.bus_id, BUS_ID_SIZE, "%s%u",
112 dev->name, dev->dev_id);
113
114 printk(KERN_INFO "Registering SH device '%s'. Parent at %s\n",
115 dev->dev.bus_id, dev->dev.parent->bus_id);
116
117 return device_register(&dev->dev);
118}
119
120void sh_device_unregister(struct sh_dev *dev)
121{
122 device_unregister(&dev->dev);
123}
124
125int sh_driver_register(struct sh_driver *drv)
126{
127 if (!drv)
128 return -EINVAL;
129
130 if (drv->bus_id < 0 || drv->bus_id >= SH_NR_BUSES) {
131 printk(KERN_ERR "%s: bus_id invalid: bus: %d device %d\n",
132 __FUNCTION__, drv->bus_id, drv->dev_id);
133 return -EINVAL;
134 }
135
136 drv->drv.probe = sh_device_probe;
137 drv->drv.remove = sh_device_remove;
138 drv->drv.bus = &sh_bus_types[drv->bus_id];
139
140 return driver_register(&drv->drv);
141}
142
143void sh_driver_unregister(struct sh_driver *drv)
144{
145 driver_unregister(&drv->drv);
146}
147
148static int __init sh_bus_init(void)
149{
150 int i, ret = 0;
151
152 for (i = 0; i < SH_NR_BUSES; i++) {
153 ret = device_register(&sh_bus_devices[i]);
154 if (ret != 0) {
155 printk(KERN_ERR "Unable to register bus device %s\n",
156 sh_bus_devices[i].bus_id);
157 continue;
158 }
159
160 ret = bus_register(&sh_bus_types[i]);
161 if (ret != 0) {
162 printk(KERN_ERR "Unable to register bus %s\n",
163 sh_bus_types[i].name);
164 device_unregister(&sh_bus_devices[i]);
165 }
166 }
167
168 printk(KERN_INFO "SH Virtual Bus initialized\n");
169
170 return ret;
171}
172
173static void __exit sh_bus_exit(void)
174{
175 int i;
176
177 for (i = 0; i < SH_NR_BUSES; i++) {
178 bus_unregister(&sh_bus_types[i]);
179 device_unregister(&sh_bus_devices[i]);
180 }
181}
182
183module_init(sh_bus_init);
184module_exit(sh_bus_exit);
185
186MODULE_AUTHOR("Paul Mundt <lethal@linux-sh.org>");
187MODULE_DESCRIPTION("SH Virtual Bus");
188MODULE_LICENSE("GPL");
189
190EXPORT_SYMBOL(sh_bus_types);
191EXPORT_SYMBOL(sh_device_register);
192EXPORT_SYMBOL(sh_device_unregister);
193EXPORT_SYMBOL(sh_driver_register);
194EXPORT_SYMBOL(sh_driver_unregister);
195
diff --git a/arch/sh/kernel/cpu/init.c b/arch/sh/kernel/cpu/init.c
new file mode 100644
index 000000000000..cf94e8ef17c5
--- /dev/null
+++ b/arch/sh/kernel/cpu/init.c
@@ -0,0 +1,222 @@
1/*
2 * arch/sh/kernel/cpu/init.c
3 *
4 * CPU init code
5 *
6 * Copyright (C) 2002, 2003 Paul Mundt
7 *
8 * This file is subject to the terms and conditions of the GNU General Public
9 * License. See the file "COPYING" in the main directory of this archive
10 * for more details.
11 */
12#include <linux/init.h>
13#include <linux/kernel.h>
14#include <asm/processor.h>
15#include <asm/uaccess.h>
16#include <asm/system.h>
17#include <asm/cacheflush.h>
18#include <asm/cache.h>
19#include <asm/io.h>
20
21extern void detect_cpu_and_cache_system(void);
22
23/*
24 * Generic wrapper for command line arguments to disable on-chip
25 * peripherals (nofpu, nodsp, and so forth).
26 */
27#define onchip_setup(x) \
28static int x##_disabled __initdata = 0; \
29 \
30static int __init x##_setup(char *opts) \
31{ \
32 x##_disabled = 1; \
33 return 0; \
34} \
35__setup("no" __stringify(x), x##_setup);
36
37onchip_setup(fpu);
38onchip_setup(dsp);
39
40/*
41 * Generic first-level cache init
42 */
43static void __init cache_init(void)
44{
45 unsigned long ccr, flags;
46
47 if (cpu_data->type == CPU_SH_NONE)
48 panic("Unknown CPU");
49
50 jump_to_P2();
51 ccr = ctrl_inl(CCR);
52
53 /*
54 * If the cache is already enabled .. flush it.
55 */
56 if (ccr & CCR_CACHE_ENABLE) {
57 unsigned long ways, waysize, addrstart;
58
59 waysize = cpu_data->dcache.sets;
60
61 /*
62 * If the OC is already in RAM mode, we only have
63 * half of the entries to flush..
64 */
65 if (ccr & CCR_CACHE_ORA)
66 waysize >>= 1;
67
68 waysize <<= cpu_data->dcache.entry_shift;
69
70#ifdef CCR_CACHE_EMODE
71 /* If EMODE is not set, we only have 1 way to flush. */
72 if (!(ccr & CCR_CACHE_EMODE))
73 ways = 1;
74 else
75#endif
76 ways = cpu_data->dcache.ways;
77
78 addrstart = CACHE_OC_ADDRESS_ARRAY;
79 do {
80 unsigned long addr;
81
82 for (addr = addrstart;
83 addr < addrstart + waysize;
84 addr += cpu_data->dcache.linesz)
85 ctrl_outl(0, addr);
86
87 addrstart += cpu_data->dcache.way_incr;
88 } while (--ways);
89 }
90
91 /*
92 * Default CCR values .. enable the caches
93 * and invalidate them immediately..
94 */
95 flags = CCR_CACHE_ENABLE | CCR_CACHE_INVALIDATE;
96
97#ifdef CCR_CACHE_EMODE
98 /* Force EMODE if possible */
99 if (cpu_data->dcache.ways > 1)
100 flags |= CCR_CACHE_EMODE;
101#endif
102
103#ifdef CONFIG_SH_WRITETHROUGH
104 /* Turn on Write-through caching */
105 flags |= CCR_CACHE_WT;
106#else
107 /* .. or default to Write-back */
108 flags |= CCR_CACHE_CB;
109#endif
110
111#ifdef CONFIG_SH_OCRAM
112 /* Turn on OCRAM -- halve the OC */
113 flags |= CCR_CACHE_ORA;
114 cpu_data->dcache.sets >>= 1;
115#endif
116
117 ctrl_outl(flags, CCR);
118 back_to_P1();
119}
120
121#ifdef CONFIG_SH_DSP
122static void __init release_dsp(void)
123{
124 unsigned long sr;
125
126 /* Clear SR.DSP bit */
127 __asm__ __volatile__ (
128 "stc\tsr, %0\n\t"
129 "and\t%1, %0\n\t"
130 "ldc\t%0, sr\n\t"
131 : "=&r" (sr)
132 : "r" (~SR_DSP)
133 );
134}
135
136static void __init dsp_init(void)
137{
138 unsigned long sr;
139
140 /*
141 * Set the SR.DSP bit, wait for one instruction, and then read
142 * back the SR value.
143 */
144 __asm__ __volatile__ (
145 "stc\tsr, %0\n\t"
146 "or\t%1, %0\n\t"
147 "ldc\t%0, sr\n\t"
148 "nop\n\t"
149 "stc\tsr, %0\n\t"
150 : "=&r" (sr)
151 : "r" (SR_DSP)
152 );
153
154 /* If the DSP bit is still set, this CPU has a DSP */
155 if (sr & SR_DSP)
156 cpu_data->flags |= CPU_HAS_DSP;
157
158 /* Now that we've determined the DSP status, clear the DSP bit. */
159 release_dsp();
160}
161#endif /* CONFIG_SH_DSP */
162
163/**
164 * sh_cpu_init
165 *
166 * This is our initial entry point for each CPU, and is invoked on the boot
167 * CPU prior to calling start_kernel(). For SMP, a combination of this and
168 * start_secondary() will bring up each processor to a ready state prior
169 * to hand forking the idle loop.
170 *
171 * We do all of the basic processor init here, including setting up the
172 * caches, FPU, DSP, kicking the UBC, etc. By the time start_kernel() is
173 * hit (and subsequently platform_setup()) things like determining the
174 * CPU subtype and initial configuration will all be done.
175 *
176 * Each processor family is still responsible for doing its own probing
177 * and cache configuration in detect_cpu_and_cache_system().
178 */
179asmlinkage void __init sh_cpu_init(void)
180{
181 /* First, probe the CPU */
182 detect_cpu_and_cache_system();
183
184 /* Init the cache */
185 cache_init();
186
187 /* Disable the FPU */
188 if (fpu_disabled) {
189 printk("FPU Disabled\n");
190 cpu_data->flags &= ~CPU_HAS_FPU;
191 disable_fpu();
192 }
193
194 /* FPU initialization */
195 if ((cpu_data->flags & CPU_HAS_FPU)) {
196 clear_thread_flag(TIF_USEDFPU);
197 clear_used_math();
198 }
199
200#ifdef CONFIG_SH_DSP
201 /* Probe for DSP */
202 dsp_init();
203
204 /* Disable the DSP */
205 if (dsp_disabled) {
206 printk("DSP Disabled\n");
207 cpu_data->flags &= ~CPU_HAS_DSP;
208 release_dsp();
209 }
210#endif
211
212#ifdef CONFIG_UBC_WAKEUP
213 /*
214 * Some brain-damaged loaders decided it would be a good idea to put
215 * the UBC to sleep. This causes some issues when it comes to things
216 * like PTRACE_SINGLESTEP or doing hardware watchpoints in GDB. So ..
217 * we wake it up and hope that all is well.
218 */
219 ubc_wakeup();
220#endif
221}
222
diff --git a/arch/sh/kernel/cpu/irq_imask.c b/arch/sh/kernel/cpu/irq_imask.c
new file mode 100644
index 000000000000..f76901e732fb
--- /dev/null
+++ b/arch/sh/kernel/cpu/irq_imask.c
@@ -0,0 +1,116 @@
1/* $Id: irq_imask.c,v 1.1.2.1 2002/11/17 10:53:43 mrbrown Exp $
2 *
3 * linux/arch/sh/kernel/irq_imask.c
4 *
5 * Copyright (C) 1999, 2000 Niibe Yutaka
6 *
7 * Simple interrupt handling using IMASK of SR register.
8 *
9 */
10
11/* NOTE: Will not work on level 15 */
12
13
14#include <linux/ptrace.h>
15#include <linux/errno.h>
16#include <linux/kernel_stat.h>
17#include <linux/signal.h>
18#include <linux/sched.h>
19#include <linux/interrupt.h>
20#include <linux/init.h>
21#include <linux/bitops.h>
22
23#include <asm/system.h>
24#include <asm/irq.h>
25
26#include <linux/spinlock.h>
27#include <linux/cache.h>
28#include <linux/irq.h>
29
30/* Bitmap of IRQ masked */
31static unsigned long imask_mask = 0x7fff;
32static int interrupt_priority = 0;
33
34static void enable_imask_irq(unsigned int irq);
35static void disable_imask_irq(unsigned int irq);
36static void shutdown_imask_irq(unsigned int irq);
37static void mask_and_ack_imask(unsigned int);
38static void end_imask_irq(unsigned int irq);
39
40#define IMASK_PRIORITY 15
41
42static unsigned int startup_imask_irq(unsigned int irq)
43{
44 /* Nothing to do */
45 return 0; /* never anything pending */
46}
47
48static struct hw_interrupt_type imask_irq_type = {
49 "SR.IMASK",
50 startup_imask_irq,
51 shutdown_imask_irq,
52 enable_imask_irq,
53 disable_imask_irq,
54 mask_and_ack_imask,
55 end_imask_irq
56};
57
58void static inline set_interrupt_registers(int ip)
59{
60 unsigned long __dummy;
61
62 asm volatile("ldc %2, r6_bank\n\t"
63 "stc sr, %0\n\t"
64 "and #0xf0, %0\n\t"
65 "shlr2 %0\n\t"
66 "cmp/eq #0x3c, %0\n\t"
67 "bt/s 1f ! CLI-ed\n\t"
68 " stc sr, %0\n\t"
69 "and %1, %0\n\t"
70 "or %2, %0\n\t"
71 "ldc %0, sr\n"
72 "1:"
73 : "=&z" (__dummy)
74 : "r" (~0xf0), "r" (ip << 4)
75 : "t");
76}
77
78static void disable_imask_irq(unsigned int irq)
79{
80 clear_bit(irq, &imask_mask);
81 if (interrupt_priority < IMASK_PRIORITY - irq)
82 interrupt_priority = IMASK_PRIORITY - irq;
83
84 set_interrupt_registers(interrupt_priority);
85}
86
87static void enable_imask_irq(unsigned int irq)
88{
89 set_bit(irq, &imask_mask);
90 interrupt_priority = IMASK_PRIORITY - ffz(imask_mask);
91
92 set_interrupt_registers(interrupt_priority);
93}
94
95static void mask_and_ack_imask(unsigned int irq)
96{
97 disable_imask_irq(irq);
98}
99
100static void end_imask_irq(unsigned int irq)
101{
102 if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS)))
103 enable_imask_irq(irq);
104}
105
106static void shutdown_imask_irq(unsigned int irq)
107{
108 /* Nothing to do */
109}
110
111void make_imask_irq(unsigned int irq)
112{
113 disable_irq_nosync(irq);
114 irq_desc[irq].handler = &imask_irq_type;
115 enable_irq(irq);
116}
diff --git a/arch/sh/kernel/cpu/irq_ipr.c b/arch/sh/kernel/cpu/irq_ipr.c
new file mode 100644
index 000000000000..7ea3d2d030e5
--- /dev/null
+++ b/arch/sh/kernel/cpu/irq_ipr.c
@@ -0,0 +1,339 @@
1/* $Id: irq_ipr.c,v 1.1.2.1 2002/11/17 10:53:43 mrbrown Exp $
2 *
3 * linux/arch/sh/kernel/irq_ipr.c
4 *
5 * Copyright (C) 1999 Niibe Yutaka & Takeshi Yaegashi
6 * Copyright (C) 2000 Kazumoto Kojima
7 * Copyright (C) 2003 Takashi Kusuda <kusuda-takashi@hitachi-ul.co.jp>
8 *
9 * Interrupt handling for IPR-based IRQ.
10 *
11 * Supported system:
12 * On-chip supporting modules (TMU, RTC, etc.).
13 * On-chip supporting modules for SH7709/SH7709A/SH7729/SH7300.
14 * Hitachi SolutionEngine external I/O:
15 * MS7709SE01, MS7709ASE01, and MS7750SE01
16 *
17 */
18
19#include <linux/config.h>
20#include <linux/init.h>
21#include <linux/irq.h>
22#include <linux/module.h>
23
24#include <asm/system.h>
25#include <asm/io.h>
26#include <asm/machvec.h>
27
28struct ipr_data {
29 unsigned int addr; /* Address of Interrupt Priority Register */
30 int shift; /* Shifts of the 16-bit data */
31 int priority; /* The priority */
32};
33static struct ipr_data ipr_data[NR_IRQS];
34
35static void enable_ipr_irq(unsigned int irq);
36static void disable_ipr_irq(unsigned int irq);
37
38/* shutdown is same as "disable" */
39#define shutdown_ipr_irq disable_ipr_irq
40
41static void mask_and_ack_ipr(unsigned int);
42static void end_ipr_irq(unsigned int irq);
43
44static unsigned int startup_ipr_irq(unsigned int irq)
45{
46 enable_ipr_irq(irq);
47 return 0; /* never anything pending */
48}
49
50static struct hw_interrupt_type ipr_irq_type = {
51 "IPR-IRQ",
52 startup_ipr_irq,
53 shutdown_ipr_irq,
54 enable_ipr_irq,
55 disable_ipr_irq,
56 mask_and_ack_ipr,
57 end_ipr_irq
58};
59
60static void disable_ipr_irq(unsigned int irq)
61{
62 unsigned long val, flags;
63 unsigned int addr = ipr_data[irq].addr;
64 unsigned short mask = 0xffff ^ (0x0f << ipr_data[irq].shift);
65
66 /* Set the priority in IPR to 0 */
67 local_irq_save(flags);
68 val = ctrl_inw(addr);
69 val &= mask;
70 ctrl_outw(val, addr);
71 local_irq_restore(flags);
72}
73
74static void enable_ipr_irq(unsigned int irq)
75{
76 unsigned long val, flags;
77 unsigned int addr = ipr_data[irq].addr;
78 int priority = ipr_data[irq].priority;
79 unsigned short value = (priority << ipr_data[irq].shift);
80
81 /* Set priority in IPR back to original value */
82 local_irq_save(flags);
83 val = ctrl_inw(addr);
84 val |= value;
85 ctrl_outw(val, addr);
86 local_irq_restore(flags);
87}
88
89static void mask_and_ack_ipr(unsigned int irq)
90{
91 disable_ipr_irq(irq);
92
93#if defined(CONFIG_CPU_SUBTYPE_SH7707) || defined(CONFIG_CPU_SUBTYPE_SH7709) || \
94 defined(CONFIG_CPU_SUBTYPE_SH7300) || defined(CONFIG_CPU_SUBTYPE_SH7705)
95 /* This is needed when we use edge triggered setting */
96 /* XXX: Is it really needed? */
97 if (IRQ0_IRQ <= irq && irq <= IRQ5_IRQ) {
98 /* Clear external interrupt request */
99 int a = ctrl_inb(INTC_IRR0);
100 a &= ~(1 << (irq - IRQ0_IRQ));
101 ctrl_outb(a, INTC_IRR0);
102 }
103#endif
104}
105
106static void end_ipr_irq(unsigned int irq)
107{
108 if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS)))
109 enable_ipr_irq(irq);
110}
111
112void make_ipr_irq(unsigned int irq, unsigned int addr, int pos, int priority)
113{
114 disable_irq_nosync(irq);
115 ipr_data[irq].addr = addr;
116 ipr_data[irq].shift = pos*4; /* POSition (0-3) x 4 means shift */
117 ipr_data[irq].priority = priority;
118
119 irq_desc[irq].handler = &ipr_irq_type;
120 disable_ipr_irq(irq);
121}
122
123#if defined(CONFIG_CPU_SUBTYPE_SH7705) || \
124 defined(CONFIG_CPU_SUBTYPE_SH7707) || \
125 defined(CONFIG_CPU_SUBTYPE_SH7709)
126static unsigned char pint_map[256];
127static unsigned long portcr_mask = 0;
128
129static void enable_pint_irq(unsigned int irq);
130static void disable_pint_irq(unsigned int irq);
131
132/* shutdown is same as "disable" */
133#define shutdown_pint_irq disable_pint_irq
134
135static void mask_and_ack_pint(unsigned int);
136static void end_pint_irq(unsigned int irq);
137
138static unsigned int startup_pint_irq(unsigned int irq)
139{
140 enable_pint_irq(irq);
141 return 0; /* never anything pending */
142}
143
144static struct hw_interrupt_type pint_irq_type = {
145 "PINT-IRQ",
146 startup_pint_irq,
147 shutdown_pint_irq,
148 enable_pint_irq,
149 disable_pint_irq,
150 mask_and_ack_pint,
151 end_pint_irq
152};
153
154static void disable_pint_irq(unsigned int irq)
155{
156 unsigned long val, flags;
157
158 local_irq_save(flags);
159 val = ctrl_inw(INTC_INTER);
160 val &= ~(1 << (irq - PINT_IRQ_BASE));
161 ctrl_outw(val, INTC_INTER); /* disable PINTn */
162 portcr_mask &= ~(3 << (irq - PINT_IRQ_BASE)*2);
163 local_irq_restore(flags);
164}
165
166static void enable_pint_irq(unsigned int irq)
167{
168 unsigned long val, flags;
169
170 local_irq_save(flags);
171 val = ctrl_inw(INTC_INTER);
172 val |= 1 << (irq - PINT_IRQ_BASE);
173 ctrl_outw(val, INTC_INTER); /* enable PINTn */
174 portcr_mask |= 3 << (irq - PINT_IRQ_BASE)*2;
175 local_irq_restore(flags);
176}
177
178static void mask_and_ack_pint(unsigned int irq)
179{
180 disable_pint_irq(irq);
181}
182
183static void end_pint_irq(unsigned int irq)
184{
185 if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS)))
186 enable_pint_irq(irq);
187}
188
189void make_pint_irq(unsigned int irq)
190{
191 disable_irq_nosync(irq);
192 irq_desc[irq].handler = &pint_irq_type;
193 disable_pint_irq(irq);
194}
195#endif
196
197void __init init_IRQ(void)
198{
199#if defined(CONFIG_CPU_SUBTYPE_SH7705) || \
200 defined(CONFIG_CPU_SUBTYPE_SH7707) || \
201 defined(CONFIG_CPU_SUBTYPE_SH7709)
202 int i;
203#endif
204
205 make_ipr_irq(TIMER_IRQ, TIMER_IPR_ADDR, TIMER_IPR_POS, TIMER_PRIORITY);
206 make_ipr_irq(TIMER1_IRQ, TIMER1_IPR_ADDR, TIMER1_IPR_POS, TIMER1_PRIORITY);
207#if defined(CONFIG_SH_RTC)
208 make_ipr_irq(RTC_IRQ, RTC_IPR_ADDR, RTC_IPR_POS, RTC_PRIORITY);
209#endif
210
211#ifdef SCI_ERI_IRQ
212 make_ipr_irq(SCI_ERI_IRQ, SCI_IPR_ADDR, SCI_IPR_POS, SCI_PRIORITY);
213 make_ipr_irq(SCI_RXI_IRQ, SCI_IPR_ADDR, SCI_IPR_POS, SCI_PRIORITY);
214 make_ipr_irq(SCI_TXI_IRQ, SCI_IPR_ADDR, SCI_IPR_POS, SCI_PRIORITY);
215#endif
216
217#ifdef SCIF1_ERI_IRQ
218 make_ipr_irq(SCIF1_ERI_IRQ, SCIF1_IPR_ADDR, SCIF1_IPR_POS, SCIF1_PRIORITY);
219 make_ipr_irq(SCIF1_RXI_IRQ, SCIF1_IPR_ADDR, SCIF1_IPR_POS, SCIF1_PRIORITY);
220 make_ipr_irq(SCIF1_BRI_IRQ, SCIF1_IPR_ADDR, SCIF1_IPR_POS, SCIF1_PRIORITY);
221 make_ipr_irq(SCIF1_TXI_IRQ, SCIF1_IPR_ADDR, SCIF1_IPR_POS, SCIF1_PRIORITY);
222#endif
223
224#if defined(CONFIG_CPU_SUBTYPE_SH7300)
225 make_ipr_irq(SCIF0_IRQ, SCIF0_IPR_ADDR, SCIF0_IPR_POS, SCIF0_PRIORITY);
226 make_ipr_irq(DMTE2_IRQ, DMA1_IPR_ADDR, DMA1_IPR_POS, DMA1_PRIORITY);
227 make_ipr_irq(DMTE3_IRQ, DMA1_IPR_ADDR, DMA1_IPR_POS, DMA1_PRIORITY);
228 make_ipr_irq(VIO_IRQ, VIO_IPR_ADDR, VIO_IPR_POS, VIO_PRIORITY);
229#endif
230
231#ifdef SCIF_ERI_IRQ
232 make_ipr_irq(SCIF_ERI_IRQ, SCIF_IPR_ADDR, SCIF_IPR_POS, SCIF_PRIORITY);
233 make_ipr_irq(SCIF_RXI_IRQ, SCIF_IPR_ADDR, SCIF_IPR_POS, SCIF_PRIORITY);
234 make_ipr_irq(SCIF_BRI_IRQ, SCIF_IPR_ADDR, SCIF_IPR_POS, SCIF_PRIORITY);
235 make_ipr_irq(SCIF_TXI_IRQ, SCIF_IPR_ADDR, SCIF_IPR_POS, SCIF_PRIORITY);
236#endif
237
238#ifdef IRDA_ERI_IRQ
239 make_ipr_irq(IRDA_ERI_IRQ, IRDA_IPR_ADDR, IRDA_IPR_POS, IRDA_PRIORITY);
240 make_ipr_irq(IRDA_RXI_IRQ, IRDA_IPR_ADDR, IRDA_IPR_POS, IRDA_PRIORITY);
241 make_ipr_irq(IRDA_BRI_IRQ, IRDA_IPR_ADDR, IRDA_IPR_POS, IRDA_PRIORITY);
242 make_ipr_irq(IRDA_TXI_IRQ, IRDA_IPR_ADDR, IRDA_IPR_POS, IRDA_PRIORITY);
243#endif
244
245#if defined(CONFIG_CPU_SUBTYPE_SH7707) || defined(CONFIG_CPU_SUBTYPE_SH7709) || \
246 defined(CONFIG_CPU_SUBTYPE_SH7300) || defined(CONFIG_CPU_SUBTYPE_SH7705)
247 /*
248 * Initialize the Interrupt Controller (INTC)
249 * registers to their power on values
250 */
251
252 /*
253 * Enable external irq (INTC IRQ mode).
254 * You should set corresponding bits of PFC to "00"
255 * to enable these interrupts.
256 */
257 make_ipr_irq(IRQ0_IRQ, IRQ0_IPR_ADDR, IRQ0_IPR_POS, IRQ0_PRIORITY);
258 make_ipr_irq(IRQ1_IRQ, IRQ1_IPR_ADDR, IRQ1_IPR_POS, IRQ1_PRIORITY);
259 make_ipr_irq(IRQ2_IRQ, IRQ2_IPR_ADDR, IRQ2_IPR_POS, IRQ2_PRIORITY);
260 make_ipr_irq(IRQ3_IRQ, IRQ3_IPR_ADDR, IRQ3_IPR_POS, IRQ3_PRIORITY);
261 make_ipr_irq(IRQ4_IRQ, IRQ4_IPR_ADDR, IRQ4_IPR_POS, IRQ4_PRIORITY);
262 make_ipr_irq(IRQ5_IRQ, IRQ5_IPR_ADDR, IRQ5_IPR_POS, IRQ5_PRIORITY);
263#if !defined(CONFIG_CPU_SUBTYPE_SH7300)
264 make_ipr_irq(PINT0_IRQ, PINT0_IPR_ADDR, PINT0_IPR_POS, PINT0_PRIORITY);
265 make_ipr_irq(PINT8_IRQ, PINT8_IPR_ADDR, PINT8_IPR_POS, PINT8_PRIORITY);
266 enable_ipr_irq(PINT0_IRQ);
267 enable_ipr_irq(PINT8_IRQ);
268
269 for(i = 0; i < 16; i++)
270 make_pint_irq(PINT_IRQ_BASE + i);
271 for(i = 0; i < 256; i++)
272 {
273 if(i & 1) pint_map[i] = 0;
274 else if(i & 2) pint_map[i] = 1;
275 else if(i & 4) pint_map[i] = 2;
276 else if(i & 8) pint_map[i] = 3;
277 else if(i & 0x10) pint_map[i] = 4;
278 else if(i & 0x20) pint_map[i] = 5;
279 else if(i & 0x40) pint_map[i] = 6;
280 else if(i & 0x80) pint_map[i] = 7;
281 }
282#endif /* !CONFIG_CPU_SUBTYPE_SH7300 */
283#endif /* CONFIG_CPU_SUBTYPE_SH7707 || CONFIG_CPU_SUBTYPE_SH7709 || CONFIG_CPU_SUBTYPE_SH7300*/
284
285#ifdef CONFIG_CPU_SUBTYPE_ST40
286 init_IRQ_intc2();
287#endif
288
289 /* Perform the machine specific initialisation */
290 if (sh_mv.mv_init_irq != NULL) {
291 sh_mv.mv_init_irq();
292 }
293}
294#if defined(CONFIG_CPU_SUBTYPE_SH7707) || defined(CONFIG_CPU_SUBTYPE_SH7709) || \
295 defined(CONFIG_CPU_SUBTYPE_SH7300) || defined(CONFIG_CPU_SUBTYPE_SH7705)
296int ipr_irq_demux(int irq)
297{
298#if !defined(CONFIG_CPU_SUBTYPE_SH7300)
299 unsigned long creg, dreg, d, sav;
300
301 if(irq == PINT0_IRQ)
302 {
303#if defined(CONFIG_CPU_SUBTYPE_SH7707)
304 creg = PORT_PACR;
305 dreg = PORT_PADR;
306#else
307 creg = PORT_PCCR;
308 dreg = PORT_PCDR;
309#endif
310 sav = ctrl_inw(creg);
311 ctrl_outw(sav | portcr_mask, creg);
312 d = (~ctrl_inb(dreg) ^ ctrl_inw(INTC_ICR2)) & ctrl_inw(INTC_INTER) & 0xff;
313 ctrl_outw(sav, creg);
314 if(d == 0) return irq;
315 return PINT_IRQ_BASE + pint_map[d];
316 }
317 else if(irq == PINT8_IRQ)
318 {
319#if defined(CONFIG_CPU_SUBTYPE_SH7707)
320 creg = PORT_PBCR;
321 dreg = PORT_PBDR;
322#else
323 creg = PORT_PFCR;
324 dreg = PORT_PFDR;
325#endif
326 sav = ctrl_inw(creg);
327 ctrl_outw(sav | (portcr_mask >> 16), creg);
328 d = (~ctrl_inb(dreg) ^ (ctrl_inw(INTC_ICR2) >> 8)) & (ctrl_inw(INTC_INTER) >> 8) & 0xff;
329 ctrl_outw(sav, creg);
330 if(d == 0) return irq;
331 return PINT_IRQ_BASE + 8 + pint_map[d];
332 }
333#endif
334 return irq;
335}
336#endif
337
338EXPORT_SYMBOL(make_ipr_irq);
339
diff --git a/arch/sh/kernel/cpu/rtc.c b/arch/sh/kernel/cpu/rtc.c
new file mode 100644
index 000000000000..f8361f5e788b
--- /dev/null
+++ b/arch/sh/kernel/cpu/rtc.c
@@ -0,0 +1,136 @@
1/*
2 * linux/arch/sh/kernel/rtc.c -- SH3 / SH4 on-chip RTC support
3 *
4 * Copyright (C) 2000 Philipp Rumpf <prumpf@tux.org>
5 * Copyright (C) 1999 Tetsuya Okada & Niibe Yutaka
6 */
7
8#include <linux/init.h>
9#include <linux/kernel.h>
10#include <linux/sched.h>
11#include <linux/time.h>
12
13#include <asm/io.h>
14#include <asm/rtc.h>
15
16#ifndef BCD_TO_BIN
17#define BCD_TO_BIN(val) ((val)=((val)&15) + ((val)>>4)*10)
18#endif
19
20#ifndef BIN_TO_BCD
21#define BIN_TO_BCD(val) ((val)=(((val)/10)<<4) + (val)%10)
22#endif
23
/*
 * Read the on-chip RTC into *ts.
 *
 * The counter registers are sampled as a set with interrupts off; if a
 * carry occurred mid-read (RCR1_CF raised, or the inverted bit of the
 * 128Hz counter changed between the first and second R64CNT read) the
 * whole set is re-read.  All counters are BCD-encoded.
 */
void sh_rtc_gettimeofday(struct timespec *ts)
{
	unsigned int sec128, sec, sec2, min, hr, wk, day, mon, yr, yr100, cf_bit;
	unsigned long flags;

 again:
	do {
		local_irq_save(flags);
		ctrl_outb(0, RCR1);  /* Clear CF-bit */
		sec128 = ctrl_inb(R64CNT);
		sec = ctrl_inb(RSECCNT);
		min = ctrl_inb(RMINCNT);
		hr = ctrl_inb(RHRCNT);
		wk = ctrl_inb(RWKCNT);
		day = ctrl_inb(RDAYCNT);
		mon = ctrl_inb(RMONCNT);
#if defined(CONFIG_CPU_SH4)
		/* SH-4: 4-digit BCD year, century in the high byte. */
		yr = ctrl_inw(RYRCNT);
		yr100 = (yr >> 8);
		yr &= 0xff;
#else
		/* SH-3: 2-digit BCD year; infer the century from it. */
		yr = ctrl_inb(RYRCNT);
		yr100 = (yr == 0x99) ? 0x19 : 0x20;
#endif
		sec2 = ctrl_inb(R64CNT);
		cf_bit = ctrl_inb(RCR1) & RCR1_CF;
		local_irq_restore(flags);
	} while (cf_bit != 0 || ((sec128 ^ sec2) & RTC_BIT_INVERTED) != 0);

	BCD_TO_BIN(yr100);
	BCD_TO_BIN(yr);
	BCD_TO_BIN(mon);
	BCD_TO_BIN(day);
	BCD_TO_BIN(hr);
	BCD_TO_BIN(min);
	BCD_TO_BIN(sec);

	/* Garbage in the counters (e.g. first power-up): reinitialise the
	 * RTC to a sane date and retry the read. */
	if (yr > 99 || mon < 1 || mon > 12 || day > 31 || day < 1 ||
	    hr > 23 || min > 59 || sec > 59) {
		printk(KERN_ERR
		       "SH RTC: invalid value, resetting to 1 Jan 2000\n");
		local_irq_save(flags);
		ctrl_outb(RCR2_RESET, RCR2);  /* Reset & Stop */
		ctrl_outb(0, RSECCNT);
		ctrl_outb(0, RMINCNT);
		ctrl_outb(0, RHRCNT);
		ctrl_outb(6, RWKCNT);		/* 1 Jan 2000 was a Saturday */
		ctrl_outb(1, RDAYCNT);
		ctrl_outb(1, RMONCNT);
#if defined(CONFIG_CPU_SH4)
		ctrl_outw(0x2000, RYRCNT);	/* BCD year 2000 */
#else
		ctrl_outb(0, RYRCNT);
#endif
		ctrl_outb(RCR2_RTCEN|RCR2_START, RCR2);  /* Start */
		goto again;
	}

#if RTC_BIT_INVERTED != 0
	/* NOTE(review): when the inverted bit of R64CNT is set the seconds
	 * counter appears to run one ahead of the fraction and is
	 * compensated here - confirm against the CPU's RTC errata. */
	if ((sec128 & RTC_BIT_INVERTED))
		sec--;
#endif

	ts->tv_sec = mktime(yr100 * 100 + yr, mon, day, hr, min, sec);
	/* Sub-second part from the 128Hz counter. */
	ts->tv_nsec = ((sec128 * 1000000) / 128) * 1000;
}
90
/*
 * Changed to only care about tv_sec, and not the full timespec struct
 * (i.e. tv_nsec).  It can easily be switched to timespec for future cpus
 * that support setting usec or nsec RTC values.
 *
 * Mirrors the classic CMOS-clock logic: only the seconds and minutes
 * counters are written, so the hour/date (and hence unknown time
 * zones) are never disturbed.  Returns 0 on success, -1 when the RTC
 * is too far off to be corrected this way.
 */
int sh_rtc_settimeofday(const time_t secs)
{
	int retval = 0;
	int real_seconds, real_minutes, cmos_minutes;
	unsigned long flags;

	local_irq_save(flags);
	ctrl_outb(RCR2_RESET, RCR2);  /* Reset pre-scaler & stop RTC */

	cmos_minutes = ctrl_inb(RMINCNT);
	BCD_TO_BIN(cmos_minutes);

	/*
	 * since we're only adjusting minutes and seconds,
	 * don't interfere with hour overflow. This avoids
	 * messing with unknown time zones but requires your
	 * RTC not to be off by more than 15 minutes
	 */
	real_seconds = secs % 60;
	real_minutes = secs / 60;
	if (((abs(real_minutes - cmos_minutes) + 15)/30) & 1)
		real_minutes += 30;	/* correct for half hour time zone */
	real_minutes %= 60;

	if (abs(real_minutes - cmos_minutes) < 30) {
		BIN_TO_BCD(real_seconds);
		BIN_TO_BCD(real_minutes);
		ctrl_outb(real_seconds, RSECCNT);
		ctrl_outb(real_minutes, RMINCNT);
	} else {
		printk(KERN_WARNING
		       "set_rtc_time: can't update from %d to %d\n",
		       cmos_minutes, real_minutes);
		retval = -1;
	}

	ctrl_outb(RCR2_RTCEN|RCR2_START, RCR2);  /* Start RTC */
	local_irq_restore(flags);

	return retval;
}
diff --git a/arch/sh/kernel/cpu/sh2/Makefile b/arch/sh/kernel/cpu/sh2/Makefile
new file mode 100644
index 000000000000..389353fba608
--- /dev/null
+++ b/arch/sh/kernel/cpu/sh2/Makefile
@@ -0,0 +1,6 @@
1#
2# Makefile for the Linux/SuperH SH-2 backends.
3#
4
5obj-y := probe.o
6
diff --git a/arch/sh/kernel/cpu/sh2/probe.c b/arch/sh/kernel/cpu/sh2/probe.c
new file mode 100644
index 000000000000..f17a2a0d588e
--- /dev/null
+++ b/arch/sh/kernel/cpu/sh2/probe.c
@@ -0,0 +1,39 @@
1/*
2 * arch/sh/kernel/cpu/sh2/probe.c
3 *
4 * CPU Subtype Probing for SH-2.
5 *
6 * Copyright (C) 2002 Paul Mundt
7 *
8 * This file is subject to the terms and conditions of the GNU General Public
9 * License. See the file "COPYING" in the main directory of this archive
10 * for more details.
11 */
12
13
14#include <linux/init.h>
15#include <asm/processor.h>
16#include <asm/cache.h>
17
18int __init detect_cpu_and_cache_system(void)
19{
20 /*
21 * For now, assume SH7604 .. fix this later.
22 */
23 cpu_data->type = CPU_SH7604;
24 cpu_data->dcache.ways = 4;
25 cpu_data->dcache.way_shift = 6;
26 cpu_data->dcache.sets = 64;
27 cpu_data->dcache.entry_shift = 4;
28 cpu_data->dcache.linesz = L1_CACHE_BYTES;
29 cpu_data->dcache.flags = 0;
30
31 /*
32 * SH-2 doesn't have separate caches
33 */
34 cpu_data->dcache.flags |= SH_CACHE_COMBINED;
35 cpu_data->icache = cpu_data->dcache;
36
37 return 0;
38}
39
diff --git a/arch/sh/kernel/cpu/sh3/Makefile b/arch/sh/kernel/cpu/sh3/Makefile
new file mode 100644
index 000000000000..a64532e4dc63
--- /dev/null
+++ b/arch/sh/kernel/cpu/sh3/Makefile
@@ -0,0 +1,6 @@
1#
2# Makefile for the Linux/SuperH SH-3 backends.
3#
4
5obj-y := ex.o probe.o
6
diff --git a/arch/sh/kernel/cpu/sh3/ex.S b/arch/sh/kernel/cpu/sh3/ex.S
new file mode 100644
index 000000000000..966c0858b714
--- /dev/null
+++ b/arch/sh/kernel/cpu/sh3/ex.S
@@ -0,0 +1,199 @@
1/*
2 * arch/sh/kernel/cpu/sh3/ex.S
3 *
4 * The SH-3 exception vector table.
5
6 * Copyright (C) 1999, 2000, 2002 Niibe Yutaka
7 * Copyright (C) 2003 Paul Mundt
8 *
9 * This file is subject to the terms and conditions of the GNU General Public
10 * License. See the file "COPYING" in the main directory of this archive
11 * for more details.
12 *
13 */
14#include <linux/linkage.h>
15#include <linux/config.h>
16
17 .align 2
18 .data
19
/*
 * SH-3 CPU exception dispatch table; the VBR-relative vector offset of
 * each entry is noted in the trailing comments.  The slots marked
 * "filled by trap_init" are patched at runtime.
 */
ENTRY(exception_handling_table)
	.long	exception_error		/* 000 */
	.long	exception_error
#if defined(CONFIG_MMU)
	.long	tlb_miss_load		/* 040 */
	.long	tlb_miss_store
	.long	initial_page_write
	.long	tlb_protection_violation_load
	.long	tlb_protection_violation_store
	.long	address_error_load
	.long	address_error_store	/* 100 */
#else
	.long	exception_error	! tlb miss load		/* 040 */
	.long	exception_error	! tlb miss store
	.long	exception_error	! initial page write
	.long	exception_error	! tlb prot violation load
	.long	exception_error	! tlb prot violation store
	.long	exception_error	! address error load
	.long	exception_error	! address error store	/* 100 */
#endif
	.long	exception_error	! fpu_exception	/* 120 */
	.long	exception_error			/* 140 */
	.long	system_call	! Unconditional Trap	 /* 160 */
	.long	exception_error	! reserved_instruction (filled by trap_init) /* 180 */
	.long	exception_error	! illegal_slot_instruction (filled by trap_init) /*1A0*/
ENTRY(nmi_slot)
#if defined (CONFIG_KGDB_NMI)
	.long	debug_enter	/* 1C0 */	! Allow trap to debugger
#else
	.long	exception_none	/* 1C0 */	! Not implemented yet
#endif
ENTRY(user_break_point_trap)
	.long	break_point_trap	/* 1E0 */
53ENTRY(interrupt_table)
54 ! external hardware
55 .long do_IRQ ! 0000 /* 200 */
56 .long do_IRQ ! 0001
57 .long do_IRQ ! 0010
58 .long do_IRQ ! 0011
59 .long do_IRQ ! 0100
60 .long do_IRQ ! 0101
61 .long do_IRQ ! 0110
62 .long do_IRQ ! 0111
63 .long do_IRQ ! 1000 /* 300 */
64 .long do_IRQ ! 1001
65 .long do_IRQ ! 1010
66 .long do_IRQ ! 1011
67 .long do_IRQ ! 1100
68 .long do_IRQ ! 1101
69 .long do_IRQ ! 1110
70 .long exception_error
71 ! Internal hardware
72 .long do_IRQ ! TMU0 tuni0 /* 400 */
73 .long do_IRQ ! TMU1 tuni1
74 .long do_IRQ ! TMU2 tuni2
75 .long do_IRQ ! ticpi2
76 .long do_IRQ ! RTC ati
77 .long do_IRQ ! pri
78 .long do_IRQ ! cui
79 .long do_IRQ ! SCI eri
80 .long do_IRQ ! rxi /* 500 */
81 .long do_IRQ ! txi
82 .long do_IRQ ! tei
83 .long do_IRQ ! WDT iti /* 560 */
84 .long do_IRQ ! REF rcmi
85 .long do_IRQ ! rovi
86 .long do_IRQ
87 .long do_IRQ /* 5E0 */
88#if defined(CONFIG_CPU_SUBTYPE_SH7707) || defined(CONFIG_CPU_SUBTYPE_SH7709) || \
89 defined(CONFIG_CPU_SUBTYPE_SH7300) || defined(CONFIG_CPU_SUBTYPE_SH7705)
90 .long do_IRQ ! 32 IRQ irq0 /* 600 */
91 .long do_IRQ ! 33 irq1
92 .long do_IRQ ! 34 irq2
93 .long do_IRQ ! 35 irq3
94 .long do_IRQ ! 36 irq4
95 .long do_IRQ ! 37 irq5
96 .long do_IRQ ! 38
97 .long do_IRQ ! 39
98 .long do_IRQ ! 40 PINT pint0-7 /* 700 */
99 .long do_IRQ ! 41 pint8-15
100 .long do_IRQ ! 42
101 .long do_IRQ ! 43
102 .long do_IRQ ! 44
103 .long do_IRQ ! 45
104 .long do_IRQ ! 46
105 .long do_IRQ ! 47
106 .long do_IRQ ! 48 DMAC dei0 /* 800 */
107 .long do_IRQ ! 49 dei1
108 .long do_IRQ ! 50 dei2
109 .long do_IRQ ! 51 dei3
110 .long do_IRQ ! 52 IrDA eri1
111 .long do_IRQ ! 53 rxi1
112 .long do_IRQ ! 54 bri1
113 .long do_IRQ ! 55 txi1
114 .long do_IRQ ! 56 SCIF eri2
115 .long do_IRQ ! 57 rxi2
116 .long do_IRQ ! 58 bri2
117 .long do_IRQ ! 59 txi2
118 .long do_IRQ ! 60 ADC adi /* 980 */
119#if defined(CONFIG_CPU_SUBTYPE_SH7705)
120 .long exception_none ! 61 /* 9A0 */
121 .long exception_none ! 62
122 .long exception_none ! 63
123 .long exception_none ! 64 /* A00 */
124 .long do_IRQ ! 65 USB usi0
125 .long do_IRQ ! 66 usi1
126 .long exception_none ! 67
127 .long exception_none ! 68
128 .long exception_none ! 69
129 .long exception_none ! 70
130 .long exception_none ! 71
131 .long exception_none ! 72 /* B00 */
132 .long exception_none ! 73
133 .long exception_none ! 74
134 .long exception_none ! 75
135 .long exception_none ! 76
136 .long exception_none ! 77
137 .long exception_none ! 78
138 .long exception_none ! 79
139 .long do_IRQ ! 80 TPU0 tpi0 /* C00 */
140 .long do_IRQ ! 81 TPU1 tpi1
141 .long exception_none ! 82
142 .long exception_none ! 83
143 .long do_IRQ ! 84 TPU2 tpi2
144 .long do_IRQ ! 85 TPU3 tpi3 /* CA0 */
145#endif
146#if defined(CONFIG_CPU_SUBTYPE_SH7707) || defined(CONFIG_CPU_SUBTYPE_SH7300)
147 .long do_IRQ ! 61 LCDC lcdi /* 9A0 */
148 .long do_IRQ ! 62 PCC pcc0i
149 .long do_IRQ ! 63 pcc1i /* 9E0 */
150#endif
151#if defined(CONFIG_CPU_SUBTYPE_SH7300)
152 .long do_IRQ ! 64
153 .long do_IRQ ! 65
154 .long do_IRQ ! 66
155 .long do_IRQ ! 67
156 .long do_IRQ ! 68
157 .long do_IRQ ! 69
158 .long do_IRQ ! 70
159 .long do_IRQ ! 71
160 .long do_IRQ ! 72
161 .long do_IRQ ! 73
162 .long do_IRQ ! 74
163 .long do_IRQ ! 75
164 .long do_IRQ ! 76
165 .long do_IRQ ! 77
166 .long do_IRQ ! 78
167 .long do_IRQ ! 79
168 .long do_IRQ ! 80 SCIF0(SH7300)
169 .long do_IRQ ! 81
170 .long do_IRQ ! 82
171 .long do_IRQ ! 83
172 .long do_IRQ ! 84
173 .long do_IRQ ! 85
174 .long do_IRQ ! 86
175 .long do_IRQ ! 87
176 .long do_IRQ ! 88
177 .long do_IRQ ! 89
178 .long do_IRQ ! 90
179 .long do_IRQ ! 91
180 .long do_IRQ ! 92
181 .long do_IRQ ! 93
182 .long do_IRQ ! 94
183 .long do_IRQ ! 95
184 .long do_IRQ ! 96
185 .long do_IRQ ! 97
186 .long do_IRQ ! 98
187 .long do_IRQ ! 99
188 .long do_IRQ ! 100
189 .long do_IRQ ! 101
190 .long do_IRQ ! 102
191 .long do_IRQ ! 103
192 .long do_IRQ ! 104
193 .long do_IRQ ! 105
194 .long do_IRQ ! 106
195 .long do_IRQ ! 107
196 .long do_IRQ ! 108
197#endif
198#endif
199
diff --git a/arch/sh/kernel/cpu/sh3/probe.c b/arch/sh/kernel/cpu/sh3/probe.c
new file mode 100644
index 000000000000..5cdc88638601
--- /dev/null
+++ b/arch/sh/kernel/cpu/sh3/probe.c
@@ -0,0 +1,97 @@
1/*
2 * arch/sh/kernel/cpu/sh3/probe.c
3 *
4 * CPU Subtype Probing for SH-3.
5 *
6 * Copyright (C) 1999, 2000 Niibe Yutaka
7 * Copyright (C) 2002 Paul Mundt
8 *
9 * This file is subject to the terms and conditions of the GNU General Public
10 * License. See the file "COPYING" in the main directory of this archive
11 * for more details.
12 */
13
14#include <linux/init.h>
15#include <asm/processor.h>
16#include <asm/cache.h>
17#include <asm/io.h>
18
/*
 * Determine the SH-3 subtype and cache geometry.
 *
 * The operand-cache address array is probed through the P2 (uncached)
 * area: if entry (3 << 12) "shadows" entry (1 << 12) the index has
 * wrapped, so the cache has 128 entries (SH7708-class); otherwise it
 * has 256 entries (SH7709A/7729-class).
 */
int __init detect_cpu_and_cache_system(void)
{
	unsigned long addr0, addr1, data0, data1, data2, data3;

	jump_to_P2();
	/*
	 * Check if the entry shadows or not.
	 * When shadowed, it's 128-entry system.
	 * Otherwise, it's 256-entry system.
	 */
	addr0 = CACHE_OC_ADDRESS_ARRAY + (3 << 12);
	addr1 = CACHE_OC_ADDRESS_ARRAY + (1 << 12);

	/* First, write back & invalidate */
	data0 = ctrl_inl(addr0);
	ctrl_outl(data0&~(SH_CACHE_VALID|SH_CACHE_UPDATED), addr0);
	data1 = ctrl_inl(addr1);
	ctrl_outl(data1&~(SH_CACHE_VALID|SH_CACHE_UPDATED), addr1);

	/* Next, check if there's shadow or not: toggle the valid bit of
	 * both entries and see whether addr0 reflects addr1's write. */
	data0 = ctrl_inl(addr0);
	data0 ^= SH_CACHE_VALID;
	ctrl_outl(data0, addr0);
	data1 = ctrl_inl(addr1);
	data2 = data1 ^ SH_CACHE_VALID;
	ctrl_outl(data2, addr1);
	data3 = ctrl_inl(addr0);

	/* Lastly, invalidate them. */
	ctrl_outl(data0&~SH_CACHE_VALID, addr0);
	ctrl_outl(data2&~SH_CACHE_VALID, addr1);

	back_to_P1();

	/* Geometry common to all supported SH-3 parts. */
	cpu_data->dcache.ways = 4;
	cpu_data->dcache.entry_shift = 4;
	cpu_data->dcache.linesz = L1_CACHE_BYTES;
	cpu_data->dcache.flags = 0;

	/*
	 * 7709A/7729 has 16K cache (256-entry), while 7702 has only
	 * 2K(direct) 7702 is not supported (yet)
	 */
	if (data0 == data1 && data2 == data3) {	/* Shadow */
		cpu_data->dcache.way_incr = (1 << 11);
		cpu_data->dcache.entry_mask = 0x7f0;
		cpu_data->dcache.sets = 128;
		cpu_data->type = CPU_SH7708;

		cpu_data->flags |= CPU_HAS_MMU_PAGE_ASSOC;
	} else {				/* 7709A or 7729  */
		cpu_data->dcache.way_incr = (1 << 12);
		cpu_data->dcache.entry_mask = 0xff0;
		cpu_data->dcache.sets = 256;
		cpu_data->type = CPU_SH7729;

#if defined(CONFIG_CPU_SUBTYPE_SH7705)
		cpu_data->type = CPU_SH7705;

#if defined(CONFIG_SH7705_CACHE_32KB)
		/* SH7705 can optionally run its cache in 32KB mode. */
		cpu_data->dcache.way_incr = (1 << 13);
		cpu_data->dcache.entry_mask = 0x1ff0;
		cpu_data->dcache.sets = 512;
		ctrl_outl(CCR_CACHE_32KB, CCR3);
#else
		ctrl_outl(CCR_CACHE_16KB, CCR3);
#endif
#endif
	}

	/*
	 * SH-3 doesn't have separate caches
	 */
	cpu_data->dcache.flags |= SH_CACHE_COMBINED;
	cpu_data->icache = cpu_data->dcache;

	return 0;
}
97
diff --git a/arch/sh/kernel/cpu/sh4/Makefile b/arch/sh/kernel/cpu/sh4/Makefile
new file mode 100644
index 000000000000..ead1071eac73
--- /dev/null
+++ b/arch/sh/kernel/cpu/sh4/Makefile
@@ -0,0 +1,10 @@
1#
2# Makefile for the Linux/SuperH SH-4 backends.
3#
4
5obj-y := ex.o probe.o
6
7obj-$(CONFIG_SH_FPU) += fpu.o
8obj-$(CONFIG_CPU_SUBTYPE_ST40STB1) += irq_intc2.o
9obj-$(CONFIG_SH_STORE_QUEUES) += sq.o
10
diff --git a/arch/sh/kernel/cpu/sh4/ex.S b/arch/sh/kernel/cpu/sh4/ex.S
new file mode 100644
index 000000000000..8221e9d15515
--- /dev/null
+++ b/arch/sh/kernel/cpu/sh4/ex.S
@@ -0,0 +1,384 @@
1/*
2 * arch/sh/kernel/cpu/sh4/ex.S
3 *
4 * The SH-4 exception vector table.
5
6 * Copyright (C) 1999, 2000, 2002 Niibe Yutaka
7 * Copyright (C) 2003 Paul Mundt
8 *
9 * This file is subject to the terms and conditions of the GNU General Public
10 * License. See the file "COPYING" in the main directory of this archive
11 * for more details.
12 *
13 */
14#include <linux/linkage.h>
15#include <linux/config.h>
16
17 .align 2
18 .data
19
20ENTRY(exception_handling_table)
21 .long exception_error /* 000 */
22 .long exception_error
23#if defined(CONFIG_MMU)
24 .long tlb_miss_load /* 040 */
25 .long tlb_miss_store
26 .long initial_page_write
27 .long tlb_protection_violation_load
28 .long tlb_protection_violation_store
29 .long address_error_load
30 .long address_error_store /* 100 */
31#else
32 .long exception_error ! tlb miss load /* 040 */
33 .long exception_error ! tlb miss store
34 .long exception_error ! initial page write
35 .long exception_error ! tlb prot violation load
36 .long exception_error ! tlb prot violation store
37 .long exception_error ! address error load
38 .long exception_error ! address error store /* 100 */
39#endif
40#if defined(CONFIG_SH_FPU)
41 .long do_fpu_error /* 120 */
42#else
43 .long exception_error /* 120 */
44#endif
45 .long exception_error /* 140 */
46 .long system_call ! Unconditional Trap /* 160 */
47 .long exception_error ! reserved_instruction (filled by trap_init) /* 180 */
48 .long exception_error ! illegal_slot_instruction (filled by trap_init) /*1A0*/
49ENTRY(nmi_slot)
50#if defined (CONFIG_KGDB_NMI)
51 .long debug_enter /* 1C0 */ ! Allow trap to debugger
52#else
53 .long exception_none /* 1C0 */ ! Not implemented yet
54#endif
55ENTRY(user_break_point_trap)
56 .long break_point_trap /* 1E0 */
57ENTRY(interrupt_table)
58 ! external hardware
59 .long do_IRQ ! 0000 /* 200 */
60 .long do_IRQ ! 0001
61 .long do_IRQ ! 0010
62 .long do_IRQ ! 0011
63 .long do_IRQ ! 0100
64 .long do_IRQ ! 0101
65 .long do_IRQ ! 0110
66 .long do_IRQ ! 0111
67 .long do_IRQ ! 1000 /* 300 */
68 .long do_IRQ ! 1001
69 .long do_IRQ ! 1010
70 .long do_IRQ ! 1011
71 .long do_IRQ ! 1100
72 .long do_IRQ ! 1101
73 .long do_IRQ ! 1110
74 .long exception_error
75 ! Internal hardware
76 .long do_IRQ ! TMU0 tuni0 /* 400 */
77 .long do_IRQ ! TMU1 tuni1
78 .long do_IRQ ! TMU2 tuni2
79 .long do_IRQ ! ticpi2
80#if defined(CONFIG_CPU_SUBTYPE_SH7760)
81 .long exception_error
82 .long exception_error
83 .long exception_error
84 .long exception_error
85 .long exception_error /* 500 */
86 .long exception_error
87 .long exception_error
88#else
89 .long do_IRQ ! RTC ati
90 .long do_IRQ ! pri
91 .long do_IRQ ! cui
92 .long do_IRQ ! SCI eri
93 .long do_IRQ ! rxi /* 500 */
94 .long do_IRQ ! txi
95 .long do_IRQ ! tei
96#endif
97 .long do_IRQ ! WDT iti /* 560 */
98 .long do_IRQ ! REF rcmi
99 .long do_IRQ ! rovi
100 .long do_IRQ
101 .long do_IRQ /* 5E0 */
102 .long do_IRQ ! 32 Hitachi UDI /* 600 */
103 .long do_IRQ ! 33 GPIO
104 .long do_IRQ ! 34 DMAC dmte0
105 .long do_IRQ ! 35 dmte1
106 .long do_IRQ ! 36 dmte2
107 .long do_IRQ ! 37 dmte3
108 .long do_IRQ ! 38 dmae
109 .long exception_error ! 39 /* 6E0 */
110#if defined(CONFIG_CPU_SUBTYPE_SH7760)
111 .long exception_error /* 700 */
112 .long exception_error
113 .long exception_error
114 .long exception_error /* 760 */
115#else
116 .long do_IRQ ! 40 SCIF eri /* 700 */
117 .long do_IRQ ! 41 rxi
118 .long do_IRQ ! 42 bri
119 .long do_IRQ ! 43 txi
120#endif
121#if CONFIG_NR_ONCHIP_DMA_CHANNELS == 8
122 .long do_IRQ ! 44 DMAC dmte4 /* 780 */
123 .long do_IRQ ! 45 dmte5
124 .long do_IRQ ! 46 dmte6
125 .long do_IRQ ! 47 dmte7 /* 7E0 */
126#else
127 .long exception_error ! 44 /* 780 */
128 .long exception_error ! 45
129 .long exception_error ! 46
130 .long exception_error ! 47
131#endif
132#if defined(CONFIG_SH_FPU)
133 .long do_fpu_state_restore ! 48 /* 800 */
134 .long do_fpu_state_restore ! 49 /* 820 */
135#else
136 .long exception_error
137 .long exception_error
138#endif
139#if defined(CONFIG_CPU_SUBTYPE_SH7751)
140 .long exception_error /* 840 */
141 .long exception_error
142 .long exception_error
143 .long exception_error
144 .long exception_error
145 .long exception_error
146 .long exception_error /* 900 */
147 .long exception_error
148 .long exception_error
149 .long exception_error
150 .long exception_error
151 .long exception_error
152 .long exception_error
153 .long exception_error
154 .long do_IRQ ! PCI serr /* A00 */
155 .long do_IRQ ! dma3
156 .long do_IRQ ! dma2
157 .long do_IRQ ! dma1
158 .long do_IRQ ! dma0
159 .long do_IRQ ! pwon
160 .long do_IRQ ! pwdwn
161 .long do_IRQ ! err
162 .long do_IRQ ! TMU3 tuni3 /* B00 */
163 .long exception_error
164 .long exception_error
165 .long exception_error
166 .long do_IRQ ! TMU4 tuni4 /* B80 */
167#elif defined(CONFIG_CPU_SUBTYPE_SH7760)
168 .long do_IRQ ! IRQ irq6 /* 840 */
169 .long do_IRQ ! irq7
170 .long do_IRQ ! SCIF eri0
171 .long do_IRQ ! rxi0
172 .long do_IRQ ! bri0
173 .long do_IRQ ! txi0
174 .long do_IRQ ! HCAN2 cani0 /* 900 */
175 .long do_IRQ ! cani1
176 .long do_IRQ ! SSI ssii0
177 .long do_IRQ ! ssii1
178 .long do_IRQ ! HAC haci0
179 .long do_IRQ ! haci1
180 .long do_IRQ ! IIC iici0
181 .long do_IRQ ! iici1
182 .long do_IRQ ! USB usbi /* A00 */
183 .long do_IRQ ! LCDC vint
184 .long exception_error
185 .long exception_error
186 .long do_IRQ ! DMABRG dmabrgi0
187 .long do_IRQ ! dmabrgi1
188 .long do_IRQ ! dmabrgi2
189 .long exception_error
190 .long do_IRQ ! SCIF eri1 /* B00 */
191 .long do_IRQ ! rxi1
192 .long do_IRQ ! bri1
193 .long do_IRQ ! txi1
194 .long do_IRQ ! eri2
195 .long do_IRQ ! rxi2
196 .long do_IRQ ! bri2
197 .long do_IRQ ! txi2
198 .long do_IRQ ! SIM simeri /* C00 */
199 .long do_IRQ ! simrxi
200 .long do_IRQ ! simtxi
201 .long do_IRQ ! simtei
202 .long do_IRQ ! HSPI spii
203 .long exception_error
204 .long exception_error
205 .long exception_error
206 .long do_IRQ ! MMCIF mmci0 /* D00 */
207 .long do_IRQ ! mmci1
208 .long do_IRQ ! mmci2
209 .long do_IRQ ! mmci3
210 .long exception_error
211 .long exception_error
212 .long exception_error
213 .long exception_error
214 .long exception_error /* E00 */
215 .long exception_error
216 .long exception_error
217 .long exception_error
218 .long do_IRQ ! MFI mfii
219 .long exception_error
220 .long exception_error
221 .long exception_error
222 .long exception_error /* F00 */
223 .long exception_error
224 .long exception_error
225 .long exception_error
226 .long do_IRQ ! ADC adi
227 .long do_IRQ ! CMT cmti /* FA0 */
228#elif defined(CONFIG_CPU_SUBTYPE_SH73180)
229 .long do_IRQ ! 50 0x840
230 .long do_IRQ ! 51 0x860
231 .long do_IRQ ! 52 0x880
232 .long do_IRQ ! 53 0x8a0
233 .long do_IRQ ! 54 0x8c0
234 .long do_IRQ ! 55 0x8e0
235 .long do_IRQ ! 56 0x900
236 .long do_IRQ ! 57 0x920
237 .long do_IRQ ! 58 0x940
238 .long do_IRQ ! 59 0x960
239 .long do_IRQ ! 60 0x980
240 .long do_IRQ ! 61 0x9a0
241 .long do_IRQ ! 62 0x9c0
242 .long do_IRQ ! 63 0x9e0
243 .long do_IRQ ! 64 0xa00
244 .long do_IRQ ! 65 0xa20
245 .long do_IRQ ! 66 0xa40
246 .long do_IRQ ! 67 0xa60
247 .long do_IRQ ! 68 0xa80
248 .long do_IRQ ! 69 0xaa0
249 .long do_IRQ ! 70 0xac0
250 .long do_IRQ ! 71 0xae0
251 .long do_IRQ ! 72 0xb00
252 .long do_IRQ ! 73 0xb20
253 .long do_IRQ ! 74 0xb40
254 .long do_IRQ ! 75 0xb60
255 .long do_IRQ ! 76 0xb80
256 .long do_IRQ ! 77 0xba0
257 .long do_IRQ ! 78 0xbc0
258 .long do_IRQ ! 79 0xbe0
259 .long do_IRQ ! 80 0xc00
260 .long do_IRQ ! 81 0xc20
261 .long do_IRQ ! 82 0xc40
262 .long do_IRQ ! 83 0xc60
263 .long do_IRQ ! 84 0xc80
264 .long do_IRQ ! 85 0xca0
265 .long do_IRQ ! 86 0xcc0
266 .long do_IRQ ! 87 0xce0
267 .long do_IRQ ! 88 0xd00
268 .long do_IRQ ! 89 0xd20
269 .long do_IRQ ! 90 0xd40
270 .long do_IRQ ! 91 0xd60
271 .long do_IRQ ! 92 0xd80
272 .long do_IRQ ! 93 0xda0
273 .long do_IRQ ! 94 0xdc0
274 .long do_IRQ ! 95 0xde0
275 .long do_IRQ ! 96 0xe00
276 .long do_IRQ ! 97 0xe20
277 .long do_IRQ ! 98 0xe40
278 .long do_IRQ ! 99 0xe60
279 .long do_IRQ ! 100 0xe80
280 .long do_IRQ ! 101 0xea0
281 .long do_IRQ ! 102 0xec0
282 .long do_IRQ ! 103 0xee0
283 .long do_IRQ ! 104 0xf00
284 .long do_IRQ ! 105 0xf20
285 .long do_IRQ ! 106 0xf40
286 .long do_IRQ ! 107 0xf60
287 .long do_IRQ ! 108 0xf80
288#elif defined(CONFIG_CPU_SUBTYPE_ST40STB1)
289 .long exception_error ! 50 0x840
290 .long exception_error ! 51 0x860
291 .long exception_error ! 52 0x880
292 .long exception_error ! 53 0x8a0
293 .long exception_error ! 54 0x8c0
294 .long exception_error ! 55 0x8e0
295 .long exception_error ! 56 0x900
296 .long exception_error ! 57 0x920
297 .long exception_error ! 58 0x940
298 .long exception_error ! 59 0x960
299 .long exception_error ! 60 0x980
300 .long exception_error ! 61 0x9a0
301 .long exception_error ! 62 0x9c0
302 .long exception_error ! 63 0x9e0
303 .long do_IRQ ! 64 0xa00 PCI serr
304 .long do_IRQ ! 65 0xa20 err
305 .long do_IRQ ! 66 0xa40 ad
306 .long do_IRQ ! 67 0xa60 pwr_dwn
307 .long exception_error ! 68 0xa80
308 .long exception_error ! 69 0xaa0
309 .long exception_error ! 70 0xac0
310 .long exception_error ! 71 0xae0
311 .long do_IRQ ! 72 0xb00 DMA INT0
312 .long do_IRQ ! 73 0xb20 INT1
313 .long do_IRQ ! 74 0xb40 INT2
314 .long do_IRQ ! 75 0xb60 INT3
315 .long do_IRQ ! 76 0xb80 INT4
316 .long exception_error ! 77 0xba0
317 .long do_IRQ ! 78 0xbc0 DMA ERR
318 .long exception_error ! 79 0xbe0
319 .long do_IRQ ! 80 0xc00 PIO0
320 .long do_IRQ ! 81 0xc20 PIO1
321 .long do_IRQ ! 82 0xc40 PIO2
322 .long exception_error ! 83 0xc60
323 .long exception_error ! 84 0xc80
324 .long exception_error ! 85 0xca0
325 .long exception_error ! 86 0xcc0
326 .long exception_error ! 87 0xce0
327 .long exception_error ! 88 0xd00
328 .long exception_error ! 89 0xd20
329 .long exception_error ! 90 0xd40
330 .long exception_error ! 91 0xd60
331 .long exception_error ! 92 0xd80
332 .long exception_error ! 93 0xda0
333 .long exception_error ! 94 0xdc0
334 .long exception_error ! 95 0xde0
335 .long exception_error ! 96 0xe00
336 .long exception_error ! 97 0xe20
337 .long exception_error ! 98 0xe40
338 .long exception_error ! 99 0xe60
339 .long exception_error ! 100 0xe80
340 .long exception_error ! 101 0xea0
341 .long exception_error ! 102 0xec0
342 .long exception_error ! 103 0xee0
343 .long exception_error ! 104 0xf00
344 .long exception_error ! 105 0xf20
345 .long exception_error ! 106 0xf40
346 .long exception_error ! 107 0xf60
347 .long exception_error ! 108 0xf80
348 .long exception_error ! 109 0xfa0
349 .long exception_error ! 110 0xfc0
350 .long exception_error ! 111 0xfe0
351 .long do_IRQ ! 112 0x1000 Mailbox
352 .long exception_error ! 113 0x1020
353 .long exception_error ! 114 0x1040
354 .long exception_error ! 115 0x1060
355 .long exception_error ! 116 0x1080
356 .long exception_error ! 117 0x10a0
357 .long exception_error ! 118 0x10c0
358 .long exception_error ! 119 0x10e0
359 .long exception_error ! 120 0x1100
360 .long exception_error ! 121 0x1120
361 .long exception_error ! 122 0x1140
362 .long exception_error ! 123 0x1160
363 .long exception_error ! 124 0x1180
364 .long exception_error ! 125 0x11a0
365 .long exception_error ! 126 0x11c0
366 .long exception_error ! 127 0x11e0
367 .long exception_error ! 128 0x1200
368 .long exception_error ! 129 0x1220
369 .long exception_error ! 130 0x1240
370 .long exception_error ! 131 0x1260
371 .long exception_error ! 132 0x1280
372 .long exception_error ! 133 0x12a0
373 .long exception_error ! 134 0x12c0
374 .long exception_error ! 135 0x12e0
375 .long exception_error ! 136 0x1300
376 .long exception_error ! 137 0x1320
377 .long exception_error ! 138 0x1340
378 .long exception_error ! 139 0x1360
379 .long do_IRQ ! 140 0x1380 EMPI INV_ADDR
380 .long exception_error ! 141 0x13a0
381 .long exception_error ! 142 0x13c0
382 .long exception_error ! 143 0x13e0
383#endif
384
diff --git a/arch/sh/kernel/cpu/sh4/fpu.c b/arch/sh/kernel/cpu/sh4/fpu.c
new file mode 100644
index 000000000000..f486c07e10e2
--- /dev/null
+++ b/arch/sh/kernel/cpu/sh4/fpu.c
@@ -0,0 +1,335 @@
1/* $Id: fpu.c,v 1.4 2004/01/13 05:52:11 kkojima Exp $
2 *
3 * linux/arch/sh/kernel/fpu.c
4 *
5 * Save/restore floating point context for signal handlers.
6 *
7 * This file is subject to the terms and conditions of the GNU General Public
8 * License. See the file "COPYING" in the main directory of this archive
9 * for more details.
10 *
11 * Copyright (C) 1999, 2000 Kaz Kojima & Niibe Yutaka
12 *
13 * FIXME! These routines can be optimized in big endian case.
14 */
15
16#include <linux/sched.h>
17#include <linux/signal.h>
18#include <asm/processor.h>
19#include <asm/io.h>
20
21/* The PR (precision) bit in the FP Status Register must be clear when
22 * an frchg instruction is executed, otherwise the instruction is undefined.
23 * Executing frchg with PR set causes a trap on some SH4 implementations.
24 */
25
26#define FPSCR_RCHG 0x00000000
27
28
/*
 * Save FPU registers onto task structure.
 * Assume called with FPU enabled (SR.FD=0).
 *
 * fpul and fpscr are pushed first with pre-decrement addressing
 * starting at &thread.fpu.hard.status, then both register banks are
 * stored, switching banks with frchg.  FPSCR is temporarily loaded
 * with FPSCR_RCHG (PR clear) because frchg is undefined with PR set,
 * and left at FPSCR_INIT afterwards.
 */
void
save_fpu(struct task_struct *tsk, struct pt_regs *regs)
{
	unsigned long dummy;

	clear_tsk_thread_flag(tsk, TIF_USEDFPU);
	enable_fpu();
	asm volatile("sts.l	fpul, @-%0\n\t"
		     "sts.l	fpscr, @-%0\n\t"
		     "lds	%2, fpscr\n\t"
		     "frchg\n\t"
		     "fmov.s	fr15, @-%0\n\t"
		     "fmov.s	fr14, @-%0\n\t"
		     "fmov.s	fr13, @-%0\n\t"
		     "fmov.s	fr12, @-%0\n\t"
		     "fmov.s	fr11, @-%0\n\t"
		     "fmov.s	fr10, @-%0\n\t"
		     "fmov.s	fr9, @-%0\n\t"
		     "fmov.s	fr8, @-%0\n\t"
		     "fmov.s	fr7, @-%0\n\t"
		     "fmov.s	fr6, @-%0\n\t"
		     "fmov.s	fr5, @-%0\n\t"
		     "fmov.s	fr4, @-%0\n\t"
		     "fmov.s	fr3, @-%0\n\t"
		     "fmov.s	fr2, @-%0\n\t"
		     "fmov.s	fr1, @-%0\n\t"
		     "fmov.s	fr0, @-%0\n\t"
		     "frchg\n\t"
		     "fmov.s	fr15, @-%0\n\t"
		     "fmov.s	fr14, @-%0\n\t"
		     "fmov.s	fr13, @-%0\n\t"
		     "fmov.s	fr12, @-%0\n\t"
		     "fmov.s	fr11, @-%0\n\t"
		     "fmov.s	fr10, @-%0\n\t"
		     "fmov.s	fr9, @-%0\n\t"
		     "fmov.s	fr8, @-%0\n\t"
		     "fmov.s	fr7, @-%0\n\t"
		     "fmov.s	fr6, @-%0\n\t"
		     "fmov.s	fr5, @-%0\n\t"
		     "fmov.s	fr4, @-%0\n\t"
		     "fmov.s	fr3, @-%0\n\t"
		     "fmov.s	fr2, @-%0\n\t"
		     "fmov.s	fr1, @-%0\n\t"
		     "fmov.s	fr0, @-%0\n\t"
		     "lds	%3, fpscr\n\t"
		     : "=r" (dummy)
		     : "0" ((char *)(&tsk->thread.fpu.hard.status)),
		       "r" (FPSCR_RCHG),
		       "r" (FPSCR_INIT)
		     : "memory");

	disable_fpu();
	release_fpu(regs);
}
87
/*
 * Restore the FPU context from the task structure: the mirror image of
 * save_fpu().  Both register banks are loaded with post-increment
 * addressing starting at &thread.fpu, followed by fpscr and fpul.
 * FPSCR_RCHG is loaded first so the frchg bank switches are
 * well-defined (PR must be clear).
 */
static void
restore_fpu(struct task_struct *tsk)
{
	unsigned long dummy;

	enable_fpu();
	asm volatile("lds	%2, fpscr\n\t"
		     "fmov.s	@%0+, fr0\n\t"
		     "fmov.s	@%0+, fr1\n\t"
		     "fmov.s	@%0+, fr2\n\t"
		     "fmov.s	@%0+, fr3\n\t"
		     "fmov.s	@%0+, fr4\n\t"
		     "fmov.s	@%0+, fr5\n\t"
		     "fmov.s	@%0+, fr6\n\t"
		     "fmov.s	@%0+, fr7\n\t"
		     "fmov.s	@%0+, fr8\n\t"
		     "fmov.s	@%0+, fr9\n\t"
		     "fmov.s	@%0+, fr10\n\t"
		     "fmov.s	@%0+, fr11\n\t"
		     "fmov.s	@%0+, fr12\n\t"
		     "fmov.s	@%0+, fr13\n\t"
		     "fmov.s	@%0+, fr14\n\t"
		     "fmov.s	@%0+, fr15\n\t"
		     "frchg\n\t"
		     "fmov.s	@%0+, fr0\n\t"
		     "fmov.s	@%0+, fr1\n\t"
		     "fmov.s	@%0+, fr2\n\t"
		     "fmov.s	@%0+, fr3\n\t"
		     "fmov.s	@%0+, fr4\n\t"
		     "fmov.s	@%0+, fr5\n\t"
		     "fmov.s	@%0+, fr6\n\t"
		     "fmov.s	@%0+, fr7\n\t"
		     "fmov.s	@%0+, fr8\n\t"
		     "fmov.s	@%0+, fr9\n\t"
		     "fmov.s	@%0+, fr10\n\t"
		     "fmov.s	@%0+, fr11\n\t"
		     "fmov.s	@%0+, fr12\n\t"
		     "fmov.s	@%0+, fr13\n\t"
		     "fmov.s	@%0+, fr14\n\t"
		     "fmov.s	@%0+, fr15\n\t"
		     "frchg\n\t"
		     "lds.l	@%0+, fpscr\n\t"
		     "lds.l	@%0+, fpul\n\t"
		     : "=r" (dummy)
		     : "0" (&tsk->thread.fpu), "r" (FPSCR_RCHG)
		     : "memory");
	disable_fpu();
}
136
/*
 * Initialise fresh FPU state for a first-time user: fpul is loaded
 * with %0 and broadcast to fr0-fr15 in both banks (frchg switches
 * banks), then fpscr is set to FPSCR_INIT.
 *
 * NOTE(review): the original comment claimed signalling NaNs were
 * loaded here ("no matter whether considered as single or as double
 * precision represents signaling NaNs"), but the operand actually
 * supplied is 0 - all registers are zeroed.  Confirm which behaviour
 * is intended.
 */

static void
fpu_init(void)
{
	enable_fpu();
	asm volatile("lds	%0, fpul\n\t"
		     "lds	%1, fpscr\n\t"
		     "fsts	fpul, fr0\n\t"
		     "fsts	fpul, fr1\n\t"
		     "fsts	fpul, fr2\n\t"
		     "fsts	fpul, fr3\n\t"
		     "fsts	fpul, fr4\n\t"
		     "fsts	fpul, fr5\n\t"
		     "fsts	fpul, fr6\n\t"
		     "fsts	fpul, fr7\n\t"
		     "fsts	fpul, fr8\n\t"
		     "fsts	fpul, fr9\n\t"
		     "fsts	fpul, fr10\n\t"
		     "fsts	fpul, fr11\n\t"
		     "fsts	fpul, fr12\n\t"
		     "fsts	fpul, fr13\n\t"
		     "fsts	fpul, fr14\n\t"
		     "fsts	fpul, fr15\n\t"
		     "frchg\n\t"
		     "fsts	fpul, fr0\n\t"
		     "fsts	fpul, fr1\n\t"
		     "fsts	fpul, fr2\n\t"
		     "fsts	fpul, fr3\n\t"
		     "fsts	fpul, fr4\n\t"
		     "fsts	fpul, fr5\n\t"
		     "fsts	fpul, fr6\n\t"
		     "fsts	fpul, fr7\n\t"
		     "fsts	fpul, fr8\n\t"
		     "fsts	fpul, fr9\n\t"
		     "fsts	fpul, fr10\n\t"
		     "fsts	fpul, fr11\n\t"
		     "fsts	fpul, fr12\n\t"
		     "fsts	fpul, fr13\n\t"
		     "fsts	fpul, fr14\n\t"
		     "fsts	fpul, fr15\n\t"
		     "frchg\n\t"
		     "lds	%2, fpscr\n\t"
		     : /* no output */
		     : "r" (0), "r" (FPSCR_RCHG), "r" (FPSCR_INIT));
	disable_fpu();
}
188
189/**
190 * denormal_to_double - Given denormalized float number,
191 * store double float
192 *
193 * @fpu: Pointer to sh_fpu_hard structure
194 * @n: Index to FP register
195 */
196static void
197denormal_to_double (struct sh_fpu_hard_struct *fpu, int n)
198{
199 unsigned long du, dl;
200 unsigned long x = fpu->fpul;
201 int exp = 1023 - 126;
202
203 if (x != 0 && (x & 0x7f800000) == 0) {
204 du = (x & 0x80000000);
205 while ((x & 0x00800000) == 0) {
206 x <<= 1;
207 exp--;
208 }
209 x &= 0x007fffff;
210 du |= (exp << 20) | (x >> 3);
211 dl = x << 29;
212
213 fpu->fp_regs[n] = du;
214 fpu->fp_regs[n+1] = dl;
215 }
216}
217
/**
 * ieee_fpe_handler - Handle denormalized number exception
 *
 * @regs: Pointer to register structure
 *
 * Returns 1 when it's handled (should not cause exception).
 */
static int
ieee_fpe_handler (struct pt_regs *regs)
{
	unsigned short insn = *(unsigned short *) regs->pc;
	unsigned short finsn;
	unsigned long nextpc;
	/* Instruction at PC split into nibbles for decoding. */
	int nib[4] = {
		(insn >> 12) & 0xf,
		(insn >> 8) & 0xf,
		(insn >> 4) & 0xf,
		insn & 0xf};

	/*
	 * The faulting FP instruction may sit in the delay slot of a
	 * branch at PC.  Decode the branch to find both the FP
	 * instruction (finsn) and the address to resume at (nextpc).
	 */
	if (nib[0] == 0xb ||
	    (nib[0] == 0x4 && nib[2] == 0x0 && nib[3] == 0xb)) /* bsr & jsr */
		regs->pr = regs->pc + 4;

	if (nib[0] == 0xa || nib[0] == 0xb) { /* bra & bsr */
		/* sign-extend the 12-bit displacement, scaled by 2 */
		nextpc = regs->pc + 4 + ((short) ((insn & 0xfff) << 4) >> 3);
		finsn = *(unsigned short *) (regs->pc + 2);
	} else if (nib[0] == 0x8 && nib[1] == 0xd) { /* bt/s */
		if (regs->sr & 1)	/* T bit */
			nextpc = regs->pc + 4 + ((char) (insn & 0xff) << 1);
		else
			nextpc = regs->pc + 4;
		finsn = *(unsigned short *) (regs->pc + 2);
	} else if (nib[0] == 0x8 && nib[1] == 0xf) { /* bf/s */
		if (regs->sr & 1)
			nextpc = regs->pc + 4;
		else
			nextpc = regs->pc + 4 + ((char) (insn & 0xff) << 1);
		finsn = *(unsigned short *) (regs->pc + 2);
	} else if (nib[0] == 0x4 && nib[3] == 0xb &&
		 (nib[2] == 0x0 || nib[2] == 0x2)) { /* jmp & jsr */
		nextpc = regs->regs[nib[1]];
		finsn = *(unsigned short *) (regs->pc + 2);
	} else if (nib[0] == 0x0 && nib[3] == 0x3 &&
		 (nib[2] == 0x0 || nib[2] == 0x2)) { /* braf & bsrf */
		nextpc = regs->pc + 4 + regs->regs[nib[1]];
		finsn = *(unsigned short *) (regs->pc + 2);
	} else if (insn == 0x000b) { /* rts */
		nextpc = regs->pr;
		finsn = *(unsigned short *) (regs->pc + 2);
	} else {
		/* Not a branch: the FP instruction is at PC itself. */
		nextpc = regs->pc + 2;
		finsn = insn;
	}

	if ((finsn & 0xf1ff) == 0xf0ad) { /* fcnvsd */
		struct task_struct *tsk = current;

		save_fpu(tsk, regs);
		if ((tsk->thread.fpu.hard.fpscr & (1 << 17))) {
			/* FPU error: emulate the single->double conversion
			 * of the denormal operand by hand, clear the
			 * cause/flag bits and resume past the instruction. */
			denormal_to_double (&tsk->thread.fpu.hard,
					    (finsn >> 8) & 0xf);
			tsk->thread.fpu.hard.fpscr &=
				~(FPSCR_CAUSE_MASK | FPSCR_FLAG_MASK);
			grab_fpu(regs);
			restore_fpu(tsk);
			set_tsk_thread_flag(tsk, TIF_USEDFPU);
		} else {
			/* Not the denormal case - punt to user space. */
			tsk->thread.trap_no = 11;
			tsk->thread.error_code = 0;
			force_sig(SIGFPE, tsk);
		}

		regs->pc = nextpc;
		return 1;
	}

	return 0;
}
297
/*
 * FPU exception entry point.  First try to emulate the denormal case
 * via ieee_fpe_handler(); if that does not handle it, step over the
 * faulting instruction, save the FPU context and deliver SIGFPE.
 */
asmlinkage void
do_fpu_error(unsigned long r4, unsigned long r5, unsigned long r6, unsigned long r7,
	     struct pt_regs regs)
{
	struct task_struct *tsk = current;

	if (ieee_fpe_handler (&regs))
		return;

	regs.pc += 2;
	save_fpu(tsk, &regs);
	tsk->thread.trap_no = 11;
	tsk->thread.error_code = 0;
	force_sig(SIGFPE, tsk);
}
313
/*
 * FPU-disabled exception entry point: grab the FPU for the current
 * task and either restore its saved context (previous FPU user) or
 * initialise fresh state (first-time user).  FPU use in kernel mode
 * is a bug and is only logged.
 */
asmlinkage void
do_fpu_state_restore(unsigned long r4, unsigned long r5, unsigned long r6,
		     unsigned long r7, struct pt_regs regs)
{
	struct task_struct *tsk = current;

	grab_fpu(&regs);
	if (!user_mode(&regs)) {
		printk(KERN_ERR "BUG: FPU is used in kernel mode.\n");
		return;
	}

	if (used_math()) {
		/* Using the FPU again. */
		restore_fpu(tsk);
	} else	{
		/* First time FPU user. */
		fpu_init();
		set_used_math();
	}
	set_tsk_thread_flag(tsk, TIF_USEDFPU);
}
diff --git a/arch/sh/kernel/cpu/sh4/irq_intc2.c b/arch/sh/kernel/cpu/sh4/irq_intc2.c
new file mode 100644
index 000000000000..099ebbf89745
--- /dev/null
+++ b/arch/sh/kernel/cpu/sh4/irq_intc2.c
@@ -0,0 +1,222 @@
1/*
2 * linux/arch/sh/kernel/irq_intc2.c
3 *
4 * Copyright (C) 2001 David J. Mckay (david.mckay@st.com)
5 *
6 * May be copied or modified under the terms of the GNU General Public
7 * License. See linux/COPYING for more information.
8 *
9 * Interrupt handling for INTC2-based IRQ.
10 *
11 * These are the "new Hitachi style" interrupts, as present on the
12 * Hitachi 7751 and the STM ST40 STB1.
13 */
14
15#include <linux/kernel.h>
16#include <linux/init.h>
17#include <linux/irq.h>
18
19#include <asm/system.h>
20#include <asm/io.h>
21#include <asm/machvec.h>
22
23
/* Per-IRQ bookkeeping: locates the source's mask bit within the INTC2
 * mask register block. */
struct intc2_data {
	unsigned char msk_offset;	/* byte offset of the INTMSK register */
	unsigned char msk_shift;	/* bit position within that register */
#ifdef CONFIG_CPU_SUBTYPE_ST40
	int (*clear_irq) (int);		/* optional end-of-irq callback */
#endif
};
31
32
33static struct intc2_data intc2_data[NR_INTC2_IRQS];
34
35static void enable_intc2_irq(unsigned int irq);
36static void disable_intc2_irq(unsigned int irq);
37
38/* shutdown is same as "disable" */
39#define shutdown_intc2_irq disable_intc2_irq
40
41static void mask_and_ack_intc2(unsigned int);
42static void end_intc2_irq(unsigned int irq);
43
/* "Starting up" an INTC2 IRQ is nothing more than unmasking it. */
static unsigned int startup_intc2_irq(unsigned int irq)
{
	enable_intc2_irq(irq);
	return 0; /* never anything pending */
}
49
50static struct hw_interrupt_type intc2_irq_type = {
51 "INTC2-IRQ",
52 startup_intc2_irq,
53 shutdown_intc2_irq,
54 enable_intc2_irq,
55 disable_intc2_irq,
56 mask_and_ack_intc2,
57 end_intc2_irq
58};
59
60static void disable_intc2_irq(unsigned int irq)
61{
62 int irq_offset = irq - INTC2_FIRST_IRQ;
63 int msk_shift, msk_offset;
64
65 // Sanity check
66 if((irq_offset<0) || (irq_offset>=NR_INTC2_IRQS))
67 return;
68
69 msk_shift = intc2_data[irq_offset].msk_shift;
70 msk_offset = intc2_data[irq_offset].msk_offset;
71
72 ctrl_outl(1<<msk_shift,
73 INTC2_BASE+INTC2_INTMSK_OFFSET+msk_offset);
74}
75
76static void enable_intc2_irq(unsigned int irq)
77{
78 int irq_offset = irq - INTC2_FIRST_IRQ;
79 int msk_shift, msk_offset;
80
81 /* Sanity check */
82 if((irq_offset<0) || (irq_offset>=NR_INTC2_IRQS))
83 return;
84
85 msk_shift = intc2_data[irq_offset].msk_shift;
86 msk_offset = intc2_data[irq_offset].msk_offset;
87
88 ctrl_outl(1<<msk_shift,
89 INTC2_BASE+INTC2_INTMSKCLR_OFFSET+msk_offset);
90}
91
/* Acknowledging an INTC2 interrupt only requires masking it. */
static void mask_and_ack_intc2(unsigned int irq)
{
	disable_intc2_irq(irq);
}
96
/*
 * End-of-interrupt: re-enable the source unless it has been disabled or
 * is still in progress, then run any ST40 termination callback that was
 * registered via intc2_add_clear_irq().
 */
static void end_intc2_irq(unsigned int irq)
{
	if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS)))
		enable_intc2_irq(irq);

#ifdef CONFIG_CPU_SUBTYPE_ST40
	if (intc2_data[irq - INTC2_FIRST_IRQ].clear_irq)
		intc2_data[irq - INTC2_FIRST_IRQ].clear_irq (irq);
#endif
}
107
108/*
109 * Setup an INTC2 style interrupt.
110 * NOTE: Unlike IPR interrupts, parameters are not shifted by this code,
111 * allowing the use of the numbers straight out of the datasheet.
112 * For example:
113 * PIO1 which is INTPRI00[19,16] and INTMSK00[13]
114 * would be: ^ ^ ^ ^
115 * | | | |
116 * make_intc2_irq(84, 0, 16, 0, 13);
117 */
118void make_intc2_irq(unsigned int irq,
119 unsigned int ipr_offset, unsigned int ipr_shift,
120 unsigned int msk_offset, unsigned int msk_shift,
121 unsigned int priority)
122{
123 int irq_offset = irq - INTC2_FIRST_IRQ;
124 unsigned int flags;
125 unsigned long ipr;
126
127 if((irq_offset<0) || (irq_offset>=NR_INTC2_IRQS))
128 return;
129
130 disable_irq_nosync(irq);
131
132 /* Fill the data we need */
133 intc2_data[irq_offset].msk_offset = msk_offset;
134 intc2_data[irq_offset].msk_shift = msk_shift;
135#ifdef CONFIG_CPU_SUBTYPE_ST40
136 intc2_data[irq_offset].clear_irq = NULL;
137#endif
138
139 /* Set the priority level */
140 local_irq_save(flags);
141
142 ipr=ctrl_inl(INTC2_BASE+INTC2_INTPRI_OFFSET+ipr_offset);
143 ipr&=~(0xf<<ipr_shift);
144 ipr|=(priority)<<ipr_shift;
145 ctrl_outl(ipr, INTC2_BASE+INTC2_INTPRI_OFFSET+ipr_offset);
146
147 local_irq_restore(flags);
148
149 irq_desc[irq].handler=&intc2_irq_type;
150
151 disable_intc2_irq(irq);
152}
153
154#ifdef CONFIG_CPU_SUBTYPE_ST40
155
/* Boot-time description of one INTC2 source; fed to make_intc2_irq(). */
struct intc2_init {
	unsigned short irq;
	unsigned char ipr_offset, ipr_shift;	/* priority register slot */
	unsigned char msk_offset, msk_shift;	/* mask register slot */
};
161
/* INTC2 sources wired up on the ST40 (register slots per datasheet). */
static struct intc2_init intc2_init_data[]  __initdata = {
	{64,  0,  0, 0,  0},	/* PCI serr */
	{65,  0,  4, 0,  1},	/* PCI err */
	{66,  0,  4, 0,  2},	/* PCI ad */
	{67,  0,  4, 0,  3},	/* PCI pwd down */
	{72,  0,  8, 0,  5},	/* DMAC INT0 */
	{73,  0,  8, 0,  6},	/* DMAC INT1 */
	{74,  0,  8, 0,  7},	/* DMAC INT2 */
	{75,  0,  8, 0,  8},	/* DMAC INT3 */
	{76,  0,  8, 0,  9},	/* DMAC INT4 */
	{78,  0,  8, 0, 11},	/* DMAC ERR */
	{80,  0, 12, 0, 12},	/* PIO0 */
	{84,  0, 16, 0, 13},	/* PIO1 */
	{88,  0, 20, 0, 14},	/* PIO2 */
	{112, 4,  0, 4,  0},	/* Mailbox */
#ifdef CONFIG_CPU_SUBTYPE_ST40GX1
	{116, 4,  4, 4,  4},	/* SSC0 */
	{120, 4,  8, 4,  8},	/* IR Blaster */
	{124, 4, 12, 4, 12},	/* USB host */
	{128, 4, 16, 4, 16},	/* Video processor BLITTER */
	{132, 4, 20, 4, 20},	/* UART0 */
	{134, 4, 20, 4, 22},	/* UART2 */
	{136, 4, 24, 4, 24},	/* IO_PIO0 */
	{140, 4, 28, 4, 28},	/* EMPI */
	{144, 8,  0, 8,  0},	/* MAFE */
	{148, 8,  4, 8,  4},	/* PWM */
	{152, 8,  8, 8,  8},	/* SSC1 */
	{156, 8, 12, 8, 12},	/* IO_PIO1 */
	{160, 8, 16, 8, 16},	/* USB target */
	{164, 8, 20, 8, 20},	/* UART1 */
	{168, 8, 24, 8, 24},	/* Teletext */
	{172, 8, 28, 8, 28},	/* VideoSync VTG */
	{173, 8, 28, 8, 29},	/* VideoSync DVP0 */
	{174, 8, 28, 8, 30},	/* VideoSync DVP1 */
#endif
};
198
199void __init init_IRQ_intc2(void)
200{
201 struct intc2_init *p;
202
203 printk(KERN_ALERT "init_IRQ_intc2\n");
204
205 for (p = intc2_init_data;
206 p<intc2_init_data+ARRAY_SIZE(intc2_init_data);
207 p++) {
208 make_intc2_irq(p->irq, p->ipr_offset, p->ipr_shift,
209 p-> msk_offset, p->msk_shift, 13);
210 }
211}
212
213/* Adds a termination callback to the interrupt */
214void intc2_add_clear_irq(int irq, int (*fn)(int))
215{
216 if (irq < INTC2_FIRST_IRQ)
217 return;
218
219 intc2_data[irq - INTC2_FIRST_IRQ].clear_irq = fn;
220}
221
222#endif /* CONFIG_CPU_SUBTYPE_ST40 */
diff --git a/arch/sh/kernel/cpu/sh4/probe.c b/arch/sh/kernel/cpu/sh4/probe.c
new file mode 100644
index 000000000000..42427b79697b
--- /dev/null
+++ b/arch/sh/kernel/cpu/sh4/probe.c
@@ -0,0 +1,138 @@
1/*
2 * arch/sh/kernel/cpu/sh4/probe.c
3 *
4 * CPU Subtype Probing for SH-4.
5 *
6 * Copyright (C) 2001, 2002, 2003, 2004 Paul Mundt
7 * Copyright (C) 2003 Richard Curnow
8 *
9 * This file is subject to the terms and conditions of the GNU General Public
10 * License. See the file "COPYING" in the main directory of this archive
11 * for more details.
12 */
13
14#include <linux/init.h>
15#include <asm/processor.h>
16#include <asm/cache.h>
17#include <asm/io.h>
18
/*
 * Probe the CPU subtype and cache geometry for SH-4 family parts.
 *
 * Reads the CCN version (PVR), revision (PRR) and cache-version (CVR)
 * registers, fills cpu_data with conservative direct-mapped defaults,
 * then overrides per-subtype.  Always returns 0.
 */
int __init detect_cpu_and_cache_system(void)
{
	unsigned long pvr, prr, cvr;
	unsigned long size;

	/* Decode table for the 4-bit CVR cache-size fields -> size in
	 * bytes; unlisted encodings decode to 0. */
	static unsigned long sizes[16] = {
		[1] = (1 << 12),
		[2] = (1 << 13),
		[4] = (1 << 14),
		[8] = (1 << 15),
		[9] = (1 << 16)
	};

	pvr = (ctrl_inl(CCN_PVR) >> 8) & 0xffff;
	prr = (ctrl_inl(CCN_PRR) >> 4) & 0xff;
	cvr = (ctrl_inl(CCN_CVR));

	/*
	 * Setup some sane SH-4 defaults for the icache
	 * (direct-mapped, 8kB, 32-byte lines)
	 */
	cpu_data->icache.way_incr	= (1 << 13);
	cpu_data->icache.entry_shift	= 5;
	cpu_data->icache.entry_mask	= 0x1fe0;
	cpu_data->icache.sets		= 256;
	cpu_data->icache.ways		= 1;
	cpu_data->icache.linesz		= L1_CACHE_BYTES;

	/*
	 * And again for the dcache ..
	 * (direct-mapped, 16kB, 32-byte lines)
	 */
	cpu_data->dcache.way_incr	= (1 << 14);
	cpu_data->dcache.entry_shift	= 5;
	cpu_data->dcache.entry_mask	= 0x3fe0;
	cpu_data->dcache.sets		= 512;
	cpu_data->dcache.ways		= 1;
	cpu_data->dcache.linesz		= L1_CACHE_BYTES;

	/* Set the FPU flag, virtually all SH-4's have one */
	cpu_data->flags |= CPU_HAS_FPU;

	/*
	 * Probe the underlying processor version/revision and
	 * adjust cpu_data setup accordingly.
	 */
	switch (pvr) {
	case 0x205:
		cpu_data->type = CPU_SH7750;
		cpu_data->flags |= CPU_HAS_P2_FLUSH_BUG | CPU_HAS_PERF_COUNTER;
		break;
	case 0x206:
		cpu_data->type = CPU_SH7750S;
		cpu_data->flags |= CPU_HAS_P2_FLUSH_BUG | CPU_HAS_PERF_COUNTER;
		break;
	case 0x1100:
		cpu_data->type = CPU_SH7751;
		break;
	case 0x2000:
		cpu_data->type = CPU_SH73180;
		cpu_data->icache.ways = 4;
		cpu_data->dcache.ways = 4;
		cpu_data->flags &= ~CPU_HAS_FPU;
		break;
	case 0x8000:
		cpu_data->type = CPU_ST40RA;
		break;
	case 0x8100:
		cpu_data->type = CPU_ST40GX1;
		break;
	case 0x700:
		cpu_data->type = CPU_SH4_501;
		cpu_data->icache.ways = 2;
		cpu_data->dcache.ways = 2;

		/* No FPU on the SH4-500 series.. */
		cpu_data->flags &= ~CPU_HAS_FPU;
		break;
	case 0x600:
		cpu_data->type = CPU_SH4_202;
		cpu_data->icache.ways = 2;
		cpu_data->dcache.ways = 2;
		break;
	case 0x500 ... 0x501:
		/* These share a PVR; the PRR distinguishes them. */
		switch (prr) {
	  	    case 0x10: cpu_data->type = CPU_SH7750R; break;
		    case 0x11: cpu_data->type = CPU_SH7751R; break;
		    case 0x50: cpu_data->type = CPU_SH7760;  break;
		}

		cpu_data->icache.ways = 2;
		cpu_data->dcache.ways = 2;

		break;
	default:
		cpu_data->type = CPU_SH_NONE;
		break;
	}

	/*
	 * On anything that's not a direct-mapped cache, look to the CVR
	 * for I/D-cache specifics.
	 */
	if (cpu_data->icache.ways > 1) {
		size = sizes[(cvr >> 20) & 0xf];
		cpu_data->icache.way_incr	= (size >> 1);
		cpu_data->icache.sets		= (size >> 6);
		cpu_data->icache.entry_mask	=
			(cpu_data->icache.way_incr - (1 << 5));
	}

	if (cpu_data->dcache.ways > 1) {
		size = sizes[(cvr >> 16) & 0xf];
		cpu_data->dcache.way_incr	= (size >> 1);
		cpu_data->dcache.sets		= (size >> 6);
		cpu_data->dcache.entry_mask	=
			(cpu_data->dcache.way_incr - (1 << 5));
	}

	return 0;
}
138
diff --git a/arch/sh/kernel/cpu/sh4/sq.c b/arch/sh/kernel/cpu/sh4/sq.c
new file mode 100644
index 000000000000..8437ea7430fe
--- /dev/null
+++ b/arch/sh/kernel/cpu/sh4/sq.c
@@ -0,0 +1,453 @@
1/*
2 * arch/sh/kernel/cpu/sq.c
3 *
4 * General management API for SH-4 integrated Store Queues
5 *
6 * Copyright (C) 2001, 2002, 2003, 2004 Paul Mundt
7 * Copyright (C) 2001, 2002 M. R. Brown
8 *
9 * Some of this code has been adopted directly from the old arch/sh/mm/sq.c
10 * hack that was part of the LinuxDC project. For all intents and purposes,
11 * this is a completely new interface that really doesn't have much in common
12 * with the old zone-based approach at all. In fact, it's only listed here for
13 * general completeness.
14 *
15 * This file is subject to the terms and conditions of the GNU General Public
16 * License. See the file "COPYING" in the main directory of this archive
17 * for more details.
18 */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/config.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/proc_fs.h>
#include <linux/miscdevice.h>
#include <linux/vmalloc.h>

#include <asm/io.h>
#include <asm/page.h>
#include <asm/mmu_context.h>
#include <asm/cpu/sq.h>
33
34static LIST_HEAD(sq_mapping_list);
35static DEFINE_SPINLOCK(sq_mapping_lock);
36
/**
 * sq_flush - Flush (prefetch) the store queue cache
 * @addr: the store queue address to flush
 *
 * Executes a prefetch instruction on the specified store queue cache,
 * so that the cached data is written to physical memory.
 */
inline void sq_flush(void *addr)
{
	/* On the SQ area, "pref" triggers the write-back of the queue. */
	__asm__ __volatile__ ("pref @%0" : : "r" (addr) : "memory");
}
48
49/**
50 * sq_flush_range - Flush (prefetch) a specific SQ range
51 * @start: the store queue address to start flushing from
52 * @len: the length to flush
53 *
54 * Flushes the store queue cache from @start to @start + @len in a
55 * linear fashion.
56 */
57void sq_flush_range(unsigned long start, unsigned int len)
58{
59 volatile unsigned long *sq = (unsigned long *)start;
60 unsigned long dummy;
61
62 /* Flush the queues */
63 for (len >>= 5; len--; sq += 8)
64 sq_flush((void *)sq);
65
66 /* Wait for completion */
67 dummy = ctrl_inl(P4SEG_STORE_QUE);
68
69 ctrl_outl(0, P4SEG_STORE_QUE + 0);
70 ctrl_outl(0, P4SEG_STORE_QUE + 8);
71}
72
/*
 * Allocate and register a new mapping record covering
 * [@virt, @virt + @size) -> @phys under @name.  Returns ERR_PTR() on
 * failure.  New entries are added at the head of sq_mapping_list,
 * which __sq_get_next_addr() relies on.
 */
static struct sq_mapping *__sq_alloc_mapping(unsigned long virt, unsigned long phys, unsigned long size, const char *name)
{
	struct sq_mapping *map;

	if (virt + size > SQ_ADDRMAX)
		return ERR_PTR(-ENOSPC);

	map = kmalloc(sizeof(struct sq_mapping), GFP_KERNEL);
	if (!map)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&map->list);

	map->sq_addr = virt;
	map->addr = phys;
	/* NOTE(review): the +1 makes size off-by-one relative to the
	 * caller's byte count -- confirm this is intentional. */
	map->size = size + 1;
	map->name = name;

	list_add(&map->list, &sq_mapping_list);

	return map;
}
95
96static unsigned long __sq_get_next_addr(void)
97{
98 if (!list_empty(&sq_mapping_list)) {
99 struct list_head *pos, *tmp;
100
101 /*
102 * Read one off the list head, as it will have the highest
103 * mapped allocation. Set the next one up right above it.
104 *
105 * This is somewhat sub-optimal, as we don't look at
106 * gaps between allocations or anything lower then the
107 * highest-level allocation.
108 *
109 * However, in the interest of performance and the general
110 * lack of desire to do constant list rebalancing, we don't
111 * worry about it.
112 */
113 list_for_each_safe(pos, tmp, &sq_mapping_list) {
114 struct sq_mapping *entry;
115
116 entry = list_entry(pos, typeof(*entry), list);
117
118 return entry->sq_addr + entry->size;
119 }
120 }
121
122 return P4SEG_STORE_QUE;
123}
124
/**
 * __sq_remap - Perform a translation from the SQ to a phys addr
 * @map: sq mapping containing phys and store queue addresses.
 *
 * Maps the store queue address specified in the mapping to the physical
 * address specified in the mapping.  Returns @map on success, an
 * ERR_PTR()/NULL on failure (mixed conventions -- see the MMU branch).
 */
static struct sq_mapping *__sq_remap(struct sq_mapping *map)
{
	unsigned long flags, pteh, ptel;
	struct vm_struct *vma;
	pgprot_t pgprot;

	/*
	 * Without an MMU (or with it turned off), this is much more
	 * straightforward, as we can just load up each queue's QACR with
	 * the physical address appropriately masked.
	 */

	ctrl_outl(((map->addr >> 26) << 2) & 0x1c, SQ_QACR0);
	ctrl_outl(((map->addr >> 26) << 2) & 0x1c, SQ_QACR1);

#ifdef CONFIG_MMU
	/*
	 * With an MMU on the other hand, things are slightly more involved.
	 * Namely, we have to have a direct mapping between the SQ addr and
	 * the associated physical address in the UTLB by way of setting up
	 * a virt<->phys translation by hand. We do this by simply specifying
	 * the SQ addr in UTLB.VPN and the associated physical address in
	 * UTLB.PPN.
	 *
	 * Notably, even though this is a special case translation, and some
	 * of the configuration bits are meaningless, we're still required
	 * to have a valid ASID context in PTEH.
	 *
	 * We could also probably get by without explicitly setting PTEA, but
	 * we do it here just for good measure.
	 */
	spin_lock_irqsave(&sq_mapping_lock, flags);

	pteh = map->sq_addr;
	ctrl_outl((pteh & MMU_VPN_MASK) | get_asid(), MMU_PTEH);

	ptel = map->addr & PAGE_MASK;
	ctrl_outl(((ptel >> 28) & 0xe) | (ptel & 0x1), MMU_PTEA);

	pgprot = pgprot_noncached(PAGE_KERNEL);

	ptel &= _PAGE_FLAGS_HARDWARE_MASK;
	ptel |= pgprot_val(pgprot);
	ctrl_outl(ptel, MMU_PTEL);

	/* Load the hand-built entry into the UTLB. */
	__asm__ __volatile__ ("ldtlb" : : : "memory");

	spin_unlock_irqrestore(&sq_mapping_lock, flags);

	/*
	 * Next, we need to map ourselves in the kernel page table, so that
	 * future accesses after a TLB flush will be handled when we take a
	 * page fault.
	 *
	 * Theoretically we could just do this directly and not worry about
	 * setting up the translation by hand ahead of time, but for the
	 * cases where we want a one-shot SQ mapping followed by a quick
	 * writeout before we hit the TLB flush, we do it anyways. This way
	 * we at least save ourselves the initial page fault overhead.
	 */
	vma = __get_vm_area(map->size, VM_ALLOC, map->sq_addr, SQ_ADDRMAX);
	if (!vma)
		return ERR_PTR(-ENOMEM);

	vma->phys_addr = map->addr;

	if (remap_area_pages((unsigned long)vma->addr, vma->phys_addr,
			     map->size, pgprot_val(pgprot))) {
		vunmap(vma->addr);
		return NULL;
	}
#endif /* CONFIG_MMU */

	return map;
}
207
208/**
209 * sq_remap - Map a physical address through the Store Queues
210 * @phys: Physical address of mapping.
211 * @size: Length of mapping.
212 * @name: User invoking mapping.
213 *
214 * Remaps the physical address @phys through the next available store queue
215 * address of @size length. @name is logged at boot time as well as through
216 * the procfs interface.
217 *
218 * A pre-allocated and filled sq_mapping pointer is returned, and must be
219 * cleaned up with a call to sq_unmap() when the user is done with the
220 * mapping.
221 */
222struct sq_mapping *sq_remap(unsigned long phys, unsigned int size, const char *name)
223{
224 struct sq_mapping *map;
225 unsigned long virt, end;
226 unsigned int psz;
227
228 /* Don't allow wraparound or zero size */
229 end = phys + size - 1;
230 if (!size || end < phys)
231 return NULL;
232 /* Don't allow anyone to remap normal memory.. */
233 if (phys < virt_to_phys(high_memory))
234 return NULL;
235
236 phys &= PAGE_MASK;
237
238 size = PAGE_ALIGN(end + 1) - phys;
239 virt = __sq_get_next_addr();
240 psz = (size + (PAGE_SIZE - 1)) / PAGE_SIZE;
241 map = __sq_alloc_mapping(virt, phys, size, name);
242
243 printk("sqremap: %15s [%4d page%s] va 0x%08lx pa 0x%08lx\n",
244 map->name ? map->name : "???",
245 psz, psz == 1 ? " " : "s",
246 map->sq_addr, map->addr);
247
248 return __sq_remap(map);
249}
250
/**
 * sq_unmap - Unmap a Store Queue allocation
 * @map: Pre-allocated Store Queue mapping.
 *
 * Unmaps the store queue allocation @map that was previously created by
 * sq_remap(). Also frees up the pte that was previously inserted into
 * the kernel page table and discards the UTLB translation.
 */
void sq_unmap(struct sq_mapping *map)
{
	/* NOTE(review): comparing an SQ (P4) address against high_memory
	 * as a "was this vmapped?" test looks odd -- confirm the intent. */
	if (map->sq_addr > (unsigned long)high_memory)
		vfree((void *)(map->sq_addr & PAGE_MASK));

	list_del(&map->list);
	kfree(map);
}
267
/**
 * sq_clear - Clear a store queue range
 * @addr: Address to start clearing from.
 * @len: Length to clear.
 *
 * A quick zero-fill implementation for clearing out memory that has been
 * remapped through the store queues.
 */
void sq_clear(unsigned long addr, unsigned int len)
{
	int i;

	/* Clear out both queues linearly */
	/* NOTE(review): the stride is 1 byte per 32-bit ctrl_outl(), so
	 * these writes overlap and are mostly unaligned -- confirm whether
	 * addr + i*4 was intended. */
	for (i = 0; i < 8; i++) {
		ctrl_outl(0, addr + i + 0);
		ctrl_outl(0, addr + i + 8);
	}

	sq_flush_range(addr, len);
}
288
289/**
290 * sq_vma_unmap - Unmap a VMA range
291 * @area: VMA containing range.
292 * @addr: Start of range.
293 * @len: Length of range.
294 *
295 * Searches the sq_mapping_list for a mapping matching the sq addr @addr,
296 * and subsequently frees up the entry. Further cleanup is done by generic
297 * code.
298 */
299static void sq_vma_unmap(struct vm_area_struct *area,
300 unsigned long addr, size_t len)
301{
302 struct list_head *pos, *tmp;
303
304 list_for_each_safe(pos, tmp, &sq_mapping_list) {
305 struct sq_mapping *entry;
306
307 entry = list_entry(pos, typeof(*entry), list);
308
309 if (entry->sq_addr == addr) {
310 /*
311 * We could probably get away without doing the tlb flush
312 * here, as generic code should take care of most of this
313 * when unmapping the rest of the VMA range for us. Leave
314 * it in for added sanity for the time being..
315 */
316 __flush_tlb_page(get_asid(), entry->sq_addr & PAGE_MASK);
317
318 list_del(&entry->list);
319 kfree(entry);
320
321 return;
322 }
323 }
324}
325
/**
 * sq_vma_sync - Sync a VMA range
 * @area: VMA containing range.
 * @start: Start of range.
 * @len: Length of range.
 * @flags: Additional flags.
 *
 * Synchronizes an sq mapped range by flushing the store queue cache for
 * the duration of the mapping.
 *
 * Used internally for user mappings, which must use msync() to prefetch
 * the store queue cache.  Always returns 0.
 */
static int sq_vma_sync(struct vm_area_struct *area,
		       unsigned long start, size_t len, unsigned int flags)
{
	sq_flush_range(start, len);

	return 0;
}
346
/* VMA callbacks installed on user-space SQ mappings by sq_mmap(). */
static struct vm_operations_struct sq_vma_ops = {
	.unmap = sq_vma_unmap,
	.sync = sq_vma_sync,
};
351
352/**
353 * sq_mmap - mmap() for /dev/cpu/sq
354 * @file: unused.
355 * @vma: VMA to remap.
356 *
357 * Remap the specified vma @vma through the store queues, and setup associated
358 * information for the new mapping. Also build up the page tables for the new
359 * area.
360 */
361static int sq_mmap(struct file *file, struct vm_area_struct *vma)
362{
363 unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
364 unsigned long size = vma->vm_end - vma->vm_start;
365 struct sq_mapping *map;
366
367 /*
368 * We're not interested in any arbitrary virtual address that has
369 * been stuck in the VMA, as we already know what addresses we
370 * want. Save off the size, and reposition the VMA to begin at
371 * the next available sq address.
372 */
373 vma->vm_start = __sq_get_next_addr();
374 vma->vm_end = vma->vm_start + size;
375
376 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
377
378 vma->vm_flags |= VM_IO | VM_RESERVED;
379
380 map = __sq_alloc_mapping(vma->vm_start, offset, size, "Userspace");
381
382 if (io_remap_pfn_range(vma, map->sq_addr, map->addr >> PAGE_SHIFT,
383 size, vma->vm_page_prot))
384 return -EAGAIN;
385
386 vma->vm_ops = &sq_vma_ops;
387
388 return 0;
389}
390
#ifdef CONFIG_PROC_FS
/*
 * /proc read handler: one "start-end [phys]: name" line per mapping,
 * oldest first.
 * NOTE(review): @off/@len are ignored and *eof is never set, so output
 * beyond a single page would be truncated -- confirm that is acceptable.
 */
static int sq_mapping_read_proc(char *buf, char **start, off_t off,
				int len, int *eof, void *data)
{
	struct list_head *pos;
	char *p = buf;

	list_for_each_prev(pos, &sq_mapping_list) {
		struct sq_mapping *entry;

		entry = list_entry(pos, typeof(*entry), list);

		p += sprintf(p, "%08lx-%08lx [%08lx]: %s\n", entry->sq_addr,
			     entry->sq_addr + entry->size - 1, entry->addr,
			     entry->name);
	}

	return p - buf;
}
#endif
411
/* Only mmap() is supported on the device node. */
static struct file_operations sq_fops = {
	.owner		= THIS_MODULE,
	.mmap		= sq_mmap,
};

/* Misc character device exposing the SQ API as /dev/cpu/sq. */
static struct miscdevice sq_dev = {
	.minor		= STORE_QUEUE_MINOR,
	.name		= "sq",
	.devfs_name	= "cpu/sq",
	.fops		= &sq_fops,
};
423
/* Register the misc device (and, if configured, the procfs view). */
static int __init sq_api_init(void)
{
	printk(KERN_NOTICE "sq: Registering store queue API.\n");

#ifdef CONFIG_PROC_FS
	/* Best-effort: a missing proc entry is not fatal. */
	create_proc_read_entry("sq_mapping", 0, 0, sq_mapping_read_proc, 0);
#endif

	return misc_register(&sq_dev);
}
434
/* Tear down the misc device on module unload. */
static void __exit sq_api_exit(void)
{
	misc_deregister(&sq_dev);
}
439
440module_init(sq_api_init);
441module_exit(sq_api_exit);
442
443MODULE_AUTHOR("Paul Mundt <lethal@linux-sh.org>, M. R. Brown <mrbrown@0xd6.org>");
444MODULE_DESCRIPTION("Simple API for SH-4 integrated Store Queues");
445MODULE_LICENSE("GPL");
446MODULE_ALIAS_MISCDEV(STORE_QUEUE_MINOR);
447
448EXPORT_SYMBOL(sq_remap);
449EXPORT_SYMBOL(sq_unmap);
450EXPORT_SYMBOL(sq_clear);
451EXPORT_SYMBOL(sq_flush);
452EXPORT_SYMBOL(sq_flush_range);
453
diff --git a/arch/sh/kernel/cpu/ubc.S b/arch/sh/kernel/cpu/ubc.S
new file mode 100644
index 000000000000..0c569b20e1c1
--- /dev/null
+++ b/arch/sh/kernel/cpu/ubc.S
@@ -0,0 +1,59 @@
1/*
2 * arch/sh/kernel/ubc.S
3 *
4 * Set of management routines for the User Break Controller (UBC)
5 *
6 * Copyright (C) 2002 Paul Mundt
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License as published by the
10 * Free Software Foundation; either version 2 of the License, or (at your
11 * option) any later version.
12 */
13#include <linux/linkage.h>
14#include <asm/ubc.h>
15
16#define STBCR2 0xffc00010
17
! ubc_sleep: quiesce the UBC before sleep.  Zeroes BBRA/BBRB/BRCR so no
! break conditions can trigger, then sets MSTP5 in STBCR2 to stop the
! UBC module clock.  Dummy reads ensure each write has landed.
ENTRY(ubc_sleep)
	mov	#0, r0

	mov.l	1f, r1		! Zero out UBC_BBRA ..
	mov.w	r0, @r1

	mov.l	2f, r1		! .. same for BBRB ..
	mov.w	r0, @r1

	mov.l	3f, r1		! .. and again for BRCR.
	mov.w	r0, @r1

	mov.w	@r1, r0		! Dummy read BRCR

	mov.l	4f, r1		! Set MSTP5 in STBCR2
	mov.b	@r1, r0
	or	#0x01, r0
	mov.b	r0, @r1

	mov.b	@r1, r0		! Two dummy reads ..
	mov.b	@r1, r0

	rts
	nop
42
! ubc_wakeup: restart the UBC module clock by clearing MSTP5 in STBCR2.
ENTRY(ubc_wakeup)
	mov.l	4f, r1		! Clear MSTP5
	mov.b	@r1, r0
	and	#0xfe, r0
	mov.b	r0, @r1

	mov.b	@r1, r0		! Two more dummy reads ..
	mov.b	@r1, r0

	rts
	nop

! Literal pool (register addresses), shared with ubc_sleep above.
1:	.long	UBC_BBRA
2:	.long	UBC_BBRB
3:	.long	UBC_BRCR
4:	.long	STBCR2
59
diff --git a/arch/sh/kernel/cpufreq.c b/arch/sh/kernel/cpufreq.c
new file mode 100644
index 000000000000..e0b384bef55f
--- /dev/null
+++ b/arch/sh/kernel/cpufreq.c
@@ -0,0 +1,218 @@
1/*
2 * arch/sh/kernel/cpufreq.c
3 *
4 * cpufreq driver for the SuperH processors.
5 *
6 * Copyright (C) 2002, 2003, 2004, 2005 Paul Mundt
7 * Copyright (C) 2002 M. R. Brown
8 *
9 * This program is free software; you can redistribute it and/or modify it
10 * under the terms of the GNU General Public License as published by the
11 * Free Software Foundation; either version 2 of the License, or (at your
12 * option) any later version.
13 */
14#include <linux/types.h>
15#include <linux/cpufreq.h>
16#include <linux/kernel.h>
17#include <linux/module.h>
18#include <linux/slab.h>
19#include <linux/init.h>
20#include <linux/delay.h>
21#include <linux/cpumask.h>
22#include <linux/smp.h>
23
24#include <asm/processor.h>
25#include <asm/watchdog.h>
26#include <asm/freq.h>
27#include <asm/io.h>
28
/*
 * For SuperH, each policy change requires that we change the IFC, BFC, and
 * PFC at the same time. Here we define sane values that won't trash the
 * system.
 *
 * Note the max set is computed at runtime, we use the divisors that we booted
 * with to setup our maximum operating frequencies.
 */
struct clock_set {
	unsigned int ifc;	/* CPU (internal) clock divisor */
	unsigned int bfc;	/* bus clock divisor */
	unsigned int pfc;	/* peripheral (module) clock divisor */
} clock_sets[] = {
#if defined(CONFIG_CPU_SH3) || defined(CONFIG_CPU_SH2)
	{ 0, 0, 0 },	/* not implemented yet */
#elif defined(CONFIG_CPU_SH4)
	{ 4, 8, 8 },	/* min - IFC: 1/4, BFC: 1/8, PFC: 1/8 */
	{ 1, 2, 2 },	/* max - IFC: 1, BFC: 1/2, PFC: 1/2 */
#endif
};
49
50#define MIN_CLOCK_SET 0
51#define MAX_CLOCK_SET (ARRAY_SIZE(clock_sets) - 1)
52
/*
 * For the time being, we only support two frequencies, which in turn are
 * aimed at the POWERSAVE and PERFORMANCE policies, which in turn are derived
 * directly from the respective min/max clock sets. Technically we could
 * support a wider range of frequencies, but these vary far too much for each
 * CPU subtype (and we'd have to construct a frequency table for each subtype).
 *
 * Maybe something to implement in the future..
 */
#define SH_FREQ_MAX	0
#define SH_FREQ_MIN	1

/* Frequencies (kHz) are filled in at init time by sh_cpufreq_cpu_init(). */
static struct cpufreq_frequency_table sh_freqs[] = {
	{ SH_FREQ_MAX,	0 },
	{ SH_FREQ_MIN,	0 },
	{ 0,		CPUFREQ_TABLE_END },
};
70
71static void sh_cpufreq_update_clocks(unsigned int set)
72{
73 current_cpu_data.cpu_clock = current_cpu_data.master_clock / clock_sets[set].ifc;
74 current_cpu_data.bus_clock = current_cpu_data.master_clock / clock_sets[set].bfc;
75 current_cpu_data.module_clock = current_cpu_data.master_clock / clock_sets[set].pfc;
76 current_cpu_data.loops_per_jiffy = loops_per_jiffy;
77}
78
/* XXX: This needs to be split out per CPU and CPU subtype. */
/*
 * Here we notify other drivers of the proposed change and the final change.
 *
 * Pins the current task to @cpu, reprograms FRQCR for clock set @set
 * (on SH-4, priming the WDT first so the PLL can stabilize), updates
 * the cached clock values, and restores the task's CPU affinity.
 *
 * NOTE(review): the CONFIG_CPU_SH3 branch references `newstate`, which
 * is not declared anywhere in this function -- an SH-3 build of this
 * file cannot compile as-is.
 */
static int sh_cpufreq_setstate(unsigned int cpu, unsigned int set)
{
	unsigned short frqcr = ctrl_inw(FRQCR);
	cpumask_t cpus_allowed;
	struct cpufreq_freqs freqs;

	if (!cpu_online(cpu))
		return -ENODEV;

	/* The FRQCR access below must happen on @cpu itself. */
	cpus_allowed = current->cpus_allowed;
	set_cpus_allowed(current, cpumask_of_cpu(cpu));

	BUG_ON(smp_processor_id() != cpu);

	freqs.cpu = cpu;
	freqs.old = current_cpu_data.cpu_clock / 1000;
	freqs.new = (current_cpu_data.master_clock / clock_sets[set].ifc) / 1000;

	cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
#if defined(CONFIG_CPU_SH3)
	frqcr |= (newstate & 0x4000) << 14;
	frqcr |= (newstate & 0x000c) <<  2;
#elif defined(CONFIG_CPU_SH4)
	/*
	 * FRQCR.PLL2EN is 1, we need to allow the PLL to stabilize by
	 * initializing the WDT.
	 */
	if (frqcr & (1 << 9)) {
		__u8 csr;

		/*
		 * Set the overflow period to the highest available,
		 * in this case a 1/4096 division ratio yields a 5.25ms
		 * overflow period. See asm-sh/watchdog.h for more
		 * information and a range of other divisors.
		 */
		csr = sh_wdt_read_csr();
		csr |= WTCSR_CKS_4096;
		sh_wdt_write_csr(csr);

		sh_wdt_write_cnt(0);
	}
	frqcr &= 0x0e00;	/* Clear ifc, bfc, pfc */
	frqcr |= get_ifc_value(clock_sets[set].ifc) << 6;
	frqcr |= get_bfc_value(clock_sets[set].bfc) << 3;
	frqcr |= get_pfc_value(clock_sets[set].pfc);
#endif
	ctrl_outw(frqcr, FRQCR);
	sh_cpufreq_update_clocks(set);

	set_cpus_allowed(current, cpus_allowed);
	cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);

	return 0;
}
138
/*
 * Per-CPU cpufreq init: derive the max clock set from the divisors we
 * booted with, fill in the two-entry frequency table (kHz), and hand
 * the table to the cpufreq core.
 */
static int sh_cpufreq_cpu_init(struct cpufreq_policy *policy)
{
	unsigned int min_freq, max_freq;
	unsigned int ifc, bfc, pfc;

	if (!cpu_online(policy->cpu))
		return -ENODEV;

	/* Update our maximum clock set */
	get_current_frequency_divisors(&ifc, &bfc, &pfc);
	clock_sets[MAX_CLOCK_SET].ifc = ifc;
	clock_sets[MAX_CLOCK_SET].bfc = bfc;
	clock_sets[MAX_CLOCK_SET].pfc = pfc;

	/* Convert from Hz to kHz */
	max_freq = current_cpu_data.cpu_clock / 1000;
	min_freq = (current_cpu_data.master_clock / clock_sets[MIN_CLOCK_SET].ifc) / 1000;

	sh_freqs[SH_FREQ_MAX].frequency = max_freq;
	sh_freqs[SH_FREQ_MIN].frequency = min_freq;

	/* cpuinfo and default policy values */
	policy->governor = CPUFREQ_DEFAULT_GOVERNOR;
	/* Transition latency is unknown for these parts. */
	policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
	policy->cur = max_freq;

	return cpufreq_frequency_table_cpuinfo(policy, &sh_freqs[0]);
}
167
/* Clamp a proposed policy against the two-entry frequency table. */
static int sh_cpufreq_verify(struct cpufreq_policy *policy)
{
	return cpufreq_frequency_table_verify(policy, &sh_freqs[0]);
}
172
173static int sh_cpufreq_target(struct cpufreq_policy *policy,
174 unsigned int target_freq,
175 unsigned int relation)
176{
177 unsigned int set, idx = 0;
178
179 if (cpufreq_frequency_table_target(policy, &sh_freqs[0], target_freq, relation, &idx))
180 return -EINVAL;
181
182 set = (idx == SH_FREQ_MIN) ? MIN_CLOCK_SET : MAX_CLOCK_SET;
183
184 sh_cpufreq_setstate(policy->cpu, set);
185
186 return 0;
187}
188
/* Driver hooks registered with the cpufreq core. */
static struct cpufreq_driver sh_cpufreq_driver = {
	.owner		= THIS_MODULE,
	.name		= "SH cpufreq",
	.init		= sh_cpufreq_cpu_init,
	.verify		= sh_cpufreq_verify,
	.target		= sh_cpufreq_target,
};
196
197static int __init sh_cpufreq_init(void)
198{
199 if (!current_cpu_data.cpu_clock)
200 return -EINVAL;
201 if (cpufreq_register_driver(&sh_cpufreq_driver))
202 return -EINVAL;
203
204 return 0;
205}
206
/* Module exit: unregister from the cpufreq core. */
static void __exit sh_cpufreq_exit(void)
{
	cpufreq_unregister_driver(&sh_cpufreq_driver);
}
211
212module_init(sh_cpufreq_init);
213module_exit(sh_cpufreq_exit);
214
215MODULE_AUTHOR("Paul Mundt <lethal@linux-sh.org>");
216MODULE_DESCRIPTION("cpufreq driver for SuperH");
217MODULE_LICENSE("GPL");
218
diff --git a/arch/sh/kernel/early_printk.c b/arch/sh/kernel/early_printk.c
new file mode 100644
index 000000000000..1378db375e17
--- /dev/null
+++ b/arch/sh/kernel/early_printk.c
@@ -0,0 +1,137 @@
1/*
2 * arch/sh/kernel/early_printk.c
3 *
4 * Copyright (C) 1999, 2000 Niibe Yutaka
5 * Copyright (C) 2002 M. R. Brown
6 * Copyright (C) 2004 Paul Mundt
7 *
8 * This file is subject to the terms and conditions of the GNU General Public
9 * License. See the file "COPYING" in the main directory of this archive
10 * for more details.
11 */
12#include <linux/console.h>
13#include <linux/tty.h>
14#include <linux/init.h>
15#include <asm/io.h>
16
17#ifdef CONFIG_SH_STANDARD_BIOS
18#include <asm/sh_bios.h>
19
20/*
21 * Print a string through the BIOS
22 */
/*
 * Console ->write hook: pass the buffer straight through to the
 * standard-BIOS console output trap.  @co is unused.
 */
static void sh_console_write(struct console *co, const char *s,
			     unsigned count)
{
	sh_bios_console_write(s, count);
}
28
29/*
30 * Setup initial baud/bits/parity. We do two things here:
31 * - construct a cflag setting for the first rs_open()
32 * - initialize the serial port
33 * Return non-zero if we didn't find a serial port.
34 */
35static int __init sh_console_setup(struct console *co, char *options)
36{
37 int cflag = CREAD | HUPCL | CLOCAL;
38
39 /*
40 * Now construct a cflag setting.
41 * TODO: this is a totally bogus cflag, as we have
42 * no idea what serial settings the BIOS is using, or
43 * even if its using the serial port at all.
44 */
45 cflag |= B115200 | CS8 | /*no parity*/0;
46
47 co->cflag = cflag;
48
49 return 0;
50}
51
/* Early console backed by the standard SH BIOS output trap. */
static struct console early_console = {
	.name		= "bios",
	.write		= sh_console_write,
	.setup		= sh_console_setup,
	.flags		= CON_PRINTBUFFER,
	.index		= -1,
};
59#endif
60
61#ifdef CONFIG_EARLY_SCIF_CONSOLE
62#define SCIF_REG 0xffe80000
63
/*
 * Transmit a single character through the SCIF.
 *
 * Register offsets are raw magic numbers relative to SCIF_REG
 * (0x10 = status register, 12 = transmit FIFO data) -- presumably the
 * SH-4 SCIF layout; TODO confirm against the datasheet.
 */
static void scif_sercon_putc(int c)
{
	/* Busy-wait until the transmit FIFO has room (status bit 0x20). */
	while (!(ctrl_inw(SCIF_REG + 0x10) & 0x20)) ;

	ctrl_outb(c, SCIF_REG + 12);
	/* Clear the transmit flags (mask 0x9f) so the next poll is valid. */
	ctrl_outw((ctrl_inw(SCIF_REG + 0x10) & 0x9f), SCIF_REG + 0x10);

	/* LF -> CRLF expansion; note the CR is sent after the LF here. */
	if (c == '\n')
		scif_sercon_putc('\r');
}
74
/*
 * Wait for the SCIF transmitter to fully drain.
 *
 * Clears status bit 0x40 (presumably transmit-end), spins until the
 * hardware raises it again, then clears it once more so subsequent
 * polls start from a clean state.
 */
static void scif_sercon_flush(void)
{
	ctrl_outw((ctrl_inw(SCIF_REG + 0x10) & 0xbf), SCIF_REG + 0x10);

	while (!(ctrl_inw(SCIF_REG + 0x10) & 0x40)) ;

	ctrl_outw((ctrl_inw(SCIF_REG + 0x10) & 0xbf), SCIF_REG + 0x10);
}
83
/*
 * Console ->write hook: push @count characters from @s out through the
 * SCIF one at a time, then wait for the transmitter to drain.  @con is
 * unused.
 */
static void scif_sercon_write(struct console *con, const char *s, unsigned count)
{
	unsigned i;

	for (i = 0; i < count; i++)
		scif_sercon_putc(s[i]);

	scif_sercon_flush();
}
91
92static int __init scif_sercon_setup(struct console *con, char *options)
93{
94 con->cflag = CREAD | HUPCL | CLOCAL | B115200 | CS8;
95
96 return 0;
97}
98
/* Early console backed by direct SCIF register access. */
static struct console early_console = {
	.name		= "sercon",
	.write		= scif_sercon_write,
	.setup		= scif_sercon_setup,
	.flags		= CON_PRINTBUFFER,
	.index		= -1,
};
106
/*
 * Hardware initialisation of the SCIF for early-console use at @baud.
 *
 * The write order below follows the SCIF programming sequence.  All
 * offsets are raw (0, 4, 8, 16, 24, 32, 36 relative to SCIF_REG) --
 * NOTE(review): presumably the SH-4 SCIF register map; confirm against
 * the processor datasheet before touching this.
 */
void scif_sercon_init(int baud)
{
	/* Quiesce the port before reprogramming it. */
	ctrl_outw(0, SCIF_REG + 8);
	ctrl_outw(0, SCIF_REG);

	/* Set baud rate: divisor derived from the peripheral clock;
	 * the "+ 16 * baud" term rounds to the nearest divisor. */
	ctrl_outb((CONFIG_SH_PCLK_FREQ + 16 * baud) /
		  (32 * baud) - 1, SCIF_REG + 4);

	ctrl_outw(12, SCIF_REG + 24);
	ctrl_outw(8, SCIF_REG + 24);
	ctrl_outw(0, SCIF_REG + 32);
	ctrl_outw(0x60, SCIF_REG + 16);
	ctrl_outw(0, SCIF_REG + 36);
	/* Finally enable the port (0x30 written to the control register). */
	ctrl_outw(0x30, SCIF_REG + 8);
}
123#endif
124
/*
 * Register whichever early console this kernel was configured with.
 * The raw SCIF console needs its port initialised first; the BIOS
 * console requires no hardware setup.
 */
void __init enable_early_printk(void)
{
#ifdef CONFIG_EARLY_SCIF_CONSOLE
	scif_sercon_init(115200);
#endif
	register_console(&early_console);
}
132
/* Tear down the early console once the real console has taken over. */
void disable_early_printk(void)
{
	unregister_console(&early_console);
}
137
diff --git a/arch/sh/kernel/entry.S b/arch/sh/kernel/entry.S
new file mode 100644
index 000000000000..6615e4838ee4
--- /dev/null
+++ b/arch/sh/kernel/entry.S
@@ -0,0 +1,1149 @@
1/* $Id: entry.S,v 1.37 2004/06/11 13:02:46 doyu Exp $
2 *
3 * linux/arch/sh/entry.S
4 *
5 * Copyright (C) 1999, 2000, 2002 Niibe Yutaka
6 * Copyright (C) 2003 Paul Mundt
7 *
8 * This file is subject to the terms and conditions of the GNU General Public
9 * License. See the file "COPYING" in the main directory of this archive
10 * for more details.
11 *
12 */
13
14#include <linux/sys.h>
15#include <linux/linkage.h>
16#include <linux/config.h>
17#include <asm/asm-offsets.h>
18#include <asm/thread_info.h>
19#include <asm/unistd.h>
20
21#if !defined(CONFIG_NFSD) && !defined(CONFIG_NFSD_MODULE)
22#define sys_nfsservctl sys_ni_syscall
23#endif
24
25#if !defined(CONFIG_MMU)
26#define sys_madvise sys_ni_syscall
27#define sys_readahead sys_ni_syscall
28#define sys_mprotect sys_ni_syscall
29#define sys_msync sys_ni_syscall
30#define sys_mlock sys_ni_syscall
31#define sys_munlock sys_ni_syscall
32#define sys_mlockall sys_ni_syscall
33#define sys_munlockall sys_ni_syscall
34#define sys_mremap sys_ni_syscall
35#define sys_mincore sys_ni_syscall
36#define sys_remap_file_pages sys_ni_syscall
37#endif
38
39! NOTE:
40! GNU as (as of 2.9.1) changes bf/s into bt/s and bra, when the address
41! to be jumped is too far, but it causes illegal slot exception.
42
43/*
44 * entry.S contains the system-call and fault low-level handling routines.
45 * This also contains the timer-interrupt handler, as well as all interrupts
46 * and faults that can result in a task-switch.
47 *
48 * NOTE: This code handles signal-recognition, which happens every time
49 * after a timer-interrupt and after each system call.
50 *
51 * NOTE: This code uses a convention that instructions in the delay slot
52 * of a transfer-control instruction are indented by an extra space, thus:
53 *
54 * jmp @k0 ! control-transfer instruction
55 * ldc k1, ssr ! delay slot
56 *
57 * Stack layout in 'ret_from_syscall':
58 * ptrace needs to have all regs on the stack.
59 * if the order here is changed, it needs to be
60 * updated in ptrace.c and ptrace.h
61 *
62 * r0
63 * ...
64 * r15 = stack pointer
65 * spc
66 * pr
67 * ssr
68 * gbr
69 * mach
70 * macl
71 * syscall #
72 *
73 */
74
! Errno values open-coded so they can be used as immediates (mov #-ENOSYS).
ENOSYS = 38
EINVAL = 22

! Control-register addresses differ between SH-3 and SH-4 parts.
#if defined(CONFIG_CPU_SH3)
TRA     = 0xffffffd0
EXPEVT  = 0xffffffd4
#if defined(CONFIG_CPU_SUBTYPE_SH7707) || defined(CONFIG_CPU_SUBTYPE_SH7709) || \
    defined(CONFIG_CPU_SUBTYPE_SH7300) || defined(CONFIG_CPU_SUBTYPE_SH7705)
INTEVT  = 0xa4000000		! INTEVTE2(0xa4000000)
#else
INTEVT  = 0xffffffd8
#endif
MMU_TEA = 0xfffffffc		! TLB Exception Address Register
#elif defined(CONFIG_CPU_SH4)
TRA     = 0xff000020
EXPEVT  = 0xff000024
INTEVT  = 0xff000028
MMU_TEA = 0xff00000c		! TLB Exception Address Register
#endif

#if defined(CONFIG_KGDB_NMI)
NMI_VEC = 0x1c0			! Must catch early for debounce
#endif

/* Offsets to the stack -- must match the save order in handle_exception
 * below and the layout expected by ptrace.c/ptrace.h. */
OFF_R0  =  0		/* Return value. New ABI also arg4 */
OFF_R1  =  4		/* New ABI: arg5 */
OFF_R2  =  8		/* New ABI: arg6 */
OFF_R3  =  12		/* New ABI: syscall_nr */
OFF_R4  =  16		/* New ABI: arg0 */
OFF_R5  =  20		/* New ABI: arg1 */
OFF_R6  =  24		/* New ABI: arg2 */
OFF_R7  =  28		/* New ABI: arg3 */
OFF_SP  =  (15*4)
OFF_PC  =  (16*4)
OFF_SR  =  (16*4+8)
OFF_TRA =  (16*4+6*4)

113
! Aliases for the scratch registers used while running on register
! bank 1 (the exception bank), before switching back to bank 0.
#define k0	r0
#define k1	r1
#define k2	r2
#define k3	r3
#define k4	r4

#define k_ex_code	r2_bank	/* r2_bank1 */
#define g_imask		r6	/* r6_bank1 */
#define k_g_imask	r6_bank	/* r6_bank1 */
#define current		r7	/* r7_bank1 */

/*
 * Kernel mode register usage:
 *	k0	scratch
 *	k1	scratch
 *	k2	scratch (Exception code)
 *	k3	scratch (Return address)
 *	k4	scratch
 *	k5	reserved
 *	k6	Global Interrupt Mask (0--15 << 4)
 *	k7	CURRENT_THREAD_INFO (pointer to current thread info)
 */

136
137!
138! TLB Miss / Initial Page write exception handling
139! _and_
140! TLB hits, but the access violate the protection.
141! It can be valid access, such as stack grow and/or C-O-W.
142!
143!
144! Find the pmd/pte entry and loadtlb
145! If it's not found, cause address error (SEGV)
146!
147! Although this could be written in assembly language (and it'd be faster),
148! this first version depends *much* on C implementation.
149!
150
! CLI(): raise IMASK in SR to 0xf, blocking all interrupts.  Clobbers r0.
#define CLI()	\
	stc	sr, r0;		\
	or	#0xf0, r0;	\
	ldc	r0, sr

! STI(): clear IMASK in SR and re-apply the saved global interrupt mask
! from k_g_imask.  Clobbers r10 and r11.
#define STI()	\
	mov.l	__INV_IMASK, r11;	\
	stc	sr, r10;		\
	and	r11, r10;		\
	stc	k_g_imask, r11;		\
	or	r11, r10;		\
	ldc	r10, sr

! Without preemption the kernel-return path goes straight to restore_all.
#if defined(CONFIG_PREEMPT)
#	define preempt_stop()	CLI()
#else
#	define preempt_stop()
#	define resume_kernel	restore_all
#endif
170
#if defined(CONFIG_MMU)
	! Each TLB fault stub sets r5 to the writeaccess flag
	! (0 = read fault, 1 = write fault) in the branch delay slot
	! and joins the common call_dpf path.
	.align	2
ENTRY(tlb_miss_load)
	bra	call_dpf
	 mov	#0, r5

	.align	2
ENTRY(tlb_miss_store)
	bra	call_dpf
	 mov	#1, r5

	.align	2
ENTRY(initial_page_write)
	bra	call_dpf
	 mov	#1, r5

	.align	2
ENTRY(tlb_protection_violation_load)
	bra	call_dpf
	 mov	#0, r5

	.align	2
ENTRY(tlb_protection_violation_store)
	bra	call_dpf
	 mov	#1, r5

call_dpf:
	! Fast path first: __do_page_fault(regs, writeaccess, address).
	! r8/r9 preserve writeaccess/fault address across the call.
	mov.l	1f, r0
	mov	r5, r8
	mov.l	@r0, r6		! fault address from MMU_TEA
	mov	r6, r9
	mov.l	2f, r0
	sts	pr, r10
	jsr	@r0
	 mov	r15, r4
	!
	tst	r0, r0		! zero return: handled, just rts back
	bf/s	0f
	 lds	r10, pr
	rts
	 nop
	! Slow path: re-enable interrupts and tail-call the full
	! do_page_fault(regs, writeaccess, address).
0:	STI()
	mov.l	3f, r0
	mov	r9, r6
	mov	r8, r5
	jmp	@r0
	 mov	r15, r4

	.align 2
1:	.long	MMU_TEA
2:	.long	__do_page_fault
3:	.long	do_page_fault

	.align	2
ENTRY(address_error_load)
	bra	call_dae
	 mov	#0,r5		! writeaccess = 0

	.align	2
ENTRY(address_error_store)
	bra	call_dae
	 mov	#1,r5		! writeaccess = 1

	.align	2
call_dae:
	! Tail-call do_address_error(regs, writeaccess, address).
	mov.l	1f, r0
	mov.l	@r0, r6		! address
	mov.l	2f, r0
	jmp	@r0
	 mov	r15, r4		! regs

	.align 2
1:	.long	MMU_TEA
2:	.long	do_address_error
#endif /* CONFIG_MMU */
246
#if defined(CONFIG_SH_STANDARD_BIOS) || defined(CONFIG_SH_KGDB)
! Handle kernel debug if either kgdb (SW) or gdb-stub (FW) is present.
! If both are configured, handle the debug traps (breakpoints) in SW,
! but still allow BIOS traps to FW.

	.align	2
debug_kernel:
#if defined(CONFIG_SH_STANDARD_BIOS) && defined(CONFIG_SH_KGDB)
	/* Force BIOS call to FW (debug_trap put TRA in r8) */
	mov	r8,r0
	shlr2	r0		! TRA>>2 recovers the trapa number
	cmp/eq	#0x3f,r0
	bt	debug_kernel_fw
#endif /* CONFIG_SH_STANDARD_BIOS && CONFIG_SH_KGDB */

debug_enter:
#if defined(CONFIG_SH_KGDB)
	/* Jump to kgdb, pass stacked regs as arg */
debug_kernel_sw:
	mov.l	3f, r0
	jmp	@r0
	 mov	r15, r4
	.align	2
3:	.long	kgdb_handle_exception
#endif /* CONFIG_SH_KGDB */

#if defined(CONFIG_SH_STANDARD_BIOS)
	/* Unwind the stack and jmp to the debug entry */
	! Pops the full register frame saved by handle_exception, switches
	! back to register bank 1, then jumps through gdb_vbr_vector with
	! the saved SR staged in ssr for the firmware's rte.
debug_kernel_fw:
	mov.l	@r15+, r0
	mov.l	@r15+, r1
	mov.l	@r15+, r2
	mov.l	@r15+, r3
	mov.l	@r15+, r4
	mov.l	@r15+, r5
	mov.l	@r15+, r6
	mov.l	@r15+, r7
	stc	sr, r8
	mov.l	1f, r9			! BL =1, RB=1, IMASK=0x0F
	or	r9, r8
	ldc	r8, sr			! here, change the register bank
	mov.l	@r15+, r8
	mov.l	@r15+, r9
	mov.l	@r15+, r10
	mov.l	@r15+, r11
	mov.l	@r15+, r12
	mov.l	@r15+, r13
	mov.l	@r15+, r14
	mov.l	@r15+, k0
	ldc.l	@r15+, spc
	lds.l	@r15+, pr
	mov.l	@r15+, k1
	ldc.l	@r15+, gbr
	lds.l	@r15+, mach
	lds.l	@r15+, macl
	mov	k0, r15			! restore the original stack pointer
	!
	mov.l	2f, k0
	mov.l	@k0, k0
	jmp	@k0
	 ldc	k1, ssr
	.align	2
1:	.long	0x300000f0
2:	.long	gdb_vbr_vector
#endif /* CONFIG_SH_STANDARD_BIOS */

#endif /* CONFIG_SH_STANDARD_BIOS || CONFIG_SH_KGDB */
314
315
	.align	2
! Traps with number >= 0x20 arrive here; kernel-mode ones are handed
! to the debugger, user-mode ones to break_point_trap_software.
debug_trap:
#if defined(CONFIG_SH_STANDARD_BIOS) || defined(CONFIG_SH_KGDB)
	mov	#OFF_SR, r0
	mov.l	@(r0,r15), r0		! get status register
	shll	r0
	shll	r0			! kernel space?
	bt/s	debug_kernel
#endif
	 mov.l	@r15, r0		! Restore R0 value
	mov.l	1f, r8
	jmp	@r8
	 nop

	.align	2
! Fallback handler for exceptions with no dedicated entry.
ENTRY(exception_error)
	!
	STI()
	mov.l	2f, r0
	jmp	@r0
	 nop

!
	.align	2
1:	.long	break_point_trap_software
2:	.long	do_exception_error
342
	.align	2
! Common return path for exceptions and interrupts.  The MD bit of the
! saved SR (shifted into T below) decides kernel vs. user return.
ret_from_exception:
	preempt_stop()
ret_from_irq:
	!
	mov	#OFF_SR, r0
	mov.l	@(r0,r15), r0	! get status register
	shll	r0
	shll	r0		! kernel space?
	bt/s	resume_kernel	! Yes, it's from kernel, go back soon
	 GET_THREAD_INFO(r8)

#ifdef CONFIG_PREEMPT
	bra	resume_userspace
	 nop
! Kernel preemption: reschedule only when preempt_count is zero,
! TIF_NEED_RESCHED is set, and interrupts were not masked at the
! point of exception.
ENTRY(resume_kernel)
	mov.l	@(TI_PRE_COUNT,r8), r0	! current_thread_info->preempt_count
	tst	r0, r0
	bf	noresched
need_resched:
	mov.l	@(TI_FLAGS,r8), r0	! current_thread_info->flags
	tst	#_TIF_NEED_RESCHED, r0	! need_resched set?
	bt	noresched

	mov	#OFF_SR, r0
	mov.l	@(r0,r15), r0	! get status register
	and	#0xf0, r0	! interrupts off (exception path)?
	cmp/eq	#0xf0, r0
	bt	noresched

	! Mark the preemption in progress (PREEMPT_ACTIVE), schedule with
	! interrupts on, then clear the count and re-check.
	mov.l	1f, r0
	mov.l	r0, @(TI_PRE_COUNT,r8)

	STI()
	mov.l	2f, r0
	jsr	@r0
	 nop
	mov	#0, r0
	mov.l	r0, @(TI_PRE_COUNT,r8)
	CLI()

	bra	need_resched
	 nop
noresched:
	bra	restore_all
	 nop

	.align	2
1:	.long	PREEMPT_ACTIVE
2:	.long	schedule
#endif
394
! Return-to-user path: with interrupts blocked, loop until no work
! (reschedule/signal) remains, then fall into restore_all.
ENTRY(resume_userspace)
	! r8: current_thread_info
	CLI()
	mov.l	@(TI_FLAGS,r8), r0		! current_thread_info->flags
	tst	#_TIF_WORK_MASK, r0
	bt/s	restore_all
	 tst	#_TIF_NEED_RESCHED, r0

	.align	2
work_pending:
	! r0: current_thread_info->flags
	! r8: current_thread_info
	! t:  result of "tst	#_TIF_NEED_RESCHED, r0"
	bf/s	work_resched
	 tst	#_TIF_SIGPENDING, r0
work_notifysig:
	bt/s	restore_all
	 mov	r15, r4
	! Tail-call do_signal(regs, 0) with pr set so it returns
	! straight into restore_all.
	mov	#0, r5
	mov.l	2f, r1
	mova	restore_all, r0
	jmp	@r1
	 lds	r0, pr
work_resched:
#ifndef CONFIG_PREEMPT
	! gUSA handling: if the user SP carries a gUSA mark (>= 0xc0000000),
	! rewind the user PC so the interrupted atomic sequence restarts.
	mov.l	@(OFF_SP,r15), r0	! get user space stack pointer
	mov	r0, r1
	shll	r0
	bf/s	1f
	 shll	r0
	bf/s	1f
	 mov	#OFF_PC, r0
	! 			SP >= 0xc0000000 : gUSA mark
	mov.l	@(r0,r15), r2		! get user space PC (program counter)
	mov.l	@(OFF_R0,r15), r3	! end point
	cmp/hs	r3, r2			! r2 >= r3?
	bt	1f
	add	r3, r1			! rewind point #2
	mov.l	r1, @(r0,r15)		! reset PC to rewind point #2
	!
1:
#endif
	mov.l	1f, r1
	jsr	@r1				! schedule
	 nop
	CLI()
	!
	mov.l	@(TI_FLAGS,r8), r0		! current_thread_info->flags
	tst	#_TIF_WORK_MASK, r0
	bt	restore_all
	bra	work_pending
	 tst	#_TIF_NEED_RESCHED, r0

	.align	2
1:	.long	schedule
2:	.long	do_signal
452
	.align	2
! Syscall exit with work pending: notify the tracer first (if tracing),
! then fall through to the generic user-return work loop.
syscall_exit_work:
	! r0: current_thread_info->flags
	! r8: current_thread_info
	tst	#_TIF_SYSCALL_TRACE, r0
	bt/s	work_pending
	 tst	#_TIF_NEED_RESCHED, r0
	STI()
	! XXX setup arguments...
	mov.l	4f, r0			! do_syscall_trace
	jsr	@r0
	 nop
	bra	resume_userspace
	 nop

	.align	2
syscall_trace_entry:
	!                     	Yes it is traced.
	! XXX setup arguments...
	mov.l	4f, r11		! Call do_syscall_trace which notifies
	jsr	@r11	    	! superior (will chomp R[0-7])
	 nop
	!			Reload R0-R4 from kernel stack, where the
	!   	    	    	parent may have modified them using
	!   	    	    	ptrace(POKEUSR).  (Note that R0-R2 are
	!   	    	    	used by the system call handler directly
	!   	    	    	from the kernel stack anyway, so don't need
	!   	    	    	to be reloaded here.)  This allows the parent
	!   	    	    	to rewrite system calls and args on the fly.
	mov.l	@(OFF_R4,r15), r4   ! arg0
	mov.l	@(OFF_R5,r15), r5
	mov.l	@(OFF_R6,r15), r6
	mov.l	@(OFF_R7,r15), r7   ! arg3
	mov.l	@(OFF_R3,r15), r3   ! syscall_nr
	!   	    	    	Arrange for do_syscall_trace to be called
	!   	    	    	again as the system call returns.
	! Re-validate the (possibly rewritten) syscall number.
	mov.l	2f, r10			! Number of syscalls
	cmp/hs	r10, r3
	bf	syscall_call
	mov	#-ENOSYS, r0
	bra	syscall_exit
	 mov.l	r0, @(OFF_R0,r15)	! Return value
495
/*
 * Syscall interface:
 *
 *	Syscall #: R3
 *	Arguments #0 to #3: R4--R7
 *	Arguments #4 to #6: R0, R1, R2
 *	TRA: (number of arguments + 0x10) x 4
 *
 * This code also handles delegating other traps to the BIOS/gdb stub
 * according to:
 *
 * Trap number
 * (TRA>>2)	Purpose
 * --------	-------
 * 0x0-0xf	old syscall ABI
 * 0x10-0x1f	new syscall ABI
 * 0x20-0xff	delegated through debug_trap to BIOS/gdb stub.
 *
 * Note: When we're first called, the TRA value must be shifted
 * right 2 bits in order to get the value that was used as the "trapa"
 * argument.
 */

	.align	2
	.globl	ret_from_fork
! First return of a new child: run schedule_tail(prev) then take the
! normal syscall exit path.
ret_from_fork:
	mov.l	1f, r8
	jsr	@r8
	 mov	r0, r4
	bra	syscall_exit
	 nop
	.align	2
1:	.long	schedule_tail
	!
ENTRY(system_call)
	mov.l	1f, r9
	mov.l	@r9, r8		! Read from TRA (Trap Address) Register
	!
	! Is the trap argument >= 0x20? (TRA will be >= 0x80)
	mov	#0x7f, r9
	cmp/hi	r9, r8
	bt/s	0f
	 mov	#OFF_TRA, r9
	add	r15, r9
	!
	mov.l	r8, @r9		! set TRA value to tra
	STI()
	!		    	Call the system call handler through the table.
	!			First check for bad syscall number
	mov	r3, r9
	mov.l	2f, r8		! Number of syscalls
	cmp/hs	r8, r9
	bf/s	good_system_call
	 GET_THREAD_INFO(r8)
syscall_badsys:			! Bad syscall number
	mov	#-ENOSYS, r0
	bra	resume_userspace
	 mov.l	r0, @(OFF_R0,r15)	! Return value
	!
0:	! Not a syscall trap: delegate to the BIOS/gdb stub.
	bra	debug_trap
	 nop
	!
good_system_call:		! Good syscall number
	mov.l	@(TI_FLAGS,r8), r8
	mov	#_TIF_SYSCALL_TRACE, r10
	tst	r10, r8
	bf	syscall_trace_entry
	!
syscall_call:
	shll2	r9		! x4 -- scale syscall number to table offset
	mov.l	3f, r8		! Load the address of sys_call_table
	add	r8, r9
	mov.l	@r9, r8
	jsr	@r8	    	! jump to specific syscall handler
	 nop
	mov.l	r0, @(OFF_R0,r15)		! save the return value
	!
syscall_exit:
	CLI()
	!
	GET_THREAD_INFO(r8)
	mov.l	@(TI_FLAGS,r8), r0		! current_thread_info->flags
	tst	#_TIF_ALLWORK_MASK, r0
	bf	syscall_exit_work
! Pop the full register frame (reverse of handle_exception's save order)
! and rte back to the interrupted context.
restore_all:
	mov.l	@r15+, r0
	mov.l	@r15+, r1
	mov.l	@r15+, r2
	mov.l	@r15+, r3
	mov.l	@r15+, r4
	mov.l	@r15+, r5
	mov.l	@r15+, r6
	mov.l	@r15+, r7
	!
	stc	sr, r8
	mov.l	7f, r9
	or	r9, r8		! BL =1, RB=1
	ldc	r8, sr		! here, change the register bank
	!
	mov.l	@r15+, r8
	mov.l	@r15+, r9
	mov.l	@r15+, r10
	mov.l	@r15+, r11
	mov.l	@r15+, r12
	mov.l	@r15+, r13
	mov.l	@r15+, r14
	mov.l	@r15+, k4	! original stack pointer
	ldc.l	@r15+, spc
	lds.l	@r15+, pr
	mov.l	@r15+, k3	! original SR
	ldc.l	@r15+, gbr
	lds.l	@r15+, mach
	lds.l	@r15+, macl
	add	#4, r15		! Skip syscall number
	!
#ifdef CONFIG_SH_DSP
	mov.l	@r15+, k0	! DSP mode marker
	mov.l	5f, k1
	cmp/eq	k0, k1		! Do we have a DSP stack frame?
	bf	skip_restore

	stc	sr, k0		! Enable CPU DSP mode
	or	k1, k0		! (within kernel it may be disabled)
	ldc	k0, sr
	mov	r2, k0		! Backup r2

	! Restore DSP registers from stack
	mov	r15, r2
	movs.l	@r2+, a1
	movs.l	@r2+, a0g
	movs.l	@r2+, a1g
	movs.l	@r2+, m0
	movs.l	@r2+, m1
	mov	r2, r15

	lds.l	@r15+, a0
	lds.l	@r15+, x0
	lds.l	@r15+, x1
	lds.l	@r15+, y0
	lds.l	@r15+, y1
	lds.l	@r15+, dsr
	ldc.l	@r15+, rs
	ldc.l	@r15+, re
	ldc.l	@r15+, mod

	mov	k0, r2		! Restore r2
skip_restore:
#endif
	!
	! Calculate new SR value
	mov	k3, k2		! original SR value
	mov.l	9f, k1
	and	k1, k2		! Mask orignal SR value
	!
	mov	k3, k0		! Calculate IMASK-bits
	shlr2	k0
	and	#0x3c, k0
	cmp/eq	#0x3c, k0
	bt/s	6f
	 shll2	k0
	! If the saved IMASK was not 0xf, use the global mask instead.
	mov	g_imask, k0
	!
6:	or	k0, k2		! Set the IMASK-bits
	ldc	k2, ssr
	!
#if defined(CONFIG_KGDB_NMI)
	! Clear in_nmi
	mov.l	4f, k0
	mov	#0, k1
	mov.b	k1, @k0
#endif
	mov.l	@r15+, k2	! restore EXPEVT
	mov	k4, r15		! switch back to the original stack
	rte
	 nop

	.align	2
1:	.long	TRA
2:	.long	NR_syscalls
3:	.long	sys_call_table
4:	.long	do_syscall_trace
5:	.long	0x00001000	! DSP
7:	.long	0x30000000
9:
__INV_IMASK:
	.long	0xffffff0f	! ~(IMASK)
683
! Exception Vector Base
!
!	Should be aligned page boundary.
!
	.balign 	4096,0,4096
ENTRY(vbr_base)
	.long	0
!
! Each vector stub loads k2 with the event code and k3 with the address
! to return to, then joins handle_exception.
	.balign 	256,0,256
general_exception:
	mov.l	1f, k2
	mov.l	2f, k3
	bra	handle_exception
	 mov.l	@k2, k2
	.align	2
1:	.long	EXPEVT
2:	.long	ret_from_exception
!
!
	.balign 	1024,0,1024
tlb_miss:
	mov.l	1f, k2
	mov.l	4f, k3
	bra	handle_exception
	 mov.l	@k2, k2
!
	.balign 	512,0,512
interrupt:
	mov.l	2f, k2
	mov.l	3f, k3
#if defined(CONFIG_KGDB_NMI)
	! Debounce (filter nested NMI): if INTEVT reports an NMI and
	! in_nmi is already set, return immediately.
	mov.l	@k2, k0
	mov.l	5f, k1
	cmp/eq	k1, k0
	bf	0f
	mov.l	6f, k1
	tas.b	@k1
	bt	0f
	rte
	 nop
	.align	2
5:	.long	NMI_VEC
6:	.long	in_nmi
0:
#endif /* defined(CONFIG_KGDB_NMI) */
	bra	handle_exception
	 mov.l	@k2, k2

	.align	2
1:	.long	EXPEVT
2:	.long	INTEVT
3:	.long	ret_from_irq
4:	.long	ret_from_exception
738
!
! Common save path.  Entered on register bank 1 with k2 = event code
! and k3 = address the handler should return to.
	.align	2
handle_exception:
	! Using k0, k1 for scratch registers (r0_bank1, r1_bank),
	! save all registers onto stack.
	!
	stc	ssr, k0		! Is it from kernel space?
	shll	k0		! Check MD bit (bit30) by shifting it into...
	shll	k0		!       ...the T bit
	bt/s	1f		! It's a kernel to kernel transition.
	 mov	r15, k0		! save original stack to k0
	/* User space to kernel */
	mov	#0x20, k1
	shll8	k1		! k1 := 8192 (== THREAD_SIZE)
	add	current, k1
	mov	k1, r15		! change to kernel stack
	!
1:	mov	#-1, k4		! default TRA marker
	mov.l	2f, k1
	!
#ifdef CONFIG_SH_DSP
	mov.l	r2, @-r15	! Save r2, we need another reg
	stc	sr, k4
	mov.l	1f, r2
	tst	r2, k4		! Check if in DSP mode
	mov.l	@r15+, r2	! Restore r2 now
	bt/s	skip_save
	 mov	#0, k4		! Set marker for no stack frame

	mov	r2, k4		! Backup r2 (in k4) for later

	! Save DSP registers on stack
	stc.l	mod, @-r15
	stc.l	re, @-r15
	stc.l	rs, @-r15
	sts.l	dsr, @-r15
	sts.l	y1, @-r15
	sts.l	y0, @-r15
	sts.l	x1, @-r15
	sts.l	x0, @-r15
	sts.l	a0, @-r15

	! GAS is broken, does not generate correct "movs.l Ds,@-As" instr.

	! FIXME: Make sure that this is still the case with newer toolchains,
	! as we're not at all interested in supporting ancient toolchains at
	! this point. -- PFM.

	mov	r15, r2
	.word	0xf653		! movs.l	a1, @-r2
	.word	0xf6f3		! movs.l	a0g, @-r2
	.word	0xf6d3		! movs.l	a1g, @-r2
	.word	0xf6c3		! movs.l	m0, @-r2
	.word	0xf6e3		! movs.l	m1, @-r2
	mov	r2, r15

	mov	k4, r2		! Restore r2
	mov.l	1f, k4		! Force DSP stack frame
skip_save:
	mov.l	k4, @-r15	! Push DSP mode marker onto stack
#endif
	! Save the user registers on the stack.
	! (This order defines the OFF_* constants above.)
	mov.l	k2, @-r15	! EXPEVT
	mov.l	k4, @-r15	! set TRA (default: -1)
	!
	sts.l	macl, @-r15
	sts.l	mach, @-r15
	stc.l	gbr, @-r15
	stc.l	ssr, @-r15
	sts.l	pr, @-r15
	stc.l	spc, @-r15
	!
	lds	k3, pr		! Set the return address to pr
	!
	mov.l	k0, @-r15	! save orignal stack
	mov.l	r14, @-r15
	mov.l	r13, @-r15
	mov.l	r12, @-r15
	mov.l	r11, @-r15
	mov.l	r10, @-r15
	mov.l	r9, @-r15
	mov.l	r8, @-r15
	!
	stc	sr, r8		! Back to normal register bank, and
	or	k1, r8		! Block all interrupts
	mov.l	3f, k1
	and	k1, r8		! ...
	ldc	r8, sr		! ...changed here.
	!
	mov.l	r7, @-r15
	mov.l	r6, @-r15
	mov.l	r5, @-r15
	mov.l	r4, @-r15
	mov.l	r3, @-r15
	mov.l	r2, @-r15
	mov.l	r1, @-r15
	mov.l	r0, @-r15
	! Then, dispatch to the handler, according to the exception code.
	! Event codes are multiples of 0x20; >>5 gives the index, x4 for
	! the table stride, i.e. a net >>3.
	stc	k_ex_code, r8
	shlr2	r8
	shlr	r8
	mov.l	4f, r9
	add	r8, r9
	mov.l	@r9, r9
	jmp	@r9
	 nop

	.align	2
1:	.long	0x00001000	! DSP=1
2:	.long	0x000080f0	! FD=1, IMASK=15
3:	.long	0xcfffffff	! RB=0, BL=0
4:	.long	exception_handling_table
852
	.align	2
! Placeholder handler for unused exception slots: just return.
ENTRY(exception_none)
	rts
	 nop
857
	.data
! System call jump table, indexed by syscall number (r3 at trap time).
! Unimplemented/reserved slots point at sys_ni_syscall.  Keep in sync
! with NR_syscalls and <asm/unistd.h>.
ENTRY(sys_call_table)
	.long sys_ni_syscall	/* 0  -  old "setup()" system call*/
	.long sys_exit
	.long sys_fork
	.long sys_read
	.long sys_write
	.long sys_open		/* 5 */
	.long sys_close
	.long sys_waitpid
	.long sys_creat
	.long sys_link
	.long sys_unlink	/* 10 */
	.long sys_execve
	.long sys_chdir
	.long sys_time
	.long sys_mknod
	.long sys_chmod		/* 15 */
	.long sys_lchown16
	.long sys_ni_syscall	/* old break syscall holder */
	.long sys_stat
	.long sys_lseek
	.long sys_getpid	/* 20 */
	.long sys_mount
	.long sys_oldumount
	.long sys_setuid16
	.long sys_getuid16
	.long sys_stime		/* 25 */
	.long sys_ptrace
	.long sys_alarm
	.long sys_fstat
	.long sys_pause
	.long sys_utime		/* 30 */
	.long sys_ni_syscall	/* old stty syscall holder */
	.long sys_ni_syscall	/* old gtty syscall holder */
	.long sys_access
	.long sys_nice
	.long sys_ni_syscall	/* 35 */ /* old ftime syscall holder */
	.long sys_sync
	.long sys_kill
	.long sys_rename
	.long sys_mkdir
	.long sys_rmdir		/* 40 */
	.long sys_dup
	.long sys_pipe
	.long sys_times
	.long sys_ni_syscall	/* old prof syscall holder */
	.long sys_brk		/* 45 */
	.long sys_setgid16
	.long sys_getgid16
	.long sys_signal
	.long sys_geteuid16
	.long sys_getegid16	/* 50 */
	.long sys_acct
	.long sys_umount	/* recycled never used phys() */
	.long sys_ni_syscall	/* old lock syscall holder */
	.long sys_ioctl
	.long sys_fcntl		/* 55 */
	.long sys_ni_syscall	/* old mpx syscall holder */
	.long sys_setpgid
	.long sys_ni_syscall	/* old ulimit syscall holder */
	.long sys_ni_syscall	/* sys_olduname */
	.long sys_umask		/* 60 */
	.long sys_chroot
	.long sys_ustat
	.long sys_dup2
	.long sys_getppid
	.long sys_getpgrp	/* 65 */
	.long sys_setsid
	.long sys_sigaction
	.long sys_sgetmask
	.long sys_ssetmask
	.long sys_setreuid16	/* 70 */
	.long sys_setregid16
	.long sys_sigsuspend
	.long sys_sigpending
	.long sys_sethostname
	.long sys_setrlimit	/* 75 */
	.long sys_old_getrlimit
	.long sys_getrusage
	.long sys_gettimeofday
	.long sys_settimeofday
	.long sys_getgroups16	/* 80 */
	.long sys_setgroups16
	.long sys_ni_syscall	/* sys_oldselect */
	.long sys_symlink
	.long sys_lstat
	.long sys_readlink	/* 85 */
	.long sys_uselib
	.long sys_swapon
	.long sys_reboot
	.long old_readdir
	.long old_mmap		/* 90 */
	.long sys_munmap
	.long sys_truncate
	.long sys_ftruncate
	.long sys_fchmod
	.long sys_fchown16	/* 95 */
	.long sys_getpriority
	.long sys_setpriority
	.long sys_ni_syscall	/* old profil syscall holder */
	.long sys_statfs
	.long sys_fstatfs	/* 100 */
	.long sys_ni_syscall	/* ioperm */
	.long sys_socketcall
	.long sys_syslog
	.long sys_setitimer
	.long sys_getitimer	/* 105 */
	.long sys_newstat
	.long sys_newlstat
	.long sys_newfstat
	.long sys_uname
	.long sys_ni_syscall	/* 110 */ /* iopl */
	.long sys_vhangup
	.long sys_ni_syscall	/* idle */
	.long sys_ni_syscall	/* vm86old */
	.long sys_wait4
	.long sys_swapoff	/* 115 */
	.long sys_sysinfo
	.long sys_ipc
	.long sys_fsync
	.long sys_sigreturn
	.long sys_clone		/* 120 */
	.long sys_setdomainname
	.long sys_newuname
	.long sys_ni_syscall	/* sys_modify_ldt */
	.long sys_adjtimex
	.long sys_mprotect	/* 125 */
	.long sys_sigprocmask
	.long sys_ni_syscall	/* old "create_module" */
	.long sys_init_module
	.long sys_delete_module
	.long sys_ni_syscall	/* 130: old "get_kernel_syms" */
	.long sys_quotactl
	.long sys_getpgid
	.long sys_fchdir
	.long sys_bdflush
	.long sys_sysfs		/* 135 */
	.long sys_personality
	.long sys_ni_syscall	/* for afs_syscall */
	.long sys_setfsuid16
	.long sys_setfsgid16
	.long sys_llseek	/* 140 */
	.long sys_getdents
	.long sys_select
	.long sys_flock
	.long sys_msync
	.long sys_readv		/* 145 */
	.long sys_writev
	.long sys_getsid
	.long sys_fdatasync
	.long sys_sysctl
	.long sys_mlock		/* 150 */
	.long sys_munlock
	.long sys_mlockall
	.long sys_munlockall
	.long sys_sched_setparam
	.long sys_sched_getparam   /* 155 */
	.long sys_sched_setscheduler
	.long sys_sched_getscheduler
	.long sys_sched_yield
	.long sys_sched_get_priority_max
	.long sys_sched_get_priority_min  /* 160 */
	.long sys_sched_rr_get_interval
	.long sys_nanosleep
	.long sys_mremap
	.long sys_setresuid16
	.long sys_getresuid16	/* 165 */
	.long sys_ni_syscall	/* vm86 */
	.long sys_ni_syscall	/* old "query_module" */
	.long sys_poll
	.long sys_nfsservctl
	.long sys_setresgid16	/* 170 */
	.long sys_getresgid16
	.long sys_prctl
	.long sys_rt_sigreturn
	.long sys_rt_sigaction
	.long sys_rt_sigprocmask	/* 175 */
	.long sys_rt_sigpending
	.long sys_rt_sigtimedwait
	.long sys_rt_sigqueueinfo
	.long sys_rt_sigsuspend
	.long sys_pread_wrapper	   /* 180 */
	.long sys_pwrite_wrapper
	.long sys_chown16
	.long sys_getcwd
	.long sys_capget
	.long sys_capset	/* 185 */
	.long sys_sigaltstack
	.long sys_sendfile
	.long sys_ni_syscall	/* streams1 */
	.long sys_ni_syscall	/* streams2 */
	.long sys_vfork		/* 190 */
	.long sys_getrlimit
	.long sys_mmap2
	.long sys_truncate64
	.long sys_ftruncate64
	.long sys_stat64	/* 195 */
	.long sys_lstat64
	.long sys_fstat64
	.long sys_lchown
	.long sys_getuid
	.long sys_getgid	/* 200 */
	.long sys_geteuid
	.long sys_getegid
	.long sys_setreuid
	.long sys_setregid
	.long sys_getgroups	/* 205 */
	.long sys_setgroups
	.long sys_fchown
	.long sys_setresuid
	.long sys_getresuid
	.long sys_setresgid	/* 210 */
	.long sys_getresgid
	.long sys_chown
	.long sys_setuid
	.long sys_setgid
	.long sys_setfsuid	/* 215 */
	.long sys_setfsgid
	.long sys_pivot_root
	.long sys_mincore
	.long sys_madvise
	.long sys_getdents64	/* 220 */
	.long sys_fcntl64
	.long sys_ni_syscall	/* reserved for TUX */
	.long sys_ni_syscall	/* Reserved for Security */
	.long sys_gettid
	.long sys_readahead	/* 225 */
	.long sys_setxattr
	.long sys_lsetxattr
	.long sys_fsetxattr
	.long sys_getxattr
	.long sys_lgetxattr	/* 230 */
	.long sys_fgetxattr
	.long sys_listxattr
	.long sys_llistxattr
	.long sys_flistxattr
	.long sys_removexattr	/* 235 */
	.long sys_lremovexattr
	.long sys_fremovexattr
	.long sys_tkill
	.long sys_sendfile64
	.long sys_futex		/* 240 */
	.long sys_sched_setaffinity
	.long sys_sched_getaffinity
	.long sys_ni_syscall
	.long sys_ni_syscall
	.long sys_io_setup	/* 245 */
	.long sys_io_destroy
	.long sys_io_getevents
	.long sys_io_submit
	.long sys_io_cancel
	.long sys_fadvise64	/* 250 */
	.long sys_ni_syscall
	.long sys_exit_group
	.long sys_lookup_dcookie
	.long sys_epoll_create
	.long sys_epoll_ctl	/* 255 */
	.long sys_epoll_wait
	.long sys_remap_file_pages
	.long sys_set_tid_address
	.long sys_timer_create
	.long sys_timer_settime	/* 260 */
	.long sys_timer_gettime
	.long sys_timer_getoverrun
	.long sys_timer_delete
	.long sys_clock_settime
	.long sys_clock_gettime	/* 265 */
	.long sys_clock_getres
	.long sys_clock_nanosleep
	.long sys_statfs64
	.long sys_fstatfs64
	.long sys_tgkill	/* 270 */
	.long sys_utimes
	.long sys_fadvise64_64_wrapper
	.long sys_ni_syscall	/* Reserved for vserver */
	.long sys_ni_syscall	/* Reserved for mbind */
	.long sys_ni_syscall	/* 275 - get_mempolicy */
	.long sys_ni_syscall	/* set_mempolicy */
	.long sys_mq_open
	.long sys_mq_unlink
	.long sys_mq_timedsend
	.long sys_mq_timedreceive	/* 280 */
	.long sys_mq_notify
	.long sys_mq_getsetattr
	.long sys_ni_syscall	/* Reserved for kexec */
	.long sys_waitid
	.long sys_add_key	/* 285 */
	.long sys_request_key
	.long sys_keyctl

/* End of entry.S */
diff --git a/arch/sh/kernel/head.S b/arch/sh/kernel/head.S
new file mode 100644
index 000000000000..9b9e6ef626ce
--- /dev/null
+++ b/arch/sh/kernel/head.S
@@ -0,0 +1,76 @@
/* $Id: head.S,v 1.7 2003/09/01 17:58:19 lethal Exp $
 *
 *  arch/sh/kernel/head.S
 *
 *  Copyright (C) 1999, 2000  Niibe Yutaka & Kaz Kojima
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Head.S contains the SH exception handlers and startup code.
 */
#include <linux/linkage.h>

	.section	.empty_zero_page, "aw"
ENTRY(empty_zero_page)
	.long	1		/* MOUNT_ROOT_RDONLY */
	.long	0		/* RAMDISK_FLAGS */
	.long	0x0200		/* ORIG_ROOT_DEV */
	.long	1		/* LOADER_TYPE */
	.long	0x00360000	/* INITRD_START */
	.long	0x000a0000	/* INITRD_SIZE */
	.long	0
	! Pad the zero page out to a full 4096-byte page.
	.balign 4096,0,4096

	.text
/*
 * Condition at the entry of _stext:
 *
 *   BSC has already been initialized.
 *   INTC may or may not be initialized.
 *   VBR may or may not be initialized.
 *   MMU may or may not be initialized.
 *   Cache may or may not be initialized.
 *   Hardware (including on-chip modules) may or may not be initialized.
 *
 */
ENTRY(_stext)
	! Initialize Status Register
	! NOTE(review): 1f is 0x400080F0, which also sets FD=1 (FPU
	! disabled) in addition to what this comment lists.
	mov.l	1f, r0		! MD=1, RB=0, BL=0, IMASK=0xF
	ldc	r0, sr
	! Initialize global interrupt mask
	mov	#0, r0
	ldc	r0, r6_bank
	!
	mov.l	2f, r0
	mov	r0, r15		! Set initial r15 (stack pointer)
	mov	#0x20, r1	!
	shll8	r1		! r1 = 8192 (0x20 << 8, the thread size)
	sub	r1, r0		!
	ldc	r0, r7_bank	! ... and initial thread_info
	!
	! Additional CPU initialization
	mov.l	6f, r0
	jsr	@r0		! call sh_cpu_init()
	 nop
	! Clear BSS area.  The loop stores downward from _end; "add #4"
	! compensates for the delay-slot store executing one final time
	! after the terminating (failing) compare, so the word at
	! __bss_start itself is still cleared.
	mov.l	3f, r1
	add	#4, r1
	mov.l	4f, r2
	mov	#0, r0
9:	cmp/hs	r2, r1
	bf/s	9b		! while (r1 < r2)
	 mov.l	r0,@-r2
	! Start kernel
	mov.l	5f, r0
	jmp	@r0		! jump to start_kernel(); never returns
	 nop

	.balign 4
1:	.long	0x400080F0		! MD=1, RB=0, BL=0, FD=1, IMASK=0xF
2:	.long	stack
3:	.long	__bss_start
4:	.long	_end
5:	.long	start_kernel
6:	.long	sh_cpu_init
diff --git a/arch/sh/kernel/init_task.c b/arch/sh/kernel/init_task.c
new file mode 100644
index 000000000000..44053ea92936
--- /dev/null
+++ b/arch/sh/kernel/init_task.c
@@ -0,0 +1,36 @@
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/init_task.h>
#include <linux/mqueue.h>

#include <asm/uaccess.h>
#include <asm/pgtable.h>

/*
 * Statically-initialized bookkeeping structures for the very first
 * task (PID 0); everything else is cloned from these by fork().
 */
static struct fs_struct init_fs = INIT_FS;
static struct files_struct init_files = INIT_FILES;
static struct signal_struct init_signals = INIT_SIGNALS(init_signals);
static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
/* The kernel's own address space; exported for modules. */
struct mm_struct init_mm = INIT_MM(init_mm);

EXPORT_SYMBOL(init_mm);

/*
 * Initial thread structure.
 *
 * We need to make sure that this is 8192-byte aligned due to the
 * way process stacks are handled. This is done by having a special
 * "init_task" linker map entry..
 */
union thread_union init_thread_union
	__attribute__((__section__(".data.init_task"))) =
		{ INIT_THREAD_INFO(init_task) };

/*
 * Initial task structure.
 *
 * All other task structs will be allocated on slabs in fork.c
 */
struct task_struct init_task = INIT_TASK(init_task);

EXPORT_SYMBOL(init_task);
diff --git a/arch/sh/kernel/io.c b/arch/sh/kernel/io.c
new file mode 100644
index 000000000000..d9932f25993b
--- /dev/null
+++ b/arch/sh/kernel/io.c
@@ -0,0 +1,59 @@
1/*
2 * linux/arch/sh/kernel/io.c
3 *
4 * Copyright (C) 2000 Stuart Menefy
5 *
6 * Provide real functions which expand to whatever the header file defined.
7 * Also definitions of machine independent IO functions.
8 */
9
10#include <asm/io.h>
11#include <linux/module.h>
12
13/*
14 * Copy data from IO memory space to "real" memory space.
15 * This needs to be optimized.
16 */
/*
 * Copy 'count' bytes from I/O memory space into "real" memory.
 * Done one readb() at a time; could be optimized.
 */
void memcpy_fromio(void * to, unsigned long from, unsigned long count)
{
	char *dst = to;

	for (; count != 0; count--, dst++, from++)
		*dst = readb(from);
}
27
28/*
29 * Copy data from "real" memory space to IO memory space.
30 * This needs to be optimized.
31 */
/*
 * Copy 'count' bytes from "real" memory into I/O memory space.
 * Done one writeb() at a time; could be optimized.
 */
void memcpy_toio(unsigned long to, const void * from, unsigned long count)
{
	const char *src = from;

	for (; count != 0; count--, src++, to++)
		writeb(*src, to);
}
42
43/*
44 * "memset" on IO memory space.
45 * This needs to be optimized.
46 */
/*
 * "memset" on I/O memory space: store byte 'c' to 'count' successive
 * I/O addresses starting at 'dst' via writeb().
 */
void memset_io(unsigned long dst, int c, unsigned long count)
{
	for (; count != 0; count--, dst++)
		writeb(c, dst);
}
55
56EXPORT_SYMBOL(memcpy_fromio);
57EXPORT_SYMBOL(memcpy_toio);
58EXPORT_SYMBOL(memset_io);
59
diff --git a/arch/sh/kernel/io_generic.c b/arch/sh/kernel/io_generic.c
new file mode 100644
index 000000000000..a911b0149d1f
--- /dev/null
+++ b/arch/sh/kernel/io_generic.c
@@ -0,0 +1,243 @@
1/* $Id: io_generic.c,v 1.2 2003/05/04 19:29:53 lethal Exp $
2 *
3 * linux/arch/sh/kernel/io_generic.c
4 *
5 * Copyright (C) 2000 Niibe Yutaka
6 *
7 * Generic I/O routine. These can be used where a machine specific version
8 * is not required.
9 *
10 * This file is subject to the terms and conditions of the GNU General Public
11 * License. See the file "COPYING" in the main directory of this archive
12 * for more details.
13 *
14 */
15
16#include <asm/io.h>
17#include <asm/machvec.h>
18#include <linux/module.h>
19
20#if defined(CONFIG_CPU_SH3)
21/* I'm not sure SH7709 has this kind of bug */
22#define SH3_PCMCIA_BUG_WORKAROUND 1
23#define DUMMY_READ_AREA6 0xba000000
24#endif
25
26#define PORT2ADDR(x) (sh_mv.mv_isa_port2addr(x))
27
28unsigned long generic_io_base;
29
/*
 * Dummy read from 0xa0000000 (the uncached P2 area) to insert a small
 * bus-side pause; used by the *_p "pausing" port accessors below.
 */
static inline void delay(void)
{
	ctrl_inw(0xa0000000);
}
34
/* Read one byte from a port (address translated via the machvec). */
unsigned char generic_inb(unsigned long port)
{
	return *(volatile unsigned char*)PORT2ADDR(port);
}

/* Read one 16-bit word from a port. */
unsigned short generic_inw(unsigned long port)
{
	return *(volatile unsigned short*)PORT2ADDR(port);
}

/* Read one 32-bit longword from a port. */
unsigned int generic_inl(unsigned long port)
{
	return *(volatile unsigned long*)PORT2ADDR(port);
}
49
/* Byte port read followed by a small delay ("pausing" variant). */
unsigned char generic_inb_p(unsigned long port)
{
	unsigned long v = *(volatile unsigned char*)PORT2ADDR(port);

	delay();
	return v;
}

/* 16-bit port read followed by a small delay. */
unsigned short generic_inw_p(unsigned long port)
{
	unsigned long v = *(volatile unsigned short*)PORT2ADDR(port);

	delay();
	return v;
}

/* 32-bit port read followed by a small delay. */
unsigned int generic_inl_p(unsigned long port)
{
	unsigned long v = *(volatile unsigned long*)PORT2ADDR(port);

	delay();
	return v;
}
73
74/*
75 * insb/w/l all read a series of bytes/words/longs from a fixed port
76 * address. However as the port address doesn't change we only need to
77 * convert the port address to real address once.
78 */
79
/*
 * Read 'count' bytes from one fixed port into 'buffer'.  The port
 * address never changes, so it is translated just once up front.
 */
void generic_insb(unsigned long port, void *buffer, unsigned long count)
{
	volatile unsigned char *port_addr =
		(volatile unsigned char *)PORT2ADDR(port);
	unsigned char *dst = buffer;

	for (; count != 0; count--)
		*dst++ = *port_addr;
}
90
/* Read 'count' 16-bit words from one fixed port into 'buffer'. */
void generic_insw(unsigned long port, void *buffer, unsigned long count)
{
	volatile unsigned short *port_addr;
	unsigned short *buf=buffer;

	port_addr = (volatile unsigned short *)PORT2ADDR(port);

	while(count--)
		*buf++ = *port_addr;
#ifdef SH3_PCMCIA_BUG_WORKAROUND
	/* Dummy read from area 6 to work around the SH-3 PCMCIA bug. */
	ctrl_inb (DUMMY_READ_AREA6);
#endif
}

/* Read 'count' 32-bit longwords from one fixed port into 'buffer'. */
void generic_insl(unsigned long port, void *buffer, unsigned long count)
{
	volatile unsigned long *port_addr;
	unsigned long *buf=buffer;

	port_addr = (volatile unsigned long *)PORT2ADDR(port);

	while(count--)
		*buf++ = *port_addr;
#ifdef SH3_PCMCIA_BUG_WORKAROUND
	ctrl_inb (DUMMY_READ_AREA6);
#endif
}
118
/* Write one byte to a port (address translated via the machvec). */
void generic_outb(unsigned char b, unsigned long port)
{
	*(volatile unsigned char*)PORT2ADDR(port) = b;
}

/* Write one 16-bit word to a port. */
void generic_outw(unsigned short b, unsigned long port)
{
	*(volatile unsigned short*)PORT2ADDR(port) = b;
}

/* Write one 32-bit longword to a port. */
void generic_outl(unsigned int b, unsigned long port)
{
	*(volatile unsigned long*)PORT2ADDR(port) = b;
}

/* Byte port write followed by a small delay ("pausing" variant). */
void generic_outb_p(unsigned char b, unsigned long port)
{
	*(volatile unsigned char*)PORT2ADDR(port) = b;
	delay();
}

/* 16-bit port write followed by a small delay. */
void generic_outw_p(unsigned short b, unsigned long port)
{
	*(volatile unsigned short*)PORT2ADDR(port) = b;
	delay();
}

/* 32-bit port write followed by a small delay. */
void generic_outl_p(unsigned int b, unsigned long port)
{
	*(volatile unsigned long*)PORT2ADDR(port) = b;
	delay();
}
151
152/*
153 * outsb/w/l all write a series of bytes/words/longs to a fixed port
154 * address. However as the port address doesn't change we only need to
155 * convert the port address to real address once.
156 */
157
/*
 * Write 'count' bytes from 'buffer' to one fixed port.  The port
 * address never changes, so it is translated just once up front.
 */
void generic_outsb(unsigned long port, const void *buffer, unsigned long count)
{
	volatile unsigned char *port_addr =
		(volatile unsigned char *)PORT2ADDR(port);
	const unsigned char *src = buffer;

	for (; count != 0; count--)
		*port_addr = *src++;
}
168
/* Write 'count' 16-bit words from 'buffer' to one fixed port. */
void generic_outsw(unsigned long port, const void *buffer, unsigned long count)
{
	volatile unsigned short *port_addr;
	const unsigned short *buf=buffer;

	port_addr = (volatile unsigned short *)PORT2ADDR(port);

	while(count--)
		*port_addr = *buf++;

#ifdef SH3_PCMCIA_BUG_WORKAROUND
	/* Dummy read from area 6 to work around the SH-3 PCMCIA bug. */
	ctrl_inb (DUMMY_READ_AREA6);
#endif
}

/* Write 'count' 32-bit longwords from 'buffer' to one fixed port. */
void generic_outsl(unsigned long port, const void *buffer, unsigned long count)
{
	volatile unsigned long *port_addr;
	const unsigned long *buf=buffer;

	port_addr = (volatile unsigned long *)PORT2ADDR(port);

	while(count--)
		*port_addr = *buf++;

#ifdef SH3_PCMCIA_BUG_WORKAROUND
	ctrl_inb (DUMMY_READ_AREA6);
#endif
}
198
/* MMIO accessors: plain volatile loads/stores at 'addr'. */

unsigned char generic_readb(unsigned long addr)
{
	return *(volatile unsigned char*)addr;
}

unsigned short generic_readw(unsigned long addr)
{
	return *(volatile unsigned short*)addr;
}

unsigned int generic_readl(unsigned long addr)
{
	return *(volatile unsigned long*)addr;
}

void generic_writeb(unsigned char b, unsigned long addr)
{
	*(volatile unsigned char*)addr = b;
}

void generic_writew(unsigned short b, unsigned long addr)
{
	*(volatile unsigned short*)addr = b;
}

void generic_writel(unsigned int b, unsigned long addr)
{
	*(volatile unsigned long*)addr = b;
}
228
/*
 * "ioremap" for the generic machine: a physical address is simply
 * viewed through the uncached P2 segment — no page tables involved
 * (so 'size' is ignored).
 */
void * generic_ioremap(unsigned long offset, unsigned long size)
{
	return (void *) P2SEGADDR(offset);
}
EXPORT_SYMBOL(generic_ioremap);

/* Nothing to undo: the P2 mapping is static. */
void generic_iounmap(void *addr)
{
}
EXPORT_SYMBOL(generic_iounmap);

/* Default port-to-address translation: offset from the platform I/O base. */
unsigned long generic_isa_port2addr(unsigned long offset)
{
	return offset + generic_io_base;
}
diff --git a/arch/sh/kernel/irq.c b/arch/sh/kernel/irq.c
new file mode 100644
index 000000000000..54c171225b78
--- /dev/null
+++ b/arch/sh/kernel/irq.c
@@ -0,0 +1,106 @@
1/* $Id: irq.c,v 1.20 2004/01/13 05:52:11 kkojima Exp $
2 *
3 * linux/arch/sh/kernel/irq.c
4 *
5 * Copyright (C) 1992, 1998 Linus Torvalds, Ingo Molnar
6 *
7 *
8 * SuperH version: Copyright (C) 1999 Niibe Yutaka
9 */
10
11/*
12 * IRQs are in fact implemented a bit like signal handlers for the kernel.
13 * Naturally it's not a 1:1 relation, but there are similarities.
14 */
15
#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/ptrace.h>
#include <linux/errno.h>
#include <linux/kernel_stat.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/timex.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/random.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/init.h>
#include <linux/seq_file.h>
#include <linux/kallsyms.h>
#include <linux/bitops.h>

#include <asm/system.h>
#include <asm/io.h>
#include <asm/pgalloc.h>
#include <asm/delay.h>
#include <asm/irq.h>
#include <linux/irq.h>
42
43
44/*
45 * 'what should we do if we get a hw irq event on an illegal vector'.
46 * each architecture has to answer this themselves, it doesn't deserve
47 * a generic callback i think.
48 */
49void ack_bad_irq(unsigned int irq)
50{
51 printk("unexpected IRQ trap at vector %02x\n", irq);
52}
53
#if defined(CONFIG_PROC_FS)
/*
 * /proc/interrupts seq_file callback: for row 0 print the CPU header,
 * then one line per populated IRQ showing its count, the controller
 * ("handler") type name and the names of all chained actions.
 */
int show_interrupts(struct seq_file *p, void *v)
{
	int i = *(loff_t *) v, j;
	struct irqaction * action;
	unsigned long flags;

	if (i == 0) {
		seq_puts(p, " ");
		for (j=0; j<NR_CPUS; j++)
			if (cpu_online(j))
				seq_printf(p, "CPU%d ",j);
		seq_putc(p, '\n');
	}

	if (i < ACTUAL_NR_IRQS) {
		/* Lock out concurrent setup_irq()/free_irq() on this line. */
		spin_lock_irqsave(&irq_desc[i].lock, flags);
		action = irq_desc[i].action;
		if (!action)
			goto unlock;	/* nothing registered on this IRQ */
		seq_printf(p, "%3d: ",i);
		seq_printf(p, "%10u ", kstat_irqs(i));
		seq_printf(p, " %14s", irq_desc[i].handler->typename);
		seq_printf(p, " %s", action->name);

		for (action=action->next; action; action = action->next)
			seq_printf(p, ", %s", action->name);
		seq_putc(p, '\n');
unlock:
		spin_unlock_irqrestore(&irq_desc[i].lock, flags);
	}
	return 0;
}
#endif
88
/*
 * Common interrupt entry point, called from the assembly stub.
 * r4-r7 are the (unused here) register-passed arguments; the saved
 * trap frame follows on the stack as 'regs'.
 */
asmlinkage int do_IRQ(unsigned long r4, unsigned long r5,
		      unsigned long r6, unsigned long r7,
		      struct pt_regs regs)
{
	int irq;

	irq_enter();
	/*
	 * Recover the IRQ number from the event code the exception
	 * prologue stashed in r2_bank: irq = (code >> 5) - 16
	 * (NOTE(review): presumably INTEVT — confirm against entry.S).
	 */
	asm volatile("stc r2_bank, %0\n\t"
		     "shlr2 %0\n\t"
		     "shlr2 %0\n\t"
		     "shlr %0\n\t"
		     "add #-16, %0\n\t"
		     :"=z" (irq));
	/* Let the board demultiplex/remap the hardware vector. */
	irq = irq_demux(irq);
	__do_IRQ(irq, &regs);
	irq_exit();
	return 1;
}
diff --git a/arch/sh/kernel/kgdb_jmp.S b/arch/sh/kernel/kgdb_jmp.S
new file mode 100644
index 000000000000..339bb1d7ff0b
--- /dev/null
+++ b/arch/sh/kernel/kgdb_jmp.S
@@ -0,0 +1,33 @@
#include <linux/linkage.h>

/*
 * Minimal setjmp/longjmp for the in-kernel GDB stub.  The jmp_buf
 * (passed in r4) holds the callee-saved registers r8-r15 plus pr:
 * nine 32-bit slots in total.
 */
ENTRY(setjmp)
	! Fill the buffer from the top downward (predecrement stores).
	add	#(9*4), r4
	sts.l	pr, @-r4
	mov.l	r15, @-r4
	mov.l	r14, @-r4
	mov.l	r13, @-r4
	mov.l	r12, @-r4
	mov.l	r11, @-r4
	mov.l	r10, @-r4
	mov.l	r9, @-r4
	mov.l	r8, @-r4
	rts
	 mov	#0, r0		! a direct call returns 0

ENTRY(longjmp)
	! Reload in the opposite order (postincrement from the bottom).
	mov.l	@r4+, r8
	mov.l	@r4+, r9
	mov.l	@r4+, r10
	mov.l	@r4+, r11
	mov.l	@r4+, r12
	mov.l	@r4+, r13
	mov.l	@r4+, r14
	mov.l	@r4+, r15
	lds.l	@r4+, pr
	mov	r5, r0		! value the revived setjmp will return
	tst	r0, r0
	bf	1f
	mov	#1, r0		! in case val==0: setjmp must not return 0 here
1:	rts
	 nop
diff --git a/arch/sh/kernel/kgdb_stub.c b/arch/sh/kernel/kgdb_stub.c
new file mode 100644
index 000000000000..42638b92b51c
--- /dev/null
+++ b/arch/sh/kernel/kgdb_stub.c
@@ -0,0 +1,1491 @@
1/*
2 * May be copied or modified under the terms of the GNU General Public
3 * License. See linux/COPYING for more information.
4 *
 5 * Contains extracts from code by Glenn Engel, Jim Kingdon,
6 * David Grothe <dave@gcom.com>, Tigran Aivazian <tigran@sco.com>,
7 * Amit S. Kale <akale@veritas.com>, William Gatliff <bgat@open-widgets.com>,
8 * Ben Lee, Steve Chamberlain and Benoit Miller <fulg@iname.com>.
9 *
10 * This version by Henry Bell <henry.bell@st.com>
11 * Minor modifications by Jeremy Siegel <jsiegel@mvista.com>
12 *
13 * Contains low-level support for remote debug using GDB.
14 *
15 * To enable debugger support, two things need to happen. A call to
16 * set_debug_traps() is necessary in order to allow any breakpoints
17 * or error conditions to be properly intercepted and reported to gdb.
18 * A breakpoint also needs to be generated to begin communication. This
19 * is most easily accomplished by a call to breakpoint() which does
20 * a trapa if the initialisation phase has been successfully completed.
21 *
22 * In this case, set_debug_traps() is not used to "take over" exceptions;
23 * other kernel code is modified instead to enter the kgdb functions here
24 * when appropriate (see entry.S for breakpoint traps and NMI interrupts,
25 * see traps.c for kernel error exceptions).
26 *
27 * The following gdb commands are supported:
28 *
29 * Command Function Return value
30 *
31 * g return the value of the CPU registers hex data or ENN
32 * G set the value of the CPU registers OK or ENN
33 *
34 * mAA..AA,LLLL Read LLLL bytes at address AA..AA hex data or ENN
35 * MAA..AA,LLLL: Write LLLL bytes at address AA.AA OK or ENN
36 * XAA..AA,LLLL: Same, but data is binary (not hex) OK or ENN
37 *
38 * c Resume at current address SNN ( signal NN)
39 * cAA..AA Continue at address AA..AA SNN
40 * CNN; Resume at current address with signal SNN
41 * CNN;AA..AA Resume at address AA..AA with signal SNN
42 *
43 * s Step one instruction SNN
44 * sAA..AA Step one instruction from AA..AA SNN
45 * SNN; Step one instruction with signal SNN
46 * SNNAA..AA Step one instruction from AA..AA w/NN SNN
47 *
48 * k kill (Detach GDB)
49 *
50 * d Toggle debug flag
51 * D Detach GDB
52 *
53 * Hct Set thread t for operations, OK or ENN
54 * c = 'c' (step, cont), c = 'g' (other
55 * operations)
56 *
57 * qC Query current thread ID QCpid
58 * qfThreadInfo Get list of current threads (first) m<id>
59 * qsThreadInfo " " " " " (subsequent)
60 * qOffsets Get section offsets Text=x;Data=y;Bss=z
61 *
62 * TXX Find if thread XX is alive OK or ENN
63 * ? What was the last sigval ? SNN (signal NN)
64 * O Output to GDB console
65 *
66 * Remote communication protocol.
67 *
68 * A debug packet whose contents are <data> is encapsulated for
69 * transmission in the form:
70 *
71 * $ <data> # CSUM1 CSUM2
72 *
73 * <data> must be ASCII alphanumeric and cannot include characters
74 * '$' or '#'. If <data> starts with two characters followed by
75 * ':', then the existing stubs interpret this as a sequence number.
76 *
77 * CSUM1 and CSUM2 are ascii hex representation of an 8-bit
78 * checksum of <data>, the most significant nibble is sent first.
79 * the hex digits 0-9,a-f are used.
80 *
81 * Receiver responds with:
82 *
83 * + - if CSUM is correct and ready for next packet
84 * - - if CSUM is incorrect
85 *
86 * Responses can be run-length encoded to save space. A '*' means that
87 * the next character is an ASCII encoding giving a repeat count which
 88 * stands for that many repetitions of the character preceding the '*'.
89 * The encoding is n+29, yielding a printable character where n >=3
90 * (which is where RLE starts to win). Don't use an n > 126.
91 *
92 * So "0* " means the same as "0000".
93 */
94
95#include <linux/string.h>
96#include <linux/kernel.h>
97#include <linux/sched.h>
98#include <linux/smp.h>
99#include <linux/spinlock.h>
100#include <linux/delay.h>
101#include <linux/linkage.h>
102#include <linux/init.h>
103
104#include <asm/system.h>
105#include <asm/current.h>
106#include <asm/signal.h>
107#include <asm/pgtable.h>
108#include <asm/ptrace.h>
109#include <asm/kgdb.h>
110
111#ifdef CONFIG_SH_KGDB_CONSOLE
112#include <linux/console.h>
113#endif
114
115/* Function pointers for linkage */
116kgdb_debug_hook_t *kgdb_debug_hook;
117kgdb_bus_error_hook_t *kgdb_bus_err_hook;
118
119int (*kgdb_getchar)(void);
120void (*kgdb_putchar)(int);
121
/* Emit one character on the debugger link, if a backend is hooked up. */
static void put_debug_char(int c)
{
	if (!kgdb_putchar)
		return;
	(*kgdb_putchar)(c);
}

/* Fetch one character from the debugger link; -1 if no backend. */
static int get_debug_char(void)
{
	if (!kgdb_getchar)
		return -1;
	return (*kgdb_getchar)();
}
134
135/* Num chars in in/out bound buffers, register packets need NUMREGBYTES * 2 */
136#define BUFMAX 1024
137#define NUMREGBYTES (MAXREG*4)
138#define OUTBUFMAX (NUMREGBYTES*2+512)
139
140enum regs {
141 R0 = 0, R1, R2, R3, R4, R5, R6, R7,
142 R8, R9, R10, R11, R12, R13, R14, R15,
143 PC, PR, GBR, VBR, MACH, MACL, SR,
144 /* */
145 MAXREG
146};
147
148static unsigned int registers[MAXREG];
149struct kgdb_regs trap_registers;
150
151char kgdb_in_gdb_mode;
152char in_nmi; /* Set during NMI to prevent reentry */
153int kgdb_nofault; /* Boolean to ignore bus errs (i.e. in GDB) */
154int kgdb_enabled = 1; /* Default to enabled, cmdline can disable */
155int kgdb_halt;
156
157/* Exposed for user access */
158struct task_struct *kgdb_current;
159unsigned int kgdb_g_imask;
160int kgdb_trapa_val;
161int kgdb_excode;
162
163/* Default values for SCI (can override via kernel args in setup.c) */
164#ifndef CONFIG_KGDB_DEFPORT
165#define CONFIG_KGDB_DEFPORT 1
166#endif
167
168#ifndef CONFIG_KGDB_DEFBAUD
169#define CONFIG_KGDB_DEFBAUD 115200
170#endif
171
172#if defined(CONFIG_KGDB_DEFPARITY_E)
173#define CONFIG_KGDB_DEFPARITY 'E'
174#elif defined(CONFIG_KGDB_DEFPARITY_O)
175#define CONFIG_KGDB_DEFPARITY 'O'
176#else /* CONFIG_KGDB_DEFPARITY_N */
177#define CONFIG_KGDB_DEFPARITY 'N'
178#endif
179
180#ifdef CONFIG_KGDB_DEFBITS_7
181#define CONFIG_KGDB_DEFBITS '7'
182#else /* CONFIG_KGDB_DEFBITS_8 */
183#define CONFIG_KGDB_DEFBITS '8'
184#endif
185
186/* SCI/UART settings, used in kgdb_console_setup() */
187int kgdb_portnum = CONFIG_KGDB_DEFPORT;
188int kgdb_baud = CONFIG_KGDB_DEFBAUD;
189char kgdb_parity = CONFIG_KGDB_DEFPARITY;
190char kgdb_bits = CONFIG_KGDB_DEFBITS;
191
192/* Jump buffer for setjmp/longjmp */
193static jmp_buf rem_com_env;
194
195/* TRA differs sh3/4 */
196#if defined(CONFIG_CPU_SH3)
197#define TRA 0xffffffd0
198#elif defined(CONFIG_CPU_SH4)
199#define TRA 0xff000020
200#endif
201
202/* Macros for single step instruction identification */
203#define OPCODE_BT(op) (((op) & 0xff00) == 0x8900)
204#define OPCODE_BF(op) (((op) & 0xff00) == 0x8b00)
205#define OPCODE_BTF_DISP(op) (((op) & 0x80) ? (((op) | 0xffffff80) << 1) : \
206 (((op) & 0x7f ) << 1))
207#define OPCODE_BFS(op) (((op) & 0xff00) == 0x8f00)
208#define OPCODE_BTS(op) (((op) & 0xff00) == 0x8d00)
209#define OPCODE_BRA(op) (((op) & 0xf000) == 0xa000)
210#define OPCODE_BRA_DISP(op) (((op) & 0x800) ? (((op) | 0xfffff800) << 1) : \
211 (((op) & 0x7ff) << 1))
212#define OPCODE_BRAF(op) (((op) & 0xf0ff) == 0x0023)
213#define OPCODE_BRAF_REG(op) (((op) & 0x0f00) >> 8)
214#define OPCODE_BSR(op) (((op) & 0xf000) == 0xb000)
215#define OPCODE_BSR_DISP(op) (((op) & 0x800) ? (((op) | 0xfffff800) << 1) : \
216 (((op) & 0x7ff) << 1))
217#define OPCODE_BSRF(op) (((op) & 0xf0ff) == 0x0003)
218#define OPCODE_BSRF_REG(op) (((op) >> 8) & 0xf)
219#define OPCODE_JMP(op) (((op) & 0xf0ff) == 0x402b)
220#define OPCODE_JMP_REG(op) (((op) >> 8) & 0xf)
221#define OPCODE_JSR(op) (((op) & 0xf0ff) == 0x400b)
222#define OPCODE_JSR_REG(op) (((op) >> 8) & 0xf)
223#define OPCODE_RTS(op) ((op) == 0xb)
224#define OPCODE_RTE(op) ((op) == 0x2b)
225
226#define SR_T_BIT_MASK 0x1
227#define STEP_OPCODE 0xc320
228#define BIOS_CALL_TRAP 0x3f
229
230/* Exception codes as per SH-4 core manual */
231#define ADDRESS_ERROR_LOAD_VEC 7
232#define ADDRESS_ERROR_STORE_VEC 8
233#define TRAP_VEC 11
234#define INVALID_INSN_VEC 12
235#define INVALID_SLOT_VEC 13
236#define NMI_VEC 14
237#define USER_BREAK_VEC 15
238#define SERIAL_BREAK_VEC 58
239
240/* Misc static */
241static int stepped_address;
242static short stepped_opcode;
243static const char hexchars[] = "0123456789abcdef";
244static char in_buffer[BUFMAX];
245static char out_buffer[OUTBUFMAX];
246
247static void kgdb_to_gdb(const char *s);
248
249#ifdef CONFIG_KGDB_THREAD
250static struct task_struct *trapped_thread;
251static struct task_struct *current_thread;
252typedef unsigned char threadref[8];
253#define BUF_THREAD_ID_SIZE 16
254#endif
255
/* Read the 32-bit value at 'addr' through a volatile access. */
static inline unsigned int ctrl_inl(const unsigned long addr)
{
	volatile unsigned long *reg = (volatile unsigned long *) addr;

	return *reg;
}
261
/* Store 'b' to the 32-bit location 'addr' through a volatile access. */
static inline void ctrl_outl(const unsigned int b, unsigned long addr)
{
	volatile unsigned long *reg = (volatile unsigned long *) addr;

	*reg = b;
}
267
/* ASCII hex digit for the high nibble of x. */
static char highhex(const int x)
{
	return hexchars[(x >> 4) & 0xf];
}

/* ASCII hex digit for the low nibble of x. */
static char lowhex(const int x)
{
	return hexchars[x & 0xf];
}
279
/* Convert one ASCII hex digit to its value; -1 if ch is not hex. */
static int hex(const char ch)
{
	if (ch >= '0' && ch <= '9')
		return ch - '0';
	if (ch >= 'a' && ch <= 'f')
		return ch - 'a' + 10;
	if (ch >= 'A' && ch <= 'F')
		return ch - 'A' + 10;
	return -1;
}
291
/* Convert the memory pointed to by mem into hex, placing result in buf.
   Returns a pointer to the last char put in buf (null).
   Aligned 2- and 4-byte requests are fetched with a single access
   first, so device registers are not read one byte at a time. */
static char *mem_to_hex(const char *mem, char *buf, const int count)
{
	int i;
	int ch;
	unsigned short s_val;
	unsigned long l_val;

	/* Check for 16 or 32 */
	if (count == 2 && ((long) mem & 1) == 0) {
		s_val = *(unsigned short *) mem;
		mem = (char *) &s_val;	/* then hexify the local copy */
	} else if (count == 4 && ((long) mem & 3) == 0) {
		l_val = *(unsigned long *) mem;
		mem = (char *) &l_val;
	}
	for (i = 0; i < count; i++) {
		ch = *mem++;
		*buf++ = highhex(ch);
		*buf++ = lowhex(ch);
	}
	*buf = 0;	/* NUL-terminate the hex string */
	return (buf);
}
317
/* Convert the hex array pointed to by buf into binary, to be placed in mem.
   Return a pointer to the character after the last byte written */
static char *hex_to_mem(const char *buf, char *mem, const int count)
{
	int i;

	for (i = 0; i < count; i++) {
		unsigned char byte;

		byte = hex(*buf++) << 4;	/* high nibble first */
		byte = byte + hex(*buf++);
		*mem++ = byte;
	}
	return (mem);
}
332
/* While finding valid hex chars, convert to an integer, then return it.
   *ptr is advanced past the digits consumed; *int_value receives the
   accumulated value; the return value is the number of digits seen. */
static int hex_to_int(char **ptr, int *int_value)
{
	int num_chars = 0;
	int hex_value;

	*int_value = 0;

	while (**ptr) {
		hex_value = hex(**ptr);
		if (hex_value >= 0) {
			*int_value = (*int_value << 4) | hex_value;
			num_chars++;
		} else
			break;	/* first non-hex char ends the number */
		(*ptr)++;
	}
	return num_chars;
}
352
/* Copy the binary array pointed to by buf into mem.  Fix $, #,
   and 0x7d escaped with 0x7d ("escape char, then byte XOR 0x20").
   count is the number of OUTPUT bytes to produce.  Return a pointer
   to the character after the last byte written. */
static char *ebin_to_mem(const char *buf, char *mem, int count)
{
	while (count > 0) {
		char c = *buf;

		if (c == 0x7d) {
			buf++;
			c = *buf ^ 0x20;
		}
		*mem++ = c;
		buf++;
		count--;
	}
	return mem;
}
366
/* Pack a hex byte: append two hex digits for 'byte' to pkt and
   return the advanced pointer. */
static char *pack_hex_byte(char *pkt, int byte)
{
	*pkt++ = hexchars[(byte >> 4) & 0xf];
	*pkt++ = hexchars[(byte & 0xf)];
	return pkt;
}
374
375#ifdef CONFIG_KGDB_THREAD
376
/* Pack a thread ID: hex-encode all 8 bytes of *id (16 output chars)
   into pkt and return the advanced pointer. */
static char *pack_threadid(char *pkt, threadref * id)
{
	char *limit;
	unsigned char *altid;

	altid = (unsigned char *) id;

	limit = pkt + BUF_THREAD_ID_SIZE;
	while (pkt < limit)
		pkt = pack_hex_byte(pkt, *altid++);
	return pkt;
}
390
/* Convert an integer into our threadref: zero the first four bytes,
   then store 'value' big-endian in the last four. */
static void int_to_threadref(threadref * id, const int value)
{
	unsigned char *scan = (unsigned char *) id;
	int i = 4;

	while (i--)
		*scan++ = 0;

	*scan++ = (value >> 24) & 0xff;
	*scan++ = (value >> 16) & 0xff;
	*scan++ = (value >> 8) & 0xff;
	*scan++ = (value & 0xff);
}
405
/* Return a task structure ptr for a particular pid, or NULL if no
   such task exists.  Falls back to walking the task list for tasks
   the pid hash does not find. */
static struct task_struct *get_thread(int pid)
{
	/* Use PID_MAX w/gdb for pid 0 */
	struct task_struct *thread;

	if (pid == PID_MAX) pid = 0;

	/* First check via PID */
	thread = find_task_by_pid(pid);

	if (thread)
		return thread;

	/* Start at the start */
	thread = init_tasks[0];

	/* Walk along the linked list of tasks */
	do {
		if (thread->pid == pid)
			return thread;
		thread = thread->next_task;
	} while (thread != init_tasks[0]);

	return NULL;
}
432
433#endif /* CONFIG_KGDB_THREAD */
434
/* Scan for the start char '$', read the packet and check the checksum.
   Protocol: "$<data>#CSUM1CSUM2".  On checksum failure a '-' (NAK) is
   sent and the whole receive is retried; on success a '+' (ACK) is
   sent.  The payload is NUL-terminated into 'buffer'. */
static void get_packet(char *buffer, int buflen)
{
	unsigned char checksum;
	unsigned char xmitcsum;
	int i;
	int count;
	char ch;

	do {
		/* Ignore everything until the start character */
		while ((ch = get_debug_char()) != '$');

		checksum = 0;
		xmitcsum = -1;	/* forces a retry if no '#' ever arrives */
		count = 0;

		/* Now, read until a # or end of buffer is found */
		while (count < (buflen - 1)) {
			ch = get_debug_char();

			if (ch == '#')
				break;

			checksum = checksum + ch;
			buffer[count] = ch;
			count = count + 1;
		}

		buffer[count] = 0;

		/* Continue to read checksum following # */
		if (ch == '#') {
			xmitcsum = hex(get_debug_char()) << 4;
			xmitcsum += hex(get_debug_char());

			/* Checksum */
			if (checksum != xmitcsum)
				put_debug_char('-');	/* Failed checksum */
			else {
				/* Ack successful transfer */
				put_debug_char('+');

				/* If a sequence char is present, reply
				   the sequence ID */
				if (buffer[2] == ':') {
					put_debug_char(buffer[0]);
					put_debug_char(buffer[1]);

					/* Remove sequence chars from buffer */
					count = strlen(buffer);
					for (i = 3; i <= count; i++)
						buffer[i - 3] = buffer[i];
				}
			}
		}
	}
	while (checksum != xmitcsum);	/* Keep trying while we fail */
}
494
/* Send the packet in the buffer with run-length encoding.
   Format: "$<data>#CSUM1CSUM2"; runs of 4+ identical chars are sent
   as "<char>*<count+29>".  The whole packet is resent until the
   remote end ACKs with '+'. */
static void put_packet(char *buffer)
{
	int checksum;
	char *src;
	int runlen;
	int encode;

	do {
		src = buffer;
		put_debug_char('$');
		checksum = 0;

		/* Continue while we still have chars left */
		while (*src) {
			/* Check for runs up to 99 chars long */
			for (runlen = 1; runlen < 99; runlen++) {
				if (src[0] != src[runlen])
					break;
			}

			if (runlen > 3) {
				/* Got a useful amount, send encoding */
				encode = runlen + ' ' - 4;
				put_debug_char(*src); checksum += *src;
				put_debug_char('*'); checksum += '*';
				put_debug_char(encode); checksum += encode;
				src += runlen;
			} else {
				/* Otherwise just send the current char */
				put_debug_char(*src); checksum += *src;
				src += 1;
			}
		}

		/* '#' Separator, put high and low components of checksum */
		put_debug_char('#');
		put_debug_char(highhex(checksum));
		put_debug_char(lowhex(checksum));
	}
	while ((get_debug_char()) != '+');	/* While no ack */
}
537
/* A bus error has occurred - perform a longjmp to return execution and
   allow handling of the error (rem_com_env is presumably armed by a
   setjmp in the command loop before risky memory accesses — confirm
   at the setjmp site). */
static void kgdb_handle_bus_error(void)
{
	longjmp(rem_com_env, 1);
}
544
/* Translate SH-3/4 exception numbers to unix-like signal values so
   GDB can report why the target stopped. */
static int compute_signal(const int excep_code)
{
	int sigval;

	switch (excep_code) {

	case INVALID_INSN_VEC:
	case INVALID_SLOT_VEC:
		sigval = SIGILL;
		break;
	case ADDRESS_ERROR_LOAD_VEC:
	case ADDRESS_ERROR_STORE_VEC:
		sigval = SIGSEGV;
		break;

	case SERIAL_BREAK_VEC:
	case NMI_VEC:
		sigval = SIGINT;
		break;

	case USER_BREAK_VEC:
	case TRAP_VEC:
		sigval = SIGTRAP;
		break;

	default:
		sigval = SIGBUS;	/* "software generated" */
		break;
	}

	return (sigval);
}
578
/* Make a local copy of the registers passed into the handler (bletch),
   laid out in the order GDB expects (see enum regs). */
static void kgdb_regs_to_gdb_regs(const struct kgdb_regs *regs,
				  int *gdb_regs)
{
	gdb_regs[R0] = regs->regs[R0];
	gdb_regs[R1] = regs->regs[R1];
	gdb_regs[R2] = regs->regs[R2];
	gdb_regs[R3] = regs->regs[R3];
	gdb_regs[R4] = regs->regs[R4];
	gdb_regs[R5] = regs->regs[R5];
	gdb_regs[R6] = regs->regs[R6];
	gdb_regs[R7] = regs->regs[R7];
	gdb_regs[R8] = regs->regs[R8];
	gdb_regs[R9] = regs->regs[R9];
	gdb_regs[R10] = regs->regs[R10];
	gdb_regs[R11] = regs->regs[R11];
	gdb_regs[R12] = regs->regs[R12];
	gdb_regs[R13] = regs->regs[R13];
	gdb_regs[R14] = regs->regs[R14];
	gdb_regs[R15] = regs->regs[R15];
	gdb_regs[PC] = regs->pc;
	gdb_regs[PR] = regs->pr;
	gdb_regs[GBR] = regs->gbr;
	gdb_regs[MACH] = regs->mach;
	gdb_regs[MACL] = regs->macl;
	gdb_regs[SR] = regs->sr;
	gdb_regs[VBR] = regs->vbr;
}

/* Copy local gdb registers back to kgdb regs, for later copy to kernel
   (exact inverse of kgdb_regs_to_gdb_regs()). */
static void gdb_regs_to_kgdb_regs(const int *gdb_regs,
				  struct kgdb_regs *regs)
{
	regs->regs[R0] = gdb_regs[R0];
	regs->regs[R1] = gdb_regs[R1];
	regs->regs[R2] = gdb_regs[R2];
	regs->regs[R3] = gdb_regs[R3];
	regs->regs[R4] = gdb_regs[R4];
	regs->regs[R5] = gdb_regs[R5];
	regs->regs[R6] = gdb_regs[R6];
	regs->regs[R7] = gdb_regs[R7];
	regs->regs[R8] = gdb_regs[R8];
	regs->regs[R9] = gdb_regs[R9];
	regs->regs[R10] = gdb_regs[R10];
	regs->regs[R11] = gdb_regs[R11];
	regs->regs[R12] = gdb_regs[R12];
	regs->regs[R13] = gdb_regs[R13];
	regs->regs[R14] = gdb_regs[R14];
	regs->regs[R15] = gdb_regs[R15];
	regs->pc = gdb_regs[PC];
	regs->pr = gdb_regs[PR];
	regs->gbr = gdb_regs[GBR];
	regs->mach = gdb_regs[MACH];
	regs->macl = gdb_regs[MACL];
	regs->sr = gdb_regs[SR];
	regs->vbr = gdb_regs[VBR];
}
636
#ifdef CONFIG_KGDB_THREAD
/* Make a local copy of registers from the specified thread */
/*
 * Fill gdb_regs[] for a thread that is NOT the one that trapped.  There
 * are two cases, distinguished by thread->thread.pc:
 *
 *  - a freshly forked task still parked on ret_from_fork has a full
 *    pt_regs frame at thread->thread.sp, so all registers are available;
 *  - any other sleeping task only has the callee-saved state pushed by
 *    switch_to() on its kernel stack, so the remaining slots stay zero.
 *
 * NOTE(review): the pop order below (r14 down to r8, then pr, gbr) is
 * assumed to match the switch_to() stack layout — verify against the
 * context-switch code if that ever changes.
 */
asmlinkage void ret_from_fork(void);
static void thread_regs_to_gdb_regs(const struct task_struct *thread,
				    int *gdb_regs)
{
	int regno;
	int *tregs;

	/* Initialize to zero */
	for (regno = 0; regno < MAXREG; regno++)
		gdb_regs[regno] = 0;

	/* Just making sure... */
	if (thread == NULL)
		return;

	/* A new fork has pt_regs on the stack from a fork() call */
	if (thread->thread.pc == (unsigned long)ret_from_fork) {

		int vbr_val;
		struct pt_regs *kregs;
		kregs = (struct pt_regs*)thread->thread.sp;

		gdb_regs[R0] = kregs->regs[R0];
		gdb_regs[R1] = kregs->regs[R1];
		gdb_regs[R2] = kregs->regs[R2];
		gdb_regs[R3] = kregs->regs[R3];
		gdb_regs[R4] = kregs->regs[R4];
		gdb_regs[R5] = kregs->regs[R5];
		gdb_regs[R6] = kregs->regs[R6];
		gdb_regs[R7] = kregs->regs[R7];
		gdb_regs[R8] = kregs->regs[R8];
		gdb_regs[R9] = kregs->regs[R9];
		gdb_regs[R10] = kregs->regs[R10];
		gdb_regs[R11] = kregs->regs[R11];
		gdb_regs[R12] = kregs->regs[R12];
		gdb_regs[R13] = kregs->regs[R13];
		gdb_regs[R14] = kregs->regs[R14];
		gdb_regs[R15] = kregs->regs[R15];
		gdb_regs[PC] = kregs->pc;
		gdb_regs[PR] = kregs->pr;
		gdb_regs[GBR] = kregs->gbr;
		gdb_regs[MACH] = kregs->mach;
		gdb_regs[MACL] = kregs->macl;
		gdb_regs[SR] = kregs->sr;

		/* VBR is global, read the live value from hardware */
		asm("stc vbr, %0":"=r"(vbr_val));
		gdb_regs[VBR] = vbr_val;
		return;
	}

	/* Otherwise, we have only some registers from switch_to() */
	tregs = (int *)thread->thread.sp;
	gdb_regs[R15] = (int)tregs;
	gdb_regs[R14] = *tregs++;
	gdb_regs[R13] = *tregs++;
	gdb_regs[R12] = *tregs++;
	gdb_regs[R11] = *tregs++;
	gdb_regs[R10] = *tregs++;
	gdb_regs[R9] = *tregs++;
	gdb_regs[R8] = *tregs++;
	gdb_regs[PR] = *tregs++;
	gdb_regs[GBR] = *tregs++;
	gdb_regs[PC] = thread->thread.pc;
}
#endif /* CONFIG_KGDB_THREAD */
704
/* Calculate the new address for after a step */
/*
 * Decode the instruction at trap_registers.pc and compute where control
 * will go next, so do_single_step() can plant a trap there.  Handles
 * every SH branch form (conditional, delayed, register-indirect, and
 * returns); anything else simply falls through to pc + 2.  The computed
 * target is flushed from the I-cache before the caller writes to it.
 */
static short *get_step_address(void)
{
	short op = *(short *) trap_registers.pc;
	long addr;

	/* BT */
	if (OPCODE_BT(op)) {
		/* Branch taken iff the SR T bit is set */
		if (trap_registers.sr & SR_T_BIT_MASK)
			addr = trap_registers.pc + 4 + OPCODE_BTF_DISP(op);
		else
			addr = trap_registers.pc + 2;
	}

	/* BTS */
	else if (OPCODE_BTS(op)) {
		if (trap_registers.sr & SR_T_BIT_MASK)
			addr = trap_registers.pc + 4 + OPCODE_BTF_DISP(op);
		else
			addr = trap_registers.pc + 4;	/* Not in delay slot */
	}

	/* BF */
	else if (OPCODE_BF(op)) {
		/* Branch taken iff the SR T bit is clear */
		if (!(trap_registers.sr & SR_T_BIT_MASK))
			addr = trap_registers.pc + 4 + OPCODE_BTF_DISP(op);
		else
			addr = trap_registers.pc + 2;
	}

	/* BFS */
	else if (OPCODE_BFS(op)) {
		if (!(trap_registers.sr & SR_T_BIT_MASK))
			addr = trap_registers.pc + 4 + OPCODE_BTF_DISP(op);
		else
			addr = trap_registers.pc + 4;	/* Not in delay slot */
	}

	/* BRA */
	else if (OPCODE_BRA(op))
		addr = trap_registers.pc + 4 + OPCODE_BRA_DISP(op);

	/* BRAF */
	else if (OPCODE_BRAF(op))
		addr = trap_registers.pc + 4
		    + trap_registers.regs[OPCODE_BRAF_REG(op)];

	/* BSR */
	else if (OPCODE_BSR(op))
		addr = trap_registers.pc + 4 + OPCODE_BSR_DISP(op);

	/* BSRF */
	else if (OPCODE_BSRF(op))
		addr = trap_registers.pc + 4
		    + trap_registers.regs[OPCODE_BSRF_REG(op)];

	/* JMP */
	else if (OPCODE_JMP(op))
		addr = trap_registers.regs[OPCODE_JMP_REG(op)];

	/* JSR */
	else if (OPCODE_JSR(op))
		addr = trap_registers.regs[OPCODE_JSR_REG(op)];

	/* RTS: return address is in PR */
	else if (OPCODE_RTS(op))
		addr = trap_registers.pr;

	/* RTE: return PC was pushed on the stack (r15) */
	else if (OPCODE_RTE(op))
		addr = trap_registers.regs[15];

	/* Other: plain fall-through to the next instruction */
	else
		addr = trap_registers.pc + 2;

	kgdb_flush_icache_range(addr, addr + 2);
	return (short *) addr;
}
784
785/* Set up a single-step. Replace the instruction immediately after the
786 current instruction (i.e. next in the expected flow of control) with a
787 trap instruction, so that returning will cause only a single instruction
788 to be executed. Note that this model is slightly broken for instructions
789 with delay slots (e.g. B[TF]S, BSR, BRA etc), where both the branch
790 and the instruction in the delay slot will be executed. */
791static void do_single_step(void)
792{
793 unsigned short *addr = 0;
794
795 /* Determine where the target instruction will send us to */
796 addr = get_step_address();
797 stepped_address = (int)addr;
798
799 /* Replace it */
800 stepped_opcode = *(short *)addr;
801 *addr = STEP_OPCODE;
802
803 /* Flush and return */
804 kgdb_flush_icache_range((long) addr, (long) addr + 2);
805 return;
806}
807
808/* Undo a single step */
809static void undo_single_step(void)
810{
811 /* If we have stepped, put back the old instruction */
812 /* Use stepped_address in case we stopped elsewhere */
813 if (stepped_opcode != 0) {
814 *(short*)stepped_address = stepped_opcode;
815 kgdb_flush_icache_range(stepped_address, stepped_address + 2);
816 }
817 stepped_opcode = 0;
818}
819
/* Send a signal message */
/*
 * Tell the host which signal stopped us.  Without thread support this is
 * a plain "Sxx" stop reply; with CONFIG_KGDB_THREAD it is the extended
 * "Txx thread:<id>;" form so GDB also learns which thread trapped.
 */
static void send_signal_msg(const int signum)
{
#ifndef CONFIG_KGDB_THREAD
	/* "Sxx" where xx is the signal number in hex */
	out_buffer[0] = 'S';
	out_buffer[1] = highhex(signum);
	out_buffer[2] = lowhex(signum);
	out_buffer[3] = 0;
	put_packet(out_buffer);
#else /* CONFIG_KGDB_THREAD */
	int threadid;
	threadref thref;
	char *out = out_buffer;
	const char *tstring = "thread";

	/* "Txx" followed by the "thread:<id>;" pair */
	*out++ = 'T';
	*out++ = highhex(signum);
	*out++ = lowhex(signum);

	while (*tstring) {
		*out++ = *tstring++;
	}
	*out++ = ':';

	/* GDB cannot use thread ID 0, so map pid 0 to PID_MAX */
	threadid = trapped_thread->pid;
	if (threadid == 0) threadid = PID_MAX;
	int_to_threadref(&thref, threadid);
	pack_threadid(out, &thref);
	out += BUF_THREAD_ID_SIZE;
	*out++ = ';';

	*out = 0;
	put_packet(out_buffer);
#endif /* CONFIG_KGDB_THREAD */
}
855
856/* Reply that all was well */
857static void send_ok_msg(void)
858{
859 strcpy(out_buffer, "OK");
860 put_packet(out_buffer);
861}
862
863/* Reply that an error occurred */
864static void send_err_msg(void)
865{
866 strcpy(out_buffer, "E01");
867 put_packet(out_buffer);
868}
869
/* Empty message indicates unrecognised command */
static void send_empty_msg(void)
{
	/* An empty reply is the protocol's "command not supported" */
	put_packet("");
}
875
/* Read memory due to 'm' message */
/*
 * Handle "m<addr>,<length>": hex-dump target memory into out_buffer.
 * The read runs under setjmp() with kgdb_nofault set, so a bus error on
 * a bad address longjmp()s back here and we reply E01 instead of
 * faulting the kernel.  ptr doubles as the error flag: it is cleared
 * only when the packet parsed and the dump completed.
 */
static void read_mem_msg(void)
{
	char *ptr;
	int addr;
	int length;

	/* Jmp, disable bus error handler */
	if (setjmp(rem_com_env) == 0) {

		kgdb_nofault = 1;

		/* Walk through, have m<addr>,<length> */
		ptr = &in_buffer[1];
		if (hex_to_int(&ptr, &addr) && (*ptr++ == ','))
			if (hex_to_int(&ptr, &length)) {
				ptr = 0;
				/* Two hex chars per byte: clamp to reply size */
				if (length * 2 > OUTBUFMAX)
					length = OUTBUFMAX / 2;
				mem_to_hex((char *) addr, out_buffer, length);
			}
		if (ptr)
			send_err_msg();
		else
			put_packet(out_buffer);
	} else
		send_err_msg();

	/* Restore bus error handler */
	kgdb_nofault = 0;
}
907
/* Write memory due to 'M' or 'X' message */
/*
 * Handle "M<addr>,<length>:<data>" (hex) or "X..." (escaped binary,
 * binary != 0).  Like read_mem_msg() the write is protected against bus
 * errors via setjmp()/kgdb_nofault.  The I-cache is flushed afterwards
 * in case the write patched code (e.g. GDB inserting a breakpoint).
 */
static void write_mem_msg(int binary)
{
	char *ptr;
	int addr;
	int length;

	if (setjmp(rem_com_env) == 0) {

		kgdb_nofault = 1;

		/* Walk through, have M<addr>,<length>:<data> */
		ptr = &in_buffer[1];
		if (hex_to_int(&ptr, &addr) && (*ptr++ == ','))
			if (hex_to_int(&ptr, &length) && (*ptr++ == ':')) {
				if (binary)
					ebin_to_mem(ptr, (char*)addr, length);
				else
					hex_to_mem(ptr, (char*)addr, length);
				kgdb_flush_icache_range(addr, addr + length);
				ptr = 0;
				send_ok_msg();
			}
		if (ptr)
			send_err_msg();
	} else
		send_err_msg();

	/* Restore bus error handler */
	kgdb_nofault = 0;
}
939
940/* Continue message */
941static void continue_msg(void)
942{
943 /* Try to read optional parameter, PC unchanged if none */
944 char *ptr = &in_buffer[1];
945 int addr;
946
947 if (hex_to_int(&ptr, &addr))
948 trap_registers.pc = addr;
949}
950
/* Continue message with signal */
/*
 * Handle 'C<sig>[;addr]': GDB asks us to continue and deliver a signal.
 * The stub cannot force a signal into the kernel, so we tell the user
 * and just continue (optionally at the supplied address).
 */
static void continue_with_sig_msg(void)
{
	int signal;
	char *ptr = &in_buffer[1];
	int addr;

	/* Report limitation */
	kgdb_to_gdb("Cannot force signal in kgdb, continuing anyway.\n");

	/* Signal (parsed only to advance ptr; value is discarded) */
	hex_to_int(&ptr, &signal);
	if (*ptr == ';')
		ptr++;

	/* Optional address */
	if (hex_to_int(&ptr, &addr))
		trap_registers.pc = addr;
}
970
/* Step message */
static void step_msg(void)
{
	/* 's' is just 'c' plus a planted single-step trap */
	continue_msg();
	do_single_step();
}
977
/* Step message with signal */
static void step_with_sig_msg(void)
{
	/* 'S' is 'C' (signal ignored) plus a planted single-step trap */
	continue_with_sig_msg();
	do_single_step();
}
984
/* Send register contents */
/*
 * Handle 'g': reply with all register values as one hex string.  With
 * thread support, registers come from the GDB-selected thread
 * (current_thread) if one is set, otherwise from the trapped context.
 */
static void send_regs_msg(void)
{
#ifdef CONFIG_KGDB_THREAD
	if (!current_thread)
		kgdb_regs_to_gdb_regs(&trap_registers, registers);
	else
		thread_regs_to_gdb_regs(current_thread, registers);
#else
	kgdb_regs_to_gdb_regs(&trap_registers, registers);
#endif

	mem_to_hex((char *) registers, out_buffer, NUMREGBYTES);
	put_packet(out_buffer);
}
1000
/* Set register contents - currently can't set other thread's registers */
/*
 * Handle 'G': overwrite the trapped context's registers from the hex
 * string in the packet.  Writing another thread's registers is not
 * supported, hence the error reply when current_thread is selected.
 * Note: existing values are loaded first so fields GDB did not change
 * round-trip unmodified.
 */
static void set_regs_msg(void)
{
#ifdef CONFIG_KGDB_THREAD
	if (!current_thread) {
#endif
		kgdb_regs_to_gdb_regs(&trap_registers, registers);
		hex_to_mem(&in_buffer[1], (char *) registers, NUMREGBYTES);
		gdb_regs_to_kgdb_regs(registers, &trap_registers);
		send_ok_msg();
#ifdef CONFIG_KGDB_THREAD
	} else
		send_err_msg();
#endif
}
1016
1017
1018#ifdef CONFIG_KGDB_THREAD
1019
/* Set the status for a thread */
/*
 * Handle 'H<op><tid>': select which thread subsequent operations apply
 * to.  'Hg' (registers etc.) is supported and records the selection in
 * current_thread (NULL means "the thread that trapped"); 'Hc'
 * (continue/step) is acknowledged but ignored.
 */
void set_thread_msg(void)
{
	int threadid;
	struct task_struct *thread = NULL;
	char *ptr;

	switch (in_buffer[1]) {

	/* To select which thread for gG etc messages, i.e. supported */
	case 'g':

		ptr = &in_buffer[2];
		hex_to_int(&ptr, &threadid);
		thread = get_thread(threadid);

		/* If we haven't found it */
		if (!thread) {
			send_err_msg();
			break;
		}

		/* Set current_thread (or not) */
		if (thread == trapped_thread)
			current_thread = NULL;
		else
			current_thread = thread;
		send_ok_msg();
		break;

	/* To select which thread for cCsS messages, i.e. unsupported */
	case 'c':
		send_ok_msg();
		break;

	default:
		send_empty_msg();
		break;
	}
}
1060
/* Is a thread alive? */
/*
 * Handle 'T<tid>': reply OK if the thread ID maps to a live task,
 * E01 otherwise.
 */
static void thread_status_msg(void)
{
	char *ptr;
	int threadid;
	struct task_struct *thread = NULL;

	ptr = &in_buffer[1];
	hex_to_int(&ptr, &threadid);
	thread = get_thread(threadid);
	if (thread)
		send_ok_msg();
	else
		send_err_msg();
}
/* Send the current thread ID */
/*
 * Handle 'qC': reply "QC<tid>" with the currently selected thread, or
 * the trapped thread when none is selected.  Pid 0 is translated to
 * PID_MAX because GDB reserves thread ID 0.
 */
static void thread_id_msg(void)
{
	int threadid;
	threadref thref;

	out_buffer[0] = 'Q';
	out_buffer[1] = 'C';

	if (current_thread)
		threadid = current_thread->pid;
	else if (trapped_thread)
		threadid = trapped_thread->pid;
	else /* Impossible, but just in case! */
	{
		send_err_msg();
		return;
	}

	/* Translate pid 0 to PID_MAX for gdb */
	if (threadid == 0) threadid = PID_MAX;

	int_to_threadref(&thref, threadid);
	pack_threadid(out_buffer + 2, &thref);
	out_buffer[2 + BUF_THREAD_ID_SIZE] = '\0';
	put_packet(out_buffer);
}
1103
/* Send thread info */
/*
 * Handle 'qfThreadInfo': reply "m<tid>,<tid>,..." listing every live
 * thread ID.  All entries are sent in this single reply, so the
 * follow-up 'qsThreadInfo' can simply answer "l" (end of list).
 */
static void thread_info_msg(void)
{
	struct task_struct *thread = NULL;
	int threadid;
	char *pos;
	threadref thref;

	/* Start with 'm' */
	out_buffer[0] = 'm';
	pos = &out_buffer[1];

	/* For all possible thread IDs - this will overrun if > 44 threads! */
	/* Start at 1 and include PID_MAX (since GDB won't use pid 0...) */
	for (threadid = 1; threadid <= PID_MAX; threadid++) {

		/* tasklist_lock guards the pid walk inside get_thread() */
		read_lock(&tasklist_lock);
		thread = get_thread(threadid);
		read_unlock(&tasklist_lock);

		/* If it's a valid thread */
		if (thread) {
			int_to_threadref(&thref, threadid);
			pack_threadid(pos, &thref);
			pos += BUF_THREAD_ID_SIZE;
			*pos++ = ',';
		}
	}
	*--pos = 0;		/* Lose final comma */
	put_packet(out_buffer);

}
1136
1137/* Return printable info for gdb's 'info threads' command */
1138static void thread_extra_info_msg(void)
1139{
1140 int threadid;
1141 struct task_struct *thread = NULL;
1142 char buffer[20], *ptr;
1143 int i;
1144
1145 /* Extract thread ID */
1146 ptr = &in_buffer[17];
1147 hex_to_int(&ptr, &threadid);
1148 thread = get_thread(threadid);
1149
1150 /* If we don't recognise it, say so */
1151 if (thread == NULL)
1152 strcpy(buffer, "(unknown)");
1153 else
1154 strcpy(buffer, thread->comm);
1155
1156 /* Construct packet */
1157 for (i = 0, ptr = out_buffer; buffer[i]; i++)
1158 ptr = pack_hex_byte(ptr, buffer[i]);
1159
1160 if (thread->thread.pc == (unsigned long)ret_from_fork) {
1161 strcpy(buffer, "<new fork>");
1162 for (i = 0; buffer[i]; i++)
1163 ptr = pack_hex_byte(ptr, buffer[i]);
1164 }
1165
1166 *ptr = '\0';
1167 put_packet(out_buffer);
1168}
1169
/* Handle all qFooBarBaz messages - have to use an if statement as
   opposed to a switch because q messages can have > 1 char id. */
static void query_msg(void)
{
	const char *q_start = &in_buffer[1];

	/* qC = return current thread ID */
	if (strncmp(q_start, "C", 1) == 0)
		thread_id_msg();

	/* qfThreadInfo = query all threads (first) */
	else if (strncmp(q_start, "fThreadInfo", 11) == 0)
		thread_info_msg();

	/* qsThreadInfo = query all threads (subsequent). We know we have sent
	   them all after the qfThreadInfo message, so there are no to send */
	else if (strncmp(q_start, "sThreadInfo", 11) == 0)
		put_packet("l");	/* el = last */

	/* qThreadExtraInfo = supply printable information per thread */
	else if (strncmp(q_start, "ThreadExtraInfo", 15) == 0)
		thread_extra_info_msg();

	/* Unsupported - empty message as per spec */
	else
		send_empty_msg();
}
1197#endif /* CONFIG_KGDB_THREAD */
1198
1199/*
1200 * Bring up the ports..
1201 */
1202static int kgdb_serial_setup(void)
1203{
1204 extern int kgdb_console_setup(struct console *co, char *options);
1205 struct console dummy;
1206
1207 kgdb_console_setup(&dummy, 0);
1208
1209 return 0;
1210}
1211
/* The command loop, read and act on requests */
/*
 * Core of the stub: called from kgdb_handle_exception() after a trap.
 * Reports the stop to the host, then reads and dispatches remote
 * protocol packets until one of the resume commands (c/C/s/S/D) is
 * received, at which point it returns and the kernel continues.
 */
static void kgdb_command_loop(const int excep_code, const int trapa_value)
{
	int sigval;

	if (excep_code == NMI_VEC) {
#ifndef CONFIG_KGDB_NMI
		KGDB_PRINTK("Ignoring unexpected NMI?\n");
		return;
#else /* CONFIG_KGDB_NMI */
		/* An NMI can be used to drag us into the debugger even if
		   the stub was never enabled at boot */
		if (!kgdb_enabled) {
			kgdb_enabled = 1;
			kgdb_init();
		}
#endif /* CONFIG_KGDB_NMI */
	}

	/* Ignore if we're disabled */
	if (!kgdb_enabled)
		return;

#ifdef CONFIG_KGDB_THREAD
	/* Until GDB specifies a thread */
	current_thread = NULL;
	trapped_thread = current;
#endif

	/* Enter GDB mode (e.g. after detach) */
	if (!kgdb_in_gdb_mode) {
		/* Do serial setup, notify user, issue preemptive ack */
		kgdb_serial_setup();
		KGDB_PRINTK("Waiting for GDB (on %s%d at %d baud)\n",
			    (kgdb_porttype ? kgdb_porttype->name : ""),
			    kgdb_portnum, kgdb_baud);
		kgdb_in_gdb_mode = 1;
		put_debug_char('+');
	}

	/* Reply to host that an exception has occurred */
	sigval = compute_signal(excep_code);
	send_signal_msg(sigval);

	/* TRAP_VEC exception indicates a software trap inserted in place of
	   code by GDB so back up PC by one instruction, as this instruction
	   will later be replaced by its original one.  Do NOT do this for
	   trap 0xff, since that indicates a compiled-in breakpoint which
	   will not be replaced (and we would retake the trap forever) */
	if ((excep_code == TRAP_VEC) && (trapa_value != (0xff << 2))) {
		trap_registers.pc -= 2;
	}

	/* Undo any stepping we may have done */
	undo_single_step();

	/* Packet dispatch loop: runs until a resume command returns */
	while (1) {

		out_buffer[0] = 0;
		get_packet(in_buffer, BUFMAX);

		/* Examine first char of buffer to see what we need to do */
		switch (in_buffer[0]) {

		case '?':	/* Send which signal we've received */
			send_signal_msg(sigval);
			break;

		case 'g':	/* Return the values of the CPU registers */
			send_regs_msg();
			break;

		case 'G':	/* Set the value of the CPU registers */
			set_regs_msg();
			break;

		case 'm':	/* Read LLLL bytes address AA..AA */
			read_mem_msg();
			break;

		case 'M':	/* Write LLLL bytes address AA..AA, ret OK */
			write_mem_msg(0);	/* 0 = data in hex */
			break;

		case 'X':	/* Write LLLL bytes esc bin address AA..AA */
			/* Binary transfer only works on an 8-bit line */
			if (kgdb_bits == '8')
				write_mem_msg(1); /* 1 = data in binary */
			else
				send_empty_msg();
			break;

		case 'C':	/* Continue, signum included, we ignore it */
			continue_with_sig_msg();
			return;

		case 'c':	/* Continue at address AA..AA (optional) */
			continue_msg();
			return;

		case 'S':	/* Step, signum included, we ignore it */
			step_with_sig_msg();
			return;

		case 's':	/* Step one instruction from AA..AA */
			step_msg();
			return;

#ifdef CONFIG_KGDB_THREAD

		case 'H':	/* Task related */
			set_thread_msg();
			break;

		case 'T':	/* Query thread status */
			thread_status_msg();
			break;

		case 'q':	/* Handle query - currently thread-related */
			query_msg();
			break;
#endif

		case 'k':	/* 'Kill the program' with a kernel ? */
			break;

		case 'D':	/* Detach from program, send reply OK */
			kgdb_in_gdb_mode = 0;
			send_ok_msg();
			get_debug_char();
			return;

		default:
			send_empty_msg();
			break;
		}
	}
}
1347
/* There has been an exception, most likely a breakpoint. */
/*
 * Debug-exception entry point (installed via kgdb_debug_hook).  Copies
 * the trapped context into trap_registers, runs the command loop, then
 * copies the (possibly GDB-modified) context back before returning to
 * the kernel.
 */
void kgdb_handle_exception(struct pt_regs *regs)
{
	int excep_code, vbr_val;
	int count;
	int trapa_value = ctrl_inl(TRA);

	/* Copy kernel regs (from stack) */
	for (count = 0; count < 16; count++)
		trap_registers.regs[count] = regs->regs[count];
	trap_registers.pc = regs->pc;
	trap_registers.pr = regs->pr;
	trap_registers.sr = regs->sr;
	trap_registers.gbr = regs->gbr;
	trap_registers.mach = regs->mach;
	trap_registers.macl = regs->macl;

	/* VBR is not in pt_regs; read it straight from the CPU */
	asm("stc vbr, %0":"=r"(vbr_val));
	trap_registers.vbr = vbr_val;

	/* Get excode for command loop call, user access */
	asm("stc r2_bank, %0":"=r"(excep_code));
	kgdb_excode = excep_code;

	/* Other interesting environment items for reference */
	asm("stc r6_bank, %0":"=r"(kgdb_g_imask));
	kgdb_current = current;
	kgdb_trapa_val = trapa_value;

	/* Act on the exception (>> 5 converts the raw event code to the
	   vector number compute_signal() expects — presumably the EXPEVT
	   encoding; verify against the entry code if changing this) */
	kgdb_command_loop(excep_code >> 5, trapa_value);

	kgdb_current = NULL;

	/* Copy back the (maybe modified) registers */
	for (count = 0; count < 16; count++)
		regs->regs[count] = trap_registers.regs[count];
	regs->pc = trap_registers.pc;
	regs->pr = trap_registers.pr;
	regs->sr = trap_registers.sr;
	regs->gbr = trap_registers.gbr;
	regs->mach = trap_registers.mach;
	regs->macl = trap_registers.macl;

	/* GDB may have changed VBR too; write it back to the CPU */
	vbr_val = trap_registers.vbr;
	asm("ldc %0, vbr": :"r"(vbr_val));

	return;
}
1397
/* Trigger a breakpoint by function */
/* Programmatic entry into the debugger: enables and initialises the
   stub if needed, then executes the compiled-in breakpoint trap. */
void breakpoint(void)
{
	if (!kgdb_enabled) {
		kgdb_enabled = 1;
		kgdb_init();
	}
	BREAKPOINT();
}
1407
/* Initialise the KGDB data structures and serial configuration */
/*
 * Reset stub state, configure the debug serial line, and install the
 * exception hooks.  Returns 0 on success, 1 if the stub is disabled,
 * -1 on serial setup failure.  When kgdb_halt is set we drop straight
 * into the debugger instead of just reporting ready.
 */
int kgdb_init(void)
{
	if (!kgdb_enabled)
		return 1;

	in_nmi = 0;
	kgdb_nofault = 0;
	stepped_opcode = 0;
	kgdb_in_gdb_mode = 0;

	if (kgdb_serial_setup() != 0) {
		KGDB_PRINTK("serial setup error\n");
		return -1;
	}

	/* Init ptr to exception handler */
	kgdb_debug_hook = kgdb_handle_exception;
	kgdb_bus_err_hook = kgdb_handle_bus_error;

	/* Enter kgdb now if requested, or just report init done */
	if (kgdb_halt) {
		kgdb_in_gdb_mode = 1;
		put_debug_char('+');
		breakpoint();
	}
	else
	{
		KGDB_PRINTK("stub is initialized.\n");
	}

	return 0;
}
1441
1442/* Make function available for "user messages"; console will use it too. */
1443
1444char gdbmsgbuf[BUFMAX];
1445#define MAXOUT ((BUFMAX-2)/2)
1446
1447static void kgdb_msg_write(const char *s, unsigned count)
1448{
1449 int i;
1450 int wcount;
1451 char *bufptr;
1452
1453 /* 'O'utput */
1454 gdbmsgbuf[0] = 'O';
1455
1456 /* Fill and send buffers... */
1457 while (count > 0) {
1458 bufptr = gdbmsgbuf + 1;
1459
1460 /* Calculate how many this time */
1461 wcount = (count > MAXOUT) ? MAXOUT : count;
1462
1463 /* Pack in hex chars */
1464 for (i = 0; i < wcount; i++)
1465 bufptr = pack_hex_byte(bufptr, s[i]);
1466 *bufptr = '\0';
1467
1468 /* Move up */
1469 s += wcount;
1470 count -= wcount;
1471
1472 /* Write packet */
1473 put_packet(gdbmsgbuf);
1474 }
1475}
1476
/* Convenience wrapper: send a NUL-terminated string to the host */
static void kgdb_to_gdb(const char *s)
{
	kgdb_msg_write(s, strlen(s));
}
1481
#ifdef CONFIG_SH_KGDB_CONSOLE
/* Console write hook: route kernel console output through GDB as
   'O' packets, but only while a debugger session is active. */
void kgdb_console_write(struct console *co, const char *s, unsigned count)
{
	/* Bail if we're not talking to GDB */
	if (!kgdb_in_gdb_mode)
		return;

	kgdb_msg_write(s, count);
}
#endif
diff --git a/arch/sh/kernel/module.c b/arch/sh/kernel/module.c
new file mode 100644
index 000000000000..142a4e5b7ebc
--- /dev/null
+++ b/arch/sh/kernel/module.c
@@ -0,0 +1,146 @@
1/* Kernel module help for SH.
2
3 This program is free software; you can redistribute it and/or modify
4 it under the terms of the GNU General Public License as published by
5 the Free Software Foundation; either version 2 of the License, or
6 (at your option) any later version.
7
8 This program is distributed in the hope that it will be useful,
9 but WITHOUT ANY WARRANTY; without even the implied warranty of
10 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 GNU General Public License for more details.
12
13 You should have received a copy of the GNU General Public License
14 along with this program; if not, write to the Free Software
15 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
16*/
17#include <linux/moduleloader.h>
18#include <linux/elf.h>
19#include <linux/vmalloc.h>
20#include <linux/fs.h>
21#include <linux/string.h>
22#include <linux/kernel.h>
23
24#if 0
25#define DEBUGP printk
26#else
27#define DEBUGP(fmt...)
28#endif
29
30void *module_alloc(unsigned long size)
31{
32 if (size == 0)
33 return NULL;
34 return vmalloc(size);
35}
36
37
/* Free memory returned from module_alloc */
void module_free(struct module *mod, void *module_region)
{
	/* vfree(NULL) is a no-op, so no guard is needed */
	vfree(module_region);
	/* FIXME: If module_region == mod->init_region, trim exception
           table entries. */
}
45
/* We don't need anything special. */
/* SH modules need no section rewriting before layout; always succeed. */
int module_frob_arch_sections(Elf_Ehdr *hdr,
			      Elf_Shdr *sechdrs,
			      char *secstrings,
			      struct module *mod)
{
	return 0;
}
54
/*
 * Copy a 32-bit word between sw and tw when the location may not be
 * 4-byte aligned.  'align' is the low two address bits of the target:
 * 0 = word aligned (single 32-bit access), 2 = halfword aligned (two
 * 16-bit accesses), otherwise byte-by-byte.  Needed because relocation
 * targets inside module sections can land at any alignment and SH
 * faults on misaligned accesses.
 */
#define COPY_UNALIGNED_WORD(sw, tw, align) \
{ \
	void *__s = &(sw), *__t = &(tw); \
	unsigned short *__s2 = __s, *__t2 = __t; \
	unsigned char *__s1 = __s, *__t1 = __t; \
	switch ((align)) \
	{ \
	case 0: \
		*(unsigned long *) __t = *(unsigned long *) __s; \
		break; \
	case 2: \
		*__t2++ = *__s2++; \
		*__t2 = *__s2; \
		break; \
	default: \
		*__t1++ = *__s1++; \
		*__t1++ = *__s1++; \
		*__t1++ = *__s1++; \
		*__t1 = *__s1; \
		break; \
	} \
}
77
/*
 * Apply a RELA relocation section to a freshly loaded module.  Only two
 * relocation types are handled: R_SH_DIR32 (absolute 32-bit) and
 * R_SH_REL32 (PC-relative 32-bit).  All accesses to the patch location
 * go through COPY_UNALIGNED_WORD because it may not be word aligned.
 * Returns 0 on success, -ENOEXEC on an unknown relocation type.
 */
int apply_relocate_add(Elf32_Shdr *sechdrs,
		       const char *strtab,
		       unsigned int symindex,
		       unsigned int relsec,
		       struct module *me)
{
	unsigned int i;
	Elf32_Rela *rel = (void *)sechdrs[relsec].sh_addr;
	Elf32_Sym *sym;
	Elf32_Addr relocation;
	uint32_t *location;
	uint32_t value;
	int align;

	DEBUGP("Applying relocate section %u to %u\n", relsec,
	       sechdrs[relsec].sh_info);
	for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
		/* This is where to make the change */
		location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
			+ rel[i].r_offset;
		/* This is the symbol it is referring to.  Note that all
		   undefined symbols have been resolved.  */
		sym = (Elf32_Sym *)sechdrs[symindex].sh_addr
			+ ELF32_R_SYM(rel[i].r_info);
		relocation = sym->st_value + rel[i].r_addend;
		/* Low two address bits select the unaligned-copy strategy */
		align = (int)location & 3;

		switch (ELF32_R_TYPE(rel[i].r_info)) {
		case R_SH_DIR32:
			/* Absolute: add symbol value to the stored addend */
			COPY_UNALIGNED_WORD (*location, value, align);
			value += relocation;
			COPY_UNALIGNED_WORD (value, *location, align);
			break;
		case R_SH_REL32:
			/* PC-relative: offset from the patch location */
			relocation = (relocation - (Elf32_Addr) location);
			COPY_UNALIGNED_WORD (*location, value, align);
			value += relocation;
			COPY_UNALIGNED_WORD (value, *location, align);
			break;
		default:
			printk(KERN_ERR "module %s: Unknown relocation: %u\n",
			       me->name, ELF32_R_TYPE(rel[i].r_info));
			return -ENOEXEC;
		}
	}
	return 0;
}
125
/* Plain REL (no addend) relocations are not produced for SH modules;
   reject them outright. */
int apply_relocate(Elf32_Shdr *sechdrs,
		   const char *strtab,
		   unsigned int symindex,
		   unsigned int relsec,
		   struct module *me)
{
	printk(KERN_ERR "module %s: REL RELOCATION unsupported\n",
	       me->name);
	return -ENOEXEC;
}
136
/* No architecture-specific finalisation is needed for SH modules. */
int module_finalize(const Elf_Ehdr *hdr,
		    const Elf_Shdr *sechdrs,
		    struct module *me)
{
	return 0;
}
143
/* Nothing to undo at module unload on SH. */
void module_arch_cleanup(struct module *mod)
{
}
diff --git a/arch/sh/kernel/process.c b/arch/sh/kernel/process.c
new file mode 100644
index 000000000000..3d024590c24e
--- /dev/null
+++ b/arch/sh/kernel/process.c
@@ -0,0 +1,531 @@
1/* $Id: process.c,v 1.28 2004/05/05 16:54:23 lethal Exp $
2 *
3 * linux/arch/sh/kernel/process.c
4 *
5 * Copyright (C) 1995 Linus Torvalds
6 *
7 * SuperH version: Copyright (C) 1999, 2000 Niibe Yutaka & Kaz Kojima
8 */
9
10/*
11 * This file handles the architecture-dependent parts of process handling..
12 */
13
14#include <linux/module.h>
15#include <linux/unistd.h>
16#include <linux/mm.h>
17#include <linux/elfcore.h>
18#include <linux/slab.h>
19#include <linux/a.out.h>
20#include <linux/ptrace.h>
21#include <linux/platform.h>
22#include <linux/kallsyms.h>
23
24#include <asm/io.h>
25#include <asm/uaccess.h>
26#include <asm/mmu_context.h>
27#include <asm/elf.h>
28#if defined(CONFIG_SH_HS7751RVOIP)
29#include <asm/hs7751rvoip/hs7751rvoip.h>
30#elif defined(CONFIG_SH_RTS7751R2D)
31#include <asm/rts7751r2d/rts7751r2d.h>
32#endif
33
34static int hlt_counter=0;
35
36int ubc_usercnt = 0;
37
38#define HARD_IDLE_TIMEOUT (HZ / 3)
39
40void disable_hlt(void)
41{
42 hlt_counter++;
43}
44
45EXPORT_SYMBOL(disable_hlt);
46
47void enable_hlt(void)
48{
49 hlt_counter--;
50}
51
52EXPORT_SYMBOL(enable_hlt);
53
void default_idle(void)
{
	/* endless idle loop with no priority at all */
	while (1) {
		if (hlt_counter) {
			/* hlt disabled: busy-poll for a reschedule instead */
			while (1)
				if (need_resched())
					break;
		} else {
			/*
			 * NOTE(review): need_resched() is tested before
			 * cpu_sleep() without blocking interrupts; an IRQ
			 * that sets the flag in between is only seen after
			 * the next wakeup -- confirm this latency is
			 * acceptable here.
			 */
			while (!need_resched())
				cpu_sleep();
		}

		schedule();
	}
}
70
/* Per-CPU idle entry point; default_idle() loops forever, so this never returns. */
void cpu_idle(void)
{
	default_idle();
}
75
/*
 * Reboot by raising an address error while exceptions are blocked,
 * which the CPU treats as a manual reset.  The argument is ignored.
 */
void machine_restart(char * __unused)
{
	/* SR.BL=1 and invoke address error to let CPU reset (manual reset) */
	asm volatile("ldc %0, sr\n\t"
		     "mov.l @%1, %0" : : "r" (0x10000000), "r" (0x80000001));
}
82
83EXPORT_SYMBOL(machine_restart);
84
void machine_halt(void)
{
#if defined(CONFIG_SH_HS7751RVOIP)
	unsigned short value;

	/* Clear bit 5 of the output port register -- presumably the
	   board's power-off line; verify against the board docs. */
	value = ctrl_inw(PA_OUTPORTR);
	ctrl_outw((value & 0xffdf), PA_OUTPORTR);
#elif defined(CONFIG_SH_RTS7751R2D)
	ctrl_outw(0x0001, PA_POWOFF);
#endif
	/* If power stays up (or no board support), sleep forever. */
	while (1)
		cpu_sleep();
}
98
99EXPORT_SYMBOL(machine_halt);
100
void machine_power_off(void)
{
#if defined(CONFIG_SH_HS7751RVOIP)
	unsigned short value;

	/* Same power-off poke as machine_halt() for this board. */
	value = ctrl_inw(PA_OUTPORTR);
	ctrl_outw((value & 0xffdf), PA_OUTPORTR);
#elif defined(CONFIG_SH_RTS7751R2D)
	ctrl_outw(0x0001, PA_POWOFF);
#endif
	/* On boards with no power-off support this simply returns. */
}
112
113EXPORT_SYMBOL(machine_power_off);
114
/* Dump the interrupted context's registers (and kernel stack) to the log. */
void show_regs(struct pt_regs * regs)
{
	printk("\n");
	printk("Pid : %d, Comm: %20s\n", current->pid, current->comm);
	print_symbol("PC is at %s\n", regs->pc);
	printk("PC : %08lx SP : %08lx SR : %08lx ",
	       regs->pc, regs->regs[15], regs->sr);
#ifdef CONFIG_MMU
	/* NOTE(review): ctrl_inl() result printed with %08x -- confirm its
	   return type is 32-bit int here, else this should be %08lx. */
	printk("TEA : %08x ", ctrl_inl(MMU_TEA));
#else
	printk(" ");
#endif
	printk("%s\n", print_tainted());

	printk("R0 : %08lx R1 : %08lx R2 : %08lx R3 : %08lx\n",
	       regs->regs[0],regs->regs[1],
	       regs->regs[2],regs->regs[3]);
	printk("R4 : %08lx R5 : %08lx R6 : %08lx R7 : %08lx\n",
	       regs->regs[4],regs->regs[5],
	       regs->regs[6],regs->regs[7]);
	printk("R8 : %08lx R9 : %08lx R10 : %08lx R11 : %08lx\n",
	       regs->regs[8],regs->regs[9],
	       regs->regs[10],regs->regs[11]);
	printk("R12 : %08lx R13 : %08lx R14 : %08lx\n",
	       regs->regs[12],regs->regs[13],
	       regs->regs[14]);
	printk("MACH: %08lx MACL: %08lx GBR : %08lx PR : %08lx\n",
	       regs->mach, regs->macl, regs->gbr, regs->pr);

	/*
	 * If we're in kernel mode, dump the stack too..
	 */
	if (!user_mode(regs)) {
		extern void show_task(unsigned long *sp);
		unsigned long sp = regs->regs[15];

		show_task((unsigned long *)sp);
	}
}
154
/*
 * Create a kernel thread
 */

/*
 * This is the mechanism for creating a new kernel thread.
 *
 * The assembly trampoline below receives the child's initial register
 * state set up by kernel_thread(): r4 = arg, r5 = fn.  It calls
 * fn(arg), then passes fn's return value to do_exit().
 */
extern void kernel_thread_helper(void);
__asm__(".align 5\n"
	"kernel_thread_helper:\n\t"
	"jsr @r5\n\t"
	" nop\n\t"
	"mov.l 1f, r1\n\t"
	"jsr @r1\n\t"
	" mov r0, r4\n\t"
	".align 2\n\t"
	"1:.long do_exit");

int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags)
{	/* Don't use this in BL=1(cli). Or else, CPU resets! */
	struct pt_regs regs;

	/* Build the child's register frame: helper picks up arg/fn
	   from r4/r5 and starts at kernel_thread_helper. */
	memset(&regs, 0, sizeof(regs));
	regs.regs[4] = (unsigned long) arg;
	regs.regs[5] = (unsigned long) fn;

	regs.pc = (unsigned long) kernel_thread_helper;
	regs.sr = (1 << 30);	/* SR.RB=1: run on register bank 1 */

	/* Ok, create the new process.. */
	return do_fork(flags | CLONE_VM | CLONE_UNTRACED, 0, &regs, 0, NULL, NULL);
}
188
189/*
190 * Free current thread data structures etc..
191 */
192void exit_thread(void)
193{
194 if (current->thread.ubc_pc) {
195 current->thread.ubc_pc = 0;
196 ubc_usercnt -= 1;
197 }
198}
199
/* Reset per-thread state on exec. */
void flush_thread(void)
{
#if defined(CONFIG_SH_FPU)
	struct task_struct *tsk = current;
	/* The saved user register frame sits at the top of the kernel stack. */
	struct pt_regs *regs = (struct pt_regs *)
				((unsigned long)tsk->thread_info
				 + THREAD_SIZE - sizeof(struct pt_regs)
				 - sizeof(unsigned long));

	/* Forget lazy FPU state */
	clear_fpu(tsk, regs);
	clear_used_math();
#endif
}
214
/* Arch hook when a task's last reference goes away; SH keeps no
   per-thread resources that need explicit release. */
void release_thread(struct task_struct *dead_task)
{
	/* do nothing */
}
219
/* Fill in the fpu structure for a core dump.. */
int dump_fpu(struct pt_regs *regs, elf_fpregset_t *fpu)
{
	int fpvalid = 0;

#if defined(CONFIG_SH_FPU)
	struct task_struct *tsk = current;

	fpvalid = !!tsk_used_math(tsk);
	if (fpvalid) {
		/* Flush live FPU registers into the thread struct first. */
		unlazy_fpu(tsk, regs);
		memcpy(fpu, &tsk->thread.fpu.hard, sizeof(*fpu));
	}
#endif

	/* 1 if FPU state was captured, 0 otherwise. */
	return fpvalid;
}
237
/*
 * Capture the user space registers if the task is not running (in user space)
 */
int dump_task_regs(struct task_struct *tsk, elf_gregset_t *regs)
{
	struct pt_regs ptregs;

	/* Copy the saved user frame from the top of tsk's kernel stack;
	   with DSP support the frame sits below the saved DSP regs. */
	ptregs = *(struct pt_regs *)
		((unsigned long)tsk->thread_info + THREAD_SIZE
		 - sizeof(struct pt_regs)
#ifdef CONFIG_SH_DSP
		 - sizeof(struct pt_dspregs)
#endif
		 - sizeof(unsigned long));
	elf_core_copy_regs(regs, &ptregs);

	return 1;
}
256
/* Like dump_fpu(), but for an arbitrary (stopped) task rather than current. */
int
dump_task_fpu (struct task_struct *tsk, elf_fpregset_t *fpu)
{
	int fpvalid = 0;

#if defined(CONFIG_SH_FPU)
	fpvalid = !!tsk_used_math(tsk);
	if (fpvalid) {
		/* Locate tsk's saved user frame so unlazy_fpu() can spill
		   any live FPU state before we copy it out. */
		struct pt_regs *regs = (struct pt_regs *)
					((unsigned long)tsk->thread_info
					 + THREAD_SIZE - sizeof(struct pt_regs)
					 - sizeof(unsigned long));
		unlazy_fpu(tsk, regs);
		memcpy(fpu, &tsk->thread.fpu.hard, sizeof(*fpu));
	}
#endif

	return fpvalid;
}
276
asmlinkage void ret_from_fork(void);

/*
 * Set up the child's kernel stack and thread_struct during fork/clone.
 * The parent's saved user frame is duplicated at the top of the child's
 * kernel stack; the child resumes in ret_from_fork with r0 == 0.
 */
int copy_thread(int nr, unsigned long clone_flags, unsigned long usp,
		unsigned long unused,
		struct task_struct *p, struct pt_regs *regs)
{
	struct pt_regs *childregs;
#if defined(CONFIG_SH_FPU)
	struct task_struct *tsk = current;

	/* Spill the parent's live FPU state, then inherit it. */
	unlazy_fpu(tsk, regs);
	p->thread.fpu = tsk->thread.fpu;
	copy_to_stopped_child_used_math(p);
#endif

	/* Carve the child's pt_regs slot out of the top of its stack
	   (below the DSP save area when configured). */
	childregs = ((struct pt_regs *)
		(THREAD_SIZE + (unsigned long) p->thread_info)
#ifdef CONFIG_SH_DSP
		- sizeof(struct pt_dspregs)
#endif
		- sizeof(unsigned long)) - 1;
	*childregs = *regs;

	if (user_mode(regs)) {
		/* User fork: child runs on the stack the caller supplied. */
		childregs->regs[15] = usp;
	} else {
		/* Kernel thread: stack is the child's own kernel stack. */
		childregs->regs[15] = (unsigned long)p->thread_info + THREAD_SIZE;
	}
	if (clone_flags & CLONE_SETTLS) {
		/* TLS pointer was passed in r0 by the clone caller. */
		childregs->gbr = childregs->regs[0];
	}
	childregs->regs[0] = 0; /* Set return value for child */

	p->thread.sp = (unsigned long) childregs;
	p->thread.pc = (unsigned long) ret_from_fork;

	/* Child starts with no UBC single-step armed. */
	p->thread.ubc_pc = 0;

	return 0;
}
317
/*
 * fill in the user structure for a core dump..
 */
void dump_thread(struct pt_regs * regs, struct user * dump)
{
	dump->magic = CMAGIC;
	dump->start_code = current->mm->start_code;
	dump->start_data  = current->mm->start_data;
	/* Page-align the stack base; sizes below are in pages. */
	dump->start_stack = regs->regs[15] & ~(PAGE_SIZE - 1);
	dump->u_tsize = (current->mm->end_code - dump->start_code) >> PAGE_SHIFT;
	dump->u_dsize = (current->mm->brk + (PAGE_SIZE-1) - dump->start_data) >> PAGE_SHIFT;
	dump->u_ssize = (current->mm->start_stack - dump->start_stack +
			 PAGE_SIZE - 1) >> PAGE_SHIFT;
	/* Debug registers will come here. */

	dump->regs = *regs;

	dump->u_fpvalid = dump_fpu(regs, &dump->fpu);
}
337
/* Tracing by user break controller. */
/* Arm UBC channel A to break on the first instruction fetch at pc
   (for the given ASID, where the CPU supports ASID matching). */
static void
ubc_set_tracing(int asid, unsigned long pc)
{
	ctrl_outl(pc, UBC_BARA);

	/* We don't have any ASID settings for the SH-2! */
	if (cpu_data->type != CPU_SH7604)
		ctrl_outb(asid, UBC_BASRA);

	/* No address mask: match the exact PC. */
	ctrl_outl(0, UBC_BAMRA);

	if (cpu_data->type == CPU_SH7729) {
		/* SH7729 needs the CPU-access qualifier and 32-bit BRCR. */
		ctrl_outw(BBR_INST | BBR_READ | BBR_CPU, UBC_BBRA);
		ctrl_outl(BRCR_PCBA | BRCR_PCTE, UBC_BRCR);
	} else {
		ctrl_outw(BBR_INST | BBR_READ, UBC_BBRA);
		ctrl_outw(BRCR_PCBA, UBC_BRCR);
	}
}
358
359/*
360 * switch_to(x,y) should switch tasks from x to y.
361 *
362 */
363struct task_struct *__switch_to(struct task_struct *prev, struct task_struct *next)
364{
365#if defined(CONFIG_SH_FPU)
366 struct pt_regs *regs = (struct pt_regs *)
367 ((unsigned long)prev->thread_info
368 + THREAD_SIZE - sizeof(struct pt_regs)
369 - sizeof(unsigned long));
370 unlazy_fpu(prev, regs);
371#endif
372
373#ifdef CONFIG_PREEMPT
374 {
375 unsigned long flags;
376 struct pt_regs *regs;
377
378 local_irq_save(flags);
379 regs = (struct pt_regs *)
380 ((unsigned long)prev->thread_info
381 + THREAD_SIZE - sizeof(struct pt_regs)
382#ifdef CONFIG_SH_DSP
383 - sizeof(struct pt_dspregs)
384#endif
385 - sizeof(unsigned long));
386 if (user_mode(regs) && regs->regs[15] >= 0xc0000000) {
387 int offset = (int)regs->regs[15];
388
389 /* Reset stack pointer: clear critical region mark */
390 regs->regs[15] = regs->regs[1];
391 if (regs->pc < regs->regs[0])
392 /* Go to rewind point */
393 regs->pc = regs->regs[0] + offset;
394 }
395 local_irq_restore(flags);
396 }
397#endif
398
399 /*
400 * Restore the kernel mode register
401 * k7 (r7_bank1)
402 */
403 asm volatile("ldc %0, r7_bank"
404 : /* no output */
405 : "r" (next->thread_info));
406
407#ifdef CONFIG_MMU
408 /* If no tasks are using the UBC, we're done */
409 if (ubc_usercnt == 0)
410 /* If no tasks are using the UBC, we're done */;
411 else if (next->thread.ubc_pc && next->mm) {
412 ubc_set_tracing(next->mm->context & MMU_CONTEXT_ASID_MASK,
413 next->thread.ubc_pc);
414 } else {
415 ctrl_outw(0, UBC_BBRA);
416 ctrl_outw(0, UBC_BBRB);
417 }
418#endif
419
420 return prev;
421}
422
/* fork(2): r4-r7 mirror the syscall ABI registers; the saved frame is
   passed by value so do_fork() can seed the child from it. */
asmlinkage int sys_fork(unsigned long r4, unsigned long r5,
			unsigned long r6, unsigned long r7,
			struct pt_regs regs)
{
#ifdef CONFIG_MMU
	/* Child keeps the parent's current user stack pointer. */
	return do_fork(SIGCHLD, regs.regs[15], &regs, 0, NULL, NULL);
#else
	/* fork almost works, enough to trick you into looking elsewhere :-( */
	return -EINVAL;
#endif
}
434
/* clone(2): a zero newsp means "share the caller's current stack". */
asmlinkage int sys_clone(unsigned long clone_flags, unsigned long newsp,
			 unsigned long parent_tidptr,
			 unsigned long child_tidptr,
			 struct pt_regs regs)
{
	if (!newsp)
		newsp = regs.regs[15];
	return do_fork(clone_flags, newsp, &regs, 0,
			(int __user *)parent_tidptr, (int __user *)child_tidptr);
}
445
446/*
447 * This is trivial, and on the face of it looks like it
448 * could equally well be done in user mode.
449 *
450 * Not so, for quite unobvious reasons - register pressure.
451 * In user mode vfork() cannot have a stack frame, and if
452 * done by calling the "clone()" system call directly, you
453 * do not have enough call-clobbered registers to hold all
454 * the information you need.
455 */
/* vfork(2): share the address space and suspend the parent until the
   child execs or exits (CLONE_VFORK | CLONE_VM). */
asmlinkage int sys_vfork(unsigned long r4, unsigned long r5,
			 unsigned long r6, unsigned long r7,
			 struct pt_regs regs)
{
	return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs.regs[15], &regs,
		       0, NULL, NULL);
}
463
/*
 * sys_execve() executes a new program.
 */
asmlinkage int sys_execve(char *ufilename, char **uargv,
			  char **uenvp, unsigned long r7,
			  struct pt_regs regs)
{
	int error;
	char *filename;

	/* Copy the path in from user space (kernel-owned buffer). */
	filename = getname((char __user *)ufilename);
	error = PTR_ERR(filename);
	if (IS_ERR(filename))
		goto out;

	error = do_execve(filename,
			  (char __user * __user *)uargv,
			  (char __user * __user *)uenvp,
			  &regs);
	if (error == 0) {
		/* Successful exec clears any pending delayed-trace flag. */
		task_lock(current);
		current->ptrace &= ~PT_DTRACE;
		task_unlock(current);
	}
	putname(filename);
out:
	return error;
}
492
/* Report where a sleeping task is blocked (for /proc wchan). */
unsigned long get_wchan(struct task_struct *p)
{
	unsigned long schedule_frame;
	unsigned long pc;

	if (!p || p == current || p->state == TASK_RUNNING)
		return 0;

	/*
	 * The same comment as on the Alpha applies here, too ...
	 */
	pc = thread_saved_pc(p);
	if (in_sched_functions(pc)) {
		/* Walk one frame up the saved kernel stack to report the
		   caller of schedule().  NOTE(review): assumes slot [1] of
		   each frame holds the link per SH's schedule() frame
		   layout -- confirm against entry.S. */
		schedule_frame = ((unsigned long *)(long)p->thread.sp)[1];
		return (unsigned long)((unsigned long *)schedule_frame)[1];
	}
	return pc;
}
511
/* UBC hardware-breakpoint trap: disarm the break and report SIGTRAP
   to the single-stepped task. */
asmlinkage void break_point_trap(unsigned long r4, unsigned long r5,
				 unsigned long r6, unsigned long r7,
				 struct pt_regs regs)
{
	/* Clear tracing.  */
	ctrl_outw(0, UBC_BBRA);
	ctrl_outw(0, UBC_BBRB);
	current->thread.ubc_pc = 0;
	/* NOTE(review): decremented unconditionally, unlike exit_thread()
	   which guards on thread.ubc_pc -- relies on this trap only firing
	   while a single-step (which incremented the count) is armed;
	   confirm a spurious UBC trap cannot underflow ubc_usercnt. */
	ubc_usercnt -= 1;

	force_sig(SIGTRAP, current);
}
524
/* Software breakpoint (trapa): rewind the PC over the 2-byte trap
   instruction and deliver SIGTRAP. */
asmlinkage void break_point_trap_software(unsigned long r4, unsigned long r5,
					  unsigned long r6, unsigned long r7,
					  struct pt_regs regs)
{
	/* NOTE(review): pt_regs arrives by value -- the trap entry stub
	   presumably makes this overlay the saved frame so the PC
	   write-back takes effect on return; confirm against entry.S. */
	regs.pc -= 2;
	force_sig(SIGTRAP, current);
}
diff --git a/arch/sh/kernel/ptrace.c b/arch/sh/kernel/ptrace.c
new file mode 100644
index 000000000000..1b0dfb4d8ea4
--- /dev/null
+++ b/arch/sh/kernel/ptrace.c
@@ -0,0 +1,320 @@
1/*
2 * linux/arch/sh/kernel/ptrace.c
3 *
4 * Original x86 implementation:
5 * By Ross Biro 1/23/92
6 * edited by Linus Torvalds
7 *
8 * SuperH version: Copyright (C) 1999, 2000 Kaz Kojima & Niibe Yutaka
9 *
10 */
11
12#include <linux/config.h>
13#include <linux/kernel.h>
14#include <linux/sched.h>
15#include <linux/mm.h>
16#include <linux/smp.h>
17#include <linux/smp_lock.h>
18#include <linux/errno.h>
19#include <linux/ptrace.h>
20#include <linux/user.h>
21#include <linux/slab.h>
22#include <linux/security.h>
23
24#include <asm/io.h>
25#include <asm/uaccess.h>
26#include <asm/pgtable.h>
27#include <asm/system.h>
28#include <asm/processor.h>
29#include <asm/mmu_context.h>
30
31/*
32 * does not yet catch signals sent when the child dies.
33 * in exit.c or in signal.c.
34 */
35
/*
 * This routine will get a word off of the process kernel stack.
 * "offset" is a byte offset into the saved pt_regs frame at the top
 * of the task's kernel stack (below the DSP save area if configured).
 */
static inline int get_stack_long(struct task_struct *task, int offset)
{
	unsigned char *stack;

	stack = (unsigned char *)
		task->thread_info + THREAD_SIZE - sizeof(struct pt_regs)
#ifdef CONFIG_SH_DSP
		- sizeof(struct pt_dspregs)
#endif
		- sizeof(unsigned long);
	stack += offset;
	return (*((int *)stack));
}
52
/*
 * This routine will put a word on the process kernel stack.
 * Mirror of get_stack_long(): writes "data" at the same byte offset
 * into the saved pt_regs frame.  Always returns 0.
 */
static inline int put_stack_long(struct task_struct *task, int offset,
				 unsigned long data)
{
	unsigned char *stack;

	stack = (unsigned char *)
		task->thread_info + THREAD_SIZE - sizeof(struct pt_regs)
#ifdef CONFIG_SH_DSP
		- sizeof(struct pt_dspregs)
#endif
		- sizeof(unsigned long);
	stack += offset;
	*(unsigned long *) stack = data;
	return 0;
}
71
/*
 * Called by kernel/ptrace.c when detaching..
 *
 * Make sure single step bits etc are not set.
 */
void ptrace_disable(struct task_struct *child)
{
	/* nothing to do.. */
	/* NOTE(review): thread.ubc_pc / ubc_usercnt are not cleared here;
	   confirm a detach cannot leave a stale UBC single-step armed. */
}
81
82asmlinkage int sys_ptrace(long request, long pid, long addr, long data)
83{
84 struct task_struct *child;
85 struct user * dummy = NULL;
86 int ret;
87
88 lock_kernel();
89 ret = -EPERM;
90 if (request == PTRACE_TRACEME) {
91 /* are we already being traced? */
92 if (current->ptrace & PT_PTRACED)
93 goto out;
94 ret = security_ptrace(current->parent, current);
95 if (ret)
96 goto out;
97 /* set the ptrace bit in the process flags. */
98 current->ptrace |= PT_PTRACED;
99 ret = 0;
100 goto out;
101 }
102 ret = -ESRCH;
103 read_lock(&tasklist_lock);
104 child = find_task_by_pid(pid);
105 if (child)
106 get_task_struct(child);
107 read_unlock(&tasklist_lock);
108 if (!child)
109 goto out;
110
111 ret = -EPERM;
112 if (pid == 1) /* you may not mess with init */
113 goto out_tsk;
114
115 if (request == PTRACE_ATTACH) {
116 ret = ptrace_attach(child);
117 goto out_tsk;
118 }
119
120 ret = ptrace_check_attach(child, request == PTRACE_KILL);
121 if (ret < 0)
122 goto out_tsk;
123
124 switch (request) {
125 /* when I and D space are separate, these will need to be fixed. */
126 case PTRACE_PEEKTEXT: /* read word at location addr. */
127 case PTRACE_PEEKDATA: {
128 unsigned long tmp;
129 int copied;
130
131 copied = access_process_vm(child, addr, &tmp, sizeof(tmp), 0);
132 ret = -EIO;
133 if (copied != sizeof(tmp))
134 break;
135 ret = put_user(tmp,(unsigned long *) data);
136 break;
137 }
138
139 /* read the word at location addr in the USER area. */
140 case PTRACE_PEEKUSR: {
141 unsigned long tmp;
142
143 ret = -EIO;
144 if ((addr & 3) || addr < 0 ||
145 addr > sizeof(struct user) - 3)
146 break;
147
148 if (addr < sizeof(struct pt_regs))
149 tmp = get_stack_long(child, addr);
150 else if (addr >= (long) &dummy->fpu &&
151 addr < (long) &dummy->u_fpvalid) {
152 if (!tsk_used_math(child)) {
153 if (addr == (long)&dummy->fpu.fpscr)
154 tmp = FPSCR_INIT;
155 else
156 tmp = 0;
157 } else
158 tmp = ((long *)&child->thread.fpu)
159 [(addr - (long)&dummy->fpu) >> 2];
160 } else if (addr == (long) &dummy->u_fpvalid)
161 tmp = !!tsk_used_math(child);
162 else
163 tmp = 0;
164 ret = put_user(tmp, (unsigned long *)data);
165 break;
166 }
167
168 /* when I and D space are separate, this will have to be fixed. */
169 case PTRACE_POKETEXT: /* write the word at location addr. */
170 case PTRACE_POKEDATA:
171 ret = 0;
172 if (access_process_vm(child, addr, &data, sizeof(data), 1) == sizeof(data))
173 break;
174 ret = -EIO;
175 break;
176
177 case PTRACE_POKEUSR: /* write the word at location addr in the USER area */
178 ret = -EIO;
179 if ((addr & 3) || addr < 0 ||
180 addr > sizeof(struct user) - 3)
181 break;
182
183 if (addr < sizeof(struct pt_regs))
184 ret = put_stack_long(child, addr, data);
185 else if (addr >= (long) &dummy->fpu &&
186 addr < (long) &dummy->u_fpvalid) {
187 set_stopped_child_used_math(child);
188 ((long *)&child->thread.fpu)
189 [(addr - (long)&dummy->fpu) >> 2] = data;
190 ret = 0;
191 } else if (addr == (long) &dummy->u_fpvalid) {
192 conditional_stopped_child_used_math(data, child);
193 ret = 0;
194 }
195 break;
196
197 case PTRACE_SYSCALL: /* continue and stop at next (return from) syscall */
198 case PTRACE_CONT: { /* restart after signal. */
199 ret = -EIO;
200 if ((unsigned long) data > _NSIG)
201 break;
202 if (request == PTRACE_SYSCALL)
203 set_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
204 else
205 clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
206 child->exit_code = data;
207 wake_up_process(child);
208 ret = 0;
209 break;
210 }
211
212/*
213 * make the child exit. Best I can do is send it a sigkill.
214 * perhaps it should be put in the status that it wants to
215 * exit.
216 */
217 case PTRACE_KILL: {
218 ret = 0;
219 if (child->exit_state == EXIT_ZOMBIE) /* already dead */
220 break;
221 child->exit_code = SIGKILL;
222 wake_up_process(child);
223 break;
224 }
225
226 case PTRACE_SINGLESTEP: { /* set the trap flag. */
227 long pc;
228 struct pt_regs *dummy = NULL;
229
230 ret = -EIO;
231 if ((unsigned long) data > _NSIG)
232 break;
233 clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
234 if ((child->ptrace & PT_DTRACE) == 0) {
235 /* Spurious delayed TF traps may occur */
236 child->ptrace |= PT_DTRACE;
237 }
238
239 pc = get_stack_long(child, (long)&dummy->pc);
240
241 /* Next scheduling will set up UBC */
242 if (child->thread.ubc_pc == 0)
243 ubc_usercnt += 1;
244 child->thread.ubc_pc = pc;
245
246 child->exit_code = data;
247 /* give it a chance to run. */
248 wake_up_process(child);
249 ret = 0;
250 break;
251 }
252
253 case PTRACE_DETACH: /* detach a process that was attached. */
254 ret = ptrace_detach(child, data);
255 break;
256
257#ifdef CONFIG_SH_DSP
258 case PTRACE_GETDSPREGS: {
259 unsigned long dp;
260
261 ret = -EIO;
262 dp = ((unsigned long) child) + THREAD_SIZE -
263 sizeof(struct pt_dspregs);
264 if (*((int *) (dp - 4)) == SR_FD) {
265 copy_to_user(addr, (void *) dp,
266 sizeof(struct pt_dspregs));
267 ret = 0;
268 }
269 break;
270 }
271
272 case PTRACE_SETDSPREGS: {
273 unsigned long dp;
274 int i;
275
276 ret = -EIO;
277 dp = ((unsigned long) child) + THREAD_SIZE -
278 sizeof(struct pt_dspregs);
279 if (*((int *) (dp - 4)) == SR_FD) {
280 copy_from_user((void *) dp, addr,
281 sizeof(struct pt_dspregs));
282 ret = 0;
283 }
284 break;
285 }
286#endif
287 default:
288 ret = ptrace_request(child, request, addr, data);
289 break;
290 }
291out_tsk:
292 put_task_struct(child);
293out:
294 unlock_kernel();
295 return ret;
296}
297
298asmlinkage void do_syscall_trace(void)
299{
300 struct task_struct *tsk = current;
301
302 if (!test_thread_flag(TIF_SYSCALL_TRACE))
303 return;
304 if (!(tsk->ptrace & PT_PTRACED))
305 return;
306 /* the 0x80 provides a way for the tracing parent to distinguish
307 between a syscall stop and SIGTRAP delivery */
308 ptrace_notify(SIGTRAP | ((current->ptrace & PT_TRACESYSGOOD)
309 ? 0x80 : 0));
310
311 /*
312 * this isn't the same as continuing with a signal, but it will do
313 * for normal use. strace only continues with a signal if the
314 * stopping signal is not SIGTRAP. -brl
315 */
316 if (tsk->exit_code) {
317 send_sig(tsk->exit_code, tsk, 1);
318 tsk->exit_code = 0;
319 }
320}
diff --git a/arch/sh/kernel/semaphore.c b/arch/sh/kernel/semaphore.c
new file mode 100644
index 000000000000..a3c24dcbf01d
--- /dev/null
+++ b/arch/sh/kernel/semaphore.c
@@ -0,0 +1,139 @@
1/*
2 * Just taken from alpha implementation.
3 * This can't work well, perhaps.
4 */
5/*
6 * Generic semaphore code. Buyer beware. Do your own
7 * specific changes in <asm/semaphore-helper.h>
8 */
9
10#include <linux/errno.h>
11#include <linux/sched.h>
12#include <linux/wait.h>
13#include <linux/init.h>
14#include <asm/semaphore.h>
15#include <asm/semaphore-helper.h>
16
17spinlock_t semaphore_wake_lock;
18
19/*
20 * Semaphores are implemented using a two-way counter:
21 * The "count" variable is decremented for each process
22 * that tries to sleep, while the "waking" variable is
23 * incremented when the "up()" code goes to wake up waiting
24 * processes.
25 *
26 * Notably, the inline "up()" and "down()" functions can
27 * efficiently test if they need to do any extra work (up
28 * needs to do something only if count was negative before
29 * the increment operation.
30 *
31 * waking_non_zero() (from asm/semaphore.h) must execute
32 * atomically.
33 *
34 * When __up() is called, the count was negative before
35 * incrementing it, and we need to wake up somebody.
36 *
37 * This routine adds one to the count of processes that need to
38 * wake up and exit. ALL waiting processes actually wake up but
39 * only the one that gets to the "waking" field first will gate
40 * through and acquire the semaphore. The others will go back
41 * to sleep.
42 *
43 * Note that these functions are only called when there is
44 * contention on the lock, and as such all this is the
45 * "non-critical" part of the whole semaphore business. The
46 * critical part is the inline stuff in <asm/semaphore.h>
47 * where we want to avoid any extra jumps and calls.
48 */
/* Slow path of up(), called only on contention (count was negative):
   record one pending wakeup and rouse the sleepers. */
void __up(struct semaphore *sem)
{
	wake_one_more(sem);
	wake_up(&sem->wait);
}
54
55/*
56 * Perform the "down" function. Return zero for semaphore acquired,
57 * return negative for signalled out of the function.
58 *
59 * If called from __down, the return is ignored and the wait loop is
60 * not interruptible. This means that a task waiting on a semaphore
61 * using "down()" cannot be killed until someone does an "up()" on
62 * the semaphore.
63 *
64 * If called from __down_interruptible, the return value gets checked
65 * upon return. If the return value is negative then the task continues
66 * with the negative value in the return register (it can be tested by
67 * the caller).
68 *
69 * Either form may be used in conjunction with "up()".
70 *
71 */
72
/*
 * Shared boilerplate for the two sleeping down() variants below:
 * DOWN_VAR declares the wait-queue entry, DOWN_HEAD enqueues it and
 * opens the sleep loop (whose body the caller supplies), DOWN_TAIL
 * closes the loop and dequeues.  They expect a "sem" in scope.
 */
#define DOWN_VAR				\
	struct task_struct *tsk = current;	\
	wait_queue_t wait;			\
	init_waitqueue_entry(&wait, tsk);

#define DOWN_HEAD(task_state)						\
									\
									\
	tsk->state = (task_state);					\
	add_wait_queue(&sem->wait, &wait);				\
									\
	/*								\
	 * Ok, we're set up.  sem->count is known to be less than zero	\
	 * so we must wait.						\
	 *								\
	 * We can let go the lock for purposes of waiting.		\
	 * We re-acquire it after awaking so as to protect		\
	 * all semaphore operations.					\
	 *								\
	 * If "up()" is called before we call waking_non_zero() then	\
	 * we will catch it right away.  If it is called later then	\
	 * we will have to go through a wakeup cycle to catch it.	\
	 *								\
	 * Multiple waiters contend for the semaphore lock to see	\
	 * who gets to gate through and who has to wait some more.	\
	 */								\
	for (;;) {

#define DOWN_TAIL(task_state)			\
		tsk->state = (task_state);	\
	}					\
	tsk->state = TASK_RUNNING;		\
	remove_wait_queue(&sem->wait, &wait);
106
/* Uninterruptible down() slow path: sleep until waking_non_zero()
   grants us a pending wakeup. */
void __sched __down(struct semaphore * sem)
{
	DOWN_VAR
	DOWN_HEAD(TASK_UNINTERRUPTIBLE)
	if (waking_non_zero(sem))
		break;
	schedule();
	DOWN_TAIL(TASK_UNINTERRUPTIBLE)
}
116
/* Interruptible down() slow path: returns 0 once acquired, or the
   negative value from waking_non_zero_interruptible() when a signal
   interrupts the wait. */
int __sched __down_interruptible(struct semaphore * sem)
{
	int ret = 0;
	DOWN_VAR
	DOWN_HEAD(TASK_INTERRUPTIBLE)

	/* Per the helper's convention, 1 means we won the semaphore. */
	ret = waking_non_zero_interruptible(sem, tsk);
	if (ret)
	{
		if (ret == 1)
			/* ret != 0 only if we get interrupted -arca */
			ret = 0;
		break;
	}
	schedule();
	DOWN_TAIL(TASK_INTERRUPTIBLE)
	return ret;
}
135
/* Non-blocking down(): defer entirely to the helper's atomic check. */
int __down_trylock(struct semaphore * sem)
{
	return waking_non_zero_trylock(sem);
}
diff --git a/arch/sh/kernel/setup.c b/arch/sh/kernel/setup.c
new file mode 100644
index 000000000000..25b9d9ebe858
--- /dev/null
+++ b/arch/sh/kernel/setup.c
@@ -0,0 +1,649 @@
1/* $Id: setup.c,v 1.30 2003/10/13 07:21:19 lethal Exp $
2 *
3 * linux/arch/sh/kernel/setup.c
4 *
5 * Copyright (C) 1999 Niibe Yutaka
6 * Copyright (C) 2002, 2003 Paul Mundt
7 */
8
9/*
10 * This file handles the architecture-dependent parts of initialization
11 */
12
13#include <linux/tty.h>
14#include <linux/ioport.h>
15#include <linux/init.h>
16#include <linux/initrd.h>
17#include <linux/bootmem.h>
18#include <linux/console.h>
19#include <linux/seq_file.h>
20#include <linux/root_dev.h>
21#include <linux/utsname.h>
22#include <linux/cpu.h>
23#include <asm/uaccess.h>
24#include <asm/io.h>
25#include <asm/io_generic.h>
26#include <asm/sections.h>
27#include <asm/irq.h>
28#include <asm/setup.h>
29
30#ifdef CONFIG_SH_KGDB
31#include <asm/kgdb.h>
32static int kgdb_parse_options(char *options);
33#endif
/* Bounds of an initrd linked into the image -- presumably provided by
 * the linker script; verify against vmlinux.lds. */
extern void * __rd_start, * __rd_end;
/*
 * Machine setup..
 */

/*
 * Initialize loops_per_jiffy as 10000000 (1000MIPS).
 * This value will be used at the very early stage of serial setup.
 * The bigger value means no problem.
 */
struct sh_cpuinfo boot_cpu_data = { CPU_SH_NONE, 0, 10000000, };
/* NOTE(review): tentative definition; screen_info is defined again,
 * with an initializer, a few lines below.  Legal C (the definitions
 * merge) but one of the two is redundant. */
struct screen_info screen_info;

#if defined(CONFIG_SH_UNKNOWN)
struct sh_machine_vector sh_mv;
#endif

/* We need this to satisfy some external references. */
struct screen_info screen_info = {
	0, 25,			/* orig-x, orig-y */
	0,			/* unused */
	0,			/* orig-video-page */
	0,			/* orig-video-mode */
	80,			/* orig-video-cols */
	0,0,0,			/* ega_ax, ega_bx, ega_cx */
	25,			/* orig-video-lines */
	0,			/* orig-video-isVGA */
	16			/* orig-video-points */
};
63
extern void platform_setup(void);	/* board-specific init, called at the end of setup_arch() */
extern char *get_system_type(void);
extern int root_mountflags;

/* Maximum length (incl. NUL) of an "sh_mv=" machine-vector name. */
#define MV_NAME_SIZE 32

static struct sh_machine_vector* __init get_mv_byname(const char* name);

/*
 * This is set up by the setup-routine at boot-time.
 * The boot loader deposits its parameter block in empty_zero_page;
 * the offsets below decode it.
 */
#define PARAM	((unsigned char *)empty_zero_page)

#define MOUNT_ROOT_RDONLY (*(unsigned long *) (PARAM+0x000))
#define RAMDISK_FLAGS (*(unsigned long *) (PARAM+0x004))
#define ORIG_ROOT_DEV (*(unsigned long *) (PARAM+0x008))
#define LOADER_TYPE (*(unsigned long *) (PARAM+0x00c))
#define INITRD_START (*(unsigned long *) (PARAM+0x010))
#define INITRD_SIZE (*(unsigned long *) (PARAM+0x014))
/* ... */
#define COMMAND_LINE ((char *) (PARAM+0x100))

/* Sub-fields of RAMDISK_FLAGS, decoded in setup_arch(). */
#define RAMDISK_IMAGE_START_MASK	0x07FF
#define RAMDISK_PROMPT_FLAG		0x8000
#define RAMDISK_LOAD_FLAG		0x4000

/* Stripped command line built by parse_cmdline(). */
static char command_line[COMMAND_LINE_SIZE] = { 0, };

/* NOTE(review): these legacy I/O port ranges (PIC/DMA/FPU at PC
 * addresses) look inherited from i386 -- confirm they are meaningful
 * for any SH board before relying on them. */
struct resource standard_io_resources[] = {
	{ "dma1", 0x00, 0x1f },
	{ "pic1", 0x20, 0x3f },
	{ "timer", 0x40, 0x5f },
	{ "keyboard", 0x60, 0x6f },
	{ "dma page reg", 0x80, 0x8f },
	{ "pic2", 0xa0, 0xbf },
	{ "dma2", 0xc0, 0xdf },
	{ "fpu", 0xf0, 0xff }
};

#define STANDARD_IO_RESOURCES (sizeof(standard_io_resources)/sizeof(struct resource))

/* System RAM - interrupted by the 640kB-1M hole */
#define code_resource (ram_resources[3])
#define data_resource (ram_resources[4])
static struct resource ram_resources[] = {
	{ "System RAM", 0x000000, 0x09ffff, IORESOURCE_BUSY },
	{ "System RAM", 0x100000, 0x100000, IORESOURCE_BUSY },
	{ "Video RAM area", 0x0a0000, 0x0bffff },
	{ "Kernel code", 0x100000, 0 },
	{ "Kernel data", 0, 0 }
};

/* Virtual bounds of usable RAM; set in parse_cmdline(), may be
 * shrunk/grown by the "mem=" boot option. */
unsigned long memory_start, memory_end;
117
/*
 * Build the kernel command line in command_line[] from the
 * boot-loader-supplied COMMAND_LINE, consuming the "mem=XXX[kKmM]"
 * and "sh_mv=name[,io_base,mmio_enable]" options along the way.
 *
 * @cmdline_p:      set to the stripped command line
 * @mv_name:        filled with the requested machine-vector name
 * @mvp:            set to the vector found by get_mv_byname(), if any
 * @mv_io_base:     optional I/O base from the sh_mv option
 * @mv_mmio_enable: optional MMIO-enable flag from the sh_mv option
 *
 * Also initialises memory_start/memory_end from the compile-time
 * __MEMORY_START/__MEMORY_SIZE; "mem=" then overrides memory_end.
 *
 * NOTE(review): the memcmp() probes may read a few bytes past the
 * string terminator when an option sits at the very end of the
 * buffer -- assumes the boot params area is NUL-padded; confirm.
 */
static inline void parse_cmdline (char ** cmdline_p, char mv_name[MV_NAME_SIZE],
				  struct sh_machine_vector** mvp,
				  unsigned long *mv_io_base,
				  int *mv_mmio_enable)
{
	char c = ' ', *to = command_line, *from = COMMAND_LINE;
	int len = 0;

	/* Save unparsed command line copy for /proc/cmdline */
	memcpy(saved_command_line, COMMAND_LINE, COMMAND_LINE_SIZE);
	saved_command_line[COMMAND_LINE_SIZE-1] = '\0';

	memory_start = (unsigned long)PAGE_OFFSET+__MEMORY_START;
	memory_end = memory_start + __MEMORY_SIZE;

	for (;;) {
		/*
		 * "mem=XXX[kKmM]" defines a size of memory.
		 */
		if (c == ' ' && !memcmp(from, "mem=", 4)) {
			if (to != command_line)
				to--;	/* drop the space copied before the option */
			{
				unsigned long mem_size;

				mem_size = memparse(from+4, &from);
				memory_end = memory_start + mem_size;
			}
		}
		if (c == ' ' && !memcmp(from, "sh_mv=", 6)) {
			char* mv_end;
			char* mv_comma;
			int mv_len;
			if (to != command_line)
				to--;
			from += 6;
			mv_end = strchr(from, ' ');
			if (mv_end == NULL)
				mv_end = from + strlen(from);

			mv_comma = strchr(from, ',');
			if ((mv_comma != NULL) && (mv_comma < mv_end)) {
				int ints[3];
				get_options(mv_comma+1, ARRAY_SIZE(ints), ints);
				*mv_io_base = ints[1];
				*mv_mmio_enable = ints[2];
				mv_len = mv_comma - from;
			} else {
				mv_len = mv_end - from;
			}
			/* Truncate over-long names instead of overflowing mv_name. */
			if (mv_len > (MV_NAME_SIZE-1))
				mv_len = MV_NAME_SIZE-1;
			memcpy(mv_name, from, mv_len);
			mv_name[mv_len] = '\0';
			from = mv_end;

			*mvp = get_mv_byname(mv_name);
		}
		c = *(from++);
		if (!c)
			break;
		if (COMMAND_LINE_SIZE <= ++len)
			break;
		*(to++) = c;
	}
	*to = '\0';
	*cmdline_p = command_line;
}
186
/*
 * Select and populate the global machine vector (sh_mv): parse the
 * command line (which may name a vector via "sh_mv="), copy the
 * chosen vector, then fill every I/O operation the board left NULL
 * with its generic_* fallback.  Always returns 0.
 *
 * NOTE(review): the CONFIG_SH_GENERIC branch takes &mv_unknown, but
 * the extern declaration of mv_unknown above is only compiled under
 * CONFIG_SH_UNKNOWN -- confirm the Kconfig guarantees the needed
 * combination, otherwise this fails to build for GENERIC alone.
 */
static int __init sh_mv_setup(char **cmdline_p)
{
#if defined(CONFIG_SH_UNKNOWN)
	extern struct sh_machine_vector mv_unknown;
#endif
	struct sh_machine_vector *mv = NULL;
	char mv_name[MV_NAME_SIZE] = "";
	unsigned long mv_io_base = 0;
	int mv_mmio_enable = 0;

	parse_cmdline(cmdline_p, mv_name, &mv, &mv_io_base, &mv_mmio_enable);

#ifdef CONFIG_SH_GENERIC
	if (mv == NULL) {
		mv = &mv_unknown;
		if (*mv_name != '\0') {
			printk("Warning: Unsupported machine %s, using unknown\n",
			       mv_name);
		}
	}
	sh_mv = *mv;
#endif
#ifdef CONFIG_SH_UNKNOWN
	sh_mv = mv_unknown;
#endif

	/*
	 * Manually walk the vec, fill in anything that the board hasn't yet
	 * by hand, wrapping to the generic implementation.
	 */
#define mv_set(elem) do { \
	if (!sh_mv.mv_##elem) \
		sh_mv.mv_##elem = generic_##elem; \
} while (0)

	mv_set(inb);	mv_set(inw);	mv_set(inl);
	mv_set(outb);	mv_set(outw);	mv_set(outl);

	mv_set(inb_p);	mv_set(inw_p);	mv_set(inl_p);
	mv_set(outb_p);	mv_set(outw_p);	mv_set(outl_p);

	mv_set(insb);	mv_set(insw);	mv_set(insl);
	mv_set(outsb);	mv_set(outsw);	mv_set(outsl);

	mv_set(readb);	mv_set(readw);	mv_set(readl);
	mv_set(writeb);	mv_set(writew);	mv_set(writel);

	mv_set(ioremap);
	mv_set(iounmap);

	mv_set(isa_port2addr);
	mv_set(irq_demux);

#ifdef CONFIG_SH_UNKNOWN
	__set_io_port_base(mv_io_base);
#endif

	return 0;
}
246
/*
 * Architecture-specific boot-time setup.
 *
 * Decodes the boot parameter block (PARAM/empty_zero_page), selects
 * the machine vector, initialises the bootmem allocator -- reserving
 * the kernel image + bootmem bitmap, physical page 0 and any initrd --
 * and finally calls the board's platform_setup() and paging_init().
 */
void __init setup_arch(char **cmdline_p)
{
	unsigned long bootmap_size;
	unsigned long start_pfn, max_pfn, max_low_pfn;

#ifdef CONFIG_EARLY_PRINTK
	extern void enable_early_printk(void);

	enable_early_printk();
#endif
#ifdef CONFIG_CMDLINE_BOOL
	/* Compiled-in command line replaces whatever the loader passed. */
	strcpy(COMMAND_LINE, CONFIG_CMDLINE);
#endif

	ROOT_DEV = old_decode_dev(ORIG_ROOT_DEV);

#ifdef CONFIG_BLK_DEV_RAM
	rd_image_start = RAMDISK_FLAGS & RAMDISK_IMAGE_START_MASK;
	rd_prompt = ((RAMDISK_FLAGS & RAMDISK_PROMPT_FLAG) != 0);
	rd_doload = ((RAMDISK_FLAGS & RAMDISK_LOAD_FLAG) != 0);
#endif

	if (!MOUNT_ROOT_RDONLY)
		root_mountflags &= ~MS_RDONLY;
	init_mm.start_code = (unsigned long) _text;
	init_mm.end_code = (unsigned long) _etext;
	init_mm.end_data = (unsigned long) _edata;
	init_mm.brk = (unsigned long) _end;

	code_resource.start = virt_to_bus(_text);
	code_resource.end = virt_to_bus(_etext)-1;
	data_resource.start = virt_to_bus(_etext);
	data_resource.end = virt_to_bus(_edata)-1;

	sh_mv_setup(cmdline_p);

#define PFN_UP(x)	(((x) + PAGE_SIZE-1) >> PAGE_SHIFT)
#define PFN_DOWN(x)	((x) >> PAGE_SHIFT)
#define PFN_PHYS(x)	((x) << PAGE_SHIFT)

#ifdef CONFIG_DISCONTIGMEM
	/* Second memory bank lives on node 1 with its own bootmem map. */
	NODE_DATA(0)->bdata = &discontig_node_bdata[0];
	NODE_DATA(1)->bdata = &discontig_node_bdata[1];

	bootmap_size = init_bootmem_node(NODE_DATA(1),
					 PFN_UP(__MEMORY_START_2ND),
					 PFN_UP(__MEMORY_START_2ND),
					 PFN_DOWN(__MEMORY_START_2ND+__MEMORY_SIZE_2ND));
	free_bootmem_node(NODE_DATA(1), __MEMORY_START_2ND, __MEMORY_SIZE_2ND);
	reserve_bootmem_node(NODE_DATA(1), __MEMORY_START_2ND, bootmap_size);
#endif

	/*
	 * Find the highest page frame number we have available
	 */
	max_pfn = PFN_DOWN(__pa(memory_end));

	/*
	 * Determine low and high memory ranges:
	 */
	max_low_pfn = max_pfn;

	/*
	 * Partially used pages are not usable - thus
	 * we are rounding upwards:
	 */
	start_pfn = PFN_UP(__pa(_end));

	/*
	 * Find a proper area for the bootmem bitmap. After this
	 * bootstrap step all allocations (until the page allocator
	 * is intact) must be done via bootmem_alloc().
	 */
	bootmap_size = init_bootmem_node(NODE_DATA(0), start_pfn,
					 __MEMORY_START>>PAGE_SHIFT,
					 max_low_pfn);
	/*
	 * Register fully available low RAM pages with the bootmem allocator.
	 */
	{
		unsigned long curr_pfn, last_pfn, pages;

		/*
		 * We are rounding up the start address of usable memory:
		 */
		curr_pfn = PFN_UP(__MEMORY_START);
		/*
		 * ... and at the end of the usable range downwards:
		 */
		last_pfn = PFN_DOWN(__pa(memory_end));

		if (last_pfn > max_low_pfn)
			last_pfn = max_low_pfn;

		pages = last_pfn - curr_pfn;
		free_bootmem_node(NODE_DATA(0), PFN_PHYS(curr_pfn),
				  PFN_PHYS(pages));
	}

	/*
	 * Reserve the kernel text and
	 * Reserve the bootmem bitmap. We do this in two steps (first step
	 * was init_bootmem()), because this catches the (definitely buggy)
	 * case of us accidentally initializing the bootmem allocator with
	 * an invalid RAM area.
	 */
	reserve_bootmem_node(NODE_DATA(0), __MEMORY_START+PAGE_SIZE,
		(PFN_PHYS(start_pfn)+bootmap_size+PAGE_SIZE-1)-__MEMORY_START);

	/*
	 * reserve physical page 0 - it's a special BIOS page on many boxes,
	 * enabling clean reboots, SMP operation, laptop functions.
	 */
	reserve_bootmem_node(NODE_DATA(0), __MEMORY_START, PAGE_SIZE);

#ifdef CONFIG_BLK_DEV_INITRD
	ROOT_DEV = MKDEV(RAMDISK_MAJOR, 0);
	/* An initrd linked into the image overrides the loader's info. */
	if (&__rd_start != &__rd_end) {
		LOADER_TYPE = 1;
		INITRD_START = PHYSADDR((unsigned long)&__rd_start) - __MEMORY_START;
		INITRD_SIZE = (unsigned long)&__rd_end - (unsigned long)&__rd_start;
	}

	if (LOADER_TYPE && INITRD_START) {
		if (INITRD_START + INITRD_SIZE <= (max_low_pfn << PAGE_SHIFT)) {
			reserve_bootmem_node(NODE_DATA(0), INITRD_START+__MEMORY_START, INITRD_SIZE);
			initrd_start =
				INITRD_START ? INITRD_START + PAGE_OFFSET + __MEMORY_START : 0;
			initrd_end = initrd_start + INITRD_SIZE;
		} else {
			printk("initrd extends beyond end of memory "
			       "(0x%08lx > 0x%08lx)\ndisabling initrd\n",
			       INITRD_START + INITRD_SIZE,
			       max_low_pfn << PAGE_SHIFT);
			initrd_start = 0;
		}
	}
#endif

#ifdef CONFIG_DUMMY_CONSOLE
	conswitchp = &dummy_con;
#endif

	/* Perform the machine specific initialisation */
	platform_setup();

	paging_init();
}
395
396struct sh_machine_vector* __init get_mv_byname(const char* name)
397{
398 extern int strcasecmp(const char *, const char *);
399 extern long __machvec_start, __machvec_end;
400 struct sh_machine_vector *all_vecs =
401 (struct sh_machine_vector *)&__machvec_start;
402
403 int i, n = ((unsigned long)&__machvec_end
404 - (unsigned long)&__machvec_start)/
405 sizeof(struct sh_machine_vector);
406
407 for (i = 0; i < n; ++i) {
408 struct sh_machine_vector *mv = &all_vecs[i];
409 if (mv == NULL)
410 continue;
411 if (strcasecmp(name, get_system_type()) == 0) {
412 return mv;
413 }
414 }
415 return NULL;
416}
417
418static struct cpu cpu[NR_CPUS];
419
420static int __init topology_init(void)
421{
422 int cpu_id;
423
424 for (cpu_id = 0; cpu_id < NR_CPUS; cpu_id++)
425 if (cpu_possible(cpu_id))
426 register_cpu(&cpu[cpu_id], cpu_id, NULL);
427
428 return 0;
429}
430
431subsys_initcall(topology_init);
432
/* Human-readable subtype names, indexed by the CPU_* probe constants
 * stored in boot_cpu_data.type (see get_cpu_subtype() below). */
static const char *cpu_name[] = {
	[CPU_SH7604]	= "SH7604",
	[CPU_SH7705]	= "SH7705",
	[CPU_SH7708]	= "SH7708",
	[CPU_SH7729]	= "SH7729",
	[CPU_SH7300]	= "SH7300",
	[CPU_SH7750]	= "SH7750",
	[CPU_SH7750S]	= "SH7750S",
	[CPU_SH7750R]	= "SH7750R",
	[CPU_SH7751]	= "SH7751",
	[CPU_SH7751R]	= "SH7751R",
	[CPU_SH7760]	= "SH7760",
	[CPU_SH73180]	= "SH73180",
	[CPU_ST40RA]	= "ST40RA",
	[CPU_ST40GX1]	= "ST40GX1",
	[CPU_SH4_202]	= "SH4-202",
	[CPU_SH4_501]	= "SH4-501",
	[CPU_SH_NONE]	= "Unknown"
};
452
/* Name of the probed boot CPU subtype (used for /proc/cpuinfo). */
const char *get_cpu_subtype(void)
{
	return cpu_name[boot_cpu_data.type];
}
457
458#ifdef CONFIG_PROC_FS
459static const char *cpu_flags[] = {
460 "none", "fpu", "p2flush", "mmuassoc", "dsp", "perfctr",
461};
462
463static void show_cpuflags(struct seq_file *m)
464{
465 unsigned long i;
466
467 seq_printf(m, "cpu flags\t:");
468
469 if (!cpu_data->flags) {
470 seq_printf(m, " %s\n", cpu_flags[0]);
471 return;
472 }
473
474 for (i = 0; i < cpu_data->flags; i++)
475 if ((cpu_data->flags & (1 << i)))
476 seq_printf(m, " %s", cpu_flags[i+1]);
477
478 seq_printf(m, "\n");
479}
480
481static void show_cacheinfo(struct seq_file *m, const char *type, struct cache_info info)
482{
483 unsigned int cache_size;
484
485 cache_size = info.ways * info.sets * info.linesz;
486
487 seq_printf(m, "%s size\t: %dKiB\n", type, cache_size >> 10);
488}
489
490/*
491 * Get CPU information for use by the procfs.
492 */
493static int show_cpuinfo(struct seq_file *m, void *v)
494{
495 unsigned int cpu = smp_processor_id();
496
497 if (!cpu && cpu_online(cpu))
498 seq_printf(m, "machine\t\t: %s\n", get_system_type());
499
500 seq_printf(m, "processor\t: %d\n", cpu);
501 seq_printf(m, "cpu family\t: %s\n", system_utsname.machine);
502 seq_printf(m, "cpu type\t: %s\n", get_cpu_subtype());
503
504 show_cpuflags(m);
505
506 seq_printf(m, "cache type\t: ");
507
508 /*
509 * Check for what type of cache we have, we support both the
510 * unified cache on the SH-2 and SH-3, as well as the harvard
511 * style cache on the SH-4.
512 */
513 if (test_bit(SH_CACHE_COMBINED, &(boot_cpu_data.icache.flags))) {
514 seq_printf(m, "unified\n");
515 show_cacheinfo(m, "cache", boot_cpu_data.icache);
516 } else {
517 seq_printf(m, "split (harvard)\n");
518 show_cacheinfo(m, "icache", boot_cpu_data.icache);
519 show_cacheinfo(m, "dcache", boot_cpu_data.dcache);
520 }
521
522 seq_printf(m, "bogomips\t: %lu.%02lu\n",
523 boot_cpu_data.loops_per_jiffy/(500000/HZ),
524 (boot_cpu_data.loops_per_jiffy/(5000/HZ)) % 100);
525
526#define PRINT_CLOCK(name, value) \
527 seq_printf(m, name " clock\t: %d.%02dMHz\n", \
528 ((value) / 1000000), ((value) % 1000000)/10000)
529
530 PRINT_CLOCK("cpu", boot_cpu_data.cpu_clock);
531 PRINT_CLOCK("bus", boot_cpu_data.bus_clock);
532#ifdef CONFIG_CPU_SUBTYPE_ST40STB1
533 PRINT_CLOCK("memory", boot_cpu_data.memory_clock);
534#endif
535 PRINT_CLOCK("module", boot_cpu_data.module_clock);
536
537 return 0;
538}
539
540
/* seq_file iterator over cpu_data[0..NR_CPUS-1]. */
static void *c_start(struct seq_file *m, loff_t *pos)
{
	return *pos < NR_CPUS ? cpu_data + *pos : NULL;
}
static void *c_next(struct seq_file *m, void *v, loff_t *pos)
{
	++*pos;
	return c_start(m, pos);
}
static void c_stop(struct seq_file *m, void *v)
{
	/* nothing to release */
}
/* Hooked up to /proc/cpuinfo by the generic proc code. */
struct seq_operations cpuinfo_op = {
	.start	= c_start,
	.next	= c_next,
	.stop	= c_stop,
	.show	= show_cpuinfo,
};
559#endif /* CONFIG_PROC_FS */
560
561#ifdef CONFIG_SH_KGDB
562/*
563 * Parse command-line kgdb options. By default KGDB is enabled,
564 * entered on error (or other action) using default serial info.
565 * The command-line option can include a serial port specification
566 * and an action to override default or configured behavior.
567 */
/* Built-in map for the on-chip SCI ports ("ttySC<n>"). */
struct kgdb_sermap kgdb_sci_sermap =
{ "ttySC", 5, kgdb_sci_setup, NULL };

/* Head of the registered port-map list, and the currently selected map. */
struct kgdb_sermap *kgdb_serlist = &kgdb_sci_sermap;
struct kgdb_sermap *kgdb_porttype = &kgdb_sci_sermap;
573
574void kgdb_register_sermap(struct kgdb_sermap *map)
575{
576 struct kgdb_sermap *last;
577
578 for (last = kgdb_serlist; last->next; last = last->next)
579 ;
580 last->next = map;
581 if (!map->namelen) {
582 map->namelen = strlen(map->name);
583 }
584}
585
/*
 * Parse the "kgdb=" boot option: an optional port spec
 * ("tty<type><n>[,baud[parity][bits]]") followed by an optional
 * action ("halt" or "disabled").  Returns 1 on success, -1 for an
 * unknown port type, 0 when trailing junk was ignored.
 *
 * NOTE(review): the port instance is read from a single digit
 * (options[map->namelen] - '0') with no range check, so multi-digit
 * port numbers are silently mis-parsed -- confirm this is acceptable.
 */
static int __init kgdb_parse_options(char *options)
{
	char c;
	int baud;

	/* Check for port spec (or use default) */

	/* Determine port type and instance */
	if (!memcmp(options, "tty", 3)) {
		struct kgdb_sermap *map = kgdb_serlist;

		while (map && memcmp(options, map->name, map->namelen))
			map = map->next;

		if (!map) {
			KGDB_PRINTK("unknown port spec in %s\n", options);
			return -1;
		}

		kgdb_porttype = map;
		kgdb_serial_setup = map->setup_fn;
		kgdb_portnum = options[map->namelen] - '0';
		options += map->namelen + 1;

		options = (*options == ',') ? options+1 : options;

		/* Read optional parameters (baud/parity/bits) */
		baud = simple_strtoul(options, &options, 10);
		if (baud != 0) {
			kgdb_baud = baud;

			c = toupper(*options);
			if (c == 'E' || c == 'O' || c == 'N') {
				kgdb_parity = c;
				options++;
			}

			c = *options;
			if (c == '7' || c == '8') {
				kgdb_bits = c;
				options++;
			}
			options = (*options == ',') ? options+1 : options;
		}
	}

	/* Check for action specification */
	if (!memcmp(options, "halt", 4)) {
		kgdb_halt = 1;
		options += 4;
	} else if (!memcmp(options, "disabled", 8)) {
		kgdb_enabled = 0;
		options += 8;
	}

	if (*options) {
		KGDB_PRINTK("ignored unknown options: %s\n", options);
		return 0;
	}
	return 1;
}
__setup("kgdb=", kgdb_parse_options);
648#endif /* CONFIG_SH_KGDB */
649
diff --git a/arch/sh/kernel/sh_bios.c b/arch/sh/kernel/sh_bios.c
new file mode 100644
index 000000000000..5b53e10bb9cd
--- /dev/null
+++ b/arch/sh/kernel/sh_bios.c
@@ -0,0 +1,75 @@
1/*
2 * linux/arch/sh/kernel/sh_bios.c
3 * C interface for trapping into the standard LinuxSH BIOS.
4 *
5 * Copyright (C) 2000 Greg Banks, Mitch Davis
6 *
7 */
8
9#include <asm/sh_bios.h>
10
/* LinuxSH BIOS function numbers, passed in r0 to "trapa #0x3f". */
#define BIOS_CALL_CONSOLE_WRITE		0
#define BIOS_CALL_READ_BLOCK		1
#define BIOS_CALL_ETH_NODE_ADDR		10
#define BIOS_CALL_SHUTDOWN		11
#define BIOS_CALL_CHAR_OUT		0x1f	/* TODO: hack */
#define BIOS_CALL_GDB_GET_MODE_PTR	0xfe
#define BIOS_CALL_GDB_DETACH		0xff

/*
 * Issue a BIOS trap: function number in r0, arguments in r4-r7,
 * result returned in r0.  The explicit register variables pin the
 * BIOS calling convention; the "memory" clobber stops the compiler
 * from caching memory across the trap.
 */
static __inline__ long sh_bios_call(long func, long arg0, long arg1, long arg2, long arg3)
{
	register long r0 __asm__("r0") = func;
	register long r4 __asm__("r4") = arg0;
	register long r5 __asm__("r5") = arg1;
	register long r6 __asm__("r6") = arg2;
	register long r7 __asm__("r7") = arg3;
	__asm__ __volatile__("trapa	#0x3f"
			     : "=z" (r0)
			     : "0" (r0), "r" (r4), "r" (r5), "r" (r6), "r" (r7)
			     : "memory");
	return r0;
}
32
33
/* Write @len bytes from @buf to the BIOS console. */
void sh_bios_console_write(const char *buf, unsigned int len)
{
	sh_bios_call(BIOS_CALL_CONSOLE_WRITE, (long)buf, (long)len, 0, 0);
}


/* Emit a single character via the (hack) char-out BIOS call. */
void sh_bios_char_out(char ch)
{
	sh_bios_call(BIOS_CALL_CHAR_OUT, ch, 0, 0, 0);
}
44
45
46int sh_bios_in_gdb_mode(void)
47{
48 static char queried = 0;
49 static char *gdb_mode_p = 0;
50
51 if (!queried)
52 {
53 /* Query the gdb stub for address of its gdb mode variable */
54 long r = sh_bios_call(BIOS_CALL_GDB_GET_MODE_PTR, 0, 0, 0, 0);
55 if (r != ~0) /* BIOS returns -1 for unknown function */
56 gdb_mode_p = (char *)r;
57 queried = 1;
58 }
59 return (gdb_mode_p != 0 ? *gdb_mode_p : 0);
60}
61
/* Tell the resident gdb stub to detach from this kernel. */
void sh_bios_gdb_detach(void)
{
	sh_bios_call(BIOS_CALL_GDB_DETACH, 0, 0, 0, 0);
}

/* Fetch the board's Ethernet MAC into @node_addr (written by the BIOS). */
void sh_bios_get_node_addr (unsigned char *node_addr)
{
	sh_bios_call(BIOS_CALL_ETH_NODE_ADDR, 0, (long)node_addr, 0, 0);
}

/* Ask the BIOS to shut down/reset the board; @how selects the action. */
void sh_bios_shutdown(unsigned int how)
{
	sh_bios_call(BIOS_CALL_SHUTDOWN, how, 0, 0, 0);
}
diff --git a/arch/sh/kernel/sh_ksyms.c b/arch/sh/kernel/sh_ksyms.c
new file mode 100644
index 000000000000..6954fd62470a
--- /dev/null
+++ b/arch/sh/kernel/sh_ksyms.c
@@ -0,0 +1,126 @@
1#include <linux/config.h>
2#include <linux/module.h>
3#include <linux/smp.h>
4#include <linux/user.h>
5#include <linux/elfcore.h>
6#include <linux/sched.h>
7#include <linux/in6.h>
8#include <linux/interrupt.h>
9#include <linux/smp_lock.h>
10#include <linux/vmalloc.h>
11#include <linux/pci.h>
12#include <linux/irq.h>
13
14#include <asm/semaphore.h>
15#include <asm/processor.h>
16#include <asm/uaccess.h>
17#include <asm/checksum.h>
18#include <asm/io.h>
19#include <asm/delay.h>
20#include <asm/tlbflush.h>
21#include <asm/cacheflush.h>
22#include <asm/checksum.h>
23
/* Forward declarations for symbols defined in other arch files. */
extern void dump_thread(struct pt_regs *, struct user *);
extern int dump_fpu(struct pt_regs *, elf_fpregset_t *);
extern struct hw_interrupt_type no_irq_type;

EXPORT_SYMBOL(sh_mv);

/* platform dependent support */
EXPORT_SYMBOL(dump_thread);
EXPORT_SYMBOL(dump_fpu);
EXPORT_SYMBOL(iounmap);
EXPORT_SYMBOL(enable_irq);
EXPORT_SYMBOL(disable_irq);
EXPORT_SYMBOL(probe_irq_mask);
EXPORT_SYMBOL(kernel_thread);
EXPORT_SYMBOL(disable_irq_nosync);
EXPORT_SYMBOL(irq_desc);
EXPORT_SYMBOL(no_irq_type);

/* string routines from the arch/generic lib, needed by modules */
EXPORT_SYMBOL(strpbrk);
EXPORT_SYMBOL(strstr);
EXPORT_SYMBOL(strlen);
EXPORT_SYMBOL(strnlen);
EXPORT_SYMBOL(strchr);
EXPORT_SYMBOL(strcat);
EXPORT_SYMBOL(strncat);

/* PCI exports */
#ifdef CONFIG_PCI
EXPORT_SYMBOL(pci_alloc_consistent);
EXPORT_SYMBOL(pci_free_consistent);
#endif

/* mem exports */
EXPORT_SYMBOL(memchr);
EXPORT_SYMBOL(memcpy);
EXPORT_SYMBOL(memcpy_fromio);
EXPORT_SYMBOL(memcpy_toio);
EXPORT_SYMBOL(memset);
EXPORT_SYMBOL(memset_io);
EXPORT_SYMBOL(memmove);
EXPORT_SYMBOL(memcmp);
EXPORT_SYMBOL(memscan);
EXPORT_SYMBOL(__copy_user);
EXPORT_SYMBOL(boot_cpu_data);

#ifdef CONFIG_MMU
EXPORT_SYMBOL(get_vm_area);
#endif

/* semaphore exports */
EXPORT_SYMBOL(__up);
EXPORT_SYMBOL(__down);
EXPORT_SYMBOL(__down_interruptible);

EXPORT_SYMBOL(__udelay);
EXPORT_SYMBOL(__ndelay);
EXPORT_SYMBOL(__const_udelay);

EXPORT_SYMBOL(__div64_32);

/* Declare (with a dummy prototype) and export a libgcc helper. */
#define DECLARE_EXPORT(name) extern void name(void);EXPORT_SYMBOL(name)

/* These symbols are generated by the compiler itself */
DECLARE_EXPORT(__udivsi3);
DECLARE_EXPORT(__udivdi3);
DECLARE_EXPORT(__sdivsi3);
DECLARE_EXPORT(__ashrdi3);
DECLARE_EXPORT(__ashldi3);
DECLARE_EXPORT(__lshrdi3);
DECLARE_EXPORT(__movstr);

EXPORT_SYMBOL(strcpy);

#ifdef CONFIG_CPU_SH4
DECLARE_EXPORT(__movstr_i4_even);
DECLARE_EXPORT(__movstr_i4_odd);
DECLARE_EXPORT(__movstrSI12_i4);

/* needed by some modules */
EXPORT_SYMBOL(flush_cache_all);
EXPORT_SYMBOL(flush_cache_range);
EXPORT_SYMBOL(flush_dcache_page);
EXPORT_SYMBOL(__flush_purge_region);
#endif

#if defined(CONFIG_SH7705_CACHE_32KB)
EXPORT_SYMBOL(flush_cache_all);
EXPORT_SYMBOL(flush_cache_range);
EXPORT_SYMBOL(flush_dcache_page);
EXPORT_SYMBOL(__flush_purge_region);
#endif

EXPORT_SYMBOL(flush_tlb_page);
EXPORT_SYMBOL(__down_trylock);

#ifdef CONFIG_SMP
EXPORT_SYMBOL(synchronize_irq);
#endif

EXPORT_SYMBOL(csum_partial);
EXPORT_SYMBOL(csum_ipv6_magic);
EXPORT_SYMBOL(consistent_sync);
EXPORT_SYMBOL(clear_page);
diff --git a/arch/sh/kernel/signal.c b/arch/sh/kernel/signal.c
new file mode 100644
index 000000000000..06f1b47eded9
--- /dev/null
+++ b/arch/sh/kernel/signal.c
@@ -0,0 +1,607 @@
1/*
2 * linux/arch/sh/kernel/signal.c
3 *
4 * Copyright (C) 1991, 1992 Linus Torvalds
5 *
6 * 1997-11-28 Modified for POSIX.1b signals by Richard Henderson
7 *
8 * SuperH version: Copyright (C) 1999, 2000 Niibe Yutaka & Kaz Kojima
9 *
10 */
11
12#include <linux/sched.h>
13#include <linux/mm.h>
14#include <linux/smp.h>
15#include <linux/smp_lock.h>
16#include <linux/kernel.h>
17#include <linux/signal.h>
18#include <linux/errno.h>
19#include <linux/wait.h>
20#include <linux/ptrace.h>
21#include <linux/unistd.h>
22#include <linux/stddef.h>
23#include <linux/tty.h>
24#include <linux/personality.h>
25#include <linux/binfmts.h>
26
27#include <asm/ucontext.h>
28#include <asm/uaccess.h>
29#include <asm/pgtable.h>
30#include <asm/cacheflush.h>
31
#define DEBUG_SIG 0

/* Signals that may never be blocked. */
#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))

/* Implemented in this file below the frame-setup helpers. */
asmlinkage int do_signal(struct pt_regs *regs, sigset_t *oldset);
37
38/*
39 * Atomically swap in the new signal mask, and wait for a signal.
40 */
41asmlinkage int
42sys_sigsuspend(old_sigset_t mask,
43 unsigned long r5, unsigned long r6, unsigned long r7,
44 struct pt_regs regs)
45{
46 sigset_t saveset;
47
48 mask &= _BLOCKABLE;
49 spin_lock_irq(&current->sighand->siglock);
50 saveset = current->blocked;
51 siginitset(&current->blocked, mask);
52 recalc_sigpending();
53 spin_unlock_irq(&current->sighand->siglock);
54
55 regs.regs[0] = -EINTR;
56 while (1) {
57 current->state = TASK_INTERRUPTIBLE;
58 schedule();
59 if (do_signal(&regs, &saveset))
60 return -EINTR;
61 }
62}
63
/*
 * RT variant of sigsuspend: the mask is a full sigset_t copied from
 * user space.  Same sleep-until-delivery loop as sys_sigsuspend().
 */
asmlinkage int
sys_rt_sigsuspend(sigset_t *unewset, size_t sigsetsize,
		  unsigned long r6, unsigned long r7,
		  struct pt_regs regs)
{
	sigset_t saveset, newset;

	/* XXX: Don't preclude handling different sized sigset_t's. */
	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;

	if (copy_from_user(&newset, unewset, sizeof(newset)))
		return -EFAULT;
	sigdelsetmask(&newset, ~_BLOCKABLE);
	spin_lock_irq(&current->sighand->siglock);
	saveset = current->blocked;
	current->blocked = newset;
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);

	regs.regs[0] = -EINTR;
	while (1) {
		current->state = TASK_INTERRUPTIBLE;
		schedule();
		if (do_signal(&regs, &saveset))
			return -EINTR;
	}
}
92
/*
 * Old-style sigaction(2): translate the user's struct old_sigaction
 * to/from a k_sigaction and defer to do_sigaction().
 *
 * NOTE(review): the __get_user/__put_user results for sa_flags and
 * sa_mask are not folded into the error path -- access_ok() above
 * validated the range, but confirm this matches the intended
 * fault-handling policy.
 */
asmlinkage int
sys_sigaction(int sig, const struct old_sigaction __user *act,
	      struct old_sigaction __user *oact)
{
	struct k_sigaction new_ka, old_ka;
	int ret;

	if (act) {
		old_sigset_t mask;
		if (!access_ok(VERIFY_READ, act, sizeof(*act)) ||
		    __get_user(new_ka.sa.sa_handler, &act->sa_handler) ||
		    __get_user(new_ka.sa.sa_restorer, &act->sa_restorer))
			return -EFAULT;
		__get_user(new_ka.sa.sa_flags, &act->sa_flags);
		__get_user(mask, &act->sa_mask);
		siginitset(&new_ka.sa.sa_mask, mask);
	}

	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);

	if (!ret && oact) {
		if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) ||
		    __put_user(old_ka.sa.sa_handler, &oact->sa_handler) ||
		    __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer))
			return -EFAULT;
		__put_user(old_ka.sa.sa_flags, &oact->sa_flags);
		__put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask);
	}

	return ret;
}
124
/*
 * sigaltstack(2): pass the current user stack pointer (r15) to the
 * generic do_sigaltstack().  r6/r7 pad the SH syscall register layout.
 */
asmlinkage int
sys_sigaltstack(const stack_t __user *uss, stack_t __user *uoss,
		unsigned long r6, unsigned long r7,
		struct pt_regs regs)
{
	return do_sigaltstack(uss, uoss, regs.regs[15]);
}
132
133
134/*
135 * Do a signal return; undo the signal stack.
136 */
137
138#define MOVW(n) (0x9300|((n)-2)) /* Move mem word at PC+n to R3 */
139#define TRAP16 0xc310 /* Syscall w/no args (NR in R3) */
140#define OR_R0_R0 0x200b /* or r0,r0 (insert to avoid hardware bug) */
141
142struct sigframe
143{
144 struct sigcontext sc;
145 unsigned long extramask[_NSIG_WORDS-1];
146 u16 retcode[8];
147};
148
149struct rt_sigframe
150{
151 struct siginfo info;
152 struct ucontext uc;
153 u16 retcode[8];
154};
155
#ifdef CONFIG_SH_FPU
/*
 * Reload the FPU register file from the sigcontext.  No-op on CPUs
 * without an FPU.  Marks the math state used so the registers are
 * picked up again.  Returns non-zero on a user-copy fault.
 */
static inline int restore_sigcontext_fpu(struct sigcontext __user *sc)
{
	struct task_struct *tsk = current;

	if (!(cpu_data->flags & CPU_HAS_FPU))
		return 0;

	set_used_math();
	/* 16*2 FP registers plus two control words (presumably
	 * fpscr/fpul -- confirm against struct sh_fpu_hard_struct). */
	return __copy_from_user(&tsk->thread.fpu.hard, &sc->sc_fpregs[0],
				sizeof(long)*(16*2+2));
}

/*
 * Save the FPU state into the sigcontext and record (in sc_ownedfp)
 * whether the task owned FPU state at all.  Returns non-zero on a
 * user-copy fault.
 */
static inline int save_sigcontext_fpu(struct sigcontext __user *sc,
				      struct pt_regs *regs)
{
	struct task_struct *tsk = current;

	if (!(cpu_data->flags & CPU_HAS_FPU))
		return 0;

	if (!used_math()) {
		__put_user(0, &sc->sc_ownedfp);
		return 0;
	}

	__put_user(1, &sc->sc_ownedfp);

	/* This will cause a "finit" to be triggered by the next
	   attempted FPU operation by the 'current' process.
	   */
	clear_used_math();

	unlazy_fpu(tsk, regs);
	return __copy_to_user(&sc->sc_fpregs[0], &tsk->thread.fpu.hard,
			      sizeof(long)*(16*2+2));
}
#endif /* CONFIG_SH_FPU */
194
/*
 * Copy the machine context saved in @sc back into @regs.  r0 is
 * returned separately through @r0_p because it carries sigreturn's
 * own syscall return value.  Returns non-zero on any fault.
 */
static int
restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc, int *r0_p)
{
	unsigned int err = 0;

#define COPY(x)		err |= __get_user(regs->x, &sc->sc_##x)
			COPY(regs[1]);
	COPY(regs[2]);	COPY(regs[3]);
	COPY(regs[4]);	COPY(regs[5]);
	COPY(regs[6]);	COPY(regs[7]);
	COPY(regs[8]);	COPY(regs[9]);
	COPY(regs[10]);	COPY(regs[11]);
	COPY(regs[12]);	COPY(regs[13]);
	COPY(regs[14]);	COPY(regs[15]);
	COPY(gbr);	COPY(mach);
	COPY(macl);	COPY(pr);
	COPY(sr);	COPY(pc);
#undef COPY

#ifdef CONFIG_SH_FPU
	if (cpu_data->flags & CPU_HAS_FPU) {
		int owned_fp;
		struct task_struct *tsk = current;

		regs->sr |= SR_FD; /* Release FPU */
		clear_fpu(tsk, regs);
		clear_used_math();
		__get_user (owned_fp, &sc->sc_ownedfp);
		if (owned_fp)
			err |= restore_sigcontext_fpu(sc);
	}
#endif

	regs->tra = -1;		/* disable syscall checks */
	err |= __get_user(*r0_p, &sc->sc_regs[0]);
	return err;
}
232
/*
 * sigreturn(2): tear down the non-RT signal frame found at the user
 * stack pointer (r15), restoring the blocked mask and machine
 * context.  A bad frame raises SIGSEGV on the caller.
 */
asmlinkage int sys_sigreturn(unsigned long r4, unsigned long r5,
			     unsigned long r6, unsigned long r7,
			     struct pt_regs regs)
{
	struct sigframe __user *frame = (struct sigframe __user *)regs.regs[15];
	sigset_t set;
	int r0;

	if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
		goto badframe;

	if (__get_user(set.sig[0], &frame->sc.oldmask)
	    || (_NSIG_WORDS > 1
		&& __copy_from_user(&set.sig[1], &frame->extramask,
				    sizeof(frame->extramask))))
		goto badframe;

	sigdelsetmask(&set, ~_BLOCKABLE);

	spin_lock_irq(&current->sighand->siglock);
	current->blocked = set;
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);

	if (restore_sigcontext(&regs, &frame->sc, &r0))
		goto badframe;
	return r0;	/* r0 of the interrupted context, not a syscall status */

badframe:
	force_sig(SIGSEGV, current);
	return 0;
}
265
/*
 * rt_sigreturn(2): tear down an RT signal frame -- mask, machine
 * context and the saved altstack settings.  A bad frame raises
 * SIGSEGV on the caller.
 */
asmlinkage int sys_rt_sigreturn(unsigned long r4, unsigned long r5,
				unsigned long r6, unsigned long r7,
				struct pt_regs regs)
{
	struct rt_sigframe __user *frame = (struct rt_sigframe __user *)regs.regs[15];
	sigset_t set;
	stack_t st;
	int r0;

	if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
		goto badframe;

	if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set)))
		goto badframe;

	sigdelsetmask(&set, ~_BLOCKABLE);
	spin_lock_irq(&current->sighand->siglock);
	current->blocked = set;
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);

	if (restore_sigcontext(&regs, &frame->uc.uc_mcontext, &r0))
		goto badframe;

	if (__copy_from_user(&st, &frame->uc.uc_stack, sizeof(st)))
		goto badframe;
	/* It is more difficult to avoid calling this function than to
	   call it and ignore errors.  */
	do_sigaltstack(&st, NULL, regs.regs[15]);

	return r0;

badframe:
	force_sig(SIGSEGV, current);
	return 0;
}
302
303/*
304 * Set up a signal frame.
305 */
306
/*
 * Save the current machine context (@regs), FPU state and the old
 * blocked mask into the user-space sigcontext @sc.  Returns non-zero
 * on any fault.
 */
static int
setup_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs,
		 unsigned long mask)
{
	int err = 0;

#define COPY(x)		err |= __put_user(regs->x, &sc->sc_##x)
	COPY(regs[0]);	COPY(regs[1]);
	COPY(regs[2]);	COPY(regs[3]);
	COPY(regs[4]);	COPY(regs[5]);
	COPY(regs[6]);	COPY(regs[7]);
	COPY(regs[8]);	COPY(regs[9]);
	COPY(regs[10]);	COPY(regs[11]);
	COPY(regs[12]);	COPY(regs[13]);
	COPY(regs[14]);	COPY(regs[15]);
	COPY(gbr);	COPY(mach);
	COPY(macl);	COPY(pr);
	COPY(sr);	COPY(pc);
#undef COPY

#ifdef CONFIG_SH_FPU
	err |= save_sigcontext_fpu(sc, regs);
#endif

	/* non-iBCS2 extensions.. */
	err |= __put_user(mask, &sc->oldmask);

	return err;
}
336
337/*
338 * Determine which stack to use..
339 */
340static inline void __user *
341get_sigframe(struct k_sigaction *ka, unsigned long sp, size_t frame_size)
342{
343 if (ka->sa.sa_flags & SA_ONSTACK) {
344 if (sas_ss_flags(sp) == 0)
345 sp = current->sas_ss_sp + current->sas_ss_size;
346 }
347
348 return (void __user *)((sp - frame_size) & -8ul);
349}
350
/*
 * Build a classic (non-RT) signal frame on the user stack and aim the
 * saved registers at the handler: r4 = signo, r5 = 0, r6 = &sigcontext,
 * pc = handler, pr = return trampoline.  Any fault forces SIGSEGV.
 */
static void setup_frame(int sig, struct k_sigaction *ka,
			sigset_t *set, struct pt_regs *regs)
{
	struct sigframe __user *frame;
	int err = 0;
	int signal;

	frame = get_sigframe(ka, regs->regs[15], sizeof(*frame));

	if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
		goto give_sigsegv;

	/* Translate the signal number through the exec domain, if any. */
	signal = current_thread_info()->exec_domain
		&& current_thread_info()->exec_domain->signal_invmap
		&& sig < 32
		? current_thread_info()->exec_domain->signal_invmap[sig]
		: sig;

	err |= setup_sigcontext(&frame->sc, regs, set->sig[0]);

	if (_NSIG_WORDS > 1) {
		err |= __copy_to_user(frame->extramask, &set->sig[1],
				      sizeof(frame->extramask));
	}

	/* Set up to return from userspace.  If provided, use a stub
	   already in userspace. */
	if (ka->sa.sa_flags & SA_RESTORER) {
		regs->pr = (unsigned long) ka->sa.sa_restorer;
	} else {
		/* Generate return code (system call to sigreturn) */
		err |= __put_user(MOVW(7), &frame->retcode[0]);
		err |= __put_user(TRAP16, &frame->retcode[1]);
		err |= __put_user(OR_R0_R0, &frame->retcode[2]);
		err |= __put_user(OR_R0_R0, &frame->retcode[3]);
		err |= __put_user(OR_R0_R0, &frame->retcode[4]);
		err |= __put_user(OR_R0_R0, &frame->retcode[5]);
		err |= __put_user(OR_R0_R0, &frame->retcode[6]);
		err |= __put_user((__NR_sigreturn), &frame->retcode[7]);
		regs->pr = (unsigned long) frame->retcode;
	}

	if (err)
		goto give_sigsegv;

	/* Set up registers for signal handler */
	regs->regs[15] = (unsigned long) frame;
	regs->regs[4] = signal; /* Arg for signal handler */
	regs->regs[5] = 0;
	regs->regs[6] = (unsigned long) &frame->sc;
	regs->pc = (unsigned long) ka->sa.sa_handler;

	set_fs(USER_DS);

#if DEBUG_SIG
	printk("SIG deliver (%s:%d): sp=%p pc=%08lx pr=%08lx\n",
		current->comm, current->pid, frame, regs->pc, regs->pr);
#endif

	/* Make the trampoline visible to the icache; flush a second line
	   when retcode straddles a cache line boundary. */
	flush_cache_sigtramp(regs->pr);
	if ((-regs->pr & (L1_CACHE_BYTES-1)) < sizeof(frame->retcode))
		flush_cache_sigtramp(regs->pr + L1_CACHE_BYTES);
	return;

give_sigsegv:
	force_sigsegv(sig, current);
}
418
/*
 * Build an RT signal frame (siginfo + ucontext) on the user stack and
 * aim the saved registers at the handler: r4 = signo, r5 = &siginfo,
 * r6 = &ucontext, pc = handler, pr = return trampoline.  Any fault
 * forces SIGSEGV.
 */
static void setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
			   sigset_t *set, struct pt_regs *regs)
{
	struct rt_sigframe __user *frame;
	int err = 0;
	int signal;

	frame = get_sigframe(ka, regs->regs[15], sizeof(*frame));

	if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
		goto give_sigsegv;

	/* Translate the signal number through the exec domain, if any. */
	signal = current_thread_info()->exec_domain
		&& current_thread_info()->exec_domain->signal_invmap
		&& sig < 32
		? current_thread_info()->exec_domain->signal_invmap[sig]
		: sig;

	err |= copy_siginfo_to_user(&frame->info, info);

	/* Create the ucontext.  */
	err |= __put_user(0, &frame->uc.uc_flags);
	err |= __put_user(0, &frame->uc.uc_link);
	err |= __put_user((void *)current->sas_ss_sp,
			  &frame->uc.uc_stack.ss_sp);
	err |= __put_user(sas_ss_flags(regs->regs[15]),
			  &frame->uc.uc_stack.ss_flags);
	err |= __put_user(current->sas_ss_size, &frame->uc.uc_stack.ss_size);
	err |= setup_sigcontext(&frame->uc.uc_mcontext,
			        regs, set->sig[0]);
	err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));

	/* Set up to return from userspace.  If provided, use a stub
	   already in userspace. */
	if (ka->sa.sa_flags & SA_RESTORER) {
		regs->pr = (unsigned long) ka->sa.sa_restorer;
	} else {
		/* Generate return code (system call to rt_sigreturn) */
		err |= __put_user(MOVW(7), &frame->retcode[0]);
		err |= __put_user(TRAP16, &frame->retcode[1]);
		err |= __put_user(OR_R0_R0, &frame->retcode[2]);
		err |= __put_user(OR_R0_R0, &frame->retcode[3]);
		err |= __put_user(OR_R0_R0, &frame->retcode[4]);
		err |= __put_user(OR_R0_R0, &frame->retcode[5]);
		err |= __put_user(OR_R0_R0, &frame->retcode[6]);
		err |= __put_user((__NR_rt_sigreturn), &frame->retcode[7]);
		regs->pr = (unsigned long) frame->retcode;
	}

	if (err)
		goto give_sigsegv;

	/* Set up registers for signal handler */
	regs->regs[15] = (unsigned long) frame;
	regs->regs[4] = signal; /* Arg for signal handler */
	regs->regs[5] = (unsigned long) &frame->info;
	regs->regs[6] = (unsigned long) &frame->uc;
	regs->pc = (unsigned long) ka->sa.sa_handler;

	set_fs(USER_DS);

#if DEBUG_SIG
	printk("SIG deliver (%s:%d): sp=%p pc=%08lx pr=%08lx\n",
		current->comm, current->pid, frame, regs->pc, regs->pr);
#endif

	/* Make the trampoline visible to the icache; flush a second line
	   when retcode straddles a cache line boundary. */
	flush_cache_sigtramp(regs->pr);
	if ((-regs->pr & (L1_CACHE_BYTES-1)) < sizeof(frame->retcode))
		flush_cache_sigtramp(regs->pr + L1_CACHE_BYTES);
	return;

give_sigsegv:
	force_sigsegv(sig, current);
}
493
494/*
495 * OK, we're invoking a handler
496 */
497
/*
 * Deliver one signal: fix up system-call restarting, back out of any
 * gUSA (userspace atomicity) critical section, push the handler frame,
 * then update the blocked mask per the handler's flags.
 */
static void
handle_signal(unsigned long sig, struct k_sigaction *ka, siginfo_t *info,
	      sigset_t *oldset, struct pt_regs *regs)
{
	/* Are we from a system call? */
	if (regs->tra >= 0) {
		/* If so, check system call restarting.. */
		switch (regs->regs[0]) {
			case -ERESTARTNOHAND:
				regs->regs[0] = -EINTR;
				break;

			case -ERESTARTSYS:
				if (!(ka->sa.sa_flags & SA_RESTART)) {
					regs->regs[0] = -EINTR;
					break;
				}
			/* fallthrough */
			case -ERESTARTNOINTR:
				/* back up over the 2-byte trapa so it re-executes */
				regs->pc -= 2;
		}
	} else {
		/* gUSA handling: a stack pointer in the 0xc0000000+ range
		   marks an in-progress userspace atomic sequence. */
#ifdef CONFIG_PREEMPT
		unsigned long flags;

		local_irq_save(flags);
#endif
		if (regs->regs[15] >= 0xc0000000) {
			/* sp holds the (negative) offset into the sequence */
			int offset = (int)regs->regs[15];

			/* Reset stack pointer: clear critical region mark */
			regs->regs[15] = regs->regs[1];
			if (regs->pc < regs->regs[0])
				/* Go to rewind point #1 */
				regs->pc = regs->regs[0] + offset - 2;
		}
#ifdef CONFIG_PREEMPT
		local_irq_restore(flags);
#endif
	}

	/* Set up the stack frame */
	if (ka->sa.sa_flags & SA_SIGINFO)
		setup_rt_frame(sig, ka, info, oldset, regs);
	else
		setup_frame(sig, ka, oldset, regs);

	if (ka->sa.sa_flags & SA_ONESHOT)
		ka->sa.sa_handler = SIG_DFL;

	/* Unless SA_NODEFER, block sa_mask plus the signal itself while
	   the handler runs. */
	if (!(ka->sa.sa_flags & SA_NODEFER)) {
		spin_lock_irq(&current->sighand->siglock);
		sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask);
		sigaddset(&current->blocked,sig);
		recalc_sigpending();
		spin_unlock_irq(&current->sighand->siglock);
	}
}
557
558/*
559 * Note that 'init' is a special process: it doesn't get signals it doesn't
560 * want to handle. Thus you cannot kill init even with a SIGKILL even by
561 * mistake.
562 *
563 * Note that we go through the signals twice: once to check the signals that
564 * the kernel can handle, and then we build all the user-level signal handling
565 * stack-frames in one go after that.
566 */
/*
 * Main signal-dispatch entry, called on the return-to-user path.
 * Returns 1 if a signal was delivered (or we came from kernel mode),
 * 0 otherwise.  With no handler pending, restartable syscalls are
 * rewound so the trapa re-executes.
 */
int do_signal(struct pt_regs *regs, sigset_t *oldset)
{
	siginfo_t info;
	int signr;
	struct k_sigaction ka;

	/*
	 * We want the common case to go fast, which
	 * is why we may in certain cases get here from
	 * kernel mode. Just return without doing anything
	 * if so.
	 */
	if (!user_mode(regs))
		return 1;

	/* A task being frozen for suspend must not deliver signals now. */
	if (try_to_freeze(0))
		goto no_signal;

	if (!oldset)
		oldset = &current->blocked;

	signr = get_signal_to_deliver(&info, &ka, regs, NULL);
	if (signr > 0) {
		/* Whee!  Actually deliver the signal.  */
		handle_signal(signr, &ka, &info, oldset, regs);
		return 1;
	}

 no_signal:
	/* Did we come from a system call? */
	if (regs->tra >= 0) {
		/* Restart the system call - no handlers present */
		if (regs->regs[0] == -ERESTARTNOHAND ||
		    regs->regs[0] == -ERESTARTSYS ||
		    regs->regs[0] == -ERESTARTNOINTR ||
		    regs->regs[0] == -ERESTART_RESTARTBLOCK) {
			regs->pc -= 2;
		}
	}
	return 0;
}
diff --git a/arch/sh/kernel/smp.c b/arch/sh/kernel/smp.c
new file mode 100644
index 000000000000..56a39d69e080
--- /dev/null
+++ b/arch/sh/kernel/smp.c
@@ -0,0 +1,199 @@
1/*
2 * arch/sh/kernel/smp.c
3 *
4 * SMP support for the SuperH processors.
5 *
6 * Copyright (C) 2002, 2003 Paul Mundt
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License as published by the
10 * Free Software Foundation; either version 2 of the License, or (at your
11 * option) any later version.
12 */
13#include <linux/config.h>
14#include <linux/cache.h>
15#include <linux/cpumask.h>
16#include <linux/delay.h>
17#include <linux/init.h>
18#include <linux/interrupt.h>
19#include <linux/spinlock.h>
20#include <linux/threads.h>
21#include <linux/module.h>
22#include <linux/time.h>
23#include <linux/timex.h>
24#include <linux/sched.h>
25
26#include <asm/atomic.h>
27#include <asm/processor.h>
28#include <asm/system.h>
29#include <asm/mmu_context.h>
30#include <asm/smp.h>
31
32/*
33 * This was written with the Sega Saturn (SMP SH-2 7604) in mind,
34 * but is designed to be usable regardless if there's an MMU
35 * present or not.
36 */
37struct sh_cpuinfo cpu_data[NR_CPUS];
38
39extern void per_cpu_trap_init(void);
40
41cpumask_t cpu_possible_map;
42cpumask_t cpu_online_map;
43static atomic_t cpus_booted = ATOMIC_INIT(0);
44
45/* These are defined by the board-specific code. */
46
47/*
48 * Cause the function described by call_data to be executed on the passed
49 * cpu. When the function has finished, increment the finished field of
50 * call_data.
51 */
52void __smp_send_ipi(unsigned int cpu, unsigned int action);
53
54/*
55 * Find the number of available processors
56 */
57unsigned int __smp_probe_cpus(void);
58
59/*
60 * Start a particular processor
61 */
62void __smp_slave_init(unsigned int cpu);
63
64/*
65 * Run specified function on a particular processor.
66 */
67void __smp_call_function(unsigned int cpu);
68
69static inline void __init smp_store_cpu_info(unsigned int cpu)
70{
71 cpu_data[cpu].loops_per_jiffy = loops_per_jiffy;
72}
73
74void __init smp_prepare_cpus(unsigned int max_cpus)
75{
76 unsigned int cpu = smp_processor_id();
77 int i;
78
79 atomic_set(&cpus_booted, 1);
80 smp_store_cpu_info(cpu);
81
82 for (i = 0; i < __smp_probe_cpus(); i++)
83 cpu_set(i, cpu_possible_map);
84}
85
86void __devinit smp_prepare_boot_cpu(void)
87{
88 unsigned int cpu = smp_processor_id();
89
90 cpu_set(cpu, cpu_online_map);
91 cpu_set(cpu, cpu_possible_map);
92}
93
/*
 * Bring one secondary CPU up: fork its idle task, bind it to the CPU,
 * and mark the CPU online.
 *
 * NOTE(review): on fork_idle() failure this panics rather than
 * returning an error, even though the int return type suggests callers
 * could handle failure.
 */
int __cpu_up(unsigned int cpu)
{
	struct task_struct *tsk;

	tsk = fork_idle(cpu);

	if (IS_ERR(tsk))
		panic("Failed forking idle task for cpu %d\n", cpu);

	/* Bind the idle task to its CPU before declaring the CPU online. */
	tsk->thread_info->cpu = cpu;

	cpu_set(cpu, cpu_online_map);

	return 0;
}
109
/*
 * Entry point for a freshly-started secondary CPU: adopt init_mm as the
 * active mm, run board- and CPU-level slave init, count ourselves as
 * booted, then drop into the idle loop (never returns in practice).
 */
int start_secondary(void *unused)
{
	unsigned int cpu = smp_processor_id();

	/* Borrow init_mm; take a reference so it is never freed under us. */
	atomic_inc(&init_mm.mm_count);
	current->active_mm = &init_mm;

	smp_store_cpu_info(cpu);

	__smp_slave_init(cpu);
	per_cpu_trap_init();

	atomic_inc(&cpus_booted);

	cpu_idle();
	return 0;
}
127
/*
 * Called once all secondaries are up; a full memory barrier is the only
 * finalization this port needs.
 */
void __init smp_cpus_done(unsigned int max_cpus)
{
	smp_mb();
}
132
/* Kick a remote CPU with a reschedule IPI (board supplies the delivery). */
void smp_send_reschedule(int cpu)
{
	__smp_send_ipi(cpu, SMP_MSG_RESCHEDULE);
}
137
138static void stop_this_cpu(void *unused)
139{
140 cpu_clear(smp_processor_id(), cpu_online_map);
141 local_irq_disable();
142
143 for (;;)
144 cpu_relax();
145}
146
147void smp_send_stop(void)
148{
149 smp_call_function(stop_this_cpu, 0, 1, 0);
150}
151
152
153struct smp_fn_call_struct smp_fn_call = {
154 .lock = SPIN_LOCK_UNLOCKED,
155 .finished = ATOMIC_INIT(0),
156};
157
158/*
159 * The caller of this wants the passed function to run on every cpu. If wait
160 * is set, wait until all cpus have finished the function before returning.
161 * The lock is here to protect the call structure.
162 * You must not call this function with disabled interrupts or from a
163 * hardware interrupt handler or from a bottom half handler.
164 */
/*
 * Run func(info) on every other booted CPU.  smp_fn_call.lock serializes
 * concurrent callers and protects the shared call structure; "retry" is
 * accepted but unused.  Must not be called with IRQs disabled (the IPI
 * handshake would deadlock) — hence the WARN_ON.
 */
int smp_call_function(void (*func)(void *info), void *info, int retry, int wait)
{
	unsigned int nr_cpus = atomic_read(&cpus_booted);
	int i;

	/* Nothing to do on a single-CPU system. */
	if (nr_cpus < 2)
		return 0;

	/* Can deadlock when called with interrupts disabled */
	WARN_ON(irqs_disabled());

	spin_lock(&smp_fn_call.lock);

	atomic_set(&smp_fn_call.finished, 0);
	smp_fn_call.fn = func;
	smp_fn_call.data = info;

	for (i = 0; i < nr_cpus; i++)
		if (i != smp_processor_id())
			__smp_call_function(i);

	/* Spin until every remote CPU has bumped "finished". */
	if (wait)
		while (atomic_read(&smp_fn_call.finished) != (nr_cpus - 1));

	spin_unlock(&smp_fn_call.lock);

	return 0;
}
193
194/* Not really SMP stuff ... */
/* Profiling multiplier is not supported on this port; accept and ignore. */
int setup_profiling_timer(unsigned int multiplier)
{
	return 0;
}
199
diff --git a/arch/sh/kernel/sys_sh.c b/arch/sh/kernel/sys_sh.c
new file mode 100644
index 000000000000..df5ac294c379
--- /dev/null
+++ b/arch/sh/kernel/sys_sh.c
@@ -0,0 +1,289 @@
1/*
2 * linux/arch/sh/kernel/sys_sh.c
3 *
4 * This file contains various random system calls that
5 * have a non-standard calling sequence on the Linux/SuperH
6 * platform.
7 *
8 * Taken from i386 version.
9 */
10
11#include <linux/errno.h>
12#include <linux/sched.h>
13#include <linux/mm.h>
14#include <linux/smp.h>
15#include <linux/smp_lock.h>
16#include <linux/sem.h>
17#include <linux/msg.h>
18#include <linux/shm.h>
19#include <linux/stat.h>
20#include <linux/syscalls.h>
21#include <linux/mman.h>
22#include <linux/file.h>
23#include <linux/utsname.h>
24
25#include <asm/uaccess.h>
26#include <asm/ipc.h>
27
28/*
29 * sys_pipe() is the normal C calling standard for creating
30 * a pipe. It's not the way Unix traditionally does this, though.
31 */
asmlinkage int sys_pipe(unsigned long r4, unsigned long r5,
	unsigned long r6, unsigned long r7,
	struct pt_regs regs)
{
	int fd[2];
	int error;

	error = do_pipe(fd);
	if (!error) {
		/* "regs" is the stacked register file passed by value:
		   writing regs[1] hands the second fd back in a register
		   while fd[0] goes out as the normal return value
		   (presumably matching the SH syscall ABI — see entry.S). */
		regs.regs[1] = fd[1];
		return fd[0];
	}
	return error;
}
46
47#if defined(HAVE_ARCH_UNMAPPED_AREA)
48/*
49 * To avoid cache alias, we map the shard page with same color.
50 */
51#define COLOUR_ALIGN(addr) (((addr)+SHMLBA-1)&~(SHMLBA-1))
52
/*
 * Find a free virtual range for an mmap request.  Shared mappings are
 * placed at SHMLBA-aligned ("same colour") addresses to avoid cache
 * aliasing; private mappings only need page alignment.  Returns the
 * chosen address or -EINVAL/-ENOMEM.
 */
unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
	unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long start_addr;

	if (flags & MAP_FIXED) {
		/* We do not accept a shared mapping if it would violate
		 * cache aliasing constraints.
		 */
		if ((flags & MAP_SHARED) && (addr & (SHMLBA - 1)))
			return -EINVAL;
		return addr;
	}

	if (len > TASK_SIZE)
		return -ENOMEM;

	/* Try the caller's hint first. */
	if (addr) {
		if (flags & MAP_PRIVATE)
			addr = PAGE_ALIGN(addr);
		else
			addr = COLOUR_ALIGN(addr);
		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr &&
		    (!vma || addr + len <= vma->vm_start))
			return addr;
	}
	/* Otherwise resume scanning from the cached free-area hint. */
	if (flags & MAP_PRIVATE)
		addr = PAGE_ALIGN(mm->free_area_cache);
	else
		addr = COLOUR_ALIGN(mm->free_area_cache);
	start_addr = addr;

full_search:
	for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
		/* At this point:  (!vma || addr < vma->vm_end). */
		if (TASK_SIZE - len < addr) {
			/*
			 * Start a new search - just in case we missed
			 * some holes.
			 */
			if (start_addr != TASK_UNMAPPED_BASE) {
				start_addr = addr = TASK_UNMAPPED_BASE;
				goto full_search;
			}
			return -ENOMEM;
		}
		if (!vma || addr + len <= vma->vm_start) {
			/*
			 * Remember the place where we stopped the search:
			 */
			mm->free_area_cache = addr + len;
			return addr;
		}
		addr = vma->vm_end;
		if (!(flags & MAP_PRIVATE))
			addr = COLOUR_ALIGN(addr);
	}
}
114#endif
115
116static inline long
117do_mmap2(unsigned long addr, unsigned long len, unsigned long prot,
118 unsigned long flags, int fd, unsigned long pgoff)
119{
120 int error = -EBADF;
121 struct file *file = NULL;
122
123 flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
124 if (!(flags & MAP_ANONYMOUS)) {
125 file = fget(fd);
126 if (!file)
127 goto out;
128 }
129
130 down_write(&current->mm->mmap_sem);
131 error = do_mmap_pgoff(file, addr, len, prot, flags, pgoff);
132 up_write(&current->mm->mmap_sem);
133
134 if (file)
135 fput(file);
136out:
137 return error;
138}
139
140asmlinkage int old_mmap(unsigned long addr, unsigned long len,
141 unsigned long prot, unsigned long flags,
142 int fd, unsigned long off)
143{
144 if (off & ~PAGE_MASK)
145 return -EINVAL;
146 return do_mmap2(addr, len, prot, flags, fd, off>>PAGE_SHIFT);
147}
148
/* mmap2(): like mmap() but the offset is already in pages. */
asmlinkage long sys_mmap2(unsigned long addr, unsigned long len,
	unsigned long prot, unsigned long flags,
	unsigned long fd, unsigned long pgoff)
{
	return do_mmap2(addr, len, prot, flags, fd, pgoff);
}
155
156/*
157 * sys_ipc() is the de-multiplexer for the SysV IPC calls..
158 *
159 * This is really horribly ugly.
160 */
asmlinkage int sys_ipc(uint call, int first, int second,
		       int third, void __user *ptr, long fifth)
{
	int version, ret;

	/* The top 16 bits of "call" carry an interface version used by
	   old libc binaries; the low 16 bits select the operation. */
	version = call >> 16; /* hack for backward compatibility */
	call &= 0xffff;

	if (call <= SEMCTL)
		switch (call) {
		case SEMOP:
			return sys_semtimedop(first, (struct sembuf __user *)ptr,
					      second, NULL);
		case SEMTIMEDOP:
			return sys_semtimedop(first, (struct sembuf __user *)ptr,
					      second,
					      (const struct timespec __user *)fifth);
		case SEMGET:
			return sys_semget (first, second, third);
		case SEMCTL: {
			union semun fourth;
			if (!ptr)
				return -EINVAL;
			/* ptr points at the userspace semun union. */
			if (get_user(fourth.__pad, (void * __user *) ptr))
				return -EFAULT;
			return sys_semctl (first, second, third, fourth);
		}
		default:
			return -EINVAL;
		}

	if (call <= MSGCTL)
		switch (call) {
		case MSGSND:
			return sys_msgsnd (first, (struct msgbuf __user *) ptr,
					   second, third);
		case MSGRCV:
			switch (version) {
			case 0: {
				/* Old ABI: msgp and msgtyp arrive packed in
				   an ipc_kludge struct pointed to by ptr. */
				struct ipc_kludge tmp;
				if (!ptr)
					return -EINVAL;

				if (copy_from_user(&tmp,
					(struct ipc_kludge __user *) ptr,
						   sizeof (tmp)))
					return -EFAULT;
				return sys_msgrcv (first, tmp.msgp, second,
						   tmp.msgtyp, third);
			}
			default:
				return sys_msgrcv (first,
						   (struct msgbuf __user *) ptr,
						   second, fifth, third);
			}
		case MSGGET:
			return sys_msgget ((key_t) first, second);
		case MSGCTL:
			return sys_msgctl (first, second,
					   (struct msqid_ds __user *) ptr);
		default:
			return -EINVAL;
		}
	if (call <= SHMCTL)
		switch (call) {
		case SHMAT:
			switch (version) {
			default: {
				ulong raddr;
				ret = do_shmat (first, (char __user *) ptr,
						second, &raddr);
				if (ret)
					return ret;
				/* Hand the attach address back via *third. */
				return put_user (raddr, (ulong __user *) third);
			}
			case 1: /* iBCS2 emulator entry point */
				if (!segment_eq(get_fs(), get_ds()))
					return -EINVAL;
				return do_shmat (first, (char __user *) ptr,
						 second, (ulong *) third);
			}
		case SHMDT:
			return sys_shmdt ((char __user *)ptr);
		case SHMGET:
			return sys_shmget (first, second, third);
		case SHMCTL:
			return sys_shmctl (first, second,
					   (struct shmid_ds __user *) ptr);
		default:
			return -EINVAL;
		}

	return -EINVAL;
}
255
256asmlinkage int sys_uname(struct old_utsname * name)
257{
258 int err;
259 if (!name)
260 return -EFAULT;
261 down_read(&uts_sem);
262 err=copy_to_user(name, &system_utsname, sizeof (*name));
263 up_read(&uts_sem);
264 return err?-EFAULT:0;
265}
266
/* ABI shim: "dummy" pads the argument registers so the 64-bit "pos"
 * lands correctly aligned; forward to the generic sys_pread64(). */
asmlinkage ssize_t sys_pread_wrapper(unsigned int fd, char * buf,
			     size_t count, long dummy, loff_t pos)
{
	return sys_pread64(fd, buf, count, pos);
}
272
/* ABI shim: "dummy" pads the argument registers so the 64-bit "pos"
 * lands correctly aligned; forward to the generic sys_pwrite64(). */
asmlinkage ssize_t sys_pwrite_wrapper(unsigned int fd, const char * buf,
			      size_t count, long dummy, loff_t pos)
{
	return sys_pwrite64(fd, buf, count, pos);
}
278
/*
 * ABI shim: reassemble the 64-bit offset and length from the two 32-bit
 * register halves, whose order depends on endianness, then forward to
 * the generic sys_fadvise64_64().
 */
asmlinkage int sys_fadvise64_64_wrapper(int fd, u32 offset0, u32 offset1,
				u32 len0, u32 len1, int advice)
{
#ifdef  __LITTLE_ENDIAN__
	return sys_fadvise64_64(fd, (u64)offset1 << 32 | offset0,
				(u64)len1 << 32 | len0,	advice);
#else
	return sys_fadvise64_64(fd, (u64)offset0 << 32 | offset1,
				(u64)len0 << 32 | len1,	advice);
#endif
}
diff --git a/arch/sh/kernel/time.c b/arch/sh/kernel/time.c
new file mode 100644
index 000000000000..df7a9b9d4cbf
--- /dev/null
+++ b/arch/sh/kernel/time.c
@@ -0,0 +1,657 @@
1/*
2 * arch/sh/kernel/time.c
3 *
4 * Copyright (C) 1999 Tetsuya Okada & Niibe Yutaka
5 * Copyright (C) 2000 Philipp Rumpf <prumpf@tux.org>
6 * Copyright (C) 2002, 2003, 2004 Paul Mundt
7 * Copyright (C) 2002 M. R. Brown <mrbrown@linux-sh.org>
8 *
9 * Some code taken from i386 version.
10 * Copyright (C) 1991, 1992, 1995 Linus Torvalds
11 */
12
13#include <linux/config.h>
14#include <linux/errno.h>
15#include <linux/module.h>
16#include <linux/sched.h>
17#include <linux/kernel.h>
18#include <linux/param.h>
19#include <linux/string.h>
20#include <linux/mm.h>
21#include <linux/interrupt.h>
22#include <linux/time.h>
23#include <linux/delay.h>
24#include <linux/init.h>
25#include <linux/smp.h>
26#include <linux/profile.h>
27
28#include <asm/processor.h>
29#include <asm/uaccess.h>
30#include <asm/io.h>
31#include <asm/irq.h>
32#include <asm/delay.h>
33#include <asm/machvec.h>
34#include <asm/rtc.h>
35#include <asm/freq.h>
36#include <asm/cpu/timer.h>
37#ifdef CONFIG_SH_KGDB
38#include <asm/kgdb.h>
39#endif
40
41#include <linux/timex.h>
42#include <linux/irq.h>
43
44#define TMU_TOCR_INIT 0x00
45#define TMU0_TCR_INIT 0x0020
46#define TMU_TSTR_INIT 1
47
48#define TMU0_TCR_CALIB 0x0000
49
50#ifdef CONFIG_CPU_SUBTYPE_ST40STB1
51#define CLOCKGEN_MEMCLKCR 0xbb040038
52#define MEMCLKCR_RATIO_MASK 0x7
53#endif /* CONFIG_CPU_SUBTYPE_ST40STB1 */
54
55extern unsigned long wall_jiffies;
56#define TICK_SIZE (tick_nsec / 1000)
57DEFINE_SPINLOCK(tmu0_lock);
58
59u64 jiffies_64 = INITIAL_JIFFIES;
60
61EXPORT_SYMBOL(jiffies_64);
62
63/* XXX: Can we initialize this in a routine somewhere? Dreamcast doesn't want
64 * these routines anywhere... */
65#ifdef CONFIG_SH_RTC
66void (*rtc_get_time)(struct timespec *) = sh_rtc_gettimeofday;
67int (*rtc_set_time)(const time_t) = sh_rtc_settimeofday;
68#else
69void (*rtc_get_time)(struct timespec *);
70int (*rtc_set_time)(const time_t);
71#endif
72
73#if defined(CONFIG_CPU_SUBTYPE_SH7300)
74static int md_table[] = { 1, 2, 3, 4, 6, 8, 12 };
75#endif
76#if defined(CONFIG_CPU_SH3)
77static int stc_multipliers[] = { 1, 2, 3, 4, 6, 1, 1, 1 };
78static int stc_values[] = { 0, 1, 4, 2, 5, 0, 0, 0 };
79#define bfc_divisors stc_multipliers
80#define bfc_values stc_values
81static int ifc_divisors[] = { 1, 2, 3, 4, 1, 1, 1, 1 };
82static int ifc_values[] = { 0, 1, 4, 2, 0, 0, 0, 0 };
83static int pfc_divisors[] = { 1, 2, 3, 4, 6, 1, 1, 1 };
84static int pfc_values[] = { 0, 1, 4, 2, 5, 0, 0, 0 };
85#elif defined(CONFIG_CPU_SH4)
86#if defined(CONFIG_CPU_SUBTYPE_SH73180)
87static int ifc_divisors[] = { 1, 2, 3, 4, 6, 8, 12, 16 };
88static int ifc_values[] = { 0, 1, 2, 3, 4, 5, 6, 7 };
89#define bfc_divisors ifc_divisors /* Same */
90#define bfc_values ifc_values
91#define pfc_divisors ifc_divisors /* Same */
92#define pfc_values ifc_values
93#else
94static int ifc_divisors[] = { 1, 2, 3, 4, 6, 8, 1, 1 };
95static int ifc_values[] = { 0, 1, 2, 3, 0, 4, 0, 5 };
96#define bfc_divisors ifc_divisors /* Same */
97#define bfc_values ifc_values
98static int pfc_divisors[] = { 2, 3, 4, 6, 8, 2, 2, 2 };
99static int pfc_values[] = { 0, 0, 1, 2, 0, 3, 0, 4 };
100#endif
101#else
102#error "Unknown ifc/bfc/pfc/stc values for this processor"
103#endif
104
105/*
106 * Scheduler clock - returns current time in nanosec units.
107 */
/* Scheduler clock: jiffies scaled to nanoseconds (tick resolution only). */
unsigned long long sched_clock(void)
{
	return (unsigned long long)jiffies * (1000000000 / HZ);
}
112
/*
 * Microseconds elapsed since the last timer tick, derived from the TMU0
 * down-counter.  Keeps per-call state (count_p/jiffies_p) to detect a
 * counter underflow that raced with reading it.  Caller context has the
 * xtime seqlock read side; tmu0_lock guards the static state.
 */
static unsigned long do_gettimeoffset(void)
{
	int count;
	unsigned long flags;

	static int count_p = 0x7fffffff;    /* for the first call after boot */
	static unsigned long jiffies_p = 0;

	/*
	 * cache volatile jiffies temporarily; we have IRQs turned off.
	 */
	unsigned long jiffies_t;

	spin_lock_irqsave(&tmu0_lock, flags);
	/* timer count may underflow right here */
	count = ctrl_inl(TMU0_TCNT);	/* read the latched count */

	jiffies_t = jiffies;

	/*
	 * avoiding timer inconsistencies (they are rare, but they happen)...
	 * there is one kind of problem that must be avoided here:
	 *  1. the timer counter underflows
	 */

	if( jiffies_t == jiffies_p ) {
		if( count > count_p ) {
			/* the nutcase: counter rose without a jiffies tick */

			if(ctrl_inw(TMU0_TCR) & 0x100) { /* Check UNF bit */
				/*
				 * We cannot detect lost timer interrupts ...
				 * well, that's why we call them lost, don't we? :)
				 * [hmm, on the Pentium and Alpha we can ... sort of]
				 */
				count -= LATCH;
			} else {
				printk("do_slow_gettimeoffset(): hardware timer problem?\n");
			}
		}
	} else
		jiffies_p = jiffies_t;

	count_p = count;
	spin_unlock_irqrestore(&tmu0_lock, flags);

	/* Convert remaining count to elapsed microseconds (rounded). */
	count = ((LATCH-1) - count) * TICK_SIZE;
	count = (count + LATCH/2) / LATCH;

	return count;
}
164
/*
 * gettimeofday(): xtime plus the sub-tick offset from the TMU, plus any
 * ticks not yet folded into xtime (wall_jiffies lag).  The seqlock
 * retry loop guarantees a consistent snapshot against the timer IRQ.
 */
void do_gettimeofday(struct timeval *tv)
{
	unsigned long seq;
	unsigned long usec, sec;
	unsigned long lost;

	do {
		seq = read_seqbegin(&xtime_lock);
		usec = do_gettimeoffset();

		lost = jiffies - wall_jiffies;
		if (lost)
			usec += lost * (1000000 / HZ);

		sec = xtime.tv_sec;
		usec += xtime.tv_nsec / 1000;
	} while (read_seqretry(&xtime_lock, seq));

	/* Normalize: carry whole seconds out of the microsecond field. */
	while (usec >= 1000000) {
		usec -= 1000000;
		sec++;
	}

	tv->tv_sec = sec;
	tv->tv_usec = usec;
}
191
192EXPORT_SYMBOL(do_gettimeofday);
193
/*
 * settimeofday(): set xtime to *tv, compensating for the interval that
 * has already elapsed since the last tick, and adjust wall_to_monotonic
 * so the monotonic clock is unaffected.  Returns 0 or -EINVAL.
 */
int do_settimeofday(struct timespec *tv)
{
	time_t wtm_sec, sec = tv->tv_sec;
	long wtm_nsec, nsec = tv->tv_nsec;

	if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
		return -EINVAL;

	write_seqlock_irq(&xtime_lock);
	/*
	 * This is revolting. We need to set "xtime" correctly. However, the
	 * value in this location is the value at the most recent update of
	 * wall time. Discover what correction gettimeofday() would have
	 * made, and then undo it!
	 */
	nsec -= 1000 * (do_gettimeoffset() +
				(jiffies - wall_jiffies) * (1000000 / HZ));

	wtm_sec  = wall_to_monotonic.tv_sec + (xtime.tv_sec - sec);
	wtm_nsec = wall_to_monotonic.tv_nsec + (xtime.tv_nsec - nsec);

	set_normalized_timespec(&xtime, sec, nsec);
	set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec);

	/* The clock is no longer NTP-synchronized after a manual set. */
	time_adjust = 0;		/* stop active adjtime() */
	time_status |= STA_UNSYNC;
	time_maxerror = NTP_PHASE_LIMIT;
	time_esterror = NTP_PHASE_LIMIT;
	write_sequnlock_irq(&xtime_lock);
	clock_was_set();

	return 0;
}
227
228EXPORT_SYMBOL(do_settimeofday);
229
230/* last time the RTC clock got updated */
231static long last_rtc_update;
232
233/*
234 * timer_interrupt() needs to keep up the real-time clock,
235 * as well as call the "do_timer()" routine every clocktick
236 */
237static inline void do_timer_interrupt(int irq, void *dev_id, struct pt_regs *regs)
238{
239 do_timer(regs);
240#ifndef CONFIG_SMP
241 update_process_times(user_mode(regs));
242#endif
243 profile_tick(CPU_PROFILING, regs);
244
245#ifdef CONFIG_HEARTBEAT
246 if (sh_mv.mv_heartbeat != NULL)
247 sh_mv.mv_heartbeat();
248#endif
249
250 /*
251 * If we have an externally synchronized Linux clock, then update
252 * RTC clock accordingly every ~11 minutes. Set_rtc_mmss() has to be
253 * called as close as possible to 500 ms before the new second starts.
254 */
255 if ((time_status & STA_UNSYNC) == 0 &&
256 xtime.tv_sec > last_rtc_update + 660 &&
257 (xtime.tv_nsec / 1000) >= 500000 - ((unsigned) TICK_SIZE) / 2 &&
258 (xtime.tv_nsec / 1000) <= 500000 + ((unsigned) TICK_SIZE) / 2) {
259 if (rtc_set_time(xtime.tv_sec) == 0)
260 last_rtc_update = xtime.tv_sec;
261 else
262 last_rtc_update = xtime.tv_sec - 600; /* do it again in 60 s */
263 }
264}
265
266/*
267 * This is the same as the above, except we _also_ save the current
268 * Time Stamp Counter value at the time of the timer interrupt, so that
269 * we later on can estimate the time of day more exactly.
270 */
/*
 * TMU0 interrupt handler: acknowledge the underflow (UNF) bit, then run
 * the tick bookkeeping under the xtime seqlock write side.
 */
static irqreturn_t timer_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
	unsigned long timer_status;

	/* Clear UNF bit */
	timer_status = ctrl_inw(TMU0_TCR);
	timer_status &= ~0x100;
	ctrl_outw(timer_status, TMU0_TCR);

	/*
	 * Here we are in the timer irq handler. We just have irqs locally
	 * disabled but we don't know if the timer_bh is running on the other
	 * CPU. We need to avoid to SMP race with it. NOTE: we don' t need
	 * the irq version of write_lock because as just said we have irq
	 * locally disabled. -arca
	 */
	write_seqlock(&xtime_lock);
	do_timer_interrupt(irq, NULL, regs);
	write_sequnlock(&xtime_lock);

	return IRQ_HANDLED;
}
293
294/*
295 * Hah! We'll see if this works (switching from usecs to nsecs).
296 */
/*
 * Calibrate the TMU input clock against the RTC: let the free-running
 * counter tick for one RTC resolution interval, then scale the observed
 * count to one second.  Panics if the RTC interval looks bogus.
 *
 * NOTE(review): rtc_get_time is a function pointer that may be NULL on
 * RTC-less configurations (see the CONFIG_SH_RTC block above) — confirm
 * every board reaching here installs one.
 */
static unsigned int __init get_timer_frequency(void)
{
	u32 freq;
	struct timespec ts1, ts2;
	unsigned long diff_nsec;
	unsigned long factor;

	/* Setup the timer:  We don't want to generate interrupts, just
	 * have it count down at its natural rate.
	 */
	ctrl_outb(0, TMU_TSTR);
#if !defined(CONFIG_CPU_SUBTYPE_SH7300)
	ctrl_outb(TMU_TOCR_INIT, TMU_TOCR);
#endif
	ctrl_outw(TMU0_TCR_CALIB, TMU0_TCR);
	ctrl_outl(0xffffffff, TMU0_TCOR);
	ctrl_outl(0xffffffff, TMU0_TCNT);

	rtc_get_time(&ts2);

	/* Wait for the next RTC tick so we start on an edge. */
	do {
		rtc_get_time(&ts1);
	} while (ts1.tv_nsec == ts2.tv_nsec && ts1.tv_sec == ts2.tv_sec);

	/* actually start the timer */
	ctrl_outb(TMU_TSTR_INIT, TMU_TSTR);

	do {
		rtc_get_time(&ts2);
	} while (ts1.tv_nsec == ts2.tv_nsec && ts1.tv_sec == ts2.tv_sec);

	freq = 0xffffffff - ctrl_inl(TMU0_TCNT);
	if (ts2.tv_nsec < ts1.tv_nsec) {
		ts2.tv_nsec += 1000000000;
		ts2.tv_sec--;
	}

	diff_nsec = (ts2.tv_sec - ts1.tv_sec) * 1000000000 + (ts2.tv_nsec - ts1.tv_nsec);

	/* this should work well if the RTC has a precision of n Hz, where
	 * n is an integer.  I don't think we have to worry about the other
	 * cases. */
	factor = (1000000000 + diff_nsec/2) / diff_nsec;

	/* Sanity check: the measured interval must be within 10% of 1/n s. */
	if (factor * diff_nsec > 1100000000 ||
	    factor * diff_nsec <  900000000)
		panic("weird RTC (diff_nsec %ld)", diff_nsec);

	return freq * factor;
}
347
348void (*board_time_init)(void);
349void (*board_timer_setup)(struct irqaction *irq);
350
351static unsigned int sh_pclk_freq __initdata = CONFIG_SH_PCLK_FREQ;
352
353static int __init sh_pclk_setup(char *str)
354{
355 unsigned int freq;
356
357 if (get_option(&str, &freq))
358 sh_pclk_freq = freq;
359
360 return 1;
361}
362__setup("sh_pclk=", sh_pclk_setup);
363
364static struct irqaction irq0 = { timer_interrupt, SA_INTERRUPT, CPU_MASK_NONE, "timer", NULL, NULL};
365
/*
 * Decode the FRQCR clock-control register into the current instruction
 * (ifc), bus (bfc) and peripheral (pfc) clock divisor/multiplier
 * indices, using the per-subtype field layouts.
 */
void get_current_frequency_divisors(unsigned int *ifc, unsigned int *bfc, unsigned int *pfc)
{
	unsigned int frqcr = ctrl_inw(FRQCR);

#if defined(CONFIG_CPU_SH3)
#if defined(CONFIG_CPU_SUBTYPE_SH7300)
	*ifc = md_table[((frqcr & 0x0070) >> 4)];
	*bfc = md_table[((frqcr & 0x0700) >> 8)];
	*pfc = md_table[frqcr & 0x0007];
#elif defined(CONFIG_CPU_SUBTYPE_SH7705)
	*bfc = stc_multipliers[(frqcr & 0x0300) >> 8];
	*ifc = ifc_divisors[(frqcr & 0x0030) >> 4];
	*pfc = pfc_divisors[frqcr & 0x0003];
#else
	/* Generic SH-3: each index is split across two FRQCR bitfields. */
	unsigned int tmp;

	tmp  = (frqcr & 0x8000) >> 13;
	tmp |= (frqcr & 0x0030) >>  4;
	*bfc = stc_multipliers[tmp];
	tmp  = (frqcr & 0x4000) >> 12;
	tmp |= (frqcr & 0x000c) >>  2;
	*ifc = ifc_divisors[tmp];
	tmp  = (frqcr & 0x2000) >> 11;
	tmp |= frqcr & 0x0003;
	*pfc = pfc_divisors[tmp];
#endif
#elif defined(CONFIG_CPU_SH4)
#if defined(CONFIG_CPU_SUBTYPE_SH73180)
	*ifc = ifc_divisors[(frqcr>> 20) & 0x0007];
	*bfc = bfc_divisors[(frqcr>> 12) & 0x0007];
	*pfc = pfc_divisors[frqcr & 0x0007];
#else
	*ifc = ifc_divisors[(frqcr >> 6) & 0x0007];
	*bfc = bfc_divisors[(frqcr >> 3) & 0x0007];
	*pfc = pfc_divisors[frqcr & 0x0007];
#endif
#endif
}
404
405/*
406 * This bit of ugliness builds up accessor routines to get at both
407 * the divisors and the physical values.
408 */
409#define _FREQ_TABLE(x) \
410 unsigned int get_##x##_divisor(unsigned int value) \
411 { return x##_divisors[value]; } \
412 \
413 unsigned int get_##x##_value(unsigned int divisor) \
414 { return x##_values[(divisor - 1)]; }
415
416_FREQ_TABLE(ifc);
417_FREQ_TABLE(bfc);
418_FREQ_TABLE(pfc);
419
420#ifdef CONFIG_CPU_SUBTYPE_ST40STB1
421
422/*
423 * The ST40 divisors are totally different so we set the cpu data
424 * clocks using a different algorithm
425 *
426 * I've just plugged this from the 2.4 code
427 * - Alex Bennee <kernel-hacker@bennee.com>
428 */
429#define CCN_PVR_CHIP_SHIFT 24
430#define CCN_PVR_CHIP_MASK 0xff
431#define CCN_PVR_CHIP_ST40STB1 0x4
432
433
434struct frqcr_data {
435 unsigned short frqcr;
436
437 struct {
438 unsigned char multiplier;
439 unsigned char divisor;
440 } factor[3];
441};
442
443static struct frqcr_data st40_frqcr_table[] = {
444 { 0x000, {{1,1}, {1,1}, {1,2}}},
445 { 0x002, {{1,1}, {1,1}, {1,4}}},
446 { 0x004, {{1,1}, {1,1}, {1,8}}},
447 { 0x008, {{1,1}, {1,2}, {1,2}}},
448 { 0x00A, {{1,1}, {1,2}, {1,4}}},
449 { 0x00C, {{1,1}, {1,2}, {1,8}}},
450 { 0x011, {{1,1}, {2,3}, {1,6}}},
451 { 0x013, {{1,1}, {2,3}, {1,3}}},
452 { 0x01A, {{1,1}, {1,2}, {1,4}}},
453 { 0x01C, {{1,1}, {1,2}, {1,8}}},
454 { 0x023, {{1,1}, {2,3}, {1,3}}},
455 { 0x02C, {{1,1}, {1,2}, {1,8}}},
456 { 0x048, {{1,2}, {1,2}, {1,4}}},
457 { 0x04A, {{1,2}, {1,2}, {1,6}}},
458 { 0x04C, {{1,2}, {1,2}, {1,8}}},
459 { 0x05A, {{1,2}, {1,3}, {1,6}}},
460 { 0x05C, {{1,2}, {1,3}, {1,6}}},
461 { 0x063, {{1,2}, {1,4}, {1,4}}},
462 { 0x06C, {{1,2}, {1,4}, {1,8}}},
463 { 0x091, {{1,3}, {1,3}, {1,6}}},
464 { 0x093, {{1,3}, {1,3}, {1,6}}},
465 { 0x0A3, {{1,3}, {1,6}, {1,6}}},
466 { 0x0DA, {{1,4}, {1,4}, {1,8}}},
467 { 0x0DC, {{1,4}, {1,4}, {1,8}}},
468 { 0x0EC, {{1,4}, {1,8}, {1,8}}},
469 { 0x123, {{1,4}, {1,4}, {1,8}}},
470 { 0x16C, {{1,4}, {1,8}, {1,8}}},
471};
472
473struct memclk_data {
474 unsigned char multiplier;
475 unsigned char divisor;
476};
477
478static struct memclk_data st40_memclk_table[8] = {
479 {1,1}, // 000
480 {1,2}, // 001
481 {1,3}, // 010
482 {2,3}, // 011
483 {1,4}, // 100
484 {1,6}, // 101
485 {1,8}, // 110
486 {1,8} // 111
487};
488
489static void st40_specific_time_init(unsigned int module_clock, unsigned short frqcr)
490{
491 unsigned int cpu_clock, master_clock, bus_clock, memory_clock;
492 struct frqcr_data *d;
493 int a;
494 unsigned long memclkcr;
495 struct memclk_data *e;
496
497 for (a = 0; a < ARRAY_SIZE(st40_frqcr_table); a++) {
498 d = &st40_frqcr_table[a];
499
500 if (d->frqcr == (frqcr & 0x1ff))
501 break;
502 }
503
504 if (a == ARRAY_SIZE(st40_frqcr_table)) {
505 d = st40_frqcr_table;
506
507 printk("ERROR: Unrecognised FRQCR value (0x%x), "
508 "using default multipliers\n", frqcr);
509 }
510
511 memclkcr = ctrl_inl(CLOCKGEN_MEMCLKCR);
512 e = &st40_memclk_table[memclkcr & MEMCLKCR_RATIO_MASK];
513
514 printk(KERN_INFO "Clock multipliers: CPU: %d/%d Bus: %d/%d "
515 "Mem: %d/%d Periph: %d/%d\n",
516 d->factor[0].multiplier, d->factor[0].divisor,
517 d->factor[1].multiplier, d->factor[1].divisor,
518 e->multiplier, e->divisor,
519 d->factor[2].multiplier, d->factor[2].divisor);
520
521 master_clock = module_clock * d->factor[2].divisor
522 / d->factor[2].multiplier;
523 bus_clock = master_clock * d->factor[1].multiplier
524 / d->factor[1].divisor;
525 memory_clock = master_clock * e->multiplier
526 / e->divisor;
527 cpu_clock = master_clock * d->factor[0].multiplier
528 / d->factor[0].divisor;
529
530 current_cpu_data.cpu_clock = cpu_clock;
531 current_cpu_data.master_clock = master_clock;
532 current_cpu_data.bus_clock = bus_clock;
533 current_cpu_data.memory_clock = memory_clock;
534 current_cpu_data.module_clock = module_clock;
535}
536#endif
537
/*
 * Architecture time initialisation: determine the module (peripheral)
 * clock, derive the master/bus/cpu clocks from the FRQCR divisors,
 * seed xtime from the RTC when one is present, and start TMU0 as the
 * periodic tick timer.
 */
void __init time_init(void)
{
	unsigned int timer_freq = 0;
	unsigned int ifc, pfc, bfc;
	unsigned long interval;
#ifdef CONFIG_CPU_SUBTYPE_ST40STB1
	unsigned long pvr;
	unsigned short frqcr;
#endif

	/* Let the board hook run first (it may install rtc hooks etc.). */
	if (board_time_init)
		board_time_init();

	/*
	 * If we don't have an RTC (such as with the SH7300), don't attempt to
	 * probe the timer frequency. Rely on an either hardcoded peripheral
	 * clock value, or on the sh_pclk command line option. Note that we
	 * still need to have CONFIG_SH_PCLK_FREQ set in order for things like
	 * CLOCK_TICK_RATE to be sane.
	 */
	current_cpu_data.module_clock = sh_pclk_freq;

#ifdef CONFIG_SH_PCLK_CALC
	/* XXX: Switch this over to a more generic test. */
	{
		unsigned int freq;

		/*
		 * If we've specified a peripheral clock frequency, and we have
		 * an RTC, compare it against the autodetected value. Complain
		 * if there's a mismatch.
		 */
		timer_freq = get_timer_frequency();
		freq = timer_freq * 4;	/* timer counts pclk/4 - see interval below */

		/* Tolerate up to a +/-1% discrepancy before overriding. */
		if (sh_pclk_freq && (sh_pclk_freq/100*99 > freq || sh_pclk_freq/100*101 < freq)) {
			printk(KERN_NOTICE "Calculated peripheral clock value "
			       "%d differs from sh_pclk value %d, fixing..\n",
			       freq, sh_pclk_freq);
			current_cpu_data.module_clock = freq;
		}
	}
#endif

#ifdef CONFIG_CPU_SUBTYPE_ST40STB1
	/* XXX: Update ST40 code to use board_time_init() */
	pvr = ctrl_inl(CCN_PVR);
	frqcr = ctrl_inw(FRQCR);
	printk("time.c ST40 Probe: PVR %08lx, FRQCR %04hx\n", pvr, frqcr);

	if (((pvr >> CCN_PVR_CHIP_SHIFT) & CCN_PVR_CHIP_MASK) == CCN_PVR_CHIP_ST40STB1)
		st40_specific_time_init(current_cpu_data.module_clock, frqcr);
	else
#endif
		get_current_frequency_divisors(&ifc, &bfc, &pfc);

	/* Seed the wall clock from the RTC, or fall back to 2000-01-01. */
	if (rtc_get_time) {
		rtc_get_time(&xtime);
	} else {
		xtime.tv_sec = mktime(2000, 1, 1, 0, 0, 0);
		xtime.tv_nsec = 0;
	}

	set_normalized_timespec(&wall_to_monotonic,
				-xtime.tv_sec, -xtime.tv_nsec);

	/* Register the tick interrupt, via the board hook when provided. */
	if (board_timer_setup) {
		board_timer_setup(&irq0);
	} else {
		setup_irq(TIMER_IRQ, &irq0);
	}

	/*
	 * for ST40 chips the current_cpu_data should already be set
	 * so not having valid pfc/bfc/ifc shouldn't be a problem
	 */
	if (!current_cpu_data.master_clock)
		current_cpu_data.master_clock = current_cpu_data.module_clock * pfc;
	if (!current_cpu_data.bus_clock)
		current_cpu_data.bus_clock = current_cpu_data.master_clock / bfc;
	if (!current_cpu_data.cpu_clock)
		current_cpu_data.cpu_clock = current_cpu_data.master_clock / ifc;

	printk("CPU clock: %d.%02dMHz\n",
	       (current_cpu_data.cpu_clock / 1000000),
	       (current_cpu_data.cpu_clock % 1000000)/10000);
	printk("Bus clock: %d.%02dMHz\n",
	       (current_cpu_data.bus_clock / 1000000),
	       (current_cpu_data.bus_clock % 1000000)/10000);
#ifdef CONFIG_CPU_SUBTYPE_ST40STB1
	printk("Memory clock: %d.%02dMHz\n",
	       (current_cpu_data.memory_clock / 1000000),
	       (current_cpu_data.memory_clock % 1000000)/10000);
#endif
	printk("Module clock: %d.%02dMHz\n",
	       (current_cpu_data.module_clock / 1000000),
	       (current_cpu_data.module_clock % 1000000)/10000);

	/* Timer ticks (pclk/4) per jiffy, rounded to nearest. */
	interval = (current_cpu_data.module_clock/4 + HZ/2) / HZ;

	printk("Interval = %ld\n", interval);

	/* Start TMU0 */
	ctrl_outb(0, TMU_TSTR);		/* stop the channel while programming it */
#if !defined(CONFIG_CPU_SUBTYPE_SH7300)
	ctrl_outb(TMU_TOCR_INIT, TMU_TOCR);
#endif
	ctrl_outw(TMU0_TCR_INIT, TMU0_TCR);
	ctrl_outl(interval, TMU0_TCOR);	/* reload value */
	ctrl_outl(interval, TMU0_TCNT);	/* initial count */
	ctrl_outb(TMU_TSTR_INIT, TMU_TSTR);

#if defined(CONFIG_SH_KGDB)
	/*
	 * Set up kgdb as requested. We do it here because the serial
	 * init uses the timer vars we just set up for figuring baud.
	 */
	kgdb_init();
#endif
}
diff --git a/arch/sh/kernel/traps.c b/arch/sh/kernel/traps.c
new file mode 100644
index 000000000000..7eb06719d844
--- /dev/null
+++ b/arch/sh/kernel/traps.c
@@ -0,0 +1,712 @@
1/* $Id: traps.c,v 1.17 2004/05/02 01:46:30 sugioka Exp $
2 *
3 * linux/arch/sh/traps.c
4 *
5 * SuperH version: Copyright (C) 1999 Niibe Yutaka
6 * Copyright (C) 2000 Philipp Rumpf
7 * Copyright (C) 2000 David Howells
8 * Copyright (C) 2002, 2003 Paul Mundt
9 */
10
11/*
12 * 'Traps.c' handles hardware traps and faults after we have saved some
13 * state in 'entry.S'.
14 */
15#include <linux/config.h>
16#include <linux/sched.h>
17#include <linux/kernel.h>
18#include <linux/string.h>
19#include <linux/errno.h>
20#include <linux/ptrace.h>
21#include <linux/timer.h>
22#include <linux/mm.h>
23#include <linux/smp.h>
24#include <linux/smp_lock.h>
25#include <linux/init.h>
26#include <linux/delay.h>
27#include <linux/spinlock.h>
28#include <linux/module.h>
29#include <linux/kallsyms.h>
30
31#include <asm/system.h>
32#include <asm/uaccess.h>
33#include <asm/io.h>
34#include <asm/atomic.h>
35#include <asm/processor.h>
36#include <asm/sections.h>
37
#ifdef CONFIG_SH_KGDB
#include <asm/kgdb.h>
/*
 * Hand kernel-mode faults to the remote kgdb stub when a debug hook
 * has been registered; compiles to nothing without CONFIG_SH_KGDB.
 */
#define CHK_REMOTE_DEBUG(regs)			\
{						\
	if ((kgdb_debug_hook != (kgdb_debug_hook_t *) NULL) && (!user_mode(regs))) \
	{					\
		(*kgdb_debug_hook)(regs);	\
	}					\
}
#else
#define CHK_REMOTE_DEBUG(regs)
#endif
50
/*
 * DO_ERROR() expands to an asmlinkage trap handler do_<name>() which:
 *  - first asks is_dsp_inst() whether the offending opcode is really
 *    a DSP instruction (always 0 without CONFIG_SH_DSP); if so it
 *    enables DSP mode in SR and returns so the insn is restarted;
 *  - otherwise reads the hardware error code from the r2 register
 *    bank, records trap details in <tsk>->thread, notifies kgdb,
 *    raises <signr>, and dies unless an exception-table fixup applies.
 */
#define DO_ERROR(trapnr, signr, str, name, tsk) \
asmlinkage void do_##name(unsigned long r4, unsigned long r5, \
			  unsigned long r6, unsigned long r7, \
			  struct pt_regs regs) \
{ \
	unsigned long error_code; \
 \
	/* Check if it's a DSP instruction */ \
	if (is_dsp_inst(&regs)) { \
		/* Enable DSP mode, and restart instruction. */ \
		regs.sr |= SR_DSP; \
		return; \
	} \
 \
	asm volatile("stc r2_bank, %0": "=r" (error_code)); \
	local_irq_enable(); \
	tsk->thread.error_code = error_code; \
	tsk->thread.trap_no = trapnr; \
	CHK_REMOTE_DEBUG(&regs); \
	force_sig(signr, tsk); \
	die_if_no_fixup(str,&regs,error_code); \
}
73
/* Exception vector numbers for the two instruction-fault traps;
 * they differ between SH-2 and later parts. */
#ifdef CONFIG_CPU_SH2
#define TRAP_RESERVED_INST	4
#define TRAP_ILLEGAL_SLOT_INST	6
#else
#define TRAP_RESERVED_INST	12
#define TRAP_ILLEGAL_SLOT_INST	13
#endif
81
82/*
83 * These constants are for searching for possible module text
84 * segments. VMALLOC_OFFSET comes from mm/vmalloc.c; MODULE_RANGE is
85 * a guess of how much space is likely to be vmalloced.
86 */
87#define VMALLOC_OFFSET (8*1024*1024)
88#define MODULE_RANGE (8*1024*1024)
89
90spinlock_t die_lock;
91
/*
 * Fatal error handler: dump the oops banner and register state, then
 * terminate the current task.  Output is serialised by die_lock; the
 * counter distinguishes successive oopses in the log.
 */
void die(const char * str, struct pt_regs * regs, long err)
{
	static int die_counter;

	console_verbose();		/* make sure the dump reaches the console */
	spin_lock_irq(&die_lock);
	printk("%s: %04lx [#%d]\n", str, err & 0xffff, ++die_counter);
	CHK_REMOTE_DEBUG(regs);		/* give kgdb a look before we exit */
	show_regs(regs);
	spin_unlock_irq(&die_lock);
	do_exit(SIGSEGV);
}
104
/*
 * Like die(), but only for faults taken in kernel mode; user-mode
 * faults are left to the caller's signal handling.
 */
static inline void die_if_kernel(const char * str, struct pt_regs * regs, long err)
{
	if (user_mode(regs))
		return;

	die(str, regs, err);
}
110
/* Rate limit: only the first ten user-space unaligned fixups are logged. */
static int handle_unaligned_notify_count = 10;
112
113/*
114 * try and fix up kernelspace address errors
115 * - userspace errors just cause EFAULT to be returned, resulting in SEGV
116 * - kernel/userspace interfaces cause a jump to an appropriate handler
117 * - other kernel errors are bad
118 * - return 0 if fixed-up, -EFAULT if non-fatal (to the kernel) fault
119 */
120static int die_if_no_fixup(const char * str, struct pt_regs * regs, long err)
121{
122 if (!user_mode(regs))
123 {
124 const struct exception_table_entry *fixup;
125 fixup = search_exception_tables(regs->pc);
126 if (fixup) {
127 regs->pc = fixup->fixup;
128 return 0;
129 }
130 die(str, regs, err);
131 }
132 return -EFAULT;
133}
134
135/*
136 * handle an instruction that does an unaligned memory access by emulating the
137 * desired behaviour
138 * - note that PC _may not_ point to the faulting instruction
139 * (if that instruction is in a branch delay slot)
140 * - return 0 if emulation okay, -EFAULT on existential error
141 */
142static int handle_unaligned_ins(u16 instruction, struct pt_regs *regs)
143{
144 int ret, index, count;
145 unsigned long *rm, *rn;
146 unsigned char *src, *dst;
147
148 index = (instruction>>8)&15; /* 0x0F00 */
149 rn = &regs->regs[index];
150
151 index = (instruction>>4)&15; /* 0x00F0 */
152 rm = &regs->regs[index];
153
154 count = 1<<(instruction&3);
155
156 ret = -EFAULT;
157 switch (instruction>>12) {
158 case 0: /* mov.[bwl] to/from memory via r0+rn */
159 if (instruction & 8) {
160 /* from memory */
161 src = (unsigned char*) *rm;
162 src += regs->regs[0];
163 dst = (unsigned char*) rn;
164 *(unsigned long*)dst = 0;
165
166#ifdef __LITTLE_ENDIAN__
167 if (copy_from_user(dst, src, count))
168 goto fetch_fault;
169
170 if ((count == 2) && dst[1] & 0x80) {
171 dst[2] = 0xff;
172 dst[3] = 0xff;
173 }
174#else
175 dst += 4-count;
176
177 if (__copy_user(dst, src, count))
178 goto fetch_fault;
179
180 if ((count == 2) && dst[2] & 0x80) {
181 dst[0] = 0xff;
182 dst[1] = 0xff;
183 }
184#endif
185 } else {
186 /* to memory */
187 src = (unsigned char*) rm;
188#if !defined(__LITTLE_ENDIAN__)
189 src += 4-count;
190#endif
191 dst = (unsigned char*) *rn;
192 dst += regs->regs[0];
193
194 if (copy_to_user(dst, src, count))
195 goto fetch_fault;
196 }
197 ret = 0;
198 break;
199
200 case 1: /* mov.l Rm,@(disp,Rn) */
201 src = (unsigned char*) rm;
202 dst = (unsigned char*) *rn;
203 dst += (instruction&0x000F)<<2;
204
205 if (copy_to_user(dst,src,4))
206 goto fetch_fault;
207 ret = 0;
208 break;
209
210 case 2: /* mov.[bwl] to memory, possibly with pre-decrement */
211 if (instruction & 4)
212 *rn -= count;
213 src = (unsigned char*) rm;
214 dst = (unsigned char*) *rn;
215#if !defined(__LITTLE_ENDIAN__)
216 src += 4-count;
217#endif
218 if (copy_to_user(dst, src, count))
219 goto fetch_fault;
220 ret = 0;
221 break;
222
223 case 5: /* mov.l @(disp,Rm),Rn */
224 src = (unsigned char*) *rm;
225 src += (instruction&0x000F)<<2;
226 dst = (unsigned char*) rn;
227 *(unsigned long*)dst = 0;
228
229 if (copy_from_user(dst,src,4))
230 goto fetch_fault;
231 ret = 0;
232 break;
233
234 case 6: /* mov.[bwl] from memory, possibly with post-increment */
235 src = (unsigned char*) *rm;
236 if (instruction & 4)
237 *rm += count;
238 dst = (unsigned char*) rn;
239 *(unsigned long*)dst = 0;
240
241#ifdef __LITTLE_ENDIAN__
242 if (copy_from_user(dst, src, count))
243 goto fetch_fault;
244
245 if ((count == 2) && dst[1] & 0x80) {
246 dst[2] = 0xff;
247 dst[3] = 0xff;
248 }
249#else
250 dst += 4-count;
251
252 if (copy_from_user(dst, src, count))
253 goto fetch_fault;
254
255 if ((count == 2) && dst[2] & 0x80) {
256 dst[0] = 0xff;
257 dst[1] = 0xff;
258 }
259#endif
260 ret = 0;
261 break;
262
263 case 8:
264 switch ((instruction&0xFF00)>>8) {
265 case 0x81: /* mov.w R0,@(disp,Rn) */
266 src = (unsigned char*) &regs->regs[0];
267#if !defined(__LITTLE_ENDIAN__)
268 src += 2;
269#endif
270 dst = (unsigned char*) *rm; /* called Rn in the spec */
271 dst += (instruction&0x000F)<<1;
272
273 if (copy_to_user(dst, src, 2))
274 goto fetch_fault;
275 ret = 0;
276 break;
277
278 case 0x85: /* mov.w @(disp,Rm),R0 */
279 src = (unsigned char*) *rm;
280 src += (instruction&0x000F)<<1;
281 dst = (unsigned char*) &regs->regs[0];
282 *(unsigned long*)dst = 0;
283
284#if !defined(__LITTLE_ENDIAN__)
285 dst += 2;
286#endif
287
288 if (copy_from_user(dst, src, 2))
289 goto fetch_fault;
290
291#ifdef __LITTLE_ENDIAN__
292 if (dst[1] & 0x80) {
293 dst[2] = 0xff;
294 dst[3] = 0xff;
295 }
296#else
297 if (dst[2] & 0x80) {
298 dst[0] = 0xff;
299 dst[1] = 0xff;
300 }
301#endif
302 ret = 0;
303 break;
304 }
305 break;
306 }
307 return ret;
308
309 fetch_fault:
310 /* Argh. Address not only misaligned but also non-existent.
311 * Raise an EFAULT and see if it's trapped
312 */
313 return die_if_no_fixup("Fault in unaligned fixup", regs, 0);
314}
315
316/*
317 * emulate the instruction in the delay slot
318 * - fetches the instruction from PC+2
319 */
320static inline int handle_unaligned_delayslot(struct pt_regs *regs)
321{
322 u16 instruction;
323
324 if (copy_from_user(&instruction, (u16 *)(regs->pc+2), 2)) {
325 /* the instruction-fetch faulted */
326 if (user_mode(regs))
327 return -EFAULT;
328
329 /* kernel */
330 die("delay-slot-insn faulting in handle_unaligned_delayslot", regs, 0);
331 }
332
333 return handle_unaligned_ins(instruction,regs);
334}
335
336/*
337 * handle an instruction that does an unaligned memory access
338 * - have to be careful of branch delay-slot instructions that fault
339 * SH3:
340 * - if the branch would be taken PC points to the branch
341 * - if the branch would not be taken, PC points to delay-slot
342 * SH4:
343 * - PC always points to delayed branch
344 * - return 0 if handled, -EFAULT if failed (may not return if in kernel)
345 */
346
/* Macros to determine offset from current PC for branch instructions */
/* Explicit type coercion is used to force sign extension where needed */
#define SH_PC_8BIT_OFFSET(instr) ((((signed char)(instr))*2) + 4)
#define SH_PC_12BIT_OFFSET(instr) ((((signed short)(instr<<4))>>3) + 4)

/*
 * Top-level unaligned-access fixup: decode the faulting instruction;
 * for branch opcodes first emulate the instruction in the delay slot
 * via handle_unaligned_delayslot(), then apply the branch's effect on
 * PC/PR; everything else is emulated directly at "simple" and PC is
 * advanced past the instruction.  Returns 0 on success, -EFAULT on
 * failure (may not return at all for fatal kernel faults).
 */
static int handle_unaligned_access(u16 instruction, struct pt_regs *regs)
{
	u_int rm;
	int ret, index;

	index = (instruction>>8)&15;	/* 0x0F00 */
	rm = regs->regs[index];

	/* shout about the first ten userspace fixups */
	if (user_mode(regs) && handle_unaligned_notify_count>0) {
		handle_unaligned_notify_count--;

		printk("Fixing up unaligned userspace access in \"%s\" pid=%d pc=0x%p ins=0x%04hx\n",
		       current->comm,current->pid,(u16*)regs->pc,instruction);
	}

	ret = -EFAULT;
	switch (instruction&0xF000) {
	case 0x0000:
		if (instruction==0x000B) {
			/* rts */
			ret = handle_unaligned_delayslot(regs);
			if (ret==0)
				regs->pc = regs->pr;
		}
		else if ((instruction&0x00FF)==0x0023) {
			/* braf @Rm */
			ret = handle_unaligned_delayslot(regs);
			if (ret==0)
				regs->pc += rm + 4;
		}
		else if ((instruction&0x00FF)==0x0003) {
			/* bsrf @Rm */
			ret = handle_unaligned_delayslot(regs);
			if (ret==0) {
				regs->pr = regs->pc + 4;	/* return address */
				regs->pc += rm + 4;
			}
		}
		else {
			/* mov.[bwl] to/from memory via r0+rn */
			goto simple;
		}
		break;

	case 0x1000: /* mov.l Rm,@(disp,Rn) */
		goto simple;

	case 0x2000: /* mov.[bwl] to memory, possibly with pre-decrement */
		goto simple;

	case 0x4000:
		if ((instruction&0x00FF)==0x002B) {
			/* jmp @Rm */
			ret = handle_unaligned_delayslot(regs);
			if (ret==0)
				regs->pc = rm;
		}
		else if ((instruction&0x00FF)==0x000B) {
			/* jsr @Rm */
			ret = handle_unaligned_delayslot(regs);
			if (ret==0) {
				regs->pr = regs->pc + 4;	/* return address */
				regs->pc = rm;
			}
		}
		else {
			/* mov.[bwl] to/from memory via r0+rn */
			goto simple;
		}
		break;

	case 0x5000: /* mov.l @(disp,Rm),Rn */
		goto simple;

	case 0x6000: /* mov.[bwl] from memory, possibly with post-increment */
		goto simple;

	case 0x8000: /* bf lab, bf/s lab, bt lab, bt/s lab */
		switch (instruction&0x0F00) {
		case 0x0100: /* mov.w R0,@(disp,Rm) */
			goto simple;
		case 0x0500: /* mov.w @(disp,Rm),R0 */
			goto simple;
		case 0x0B00: /* bf lab - no delayslot*/
			break;
		case 0x0F00: /* bf/s lab */
			ret = handle_unaligned_delayslot(regs);
			if (ret==0) {
				/* branch taken only when T bit is clear */
#if defined(CONFIG_CPU_SH4) || defined(CONFIG_SH7705_CACHE_32KB)
				if ((regs->sr & 0x00000001) != 0)
					regs->pc += 4; /* next after slot */
				else
#endif
					regs->pc += SH_PC_8BIT_OFFSET(instruction);
			}
			break;
		case 0x0900: /* bt lab - no delayslot */
			break;
		case 0x0D00: /* bt/s lab */
			ret = handle_unaligned_delayslot(regs);
			if (ret==0) {
				/* branch taken only when T bit is set */
#if defined(CONFIG_CPU_SH4) || defined(CONFIG_SH7705_CACHE_32KB)
				if ((regs->sr & 0x00000001) == 0)
					regs->pc += 4; /* next after slot */
				else
#endif
					regs->pc += SH_PC_8BIT_OFFSET(instruction);
			}
			break;
		}
		break;

	case 0xA000: /* bra label */
		ret = handle_unaligned_delayslot(regs);
		if (ret==0)
			regs->pc += SH_PC_12BIT_OFFSET(instruction);
		break;

	case 0xB000: /* bsr label */
		ret = handle_unaligned_delayslot(regs);
		if (ret==0) {
			regs->pr = regs->pc + 4;	/* return address */
			regs->pc += SH_PC_12BIT_OFFSET(instruction);
		}
		break;
	}
	return ret;

	/* handle non-delay-slot instruction */
 simple:
	ret = handle_unaligned_ins(instruction,regs);
	if (ret==0)
		regs->pc += 2;
	return ret;
}
488
489/*
490 * Handle various address error exceptions
491 */
492asmlinkage void do_address_error(struct pt_regs *regs,
493 unsigned long writeaccess,
494 unsigned long address)
495{
496 unsigned long error_code;
497 mm_segment_t oldfs;
498 u16 instruction;
499 int tmp;
500
501 asm volatile("stc r2_bank,%0": "=r" (error_code));
502
503 oldfs = get_fs();
504
505 if (user_mode(regs)) {
506 local_irq_enable();
507 current->thread.error_code = error_code;
508 current->thread.trap_no = (writeaccess) ? 8 : 7;
509
510 /* bad PC is not something we can fix */
511 if (regs->pc & 1)
512 goto uspace_segv;
513
514 set_fs(USER_DS);
515 if (copy_from_user(&instruction, (u16 *)(regs->pc), 2)) {
516 /* Argh. Fault on the instruction itself.
517 This should never happen non-SMP
518 */
519 set_fs(oldfs);
520 goto uspace_segv;
521 }
522
523 tmp = handle_unaligned_access(instruction, regs);
524 set_fs(oldfs);
525
526 if (tmp==0)
527 return; /* sorted */
528
529 uspace_segv:
530 printk(KERN_NOTICE "Killing process \"%s\" due to unaligned access\n", current->comm);
531 force_sig(SIGSEGV, current);
532 } else {
533 if (regs->pc & 1)
534 die("unaligned program counter", regs, error_code);
535
536 set_fs(KERNEL_DS);
537 if (copy_from_user(&instruction, (u16 *)(regs->pc), 2)) {
538 /* Argh. Fault on the instruction itself.
539 This should never happen non-SMP
540 */
541 set_fs(oldfs);
542 die("insn faulting in do_address_error", regs, 0);
543 }
544
545 handle_unaligned_access(instruction, regs);
546 set_fs(oldfs);
547 }
548}
549
550#ifdef CONFIG_SH_DSP
551/*
552 * SH-DSP support gerg@snapgear.com.
553 */
554int is_dsp_inst(struct pt_regs *regs)
555{
556 unsigned short inst;
557
558 /*
559 * Safe guard if DSP mode is already enabled or we're lacking
560 * the DSP altogether.
561 */
562 if (!(cpu_data->flags & CPU_HAS_DSP) || (regs->sr & SR_DSP))
563 return 0;
564
565 get_user(inst, ((unsigned short *) regs->pc));
566
567 inst &= 0xf000;
568
569 /* Check for any type of DSP or support instruction */
570 if ((inst == 0xf000) || (inst == 0x4000))
571 return 1;
572
573 return 0;
574}
575#else
576#define is_dsp_inst(regs) (0)
577#endif /* CONFIG_SH_DSP */
578
/* Instantiate do_reserved_inst() and do_illegal_slot_inst() handlers. */
DO_ERROR(TRAP_RESERVED_INST, SIGILL, "reserved instruction", reserved_inst, current)
DO_ERROR(TRAP_ILLEGAL_SLOT_INST, SIGILL, "illegal slot instruction", illegal_slot_inst, current)
581
582asmlinkage void do_exception_error(unsigned long r4, unsigned long r5,
583 unsigned long r6, unsigned long r7,
584 struct pt_regs regs)
585{
586 long ex;
587 asm volatile("stc r2_bank, %0" : "=r" (ex));
588 die_if_kernel("exception", &regs, ex);
589}
590
591#if defined(CONFIG_SH_STANDARD_BIOS)
/* Vector (old VBR + 0x100) through which debug/BIOS traps are delegated. */
void *gdb_vbr_vector;

static inline void __init gdb_vbr_init(void)
{
	register unsigned long vbr;

	/*
	 * Read the old value of the VBR register to initialise
	 * the vector through which debug and BIOS traps are
	 * delegated by the Linux trap handler.
	 */
	asm volatile("stc vbr, %0" : "=r" (vbr));

	gdb_vbr_vector = (void *)(vbr + 0x100);
	printk("Setting GDB trap vector to 0x%08lx\n",
	       (unsigned long)gdb_vbr_vector);
}
609#endif
610
/*
 * Point the VBR register at this CPU's exception vector base.
 * Called for the boot CPU from trap_init().
 */
void __init per_cpu_trap_init(void)
{
	extern void *vbr_base;

#ifdef CONFIG_SH_STANDARD_BIOS
	gdb_vbr_init();		/* capture the BIOS vector first */
#endif

	/* NOTE: The VBR value should be at P1
	   (or P2, virtual "fixed" address space).
	   It definitely should not be a physical address. */

	asm volatile("ldc %0, vbr"
		     : /* no output */
		     : "r" (&vbr_base)
		     : "memory");
}
628
/*
 * Install the instruction-fault handlers into the (externally defined)
 * exception dispatch table and set up VBR for the boot CPU.
 */
void __init trap_init(void)
{
	extern void *exception_handling_table[];

	exception_handling_table[TRAP_RESERVED_INST]
		= (void *)do_reserved_inst;
	exception_handling_table[TRAP_ILLEGAL_SLOT_INST]
		= (void *)do_illegal_slot_inst;

#ifdef CONFIG_CPU_SH4
	if (!(cpu_data->flags & CPU_HAS_FPU)) {
		/* For SH-4 lacking an FPU, treat floating point instructions
		   as reserved. */
		/* entry 64 corresponds to EXPEVT=0x800 */
		exception_handling_table[64] = (void *)do_reserved_inst;
		exception_handling_table[65] = (void *)do_illegal_slot_inst;
	}
#endif

	/* Setup VBR for boot cpu */
	per_cpu_trap_init();
}
651
/*
 * Dump a call trace by scanning the stack for values that look like
 * kernel or module text addresses.
 * @tsk: task whose saved stack to walk (NULL for the current context)
 * @sp:  stack pointer to start from (NULL to derive one)
 */
void show_stack(struct task_struct *tsk, unsigned long *sp)
{
	unsigned long *stack, addr;
	unsigned long module_start = VMALLOC_START;
	unsigned long module_end = VMALLOC_END;
	int i = 1;

	if (tsk && !sp) {
		sp = (unsigned long *)tsk->thread.sp;
	}

	if (!sp) {
		/*
		 * No stack pointer supplied: take the live r15.
		 * NOTE(review): this also overwrites module_start/module_end
		 * with r15 and r7_bank, so the "module range" test in the
		 * loop below then uses those values instead of the VMALLOC
		 * range -- confirm this reuse is intentional.
		 */
		__asm__ __volatile__ (
			"mov r15, %0\n\t"
			"stc r7_bank, %1\n\t"
			: "=r" (module_start),
			  "=r" (module_end)
		);

		sp = (unsigned long *)module_start;
	}

	stack = sp;

	printk("\nCall trace: ");
#ifdef CONFIG_KALLSYMS
	printk("\n");
#endif

	while (!kstack_end(stack)) {
		addr = *stack++;
		/* only report values within kernel text or the module range */
		if (((addr >= (unsigned long)_text) &&
		     (addr <= (unsigned long)_etext)) ||
		    ((addr >= module_start) && (addr <= module_end))) {
			/*
			 * For 80-columns display, 6 entry is maximum.
			 * NOTE: '[<8c00abcd>] ' consumes 13 columns .
			 */
#ifndef CONFIG_KALLSYMS
			if (i && ((i % 6) == 0))
				printk("\n ");
#endif
			printk("[<%08lx>] ", addr);
			print_symbol("%s\n", addr);
			i++;
		}
	}

	printk("\n");
}
702
/* Convenience wrapper: trace the current context's stack starting at @sp. */
void show_task(unsigned long *sp)
{
	show_stack(NULL, sp);
}
707
/* Dump the current stack; exported for use by modules. */
void dump_stack(void)
{
	show_stack(NULL, NULL);
}
EXPORT_SYMBOL(dump_stack);
diff --git a/arch/sh/kernel/vmlinux.lds.S b/arch/sh/kernel/vmlinux.lds.S
new file mode 100644
index 000000000000..51bdc1cf7838
--- /dev/null
+++ b/arch/sh/kernel/vmlinux.lds.S
@@ -0,0 +1,155 @@
1/* $Id: vmlinux.lds.S,v 1.8 2003/05/16 17:18:14 lethal Exp $
2 * ld script to make SuperH Linux kernel
3 * Written by Niibe Yutaka
4 */
5#include <linux/config.h>
6#include <asm-generic/vmlinux.lds.h>
7
8#ifdef CONFIG_CPU_LITTLE_ENDIAN
9OUTPUT_FORMAT("elf32-sh-linux", "elf32-sh-linux", "elf32-sh-linux")
10#else
11OUTPUT_FORMAT("elf32-shbig-linux", "elf32-shbig-linux", "elf32-shbig-linux")
12#endif
13OUTPUT_ARCH(sh)
14ENTRY(_start)
SECTIONS
{
  /* Kernel links into the P1 (cached) segment, past the zero page. */
  . = 0x80000000 + CONFIG_MEMORY_START + CONFIG_ZERO_PAGE_OFFSET;
  _text = .;			/* Text and read-only data */
  text = .;			/* Text and read-only data */
  .empty_zero_page : {
	*(.empty_zero_page)
	} = 0
  .text : {
	*(.text)
	SCHED_TEXT
	LOCK_TEXT
	*(.fixup)
	*(.gnu.warning)
	} = 0x0009		/* fill pattern for inter-section gaps */

  . = ALIGN(16);		/* Exception table */
  __start___ex_table = .;
  __ex_table : { *(__ex_table) }
  __stop___ex_table = .;

  RODATA

  _etext = .;			/* End of text section */

  .data : {			/* Data */
	*(.data)

	/* Align the initial ramdisk image (INITRD) on page boundaries. */
	. = ALIGN(4096);
	__rd_start = .;
	*(.initrd)
	. = ALIGN(4096);
	__rd_end = .;

	CONSTRUCTORS
	}

  . = ALIGN(4096);
  .data.page_aligned : { *(.data.idt) }

  . = ALIGN(32);
  __per_cpu_start = .;
  .data.percpu : { *(.data.percpu) }
  __per_cpu_end = .;
  .data.cacheline_aligned : { *(.data.cacheline_aligned) }

  _edata = .;			/* End of data section */

  . = ALIGN(8192);		/* init_task */
  .data.init_task : { *(.data.init_task) }
  /* stack */
  .stack : { stack = .; _stack = .; }

  . = ALIGN(4096);		/* Init code and data */
  __init_begin = .;
  _sinittext = .;
  .init.text : { *(.init.text) }
  _einittext = .;
  .init.data : { *(.init.data) }
  . = ALIGN(16);
  __setup_start = .;
  .init.setup : { *(.init.setup) }
  __setup_end = .;
  /* initcalls, grouped by level; run in this order at boot */
  __initcall_start = .;
  .initcall.init : {
	*(.initcall1.init)
	*(.initcall2.init)
	*(.initcall3.init)
	*(.initcall4.init)
	*(.initcall5.init)
	*(.initcall6.init)
	*(.initcall7.init)
  }
  __initcall_end = .;
  __con_initcall_start = .;
  .con_initcall.init : { *(.con_initcall.init) }
  __con_initcall_end = .;
  SECURITY_INIT
  __initramfs_start = .;
  .init.ramfs : { *(.init.ramfs) }
  __initramfs_end = .;
  __machvec_start = .;
  .init.machvec : { *(.init.machvec) }
  __machvec_end = .;
  . = ALIGN(4096);
  __init_end = .;		/* init memory is freed after boot */

  . = ALIGN(4);
  __bss_start = .;		/* BSS */
  .bss : { *(.bss) }

  . = ALIGN(4);
  _end = . ;

  /* When something in the kernel is NOT compiled as a module, the
   * module cleanup code and data are put into these segments. Both
   * can then be thrown away, as cleanup code is never called unless
   * it's a module.
   */
  /DISCARD/ : {
	*(.exit.text)
	*(.exit.data)
	*(.exitcall.exit)
	}

  /* Stabs debugging sections. */
  .stab 0 : { *(.stab) }
  .stabstr 0 : { *(.stabstr) }
  .stab.excl 0 : { *(.stab.excl) }
  .stab.exclstr 0 : { *(.stab.exclstr) }
  .stab.index 0 : { *(.stab.index) }
  .stab.indexstr 0 : { *(.stab.indexstr) }
  .comment 0 : { *(.comment) }
  /* DWARF debug sections.
     Symbols in the DWARF debugging section are relative to the beginning
     of the section so we begin .debug at 0. */
  /* DWARF 1 */
  .debug 0 : { *(.debug) }
  .line 0 : { *(.line) }
  /* GNU DWARF 1 extensions */
  .debug_srcinfo 0 : { *(.debug_srcinfo) }
  .debug_sfnames 0 : { *(.debug_sfnames) }
  /* DWARF 1.1 and DWARF 2 */
  .debug_aranges 0 : { *(.debug_aranges) }
  .debug_pubnames 0 : { *(.debug_pubnames) }
  /* DWARF 2 */
  .debug_info 0 : { *(.debug_info) }
  .debug_abbrev 0 : { *(.debug_abbrev) }
  .debug_line 0 : { *(.debug_line) }
  .debug_frame 0 : { *(.debug_frame) }
  .debug_str 0 : { *(.debug_str) }
  .debug_loc 0 : { *(.debug_loc) }
  .debug_macinfo 0 : { *(.debug_macinfo) }
  /* SGI/MIPS DWARF 2 extensions */
  .debug_weaknames 0 : { *(.debug_weaknames) }
  .debug_funcnames 0 : { *(.debug_funcnames) }
  .debug_typenames 0 : { *(.debug_typenames) }
  .debug_varnames 0 : { *(.debug_varnames) }
  /* These must appear regardless of . */
}