Diffstat (limited to 'arch/sh/kernel/cpu')
-rw-r--r--  arch/sh/kernel/cpu/Makefile          16
-rw-r--r--  arch/sh/kernel/cpu/adc.c             36
-rw-r--r--  arch/sh/kernel/cpu/bus.c            195
-rw-r--r--  arch/sh/kernel/cpu/init.c           222
-rw-r--r--  arch/sh/kernel/cpu/irq_imask.c      116
-rw-r--r--  arch/sh/kernel/cpu/irq_ipr.c        339
-rw-r--r--  arch/sh/kernel/cpu/rtc.c            136
-rw-r--r--  arch/sh/kernel/cpu/sh2/Makefile       6
-rw-r--r--  arch/sh/kernel/cpu/sh2/probe.c       39
-rw-r--r--  arch/sh/kernel/cpu/sh3/Makefile       6
-rw-r--r--  arch/sh/kernel/cpu/sh3/ex.S         199
-rw-r--r--  arch/sh/kernel/cpu/sh3/probe.c       97
-rw-r--r--  arch/sh/kernel/cpu/sh4/Makefile      10
-rw-r--r--  arch/sh/kernel/cpu/sh4/ex.S         384
-rw-r--r--  arch/sh/kernel/cpu/sh4/fpu.c        335
-rw-r--r--  arch/sh/kernel/cpu/sh4/irq_intc2.c  222
-rw-r--r--  arch/sh/kernel/cpu/sh4/probe.c      138
-rw-r--r--  arch/sh/kernel/cpu/sh4/sq.c         453
-rw-r--r--  arch/sh/kernel/cpu/ubc.S             59
19 files changed, 3008 insertions, 0 deletions
diff --git a/arch/sh/kernel/cpu/Makefile b/arch/sh/kernel/cpu/Makefile
new file mode 100644
index 000000000000..cd43714df61a
--- /dev/null
+++ b/arch/sh/kernel/cpu/Makefile
@@ -0,0 +1,16 @@
1#
2# Makefile for the Linux/SuperH CPU-specific backends.
3#
4
5obj-y := irq_ipr.o irq_imask.o init.o bus.o
6
7obj-$(CONFIG_CPU_SH2) += sh2/
8obj-$(CONFIG_CPU_SH3) += sh3/
9obj-$(CONFIG_CPU_SH4) += sh4/
10
11obj-$(CONFIG_SH_RTC) += rtc.o
12obj-$(CONFIG_UBC_WAKEUP) += ubc.o
13obj-$(CONFIG_SH_ADC) += adc.o
14
15USE_STANDARD_AS_RULE := true
16
diff --git a/arch/sh/kernel/cpu/adc.c b/arch/sh/kernel/cpu/adc.c
new file mode 100644
index 000000000000..da3d6877f93d
--- /dev/null
+++ b/arch/sh/kernel/cpu/adc.c
@@ -0,0 +1,36 @@
1/*
2 * linux/arch/sh/kernel/adc.c -- SH3 on-chip ADC support
3 *
4 * Copyright (C) 2004 Andriy Skulysh <askulysh@image.kiev.ua>
5 */
6
7#include <linux/module.h>
8#include <asm/adc.h>
9#include <asm/io.h>
10
11
12int adc_single(unsigned int channel)
13{
14 int off;
15 unsigned char csr;
16
17 if (channel >= 8) return -1;
18
19 off = (channel & 0x03) << 2;
20
21 csr = ctrl_inb(ADCSR);
22 csr = channel | ADCSR_ADST | ADCSR_CKS;
23 ctrl_outb(csr, ADCSR);
24
25 do {
26 csr = ctrl_inb(ADCSR);
27 } while ((csr & ADCSR_ADF) == 0);
28
29 csr &= ~(ADCSR_ADF | ADCSR_ADST);
30 ctrl_outb(csr, ADCSR);
31
32 return (((ctrl_inb(ADDRAH + off) << 8) |
33 ctrl_inb(ADDRAL + off)) >> 6);
34}
35
36EXPORT_SYMBOL(adc_single);
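
adc_single() above polls ADCSR until the conversion-finished flag (ADCSR_ADF) is set and returns the 10-bit result right-aligned. A minimal sketch of how a board driver might use it; the channel number and the millivolt scaling are illustrative assumptions, not part of this commit:

#include <asm/adc.h>

/* Hypothetical helper: sample ADC channel 3 and scale to millivolts,
 * assuming a 3.3 V reference and the 10-bit result from adc_single(). */
static int example_read_battery_mv(void)
{
	int raw = adc_single(3);	/* returns -1 for channels >= 8 */

	if (raw < 0)
		return raw;

	return (raw * 3300) / 1023;
}
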
diff --git a/arch/sh/kernel/cpu/bus.c b/arch/sh/kernel/cpu/bus.c
new file mode 100644
index 000000000000..ace82f4b4a59
--- /dev/null
+++ b/arch/sh/kernel/cpu/bus.c
@@ -0,0 +1,195 @@
1/*
2 * arch/sh/kernel/cpu/bus.c
3 *
4 * Virtual bus for SuperH.
5 *
6 * Copyright (C) 2004 Paul Mundt
7 *
8 * Shamelessly cloned from arch/arm/mach-omap/bus.c, which was written
9 * by:
10 *
11 * Copyright (C) 2003 - 2004 Nokia Corporation
12 * Written by Tony Lindgren <tony@atomide.com>
13 * Portions of code based on sa1111.c.
14 *
15 * This program is free software; you can redistribute it and/or modify it
16 * under the terms of the GNU General Public License as published by the
17 * Free Software Foundation; either version 2 of the License, or (at your
18 * option) any later version.
19 */
20#include <linux/kernel.h>
21#include <linux/device.h>
22#include <linux/init.h>
23#include <linux/module.h>
24#include <asm/bus-sh.h>
25
26static int sh_bus_match(struct device *dev, struct device_driver *drv)
27{
28 struct sh_driver *shdrv = to_sh_driver(drv);
29 struct sh_dev *shdev = to_sh_dev(dev);
30
31 return shdev->dev_id == shdrv->dev_id;
32}
33
34static int sh_bus_suspend(struct device *dev, u32 state)
35{
36 struct sh_dev *shdev = to_sh_dev(dev);
37 struct sh_driver *shdrv = to_sh_driver(dev->driver);
38
39 if (shdrv && shdrv->suspend)
40 return shdrv->suspend(shdev, state);
41
42 return 0;
43}
44
45static int sh_bus_resume(struct device *dev)
46{
47 struct sh_dev *shdev = to_sh_dev(dev);
48 struct sh_driver *shdrv = to_sh_driver(dev->driver);
49
50 if (shdrv && shdrv->resume)
51 return shdrv->resume(shdev);
52
53 return 0;
54}
55
56static struct device sh_bus_devices[SH_NR_BUSES] = {
57 {
58 .bus_id = SH_BUS_NAME_VIRT,
59 },
60};
61
62struct bus_type sh_bus_types[SH_NR_BUSES] = {
63 {
64 .name = SH_BUS_NAME_VIRT,
65 .match = sh_bus_match,
66 .suspend = sh_bus_suspend,
67 .resume = sh_bus_resume,
68 },
69};
70
71static int sh_device_probe(struct device *dev)
72{
73 struct sh_dev *shdev = to_sh_dev(dev);
74 struct sh_driver *shdrv = to_sh_driver(dev->driver);
75
76 if (shdrv && shdrv->probe)
77 return shdrv->probe(shdev);
78
79 return -ENODEV;
80}
81
82static int sh_device_remove(struct device *dev)
83{
84 struct sh_dev *shdev = to_sh_dev(dev);
85 struct sh_driver *shdrv = to_sh_driver(dev->driver);
86
87 if (shdrv && shdrv->remove)
88 return shdrv->remove(shdev);
89
90 return 0;
91}
92
93int sh_device_register(struct sh_dev *dev)
94{
95 if (!dev)
96 return -EINVAL;
97
98 if (dev->bus_id < 0 || dev->bus_id >= SH_NR_BUSES) {
99 printk(KERN_ERR "%s: bus_id invalid: %s bus: %d\n",
100 __FUNCTION__, dev->name, dev->bus_id);
101 return -EINVAL;
102 }
103
104 dev->dev.parent = &sh_bus_devices[dev->bus_id];
105 dev->dev.bus = &sh_bus_types[dev->bus_id];
106
107 /* This is needed for USB OHCI to work */
108 if (dev->dma_mask)
109 dev->dev.dma_mask = dev->dma_mask;
110
111 snprintf(dev->dev.bus_id, BUS_ID_SIZE, "%s%u",
112 dev->name, dev->dev_id);
113
114 printk(KERN_INFO "Registering SH device '%s'. Parent at %s\n",
115 dev->dev.bus_id, dev->dev.parent->bus_id);
116
117 return device_register(&dev->dev);
118}
119
120void sh_device_unregister(struct sh_dev *dev)
121{
122 device_unregister(&dev->dev);
123}
124
125int sh_driver_register(struct sh_driver *drv)
126{
127 if (!drv)
128 return -EINVAL;
129
130 if (drv->bus_id < 0 || drv->bus_id >= SH_NR_BUSES) {
131 printk(KERN_ERR "%s: bus_id invalid: bus: %d device %d\n",
132 __FUNCTION__, drv->bus_id, drv->dev_id);
133 return -EINVAL;
134 }
135
136 drv->drv.probe = sh_device_probe;
137 drv->drv.remove = sh_device_remove;
138 drv->drv.bus = &sh_bus_types[drv->bus_id];
139
140 return driver_register(&drv->drv);
141}
142
143void sh_driver_unregister(struct sh_driver *drv)
144{
145 driver_unregister(&drv->drv);
146}
147
148static int __init sh_bus_init(void)
149{
150 int i, ret = 0;
151
152 for (i = 0; i < SH_NR_BUSES; i++) {
153 ret = device_register(&sh_bus_devices[i]);
154 if (ret != 0) {
155 printk(KERN_ERR "Unable to register bus device %s\n",
156 sh_bus_devices[i].bus_id);
157 continue;
158 }
159
160 ret = bus_register(&sh_bus_types[i]);
161 if (ret != 0) {
162 printk(KERN_ERR "Unable to register bus %s\n",
163 sh_bus_types[i].name);
164 device_unregister(&sh_bus_devices[i]);
165 }
166 }
167
168 printk(KERN_INFO "SH Virtual Bus initialized\n");
169
170 return ret;
171}
172
173static void __exit sh_bus_exit(void)
174{
175 int i;
176
177 for (i = 0; i < SH_NR_BUSES; i++) {
178 bus_unregister(&sh_bus_types[i]);
179 device_unregister(&sh_bus_devices[i]);
180 }
181}
182
183module_init(sh_bus_init);
184module_exit(sh_bus_exit);
185
186MODULE_AUTHOR("Paul Mundt <lethal@linux-sh.org>");
187MODULE_DESCRIPTION("SH Virtual Bus");
188MODULE_LICENSE("GPL");
189
190EXPORT_SYMBOL(sh_bus_types);
191EXPORT_SYMBOL(sh_device_register);
192EXPORT_SYMBOL(sh_device_unregister);
193EXPORT_SYMBOL(sh_driver_register);
194EXPORT_SYMBOL(sh_driver_unregister);
195
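
The virtual bus above matches devices to drivers purely by dev_id (see sh_bus_match()). A hedged sketch of how a peripheral driver might hook into it; the EXAMPLE_DEV_ID value and the exact struct sh_driver layout are assumptions taken from the accessors used in this file, with the real definitions living in <asm/bus-sh.h>, which is not part of this diff:

#include <linux/module.h>
#include <asm/bus-sh.h>

#define EXAMPLE_DEV_ID	42	/* hypothetical id shared by device and driver */

static int example_probe(struct sh_dev *dev)
{
	/* map resources, register subsystem interfaces, etc. */
	return 0;
}

static int example_remove(struct sh_dev *dev)
{
	return 0;
}

static struct sh_driver example_driver = {
	.drv	= {
		.name	= "example",
	},
	.dev_id	= EXAMPLE_DEV_ID,
	.bus_id	= 0,		/* index into sh_bus_types[]; the virtual bus above */
	.probe	= example_probe,
	.remove	= example_remove,
};

static int __init example_init(void)
{
	return sh_driver_register(&example_driver);
}
module_init(example_init);
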
diff --git a/arch/sh/kernel/cpu/init.c b/arch/sh/kernel/cpu/init.c
new file mode 100644
index 000000000000..cf94e8ef17c5
--- /dev/null
+++ b/arch/sh/kernel/cpu/init.c
@@ -0,0 +1,222 @@
1/*
2 * arch/sh/kernel/cpu/init.c
3 *
4 * CPU init code
5 *
6 * Copyright (C) 2002, 2003 Paul Mundt
7 *
8 * This file is subject to the terms and conditions of the GNU General Public
9 * License. See the file "COPYING" in the main directory of this archive
10 * for more details.
11 */
12#include <linux/init.h>
13#include <linux/kernel.h>
14#include <asm/processor.h>
15#include <asm/uaccess.h>
16#include <asm/system.h>
17#include <asm/cacheflush.h>
18#include <asm/cache.h>
19#include <asm/io.h>
20
21extern void detect_cpu_and_cache_system(void);
22
23/*
24 * Generic wrapper for command line arguments to disable on-chip
25 * peripherals (nofpu, nodsp, and so forth).
26 */
27#define onchip_setup(x) \
28static int x##_disabled __initdata = 0; \
29 \
30static int __init x##_setup(char *opts) \
31{ \
32 x##_disabled = 1; \
33 return 0; \
34} \
35__setup("no" __stringify(x), x##_setup);
36
37onchip_setup(fpu);
38onchip_setup(dsp);
39
40/*
41 * Generic first-level cache init
42 */
43static void __init cache_init(void)
44{
45 unsigned long ccr, flags;
46
47 if (cpu_data->type == CPU_SH_NONE)
48 panic("Unknown CPU");
49
50 jump_to_P2();
51 ccr = ctrl_inl(CCR);
52
53 /*
54 * If the cache is already enabled .. flush it.
55 */
56 if (ccr & CCR_CACHE_ENABLE) {
57 unsigned long ways, waysize, addrstart;
58
59 waysize = cpu_data->dcache.sets;
60
61 /*
62 * If the OC is already in RAM mode, we only have
63 * half of the entries to flush..
64 */
65 if (ccr & CCR_CACHE_ORA)
66 waysize >>= 1;
67
68 waysize <<= cpu_data->dcache.entry_shift;
69
70#ifdef CCR_CACHE_EMODE
71 /* If EMODE is not set, we only have 1 way to flush. */
72 if (!(ccr & CCR_CACHE_EMODE))
73 ways = 1;
74 else
75#endif
76 ways = cpu_data->dcache.ways;
77
78 addrstart = CACHE_OC_ADDRESS_ARRAY;
79 do {
80 unsigned long addr;
81
82 for (addr = addrstart;
83 addr < addrstart + waysize;
84 addr += cpu_data->dcache.linesz)
85 ctrl_outl(0, addr);
86
87 addrstart += cpu_data->dcache.way_incr;
88 } while (--ways);
89 }
90
91 /*
92 * Default CCR values .. enable the caches
93 * and invalidate them immediately..
94 */
95 flags = CCR_CACHE_ENABLE | CCR_CACHE_INVALIDATE;
96
97#ifdef CCR_CACHE_EMODE
98 /* Force EMODE if possible */
99 if (cpu_data->dcache.ways > 1)
100 flags |= CCR_CACHE_EMODE;
101#endif
102
103#ifdef CONFIG_SH_WRITETHROUGH
104 /* Turn on Write-through caching */
105 flags |= CCR_CACHE_WT;
106#else
107 /* .. or default to Write-back */
108 flags |= CCR_CACHE_CB;
109#endif
110
111#ifdef CONFIG_SH_OCRAM
112 /* Turn on OCRAM -- halve the OC */
113 flags |= CCR_CACHE_ORA;
114 cpu_data->dcache.sets >>= 1;
115#endif
116
117 ctrl_outl(flags, CCR);
118 back_to_P1();
119}
120
121#ifdef CONFIG_SH_DSP
122static void __init release_dsp(void)
123{
124 unsigned long sr;
125
126 /* Clear SR.DSP bit */
127 __asm__ __volatile__ (
128 "stc\tsr, %0\n\t"
129 "and\t%1, %0\n\t"
130 "ldc\t%0, sr\n\t"
131 : "=&r" (sr)
132 : "r" (~SR_DSP)
133 );
134}
135
136static void __init dsp_init(void)
137{
138 unsigned long sr;
139
140 /*
141 * Set the SR.DSP bit, wait for one instruction, and then read
142 * back the SR value.
143 */
144 __asm__ __volatile__ (
145 "stc\tsr, %0\n\t"
146 "or\t%1, %0\n\t"
147 "ldc\t%0, sr\n\t"
148 "nop\n\t"
149 "stc\tsr, %0\n\t"
150 : "=&r" (sr)
151 : "r" (SR_DSP)
152 );
153
154 /* If the DSP bit is still set, this CPU has a DSP */
155 if (sr & SR_DSP)
156 cpu_data->flags |= CPU_HAS_DSP;
157
158 /* Now that we've determined the DSP status, clear the DSP bit. */
159 release_dsp();
160}
161#endif /* CONFIG_SH_DSP */
162
163/**
164 * sh_cpu_init
165 *
166 * This is our initial entry point for each CPU, and is invoked on the boot
167 * CPU prior to calling start_kernel(). For SMP, a combination of this and
168 * start_secondary() will bring up each processor to a ready state prior
169 * to hand forking the idle loop.
170 *
171 * We do all of the basic processor init here, including setting up the
172 * caches, FPU, DSP, kicking the UBC, etc. By the time start_kernel() is
173 * hit (and subsequently platform_setup()) things like determining the
174 * CPU subtype and initial configuration will all be done.
175 *
176 * Each processor family is still responsible for doing its own probing
177 * and cache configuration in detect_cpu_and_cache_system().
178 */
179asmlinkage void __init sh_cpu_init(void)
180{
181 /* First, probe the CPU */
182 detect_cpu_and_cache_system();
183
184 /* Init the cache */
185 cache_init();
186
187 /* Disable the FPU */
188 if (fpu_disabled) {
189 printk("FPU Disabled\n");
190 cpu_data->flags &= ~CPU_HAS_FPU;
191 disable_fpu();
192 }
193
194 /* FPU initialization */
195 if ((cpu_data->flags & CPU_HAS_FPU)) {
196 clear_thread_flag(TIF_USEDFPU);
197 clear_used_math();
198 }
199
200#ifdef CONFIG_SH_DSP
201 /* Probe for DSP */
202 dsp_init();
203
204 /* Disable the DSP */
205 if (dsp_disabled) {
206 printk("DSP Disabled\n");
207 cpu_data->flags &= ~CPU_HAS_DSP;
208 release_dsp();
209 }
210#endif
211
212#ifdef CONFIG_UBC_WAKEUP
213 /*
214 * Some brain-damaged loaders decided it would be a good idea to put
215 * the UBC to sleep. This causes some issues when it comes to things
216 * like PTRACE_SINGLESTEP or doing hardware watchpoints in GDB. So ..
217 * we wake it up and hope that all is well.
218 */
219 ubc_wakeup();
220#endif
221}
222
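
The onchip_setup() wrapper near the top of init.c is what wires up the "nofpu" and "nodsp" kernel command-line options. Expanded by hand for the fpu case it is equivalent to the following (an illustrative expansion of the macro, not additional code in the commit):

static int fpu_disabled __initdata = 0;

static int __init fpu_setup(char *opts)
{
	fpu_disabled = 1;
	return 0;
}
__setup("nofpu", fpu_setup);

Booting with "nofpu" on the command line therefore sets fpu_disabled, and sh_cpu_init() responds by clearing CPU_HAS_FPU and calling disable_fpu().
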
diff --git a/arch/sh/kernel/cpu/irq_imask.c b/arch/sh/kernel/cpu/irq_imask.c
new file mode 100644
index 000000000000..f76901e732fb
--- /dev/null
+++ b/arch/sh/kernel/cpu/irq_imask.c
@@ -0,0 +1,116 @@
1/* $Id: irq_imask.c,v 1.1.2.1 2002/11/17 10:53:43 mrbrown Exp $
2 *
3 * linux/arch/sh/kernel/irq_imask.c
4 *
5 * Copyright (C) 1999, 2000 Niibe Yutaka
6 *
7 * Simple interrupt handling using IMASK of SR register.
8 *
9 */
10
11/* NOTE: Will not work on level 15 */
12
13
14#include <linux/ptrace.h>
15#include <linux/errno.h>
16#include <linux/kernel_stat.h>
17#include <linux/signal.h>
18#include <linux/sched.h>
19#include <linux/interrupt.h>
20#include <linux/init.h>
21#include <linux/bitops.h>
22
23#include <asm/system.h>
24#include <asm/irq.h>
25
26#include <linux/spinlock.h>
27#include <linux/cache.h>
28#include <linux/irq.h>
29
30/* Bitmap of IRQ masked */
31static unsigned long imask_mask = 0x7fff;
32static int interrupt_priority = 0;
33
34static void enable_imask_irq(unsigned int irq);
35static void disable_imask_irq(unsigned int irq);
36static void shutdown_imask_irq(unsigned int irq);
37static void mask_and_ack_imask(unsigned int);
38static void end_imask_irq(unsigned int irq);
39
40#define IMASK_PRIORITY 15
41
42static unsigned int startup_imask_irq(unsigned int irq)
43{
44 /* Nothing to do */
45 return 0; /* never anything pending */
46}
47
48static struct hw_interrupt_type imask_irq_type = {
49 "SR.IMASK",
50 startup_imask_irq,
51 shutdown_imask_irq,
52 enable_imask_irq,
53 disable_imask_irq,
54 mask_and_ack_imask,
55 end_imask_irq
56};
57
58static inline void set_interrupt_registers(int ip)
59{
60 unsigned long __dummy;
61
62 asm volatile("ldc %2, r6_bank\n\t"
63 "stc sr, %0\n\t"
64 "and #0xf0, %0\n\t"
65 "shlr2 %0\n\t"
66 "cmp/eq #0x3c, %0\n\t"
67 "bt/s 1f ! CLI-ed\n\t"
68 " stc sr, %0\n\t"
69 "and %1, %0\n\t"
70 "or %2, %0\n\t"
71 "ldc %0, sr\n"
72 "1:"
73 : "=&z" (__dummy)
74 : "r" (~0xf0), "r" (ip << 4)
75 : "t");
76}
77
78static void disable_imask_irq(unsigned int irq)
79{
80 clear_bit(irq, &imask_mask);
81 if (interrupt_priority < IMASK_PRIORITY - irq)
82 interrupt_priority = IMASK_PRIORITY - irq;
83
84 set_interrupt_registers(interrupt_priority);
85}
86
87static void enable_imask_irq(unsigned int irq)
88{
89 set_bit(irq, &imask_mask);
90 interrupt_priority = IMASK_PRIORITY - ffz(imask_mask);
91
92 set_interrupt_registers(interrupt_priority);
93}
94
95static void mask_and_ack_imask(unsigned int irq)
96{
97 disable_imask_irq(irq);
98}
99
100static void end_imask_irq(unsigned int irq)
101{
102 if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS)))
103 enable_imask_irq(irq);
104}
105
106static void shutdown_imask_irq(unsigned int irq)
107{
108 /* Nothing to do */
109}
110
111void make_imask_irq(unsigned int irq)
112{
113 disable_irq_nosync(irq);
114 irq_desc[irq].handler = &imask_irq_type;
115 enable_irq(irq);
116}
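
The imask scheme above maps IRQ numbers onto the 4-bit IMASK field of SR: masking IRQ n raises the CPU interrupt level to IMASK_PRIORITY - n, which blocks that level and everything below it. A small illustration of the bookkeeping done by disable_imask_irq(); the helper name and the fixed priority of 15 are just restatements of the code above:

/* Illustrative only: mirrors the priority computation in disable_imask_irq(). */
static int example_imask_priority(unsigned int irq)
{
	int priority = 0;

	/* disable_imask_irq() first does clear_bit(irq, &imask_mask), then: */
	if (priority < 15 - (int)irq)
		priority = 15 - irq;	/* IMASK_PRIORITY - irq */

	/* set_interrupt_registers(priority) writes priority << 4 into SR.IMASK,
	 * e.g. level 12 for irq == 3. */
	return priority;
}
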
diff --git a/arch/sh/kernel/cpu/irq_ipr.c b/arch/sh/kernel/cpu/irq_ipr.c
new file mode 100644
index 000000000000..7ea3d2d030e5
--- /dev/null
+++ b/arch/sh/kernel/cpu/irq_ipr.c
@@ -0,0 +1,339 @@
1/* $Id: irq_ipr.c,v 1.1.2.1 2002/11/17 10:53:43 mrbrown Exp $
2 *
3 * linux/arch/sh/kernel/irq_ipr.c
4 *
5 * Copyright (C) 1999 Niibe Yutaka & Takeshi Yaegashi
6 * Copyright (C) 2000 Kazumoto Kojima
7 * Copyright (C) 2003 Takashi Kusuda <kusuda-takashi@hitachi-ul.co.jp>
8 *
9 * Interrupt handling for IPR-based IRQ.
10 *
11 * Supported system:
12 * On-chip supporting modules (TMU, RTC, etc.).
13 * On-chip supporting modules for SH7709/SH7709A/SH7729/SH7300.
14 * Hitachi SolutionEngine external I/O:
15 * MS7709SE01, MS7709ASE01, and MS7750SE01
16 *
17 */
18
19#include <linux/config.h>
20#include <linux/init.h>
21#include <linux/irq.h>
22#include <linux/module.h>
23
24#include <asm/system.h>
25#include <asm/io.h>
26#include <asm/machvec.h>
27
28struct ipr_data {
29 unsigned int addr; /* Address of Interrupt Priority Register */
30 int shift; /* Shifts of the 16-bit data */
31 int priority; /* The priority */
32};
33static struct ipr_data ipr_data[NR_IRQS];
34
35static void enable_ipr_irq(unsigned int irq);
36static void disable_ipr_irq(unsigned int irq);
37
38/* shutdown is same as "disable" */
39#define shutdown_ipr_irq disable_ipr_irq
40
41static void mask_and_ack_ipr(unsigned int);
42static void end_ipr_irq(unsigned int irq);
43
44static unsigned int startup_ipr_irq(unsigned int irq)
45{
46 enable_ipr_irq(irq);
47 return 0; /* never anything pending */
48}
49
50static struct hw_interrupt_type ipr_irq_type = {
51 "IPR-IRQ",
52 startup_ipr_irq,
53 shutdown_ipr_irq,
54 enable_ipr_irq,
55 disable_ipr_irq,
56 mask_and_ack_ipr,
57 end_ipr_irq
58};
59
60static void disable_ipr_irq(unsigned int irq)
61{
62 unsigned long val, flags;
63 unsigned int addr = ipr_data[irq].addr;
64 unsigned short mask = 0xffff ^ (0x0f << ipr_data[irq].shift);
65
66 /* Set the priority in IPR to 0 */
67 local_irq_save(flags);
68 val = ctrl_inw(addr);
69 val &= mask;
70 ctrl_outw(val, addr);
71 local_irq_restore(flags);
72}
73
74static void enable_ipr_irq(unsigned int irq)
75{
76 unsigned long val, flags;
77 unsigned int addr = ipr_data[irq].addr;
78 int priority = ipr_data[irq].priority;
79 unsigned short value = (priority << ipr_data[irq].shift);
80
81 /* Set priority in IPR back to original value */
82 local_irq_save(flags);
83 val = ctrl_inw(addr);
84 val |= value;
85 ctrl_outw(val, addr);
86 local_irq_restore(flags);
87}
88
89static void mask_and_ack_ipr(unsigned int irq)
90{
91 disable_ipr_irq(irq);
92
93#if defined(CONFIG_CPU_SUBTYPE_SH7707) || defined(CONFIG_CPU_SUBTYPE_SH7709) || \
94 defined(CONFIG_CPU_SUBTYPE_SH7300) || defined(CONFIG_CPU_SUBTYPE_SH7705)
95 /* This is needed when we use edge triggered setting */
96 /* XXX: Is it really needed? */
97 if (IRQ0_IRQ <= irq && irq <= IRQ5_IRQ) {
98 /* Clear external interrupt request */
99 int a = ctrl_inb(INTC_IRR0);
100 a &= ~(1 << (irq - IRQ0_IRQ));
101 ctrl_outb(a, INTC_IRR0);
102 }
103#endif
104}
105
106static void end_ipr_irq(unsigned int irq)
107{
108 if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS)))
109 enable_ipr_irq(irq);
110}
111
112void make_ipr_irq(unsigned int irq, unsigned int addr, int pos, int priority)
113{
114 disable_irq_nosync(irq);
115 ipr_data[irq].addr = addr;
116 ipr_data[irq].shift = pos*4; /* POSition (0-3) x 4 means shift */
117 ipr_data[irq].priority = priority;
118
119 irq_desc[irq].handler = &ipr_irq_type;
120 disable_ipr_irq(irq);
121}
122
123#if defined(CONFIG_CPU_SUBTYPE_SH7705) || \
124 defined(CONFIG_CPU_SUBTYPE_SH7707) || \
125 defined(CONFIG_CPU_SUBTYPE_SH7709)
126static unsigned char pint_map[256];
127static unsigned long portcr_mask = 0;
128
129static void enable_pint_irq(unsigned int irq);
130static void disable_pint_irq(unsigned int irq);
131
132/* shutdown is same as "disable" */
133#define shutdown_pint_irq disable_pint_irq
134
135static void mask_and_ack_pint(unsigned int);
136static void end_pint_irq(unsigned int irq);
137
138static unsigned int startup_pint_irq(unsigned int irq)
139{
140 enable_pint_irq(irq);
141 return 0; /* never anything pending */
142}
143
144static struct hw_interrupt_type pint_irq_type = {
145 "PINT-IRQ",
146 startup_pint_irq,
147 shutdown_pint_irq,
148 enable_pint_irq,
149 disable_pint_irq,
150 mask_and_ack_pint,
151 end_pint_irq
152};
153
154static void disable_pint_irq(unsigned int irq)
155{
156 unsigned long val, flags;
157
158 local_irq_save(flags);
159 val = ctrl_inw(INTC_INTER);
160 val &= ~(1 << (irq - PINT_IRQ_BASE));
161 ctrl_outw(val, INTC_INTER); /* disable PINTn */
162 portcr_mask &= ~(3 << (irq - PINT_IRQ_BASE)*2);
163 local_irq_restore(flags);
164}
165
166static void enable_pint_irq(unsigned int irq)
167{
168 unsigned long val, flags;
169
170 local_irq_save(flags);
171 val = ctrl_inw(INTC_INTER);
172 val |= 1 << (irq - PINT_IRQ_BASE);
173 ctrl_outw(val, INTC_INTER); /* enable PINTn */
174 portcr_mask |= 3 << (irq - PINT_IRQ_BASE)*2;
175 local_irq_restore(flags);
176}
177
178static void mask_and_ack_pint(unsigned int irq)
179{
180 disable_pint_irq(irq);
181}
182
183static void end_pint_irq(unsigned int irq)
184{
185 if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS)))
186 enable_pint_irq(irq);
187}
188
189void make_pint_irq(unsigned int irq)
190{
191 disable_irq_nosync(irq);
192 irq_desc[irq].handler = &pint_irq_type;
193 disable_pint_irq(irq);
194}
195#endif
196
197void __init init_IRQ(void)
198{
199#if defined(CONFIG_CPU_SUBTYPE_SH7705) || \
200 defined(CONFIG_CPU_SUBTYPE_SH7707) || \
201 defined(CONFIG_CPU_SUBTYPE_SH7709)
202 int i;
203#endif
204
205 make_ipr_irq(TIMER_IRQ, TIMER_IPR_ADDR, TIMER_IPR_POS, TIMER_PRIORITY);
206 make_ipr_irq(TIMER1_IRQ, TIMER1_IPR_ADDR, TIMER1_IPR_POS, TIMER1_PRIORITY);
207#if defined(CONFIG_SH_RTC)
208 make_ipr_irq(RTC_IRQ, RTC_IPR_ADDR, RTC_IPR_POS, RTC_PRIORITY);
209#endif
210
211#ifdef SCI_ERI_IRQ
212 make_ipr_irq(SCI_ERI_IRQ, SCI_IPR_ADDR, SCI_IPR_POS, SCI_PRIORITY);
213 make_ipr_irq(SCI_RXI_IRQ, SCI_IPR_ADDR, SCI_IPR_POS, SCI_PRIORITY);
214 make_ipr_irq(SCI_TXI_IRQ, SCI_IPR_ADDR, SCI_IPR_POS, SCI_PRIORITY);
215#endif
216
217#ifdef SCIF1_ERI_IRQ
218 make_ipr_irq(SCIF1_ERI_IRQ, SCIF1_IPR_ADDR, SCIF1_IPR_POS, SCIF1_PRIORITY);
219 make_ipr_irq(SCIF1_RXI_IRQ, SCIF1_IPR_ADDR, SCIF1_IPR_POS, SCIF1_PRIORITY);
220 make_ipr_irq(SCIF1_BRI_IRQ, SCIF1_IPR_ADDR, SCIF1_IPR_POS, SCIF1_PRIORITY);
221 make_ipr_irq(SCIF1_TXI_IRQ, SCIF1_IPR_ADDR, SCIF1_IPR_POS, SCIF1_PRIORITY);
222#endif
223
224#if defined(CONFIG_CPU_SUBTYPE_SH7300)
225 make_ipr_irq(SCIF0_IRQ, SCIF0_IPR_ADDR, SCIF0_IPR_POS, SCIF0_PRIORITY);
226 make_ipr_irq(DMTE2_IRQ, DMA1_IPR_ADDR, DMA1_IPR_POS, DMA1_PRIORITY);
227 make_ipr_irq(DMTE3_IRQ, DMA1_IPR_ADDR, DMA1_IPR_POS, DMA1_PRIORITY);
228 make_ipr_irq(VIO_IRQ, VIO_IPR_ADDR, VIO_IPR_POS, VIO_PRIORITY);
229#endif
230
231#ifdef SCIF_ERI_IRQ
232 make_ipr_irq(SCIF_ERI_IRQ, SCIF_IPR_ADDR, SCIF_IPR_POS, SCIF_PRIORITY);
233 make_ipr_irq(SCIF_RXI_IRQ, SCIF_IPR_ADDR, SCIF_IPR_POS, SCIF_PRIORITY);
234 make_ipr_irq(SCIF_BRI_IRQ, SCIF_IPR_ADDR, SCIF_IPR_POS, SCIF_PRIORITY);
235 make_ipr_irq(SCIF_TXI_IRQ, SCIF_IPR_ADDR, SCIF_IPR_POS, SCIF_PRIORITY);
236#endif
237
238#ifdef IRDA_ERI_IRQ
239 make_ipr_irq(IRDA_ERI_IRQ, IRDA_IPR_ADDR, IRDA_IPR_POS, IRDA_PRIORITY);
240 make_ipr_irq(IRDA_RXI_IRQ, IRDA_IPR_ADDR, IRDA_IPR_POS, IRDA_PRIORITY);
241 make_ipr_irq(IRDA_BRI_IRQ, IRDA_IPR_ADDR, IRDA_IPR_POS, IRDA_PRIORITY);
242 make_ipr_irq(IRDA_TXI_IRQ, IRDA_IPR_ADDR, IRDA_IPR_POS, IRDA_PRIORITY);
243#endif
244
245#if defined(CONFIG_CPU_SUBTYPE_SH7707) || defined(CONFIG_CPU_SUBTYPE_SH7709) || \
246 defined(CONFIG_CPU_SUBTYPE_SH7300) || defined(CONFIG_CPU_SUBTYPE_SH7705)
247 /*
248 * Initialize the Interrupt Controller (INTC)
249 * registers to their power on values
250 */
251
252 /*
253 * Enable external irq (INTC IRQ mode).
254 * You should set corresponding bits of PFC to "00"
255 * to enable these interrupts.
256 */
257 make_ipr_irq(IRQ0_IRQ, IRQ0_IPR_ADDR, IRQ0_IPR_POS, IRQ0_PRIORITY);
258 make_ipr_irq(IRQ1_IRQ, IRQ1_IPR_ADDR, IRQ1_IPR_POS, IRQ1_PRIORITY);
259 make_ipr_irq(IRQ2_IRQ, IRQ2_IPR_ADDR, IRQ2_IPR_POS, IRQ2_PRIORITY);
260 make_ipr_irq(IRQ3_IRQ, IRQ3_IPR_ADDR, IRQ3_IPR_POS, IRQ3_PRIORITY);
261 make_ipr_irq(IRQ4_IRQ, IRQ4_IPR_ADDR, IRQ4_IPR_POS, IRQ4_PRIORITY);
262 make_ipr_irq(IRQ5_IRQ, IRQ5_IPR_ADDR, IRQ5_IPR_POS, IRQ5_PRIORITY);
263#if !defined(CONFIG_CPU_SUBTYPE_SH7300)
264 make_ipr_irq(PINT0_IRQ, PINT0_IPR_ADDR, PINT0_IPR_POS, PINT0_PRIORITY);
265 make_ipr_irq(PINT8_IRQ, PINT8_IPR_ADDR, PINT8_IPR_POS, PINT8_PRIORITY);
266 enable_ipr_irq(PINT0_IRQ);
267 enable_ipr_irq(PINT8_IRQ);
268
269 for(i = 0; i < 16; i++)
270 make_pint_irq(PINT_IRQ_BASE + i);
271 for(i = 0; i < 256; i++)
272 {
273 if(i & 1) pint_map[i] = 0;
274 else if(i & 2) pint_map[i] = 1;
275 else if(i & 4) pint_map[i] = 2;
276 else if(i & 8) pint_map[i] = 3;
277 else if(i & 0x10) pint_map[i] = 4;
278 else if(i & 0x20) pint_map[i] = 5;
279 else if(i & 0x40) pint_map[i] = 6;
280 else if(i & 0x80) pint_map[i] = 7;
281 }
282#endif /* !CONFIG_CPU_SUBTYPE_SH7300 */
283#endif /* CONFIG_CPU_SUBTYPE_SH7707 || CONFIG_CPU_SUBTYPE_SH7709 || CONFIG_CPU_SUBTYPE_SH7300 || CONFIG_CPU_SUBTYPE_SH7705 */
284
285#ifdef CONFIG_CPU_SUBTYPE_ST40
286 init_IRQ_intc2();
287#endif
288
289 /* Perform the machine specific initialisation */
290 if (sh_mv.mv_init_irq != NULL) {
291 sh_mv.mv_init_irq();
292 }
293}
294#if defined(CONFIG_CPU_SUBTYPE_SH7707) || defined(CONFIG_CPU_SUBTYPE_SH7709) || \
295 defined(CONFIG_CPU_SUBTYPE_SH7300) || defined(CONFIG_CPU_SUBTYPE_SH7705)
296int ipr_irq_demux(int irq)
297{
298#if !defined(CONFIG_CPU_SUBTYPE_SH7300)
299 unsigned long creg, dreg, d, sav;
300
301 if(irq == PINT0_IRQ)
302 {
303#if defined(CONFIG_CPU_SUBTYPE_SH7707)
304 creg = PORT_PACR;
305 dreg = PORT_PADR;
306#else
307 creg = PORT_PCCR;
308 dreg = PORT_PCDR;
309#endif
310 sav = ctrl_inw(creg);
311 ctrl_outw(sav | portcr_mask, creg);
312 d = (~ctrl_inb(dreg) ^ ctrl_inw(INTC_ICR2)) & ctrl_inw(INTC_INTER) & 0xff;
313 ctrl_outw(sav, creg);
314 if(d == 0) return irq;
315 return PINT_IRQ_BASE + pint_map[d];
316 }
317 else if(irq == PINT8_IRQ)
318 {
319#if defined(CONFIG_CPU_SUBTYPE_SH7707)
320 creg = PORT_PBCR;
321 dreg = PORT_PBDR;
322#else
323 creg = PORT_PFCR;
324 dreg = PORT_PFDR;
325#endif
326 sav = ctrl_inw(creg);
327 ctrl_outw(sav | (portcr_mask >> 16), creg);
328 d = (~ctrl_inb(dreg) ^ (ctrl_inw(INTC_ICR2) >> 8)) & (ctrl_inw(INTC_INTER) >> 8) & 0xff;
329 ctrl_outw(sav, creg);
330 if(d == 0) return irq;
331 return PINT_IRQ_BASE + 8 + pint_map[d];
332 }
333#endif
334 return irq;
335}
336#endif
337
338EXPORT_SYMBOL(make_ipr_irq);
339
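
make_ipr_irq() above records, per IRQ, which Interrupt Priority Register holds its 4-bit priority field (addr), which nibble of that 16-bit register it occupies (pos, so shift = pos * 4), and the priority value to program. A worked example of the register arithmetic used by enable_ipr_irq()/disable_ipr_irq(); the pos and priority values are made up for illustration:

/* Example: priority field at nibble position 2 of an IPR, priority 8. */
static unsigned short example_ipr_enable_bits(void)
{
	int pos = 2, priority = 8;
	int shift = pos * 4;			/* ipr_data[irq].shift => 8 */

	/* enable_ipr_irq() ORs this into the register: 8 << 8 == 0x0800 */
	return priority << shift;
}

static unsigned short example_ipr_disable_mask(void)
{
	int shift = 2 * 4;

	/* disable_ipr_irq() ANDs with this to zero the nibble: 0xf0ff */
	return 0xffff ^ (0x0f << shift);
}
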
diff --git a/arch/sh/kernel/cpu/rtc.c b/arch/sh/kernel/cpu/rtc.c
new file mode 100644
index 000000000000..f8361f5e788b
--- /dev/null
+++ b/arch/sh/kernel/cpu/rtc.c
@@ -0,0 +1,136 @@
1/*
2 * linux/arch/sh/kernel/rtc.c -- SH3 / SH4 on-chip RTC support
3 *
4 * Copyright (C) 2000 Philipp Rumpf <prumpf@tux.org>
5 * Copyright (C) 1999 Tetsuya Okada & Niibe Yutaka
6 */
7
8#include <linux/init.h>
9#include <linux/kernel.h>
10#include <linux/sched.h>
11#include <linux/time.h>
12
13#include <asm/io.h>
14#include <asm/rtc.h>
15
16#ifndef BCD_TO_BIN
17#define BCD_TO_BIN(val) ((val)=((val)&15) + ((val)>>4)*10)
18#endif
19
20#ifndef BIN_TO_BCD
21#define BIN_TO_BCD(val) ((val)=(((val)/10)<<4) + (val)%10)
22#endif
23
24void sh_rtc_gettimeofday(struct timespec *ts)
25{
26 unsigned int sec128, sec, sec2, min, hr, wk, day, mon, yr, yr100, cf_bit;
27 unsigned long flags;
28
29 again:
30 do {
31 local_irq_save(flags);
32 ctrl_outb(0, RCR1); /* Clear CF-bit */
33 sec128 = ctrl_inb(R64CNT);
34 sec = ctrl_inb(RSECCNT);
35 min = ctrl_inb(RMINCNT);
36 hr = ctrl_inb(RHRCNT);
37 wk = ctrl_inb(RWKCNT);
38 day = ctrl_inb(RDAYCNT);
39 mon = ctrl_inb(RMONCNT);
40#if defined(CONFIG_CPU_SH4)
41 yr = ctrl_inw(RYRCNT);
42 yr100 = (yr >> 8);
43 yr &= 0xff;
44#else
45 yr = ctrl_inb(RYRCNT);
46 yr100 = (yr == 0x99) ? 0x19 : 0x20;
47#endif
48 sec2 = ctrl_inb(R64CNT);
49 cf_bit = ctrl_inb(RCR1) & RCR1_CF;
50 local_irq_restore(flags);
51 } while (cf_bit != 0 || ((sec128 ^ sec2) & RTC_BIT_INVERTED) != 0);
52
53 BCD_TO_BIN(yr100);
54 BCD_TO_BIN(yr);
55 BCD_TO_BIN(mon);
56 BCD_TO_BIN(day);
57 BCD_TO_BIN(hr);
58 BCD_TO_BIN(min);
59 BCD_TO_BIN(sec);
60
61 if (yr > 99 || mon < 1 || mon > 12 || day > 31 || day < 1 ||
62 hr > 23 || min > 59 || sec > 59) {
63 printk(KERN_ERR
64 "SH RTC: invalid value, resetting to 1 Jan 2000\n");
65 local_irq_save(flags);
66 ctrl_outb(RCR2_RESET, RCR2); /* Reset & Stop */
67 ctrl_outb(0, RSECCNT);
68 ctrl_outb(0, RMINCNT);
69 ctrl_outb(0, RHRCNT);
70 ctrl_outb(6, RWKCNT);
71 ctrl_outb(1, RDAYCNT);
72 ctrl_outb(1, RMONCNT);
73#if defined(CONFIG_CPU_SH4)
74 ctrl_outw(0x2000, RYRCNT);
75#else
76 ctrl_outb(0, RYRCNT);
77#endif
78 ctrl_outb(RCR2_RTCEN|RCR2_START, RCR2); /* Start */
79 goto again;
80 }
81
82#if RTC_BIT_INVERTED != 0
83 if ((sec128 & RTC_BIT_INVERTED))
84 sec--;
85#endif
86
87 ts->tv_sec = mktime(yr100 * 100 + yr, mon, day, hr, min, sec);
88 ts->tv_nsec = ((sec128 * 1000000) / 128) * 1000;
89}
90
91/*
92 * Changed to only care about tv_sec, and not the full timespec struct
93 * (i.e. tv_nsec). It can easily be switched to timespec for future cpus
94 * that support setting usec or nsec RTC values.
95 */
96int sh_rtc_settimeofday(const time_t secs)
97{
98 int retval = 0;
99 int real_seconds, real_minutes, cmos_minutes;
100 unsigned long flags;
101
102 local_irq_save(flags);
103 ctrl_outb(RCR2_RESET, RCR2); /* Reset pre-scaler & stop RTC */
104
105 cmos_minutes = ctrl_inb(RMINCNT);
106 BCD_TO_BIN(cmos_minutes);
107
108 /*
109 * since we're only adjusting minutes and seconds,
110 * don't interfere with hour overflow. This avoids
111 * messing with unknown time zones but requires your
112 * RTC not to be off by more than 15 minutes
113 */
114 real_seconds = secs % 60;
115 real_minutes = secs / 60;
116 if (((abs(real_minutes - cmos_minutes) + 15)/30) & 1)
117 real_minutes += 30; /* correct for half hour time zone */
118 real_minutes %= 60;
119
120 if (abs(real_minutes - cmos_minutes) < 30) {
121 BIN_TO_BCD(real_seconds);
122 BIN_TO_BCD(real_minutes);
123 ctrl_outb(real_seconds, RSECCNT);
124 ctrl_outb(real_minutes, RMINCNT);
125 } else {
126 printk(KERN_WARNING
127 "set_rtc_time: can't update from %d to %d\n",
128 cmos_minutes, real_minutes);
129 retval = -1;
130 }
131
132 ctrl_outb(RCR2_RTCEN|RCR2_START, RCR2); /* Start RTC */
133 local_irq_restore(flags);
134
135 return retval;
136}
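
The RTC holds its counters in BCD, which is what the BCD_TO_BIN()/BIN_TO_BCD() macros above convert. A quick worked check of that arithmetic, written as standalone helpers purely for illustration:

/* BCD_TO_BIN: 0x59 -> (9) + (5 * 10) = 59 */
static unsigned int example_bcd_to_bin(unsigned int val)
{
	return (val & 15) + (val >> 4) * 10;
}

/* BIN_TO_BCD: 47 -> (4 << 4) + 7 = 0x47 */
static unsigned int example_bin_to_bcd(unsigned int val)
{
	return ((val / 10) << 4) + val % 10;
}
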
diff --git a/arch/sh/kernel/cpu/sh2/Makefile b/arch/sh/kernel/cpu/sh2/Makefile
new file mode 100644
index 000000000000..389353fba608
--- /dev/null
+++ b/arch/sh/kernel/cpu/sh2/Makefile
@@ -0,0 +1,6 @@
1#
2# Makefile for the Linux/SuperH SH-2 backends.
3#
4
5obj-y := probe.o
6
diff --git a/arch/sh/kernel/cpu/sh2/probe.c b/arch/sh/kernel/cpu/sh2/probe.c
new file mode 100644
index 000000000000..f17a2a0d588e
--- /dev/null
+++ b/arch/sh/kernel/cpu/sh2/probe.c
@@ -0,0 +1,39 @@
1/*
2 * arch/sh/kernel/cpu/sh2/probe.c
3 *
4 * CPU Subtype Probing for SH-2.
5 *
6 * Copyright (C) 2002 Paul Mundt
7 *
8 * This file is subject to the terms and conditions of the GNU General Public
9 * License. See the file "COPYING" in the main directory of this archive
10 * for more details.
11 */
12
13
14#include <linux/init.h>
15#include <asm/processor.h>
16#include <asm/cache.h>
17
18int __init detect_cpu_and_cache_system(void)
19{
20 /*
21 * For now, assume SH7604 .. fix this later.
22 */
23 cpu_data->type = CPU_SH7604;
24 cpu_data->dcache.ways = 4;
25 cpu_data->dcache.way_shift = 6;
26 cpu_data->dcache.sets = 64;
27 cpu_data->dcache.entry_shift = 4;
28 cpu_data->dcache.linesz = L1_CACHE_BYTES;
29 cpu_data->dcache.flags = 0;
30
31 /*
32 * SH-2 doesn't have separate caches
33 */
34 cpu_data->dcache.flags |= SH_CACHE_COMBINED;
35 cpu_data->icache = cpu_data->dcache;
36
37 return 0;
38}
39
diff --git a/arch/sh/kernel/cpu/sh3/Makefile b/arch/sh/kernel/cpu/sh3/Makefile
new file mode 100644
index 000000000000..a64532e4dc63
--- /dev/null
+++ b/arch/sh/kernel/cpu/sh3/Makefile
@@ -0,0 +1,6 @@
1#
2# Makefile for the Linux/SuperH SH-3 backends.
3#
4
5obj-y := ex.o probe.o
6
diff --git a/arch/sh/kernel/cpu/sh3/ex.S b/arch/sh/kernel/cpu/sh3/ex.S
new file mode 100644
index 000000000000..966c0858b714
--- /dev/null
+++ b/arch/sh/kernel/cpu/sh3/ex.S
@@ -0,0 +1,199 @@
1/*
2 * arch/sh/kernel/cpu/sh3/ex.S
3 *
4 * The SH-3 exception vector table.
5 *
6 * Copyright (C) 1999, 2000, 2002 Niibe Yutaka
7 * Copyright (C) 2003 Paul Mundt
8 *
9 * This file is subject to the terms and conditions of the GNU General Public
10 * License. See the file "COPYING" in the main directory of this archive
11 * for more details.
12 *
13 */
14#include <linux/linkage.h>
15#include <linux/config.h>
16
17 .align 2
18 .data
19
20ENTRY(exception_handling_table)
21 .long exception_error /* 000 */
22 .long exception_error
23#if defined(CONFIG_MMU)
24 .long tlb_miss_load /* 040 */
25 .long tlb_miss_store
26 .long initial_page_write
27 .long tlb_protection_violation_load
28 .long tlb_protection_violation_store
29 .long address_error_load
30 .long address_error_store /* 100 */
31#else
32 .long exception_error ! tlb miss load /* 040 */
33 .long exception_error ! tlb miss store
34 .long exception_error ! initial page write
35 .long exception_error ! tlb prot violation load
36 .long exception_error ! tlb prot violation store
37 .long exception_error ! address error load
38 .long exception_error ! address error store /* 100 */
39#endif
40 .long exception_error ! fpu_exception /* 120 */
41 .long exception_error /* 140 */
42 .long system_call ! Unconditional Trap /* 160 */
43 .long exception_error ! reserved_instruction (filled by trap_init) /* 180 */
44 .long exception_error ! illegal_slot_instruction (filled by trap_init) /*1A0*/
45ENTRY(nmi_slot)
46#if defined (CONFIG_KGDB_NMI)
47 .long debug_enter /* 1C0 */ ! Allow trap to debugger
48#else
49 .long exception_none /* 1C0 */ ! Not implemented yet
50#endif
51ENTRY(user_break_point_trap)
52 .long break_point_trap /* 1E0 */
53ENTRY(interrupt_table)
54 ! external hardware
55 .long do_IRQ ! 0000 /* 200 */
56 .long do_IRQ ! 0001
57 .long do_IRQ ! 0010
58 .long do_IRQ ! 0011
59 .long do_IRQ ! 0100
60 .long do_IRQ ! 0101
61 .long do_IRQ ! 0110
62 .long do_IRQ ! 0111
63 .long do_IRQ ! 1000 /* 300 */
64 .long do_IRQ ! 1001
65 .long do_IRQ ! 1010
66 .long do_IRQ ! 1011
67 .long do_IRQ ! 1100
68 .long do_IRQ ! 1101
69 .long do_IRQ ! 1110
70 .long exception_error
71 ! Internal hardware
72 .long do_IRQ ! TMU0 tuni0 /* 400 */
73 .long do_IRQ ! TMU1 tuni1
74 .long do_IRQ ! TMU2 tuni2
75 .long do_IRQ ! ticpi2
76 .long do_IRQ ! RTC ati
77 .long do_IRQ ! pri
78 .long do_IRQ ! cui
79 .long do_IRQ ! SCI eri
80 .long do_IRQ ! rxi /* 500 */
81 .long do_IRQ ! txi
82 .long do_IRQ ! tei
83 .long do_IRQ ! WDT iti /* 560 */
84 .long do_IRQ ! REF rcmi
85 .long do_IRQ ! rovi
86 .long do_IRQ
87 .long do_IRQ /* 5E0 */
88#if defined(CONFIG_CPU_SUBTYPE_SH7707) || defined(CONFIG_CPU_SUBTYPE_SH7709) || \
89 defined(CONFIG_CPU_SUBTYPE_SH7300) || defined(CONFIG_CPU_SUBTYPE_SH7705)
90 .long do_IRQ ! 32 IRQ irq0 /* 600 */
91 .long do_IRQ ! 33 irq1
92 .long do_IRQ ! 34 irq2
93 .long do_IRQ ! 35 irq3
94 .long do_IRQ ! 36 irq4
95 .long do_IRQ ! 37 irq5
96 .long do_IRQ ! 38
97 .long do_IRQ ! 39
98 .long do_IRQ ! 40 PINT pint0-7 /* 700 */
99 .long do_IRQ ! 41 pint8-15
100 .long do_IRQ ! 42
101 .long do_IRQ ! 43
102 .long do_IRQ ! 44
103 .long do_IRQ ! 45
104 .long do_IRQ ! 46
105 .long do_IRQ ! 47
106 .long do_IRQ ! 48 DMAC dei0 /* 800 */
107 .long do_IRQ ! 49 dei1
108 .long do_IRQ ! 50 dei2
109 .long do_IRQ ! 51 dei3
110 .long do_IRQ ! 52 IrDA eri1
111 .long do_IRQ ! 53 rxi1
112 .long do_IRQ ! 54 bri1
113 .long do_IRQ ! 55 txi1
114 .long do_IRQ ! 56 SCIF eri2
115 .long do_IRQ ! 57 rxi2
116 .long do_IRQ ! 58 bri2
117 .long do_IRQ ! 59 txi2
118 .long do_IRQ ! 60 ADC adi /* 980 */
119#if defined(CONFIG_CPU_SUBTYPE_SH7705)
120 .long exception_none ! 61 /* 9A0 */
121 .long exception_none ! 62
122 .long exception_none ! 63
123 .long exception_none ! 64 /* A00 */
124 .long do_IRQ ! 65 USB usi0
125 .long do_IRQ ! 66 usi1
126 .long exception_none ! 67
127 .long exception_none ! 68
128 .long exception_none ! 69
129 .long exception_none ! 70
130 .long exception_none ! 71
131 .long exception_none ! 72 /* B00 */
132 .long exception_none ! 73
133 .long exception_none ! 74
134 .long exception_none ! 75
135 .long exception_none ! 76
136 .long exception_none ! 77
137 .long exception_none ! 78
138 .long exception_none ! 79
139 .long do_IRQ ! 80 TPU0 tpi0 /* C00 */
140 .long do_IRQ ! 81 TPU1 tpi1
141 .long exception_none ! 82
142 .long exception_none ! 83
143 .long do_IRQ ! 84 TPU2 tpi2
144 .long do_IRQ ! 85 TPU3 tpi3 /* CA0 */
145#endif
146#if defined(CONFIG_CPU_SUBTYPE_SH7707) || defined(CONFIG_CPU_SUBTYPE_SH7300)
147 .long do_IRQ ! 61 LCDC lcdi /* 9A0 */
148 .long do_IRQ ! 62 PCC pcc0i
149 .long do_IRQ ! 63 pcc1i /* 9E0 */
150#endif
151#if defined(CONFIG_CPU_SUBTYPE_SH7300)
152 .long do_IRQ ! 64
153 .long do_IRQ ! 65
154 .long do_IRQ ! 66
155 .long do_IRQ ! 67
156 .long do_IRQ ! 68
157 .long do_IRQ ! 69
158 .long do_IRQ ! 70
159 .long do_IRQ ! 71
160 .long do_IRQ ! 72
161 .long do_IRQ ! 73
162 .long do_IRQ ! 74
163 .long do_IRQ ! 75
164 .long do_IRQ ! 76
165 .long do_IRQ ! 77
166 .long do_IRQ ! 78
167 .long do_IRQ ! 79
168 .long do_IRQ ! 80 SCIF0(SH7300)
169 .long do_IRQ ! 81
170 .long do_IRQ ! 82
171 .long do_IRQ ! 83
172 .long do_IRQ ! 84
173 .long do_IRQ ! 85
174 .long do_IRQ ! 86
175 .long do_IRQ ! 87
176 .long do_IRQ ! 88
177 .long do_IRQ ! 89
178 .long do_IRQ ! 90
179 .long do_IRQ ! 91
180 .long do_IRQ ! 92
181 .long do_IRQ ! 93
182 .long do_IRQ ! 94
183 .long do_IRQ ! 95
184 .long do_IRQ ! 96
185 .long do_IRQ ! 97
186 .long do_IRQ ! 98
187 .long do_IRQ ! 99
188 .long do_IRQ ! 100
189 .long do_IRQ ! 101
190 .long do_IRQ ! 102
191 .long do_IRQ ! 103
192 .long do_IRQ ! 104
193 .long do_IRQ ! 105
194 .long do_IRQ ! 106
195 .long do_IRQ ! 107
196 .long do_IRQ ! 108
197#endif
198#endif
199
diff --git a/arch/sh/kernel/cpu/sh3/probe.c b/arch/sh/kernel/cpu/sh3/probe.c
new file mode 100644
index 000000000000..5cdc88638601
--- /dev/null
+++ b/arch/sh/kernel/cpu/sh3/probe.c
@@ -0,0 +1,97 @@
1/*
2 * arch/sh/kernel/cpu/sh3/probe.c
3 *
4 * CPU Subtype Probing for SH-3.
5 *
6 * Copyright (C) 1999, 2000 Niibe Yutaka
7 * Copyright (C) 2002 Paul Mundt
8 *
9 * This file is subject to the terms and conditions of the GNU General Public
10 * License. See the file "COPYING" in the main directory of this archive
11 * for more details.
12 */
13
14#include <linux/init.h>
15#include <asm/processor.h>
16#include <asm/cache.h>
17#include <asm/io.h>
18
19int __init detect_cpu_and_cache_system(void)
20{
21 unsigned long addr0, addr1, data0, data1, data2, data3;
22
23 jump_to_P2();
24 /*
25 * Check whether the cache entry shadows or not.
26 * When shadowed, it's a 128-entry system;
27 * otherwise, it's a 256-entry system.
28 */
29 addr0 = CACHE_OC_ADDRESS_ARRAY + (3 << 12);
30 addr1 = CACHE_OC_ADDRESS_ARRAY + (1 << 12);
31
32 /* First, write back & invalidate */
33 data0 = ctrl_inl(addr0);
34 ctrl_outl(data0&~(SH_CACHE_VALID|SH_CACHE_UPDATED), addr0);
35 data1 = ctrl_inl(addr1);
36 ctrl_outl(data1&~(SH_CACHE_VALID|SH_CACHE_UPDATED), addr1);
37
38 /* Next, check if there's shadow or not */
39 data0 = ctrl_inl(addr0);
40 data0 ^= SH_CACHE_VALID;
41 ctrl_outl(data0, addr0);
42 data1 = ctrl_inl(addr1);
43 data2 = data1 ^ SH_CACHE_VALID;
44 ctrl_outl(data2, addr1);
45 data3 = ctrl_inl(addr0);
46
47 /* Lastly, invalidate them. */
48 ctrl_outl(data0&~SH_CACHE_VALID, addr0);
49 ctrl_outl(data2&~SH_CACHE_VALID, addr1);
50
51 back_to_P1();
52
53 cpu_data->dcache.ways = 4;
54 cpu_data->dcache.entry_shift = 4;
55 cpu_data->dcache.linesz = L1_CACHE_BYTES;
56 cpu_data->dcache.flags = 0;
57
58 /*
59 * The 7709A/7729 has a 16K cache (256 entries), while the 7702 has
60 * only 2K (direct mapped). The 7702 is not supported (yet).
61 */
62 if (data0 == data1 && data2 == data3) { /* Shadow */
63 cpu_data->dcache.way_incr = (1 << 11);
64 cpu_data->dcache.entry_mask = 0x7f0;
65 cpu_data->dcache.sets = 128;
66 cpu_data->type = CPU_SH7708;
67
68 cpu_data->flags |= CPU_HAS_MMU_PAGE_ASSOC;
69 } else { /* 7709A or 7729 */
70 cpu_data->dcache.way_incr = (1 << 12);
71 cpu_data->dcache.entry_mask = 0xff0;
72 cpu_data->dcache.sets = 256;
73 cpu_data->type = CPU_SH7729;
74
75#if defined(CONFIG_CPU_SUBTYPE_SH7705)
76 cpu_data->type = CPU_SH7705;
77
78#if defined(CONFIG_SH7705_CACHE_32KB)
79 cpu_data->dcache.way_incr = (1 << 13);
80 cpu_data->dcache.entry_mask = 0x1ff0;
81 cpu_data->dcache.sets = 512;
82 ctrl_outl(CCR_CACHE_32KB, CCR3);
83#else
84 ctrl_outl(CCR_CACHE_16KB, CCR3);
85#endif
86#endif
87 }
88
89 /*
90 * SH-3 doesn't have separate caches
91 */
92 cpu_data->dcache.flags |= SH_CACHE_COMBINED;
93 cpu_data->icache = cpu_data->dcache;
94
95 return 0;
96}
97
diff --git a/arch/sh/kernel/cpu/sh4/Makefile b/arch/sh/kernel/cpu/sh4/Makefile
new file mode 100644
index 000000000000..ead1071eac73
--- /dev/null
+++ b/arch/sh/kernel/cpu/sh4/Makefile
@@ -0,0 +1,10 @@
1#
2# Makefile for the Linux/SuperH SH-4 backends.
3#
4
5obj-y := ex.o probe.o
6
7obj-$(CONFIG_SH_FPU) += fpu.o
8obj-$(CONFIG_CPU_SUBTYPE_ST40STB1) += irq_intc2.o
9obj-$(CONFIG_SH_STORE_QUEUES) += sq.o
10
diff --git a/arch/sh/kernel/cpu/sh4/ex.S b/arch/sh/kernel/cpu/sh4/ex.S
new file mode 100644
index 000000000000..8221e9d15515
--- /dev/null
+++ b/arch/sh/kernel/cpu/sh4/ex.S
@@ -0,0 +1,384 @@
1/*
2 * arch/sh/kernel/cpu/sh4/ex.S
3 *
4 * The SH-4 exception vector table.
5 *
6 * Copyright (C) 1999, 2000, 2002 Niibe Yutaka
7 * Copyright (C) 2003 Paul Mundt
8 *
9 * This file is subject to the terms and conditions of the GNU General Public
10 * License. See the file "COPYING" in the main directory of this archive
11 * for more details.
12 *
13 */
14#include <linux/linkage.h>
15#include <linux/config.h>
16
17 .align 2
18 .data
19
20ENTRY(exception_handling_table)
21 .long exception_error /* 000 */
22 .long exception_error
23#if defined(CONFIG_MMU)
24 .long tlb_miss_load /* 040 */
25 .long tlb_miss_store
26 .long initial_page_write
27 .long tlb_protection_violation_load
28 .long tlb_protection_violation_store
29 .long address_error_load
30 .long address_error_store /* 100 */
31#else
32 .long exception_error ! tlb miss load /* 040 */
33 .long exception_error ! tlb miss store
34 .long exception_error ! initial page write
35 .long exception_error ! tlb prot violation load
36 .long exception_error ! tlb prot violation store
37 .long exception_error ! address error load
38 .long exception_error ! address error store /* 100 */
39#endif
40#if defined(CONFIG_SH_FPU)
41 .long do_fpu_error /* 120 */
42#else
43 .long exception_error /* 120 */
44#endif
45 .long exception_error /* 140 */
46 .long system_call ! Unconditional Trap /* 160 */
47 .long exception_error ! reserved_instruction (filled by trap_init) /* 180 */
48 .long exception_error ! illegal_slot_instruction (filled by trap_init) /*1A0*/
49ENTRY(nmi_slot)
50#if defined (CONFIG_KGDB_NMI)
51 .long debug_enter /* 1C0 */ ! Allow trap to debugger
52#else
53 .long exception_none /* 1C0 */ ! Not implemented yet
54#endif
55ENTRY(user_break_point_trap)
56 .long break_point_trap /* 1E0 */
57ENTRY(interrupt_table)
58 ! external hardware
59 .long do_IRQ ! 0000 /* 200 */
60 .long do_IRQ ! 0001
61 .long do_IRQ ! 0010
62 .long do_IRQ ! 0011
63 .long do_IRQ ! 0100
64 .long do_IRQ ! 0101
65 .long do_IRQ ! 0110
66 .long do_IRQ ! 0111
67 .long do_IRQ ! 1000 /* 300 */
68 .long do_IRQ ! 1001
69 .long do_IRQ ! 1010
70 .long do_IRQ ! 1011
71 .long do_IRQ ! 1100
72 .long do_IRQ ! 1101
73 .long do_IRQ ! 1110
74 .long exception_error
75 ! Internal hardware
76 .long do_IRQ ! TMU0 tuni0 /* 400 */
77 .long do_IRQ ! TMU1 tuni1
78 .long do_IRQ ! TMU2 tuni2
79 .long do_IRQ ! ticpi2
80#if defined(CONFIG_CPU_SUBTYPE_SH7760)
81 .long exception_error
82 .long exception_error
83 .long exception_error
84 .long exception_error
85 .long exception_error /* 500 */
86 .long exception_error
87 .long exception_error
88#else
89 .long do_IRQ ! RTC ati
90 .long do_IRQ ! pri
91 .long do_IRQ ! cui
92 .long do_IRQ ! SCI eri
93 .long do_IRQ ! rxi /* 500 */
94 .long do_IRQ ! txi
95 .long do_IRQ ! tei
96#endif
97 .long do_IRQ ! WDT iti /* 560 */
98 .long do_IRQ ! REF rcmi
99 .long do_IRQ ! rovi
100 .long do_IRQ
101 .long do_IRQ /* 5E0 */
102 .long do_IRQ ! 32 Hitachi UDI /* 600 */
103 .long do_IRQ ! 33 GPIO
104 .long do_IRQ ! 34 DMAC dmte0
105 .long do_IRQ ! 35 dmte1
106 .long do_IRQ ! 36 dmte2
107 .long do_IRQ ! 37 dmte3
108 .long do_IRQ ! 38 dmae
109 .long exception_error ! 39 /* 6E0 */
110#if defined(CONFIG_CPU_SUBTYPE_SH7760)
111 .long exception_error /* 700 */
112 .long exception_error
113 .long exception_error
114 .long exception_error /* 760 */
115#else
116 .long do_IRQ ! 40 SCIF eri /* 700 */
117 .long do_IRQ ! 41 rxi
118 .long do_IRQ ! 42 bri
119 .long do_IRQ ! 43 txi
120#endif
121#if CONFIG_NR_ONCHIP_DMA_CHANNELS == 8
122 .long do_IRQ ! 44 DMAC dmte4 /* 780 */
123 .long do_IRQ ! 45 dmte5
124 .long do_IRQ ! 46 dmte6
125 .long do_IRQ ! 47 dmte7 /* 7E0 */
126#else
127 .long exception_error ! 44 /* 780 */
128 .long exception_error ! 45
129 .long exception_error ! 46
130 .long exception_error ! 47
131#endif
132#if defined(CONFIG_SH_FPU)
133 .long do_fpu_state_restore ! 48 /* 800 */
134 .long do_fpu_state_restore ! 49 /* 820 */
135#else
136 .long exception_error
137 .long exception_error
138#endif
139#if defined(CONFIG_CPU_SUBTYPE_SH7751)
140 .long exception_error /* 840 */
141 .long exception_error
142 .long exception_error
143 .long exception_error
144 .long exception_error
145 .long exception_error
146 .long exception_error /* 900 */
147 .long exception_error
148 .long exception_error
149 .long exception_error
150 .long exception_error
151 .long exception_error
152 .long exception_error
153 .long exception_error
154 .long do_IRQ ! PCI serr /* A00 */
155 .long do_IRQ ! dma3
156 .long do_IRQ ! dma2
157 .long do_IRQ ! dma1
158 .long do_IRQ ! dma0
159 .long do_IRQ ! pwon
160 .long do_IRQ ! pwdwn
161 .long do_IRQ ! err
162 .long do_IRQ ! TMU3 tuni3 /* B00 */
163 .long exception_error
164 .long exception_error
165 .long exception_error
166 .long do_IRQ ! TMU4 tuni4 /* B80 */
167#elif defined(CONFIG_CPU_SUBTYPE_SH7760)
168 .long do_IRQ ! IRQ irq6 /* 840 */
169 .long do_IRQ ! irq7
170 .long do_IRQ ! SCIF eri0
171 .long do_IRQ ! rxi0
172 .long do_IRQ ! bri0
173 .long do_IRQ ! txi0
174 .long do_IRQ ! HCAN2 cani0 /* 900 */
175 .long do_IRQ ! cani1
176 .long do_IRQ ! SSI ssii0
177 .long do_IRQ ! ssii1
178 .long do_IRQ ! HAC haci0
179 .long do_IRQ ! haci1
180 .long do_IRQ ! IIC iici0
181 .long do_IRQ ! iici1
182 .long do_IRQ ! USB usbi /* A00 */
183 .long do_IRQ ! LCDC vint
184 .long exception_error
185 .long exception_error
186 .long do_IRQ ! DMABRG dmabrgi0
187 .long do_IRQ ! dmabrgi1
188 .long do_IRQ ! dmabrgi2
189 .long exception_error
190 .long do_IRQ ! SCIF eri1 /* B00 */
191 .long do_IRQ ! rxi1
192 .long do_IRQ ! bri1
193 .long do_IRQ ! txi1
194 .long do_IRQ ! eri2
195 .long do_IRQ ! rxi2
196 .long do_IRQ ! bri2
197 .long do_IRQ ! txi2
198 .long do_IRQ ! SIM simeri /* C00 */
199 .long do_IRQ ! simrxi
200 .long do_IRQ ! simtxi
201 .long do_IRQ ! simtei
202 .long do_IRQ ! HSPI spii
203 .long exception_error
204 .long exception_error
205 .long exception_error
206 .long do_IRQ ! MMCIF mmci0 /* D00 */
207 .long do_IRQ ! mmci1
208 .long do_IRQ ! mmci2
209 .long do_IRQ ! mmci3
210 .long exception_error
211 .long exception_error
212 .long exception_error
213 .long exception_error
214 .long exception_error /* E00 */
215 .long exception_error
216 .long exception_error
217 .long exception_error
218 .long do_IRQ ! MFI mfii
219 .long exception_error
220 .long exception_error
221 .long exception_error
222 .long exception_error /* F00 */
223 .long exception_error
224 .long exception_error
225 .long exception_error
226 .long do_IRQ ! ADC adi
227 .long do_IRQ ! CMT cmti /* FA0 */
228#elif defined(CONFIG_CPU_SUBTYPE_SH73180)
229 .long do_IRQ ! 50 0x840
230 .long do_IRQ ! 51 0x860
231 .long do_IRQ ! 52 0x880
232 .long do_IRQ ! 53 0x8a0
233 .long do_IRQ ! 54 0x8c0
234 .long do_IRQ ! 55 0x8e0
235 .long do_IRQ ! 56 0x900
236 .long do_IRQ ! 57 0x920
237 .long do_IRQ ! 58 0x940
238 .long do_IRQ ! 59 0x960
239 .long do_IRQ ! 60 0x980
240 .long do_IRQ ! 61 0x9a0
241 .long do_IRQ ! 62 0x9c0
242 .long do_IRQ ! 63 0x9e0
243 .long do_IRQ ! 64 0xa00
244 .long do_IRQ ! 65 0xa20
245 .long do_IRQ ! 66 0xa40
246 .long do_IRQ ! 67 0xa60
247 .long do_IRQ ! 68 0xa80
248 .long do_IRQ ! 69 0xaa0
249 .long do_IRQ ! 70 0xac0
250 .long do_IRQ ! 71 0xae0
251 .long do_IRQ ! 72 0xb00
252 .long do_IRQ ! 73 0xb20
253 .long do_IRQ ! 74 0xb40
254 .long do_IRQ ! 75 0xb60
255 .long do_IRQ ! 76 0xb80
256 .long do_IRQ ! 77 0xba0
257 .long do_IRQ ! 78 0xbc0
258 .long do_IRQ ! 79 0xbe0
259 .long do_IRQ ! 80 0xc00
260 .long do_IRQ ! 81 0xc20
261 .long do_IRQ ! 82 0xc40
262 .long do_IRQ ! 83 0xc60
263 .long do_IRQ ! 84 0xc80
264 .long do_IRQ ! 85 0xca0
265 .long do_IRQ ! 86 0xcc0
266 .long do_IRQ ! 87 0xce0
267 .long do_IRQ ! 88 0xd00
268 .long do_IRQ ! 89 0xd20
269 .long do_IRQ ! 90 0xd40
270 .long do_IRQ ! 91 0xd60
271 .long do_IRQ ! 92 0xd80
272 .long do_IRQ ! 93 0xda0
273 .long do_IRQ ! 94 0xdc0
274 .long do_IRQ ! 95 0xde0
275 .long do_IRQ ! 96 0xe00
276 .long do_IRQ ! 97 0xe20
277 .long do_IRQ ! 98 0xe40
278 .long do_IRQ ! 99 0xe60
279 .long do_IRQ ! 100 0xe80
280 .long do_IRQ ! 101 0xea0
281 .long do_IRQ ! 102 0xec0
282 .long do_IRQ ! 103 0xee0
283 .long do_IRQ ! 104 0xf00
284 .long do_IRQ ! 105 0xf20
285 .long do_IRQ ! 106 0xf40
286 .long do_IRQ ! 107 0xf60
287 .long do_IRQ ! 108 0xf80
288#elif defined(CONFIG_CPU_SUBTYPE_ST40STB1)
289 .long exception_error ! 50 0x840
290 .long exception_error ! 51 0x860
291 .long exception_error ! 52 0x880
292 .long exception_error ! 53 0x8a0
293 .long exception_error ! 54 0x8c0
294 .long exception_error ! 55 0x8e0
295 .long exception_error ! 56 0x900
296 .long exception_error ! 57 0x920
297 .long exception_error ! 58 0x940
298 .long exception_error ! 59 0x960
299 .long exception_error ! 60 0x980
300 .long exception_error ! 61 0x9a0
301 .long exception_error ! 62 0x9c0
302 .long exception_error ! 63 0x9e0
303 .long do_IRQ ! 64 0xa00 PCI serr
304 .long do_IRQ ! 65 0xa20 err
305 .long do_IRQ ! 66 0xa40 ad
306 .long do_IRQ ! 67 0xa60 pwr_dwn
307 .long exception_error ! 68 0xa80
308 .long exception_error ! 69 0xaa0
309 .long exception_error ! 70 0xac0
310 .long exception_error ! 71 0xae0
311 .long do_IRQ ! 72 0xb00 DMA INT0
312 .long do_IRQ ! 73 0xb20 INT1
313 .long do_IRQ ! 74 0xb40 INT2
314 .long do_IRQ ! 75 0xb60 INT3
315 .long do_IRQ ! 76 0xb80 INT4
316 .long exception_error ! 77 0xba0
317 .long do_IRQ ! 78 0xbc0 DMA ERR
318 .long exception_error ! 79 0xbe0
319 .long do_IRQ ! 80 0xc00 PIO0
320 .long do_IRQ ! 81 0xc20 PIO1
321 .long do_IRQ ! 82 0xc40 PIO2
322 .long exception_error ! 83 0xc60
323 .long exception_error ! 84 0xc80
324 .long exception_error ! 85 0xca0
325 .long exception_error ! 86 0xcc0
326 .long exception_error ! 87 0xce0
327 .long exception_error ! 88 0xd00
328 .long exception_error ! 89 0xd20
329 .long exception_error ! 90 0xd40
330 .long exception_error ! 91 0xd60
331 .long exception_error ! 92 0xd80
332 .long exception_error ! 93 0xda0
333 .long exception_error ! 94 0xdc0
334 .long exception_error ! 95 0xde0
335 .long exception_error ! 96 0xe00
336 .long exception_error ! 97 0xe20
337 .long exception_error ! 98 0xe40
338 .long exception_error ! 99 0xe60
339 .long exception_error ! 100 0xe80
340 .long exception_error ! 101 0xea0
341 .long exception_error ! 102 0xec0
342 .long exception_error ! 103 0xee0
343 .long exception_error ! 104 0xf00
344 .long exception_error ! 105 0xf20
345 .long exception_error ! 106 0xf40
346 .long exception_error ! 107 0xf60
347 .long exception_error ! 108 0xf80
348 .long exception_error ! 109 0xfa0
349 .long exception_error ! 110 0xfc0
350 .long exception_error ! 111 0xfe0
351 .long do_IRQ ! 112 0x1000 Mailbox
352 .long exception_error ! 113 0x1020
353 .long exception_error ! 114 0x1040
354 .long exception_error ! 115 0x1060
355 .long exception_error ! 116 0x1080
356 .long exception_error ! 117 0x10a0
357 .long exception_error ! 118 0x10c0
358 .long exception_error ! 119 0x10e0
359 .long exception_error ! 120 0x1100
360 .long exception_error ! 121 0x1120
361 .long exception_error ! 122 0x1140
362 .long exception_error ! 123 0x1160
363 .long exception_error ! 124 0x1180
364 .long exception_error ! 125 0x11a0
365 .long exception_error ! 126 0x11c0
366 .long exception_error ! 127 0x11e0
367 .long exception_error ! 128 0x1200
368 .long exception_error ! 129 0x1220
369 .long exception_error ! 130 0x1240
370 .long exception_error ! 131 0x1260
371 .long exception_error ! 132 0x1280
372 .long exception_error ! 133 0x12a0
373 .long exception_error ! 134 0x12c0
374 .long exception_error ! 135 0x12e0
375 .long exception_error ! 136 0x1300
376 .long exception_error ! 137 0x1320
377 .long exception_error ! 138 0x1340
378 .long exception_error ! 139 0x1360
379 .long do_IRQ ! 140 0x1380 EMPI INV_ADDR
380 .long exception_error ! 141 0x13a0
381 .long exception_error ! 142 0x13c0
382 .long exception_error ! 143 0x13e0
383#endif
384
diff --git a/arch/sh/kernel/cpu/sh4/fpu.c b/arch/sh/kernel/cpu/sh4/fpu.c
new file mode 100644
index 000000000000..f486c07e10e2
--- /dev/null
+++ b/arch/sh/kernel/cpu/sh4/fpu.c
@@ -0,0 +1,335 @@
1/* $Id: fpu.c,v 1.4 2004/01/13 05:52:11 kkojima Exp $
2 *
3 * linux/arch/sh/kernel/fpu.c
4 *
5 * Save/restore floating point context for signal handlers.
6 *
7 * This file is subject to the terms and conditions of the GNU General Public
8 * License. See the file "COPYING" in the main directory of this archive
9 * for more details.
10 *
11 * Copyright (C) 1999, 2000 Kaz Kojima & Niibe Yutaka
12 *
13 * FIXME! These routines can be optimized in big endian case.
14 */
15
16#include <linux/sched.h>
17#include <linux/signal.h>
18#include <asm/processor.h>
19#include <asm/io.h>
20
21/* The PR (precision) bit in the FP Status Register must be clear when
22 * an frchg instruction is executed, otherwise the instruction is undefined.
23 * Executing frchg with PR set causes a trap on some SH4 implementations.
24 */
25
26#define FPSCR_RCHG 0x00000000
27
28
29/*
30 * Save FPU registers onto task structure.
31 * Assume called with FPU enabled (SR.FD=0).
32 */
33void
34save_fpu(struct task_struct *tsk, struct pt_regs *regs)
35{
36 unsigned long dummy;
37
38 clear_tsk_thread_flag(tsk, TIF_USEDFPU);
39 enable_fpu();
40 asm volatile("sts.l fpul, @-%0\n\t"
41 "sts.l fpscr, @-%0\n\t"
42 "lds %2, fpscr\n\t"
43 "frchg\n\t"
44 "fmov.s fr15, @-%0\n\t"
45 "fmov.s fr14, @-%0\n\t"
46 "fmov.s fr13, @-%0\n\t"
47 "fmov.s fr12, @-%0\n\t"
48 "fmov.s fr11, @-%0\n\t"
49 "fmov.s fr10, @-%0\n\t"
50 "fmov.s fr9, @-%0\n\t"
51 "fmov.s fr8, @-%0\n\t"
52 "fmov.s fr7, @-%0\n\t"
53 "fmov.s fr6, @-%0\n\t"
54 "fmov.s fr5, @-%0\n\t"
55 "fmov.s fr4, @-%0\n\t"
56 "fmov.s fr3, @-%0\n\t"
57 "fmov.s fr2, @-%0\n\t"
58 "fmov.s fr1, @-%0\n\t"
59 "fmov.s fr0, @-%0\n\t"
60 "frchg\n\t"
61 "fmov.s fr15, @-%0\n\t"
62 "fmov.s fr14, @-%0\n\t"
63 "fmov.s fr13, @-%0\n\t"
64 "fmov.s fr12, @-%0\n\t"
65 "fmov.s fr11, @-%0\n\t"
66 "fmov.s fr10, @-%0\n\t"
67 "fmov.s fr9, @-%0\n\t"
68 "fmov.s fr8, @-%0\n\t"
69 "fmov.s fr7, @-%0\n\t"
70 "fmov.s fr6, @-%0\n\t"
71 "fmov.s fr5, @-%0\n\t"
72 "fmov.s fr4, @-%0\n\t"
73 "fmov.s fr3, @-%0\n\t"
74 "fmov.s fr2, @-%0\n\t"
75 "fmov.s fr1, @-%0\n\t"
76 "fmov.s fr0, @-%0\n\t"
77 "lds %3, fpscr\n\t"
78 : "=r" (dummy)
79 : "0" ((char *)(&tsk->thread.fpu.hard.status)),
80 "r" (FPSCR_RCHG),
81 "r" (FPSCR_INIT)
82 : "memory");
83
84 disable_fpu();
85 release_fpu(regs);
86}
87
88static void
89restore_fpu(struct task_struct *tsk)
90{
91 unsigned long dummy;
92
93 enable_fpu();
94 asm volatile("lds %2, fpscr\n\t"
95 "fmov.s @%0+, fr0\n\t"
96 "fmov.s @%0+, fr1\n\t"
97 "fmov.s @%0+, fr2\n\t"
98 "fmov.s @%0+, fr3\n\t"
99 "fmov.s @%0+, fr4\n\t"
100 "fmov.s @%0+, fr5\n\t"
101 "fmov.s @%0+, fr6\n\t"
102 "fmov.s @%0+, fr7\n\t"
103 "fmov.s @%0+, fr8\n\t"
104 "fmov.s @%0+, fr9\n\t"
105 "fmov.s @%0+, fr10\n\t"
106 "fmov.s @%0+, fr11\n\t"
107 "fmov.s @%0+, fr12\n\t"
108 "fmov.s @%0+, fr13\n\t"
109 "fmov.s @%0+, fr14\n\t"
110 "fmov.s @%0+, fr15\n\t"
111 "frchg\n\t"
112 "fmov.s @%0+, fr0\n\t"
113 "fmov.s @%0+, fr1\n\t"
114 "fmov.s @%0+, fr2\n\t"
115 "fmov.s @%0+, fr3\n\t"
116 "fmov.s @%0+, fr4\n\t"
117 "fmov.s @%0+, fr5\n\t"
118 "fmov.s @%0+, fr6\n\t"
119 "fmov.s @%0+, fr7\n\t"
120 "fmov.s @%0+, fr8\n\t"
121 "fmov.s @%0+, fr9\n\t"
122 "fmov.s @%0+, fr10\n\t"
123 "fmov.s @%0+, fr11\n\t"
124 "fmov.s @%0+, fr12\n\t"
125 "fmov.s @%0+, fr13\n\t"
126 "fmov.s @%0+, fr14\n\t"
127 "fmov.s @%0+, fr15\n\t"
128 "frchg\n\t"
129 "lds.l @%0+, fpscr\n\t"
130 "lds.l @%0+, fpul\n\t"
131 : "=r" (dummy)
132 : "0" (&tsk->thread.fpu), "r" (FPSCR_RCHG)
133 : "memory");
134 disable_fpu();
135}
136
137/*
138 * Load the FPU with signalling NaNs. The bit pattern we're using has
139 * the property that it represents a signalling NaN no matter whether
140 * it is considered as single or as double precision.
141 */
142
143static void
144fpu_init(void)
145{
146 enable_fpu();
147 asm volatile("lds %0, fpul\n\t"
148 "lds %1, fpscr\n\t"
149 "fsts fpul, fr0\n\t"
150 "fsts fpul, fr1\n\t"
151 "fsts fpul, fr2\n\t"
152 "fsts fpul, fr3\n\t"
153 "fsts fpul, fr4\n\t"
154 "fsts fpul, fr5\n\t"
155 "fsts fpul, fr6\n\t"
156 "fsts fpul, fr7\n\t"
157 "fsts fpul, fr8\n\t"
158 "fsts fpul, fr9\n\t"
159 "fsts fpul, fr10\n\t"
160 "fsts fpul, fr11\n\t"
161 "fsts fpul, fr12\n\t"
162 "fsts fpul, fr13\n\t"
163 "fsts fpul, fr14\n\t"
164 "fsts fpul, fr15\n\t"
165 "frchg\n\t"
166 "fsts fpul, fr0\n\t"
167 "fsts fpul, fr1\n\t"
168 "fsts fpul, fr2\n\t"
169 "fsts fpul, fr3\n\t"
170 "fsts fpul, fr4\n\t"
171 "fsts fpul, fr5\n\t"
172 "fsts fpul, fr6\n\t"
173 "fsts fpul, fr7\n\t"
174 "fsts fpul, fr8\n\t"
175 "fsts fpul, fr9\n\t"
176 "fsts fpul, fr10\n\t"
177 "fsts fpul, fr11\n\t"
178 "fsts fpul, fr12\n\t"
179 "fsts fpul, fr13\n\t"
180 "fsts fpul, fr14\n\t"
181 "fsts fpul, fr15\n\t"
182 "frchg\n\t"
183 "lds %2, fpscr\n\t"
184 : /* no output */
185 : "r" (0), "r" (FPSCR_RCHG), "r" (FPSCR_INIT));
186 disable_fpu();
187}
188
189/**
190 * denormal_to_double - Given a denormalized float number,
191 * store the equivalent double float
192 *
193 * @fpu: Pointer to sh_fpu_hard structure
194 * @n: Index to FP register
195 */
196static void
197denormal_to_double (struct sh_fpu_hard_struct *fpu, int n)
198{
199 unsigned long du, dl;
200 unsigned long x = fpu->fpul;
201 int exp = 1023 - 126;
202
203 if (x != 0 && (x & 0x7f800000) == 0) {
204 du = (x & 0x80000000);
205 while ((x & 0x00800000) == 0) {
206 x <<= 1;
207 exp--;
208 }
209 x &= 0x007fffff;
210 du |= (exp << 20) | (x >> 3);
211 dl = x << 29;
212
213 fpu->fp_regs[n] = du;
214 fpu->fp_regs[n+1] = dl;
215 }
216}
217
218/**
219 * ieee_fpe_handler - Handle denormalized number exception
220 *
221 * @regs: Pointer to register structure
222 *
223 * Returns 1 when it's handled (should not cause exception).
224 */
225static int
226ieee_fpe_handler (struct pt_regs *regs)
227{
228 unsigned short insn = *(unsigned short *) regs->pc;
229 unsigned short finsn;
230 unsigned long nextpc;
231 int nib[4] = {
232 (insn >> 12) & 0xf,
233 (insn >> 8) & 0xf,
234 (insn >> 4) & 0xf,
235 insn & 0xf};
236
237 if (nib[0] == 0xb ||
238 (nib[0] == 0x4 && nib[2] == 0x0 && nib[3] == 0xb)) /* bsr & jsr */
239 regs->pr = regs->pc + 4;
240
241 if (nib[0] == 0xa || nib[0] == 0xb) { /* bra & bsr */
242 nextpc = regs->pc + 4 + ((short) ((insn & 0xfff) << 4) >> 3);
243 finsn = *(unsigned short *) (regs->pc + 2);
244 } else if (nib[0] == 0x8 && nib[1] == 0xd) { /* bt/s */
245 if (regs->sr & 1)
246 nextpc = regs->pc + 4 + ((char) (insn & 0xff) << 1);
247 else
248 nextpc = regs->pc + 4;
249 finsn = *(unsigned short *) (regs->pc + 2);
250 } else if (nib[0] == 0x8 && nib[1] == 0xf) { /* bf/s */
251 if (regs->sr & 1)
252 nextpc = regs->pc + 4;
253 else
254 nextpc = regs->pc + 4 + ((char) (insn & 0xff) << 1);
255 finsn = *(unsigned short *) (regs->pc + 2);
256 } else if (nib[0] == 0x4 && nib[3] == 0xb &&
257 (nib[2] == 0x0 || nib[2] == 0x2)) { /* jmp & jsr */
258 nextpc = regs->regs[nib[1]];
259 finsn = *(unsigned short *) (regs->pc + 2);
260 } else if (nib[0] == 0x0 && nib[3] == 0x3 &&
261 (nib[2] == 0x0 || nib[2] == 0x2)) { /* braf & bsrf */
262 nextpc = regs->pc + 4 + regs->regs[nib[1]];
263 finsn = *(unsigned short *) (regs->pc + 2);
264 } else if (insn == 0x000b) { /* rts */
265 nextpc = regs->pr;
266 finsn = *(unsigned short *) (regs->pc + 2);
267 } else {
268 nextpc = regs->pc + 2;
269 finsn = insn;
270 }
271
272 if ((finsn & 0xf1ff) == 0xf0ad) { /* fcnvsd */
273 struct task_struct *tsk = current;
274
275 save_fpu(tsk, regs);
276 if ((tsk->thread.fpu.hard.fpscr & (1 << 17))) {
277 /* FPU error */
278 denormal_to_double (&tsk->thread.fpu.hard,
279 (finsn >> 8) & 0xf);
280 tsk->thread.fpu.hard.fpscr &=
281 ~(FPSCR_CAUSE_MASK | FPSCR_FLAG_MASK);
282 grab_fpu(regs);
283 restore_fpu(tsk);
284 set_tsk_thread_flag(tsk, TIF_USEDFPU);
285 } else {
286 tsk->thread.trap_no = 11;
287 tsk->thread.error_code = 0;
288 force_sig(SIGFPE, tsk);
289 }
290
291 regs->pc = nextpc;
292 return 1;
293 }
294
295 return 0;
296}
297
298asmlinkage void
299do_fpu_error(unsigned long r4, unsigned long r5, unsigned long r6, unsigned long r7,
300 struct pt_regs regs)
301{
302 struct task_struct *tsk = current;
303
304 if (ieee_fpe_handler (&regs))
305 return;
306
307 regs.pc += 2;
308 save_fpu(tsk, &regs);
309 tsk->thread.trap_no = 11;
310 tsk->thread.error_code = 0;
311 force_sig(SIGFPE, tsk);
312}
313
314asmlinkage void
315do_fpu_state_restore(unsigned long r4, unsigned long r5, unsigned long r6,
316 unsigned long r7, struct pt_regs regs)
317{
318 struct task_struct *tsk = current;
319
320 grab_fpu(&regs);
321 if (!user_mode(&regs)) {
322 printk(KERN_ERR "BUG: FPU is used in kernel mode.\n");
323 return;
324 }
325
326 if (used_math()) {
327 /* Using the FPU again. */
328 restore_fpu(tsk);
329 } else {
330 /* First time FPU user. */
331 fpu_init();
332 set_used_math();
333 }
334 set_tsk_thread_flag(tsk, TIF_USEDFPU);
335}
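
The denormal_to_double() conversion above is easier to sanity-check outside the kernel. Below is a minimal, hypothetical userspace harness (function name and test value are mine, not part of the patch) that mirrors the same bit manipulation on plain integers and compares the result against the compiler's own single-to-double conversion, which is exact for denormals:

#include <stdio.h>
#include <stdint.h>
#include <string.h>

/* Mirror of the kernel's denormal_to_double() bit manipulation, operating
 * on plain integers instead of the sh_fpu_hard_struct register image. */
static uint64_t denormal_single_to_double_bits(uint32_t x)
{
        uint32_t du, dl;
        int exp = 1023 - 126;

        if (x == 0 || (x & 0x7f800000) != 0)
                return 0;                       /* not a denormal; not handled here */

        du = x & 0x80000000;                    /* sign bit */
        while ((x & 0x00800000) == 0) {         /* normalize the mantissa */
                x <<= 1;
                exp--;
        }
        x &= 0x007fffff;                        /* drop the now-implicit leading 1 */
        du |= ((uint32_t)exp << 20) | (x >> 3); /* exponent + top 20 mantissa bits */
        dl = x << 29;                           /* remaining 3 mantissa bits */

        return ((uint64_t)du << 32) | dl;
}

int main(void)
{
        float f = 1e-40f;                       /* a single-precision denormal */
        double d = f;                           /* exact widening conversion */
        uint32_t fbits;
        uint64_t got, want;

        memcpy(&fbits, &f, sizeof(fbits));
        memcpy(&want, &d, sizeof(want));
        got = denormal_single_to_double_bits(fbits);

        printf("single 0x%08x -> 0x%016llx (expected 0x%016llx)\n",
               fbits, (unsigned long long)got, (unsigned long long)want);
        return got != want;
}
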
diff --git a/arch/sh/kernel/cpu/sh4/irq_intc2.c b/arch/sh/kernel/cpu/sh4/irq_intc2.c
new file mode 100644
index 000000000000..099ebbf89745
--- /dev/null
+++ b/arch/sh/kernel/cpu/sh4/irq_intc2.c
@@ -0,0 +1,222 @@
1/*
2 * linux/arch/sh/kernel/cpu/sh4/irq_intc2.c
3 *
4 * Copyright (C) 2001 David J. Mckay (david.mckay@st.com)
5 *
6 * May be copied or modified under the terms of the GNU General Public
7 * License. See linux/COPYING for more information.
8 *
9 * Interrupt handling for INTC2-based IRQ.
10 *
11 * These are the "new Hitachi style" interrupts, as present on the
12 * Hitachi 7751 and the STM ST40 STB1.
13 */
14
15#include <linux/kernel.h>
16#include <linux/init.h>
17#include <linux/irq.h>
18
19#include <asm/system.h>
20#include <asm/io.h>
21#include <asm/machvec.h>
22
23
24struct intc2_data {
25 unsigned char msk_offset;
26 unsigned char msk_shift;
27#ifdef CONFIG_CPU_SUBTYPE_ST40
28 int (*clear_irq) (int);
29#endif
30};
31
32
33static struct intc2_data intc2_data[NR_INTC2_IRQS];
34
35static void enable_intc2_irq(unsigned int irq);
36static void disable_intc2_irq(unsigned int irq);
37
38/* shutdown is same as "disable" */
39#define shutdown_intc2_irq disable_intc2_irq
40
41static void mask_and_ack_intc2(unsigned int);
42static void end_intc2_irq(unsigned int irq);
43
44static unsigned int startup_intc2_irq(unsigned int irq)
45{
46 enable_intc2_irq(irq);
47 return 0; /* never anything pending */
48}
49
50static struct hw_interrupt_type intc2_irq_type = {
51 "INTC2-IRQ",
52 startup_intc2_irq,
53 shutdown_intc2_irq,
54 enable_intc2_irq,
55 disable_intc2_irq,
56 mask_and_ack_intc2,
57 end_intc2_irq
58};
59
60static void disable_intc2_irq(unsigned int irq)
61{
62 int irq_offset = irq - INTC2_FIRST_IRQ;
63 int msk_shift, msk_offset;
64
65 /* Sanity check */
66 if((irq_offset<0) || (irq_offset>=NR_INTC2_IRQS))
67 return;
68
69 msk_shift = intc2_data[irq_offset].msk_shift;
70 msk_offset = intc2_data[irq_offset].msk_offset;
71
72 ctrl_outl(1<<msk_shift,
73 INTC2_BASE+INTC2_INTMSK_OFFSET+msk_offset);
74}
75
76static void enable_intc2_irq(unsigned int irq)
77{
78 int irq_offset = irq - INTC2_FIRST_IRQ;
79 int msk_shift, msk_offset;
80
81 /* Sanity check */
82 if((irq_offset<0) || (irq_offset>=NR_INTC2_IRQS))
83 return;
84
85 msk_shift = intc2_data[irq_offset].msk_shift;
86 msk_offset = intc2_data[irq_offset].msk_offset;
87
88 ctrl_outl(1<<msk_shift,
89 INTC2_BASE+INTC2_INTMSKCLR_OFFSET+msk_offset);
90}
91
92static void mask_and_ack_intc2(unsigned int irq)
93{
94 disable_intc2_irq(irq);
95}
96
97static void end_intc2_irq(unsigned int irq)
98{
99 if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS)))
100 enable_intc2_irq(irq);
101
102#ifdef CONFIG_CPU_SUBTYPE_ST40
103 if (intc2_data[irq - INTC2_FIRST_IRQ].clear_irq)
104 intc2_data[irq - INTC2_FIRST_IRQ].clear_irq (irq);
105#endif
106}
107
108/*
109 * Setup an INTC2 style interrupt.
110 * NOTE: Unlike IPR interrupts, parameters are not shifted by this code,
111 * allowing the use of the numbers straight out of the datasheet.
112 * For example:
113 * PIO1, which is INTPRI00[19,16] and INTMSK00[13],
114 * would be:
115 * make_intc2_irq(84, 0, 16, 0, 13, priority);
116 * i.e. ipr_offset 0, ipr_shift 16, msk_offset 0, msk_shift 13.
117 */
118void make_intc2_irq(unsigned int irq,
119 unsigned int ipr_offset, unsigned int ipr_shift,
120 unsigned int msk_offset, unsigned int msk_shift,
121 unsigned int priority)
122{
123 int irq_offset = irq - INTC2_FIRST_IRQ;
124 unsigned long flags;
125 unsigned long ipr;
126
127 if((irq_offset<0) || (irq_offset>=NR_INTC2_IRQS))
128 return;
129
130 disable_irq_nosync(irq);
131
132 /* Fill the data we need */
133 intc2_data[irq_offset].msk_offset = msk_offset;
134 intc2_data[irq_offset].msk_shift = msk_shift;
135#ifdef CONFIG_CPU_SUBTYPE_ST40
136 intc2_data[irq_offset].clear_irq = NULL;
137#endif
138
139 /* Set the priority level */
140 local_irq_save(flags);
141
142 ipr=ctrl_inl(INTC2_BASE+INTC2_INTPRI_OFFSET+ipr_offset);
143 ipr&=~(0xf<<ipr_shift);
144 ipr|=(priority)<<ipr_shift;
145 ctrl_outl(ipr, INTC2_BASE+INTC2_INTPRI_OFFSET+ipr_offset);
146
147 local_irq_restore(flags);
148
149 irq_desc[irq].handler=&intc2_irq_type;
150
151 disable_intc2_irq(irq);
152}
153
154#ifdef CONFIG_CPU_SUBTYPE_ST40
155
156struct intc2_init {
157 unsigned short irq;
158 unsigned char ipr_offset, ipr_shift;
159 unsigned char msk_offset, msk_shift;
160};
161
162static struct intc2_init intc2_init_data[] __initdata = {
163 {64, 0, 0, 0, 0}, /* PCI serr */
164 {65, 0, 4, 0, 1}, /* PCI err */
165 {66, 0, 4, 0, 2}, /* PCI ad */
166 {67, 0, 4, 0, 3}, /* PCI pwr down */
167 {72, 0, 8, 0, 5}, /* DMAC INT0 */
168 {73, 0, 8, 0, 6}, /* DMAC INT1 */
169 {74, 0, 8, 0, 7}, /* DMAC INT2 */
170 {75, 0, 8, 0, 8}, /* DMAC INT3 */
171 {76, 0, 8, 0, 9}, /* DMAC INT4 */
172 {78, 0, 8, 0, 11}, /* DMAC ERR */
173 {80, 0, 12, 0, 12}, /* PIO0 */
174 {84, 0, 16, 0, 13}, /* PIO1 */
175 {88, 0, 20, 0, 14}, /* PIO2 */
176 {112, 4, 0, 4, 0}, /* Mailbox */
177#ifdef CONFIG_CPU_SUBTYPE_ST40GX1
178 {116, 4, 4, 4, 4}, /* SSC0 */
179 {120, 4, 8, 4, 8}, /* IR Blaster */
180 {124, 4, 12, 4, 12}, /* USB host */
181 {128, 4, 16, 4, 16}, /* Video processor BLITTER */
182 {132, 4, 20, 4, 20}, /* UART0 */
183 {134, 4, 20, 4, 22}, /* UART2 */
184 {136, 4, 24, 4, 24}, /* IO_PIO0 */
185 {140, 4, 28, 4, 28}, /* EMPI */
186 {144, 8, 0, 8, 0}, /* MAFE */
187 {148, 8, 4, 8, 4}, /* PWM */
188 {152, 8, 8, 8, 8}, /* SSC1 */
189 {156, 8, 12, 8, 12}, /* IO_PIO1 */
190 {160, 8, 16, 8, 16}, /* USB target */
191 {164, 8, 20, 8, 20}, /* UART1 */
192 {168, 8, 24, 8, 24}, /* Teletext */
193 {172, 8, 28, 8, 28}, /* VideoSync VTG */
194 {173, 8, 28, 8, 29}, /* VideoSync DVP0 */
195 {174, 8, 28, 8, 30}, /* VideoSync DVP1 */
196#endif
197};
198
199void __init init_IRQ_intc2(void)
200{
201 struct intc2_init *p;
202
203 printk(KERN_ALERT "init_IRQ_intc2\n");
204
205 for (p = intc2_init_data;
206 p<intc2_init_data+ARRAY_SIZE(intc2_init_data);
207 p++) {
208 make_intc2_irq(p->irq, p->ipr_offset, p->ipr_shift,
209 p-> msk_offset, p->msk_shift, 13);
210 }
211}
212
213/* Adds a termination callback to the interrupt */
214void intc2_add_clear_irq(int irq, int (*fn)(int))
215{
216 if (irq < INTC2_FIRST_IRQ)
217 return;
218
219 intc2_data[irq - INTC2_FIRST_IRQ].clear_irq = fn;
220}
221
222#endif /* CONFIG_CPU_SUBTYPE_ST40 */
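
The priority setup in make_intc2_irq() is a read-modify-write of one 4-bit field in an INTPRI register, done under local_irq_save() because each register holds the priorities of several IRQ lines. A minimal sketch of just that field update (helper name and sample values are hypothetical, not part of the patch):

#include <stdio.h>
#include <stdint.h>

/* Illustrative helper (name is mine): update one 4-bit priority field in an
 * INTPRI-style register image, the same mask/insert pattern make_intc2_irq()
 * applies at INTC2_BASE + INTC2_INTPRI_OFFSET + ipr_offset. */
static uint32_t set_intpri_field(uint32_t ipr, unsigned int ipr_shift,
                                 unsigned int priority)
{
        ipr &= ~(0xfu << ipr_shift);            /* clear the old priority */
        ipr |= (priority & 0xfu) << ipr_shift;  /* insert the new one */
        return ipr;
}

int main(void)
{
        uint32_t ipr = 0x00050000;      /* pretend PIO1 (INTPRI00[19,16]) sits at 5 */

        ipr = set_intpri_field(ipr, 16, 13);
        printf("INTPRI00 image: 0x%08x\n", ipr);        /* prints 0x000d0000 */
        return 0;
}
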
diff --git a/arch/sh/kernel/cpu/sh4/probe.c b/arch/sh/kernel/cpu/sh4/probe.c
new file mode 100644
index 000000000000..42427b79697b
--- /dev/null
+++ b/arch/sh/kernel/cpu/sh4/probe.c
@@ -0,0 +1,138 @@
1/*
2 * arch/sh/kernel/cpu/sh4/probe.c
3 *
4 * CPU Subtype Probing for SH-4.
5 *
6 * Copyright (C) 2001, 2002, 2003, 2004 Paul Mundt
7 * Copyright (C) 2003 Richard Curnow
8 *
9 * This file is subject to the terms and conditions of the GNU General Public
10 * License. See the file "COPYING" in the main directory of this archive
11 * for more details.
12 */
13
14#include <linux/init.h>
15#include <asm/processor.h>
16#include <asm/cache.h>
17#include <asm/io.h>
18
19int __init detect_cpu_and_cache_system(void)
20{
21 unsigned long pvr, prr, cvr;
22 unsigned long size;
23
24 static unsigned long sizes[16] = {
25 [1] = (1 << 12),
26 [2] = (1 << 13),
27 [4] = (1 << 14),
28 [8] = (1 << 15),
29 [9] = (1 << 16)
30 };
31
32 pvr = (ctrl_inl(CCN_PVR) >> 8) & 0xffff;
33 prr = (ctrl_inl(CCN_PRR) >> 4) & 0xff;
34 cvr = (ctrl_inl(CCN_CVR));
35
36 /*
37 * Setup some sane SH-4 defaults for the icache
38 */
39 cpu_data->icache.way_incr = (1 << 13);
40 cpu_data->icache.entry_shift = 5;
41 cpu_data->icache.entry_mask = 0x1fe0;
42 cpu_data->icache.sets = 256;
43 cpu_data->icache.ways = 1;
44 cpu_data->icache.linesz = L1_CACHE_BYTES;
45
46 /*
47 * And again for the dcache ..
48 */
49 cpu_data->dcache.way_incr = (1 << 14);
50 cpu_data->dcache.entry_shift = 5;
51 cpu_data->dcache.entry_mask = 0x3fe0;
52 cpu_data->dcache.sets = 512;
53 cpu_data->dcache.ways = 1;
54 cpu_data->dcache.linesz = L1_CACHE_BYTES;
55
56 /* Set the FPU flag, virtually all SH-4's have one */
57 cpu_data->flags |= CPU_HAS_FPU;
58
59 /*
60 * Probe the underlying processor version/revision and
61 * adjust cpu_data setup accordingly.
62 */
63 switch (pvr) {
64 case 0x205:
65 cpu_data->type = CPU_SH7750;
66 cpu_data->flags |= CPU_HAS_P2_FLUSH_BUG | CPU_HAS_PERF_COUNTER;
67 break;
68 case 0x206:
69 cpu_data->type = CPU_SH7750S;
70 cpu_data->flags |= CPU_HAS_P2_FLUSH_BUG | CPU_HAS_PERF_COUNTER;
71 break;
72 case 0x1100:
73 cpu_data->type = CPU_SH7751;
74 break;
75 case 0x2000:
76 cpu_data->type = CPU_SH73180;
77 cpu_data->icache.ways = 4;
78 cpu_data->dcache.ways = 4;
79 cpu_data->flags &= ~CPU_HAS_FPU;
80 break;
81 case 0x8000:
82 cpu_data->type = CPU_ST40RA;
83 break;
84 case 0x8100:
85 cpu_data->type = CPU_ST40GX1;
86 break;
87 case 0x700:
88 cpu_data->type = CPU_SH4_501;
89 cpu_data->icache.ways = 2;
90 cpu_data->dcache.ways = 2;
91
92 /* No FPU on the SH4-500 series.. */
93 cpu_data->flags &= ~CPU_HAS_FPU;
94 break;
95 case 0x600:
96 cpu_data->type = CPU_SH4_202;
97 cpu_data->icache.ways = 2;
98 cpu_data->dcache.ways = 2;
99 break;
100 case 0x500 ... 0x501:
101 switch (prr) {
102 case 0x10: cpu_data->type = CPU_SH7750R; break;
103 case 0x11: cpu_data->type = CPU_SH7751R; break;
104 case 0x50: cpu_data->type = CPU_SH7760; break;
105 }
106
107 cpu_data->icache.ways = 2;
108 cpu_data->dcache.ways = 2;
109
110 break;
111 default:
112 cpu_data->type = CPU_SH_NONE;
113 break;
114 }
115
116 /*
117 * On anything that's not a direct-mapped cache, look to the CVR
118 * for I/D-cache specifics.
119 */
120 if (cpu_data->icache.ways > 1) {
121 size = sizes[(cvr >> 20) & 0xf];
122 cpu_data->icache.way_incr = (size >> 1);
123 cpu_data->icache.sets = (size >> 6);
124 cpu_data->icache.entry_mask =
125 (cpu_data->icache.way_incr - (1 << 5));
126 }
127
128 if (cpu_data->dcache.ways > 1) {
129 size = sizes[(cvr >> 16) & 0xf];
130 cpu_data->dcache.way_incr = (size >> 1);
131 cpu_data->dcache.sets = (size >> 6);
132 cpu_data->dcache.entry_mask =
133 (cpu_data->dcache.way_incr - (1 << 5));
134 }
135
136 return 0;
137}
138
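
For the CVR-described caches above, the geometry falls out of a 2-way, 32-byte-line organisation: way_incr is one way's worth of address space, sets is size / (ways * linesz), and entry_mask keeps the index bits above the line offset. A small standalone sketch of that derivation (struct and names are mine; the real fields live in cpu_data via <asm/cache.h>), using a 16KB cache as a worked example:

#include <stdio.h>

/* Hypothetical mirror of the cache_info fields used above; the real
 * definition is reached through cpu_data in <asm/cache.h>. */
struct cache_info_sketch {
        unsigned long ways, sets, linesz, way_incr, entry_mask;
};

/* Derive 2-way geometry from a total size, the way the CVR branch of
 * detect_cpu_and_cache_system() does. */
static void derive_geometry(struct cache_info_sketch *c, unsigned long size)
{
        c->ways = 2;
        c->linesz = 32;
        c->way_incr = size >> 1;                /* one way of address space */
        c->sets = size >> 6;                    /* size / (ways * linesz) */
        c->entry_mask = c->way_incr - (1 << 5); /* index bits above the line */
}

int main(void)
{
        struct cache_info_sketch ic;

        derive_geometry(&ic, 1 << 14);  /* e.g. a 16KB instruction cache */
        printf("ways=%lu sets=%lu way_incr=0x%lx entry_mask=0x%lx\n",
               ic.ways, ic.sets, ic.way_incr, ic.entry_mask);
        return 0;
}
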
diff --git a/arch/sh/kernel/cpu/sh4/sq.c b/arch/sh/kernel/cpu/sh4/sq.c
new file mode 100644
index 000000000000..8437ea7430fe
--- /dev/null
+++ b/arch/sh/kernel/cpu/sh4/sq.c
@@ -0,0 +1,453 @@
1/*
2 * arch/sh/kernel/cpu/sh4/sq.c
3 *
4 * General management API for SH-4 integrated Store Queues
5 *
6 * Copyright (C) 2001, 2002, 2003, 2004 Paul Mundt
7 * Copyright (C) 2001, 2002 M. R. Brown
8 *
9 * Some of this code has been adapted directly from the old arch/sh/mm/sq.c
10 * hack that was part of the LinuxDC project. For all intents and purposes,
11 * this is a completely new interface that really doesn't have much in common
12 * with the old zone-based approach at all. In fact, it's only listed here for
13 * general completeness.
14 *
15 * This file is subject to the terms and conditions of the GNU General Public
16 * License. See the file "COPYING" in the main directory of this archive
17 * for more details.
18 */
19#include <linux/init.h>
20#include <linux/kernel.h>
21#include <linux/module.h>
22#include <linux/config.h>
23#include <linux/slab.h>
24#include <linux/list.h>
25#include <linux/proc_fs.h>
26#include <linux/miscdevice.h>
27#include <linux/vmalloc.h>
28
29#include <asm/io.h>
30#include <asm/page.h>
31#include <asm/mmu_context.h>
32#include <asm/cpu/sq.h>
33
34static LIST_HEAD(sq_mapping_list);
35static DEFINE_SPINLOCK(sq_mapping_lock);
36
37/**
38 * sq_flush - Flush (prefetch) the store queue cache
39 * @addr: the store queue address to flush
40 *
41 * Executes a prefetch instruction on the specified store queue cache,
42 * so that the cached data is written to physical memory.
43 */
44inline void sq_flush(void *addr)
45{
46 __asm__ __volatile__ ("pref @%0" : : "r" (addr) : "memory");
47}
48
49/**
50 * sq_flush_range - Flush (prefetch) a specific SQ range
51 * @start: the store queue address to start flushing from
52 * @len: the length to flush
53 *
54 * Flushes the store queue cache from @start to @start + @len in a
55 * linear fashion.
56 */
57void sq_flush_range(unsigned long start, unsigned int len)
58{
59 volatile unsigned long *sq = (unsigned long *)start;
60 unsigned long dummy;
61
62 /* Flush the queues */
63 for (len >>= 5; len--; sq += 8)
64 sq_flush((void *)sq);
65
66 /* Wait for completion */
67 dummy = ctrl_inl(P4SEG_STORE_QUE);
68
69 ctrl_outl(0, P4SEG_STORE_QUE + 0);
70 ctrl_outl(0, P4SEG_STORE_QUE + 8);
71}
72
73static struct sq_mapping *__sq_alloc_mapping(unsigned long virt, unsigned long phys, unsigned long size, const char *name)
74{
75 struct sq_mapping *map;
76
77 if (virt + size > SQ_ADDRMAX)
78 return ERR_PTR(-ENOSPC);
79
80 map = kmalloc(sizeof(struct sq_mapping), GFP_KERNEL);
81 if (!map)
82 return ERR_PTR(-ENOMEM);
83
84 INIT_LIST_HEAD(&map->list);
85
86 map->sq_addr = virt;
87 map->addr = phys;
88 map->size = size + 1;
89 map->name = name;
90
91 list_add(&map->list, &sq_mapping_list);
92
93 return map;
94}
95
96static unsigned long __sq_get_next_addr(void)
97{
98 if (!list_empty(&sq_mapping_list)) {
99 struct list_head *pos, *tmp;
100
101 /*
102 * Read one off the list head, as it will have the highest
103 * mapped allocation. Set the next one up right above it.
104 *
105 * This is somewhat sub-optimal, as we don't look at
106 * gaps between allocations or anything lower than the
107 * highest-level allocation.
108 *
109 * However, in the interest of performance and the general
110 * lack of desire to do constant list rebalancing, we don't
111 * worry about it.
112 */
113 list_for_each_safe(pos, tmp, &sq_mapping_list) {
114 struct sq_mapping *entry;
115
116 entry = list_entry(pos, typeof(*entry), list);
117
118 return entry->sq_addr + entry->size;
119 }
120 }
121
122 return P4SEG_STORE_QUE;
123}
124
125/**
126 * __sq_remap - Perform a translation from the SQ to a phys addr
127 * @map: sq mapping containing phys and store queue addresses.
128 *
129 * Maps the store queue address specified in the mapping to the physical
130 * address specified in the mapping.
131 */
132static struct sq_mapping *__sq_remap(struct sq_mapping *map)
133{
134 unsigned long flags, pteh, ptel;
135 struct vm_struct *vma;
136 pgprot_t pgprot;
137
138 /*
139 * Without an MMU (or with it turned off), this is much more
140 * straightforward, as we can just load up each queue's QACR with
141 * the physical address appropriately masked.
142 */
143
144 ctrl_outl(((map->addr >> 26) << 2) & 0x1c, SQ_QACR0);
145 ctrl_outl(((map->addr >> 26) << 2) & 0x1c, SQ_QACR1);
146
147#ifdef CONFIG_MMU
148 /*
149 * With an MMU on the other hand, things are slightly more involved.
150 * Namely, we have to have a direct mapping between the SQ addr and
151 * the associated physical address in the UTLB by way of setting up
152 * a virt<->phys translation by hand. We do this by simply specifying
153 * the SQ addr in UTLB.VPN and the associated physical address in
154 * UTLB.PPN.
155 *
156 * Notably, even though this is a special case translation, and some
157 * of the configuration bits are meaningless, we're still required
158 * to have a valid ASID context in PTEH.
159 *
160 * We could also probably get by without explicitly setting PTEA, but
161 * we do it here just for good measure.
162 */
163 spin_lock_irqsave(&sq_mapping_lock, flags);
164
165 pteh = map->sq_addr;
166 ctrl_outl((pteh & MMU_VPN_MASK) | get_asid(), MMU_PTEH);
167
168 ptel = map->addr & PAGE_MASK;
169 ctrl_outl(((ptel >> 28) & 0xe) | (ptel & 0x1), MMU_PTEA);
170
171 pgprot = pgprot_noncached(PAGE_KERNEL);
172
173 ptel &= _PAGE_FLAGS_HARDWARE_MASK;
174 ptel |= pgprot_val(pgprot);
175 ctrl_outl(ptel, MMU_PTEL);
176
177 __asm__ __volatile__ ("ldtlb" : : : "memory");
178
179 spin_unlock_irqrestore(&sq_mapping_lock, flags);
180
181 /*
182 * Next, we need to map ourselves in the kernel page table, so that
183 * future accesses after a TLB flush will be handled when we take a
184 * page fault.
185 *
186 * Theoretically we could just do this directly and not worry about
187 * setting up the translation by hand ahead of time, but for the
188 * cases where we want a one-shot SQ mapping followed by a quick
189 * writeout before we hit the TLB flush, we do it anyway. This way
190 * we at least save ourselves the initial page fault overhead.
191 */
192 vma = __get_vm_area(map->size, VM_ALLOC, map->sq_addr, SQ_ADDRMAX);
193 if (!vma)
194 return ERR_PTR(-ENOMEM);
195
196 vma->phys_addr = map->addr;
197
198 if (remap_area_pages((unsigned long)vma->addr, vma->phys_addr,
199 map->size, pgprot_val(pgprot))) {
200 vunmap(vma->addr);
201 return NULL;
202 }
203#endif /* CONFIG_MMU */
204
205 return map;
206}
207
208/**
209 * sq_remap - Map a physical address through the Store Queues
210 * @phys: Physical address of mapping.
211 * @size: Length of mapping.
212 * @name: User invoking mapping.
213 *
214 * Remaps the physical address @phys through the next available store queue
215 * address of @size length. @name is logged at boot time as well as through
216 * the procfs interface.
217 *
218 * A pre-allocated and filled sq_mapping pointer is returned, and must be
219 * cleaned up with a call to sq_unmap() when the user is done with the
220 * mapping.
221 */
222struct sq_mapping *sq_remap(unsigned long phys, unsigned int size, const char *name)
223{
224 struct sq_mapping *map;
225 unsigned long virt, end;
226 unsigned int psz;
227
228 /* Don't allow wraparound or zero size */
229 end = phys + size - 1;
230 if (!size || end < phys)
231 return NULL;
232 /* Don't allow anyone to remap normal memory.. */
233 if (phys < virt_to_phys(high_memory))
234 return NULL;
235
236 phys &= PAGE_MASK;
237
238 size = PAGE_ALIGN(end + 1) - phys;
239 virt = __sq_get_next_addr();
240 psz = (size + (PAGE_SIZE - 1)) / PAGE_SIZE;
241 map = __sq_alloc_mapping(virt, phys, size, name);
242
243 printk(KERN_INFO "sqremap: %15s [%4d page%s] va 0x%08lx pa 0x%08lx\n",
244 map->name ? map->name : "???",
245 psz, psz == 1 ? " " : "s",
246 map->sq_addr, map->addr);
247
248 return __sq_remap(map);
249}
250
251/**
252 * sq_unmap - Unmap a Store Queue allocation
253 * @map: Pre-allocated Store Queue mapping.
254 *
255 * Unmaps the store queue allocation @map that was previously created by
256 * sq_remap(). Also frees up the pte that was previously inserted into
257 * the kernel page table and discards the UTLB translation.
258 */
259void sq_unmap(struct sq_mapping *map)
260{
261 if (map->sq_addr > (unsigned long)high_memory)
262 vfree((void *)(map->sq_addr & PAGE_MASK));
263
264 list_del(&map->list);
265 kfree(map);
266}
267
268/**
269 * sq_clear - Clear a store queue range
270 * @addr: Address to start clearing from.
271 * @len: Length to clear.
272 *
273 * A quick zero-fill implementation for clearing out memory that has been
274 * remapped through the store queues.
275 */
276void sq_clear(unsigned long addr, unsigned int len)
277{
278 int i;
279
280 /* Clear out both queues linearly */
281 for (i = 0; i < 8; i++) {
282 ctrl_outl(0, addr + i + 0);
283 ctrl_outl(0, addr + i + 8);
284 }
285
286 sq_flush_range(addr, len);
287}
288
289/**
290 * sq_vma_unmap - Unmap a VMA range
291 * @area: VMA containing range.
292 * @addr: Start of range.
293 * @len: Length of range.
294 *
295 * Searches the sq_mapping_list for a mapping matching the sq addr @addr,
296 * and subsequently frees up the entry. Further cleanup is done by generic
297 * code.
298 */
299static void sq_vma_unmap(struct vm_area_struct *area,
300 unsigned long addr, size_t len)
301{
302 struct list_head *pos, *tmp;
303
304 list_for_each_safe(pos, tmp, &sq_mapping_list) {
305 struct sq_mapping *entry;
306
307 entry = list_entry(pos, typeof(*entry), list);
308
309 if (entry->sq_addr == addr) {
310 /*
311 * We could probably get away without doing the tlb flush
312 * here, as generic code should take care of most of this
313 * when unmapping the rest of the VMA range for us. Leave
314 * it in for added sanity for the time being..
315 */
316 __flush_tlb_page(get_asid(), entry->sq_addr & PAGE_MASK);
317
318 list_del(&entry->list);
319 kfree(entry);
320
321 return;
322 }
323 }
324}
325
326/**
327 * sq_vma_sync - Sync a VMA range
328 * @area: VMA containing range.
329 * @start: Start of range.
330 * @len: Length of range.
331 * @flags: Additional flags.
332 *
333 * Synchronizes an sq mapped range by flushing the store queue cache for
334 * the duration of the mapping.
335 *
336 * Used internally for user mappings, which must use msync() to prefetch
337 * the store queue cache.
338 */
339static int sq_vma_sync(struct vm_area_struct *area,
340 unsigned long start, size_t len, unsigned int flags)
341{
342 sq_flush_range(start, len);
343
344 return 0;
345}
346
347static struct vm_operations_struct sq_vma_ops = {
348 .unmap = sq_vma_unmap,
349 .sync = sq_vma_sync,
350};
351
352/**
353 * sq_mmap - mmap() for /dev/cpu/sq
354 * @file: unused.
355 * @vma: VMA to remap.
356 *
357 * Remap the specified vma @vma through the store queues, and setup associated
358 * information for the new mapping. Also build up the page tables for the new
359 * area.
360 */
361static int sq_mmap(struct file *file, struct vm_area_struct *vma)
362{
363 unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
364 unsigned long size = vma->vm_end - vma->vm_start;
365 struct sq_mapping *map;
366
367 /*
368 * We're not interested in any arbitrary virtual address that has
369 * been stuck in the VMA, as we already know what addresses we
370 * want. Save off the size, and reposition the VMA to begin at
371 * the next available sq address.
372 */
373 vma->vm_start = __sq_get_next_addr();
374 vma->vm_end = vma->vm_start + size;
375
376 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
377
378 vma->vm_flags |= VM_IO | VM_RESERVED;
379
380 map = __sq_alloc_mapping(vma->vm_start, offset, size, "Userspace");
381
382 if (io_remap_pfn_range(vma, map->sq_addr, map->addr >> PAGE_SHIFT,
383 size, vma->vm_page_prot))
384 return -EAGAIN;
385
386 vma->vm_ops = &sq_vma_ops;
387
388 return 0;
389}
390
391#ifdef CONFIG_PROC_FS
392static int sq_mapping_read_proc(char *buf, char **start, off_t off,
393 int len, int *eof, void *data)
394{
395 struct list_head *pos;
396 char *p = buf;
397
398 list_for_each_prev(pos, &sq_mapping_list) {
399 struct sq_mapping *entry;
400
401 entry = list_entry(pos, typeof(*entry), list);
402
403 p += sprintf(p, "%08lx-%08lx [%08lx]: %s\n", entry->sq_addr,
404 entry->sq_addr + entry->size - 1, entry->addr,
405 entry->name);
406 }
407
408 return p - buf;
409}
410#endif
411
412static struct file_operations sq_fops = {
413 .owner = THIS_MODULE,
414 .mmap = sq_mmap,
415};
416
417static struct miscdevice sq_dev = {
418 .minor = STORE_QUEUE_MINOR,
419 .name = "sq",
420 .devfs_name = "cpu/sq",
421 .fops = &sq_fops,
422};
423
424static int __init sq_api_init(void)
425{
426 printk(KERN_NOTICE "sq: Registering store queue API.\n");
427
428#ifdef CONFIG_PROC_FS
429 create_proc_read_entry("sq_mapping", 0, 0, sq_mapping_read_proc, 0);
430#endif
431
432 return misc_register(&sq_dev);
433}
434
435static void __exit sq_api_exit(void)
436{
437 misc_deregister(&sq_dev);
438}
439
440module_init(sq_api_init);
441module_exit(sq_api_exit);
442
443MODULE_AUTHOR("Paul Mundt <lethal@linux-sh.org>, M. R. Brown <mrbrown@0xd6.org>");
444MODULE_DESCRIPTION("Simple API for SH-4 integrated Store Queues");
445MODULE_LICENSE("GPL");
446MODULE_ALIAS_MISCDEV(STORE_QUEUE_MINOR);
447
448EXPORT_SYMBOL(sq_remap);
449EXPORT_SYMBOL(sq_unmap);
450EXPORT_SYMBOL(sq_clear);
451EXPORT_SYMBOL(sq_flush);
452EXPORT_SYMBOL(sq_flush_range);
453
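
A hypothetical in-kernel user of the API exported above might look like the sketch below. The physical address and all names are made up for illustration; note also that in this version sq_remap() reports failure either as NULL or as an ERR_PTR() value depending on which path fails, so a cautious caller checks both:

#include <linux/init.h>
#include <linux/module.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <asm/cpu/sq.h>

static struct sq_mapping *example_map;

static int __init example_sq_user_init(void)
{
        /* Map 1MB of (assumed) device memory at 0x10000000 through the SQs. */
        example_map = sq_remap(0x10000000, 0x100000, "example");
        if (example_map == NULL)
                return -ENOMEM;
        if (IS_ERR(example_map))
                return PTR_ERR(example_map);

        /* Zero the window via the store queues, then tear it down again. */
        sq_clear(example_map->sq_addr, example_map->size);
        sq_unmap(example_map);

        return 0;
}
module_init(example_sq_user_init);
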
diff --git a/arch/sh/kernel/cpu/ubc.S b/arch/sh/kernel/cpu/ubc.S
new file mode 100644
index 000000000000..0c569b20e1c1
--- /dev/null
+++ b/arch/sh/kernel/cpu/ubc.S
@@ -0,0 +1,59 @@
1/*
2 * arch/sh/kernel/cpu/ubc.S
3 *
4 * Set of management routines for the User Break Controller (UBC)
5 *
6 * Copyright (C) 2002 Paul Mundt
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License as published by the
10 * Free Software Foundation; either version 2 of the License, or (at your
11 * option) any later version.
12 */
13#include <linux/linkage.h>
14#include <asm/ubc.h>
15
16#define STBCR2 0xffc00010
17
18ENTRY(ubc_sleep)
19 mov #0, r0
20
21 mov.l 1f, r1 ! Zero out UBC_BBRA ..
22 mov.w r0, @r1
23
24 mov.l 2f, r1 ! .. same for BBRB ..
25 mov.w r0, @r1
26
27 mov.l 3f, r1 ! .. and again for BRCR.
28 mov.w r0, @r1
29
30 mov.w @r1, r0 ! Dummy read BRCR
31
32 mov.l 4f, r1 ! Set MSTP5 in STBCR2
33 mov.b @r1, r0
34 or #0x01, r0
35 mov.b r0, @r1
36
37 mov.b @r1, r0 ! Two dummy reads ..
38 mov.b @r1, r0
39
40 rts
41 nop
42
43ENTRY(ubc_wakeup)
44 mov.l 4f, r1 ! Clear MSTP5
45 mov.b @r1, r0
46 and #0xfe, r0
47 mov.b r0, @r1
48
49 mov.b @r1, r0 ! Two more dummy reads ..
50 mov.b @r1, r0
51
52 rts
53 nop
54
551: .long UBC_BBRA
562: .long UBC_BBRB
573: .long UBC_BRCR
584: .long STBCR2
59
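
ubc_sleep()/ubc_wakeup() zero the UBC break registers and then toggle the MSTP5 module-stop bit in STBCR2. For reference, a C sketch of just the STBCR2 clock-gating half, using the ctrl_inb()/ctrl_outb() accessors from <asm/io.h> (the function names here are mine, not part of the patch):

#include <asm/io.h>

#define STBCR2  0xffc00010

/* C rendering of the STBCR2 half of ubc_sleep()/ubc_wakeup() above; the
 * UBC_BBRA/BBRB/BRCR writes are left to the assembly. */
static inline void ubc_clock_stop(void)
{
        ctrl_outb(ctrl_inb(STBCR2) | 0x01, STBCR2);     /* set MSTP5: stop the UBC clock */
        ctrl_inb(STBCR2);                               /* dummy reads, as in ubc_sleep */
        ctrl_inb(STBCR2);
}

static inline void ubc_clock_start(void)
{
        ctrl_outb(ctrl_inb(STBCR2) & ~0x01, STBCR2);    /* clear MSTP5: restart it */
        ctrl_inb(STBCR2);
        ctrl_inb(STBCR2);
}
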